Merge remote-tracking branch 'scott/next' into next

Merge Freescale updates
diff --git a/Documentation/DocBook/media/v4l/dev-codec.xml b/Documentation/DocBook/media/v4l/dev-codec.xml
index dca0ecd..ff44c16 100644
--- a/Documentation/DocBook/media/v4l/dev-codec.xml
+++ b/Documentation/DocBook/media/v4l/dev-codec.xml
@@ -1,18 +1,27 @@
   <title>Codec Interface</title>
 
-  <note>
-    <title>Suspended</title>
-
-    <para>This interface has been be suspended from the V4L2 API
-implemented in Linux 2.6 until we have more experience with codec
-device interfaces.</para>
-  </note>
-
   <para>A V4L2 codec can compress, decompress, transform, or otherwise
-convert video data from one format into another format, in memory.
-Applications send data to be converted to the driver through a
-&func-write; call, and receive the converted data through a
-&func-read; call. For efficiency a driver may also support streaming
-I/O.</para>
+convert video data from one format into another format, in memory. Typically
+such devices are memory-to-memory devices (i.e. devices with the
+<constant>V4L2_CAP_VIDEO_M2M</constant> or <constant>V4L2_CAP_VIDEO_M2M_MPLANE</constant>
+capability set).
+</para>
 
-  <para>[to do]</para>
+  <para>A memory-to-memory video node acts just like a normal video node, but it
+supports both output (sending frames from memory to the codec hardware) and
+capture (receiving the processed frames from the codec hardware into memory)
+stream I/O. An application will have to set up the stream
+I/O for both sides and finally call &VIDIOC-STREAMON; for both capture and output
+to start the codec.</para>
+
+  <para>Video compression codecs use the MPEG controls to set up their codec parameters
+(note that the MPEG controls actually support many more codecs than just MPEG).
+See <xref linkend="mpeg-controls"></xref>.</para>
+
+  <para>Memory-to-memory devices can often be used as a shared resource: you can
+open the video node multiple times, each application setting up their own codec properties
+that are local to the file handle, and each can use it independently from the others.
+The driver will arbitrate access to the codec and reprogram it whenever another file
+handle gets access. This is different from the usual video node behavior where the video properties
+are global to the device (i.e. changing something through one file handle is visible
+through another file handle).</para>
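
A rough user-space sketch of the sequence described above (not part of this
patch): the device node name and the YUV420-in/H.264-out formats are
assumptions made only for illustration, and buffer handling
(VIDIOC_REQBUFS/VIDIOC_QBUF/VIDIOC_DQBUF) plus error checking are omitted.

	/* Configure both queues of a mem2mem codec node on one file handle,
	 * then start streaming on both sides so the codec can run. */
	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/videodev2.h>

	static int start_codec(const char *node)
	{
		int fd = open(node, O_RDWR);	/* e.g. "/dev/video0" */
		struct v4l2_format out = { .type = V4L2_BUF_TYPE_VIDEO_OUTPUT };
		struct v4l2_format cap = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE };
		enum v4l2_buf_type t;

		if (fd < 0)
			return -1;

		/* Raw frames go to the hardware through the OUTPUT queue... */
		out.fmt.pix.pixelformat = V4L2_PIX_FMT_YUV420;
		ioctl(fd, VIDIOC_S_FMT, &out);

		/* ...and the compressed result comes back on the CAPTURE queue. */
		cap.fmt.pix.pixelformat = V4L2_PIX_FMT_H264;
		ioctl(fd, VIDIOC_S_FMT, &cap);

		/* VIDIOC_REQBUFS/VIDIOC_QBUF buffer setup would go here. */

		/* The codec only starts once both sides are streaming. */
		t = V4L2_BUF_TYPE_VIDEO_OUTPUT;
		ioctl(fd, VIDIOC_STREAMON, &t);
		t = V4L2_BUF_TYPE_VIDEO_CAPTURE;
		ioctl(fd, VIDIOC_STREAMON, &t);

		return fd;
	}
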
diff --git a/Documentation/DocBook/media/v4l/v4l2.xml b/Documentation/DocBook/media/v4l/v4l2.xml
index bfc93cd..bfe823d 100644
--- a/Documentation/DocBook/media/v4l/v4l2.xml
+++ b/Documentation/DocBook/media/v4l/v4l2.xml
@@ -493,7 +493,7 @@
 </partinfo>
 
 <title>Video for Linux Two API Specification</title>
- <subtitle>Revision 3.9</subtitle>
+ <subtitle>Revision 3.10</subtitle>
 
   <chapter id="common">
     &sub-common;
diff --git a/Documentation/devicetree/bindings/media/exynos-fimc-lite.txt b/Documentation/devicetree/bindings/media/exynos-fimc-lite.txt
index 3f62adf..de9f6b7 100644
--- a/Documentation/devicetree/bindings/media/exynos-fimc-lite.txt
+++ b/Documentation/devicetree/bindings/media/exynos-fimc-lite.txt
@@ -2,7 +2,7 @@
 
 Required properties:
 
-- compatible	: should be "samsung,exynos4212-fimc" for Exynos4212 and
+- compatible	: should be "samsung,exynos4212-fimc-lite" for Exynos4212 and
 		  Exynos4412 SoCs;
 - reg		: physical base address and size of the device memory mapped
 		  registers;
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index f98ca63..3458d63 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -420,10 +420,10 @@
 	for a passive TCP connection will happen after 63seconds.
 
 tcp_syncookies - BOOLEAN
-	Only valid when the kernel was compiled with CONFIG_SYNCOOKIES
+	Only valid when the kernel was compiled with CONFIG_SYN_COOKIES
 	Send out syncookies when the syn backlog queue of a socket
 	overflows. This is to prevent against the common 'SYN flood attack'
-	Default: FALSE
+	Default: 1
 
 	Note, that syncookies is fallback facility.
 	It MUST NOT be used to help highly loaded servers to stand
diff --git a/Documentation/powerpc/00-INDEX b/Documentation/powerpc/00-INDEX
index dd9e9280..05026ce 100644
--- a/Documentation/powerpc/00-INDEX
+++ b/Documentation/powerpc/00-INDEX
@@ -14,6 +14,8 @@
 	- IBM "Hypervisor Virtual Console Server" Installation Guide
 mpc52xx.txt
 	- Linux 2.6.x on MPC52xx family
+pmu-ebb.txt
+	- Description of the API for using the PMU with Event Based Branches.
 qe_firmware.txt
 	- describes the layout of firmware binaries for the Freescale QUICC
 	  Engine and the code that parses and uploads the microcode therein.
diff --git a/Documentation/powerpc/pmu-ebb.txt b/Documentation/powerpc/pmu-ebb.txt
new file mode 100644
index 0000000..73cd163
--- /dev/null
+++ b/Documentation/powerpc/pmu-ebb.txt
@@ -0,0 +1,137 @@
+PMU Event Based Branches
+========================
+
+Event Based Branches (EBBs) are a feature which allows the hardware to
+branch directly to a specified user space address when certain events occur.
+
+The full specification is available in Power ISA v2.07:
+
+  https://www.power.org/documentation/power-isa-version-2-07/
+
+One type of event for which EBBs can be configured is PMU exceptions. This
+document describes the API for configuring the Power PMU to generate EBBs,
+using the Linux perf_events API.
+
+
+Terminology
+-----------
+
+Throughout this document we will refer to an "EBB event" or "EBB events". This
+just refers to a struct perf_event which has set the "EBB" flag in its
+attr.config. All events which can be configured on the hardware PMU are
+possible "EBB events".
+
+
+Background
+----------
+
+When a PMU EBB occurs it is delivered to the currently running process. As such
+EBBs can only sensibly be used by programs for self-monitoring.
+
+It is a feature of the perf_events API that events can be created on other
+processes, subject to standard permission checks. This is also true of EBB
+events, however unless the target process enables EBBs (via mtspr(BESCR)) no
+EBBs will ever be delivered.
+
+This makes it possible for a process to enable EBBs for itself, but not
+actually configure any events. At a later time another process can come along
+and attach an EBB event to the process, which will then cause EBBs to be
+delivered to the first process. It's not clear if this is actually useful.
+
+
+When the PMU is configured for EBBs, all PMU interrupts are delivered to the
+user process. This means once an EBB event is scheduled on the PMU, no non-EBB
+events can be configured. This means that EBB events can not be run
+concurrently with regular 'perf' commands, or any other perf events.
+
+It is however safe to run 'perf' commands on a process which is using EBBs. The
+kernel will in general schedule the EBB event, and perf will be notified that
+its events could not run.
+
+The exclusion between EBB events and regular events is implemented using the
+existing "pinned" and "exclusive" attributes of perf_events. This means EBB
+events will be given priority over other events, unless they are also pinned.
+If an EBB event and a regular event are both pinned, then whichever is enabled
+first will be scheduled and the other will be put in error state. See the
+section below titled "Enabling an EBB event" for more information.
+
+
+Creating an EBB event
+---------------------
+
+To request that an event is counted using EBB, the event code should have bit
+63 set.
+
+EBB events must be created with a particular, and restrictive, set of
+attributes - this is so that they interoperate correctly with the rest of the
+perf_events subsystem.
+
+An EBB event must be created with the "pinned" and "exclusive" attributes set.
+Note that if you are creating a group of EBB events, only the leader can have
+these attributes set.
+
+An EBB event must NOT set any of the "inherit", "sample_period", "freq" or
+"enable_on_exec" attributes.
+
+An EBB event must be attached to a task. This is specified to perf_event_open()
+by passing a pid value, typically 0 indicating the current task.
+
+All events in a group must agree on whether they want EBB. That is, all events
+must request EBB, or none may request EBB.
+
+EBB events must specify the PMC they are to be counted on. This ensures
+userspace is able to reliably determine which PMC the event is scheduled on.
+
+
+Enabling an EBB event
+---------------------
+
+Once an EBB event has been successfully opened, it must be enabled with the
+perf_events API. This can be achieved either via the ioctl() interface, or the
+prctl() interface.
+
+However, due to the design of the perf_events API, enabling an event does not
+guarantee that it has been scheduled on the PMU. To ensure that the EBB event
+has been scheduled on the PMU, you must perform a read() on the event. If the
+read() returns EOF, then the event has not been scheduled and EBBs are not
+enabled.
+
+This behaviour occurs because the EBB event is pinned and exclusive. When the
+EBB event is enabled it will force all other non-pinned events off the PMU. In
+this case the enable will be successful. However if there is already an event
+pinned on the PMU then the enable will not be successful.
+
+
+Reading an EBB event
+--------------------
+
+It is possible to read() from an EBB event. However the results are
+meaningless. Because interrupts are being delivered to the user process the
+kernel is not able to count the event, and so will return a junk value.
+
+
+Closing an EBB event
+--------------------
+
+When an EBB event is finished with, you can close it using close() as for any
+regular event. If this is the last EBB event the PMU will be deconfigured and
+no further PMU EBBs will be delivered.
+
+
+EBB Handler
+-----------
+
+The EBB handler is just regular userspace code, however it must be written in
+the style of an interrupt handler. When the handler is entered all registers
+are potentially live and so must be saved somehow before the handler can invoke
+other code.
+
+It's up to the program how to handle this. For C programs a relatively simple
+option is to create an interrupt frame on the stack and save registers there.
+
+Fork
+----
+
+EBB events are not inherited across fork. If the child process wishes to use
+EBBs it should open a new event for itself. Similarly the EBB state in
+BESCR/EBBHR/EBBRR is cleared across fork().
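
To make the above concrete, here is a rough self-monitoring sketch (not part
of the new file) that follows the rules in "Creating an EBB event" and
"Enabling an EBB event": bit 63 of the raw event code requests EBB, the event
is pinned and exclusive, it is attached to the current task (pid 0), and a
read() confirms the event was actually scheduled. The raw event code itself,
including the PMC selection it must encode, is left to the caller; installing
the handler address in EBBHR and enabling EBBs in BESCR are separate steps not
shown here.

	#include <stdint.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/syscall.h>
	#include <linux/perf_event.h>

	static int open_ebb_event(uint64_t raw_event)
	{
		struct perf_event_attr attr;
		uint64_t count;
		int fd;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_RAW;
		attr.config = raw_event | (1ull << 63);	/* bit 63 requests EBB */
		attr.pinned = 1;			/* required for EBB events */
		attr.exclusive = 1;			/* required for EBB events */
		attr.disabled = 1;

		/* pid 0: attach the event to the current task */
		fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
		if (fd < 0)
			return -1;

		ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);

		/* A zero-byte read() (EOF) means the event was not scheduled
		 * on the PMU and EBBs are not enabled. */
		if (read(fd, &count, sizeof(count)) <= 0) {
			close(fd);
			return -1;
		}

		return fd;
	}
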
diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
index bb8b0dc..77d68e2 100644
--- a/Documentation/sound/alsa/HD-Audio-Models.txt
+++ b/Documentation/sound/alsa/HD-Audio-Models.txt
@@ -29,6 +29,8 @@
   alc271-dmic	Enable ALC271X digital mic workaround
   inv-dmic	Inverted internal mic workaround
   lenovo-dock   Enables docking station I/O for some Lenovos
+  dell-headset-multi	Headset jack, which can also be used as mic-in
+  dell-headset-dock	Headset jack (without mic-in), and also dock I/O
 
 ALC662/663/272
 ==============
@@ -42,6 +44,7 @@
   asus-mode7	ASUS
   asus-mode8	ASUS
   inv-dmic	Inverted internal mic workaround
+  dell-headset-multi	Headset jack, which can also be used as mic-in
 
 ALC680
 ======
diff --git a/MAINTAINERS b/MAINTAINERS
index 5be702c..b888b1a 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3103,6 +3103,13 @@
 S:	Maintained
 F:	drivers/media/rc/ene_ir.*
 
+ENHANCED ERROR HANDLING (EEH)
+M:	Gavin Shan <shangw@linux.vnet.ibm.com>
+L:	linuxppc-dev@lists.ozlabs.org
+S:	Supported
+F:	Documentation/powerpc/eeh-pci-error-recovery.txt
+F:	arch/powerpc/kernel/eeh*.c
+
 EPSON S1D13XXX FRAMEBUFFER DRIVER
 M:	Kristoffer Ericson <kristoffer.ericson@gmail.com>
 S:	Maintained
@@ -3220,7 +3227,7 @@
 
 FCOE SUBSYSTEM (libfc, libfcoe, fcoe)
 M:	Robert Love <robert.w.love@intel.com>
-L:	devel@open-fcoe.org
+L:	fcoe-devel@open-fcoe.org
 W:	www.Open-FCoE.org
 S:	Supported
 F:	drivers/scsi/libfc/
@@ -6149,7 +6156,6 @@
 L:	linux-pci@vger.kernel.org
 S:	Supported
 F:	Documentation/PCI/pci-error-recovery.txt
-F:	Documentation/powerpc/eeh-pci-error-recovery.txt
 
 PCI SUBSYSTEM
 M:	Bjorn Helgaas <bhelgaas@google.com>
diff --git a/Makefile b/Makefile
index 9040016..e5e3ba0 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 10
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION =
 NAME = Unicycling Gorilla
 
 # *DOCUMENTATION*
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 49d993c..136f263 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1087,6 +1087,20 @@
 source "arch/arm/Kconfig-nommu"
 endif
 
+config PJ4B_ERRATA_4742
+	bool "PJ4B Errata 4742: IDLE Wake Up Commands can Cause the CPU Core to Cease Operation"
+	depends on CPU_PJ4B && MACH_ARMADA_370
+	default y
+	help
+	  When coming out of either a Wait for Interrupt (WFI) or a Wait for
+	  Event (WFE) IDLE state, a specific timing sensitivity exists between
+	  the retiring WFI/WFE instructions and the newly issued subsequent
+	  instructions.  This sensitivity can result in a CPU hang scenario.
+	  Workaround:
+	  The software must insert either a Data Synchronization Barrier (DSB)
+	  or Data Memory Barrier (DMB) command immediately after the WFI/WFE
+	  instruction.
+
 config ARM_ERRATA_326103
 	bool "ARM errata: FSR write bit incorrect on a SWP to read-only memory"
 	depends on CPU_V6
@@ -1189,6 +1203,16 @@
 	   is not correctly implemented in PL310 as clean lines are not
 	   invalidated as a result of these operations.
 
+config ARM_ERRATA_643719
+	bool "ARM errata: LoUIS bit field in CLIDR register is incorrect"
+	depends on CPU_V7 && SMP
+	help
+	  This option enables the workaround for the 643719 Cortex-A9 (prior to
+	  r1p0) erratum. On affected cores the LoUIS bit field of the CLIDR
+	  register returns zero when it should return one. The workaround
+	  corrects this value, ensuring cache maintenance operations which use
+	  it behave as intended and avoiding data corruption.
+
 config ARM_ERRATA_720789
 	bool "ARM errata: TLBIASIDIS and TLBIMVAIS operations can broadcast a faulty ASID"
 	depends on CPU_V7
@@ -2006,7 +2030,7 @@
 
 config KEXEC
 	bool "Kexec system call (EXPERIMENTAL)"
-	depends on (!SMP || HOTPLUG_CPU)
+	depends on (!SMP || PM_SLEEP_SMP)
 	help
 	  kexec is a system call that implements the ability to shutdown your
 	  current kernel, and to start another kernel.  It is like a reboot
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 79e9bdb..120b83b 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -116,7 +116,8 @@
 
 # Make sure files are removed during clean
 extra-y       += piggy.gzip piggy.lzo piggy.lzma piggy.xzkern \
-		 lib1funcs.S ashldi3.S $(libfdt) $(libfdt_hdrs)
+		 lib1funcs.S ashldi3.S $(libfdt) $(libfdt_hdrs) \
+		 hyp-stub.S
 
 ifeq ($(CONFIG_FUNCTION_TRACER),y)
 ORIG_CFLAGS := $(KBUILD_CFLAGS)
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index 1460d9b..8e1248f 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -409,8 +409,8 @@
 			ti,hwmods = "gpmc";
 			reg = <0x50000000 0x2000>;
 			interrupts = <100>;
-			num-cs = <7>;
-			num-waitpins = <2>;
+			gpmc,num-cs = <7>;
+			gpmc,num-waitpins = <2>;
 			#address-cells = <2>;
 			#size-cells = <1>;
 			status = "disabled";
diff --git a/arch/arm/boot/dts/armada-xp-gp.dts b/arch/arm/boot/dts/armada-xp-gp.dts
index 3ee63d1..76db557 100644
--- a/arch/arm/boot/dts/armada-xp-gp.dts
+++ b/arch/arm/boot/dts/armada-xp-gp.dts
@@ -39,8 +39,9 @@
 	};
 
 	soc {
-		ranges = <0          0 0xd0000000 0x100000
-			  0xf0000000 0 0xf0000000 0x1000000>;
+		ranges = <0          0 0xd0000000 0x100000  /* Internal registers 1MiB */
+			  0xe0000000 0 0xe0000000 0x8100000 /* PCIe */
+			  0xf0000000 0 0xf0000000 0x1000000 /* Device Bus, NOR 16MiB  */>;
 
 		internal-regs {
 			serial@12000 {
diff --git a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
index 46b7850..fdea75c 100644
--- a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
+++ b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
@@ -27,8 +27,9 @@
 	};
 
 	soc {
-		ranges = <0          0 0xd0000000 0x100000
-			  0xf0000000 0 0xf0000000 0x8000000>;
+		ranges = <0          0 0xd0000000 0x100000	/* Internal registers 1MiB */
+			  0xe0000000 0 0xe0000000 0x8100000     /* PCIe */
+			  0xf0000000 0 0xf0000000 0x8000000     /* Device Bus, NOR 128MiB   */>;
 
 		internal-regs {
 			serial@12000 {
diff --git a/arch/arm/boot/dts/exynos5250-pinctrl.dtsi b/arch/arm/boot/dts/exynos5250-pinctrl.dtsi
index d1650fb..ded558b 100644
--- a/arch/arm/boot/dts/exynos5250-pinctrl.dtsi
+++ b/arch/arm/boot/dts/exynos5250-pinctrl.dtsi
@@ -763,7 +763,7 @@
 		};
 	};
 
-	pinctrl@03680000 {
+	pinctrl@03860000 {
 		gpz: gpz {
 			gpio-controller;
 			#gpio-cells = <2>;
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index 0673524..fc9fb3d 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -161,9 +161,9 @@
 		interrupts = <0 50 0>;
 	};
 
-	pinctrl_3: pinctrl@03680000 {
+	pinctrl_3: pinctrl@03860000 {
 		compatible = "samsung,exynos5250-pinctrl";
-		reg = <0x0368000 0x1000>;
+		reg = <0x03860000 0x1000>;
 		interrupts = <0 47 0>;
 	};
 
diff --git a/arch/arm/boot/dts/omap4-panda-common.dtsi b/arch/arm/boot/dts/omap4-panda-common.dtsi
index 03bd60d..eeb734e 100644
--- a/arch/arm/boot/dts/omap4-panda-common.dtsi
+++ b/arch/arm/boot/dts/omap4-panda-common.dtsi
@@ -56,9 +56,23 @@
 	};
 };
 
+&omap4_pmx_wkup {
+	pinctrl-names = "default";
+	pinctrl-0 = <
+			&twl6030_wkup_pins
+	>;
+
+	twl6030_wkup_pins: pinmux_twl6030_wkup_pins {
+		pinctrl-single,pins = <
+			0x14 0x2        /* fref_clk0_out.sys_drm_msecure OUTPUT | MODE2 */
+		>;
+	};
+};
+
 &omap4_pmx_core {
 	pinctrl-names = "default";
 	pinctrl-0 = <
+			&twl6030_pins
 			&twl6040_pins
 			&mcpdm_pins
 			&mcbsp1_pins
@@ -66,6 +80,12 @@
 			&tpd12s015_pins
 	>;
 
+	twl6030_pins: pinmux_twl6030_pins {
+		pinctrl-single,pins = <
+			0x15e 0x4118	/* sys_nirq1.sys_nirq1 OMAP_WAKEUP_EN | INPUT_PULLUP | MODE0 */
+		>;
+	};
+
 	twl6040_pins: pinmux_twl6040_pins {
 		pinctrl-single,pins = <
 			0xe0 0x3	/* hdq_sio.gpio_127 OUTPUT | MODE3 */
diff --git a/arch/arm/boot/dts/omap4-sdp.dts b/arch/arm/boot/dts/omap4-sdp.dts
index a35d9cd..98505a2 100644
--- a/arch/arm/boot/dts/omap4-sdp.dts
+++ b/arch/arm/boot/dts/omap4-sdp.dts
@@ -142,9 +142,23 @@
 	};
 };
 
+&omap4_pmx_wkup {
+	pinctrl-names = "default";
+	pinctrl-0 = <
+			&twl6030_wkup_pins
+	>;
+
+	twl6030_wkup_pins: pinmux_twl6030_wkup_pins {
+		pinctrl-single,pins = <
+			0x14 0x2        /* fref_clk0_out.sys_drm_msecure OUTPUT | MODE2 */
+		>;
+	};
+};
+
 &omap4_pmx_core {
 	pinctrl-names = "default";
 	pinctrl-0 = <
+			&twl6030_pins
 			&twl6040_pins
 			&mcpdm_pins
 			&dmic_pins
@@ -179,6 +193,12 @@
 		>;
 	};
 
+	twl6030_pins: pinmux_twl6030_pins {
+		pinctrl-single,pins = <
+			0x15e 0x4118	/* sys_nirq1.sys_nirq1 OMAP_WAKEUP_EN | INPUT_PULLUP | MODE0 */
+		>;
+	};
+
 	twl6040_pins: pinmux_twl6040_pins {
 		pinctrl-single,pins = <
 			0xe0 0x3	/* hdq_sio.gpio_127 OUTPUT | MODE3 */
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index 3dd7ff8..635cae2 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -538,6 +538,7 @@
 			interrupts = <0 41 0x4>;
 			ti,hwmods = "timer5";
 			ti,timer-dsp;
+			ti,timer-pwm;
 		};
 
 		timer6: timer@4013a000 {
@@ -574,6 +575,7 @@
 			reg = <0x4803e000 0x80>;
 			interrupts = <0 45 0x4>;
 			ti,hwmods = "timer9";
+			ti,timer-pwm;
 		};
 
 		timer10: timer@48086000 {
@@ -581,6 +583,7 @@
 			reg = <0x48086000 0x80>;
 			interrupts = <0 46 0x4>;
 			ti,hwmods = "timer10";
+			ti,timer-pwm;
 		};
 
 		timer11: timer@48088000 {
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index bff7138..17d0ae8 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -320,9 +320,7 @@
 }
 
 #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
-static inline void flush_kernel_dcache_page(struct page *page)
-{
-}
+extern void flush_kernel_dcache_page(struct page *);
 
 #define flush_dcache_mmap_lock(mapping) \
 	spin_lock_irq(&(mapping)->tree_lock)
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 7652712..dba62cb 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -32,6 +32,8 @@
 
 #define MPIDR_HWID_BITMASK 0xFFFFFF
 
+#define MPIDR_INVALID (~MPIDR_HWID_BITMASK)
+
 #define MPIDR_LEVEL_BITS 8
 #define MPIDR_LEVEL_MASK ((1 << MPIDR_LEVEL_BITS) - 1)
 
diff --git a/arch/arm/include/asm/glue-proc.h b/arch/arm/include/asm/glue-proc.h
index ac1dd54..8017e94 100644
--- a/arch/arm/include/asm/glue-proc.h
+++ b/arch/arm/include/asm/glue-proc.h
@@ -230,6 +230,15 @@
 # endif
 #endif
 
+#ifdef CONFIG_CPU_PJ4B
+# ifdef CPU_NAME
+#  undef  MULTI_CPU
+#  define MULTI_CPU
+# else
+#  define CPU_NAME cpu_pj4b
+# endif
+#endif
+
 #ifndef MULTI_CPU
 #define cpu_proc_init			__glue(CPU_NAME,_proc_init)
 #define cpu_proc_fin			__glue(CPU_NAME,_proc_fin)
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index aaa61b6..e789832 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -49,7 +49,7 @@
 /*
  * Logical CPU mapping.
  */
-extern int __cpu_logical_map[];
+extern u32 __cpu_logical_map[];
 #define cpu_logical_map(cpu)	__cpu_logical_map[cpu]
 /*
  * Retrieve logical cpu index corresponding to a given MPIDR[23:0]
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index 5af04f6..5859c8b 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -82,7 +82,7 @@
 	u32 i, j, cpuidx = 1;
 	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
 
-	u32 tmp_map[NR_CPUS] = { [0 ... NR_CPUS-1] = UINT_MAX };
+	u32 tmp_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };
 	bool bootcpu_valid = false;
 	cpus = of_find_node_by_path("/cpus");
 
@@ -92,6 +92,9 @@
 	for_each_child_of_node(cpus, cpu) {
 		u32 hwid;
 
+		if (of_node_cmp(cpu->type, "cpu"))
+			continue;
+
 		pr_debug(" * %s...\n", cpu->full_name);
 		/*
 		 * A device tree containing CPU nodes with missing "reg"
@@ -149,9 +152,10 @@
 		tmp_map[i] = hwid;
 	}
 
-	if (WARN(!bootcpu_valid, "DT missing boot CPU MPIDR[23:0], "
-				 "fall back to default cpu_logical_map\n"))
+	if (!bootcpu_valid) {
+		pr_warn("DT missing boot CPU MPIDR[23:0], fall back to default cpu_logical_map\n");
 		return;
+	}
 
 	/*
 	 * Since the boot CPU node contains proper data, and all nodes have
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 8ef8c93..4fb074c 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -134,6 +134,10 @@
 	unsigned long reboot_code_buffer_phys;
 	void *reboot_code_buffer;
 
+	if (num_online_cpus() > 1) {
+		pr_err("kexec: error: multiple CPUs still online\n");
+		return;
+	}
 
 	page_list = image->head & PAGE_MASK;
 
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 282de48..6e8931c 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -184,30 +184,61 @@
 
 __setup("reboot=", reboot_setup);
 
+/*
+ * Called by kexec, immediately prior to machine_kexec().
+ *
+ * This must completely disable all secondary CPUs; simply causing those CPUs
+ * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
+ * kexec'd kernel to use any and all RAM as it sees fit, without having to
+ * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
+ * functionality embodied in disable_nonboot_cpus() is used to achieve this.
+ */
 void machine_shutdown(void)
 {
-#ifdef CONFIG_SMP
-	smp_send_stop();
-#endif
+	disable_nonboot_cpus();
 }
 
+/*
+ * Halting simply requires that the secondary CPUs stop performing any
+ * activity (executing tasks, handling interrupts). smp_send_stop()
+ * achieves this.
+ */
 void machine_halt(void)
 {
-	machine_shutdown();
+	smp_send_stop();
+
 	local_irq_disable();
 	while (1);
 }
 
+/*
+ * Power-off simply requires that the secondary CPUs stop performing any
+ * activity (executing tasks, handling interrupts). smp_send_stop()
+ * achieves this. When the system power is turned off, it will take all CPUs
+ * with it.
+ */
 void machine_power_off(void)
 {
-	machine_shutdown();
+	smp_send_stop();
+
 	if (pm_power_off)
 		pm_power_off();
 }
 
+/*
+ * Restart requires that the secondary CPUs stop performing any activity
+ * while the primary CPU resets the system. Systems with a single CPU can
+ * use soft_restart() as their machine descriptor's .restart hook, since that
+ * will cause the only available CPU to reset. Systems with multiple CPUs must
+ * provide a HW restart implementation, to ensure that all CPUs reset at once.
+ * This is required so that any code running after reset on the primary CPU
+ * doesn't have to co-ordinate with other CPUs to ensure they aren't still
+ * executing pre-reset code, and using RAM that the primary CPU's code wishes
+ * to use. Implementing such co-ordination would be essentially impossible.
+ */
 void machine_restart(char *cmd)
 {
-	machine_shutdown();
+	smp_send_stop();
 
 	arm_pm_restart(reboot_mode, cmd);
 
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 1522c7a..b4b1d39 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -444,7 +444,7 @@
 	    : "r14");
 }
 
-int __cpu_logical_map[NR_CPUS];
+u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };
 
 void __init smp_setup_processor_id(void)
 {
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 550d63c..5919eb4 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -651,17 +651,6 @@
 	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-static void smp_kill_cpus(cpumask_t *mask)
-{
-	unsigned int cpu;
-	for_each_cpu(cpu, mask)
-		platform_cpu_kill(cpu);
-}
-#else
-static void smp_kill_cpus(cpumask_t *mask) { }
-#endif
-
 void smp_send_stop(void)
 {
 	unsigned long timeout;
@@ -679,8 +668,6 @@
 
 	if (num_online_cpus() > 1)
 		pr_warning("SMP: failed to stop secondary CPUs\n");
-
-	smp_kill_cpus(&mask);
 }
 
 /*
diff --git a/arch/arm/mach-kirkwood/mpp.c b/arch/arm/mach-kirkwood/mpp.c
index 827cde4..e96fd71 100644
--- a/arch/arm/mach-kirkwood/mpp.c
+++ b/arch/arm/mach-kirkwood/mpp.c
@@ -22,9 +22,10 @@
 
 	kirkwood_pcie_id(&dev, &rev);
 
-	if ((dev == MV88F6281_DEV_ID && rev >= MV88F6281_REV_A0) ||
-	    (dev == MV88F6282_DEV_ID))
+	if (dev == MV88F6281_DEV_ID && rev >= MV88F6281_REV_A0)
 		return MPP_F6281_MASK;
+	if (dev == MV88F6282_DEV_ID)
+		return MPP_F6282_MASK;
 	if (dev == MV88F6192_DEV_ID && rev >= MV88F6192_REV_A0)
 		return MPP_F6192_MASK;
 	if (dev == MV88F6180_DEV_ID)
diff --git a/arch/arm/mach-omap2/clock36xx.c b/arch/arm/mach-omap2/clock36xx.c
index 8f3bf4e..bbd6a3f 100644
--- a/arch/arm/mach-omap2/clock36xx.c
+++ b/arch/arm/mach-omap2/clock36xx.c
@@ -20,11 +20,12 @@
 
 #include <linux/kernel.h>
 #include <linux/clk.h>
+#include <linux/clk-provider.h>
 #include <linux/io.h>
 
 #include "clock.h"
 #include "clock36xx.h"
-
+#define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
 
 /**
  * omap36xx_pwrdn_clk_enable_with_hsdiv_restore - enable clocks suffering
@@ -39,29 +40,28 @@
  */
 int omap36xx_pwrdn_clk_enable_with_hsdiv_restore(struct clk_hw *clk)
 {
-	struct clk_hw_omap *parent;
+	struct clk_divider *parent;
 	struct clk_hw *parent_hw;
-	u32 dummy_v, orig_v, clksel_shift;
+	u32 dummy_v, orig_v;
 	int ret;
 
 	/* Clear PWRDN bit of HSDIVIDER */
 	ret = omap2_dflt_clk_enable(clk);
 
 	parent_hw = __clk_get_hw(__clk_get_parent(clk->clk));
-	parent = to_clk_hw_omap(parent_hw);
+	parent = to_clk_divider(parent_hw);
 
 	/* Restore the dividers */
 	if (!ret) {
-		clksel_shift = __ffs(parent->clksel_mask);
-		orig_v = __raw_readl(parent->clksel_reg);
+		orig_v = __raw_readl(parent->reg);
 		dummy_v = orig_v;
 
 		/* Write any other value different from the Read value */
-		dummy_v ^= (1 << clksel_shift);
-		__raw_writel(dummy_v, parent->clksel_reg);
+		dummy_v ^= (1 << parent->shift);
+		__raw_writel(dummy_v, parent->reg);
 
 		/* Write the original divider */
-		__raw_writel(orig_v, parent->clksel_reg);
+		__raw_writel(orig_v, parent->reg);
 	}
 
 	return ret;
diff --git a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
index 075f7cc..69337af 100644
--- a/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_33xx_data.c
@@ -2007,6 +2007,13 @@
 	},
 };
 
+/* uart2 */
+static struct omap_hwmod_dma_info uart2_edma_reqs[] = {
+	{ .name = "tx",	.dma_req = 28, },
+	{ .name = "rx",	.dma_req = 29, },
+	{ .dma_req = -1 }
+};
+
 static struct omap_hwmod_irq_info am33xx_uart2_irqs[] = {
 	{ .irq = 73 + OMAP_INTC_START, },
 	{ .irq = -1 },
@@ -2018,7 +2025,7 @@
 	.clkdm_name	= "l4ls_clkdm",
 	.flags		= HWMOD_SWSUP_SIDLE_ACT,
 	.mpu_irqs	= am33xx_uart2_irqs,
-	.sdma_reqs	= uart1_edma_reqs,
+	.sdma_reqs	= uart2_edma_reqs,
 	.main_clk	= "dpll_per_m2_div4_ck",
 	.prcm		= {
 		.omap4	= {
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index c018593..5a2d803 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -546,8 +546,10 @@
 	/* Clear any pending PRCM interrupts */
 	omap2_prm_write_mod_reg(0, OCP_MOD, OMAP3_PRM_IRQSTATUS_MPU_OFFSET);
 
-	if (omap3_has_iva())
-		omap3_iva_idle();
+	/*
+	 * We need to idle iva2_pwrdm even on am3703 with no iva2.
+	 */
+	omap3_iva_idle();
 
 	omap3_d2d_idle();
 }
diff --git a/arch/arm/mach-prima2/pm.c b/arch/arm/mach-prima2/pm.c
index 9936c18..8f595c0 100644
--- a/arch/arm/mach-prima2/pm.c
+++ b/arch/arm/mach-prima2/pm.c
@@ -101,8 +101,10 @@
 	struct device_node *np;
 
 	np = of_find_matching_node(NULL, pwrc_ids);
-	if (!np)
-		panic("unable to find compatible pwrc node in dtb\n");
+	if (!np) {
+		pr_err("unable to find compatible sirf pwrc node in dtb\n");
+		return -ENOENT;
+	}
 
 	/*
 	 * pwrc behind rtciobrg is not located in memory space
diff --git a/arch/arm/mach-prima2/rstc.c b/arch/arm/mach-prima2/rstc.c
index 435019c..d5e0cbc 100644
--- a/arch/arm/mach-prima2/rstc.c
+++ b/arch/arm/mach-prima2/rstc.c
@@ -28,8 +28,10 @@
 	struct device_node *np;
 
 	np = of_find_matching_node(NULL, rstc_ids);
-	if (!np)
-		panic("unable to find compatible rstc node in dtb\n");
+	if (!np) {
+		pr_err("unable to find compatible sirf rstc node in dtb\n");
+		return -ENOENT;
+	}
 
 	sirfsoc_rstc_base = of_iomap(np, 0);
 	if (!sirfsoc_rstc_base)
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 15451ee..515b000 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -92,6 +92,14 @@
 	mrc	p15, 1, r0, c0, c0, 1		@ read clidr, r0 = clidr
 	ALT_SMP(ands	r3, r0, #(7 << 21))	@ extract LoUIS from clidr
 	ALT_UP(ands	r3, r0, #(7 << 27))	@ extract LoUU from clidr
+#ifdef CONFIG_ARM_ERRATA_643719
+	ALT_SMP(mrceq	p15, 0, r2, c0, c0, 0)	@ read main ID register
+	ALT_UP(moveq	pc, lr)			@ LoUU is zero, so nothing to do
+	ldreq	r1, =0x410fc090                 @ ID of ARM Cortex A9 r0p?
+	biceq	r2, r2, #0x0000000f             @ clear minor revision number
+	teqeq	r2, r1                          @ test for errata affected core and if so...
+	orreqs	r3, #(1 << 21)			@   fix LoUIS value (and set flags state to 'ne')
+#endif
 	ALT_SMP(mov	r3, r3, lsr #20)	@ r3 = LoUIS * 2
 	ALT_UP(mov	r3, r3, lsr #26)	@ r3 = LoUU * 2
 	moveq	pc, lr				@ return if level == 0
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 0d473cc..32aa586 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -301,6 +301,39 @@
 EXPORT_SYMBOL(flush_dcache_page);
 
 /*
+ * Ensure cache coherency for the kernel mapping of this page. We can
+ * assume that the page is pinned via kmap.
+ *
+ * If the page only exists in the page cache and there are no user
+ * space mappings, this is a no-op since the page was already marked
+ * dirty at creation.  Otherwise, we need to flush the dirty kernel
+ * cache lines directly.
+ */
+void flush_kernel_dcache_page(struct page *page)
+{
+	if (cache_is_vivt() || cache_is_vipt_aliasing()) {
+		struct address_space *mapping;
+
+		mapping = page_mapping(page);
+
+		if (!mapping || mapping_mapped(mapping)) {
+			void *addr;
+
+			addr = page_address(page);
+			/*
+			 * kmap_atomic() doesn't set the page virtual
+			 * address for highmem pages, and
+			 * kunmap_atomic() takes care of cache
+			 * flushing already.
+			 */
+			if (!IS_ENABLED(CONFIG_HIGHMEM) || addr)
+				__cpuc_flush_dcache_area(addr, PAGE_SIZE);
+		}
+	}
+}
+EXPORT_SYMBOL(flush_kernel_dcache_page);
+
+/*
  * Flush an anonymous page so that users of get_user_pages()
  * can safely access the data.  The expected sequence is:
  *
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index e0d8565..4d409e6 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -616,10 +616,12 @@
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
 
-static void __init map_init_section(pmd_t *pmd, unsigned long addr,
+static void __init __map_init_section(pmd_t *pmd, unsigned long addr,
 			unsigned long end, phys_addr_t phys,
 			const struct mem_type *type)
 {
+	pmd_t *p = pmd;
+
 #ifndef CONFIG_ARM_LPAE
 	/*
 	 * In classic MMU format, puds and pmds are folded in to
@@ -638,7 +640,7 @@
 		phys += SECTION_SIZE;
 	} while (pmd++, addr += SECTION_SIZE, addr != end);
 
-	flush_pmd_entry(pmd);
+	flush_pmd_entry(p);
 }
 
 static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
@@ -661,7 +663,7 @@
 		 */
 		if (type->prot_sect &&
 				((addr | next | phys) & ~SECTION_MASK) == 0) {
-			map_init_section(pmd, addr, next, phys, type);
+			__map_init_section(pmd, addr, next, phys, type);
 		} else {
 			alloc_init_pte(pmd, addr, next,
 						__phys_to_pfn(phys), type);
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index d51225f..eb5293a 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -57,6 +57,12 @@
 }
 EXPORT_SYMBOL(flush_dcache_page);
 
+void flush_kernel_dcache_page(struct page *page)
+{
+	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
+}
+EXPORT_SYMBOL(flush_kernel_dcache_page);
+
 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 		       unsigned long uaddr, void *dst, const void *src,
 		       unsigned long len)
diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S
index d217e97..aaeb6c1 100644
--- a/arch/arm/mm/proc-fa526.S
+++ b/arch/arm/mm/proc-fa526.S
@@ -81,7 +81,6 @@
  */
 	.align	4
 ENTRY(cpu_fa526_do_idle)
-	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
 	mov	pc, lr
 
 
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index f9a0aa7..e3c48a3 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -333,3 +333,8 @@
 	.endif
 	.size	\name\()_tlb_fns, . - \name\()_tlb_fns
 .endm
+
+.macro globl_equ x, y
+	.globl	\x
+	.equ	\x, \y
+.endm
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 2c73a73..e35fec3 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -140,6 +140,29 @@
 ENDPROC(cpu_v7_do_resume)
 #endif
 
+#ifdef CONFIG_CPU_PJ4B
+	globl_equ	cpu_pj4b_switch_mm,     cpu_v7_switch_mm
+	globl_equ	cpu_pj4b_set_pte_ext,	cpu_v7_set_pte_ext
+	globl_equ	cpu_pj4b_proc_init,	cpu_v7_proc_init
+	globl_equ	cpu_pj4b_proc_fin, 	cpu_v7_proc_fin
+	globl_equ	cpu_pj4b_reset,	   	cpu_v7_reset
+#ifdef CONFIG_PJ4B_ERRATA_4742
+ENTRY(cpu_pj4b_do_idle)
+	dsb					@ WFI may enter a low-power mode
+	wfi
+	dsb					@barrier
+	mov	pc, lr
+ENDPROC(cpu_pj4b_do_idle)
+#else
+	globl_equ	cpu_pj4b_do_idle,  	cpu_v7_do_idle
+#endif
+	globl_equ	cpu_pj4b_dcache_clean_area,	cpu_v7_dcache_clean_area
+	globl_equ	cpu_pj4b_do_suspend,	cpu_v7_do_suspend
+	globl_equ	cpu_pj4b_do_resume,	cpu_v7_do_resume
+	globl_equ	cpu_pj4b_suspend_size,	cpu_v7_suspend_size
+
+#endif
+
 	__CPUINIT
 
 /*
@@ -350,6 +373,9 @@
 
 	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
 	define_processor_functions v7, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
+#ifdef CONFIG_CPU_PJ4B
+	define_processor_functions pj4b, dabort=v7_early_abort, pabort=v7_pabort, suspend=1
+#endif
 
 	.section ".rodata"
 
@@ -362,7 +388,7 @@
 	/*
 	 * Standard v7 proc info content
 	 */
-.macro __v7_proc initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0
+.macro __v7_proc initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0, proc_fns = v7_processor_functions
 	ALT_SMP(.long	PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \
 			PMD_SECT_AF | PMD_FLAGS_SMP | \mm_mmuflags)
 	ALT_UP(.long	PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \
@@ -375,7 +401,7 @@
 	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_FAST_MULT | \
 		HWCAP_EDSP | HWCAP_TLS | \hwcaps
 	.long	cpu_v7_name
-	.long	v7_processor_functions
+	.long	\proc_fns
 	.long	v7wbi_tlb_fns
 	.long	v6_user_fns
 	.long	v7_cache_fns
@@ -407,12 +433,14 @@
 	/*
 	 * Marvell PJ4B processor.
 	 */
+#ifdef CONFIG_CPU_PJ4B
 	.type   __v7_pj4b_proc_info, #object
 __v7_pj4b_proc_info:
-	.long	0x562f5840
-	.long	0xfffffff0
-	__v7_proc __v7_pj4b_setup
+	.long	0x560f5800
+	.long	0xff0fff00
+	__v7_proc __v7_pj4b_setup, proc_fns = pj4b_processor_functions
 	.size	__v7_pj4b_proc_info, . - __v7_pj4b_proc_info
+#endif
 
 	/*
 	 * ARM Ltd. Cortex A7 processor.
diff --git a/arch/arm/plat-samsung/pm.c b/arch/arm/plat-samsung/pm.c
index 53210ec..bd7124c 100644
--- a/arch/arm/plat-samsung/pm.c
+++ b/arch/arm/plat-samsung/pm.c
@@ -16,6 +16,7 @@
 #include <linux/suspend.h>
 #include <linux/errno.h>
 #include <linux/delay.h>
+#include <linux/of.h>
 #include <linux/serial_core.h>
 #include <linux/io.h>
 
@@ -261,7 +262,8 @@
 	 * require a full power-cycle)
 	*/
 
-	if (!any_allowed(s3c_irqwake_intmask, s3c_irqwake_intallow) &&
+	if (!of_have_populated_dt() &&
+	    !any_allowed(s3c_irqwake_intmask, s3c_irqwake_intallow) &&
 	    !any_allowed(s3c_irqwake_eintmask, s3c_irqwake_eintallow)) {
 		printk(KERN_ERR "%s: No wake-up sources!\n", __func__);
 		printk(KERN_ERR "%s: Aborting sleep\n", __func__);
@@ -270,8 +272,11 @@
 
 	/* save all necessary core registers not covered by the drivers */
 
-	samsung_pm_save_gpios();
-	samsung_pm_saved_gpios();
+	if (!of_have_populated_dt()) {
+		samsung_pm_save_gpios();
+		samsung_pm_saved_gpios();
+	}
+
 	s3c_pm_save_uarts();
 	s3c_pm_save_core();
 
@@ -310,8 +315,11 @@
 
 	s3c_pm_restore_core();
 	s3c_pm_restore_uarts();
-	samsung_pm_restore_gpios();
-	s3c_pm_restored_gpios();
+
+	if (!of_have_populated_dt()) {
+		samsung_pm_restore_gpios();
+		s3c_pm_restored_gpios();
+	}
 
 	s3c_pm_debug_init();
 
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 1e49e5eb..9ba33c4 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -1336,6 +1336,7 @@
 		return;
 	}
 
+	perf_callchain_store(entry, regs->pc);
 	tail = (struct frame_tail __user *)regs->regs[29];
 
 	while (entry->nr < PERF_MAX_STACK_DEPTH &&
diff --git a/arch/ia64/include/asm/irqflags.h b/arch/ia64/include/asm/irqflags.h
index 1bf2cf2..cec6c06 100644
--- a/arch/ia64/include/asm/irqflags.h
+++ b/arch/ia64/include/asm/irqflags.h
@@ -11,6 +11,7 @@
 #define _ASM_IA64_IRQFLAGS_H
 
 #include <asm/pal.h>
+#include <asm/kregs.h>
 
 #ifdef CONFIG_IA64_DEBUG_IRQ
 extern unsigned long last_cli_ip;
diff --git a/arch/metag/include/asm/hugetlb.h b/arch/metag/include/asm/hugetlb.h
index f545477..471f481 100644
--- a/arch/metag/include/asm/hugetlb.h
+++ b/arch/metag/include/asm/hugetlb.h
@@ -2,6 +2,7 @@
 #define _ASM_METAG_HUGETLB_H
 
 #include <asm/page.h>
+#include <asm-generic/hugetlb.h>
 
 
 static inline int is_hugepage_only_range(struct mm_struct *mm,
diff --git a/arch/mn10300/include/asm/irqflags.h b/arch/mn10300/include/asm/irqflags.h
index 678f68d..8730c0a 100644
--- a/arch/mn10300/include/asm/irqflags.h
+++ b/arch/mn10300/include/asm/irqflags.h
@@ -13,9 +13,8 @@
 #define _ASM_IRQFLAGS_H
 
 #include <asm/cpu-regs.h>
-#ifndef __ASSEMBLY__
-#include <linux/smp.h>
-#endif
+/* linux/smp.h <- linux/irqflags.h needs asm/smp.h first */
+#include <asm/smp.h>
 
 /*
  * interrupt control
diff --git a/arch/mn10300/include/asm/smp.h b/arch/mn10300/include/asm/smp.h
index 6745dbe..56c4241 100644
--- a/arch/mn10300/include/asm/smp.h
+++ b/arch/mn10300/include/asm/smp.h
@@ -24,6 +24,7 @@
 #ifndef __ASSEMBLY__
 #include <linux/threads.h>
 #include <linux/cpumask.h>
+#include <linux/thread_info.h>
 #endif
 
 #ifdef CONFIG_SMP
@@ -85,7 +86,7 @@
 extern void smp_init_cpus(void);
 extern void smp_cache_interrupt(void);
 extern void send_IPI_allbutself(int irq);
-extern int smp_nmi_call_function(smp_call_func_t func, void *info, int wait);
+extern int smp_nmi_call_function(void (*func)(void *), void *info, int wait);
 
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
@@ -100,6 +101,7 @@
 #ifndef __ASSEMBLY__
 
 static inline void smp_init_cpus(void) {}
+#define raw_smp_processor_id() 0
 
 #endif /* __ASSEMBLY__ */
 #endif /* CONFIG_SMP */
diff --git a/arch/mn10300/include/asm/uaccess.h b/arch/mn10300/include/asm/uaccess.h
index 780560b..d7966e0 100644
--- a/arch/mn10300/include/asm/uaccess.h
+++ b/arch/mn10300/include/asm/uaccess.h
@@ -161,7 +161,7 @@
 
 #define __get_user_check(x, ptr, size)					\
 ({									\
-	const __typeof__(ptr) __guc_ptr = (ptr);			\
+	const __typeof__(*(ptr))* __guc_ptr = (ptr);			\
 	int _e;								\
 	if (likely(__access_ok((unsigned long) __guc_ptr, (size))))	\
 		_e = __get_user_nocheck((x), __guc_ptr, (size));	\
diff --git a/arch/mn10300/kernel/setup.c b/arch/mn10300/kernel/setup.c
index 33c3bd1..ebac9c1 100644
--- a/arch/mn10300/kernel/setup.c
+++ b/arch/mn10300/kernel/setup.c
@@ -38,6 +38,7 @@
 /* For PCI or other memory-mapped resources */
 unsigned long pci_mem_start = 0x18000000;
 
+static char __initdata cmd_line[COMMAND_LINE_SIZE];
 char redboot_command_line[COMMAND_LINE_SIZE] =
 	"console=ttyS0,115200 root=/dev/mtdblock3 rw";
 
@@ -74,45 +75,19 @@
 };
 
 /*
- *
+ * Pick out the memory size.  We look for mem=size,
+ * where size is "size[KkMm]"
  */
-static void __init parse_mem_cmdline(char **cmdline_p)
+static int __init early_mem(char *p)
 {
-	char *from, *to, c;
-
-	/* save unparsed command line copy for /proc/cmdline */
-	strcpy(boot_command_line, redboot_command_line);
-
-	/* see if there's an explicit memory size option */
-	from = redboot_command_line;
-	to = redboot_command_line;
-	c = ' ';
-
-	for (;;) {
-		if (c == ' ' && !memcmp(from, "mem=", 4)) {
-			if (to != redboot_command_line)
-				to--;
-			memory_size = memparse(from + 4, &from);
-		}
-
-		c = *(from++);
-		if (!c)
-			break;
-
-		*(to++) = c;
-	}
-
-	*to = '\0';
-	*cmdline_p = redboot_command_line;
+	memory_size = memparse(p, &p);
 
 	if (memory_size == 0)
 		panic("Memory size not known\n");
 
-	memory_end = (unsigned long) CONFIG_KERNEL_RAM_BASE_ADDRESS +
-		memory_size;
-	if (memory_end > phys_memory_end)
-		memory_end = phys_memory_end;
+	return 0;
 }
+early_param("mem", early_mem);
 
 /*
  * architecture specific setup
@@ -125,7 +100,20 @@
 	cpu_init();
 	unit_setup();
 	smp_init_cpus();
-	parse_mem_cmdline(cmdline_p);
+
+	/* save unparsed command line copy for /proc/cmdline */
+	strlcpy(boot_command_line, redboot_command_line, COMMAND_LINE_SIZE);
+
+	/* populate cmd_line too for later use, preserving boot_command_line */
+	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
+	*cmdline_p = cmd_line;
+
+	parse_early_param();
+
+	memory_end = (unsigned long) CONFIG_KERNEL_RAM_BASE_ADDRESS +
+		memory_size;
+	if (memory_end > phys_memory_end)
+		memory_end = phys_memory_end;
 
 	init_mm.start_code = (unsigned long)&_text;
 	init_mm.end_code = (unsigned long) &_etext;
diff --git a/arch/parisc/include/asm/mmzone.h b/arch/parisc/include/asm/mmzone.h
index cc50d33..b6b34a0 100644
--- a/arch/parisc/include/asm/mmzone.h
+++ b/arch/parisc/include/asm/mmzone.h
@@ -27,7 +27,7 @@
 
 #define PFNNID_SHIFT (30 - PAGE_SHIFT)
 #define PFNNID_MAP_MAX  512     /* support 512GB */
-extern unsigned char pfnnid_map[PFNNID_MAP_MAX];
+extern signed char pfnnid_map[PFNNID_MAP_MAX];
 
 #ifndef CONFIG_64BIT
 #define pfn_is_io(pfn) ((pfn & (0xf0000000UL >> PAGE_SHIFT)) == (0xf0000000UL >> PAGE_SHIFT))
@@ -46,7 +46,7 @@
 	i = pfn >> PFNNID_SHIFT;
 	BUG_ON(i >= ARRAY_SIZE(pfnnid_map));
 
-	return (int)pfnnid_map[i];
+	return pfnnid_map[i];
 }
 
 static inline int pfn_valid(int pfn)
diff --git a/arch/parisc/include/asm/pci.h b/arch/parisc/include/asm/pci.h
index 3234f49..4651540 100644
--- a/arch/parisc/include/asm/pci.h
+++ b/arch/parisc/include/asm/pci.h
@@ -225,4 +225,9 @@
 	return channel ? 15 : 14;
 }
 
+#define HAVE_PCI_MMAP
+
+extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+	enum pci_mmap_state mmap_state, int write_combine);
+
 #endif /* __ASM_PARISC_PCI_H */
diff --git a/arch/parisc/kernel/hardware.c b/arch/parisc/kernel/hardware.c
index 9e2d2e4..8722756 100644
--- a/arch/parisc/kernel/hardware.c
+++ b/arch/parisc/kernel/hardware.c
@@ -1205,6 +1205,7 @@
 	{HPHW_FIO, 0x004, 0x00320, 0x0, "Metheus Frame Buffer"}, 
 	{HPHW_FIO, 0x004, 0x00340, 0x0, "BARCO CX4500 VME Grphx Cnsl"}, 
 	{HPHW_FIO, 0x004, 0x00360, 0x0, "Hughes TOG VME FDDI"}, 
+	{HPHW_FIO, 0x076, 0x000AD, 0x00, "Crestone Peak RS-232"},
 	{HPHW_IOA, 0x185, 0x0000B, 0x00, "Java BC Summit Port"}, 
 	{HPHW_IOA, 0x1FF, 0x0000B, 0x00, "Hitachi Ghostview Summit Port"}, 
 	{HPHW_IOA, 0x580, 0x0000B, 0x10, "U2-IOA BC Runway Port"}, 
diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S
index 36d7f40..b743a80 100644
--- a/arch/parisc/kernel/pacache.S
+++ b/arch/parisc/kernel/pacache.S
@@ -860,7 +860,7 @@
 #endif
 
 	ldil		L%dcache_stride, %r1
-	ldw		R%dcache_stride(%r1), %r1
+	ldw		R%dcache_stride(%r1), r31
 
 #ifdef CONFIG_64BIT
 	depdi,z		1, 63-PAGE_SHIFT,1, %r25
@@ -868,26 +868,26 @@
 	depwi,z		1, 31-PAGE_SHIFT,1, %r25
 #endif
 	add		%r28, %r25, %r25
-	sub		%r25, %r1, %r25
+	sub		%r25, r31, %r25
 
 
-1:      fdc,m		%r1(%r28)
-	fdc,m		%r1(%r28)
-	fdc,m		%r1(%r28)
-	fdc,m		%r1(%r28)
-	fdc,m		%r1(%r28)
-	fdc,m		%r1(%r28)
-	fdc,m		%r1(%r28)
-	fdc,m		%r1(%r28)
-	fdc,m		%r1(%r28)
-	fdc,m		%r1(%r28)
-	fdc,m		%r1(%r28)
-	fdc,m		%r1(%r28)
-	fdc,m		%r1(%r28)
-	fdc,m		%r1(%r28)
-	fdc,m		%r1(%r28)
+1:      fdc,m		r31(%r28)
+	fdc,m		r31(%r28)
+	fdc,m		r31(%r28)
+	fdc,m		r31(%r28)
+	fdc,m		r31(%r28)
+	fdc,m		r31(%r28)
+	fdc,m		r31(%r28)
+	fdc,m		r31(%r28)
+	fdc,m		r31(%r28)
+	fdc,m		r31(%r28)
+	fdc,m		r31(%r28)
+	fdc,m		r31(%r28)
+	fdc,m		r31(%r28)
+	fdc,m		r31(%r28)
+	fdc,m		r31(%r28)
 	cmpb,COND(<<)		%r28, %r25,1b
-	fdc,m		%r1(%r28)
+	fdc,m		r31(%r28)
 
 	sync
 
@@ -936,7 +936,7 @@
 #endif
 
 	ldil		L%icache_stride, %r1
-	ldw		R%icache_stride(%r1), %r1
+	ldw		R%icache_stride(%r1), %r31
 
 #ifdef CONFIG_64BIT
 	depdi,z		1, 63-PAGE_SHIFT,1, %r25
@@ -944,28 +944,28 @@
 	depwi,z		1, 31-PAGE_SHIFT,1, %r25
 #endif
 	add		%r28, %r25, %r25
-	sub		%r25, %r1, %r25
+	sub		%r25, %r31, %r25
 
 
 	/* fic only has the type 26 form on PA1.1, requiring an
 	 * explicit space specification, so use %sr4 */
-1:      fic,m		%r1(%sr4,%r28)
-	fic,m		%r1(%sr4,%r28)
-	fic,m		%r1(%sr4,%r28)
-	fic,m		%r1(%sr4,%r28)
-	fic,m		%r1(%sr4,%r28)
-	fic,m		%r1(%sr4,%r28)
-	fic,m		%r1(%sr4,%r28)
-	fic,m		%r1(%sr4,%r28)
-	fic,m		%r1(%sr4,%r28)
-	fic,m		%r1(%sr4,%r28)
-	fic,m		%r1(%sr4,%r28)
-	fic,m		%r1(%sr4,%r28)
-	fic,m		%r1(%sr4,%r28)
-	fic,m		%r1(%sr4,%r28)
-	fic,m		%r1(%sr4,%r28)
+1:      fic,m		%r31(%sr4,%r28)
+	fic,m		%r31(%sr4,%r28)
+	fic,m		%r31(%sr4,%r28)
+	fic,m		%r31(%sr4,%r28)
+	fic,m		%r31(%sr4,%r28)
+	fic,m		%r31(%sr4,%r28)
+	fic,m		%r31(%sr4,%r28)
+	fic,m		%r31(%sr4,%r28)
+	fic,m		%r31(%sr4,%r28)
+	fic,m		%r31(%sr4,%r28)
+	fic,m		%r31(%sr4,%r28)
+	fic,m		%r31(%sr4,%r28)
+	fic,m		%r31(%sr4,%r28)
+	fic,m		%r31(%sr4,%r28)
+	fic,m		%r31(%sr4,%r28)
 	cmpb,COND(<<)	%r28, %r25,1b
-	fic,m		%r1(%sr4,%r28)
+	fic,m		%r31(%sr4,%r28)
 
 	sync
 
diff --git a/arch/parisc/kernel/pci.c b/arch/parisc/kernel/pci.c
index 6030905..64f2764 100644
--- a/arch/parisc/kernel/pci.c
+++ b/arch/parisc/kernel/pci.c
@@ -220,6 +220,33 @@
 }
 
 
+int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+			enum pci_mmap_state mmap_state, int write_combine)
+{
+	unsigned long prot;
+
+	/*
+	 * I/O space can be accessed via normal processor loads and stores on
+	 * this platform but for now we elect not to do this and portable
+	 * drivers should not do this anyway.
+	 */
+	if (mmap_state == pci_mmap_io)
+		return -EINVAL;
+
+	if (write_combine)
+		return -EINVAL;
+
+	/*
+	 * Ignore write-combine; for now only return uncached mappings.
+	 */
+	prot = pgprot_val(vma->vm_page_prot);
+	prot |= _PAGE_NO_CACHE;
+	vma->vm_page_prot = __pgprot(prot);
+
+	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+		vma->vm_end - vma->vm_start, vma->vm_page_prot);
+}
+
 /*
  * A driver is enabling the device.  We make sure that all the appropriate
  * bits are set to allow the device to operate as the driver is expecting.
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index 1c96564..505b56c 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -47,7 +47,7 @@
 
 #ifdef CONFIG_DISCONTIGMEM
 struct node_map_data node_data[MAX_NUMNODES] __read_mostly;
-unsigned char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;
+signed char pfnnid_map[PFNNID_MAP_MAX] __read_mostly;
 #endif
 
 static struct resource data_resource = {
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index c4dfbaf..bea8587 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -296,6 +296,7 @@
 CONFIG_SQUASHFS_XATTR=y
 CONFIG_SQUASHFS_LZO=y
 CONFIG_SQUASHFS_XZ=y
+CONFIG_PSTORE=y
 CONFIG_NFS_FS=y
 CONFIG_NFS_V3_ACL=y
 CONFIG_NFS_V4=y
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index a0b11fb..09a8743 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -151,7 +151,6 @@
 
 extern struct eeh_ops *eeh_ops;
 extern int eeh_subsystem_enabled;
-extern struct mutex eeh_mutex;
 extern raw_spinlock_t confirm_error_lock;
 extern int eeh_probe_mode;
 
@@ -173,16 +172,6 @@
 	return (eeh_probe_mode == EEH_PROBE_MODE_DEV);
 }
 
-static inline void eeh_lock(void)
-{
-	mutex_lock(&eeh_mutex);
-}
-
-static inline void eeh_unlock(void)
-{
-	mutex_unlock(&eeh_mutex);
-}
-
 static inline void eeh_serialize_lock(unsigned long *flags)
 {
 	raw_spin_lock_irqsave(&confirm_error_lock, *flags);
@@ -213,13 +202,13 @@
 
 void *eeh_dev_init(struct device_node *dn, void *data);
 void eeh_dev_phb_init_dynamic(struct pci_controller *phb);
-int __init eeh_init(void);
+int eeh_init(void);
 int __init eeh_ops_register(struct eeh_ops *ops);
 int __exit eeh_ops_unregister(const char *name);
 unsigned long eeh_check_failure(const volatile void __iomem *token,
 				unsigned long val);
 int eeh_dev_check_failure(struct eeh_dev *edev);
-void __init eeh_addr_cache_build(void);
+void eeh_addr_cache_build(void);
 void eeh_add_device_tree_early(struct device_node *);
 void eeh_add_device_tree_late(struct pci_bus *);
 void eeh_add_sysfs_files(struct pci_bus *);
@@ -271,9 +260,6 @@
 
 static inline void eeh_remove_bus_device(struct pci_dev *dev, int purge_pe) { }
 
-static inline void eeh_lock(void) { }
-static inline void eeh_unlock(void) { }
-
 #define EEH_POSSIBLE_ERROR(val, type) (0)
 #define EEH_IO_ERROR_VALUE(size) (-1UL)
 #endif /* CONFIG_EEH */
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 8e5fae8..07ca627 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -358,12 +358,12 @@
 	/* No guest interrupts come through here */	\
 	SET_SCRATCH0(r13);		/* save r13 */	\
 	EXCEPTION_RELON_PROLOG_PSERIES(PACA_EXGEN, label##_common, \
-				       EXC_STD, KVMTEST_PR, vec)
+				       EXC_STD, NOTEST, vec)
 
 #define STD_RELON_EXCEPTION_PSERIES_OOL(vec, label)		\
 	.globl label##_relon_pSeries;				\
 label##_relon_pSeries:						\
-	EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST_PR, vec);	\
+	EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, vec);		\
 	EXCEPTION_RELON_PROLOG_PSERIES_1(label##_common, EXC_STD)
 
 #define STD_RELON_EXCEPTION_HV(loc, vec, label)		\
@@ -374,12 +374,12 @@
 	/* No guest interrupts come through here */	\
 	SET_SCRATCH0(r13);	/* save r13 */		\
 	EXCEPTION_RELON_PROLOG_PSERIES(PACA_EXGEN, label##_common, \
-				       EXC_HV, KVMTEST, vec)
+				       EXC_HV, NOTEST, vec)
 
 #define STD_RELON_EXCEPTION_HV_OOL(vec, label)			\
 	.globl label##_relon_hv;				\
 label##_relon_hv:						\
-	EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST, vec);		\
+	EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, vec);		\
 	EXCEPTION_RELON_PROLOG_PSERIES_1(label##_common, EXC_HV)
 
 /* This associate vector numbers with bits in paca->irq_happened */
@@ -513,7 +513,7 @@
  */
 #define STD_EXCEPTION_COMMON_ASYNC(trap, label, hdlr)		  \
 	EXCEPTION_COMMON(trap, label, hdlr, ret_from_except_lite, \
-			 FINISH_NAP;RUNLATCH_ON;DISABLE_INTS)
+			 FINISH_NAP;DISABLE_INTS;RUNLATCH_ON)
 
 /*
  * When the idle code in power4_idle puts the CPU into NAP mode,
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index 98d1422..c34656a 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -130,13 +130,6 @@
 extern void iommu_init_early_dart(void);
 extern void iommu_init_early_pasemi(void);
 
-#ifdef CONFIG_PCI
-extern void pci_iommu_init(void);
-extern void pci_direct_iommu_init(void);
-#else
-static inline void pci_iommu_init(void) { }
-#endif
-
 extern void alloc_dart_table(void);
 #if defined(CONFIG_PPC64) && defined(CONFIG_PM)
 static inline void iommu_save(void)
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index b1e7f2a..9b12f88 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -66,7 +66,8 @@
 
 	u8	reserved6[48];
 	u8	cede_latency_hint;
-	u8	reserved7[7];
+	u8	ebb_regs_in_use;
+	u8	reserved7[6];
 	u8	dtl_enable_mask;	/* Dispatch Trace Log mask */
 	u8	donate_dedicated_cpu;	/* Donate dedicated CPU cycles */
 	u8	fpregs_in_use;
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 3d6fbb0..c4cf011 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -351,6 +351,7 @@
 				  int ssize, unsigned int psize)
 {
 	BUG();
+	return -1;
 }
 #endif
 extern void hash_failure_debug(unsigned long ea, unsigned long access,
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
index f265049..2dd7bfc 100644
--- a/arch/powerpc/include/asm/perf_event_server.h
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -60,6 +60,7 @@
 #define PPMU_HAS_SSLOT		0x00000020 /* Has sampled slot in MMCRA */
 #define PPMU_HAS_SIER		0x00000040 /* Has SIER */
 #define PPMU_BHRB		0x00000080 /* has BHRB feature enabled */
+#define PPMU_EBB		0x00000100 /* supports event based branch */
 
 /*
  * Values for flags to get_alternatives()
@@ -68,6 +69,11 @@
 #define PPMU_LIMITED_PMC_REQD	2	/* have to put this on a limited PMC */
 #define PPMU_ONLY_COUNT_RUN	4	/* only counting in run state */
 
+/*
+ * We use the event config bit 63 as a flag to request EBB.
+ */
+#define EVENT_CONFIG_EBB_SHIFT	63
+
 extern int register_power_pmu(struct power_pmu *);
 
 struct pt_regs;
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 9fe1129..47a35b0 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -287,9 +287,9 @@
 	unsigned long	siar;
 	unsigned long	sdar;
 	unsigned long	sier;
-	unsigned long	mmcr0;
 	unsigned long	mmcr2;
-	unsigned long	mmcra;
+	unsigned 	mmcr0;
+	unsigned 	used_ebb;
 #endif
 };
 
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 4a9e408..5d7d9c2 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -621,11 +621,15 @@
 #define   MMCR0_PMXE	0x04000000UL /* performance monitor exception enable */
 #define   MMCR0_FCECE	0x02000000UL /* freeze ctrs on enabled cond or event */
 #define   MMCR0_TBEE	0x00400000UL /* time base exception enable */
+#define   MMCR0_EBE	0x00100000UL /* Event based branch enable */
+#define   MMCR0_PMCC	0x000c0000UL /* PMC control */
+#define   MMCR0_PMCC_U6	0x00080000UL /* PMC1-6 are R/W by user (PR) */
 #define   MMCR0_PMC1CE	0x00008000UL /* PMC1 count enable*/
 #define   MMCR0_PMCjCE	0x00004000UL /* PMCj count enable*/
 #define   MMCR0_TRIGGER	0x00002000UL /* TRIGGER enable */
 #define   MMCR0_PMAO	0x00000080UL /* performance monitor alert has occurred, set to 0 after handling exception */
 #define   MMCR0_SHRFC	0x00000040UL /* SHRre freeze conditions between threads */
+#define   MMCR0_FC56	0x00000010UL /* freeze counters 5 and 6 */
 #define   MMCR0_FCTI	0x00000008UL /* freeze counters in tags inactive mode */
 #define   MMCR0_FCTA	0x00000004UL /* freeze counters in tags active mode */
 #define   MMCR0_FCWAIT	0x00000002UL /* freeze counter in WAIT state */
@@ -673,6 +677,11 @@
 #define   SIER_SIAR_VALID	0x0400000	/* SIAR contents valid */
 #define   SIER_SDAR_VALID	0x0200000	/* SDAR contents valid */
 
+/* When EBB is enabled, some of MMCR0/MMCR2/SIER are user accessible */
+#define MMCR0_USER_MASK	(MMCR0_FC | MMCR0_PMXE | MMCR0_PMAO)
+#define MMCR2_USER_MASK	0x4020100804020000UL /* (FC1P|FC2P|FC3P|FC4P|FC5P|FC6P) */
+#define SIER_USER_MASK	0x7fffffUL
+
 #define SPRN_PA6T_MMCR0 795
 #define   PA6T_MMCR0_EN0	0x0000000000000001UL
 #define   PA6T_MMCR0_EN1	0x0000000000000002UL
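A worked value and usage sketch for the new user-visible masks, restating the ebb_switch_out()/ebb_switch_in() logic added to core-book3s.c in this same merge rather than introducing anything new: MMCR0_USER_MASK is MMCR0_FC | MMCR0_PMXE | MMCR0_PMAO, i.e. 0x84000080, so only the freeze, exception-enable and alert bits of MMCR0 are treated as owned by userspace across a context switch.

	/* switch out: keep only the user-visible MMCR0/MMCR2 bits */
	current->thread.mmcr0 = mfspr(SPRN_MMCR0) & MMCR0_USER_MASK;	/* 0x84000080 */
	current->thread.mmcr2 = mfspr(SPRN_MMCR2) & MMCR2_USER_MASK;

	/* switch in: enable EBB and user PMC access, then merge the user bits */
	mmcr0 = cpuhw->mmcr[0] | MMCR0_EBE | MMCR0_PMCC_U6;
	mmcr0 |= current->thread.mmcr0;
	mtspr(SPRN_MMCR2, current->thread.mmcr2);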
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index 34fd704..c7a8bfc 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -350,8 +350,8 @@
 			(devfn << 8) | (reg & 0xff);
 }
 
-extern void __cpuinit rtas_give_timebase(void);
-extern void __cpuinit rtas_take_timebase(void);
+extern void rtas_give_timebase(void);
+extern void rtas_take_timebase(void);
 
 #ifdef CONFIG_PPC_RTAS
 static inline int page_is_rtas_user_buf(unsigned long pfn)
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index 200d763..49a13e0 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -67,4 +67,18 @@
 }
 #endif
 
+static inline void clear_task_ebb(struct task_struct *t)
+{
+#ifdef CONFIG_PPC_BOOK3S_64
+	/* EBB perf events are not inherited, so clear all EBB state. */
+	t->thread.bescr = 0;
+	t->thread.mmcr2 = 0;
+	t->thread.mmcr0 = 0;
+	t->thread.siar = 0;
+	t->thread.sdar = 0;
+	t->thread.sier = 0;
+	t->thread.used_ebb = 0;
+#endif
+}
+
 #endif /* _ASM_POWERPC_SWITCH_TO_H */
diff --git a/arch/powerpc/include/asm/vdso.h b/arch/powerpc/include/asm/vdso.h
index 50f261b..0d9cecd 100644
--- a/arch/powerpc/include/asm/vdso.h
+++ b/arch/powerpc/include/asm/vdso.h
@@ -22,7 +22,7 @@
 extern unsigned long vdso32_sigtramp;
 extern unsigned long vdso32_rt_sigtramp;
 
-int __cpuinit vdso_getcpu_init(void);
+int vdso_getcpu_init(void);
 
 #else /* __ASSEMBLY__ */
 
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index dc684b1..c7e8afc 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -132,7 +132,6 @@
 	DEFINE(THREAD_SIER, offsetof(struct thread_struct, sier));
 	DEFINE(THREAD_MMCR0, offsetof(struct thread_struct, mmcr0));
 	DEFINE(THREAD_MMCR2, offsetof(struct thread_struct, mmcr2));
-	DEFINE(THREAD_MMCRA, offsetof(struct thread_struct, mmcra));
 #endif
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 	DEFINE(PACATMSCRATCH, offsetof(struct paca_struct, tm_scratch));
diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
index 92c6b00..9262cf2 100644
--- a/arch/powerpc/kernel/cacheinfo.c
+++ b/arch/powerpc/kernel/cacheinfo.c
@@ -131,7 +131,8 @@
 	return cache_type_info[cache->type].name;
 }
 
-static void __cpuinit cache_init(struct cache *cache, int type, int level, struct device_node *ofnode)
+static void cache_init(struct cache *cache, int type, int level,
+		       struct device_node *ofnode)
 {
 	cache->type = type;
 	cache->level = level;
@@ -140,7 +141,7 @@
 	list_add(&cache->list, &cache_list);
 }
 
-static struct cache *__cpuinit new_cache(int type, int level, struct device_node *ofnode)
+static struct cache *new_cache(int type, int level, struct device_node *ofnode)
 {
 	struct cache *cache;
 
@@ -324,7 +325,8 @@
 	return of_get_property(np, "cache-unified", NULL);
 }
 
-static struct cache *__cpuinit cache_do_one_devnode_unified(struct device_node *node, int level)
+static struct cache *cache_do_one_devnode_unified(struct device_node *node,
+						  int level)
 {
 	struct cache *cache;
 
@@ -335,7 +337,8 @@
 	return cache;
 }
 
-static struct cache *__cpuinit cache_do_one_devnode_split(struct device_node *node, int level)
+static struct cache *cache_do_one_devnode_split(struct device_node *node,
+						int level)
 {
 	struct cache *dcache, *icache;
 
@@ -357,7 +360,7 @@
 	return NULL;
 }
 
-static struct cache *__cpuinit cache_do_one_devnode(struct device_node *node, int level)
+static struct cache *cache_do_one_devnode(struct device_node *node, int level)
 {
 	struct cache *cache;
 
@@ -369,7 +372,8 @@
 	return cache;
 }
 
-static struct cache *__cpuinit cache_lookup_or_instantiate(struct device_node *node, int level)
+static struct cache *cache_lookup_or_instantiate(struct device_node *node,
+						 int level)
 {
 	struct cache *cache;
 
@@ -385,7 +389,7 @@
 	return cache;
 }
 
-static void __cpuinit link_cache_lists(struct cache *smaller, struct cache *bigger)
+static void link_cache_lists(struct cache *smaller, struct cache *bigger)
 {
 	while (smaller->next_local) {
 		if (smaller->next_local == bigger)
@@ -396,13 +400,13 @@
 	smaller->next_local = bigger;
 }
 
-static void __cpuinit do_subsidiary_caches_debugcheck(struct cache *cache)
+static void do_subsidiary_caches_debugcheck(struct cache *cache)
 {
 	WARN_ON_ONCE(cache->level != 1);
 	WARN_ON_ONCE(strcmp(cache->ofnode->type, "cpu"));
 }
 
-static void __cpuinit do_subsidiary_caches(struct cache *cache)
+static void do_subsidiary_caches(struct cache *cache)
 {
 	struct device_node *subcache_node;
 	int level = cache->level;
@@ -423,7 +427,7 @@
 	}
 }
 
-static struct cache *__cpuinit cache_chain_instantiate(unsigned int cpu_id)
+static struct cache *cache_chain_instantiate(unsigned int cpu_id)
 {
 	struct device_node *cpu_node;
 	struct cache *cpu_cache = NULL;
@@ -448,7 +452,7 @@
 	return cpu_cache;
 }
 
-static struct cache_dir *__cpuinit cacheinfo_create_cache_dir(unsigned int cpu_id)
+static struct cache_dir *cacheinfo_create_cache_dir(unsigned int cpu_id)
 {
 	struct cache_dir *cache_dir;
 	struct device *dev;
@@ -653,7 +657,7 @@
 	.default_attrs = cache_index_default_attrs,
 };
 
-static void __cpuinit cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir)
+static void cacheinfo_create_index_opt_attrs(struct cache_index_dir *dir)
 {
 	const char *cache_name;
 	const char *cache_type;
@@ -696,7 +700,8 @@
 	kfree(buf);
 }
 
-static void __cpuinit cacheinfo_create_index_dir(struct cache *cache, int index, struct cache_dir *cache_dir)
+static void cacheinfo_create_index_dir(struct cache *cache, int index,
+				       struct cache_dir *cache_dir)
 {
 	struct cache_index_dir *index_dir;
 	int rc;
@@ -722,7 +727,8 @@
 	kfree(index_dir);
 }
 
-static void __cpuinit cacheinfo_sysfs_populate(unsigned int cpu_id, struct cache *cache_list)
+static void cacheinfo_sysfs_populate(unsigned int cpu_id,
+				     struct cache *cache_list)
 {
 	struct cache_dir *cache_dir;
 	struct cache *cache;
@@ -740,7 +746,7 @@
 	}
 }
 
-void __cpuinit cacheinfo_cpu_online(unsigned int cpu_id)
+void cacheinfo_cpu_online(unsigned int cpu_id)
 {
 	struct cache *cache;
 
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index af2b9ae..39954fe 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -103,9 +103,6 @@
  */
 int eeh_probe_mode;
 
-/* Global EEH mutex */
-DEFINE_MUTEX(eeh_mutex);
-
 /* Lock to avoid races due to multiple reports of an error */
 DEFINE_RAW_SPINLOCK(confirm_error_lock);
 
@@ -235,16 +232,30 @@
 {
 	size_t loglen = 0;
 	struct eeh_dev *edev;
+	bool valid_cfg_log = true;
 
-	eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
-	eeh_ops->configure_bridge(pe);
-	eeh_pe_restore_bars(pe);
+	/*
+	 * When the PHB is fenced or dead, it's pointless to collect
+	 * the data from PCI config space because it should return
+	 * 0xFF's. For ER, we still retrieve the data from the PCI
+	 * config space.
+	 */
+	if (eeh_probe_mode_dev() &&
+	    (pe->type & EEH_PE_PHB) &&
+	    (pe->state & (EEH_PE_ISOLATED | EEH_PE_PHB_DEAD)))
+		valid_cfg_log = false;
 
-	pci_regs_buf[0] = 0;
-	eeh_pe_for_each_dev(pe, edev) {
-		loglen += eeh_gather_pci_data(edev, pci_regs_buf,
-				EEH_PCI_REGS_LOG_LEN);
-        }
+	if (valid_cfg_log) {
+		eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
+		eeh_ops->configure_bridge(pe);
+		eeh_pe_restore_bars(pe);
+
+		pci_regs_buf[0] = 0;
+		eeh_pe_for_each_dev(pe, edev) {
+			loglen += eeh_gather_pci_data(edev, pci_regs_buf + loglen,
+						      EEH_PCI_REGS_LOG_LEN - loglen);
+		}
+	}
 
 	eeh_ops->get_log(pe, severity, pci_regs_buf, loglen);
 }
@@ -318,7 +329,9 @@
 	eeh_serialize_unlock(flags);
 	eeh_send_failure_event(phb_pe);
 
-	WARN(1, "EEH: PHB failure detected\n");
+	pr_err("EEH: PHB#%x failure detected\n",
+		phb_pe->phb->global_number);
+	dump_stack();
 
 	return 1;
 out:
@@ -447,7 +460,10 @@
 	 * a stack trace will help the device-driver authors figure
 	 * out what happened.  So print that out.
 	 */
-	WARN(1, "EEH: failure detected\n");
+	pr_err("EEH: Frozen PE#%x detected on PHB#%x\n",
+		pe->addr, pe->phb->global_number);
+	dump_stack();
+
 	return 1;
 
 dn_unlock:
@@ -740,7 +756,7 @@
  * Even if force-off is set, the EEH hardware is still enabled, so that
  * newer systems can boot.
  */
-int __init eeh_init(void)
+int eeh_init(void)
 {
 	struct pci_controller *hose, *tmp;
 	struct device_node *phb;
diff --git a/arch/powerpc/kernel/eeh_cache.c b/arch/powerpc/kernel/eeh_cache.c
index 1d5d9a6..f9ac123 100644
--- a/arch/powerpc/kernel/eeh_cache.c
+++ b/arch/powerpc/kernel/eeh_cache.c
@@ -194,7 +194,7 @@
 	}
 
 	/* Skip any devices for which EEH is not enabled. */
-	if (!edev->pe) {
+	if (!eeh_probe_mode_dev() && !edev->pe) {
 #ifdef DEBUG
 		pr_info("PCI: skip building address cache for=%s - %s\n",
 			pci_name(dev), dn->full_name);
@@ -285,7 +285,7 @@
  * Must be run late in boot process, after the pci controllers
  * have been scanned for devices (after all device resources are known).
  */
-void __init eeh_addr_cache_build(void)
+void eeh_addr_cache_build(void)
 {
 	struct device_node *dn;
 	struct eeh_dev *edev;
@@ -294,8 +294,6 @@
 	spin_lock_init(&pci_io_addr_cache_root.piar_lock);
 
 	for_each_pci_dev(dev) {
-		eeh_addr_cache_insert_dev(dev);
-
 		dn = pci_device_to_OF_node(dev);
 		if (!dn)
 			continue;
@@ -308,6 +306,8 @@
 		dev->dev.archdata.edev = edev;
 		edev->pdev = dev;
 
+		eeh_addr_cache_insert_dev(dev);
+
 		eeh_sysfs_add_device(dev);
 	}
 
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 0974e13..2b1ce17 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -425,6 +425,7 @@
 	 * status ... if any child can't handle the reset, then the entire
 	 * slot is dlpar removed and added.
 	 */
+	pr_info("EEH: Notify device drivers to shutdown\n");
 	eeh_pe_dev_traverse(pe, eeh_report_error, &result);
 
 	/* Get the current PCI slot state. This can take a long time,
@@ -432,7 +433,7 @@
 	 */
 	rc = eeh_ops->wait_state(pe, MAX_WAIT_FOR_RECOVERY*1000);
 	if (rc < 0 || rc == EEH_STATE_NOT_SUPPORT) {
-		printk(KERN_WARNING "EEH: Permanent failure\n");
+		pr_warning("EEH: Permanent failure\n");
 		goto hard_fail;
 	}
 
@@ -440,6 +441,7 @@
 	 * don't post the error log until after all dev drivers
 	 * have been informed.
 	 */
+	pr_info("EEH: Collect temporary log\n");
 	eeh_slot_error_detail(pe, EEH_LOG_TEMP);
 
 	/* If all device drivers were EEH-unaware, then shut
@@ -447,15 +449,18 @@
 	 * go down willingly, without panicing the system.
 	 */
 	if (result == PCI_ERS_RESULT_NONE) {
+		pr_info("EEH: Reset with hotplug activity\n");
 		rc = eeh_reset_device(pe, frozen_bus);
 		if (rc) {
-			printk(KERN_WARNING "EEH: Unable to reset, rc=%d\n", rc);
+			pr_warning("%s: Unable to reset, err=%d\n",
+				   __func__, rc);
 			goto hard_fail;
 		}
 	}
 
 	/* If all devices reported they can proceed, then re-enable MMIO */
 	if (result == PCI_ERS_RESULT_CAN_RECOVER) {
+		pr_info("EEH: Enable I/O for affected devices\n");
 		rc = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
 
 		if (rc < 0)
@@ -463,6 +468,7 @@
 		if (rc) {
 			result = PCI_ERS_RESULT_NEED_RESET;
 		} else {
+			pr_info("EEH: Notify device drivers to resume I/O\n");
 			result = PCI_ERS_RESULT_NONE;
 			eeh_pe_dev_traverse(pe, eeh_report_mmio_enabled, &result);
 		}
@@ -470,6 +476,7 @@
 
 	/* If all devices reported they can proceed, then re-enable DMA */
 	if (result == PCI_ERS_RESULT_CAN_RECOVER) {
+		pr_info("EEH: Enabled DMA for affected devices\n");
 		rc = eeh_pci_enable(pe, EEH_OPT_THAW_DMA);
 
 		if (rc < 0)
@@ -482,17 +489,22 @@
 
 	/* If any device has a hard failure, then shut off everything. */
 	if (result == PCI_ERS_RESULT_DISCONNECT) {
-		printk(KERN_WARNING "EEH: Device driver gave up\n");
+		pr_warning("EEH: Device driver gave up\n");
 		goto hard_fail;
 	}
 
 	/* If any device called out for a reset, then reset the slot */
 	if (result == PCI_ERS_RESULT_NEED_RESET) {
+		pr_info("EEH: Reset without hotplug activity\n");
 		rc = eeh_reset_device(pe, NULL);
 		if (rc) {
-			printk(KERN_WARNING "EEH: Cannot reset, rc=%d\n", rc);
+			pr_warning("%s: Cannot reset, err=%d\n",
+				   __func__, rc);
 			goto hard_fail;
 		}
+
+		pr_info("EEH: Notify device drivers "
+			"the completion of reset\n");
 		result = PCI_ERS_RESULT_NONE;
 		eeh_pe_dev_traverse(pe, eeh_report_reset, &result);
 	}
@@ -500,11 +512,12 @@
 	/* All devices should claim they have recovered by now. */
 	if ((result != PCI_ERS_RESULT_RECOVERED) &&
 	    (result != PCI_ERS_RESULT_NONE)) {
-		printk(KERN_WARNING "EEH: Not recovered\n");
+		pr_warning("EEH: Not recovered\n");
 		goto hard_fail;
 	}
 
 	/* Tell all device drivers that they can resume operations */
+	pr_info("EEH: Notify device driver to resume\n");
 	eeh_pe_dev_traverse(pe, eeh_report_resume, NULL);
 
 	return;
diff --git a/arch/powerpc/kernel/eeh_event.c b/arch/powerpc/kernel/eeh_event.c
index 39bcd81..d27c5af 100644
--- a/arch/powerpc/kernel/eeh_event.c
+++ b/arch/powerpc/kernel/eeh_event.c
@@ -55,7 +55,8 @@
 	struct eeh_pe *pe;
 
 	while (!kthread_should_stop()) {
-		down(&eeh_eventlist_sem);
+		if (down_interruptible(&eeh_eventlist_sem))
+			break;
 
 		/* Fetch EEH event from the queue */
 		spin_lock_irqsave(&eeh_eventlist_lock, flags);
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index ae75722..016588a 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -22,6 +22,7 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
  */
 
+#include <linux/delay.h>
 #include <linux/export.h>
 #include <linux/gfp.h>
 #include <linux/init.h>
@@ -78,9 +79,7 @@
 	}
 
 	/* Put it into the list */
-	eeh_lock();
 	list_add_tail(&pe->child, &eeh_phb_pe);
-	eeh_unlock();
 
 	pr_debug("EEH: Add PE for PHB#%d\n", phb->global_number);
 
@@ -185,21 +184,15 @@
 		return NULL;
 	}
 
-	eeh_lock();
-
 	/* Traverse root PE */
 	for (pe = root; pe; pe = eeh_pe_next(pe, root)) {
 		eeh_pe_for_each_dev(pe, edev) {
 			ret = fn(edev, flag);
-			if (ret) {
-				eeh_unlock();
+			if (ret)
 				return ret;
-			}
 		}
 	}
 
-	eeh_unlock();
-
 	return NULL;
 }
 
@@ -305,8 +298,6 @@
 {
 	struct eeh_pe *pe, *parent;
 
-	eeh_lock();
-
 	/*
 	 * Search the PE has been existing or not according
 	 * to the PE address. If that has been existing, the
@@ -316,7 +307,6 @@
 	pe = eeh_pe_get(edev);
 	if (pe && !(pe->type & EEH_PE_INVALID)) {
 		if (!edev->pe_config_addr) {
-			eeh_unlock();
 			pr_err("%s: PE with addr 0x%x already exists\n",
 				__func__, edev->config_addr);
 			return -EEXIST;
@@ -328,7 +318,6 @@
 
 		/* Put the edev to PE */
 		list_add_tail(&edev->list, &pe->edevs);
-		eeh_unlock();
 		pr_debug("EEH: Add %s to Bus PE#%x\n",
 			edev->dn->full_name, pe->addr);
 
@@ -347,7 +336,6 @@
 			parent->type &= ~EEH_PE_INVALID;
 			parent = parent->parent;
 		}
-		eeh_unlock();
 		pr_debug("EEH: Add %s to Device PE#%x, Parent PE#%x\n",
 			edev->dn->full_name, pe->addr, pe->parent->addr);
 
@@ -357,7 +345,6 @@
 	/* Create a new EEH PE */
 	pe = eeh_pe_alloc(edev->phb, EEH_PE_DEVICE);
 	if (!pe) {
-		eeh_unlock();
 		pr_err("%s: out of memory!\n", __func__);
 		return -ENOMEM;
 	}
@@ -385,7 +372,6 @@
 	if (!parent) {
 		parent = eeh_phb_pe_get(edev->phb);
 		if (!parent) {
-			eeh_unlock();
 			pr_err("%s: No PHB PE is found (PHB Domain=%d)\n",
 				__func__, edev->phb->global_number);
 			edev->pe = NULL;
@@ -402,7 +388,6 @@
 	list_add_tail(&pe->child, &parent->child_list);
 	list_add_tail(&edev->list, &pe->edevs);
 	edev->pe = pe;
-	eeh_unlock();
 	pr_debug("EEH: Add %s to Device PE#%x, Parent PE#%x\n",
 		edev->dn->full_name, pe->addr, pe->parent->addr);
 
@@ -430,8 +415,6 @@
 		return -EEXIST;
 	}
 
-	eeh_lock();
-
 	/* Remove the EEH device */
 	pe = edev->pe;
 	edev->pe = NULL;
@@ -476,8 +459,6 @@
 		pe = parent;
 	}
 
-	eeh_unlock();
-
 	return 0;
 }
 
@@ -550,9 +531,7 @@
  */
 void eeh_pe_state_mark(struct eeh_pe *pe, int state)
 {
-	eeh_lock();
 	eeh_pe_traverse(pe, __eeh_pe_state_mark, &state);
-	eeh_unlock();
 }
 
 /**
@@ -586,35 +565,135 @@
  */
 void eeh_pe_state_clear(struct eeh_pe *pe, int state)
 {
-	eeh_lock();
 	eeh_pe_traverse(pe, __eeh_pe_state_clear, &state);
-	eeh_unlock();
 }
 
-/**
- * eeh_restore_one_device_bars - Restore the Base Address Registers for one device
- * @data: EEH device
- * @flag: Unused
+/*
+ * Some PCI bridges (e.g. PLX bridges) have primary/secondary
+ * buses assigned explicitly by firmware, and we probably have
+ * lost that after reset. So we have to delay the check until
+ * the PCI-CFG registers have been restored for the parent
+ * bridge.
  *
- * Loads the PCI configuration space base address registers,
- * the expansion ROM base address, the latency timer, and etc.
- * from the saved values in the device node.
+ * Don't use the normal PCI-CFG accessors, which have probably been
+ * blocked on the normal path during this stage. Instead we need to
+ * use the eeh operations, which are always permitted.
  */
-static void *eeh_restore_one_device_bars(void *data, void *flag)
+static void eeh_bridge_check_link(struct pci_dev *pdev,
+				  struct device_node *dn)
+{
+	int cap;
+	uint32_t val;
+	int timeout = 0;
+
+	/*
+	 * We only check root port and downstream ports of
+	 * PCIe switches
+	 */
+	if (!pci_is_pcie(pdev) ||
+	    (pci_pcie_type(pdev) != PCI_EXP_TYPE_ROOT_PORT &&
+	     pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM))
+		return;
+
+	pr_debug("%s: Check PCIe link for %s ...\n",
+		 __func__, pci_name(pdev));
+
+	/* Check slot status */
+	cap = pdev->pcie_cap;
+	eeh_ops->read_config(dn, cap + PCI_EXP_SLTSTA, 2, &val);
+	if (!(val & PCI_EXP_SLTSTA_PDS)) {
+		pr_debug("  No card in the slot (0x%04x) !\n", val);
+		return;
+	}
+
+	/* Check power status if we have the capability */
+	eeh_ops->read_config(dn, cap + PCI_EXP_SLTCAP, 2, &val);
+	if (val & PCI_EXP_SLTCAP_PCP) {
+		eeh_ops->read_config(dn, cap + PCI_EXP_SLTCTL, 2, &val);
+		if (val & PCI_EXP_SLTCTL_PCC) {
+			pr_debug("  In power-off state, power it on ...\n");
+			val &= ~(PCI_EXP_SLTCTL_PCC | PCI_EXP_SLTCTL_PIC);
+			val |= (0x0100 & PCI_EXP_SLTCTL_PIC);
+			eeh_ops->write_config(dn, cap + PCI_EXP_SLTCTL, 2, val);
+			msleep(2 * 1000);
+		}
+	}
+
+	/* Enable link */
+	eeh_ops->read_config(dn, cap + PCI_EXP_LNKCTL, 2, &val);
+	val &= ~PCI_EXP_LNKCTL_LD;
+	eeh_ops->write_config(dn, cap + PCI_EXP_LNKCTL, 2, val);
+
+	/* Check link */
+	eeh_ops->read_config(dn, cap + PCI_EXP_LNKCAP, 4, &val);
+	if (!(val & PCI_EXP_LNKCAP_DLLLARC)) {
+		pr_debug("  No link reporting capability (0x%08x) \n", val);
+		msleep(1000);
+		return;
+	}
+
+	/* Wait for the link to come up, with a 5s timeout */
+	timeout = 0;
+	while (timeout < 5000) {
+		msleep(20);
+		timeout += 20;
+
+		eeh_ops->read_config(dn, cap + PCI_EXP_LNKSTA, 2, &val);
+		if (val & PCI_EXP_LNKSTA_DLLLA)
+			break;
+	}
+
+	if (val & PCI_EXP_LNKSTA_DLLLA)
+		pr_debug("  Link up (%s)\n",
+			 (val & PCI_EXP_LNKSTA_CLS_2_5GB) ? "2.5GB" : "5GB");
+	else
+		pr_debug("  Link not ready (0x%04x)\n", val);
+}
+
+#define BYTE_SWAP(OFF)	(8*((OFF)/4)+3-(OFF))
+#define SAVED_BYTE(OFF)	(((u8 *)(edev->config_space))[BYTE_SWAP(OFF)])
+
+static void eeh_restore_bridge_bars(struct pci_dev *pdev,
+				    struct eeh_dev *edev,
+				    struct device_node *dn)
+{
+	int i;
+
+	/*
+	 * Device BARs: 0x10 - 0x18
+	 * Bus numbers and windows: 0x18 - 0x30
+	 */
+	for (i = 4; i < 13; i++)
+		eeh_ops->write_config(dn, i*4, 4, edev->config_space[i]);
+	/* Rom: 0x38 */
+	eeh_ops->write_config(dn, 14*4, 4, edev->config_space[14]);
+
+	/* Cache line & Latency timer: 0xC 0xD */
+	eeh_ops->write_config(dn, PCI_CACHE_LINE_SIZE, 1,
+		SAVED_BYTE(PCI_CACHE_LINE_SIZE));
+	eeh_ops->write_config(dn, PCI_LATENCY_TIMER, 1,
+		SAVED_BYTE(PCI_LATENCY_TIMER));
+	/* Max latency, min grant, interrupt pin and line: 0x3C */
+	eeh_ops->write_config(dn, 15*4, 4, edev->config_space[15]);
+
+	/* PCI Command: 0x4 */
+	eeh_ops->write_config(dn, PCI_COMMAND, 4, edev->config_space[1]);
+
+	/* Check the PCIe link is ready */
+	eeh_bridge_check_link(pdev, dn);
+}
+
+static void eeh_restore_device_bars(struct eeh_dev *edev,
+				    struct device_node *dn)
 {
 	int i;
 	u32 cmd;
-	struct eeh_dev *edev = (struct eeh_dev *)data;
-	struct device_node *dn = eeh_dev_to_of_node(edev);
 
 	for (i = 4; i < 10; i++)
 		eeh_ops->write_config(dn, i*4, 4, edev->config_space[i]);
 	/* 12 == Expansion ROM Address */
 	eeh_ops->write_config(dn, 12*4, 4, edev->config_space[12]);
 
-#define BYTE_SWAP(OFF) (8*((OFF)/4)+3-(OFF))
-#define SAVED_BYTE(OFF) (((u8 *)(edev->config_space))[BYTE_SWAP(OFF)])
-
 	eeh_ops->write_config(dn, PCI_CACHE_LINE_SIZE, 1,
 		SAVED_BYTE(PCI_CACHE_LINE_SIZE));
 	eeh_ops->write_config(dn, PCI_LATENCY_TIMER, 1,
@@ -637,6 +716,34 @@
 	else
 		cmd &= ~PCI_COMMAND_SERR;
 	eeh_ops->write_config(dn, PCI_COMMAND, 4, cmd);
+}
+
+/**
+ * eeh_restore_one_device_bars - Restore the Base Address Registers for one device
+ * @data: EEH device
+ * @flag: Unused
+ *
+ * Loads the PCI configuration space base address registers,
+ * the expansion ROM base address, the latency timer, etc.
+ * from the saved values in the device node.
+ */
+static void *eeh_restore_one_device_bars(void *data, void *flag)
+{
+	struct pci_dev *pdev = NULL;
+	struct eeh_dev *edev = (struct eeh_dev *)data;
+	struct device_node *dn = eeh_dev_to_of_node(edev);
+
+	/* Trace the PCI bridge */
+	if (eeh_probe_mode_dev()) {
+		pdev = eeh_dev_to_pci_dev(edev);
+		if (pdev->hdr_type != PCI_HEADER_TYPE_BRIDGE)
+			pdev = NULL;
+	}
+
+	if (pdev)
+		eeh_restore_bridge_bars(pdev, edev, dn);
+	else
+		eeh_restore_device_bars(edev, dn);
 
 	return NULL;
 }
@@ -673,8 +780,6 @@
 	struct eeh_dev *edev;
 	struct pci_dev *pdev;
 
-	eeh_lock();
-
 	if (pe->type & EEH_PE_PHB) {
 		bus = pe->phb->bus;
 	} else if (pe->type & EEH_PE_BUS ||
@@ -691,7 +796,5 @@
 	}
 
 out:
-	eeh_unlock();
-
 	return bus;
 }
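A short decoding of the offsets driven by eeh_restore_bridge_bars() and the SAVED_BYTE() macro, worked from the code above rather than added to the patch:

	/* config_space[] word index i maps to PCI config offset i*4:
	 *   i = 4..5   -> 0x10, 0x14   bridge BARs
	 *   i = 6..12  -> 0x18..0x30   bus numbers, I/O and memory windows
	 *   i = 14     -> 0x38         expansion ROM
	 *   i = 15     -> 0x3C         interrupt line/pin (and bridge control)
	 *
	 * BYTE_SWAP() locates a byte inside the u32 words that were saved in
	 * big-endian order, e.g. OFF = PCI_LATENCY_TIMER (0x0d):
	 *   8*(0x0d/4) + 3 - 0x0d = 24 + 3 - 13 = 14
	 * so SAVED_BYTE(PCI_LATENCY_TIMER) reads byte 14 of the saved header.
	 */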
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index e783453..4e00d22 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -341,10 +341,17 @@
 	EXCEPTION_PROLOG_0(PACA_EXGEN)
 	b	vsx_unavailable_pSeries
 
+facility_unavailable_trampoline:
 	. = 0xf60
 	SET_SCRATCH0(r13)
 	EXCEPTION_PROLOG_0(PACA_EXGEN)
-	b	tm_unavailable_pSeries
+	b	facility_unavailable_pSeries
+
+hv_facility_unavailable_trampoline:
+	. = 0xf80
+	SET_SCRATCH0(r13)
+	EXCEPTION_PROLOG_0(PACA_EXGEN)
+	b	facility_unavailable_hv
 
 #ifdef CONFIG_CBE_RAS
 	STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
@@ -522,8 +529,10 @@
 	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20)
 	STD_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
 	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)
-	STD_EXCEPTION_PSERIES_OOL(0xf60, tm_unavailable)
+	STD_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
 	KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf60)
+	STD_EXCEPTION_HV_OOL(0xf82, facility_unavailable)
+	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xf82)
 
 /*
  * An interrupt came in while soft-disabled. We set paca->irq_happened, then:
@@ -683,7 +692,7 @@
 	STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
 	STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
 	STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
-	STD_EXCEPTION_COMMON(0xe40, emulation_assist, .program_check_exception)
+	STD_EXCEPTION_COMMON(0xe40, emulation_assist, .emulation_assist_interrupt)
 	STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
 #ifdef CONFIG_PPC_DOORBELL
 	STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .doorbell_exception)
@@ -793,14 +802,10 @@
 	STD_RELON_EXCEPTION_PSERIES(0x4d00, 0xd00, single_step)
 
 	. = 0x4e00
-	SET_SCRATCH0(r13)
-	EXCEPTION_PROLOG_0(PACA_EXGEN)
-	b	h_data_storage_relon_hv
+	b	.	/* Can't happen, see v2.07 Book III-S section 6.5 */
 
 	. = 0x4e20
-	SET_SCRATCH0(r13)
-	EXCEPTION_PROLOG_0(PACA_EXGEN)
-	b	h_instr_storage_relon_hv
+	b	.	/* Can't happen, see v2.07 Book III-S section 6.5 */
 
 	. = 0x4e40
 	SET_SCRATCH0(r13)
@@ -808,9 +813,7 @@
 	b	emulation_assist_relon_hv
 
 	. = 0x4e60
-	SET_SCRATCH0(r13)
-	EXCEPTION_PROLOG_0(PACA_EXGEN)
-	b	hmi_exception_relon_hv
+	b	.	/* Can't happen, see v2.07 Book III-S section 6.5 */
 
 	. = 0x4e80
 	SET_SCRATCH0(r13)
@@ -835,11 +838,17 @@
 	EXCEPTION_PROLOG_0(PACA_EXGEN)
 	b	vsx_unavailable_relon_pSeries
 
-tm_unavailable_relon_pSeries_1:
+facility_unavailable_relon_trampoline:
 	. = 0x4f60
 	SET_SCRATCH0(r13)
 	EXCEPTION_PROLOG_0(PACA_EXGEN)
-	b	tm_unavailable_relon_pSeries
+	b	facility_unavailable_relon_pSeries
+
+hv_facility_unavailable_relon_trampoline:
+	. = 0x4f80
+	SET_SCRATCH0(r13)
+	EXCEPTION_PROLOG_0(PACA_EXGEN)
+	b	facility_unavailable_relon_hv
 
 	STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint)
 #ifdef CONFIG_PPC_DENORMALISATION
@@ -1165,36 +1174,21 @@
 	bl	.vsx_unavailable_exception
 	b	.ret_from_except
 
-	.align	7
-	.globl tm_unavailable_common
-tm_unavailable_common:
-	EXCEPTION_PROLOG_COMMON(0xf60, PACA_EXGEN)
-	bl	.save_nvgprs
-	DISABLE_INTS
-	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.tm_unavailable_exception
-	b	.ret_from_except
+	STD_EXCEPTION_COMMON(0xf60, facility_unavailable, .facility_unavailable_exception)
 
 	.align	7
 	.globl	__end_handlers
 __end_handlers:
 
 	/* Equivalents to the above handlers for relocation-on interrupt vectors */
-	STD_RELON_EXCEPTION_HV_OOL(0xe00, h_data_storage)
-	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe00)
-	STD_RELON_EXCEPTION_HV_OOL(0xe20, h_instr_storage)
-	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe20)
 	STD_RELON_EXCEPTION_HV_OOL(0xe40, emulation_assist)
-	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe40)
-	STD_RELON_EXCEPTION_HV_OOL(0xe60, hmi_exception)
-	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe60)
 	MASKABLE_RELON_EXCEPTION_HV_OOL(0xe80, h_doorbell)
-	KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe80)
 
 	STD_RELON_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
 	STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
 	STD_RELON_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
-	STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, tm_unavailable)
+	STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
+	STD_RELON_EXCEPTION_HV_OOL(0xf80, facility_unavailable)
 
 #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
 /*
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index a949bdf..f0b47d1 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -176,7 +176,7 @@
 		length_max = 512 ; /* 64 doublewords */
 		/* DAWR region can't cross 512 boundary */
 		if ((bp->attr.bp_addr >> 10) != 
-		    ((bp->attr.bp_addr + bp->attr.bp_len) >> 10))
+		    ((bp->attr.bp_addr + bp->attr.bp_len - 1) >> 10))
 			return -EINVAL;
 	}
 	if (info->len >
@@ -250,6 +250,7 @@
 	 * we still need to single-step the instruction, but we don't
 	 * generate an event.
 	 */
+	info->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ;
 	if (!((bp->attr.bp_addr <= dar) &&
 	      (dar - bp->attr.bp_addr < bp->attr.bp_len)))
 		info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
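The "- 1" in the new boundary test matters because a breakpoint covers bytes [bp_addr, bp_addr + bp_len - 1]; without it, a region whose end lands exactly on a boundary is falsely reported as crossing it. A worked example (the 1 KiB granule is inferred from the ">> 10"):

	/* bp_addr = 0x3c00, bp_len = 0x400: last covered byte is 0x3fff
	 *   old: (0x3c00 >> 10) != ((0x3c00 + 0x400) >> 10)      -> 0xf != 0x10, rejected
	 *   new: (0x3c00 >> 10) != ((0x3c00 + 0x400 - 1) >> 10)  -> 0xf == 0xf,  accepted
	 */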
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index 939ea7e..d7216c9 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -85,7 +85,7 @@
 /*
  * Register the sysctl to set/clear powersave_nap.
  */
-static ctl_table powersave_nap_ctl_table[]={
+static struct ctl_table powersave_nap_ctl_table[] = {
 	{
 		.procname	= "powersave-nap",
 		.data		= &powersave_nap,
@@ -95,7 +95,7 @@
 	},
 	{}
 };
-static ctl_table powersave_nap_sysctl_root[] = {
+static struct ctl_table powersave_nap_sysctl_root[] = {
 	{
 		.procname	= "kernel",
 		.mode		= 0555,
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 32fa52e..2e51cde 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -160,7 +160,7 @@
 	 * in case we also had a rollover while hard disabled
 	 */
 	local_paca->irq_happened &= ~PACA_IRQ_DEC;
-	if (decrementer_check_overflow())
+	if ((happened & PACA_IRQ_DEC) || decrementer_check_overflow())
 		return 0x900;
 
 	/* Finally check if an external interrupt happened */
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index 48fbc2b..8213ee1 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -84,22 +84,30 @@
 	char *tmp = NULL;
 	ssize_t size;
 
-	ret = -ENODEV;
-	if (!ppc_md.nvram_size)
+	if (!ppc_md.nvram_size) {
+		ret = -ENODEV;
 		goto out;
+	}
 
-	ret = 0;
 	size = ppc_md.nvram_size();
-	if (*ppos >= size || size < 0)
+	if (size < 0) {
+		ret = size;
 		goto out;
+	}
+
+	if (*ppos >= size) {
+		ret = 0;
+		goto out;
+	}
 
 	count = min_t(size_t, count, size - *ppos);
 	count = min(count, PAGE_SIZE);
 
-	ret = -ENOMEM;
 	tmp = kmalloc(count, GFP_KERNEL);
-	if (!tmp)
+	if (!tmp) {
+		ret = -ENOMEM;
 		goto out;
+	}
 
 	ret = ppc_md.nvram_read(tmp, count, ppos);
 	if (ret <= 0)
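The rewrite above assigns the error code at the point of failure instead of preloading ret before each check; a condensed sketch of the resulting outcomes (same identifiers as the hunk, simplified to early returns for illustration):

	if (!ppc_md.nvram_size)
		return -ENODEV;		/* no nvram provider at all */
	size = ppc_md.nvram_size();
	if (size < 0)
		return size;		/* propagate the provider's error */
	if (*ppos >= size)
		return 0;		/* past EOF: nothing to read */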
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index eabeec9..f46914a 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -994,7 +994,7 @@
 		ppc_md.pci_dma_bus_setup(bus);
 }
 
-void pcibios_setup_device(struct pci_dev *dev)
+static void pcibios_setup_device(struct pci_dev *dev)
 {
 	/* Fixup NUMA node as it may not be setup yet by the generic
 	 * code and is needed by the DMA init
@@ -1015,6 +1015,17 @@
 		ppc_md.pci_irq_fixup(dev);
 }
 
+int pcibios_add_device(struct pci_dev *dev)
+{
+	/*
+	 * We can only call pcibios_setup_device() after bus setup is complete,
+	 * since some of the platform specific DMA setup code depends on it.
+	 */
+	if (dev->bus->is_added)
+		pcibios_setup_device(dev);
+	return 0;
+}
+
 void pcibios_setup_bus_devices(struct pci_bus *bus)
 {
 	struct pci_dev *dev;
@@ -1469,10 +1480,6 @@
 		if (ppc_md.pcibios_enable_device_hook(dev))
 			return -EINVAL;
 
-	/* avoid pcie irq fix up impact on cardbus */
-	if (dev->hdr_type != PCI_HEADER_TYPE_CARDBUS)
-		pcibios_setup_device(dev);
-
 	return pci_enable_resources(dev, mask);
 }
 
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index b0f3e3f..c517dbe 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -916,7 +916,11 @@
 	flush_altivec_to_thread(src);
 	flush_vsx_to_thread(src);
 	flush_spe_to_thread(src);
+
 	*dst = *src;
+
+	clear_task_ebb(dst);
+
 	return 0;
 }
 
@@ -1369,7 +1373,7 @@
 
 #ifdef CONFIG_PPC64
 /* Called with hard IRQs off */
-void __ppc64_runlatch_on(void)
+void notrace __ppc64_runlatch_on(void)
 {
 	struct thread_info *ti = current_thread_info();
 	unsigned long ctrl;
@@ -1382,7 +1386,7 @@
 }
 
 /* Called with hard IRQs off */
-void __ppc64_runlatch_off(void)
+void notrace __ppc64_runlatch_off(void)
 {
 	struct thread_info *ti = current_thread_info();
 	unsigned long ctrl;
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 9c753bc..eb23ac9 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -559,7 +559,7 @@
 }
 #endif
 
-static bool __init early_reserve_mem_dt(void)
+static void __init early_reserve_mem_dt(void)
 {
 	unsigned long i, len, dt_root;
 	const __be32 *prop;
@@ -569,7 +569,9 @@
 	prop = of_get_flat_dt_prop(dt_root, "reserved-ranges", &len);
 
 	if (!prop)
-		return false;
+		return;
+
+	DBG("Found new-style reserved-ranges\n");
 
 	/* Each reserved range is an (address,size) pair, 2 cells each,
 	 * totalling 4 cells per range. */
@@ -579,11 +581,11 @@
 		base = of_read_number(prop + (i * 4) + 0, 2);
 		size = of_read_number(prop + (i * 4) + 2, 2);
 
-		if (size)
+		if (size) {
+			DBG("reserving: %llx -> %llx\n", base, size);
 			memblock_reserve(base, size);
+		}
 	}
-
-	return true;
 }
 
 static void __init early_reserve_mem(void)
@@ -601,20 +603,16 @@
 	self_size = initial_boot_params->totalsize;
 	memblock_reserve(self_base, self_size);
 
-	/*
-	 * Try looking for reserved-regions property in the DT first; if
-	 * it's present, it'll contain all of the necessary reservation
-	 * info
-	 */
-	if (early_reserve_mem_dt())
-		return;
+	/* Look for the new "reserved-ranges" property in the DT */
+	early_reserve_mem_dt();
 
 #ifdef CONFIG_BLK_DEV_INITRD
-	/* then reserve the initrd, if any */
-	if (initrd_start && (initrd_end > initrd_start))
+	/* Then reserve the initrd, if any */
+	if (initrd_start && (initrd_end > initrd_start)) {
 		memblock_reserve(_ALIGN_DOWN(__pa(initrd_start), PAGE_SIZE),
 			_ALIGN_UP(initrd_end, PAGE_SIZE) -
 			_ALIGN_DOWN(initrd_start, PAGE_SIZE));
+	}
 #endif /* CONFIG_BLK_DEV_INITRD */
 
 #ifdef CONFIG_PPC32
@@ -626,6 +624,8 @@
 		u32 base_32, size_32;
 		u32 *reserve_map_32 = (u32 *)reserve_map;
 
+		DBG("Found old 32-bit reserve map\n");
+
 		while (1) {
 			base_32 = *(reserve_map_32++);
 			size_32 = *(reserve_map_32++);
@@ -640,6 +640,9 @@
 		return;
 	}
 #endif
+	DBG("Processing reserve map\n");
+
+	/* Handle the reserve map in the fdt blob if it exists */
 	while (1) {
 		base = *(reserve_map++);
 		size = *(reserve_map++);
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 98c2fc1..64f7bd5 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -1449,7 +1449,9 @@
 	 */
 	if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_RANGE_INCLUSIVE) {
 		len = bp_info->addr2 - bp_info->addr;
-	} else if (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) {
+	} else if (bp_info->addr_mode == PPC_BREAKPOINT_MODE_EXACT) {
+		len = 1;
+	} else {
 		ptrace_put_breakpoints(child);
 		return -EINVAL;
 	}
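For context, a hedged userspace sketch of the request that now takes the len = 1 path (the uapi names are the existing ones from asm/ptrace.h; the watched variable is illustrative): an exact-mode watchpoint supplies no addr2, so the kernel now chooses the one-byte length itself.

	struct ppc_hw_breakpoint bp;

	memset(&bp, 0, sizeof(bp));
	bp.version        = PPC_DEBUG_CURRENT_VERSION;
	bp.trigger_type   = PPC_BREAKPOINT_TRIGGER_WRITE;
	bp.addr_mode      = PPC_BREAKPOINT_MODE_EXACT;	/* no addr2; kernel uses len = 1 */
	bp.condition_mode = PPC_BREAKPOINT_CONDITION_NONE;
	bp.addr           = (__u64)&watched_var;	/* illustrative target */

	ptrace(PPC_PTRACE_SETHWDEBUG, child_pid, 0, &bp);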
diff --git a/arch/powerpc/kernel/reloc_32.S b/arch/powerpc/kernel/reloc_32.S
index ef46ba6..f366fed 100644
--- a/arch/powerpc/kernel/reloc_32.S
+++ b/arch/powerpc/kernel/reloc_32.S
@@ -166,7 +166,7 @@
 	/* R_PPC_ADDR16_LO */
 lo16:
 	cmpwi	r4, R_PPC_ADDR16_LO
-	bne	nxtrela
+	bne	unknown_type
 	lwz	r4, 0(r9)	/* r_offset */
 	lwz	r0, 8(r9)	/* r_addend */
 	add	r0, r0, r3
@@ -191,6 +191,7 @@
 	dcbst	r4,r7
 	sync			/* Ensure the data is flushed before icbi */
 	icbi	r4,r7
+unknown_type:
 	cmpwi	r8, 0		/* relasz = 0 ? */
 	ble	done
 	add	r9, r9, r6	/* move to next entry in the .rela table */
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 52add6f..80b5ef4 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -1172,7 +1172,7 @@
 static arch_spinlock_t timebase_lock;
 static u64 timebase = 0;
 
-void __cpuinit rtas_give_timebase(void)
+void rtas_give_timebase(void)
 {
 	unsigned long flags;
 
@@ -1189,7 +1189,7 @@
 	local_irq_restore(flags);
 }
 
-void __cpuinit rtas_take_timebase(void)
+void rtas_take_timebase(void)
 {
 	while (!timebase)
 		barrier();
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index e379d3f..389fb807 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -76,7 +76,7 @@
 #endif
 
 int boot_cpuid = 0;
-int __initdata spinning_secondaries;
+int spinning_secondaries;
 u64 ppc64_pft_size;
 
 /* Pick defaults since we might want to patch instructions
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index ee7ac5e..38b0ba6 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -480,7 +480,7 @@
 	secondary_ti = current_set[cpu] = ti;
 }
 
-int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
+int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
 	int rc, c;
 
@@ -610,7 +610,7 @@
 }
 
 /* Activate a secondary processor. */
-__cpuinit void start_secondary(void *unused)
+void start_secondary(void *unused)
 {
 	unsigned int cpu = smp_processor_id();
 	struct device_node *l2_cache;
@@ -637,12 +637,10 @@
 
 	vdso_getcpu_init();
 #endif
-	notify_cpu_starting(cpu);
-	set_cpu_online(cpu, true);
 	/* Update sibling maps */
 	base = cpu_first_thread_sibling(cpu);
 	for (i = 0; i < threads_per_core; i++) {
-		if (cpu_is_offline(base + i))
+		if (cpu_is_offline(base + i) && (cpu != base + i))
 			continue;
 		cpumask_set_cpu(cpu, cpu_sibling_mask(base + i));
 		cpumask_set_cpu(base + i, cpu_sibling_mask(cpu));
@@ -667,6 +665,10 @@
 	}
 	of_node_put(l2_cache);
 
+	smp_wmb();
+	notify_cpu_starting(cpu);
+	set_cpu_online(cpu, true);
+
 	local_irq_enable();
 
 	cpu_startup_entry(CPUHP_ONLINE);
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index e68a845..27a90b9 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -341,7 +341,7 @@
 #endif /* HAS_PPC_PMC_PA6T */
 #endif /* HAS_PPC_PMC_CLASSIC */
 
-static void __cpuinit register_cpu_online(unsigned int cpu)
+static void register_cpu_online(unsigned int cpu)
 {
 	struct cpu *c = &per_cpu(cpu_devices, cpu);
 	struct device *s = &c->dev;
@@ -502,7 +502,7 @@
 
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
+static int sysfs_cpu_notify(struct notifier_block *self,
 				      unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned int)(long)hcpu;
@@ -522,7 +522,7 @@
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
+static struct notifier_block sysfs_cpu_nb = {
 	.notifier_call	= sysfs_cpu_notify,
 };
 
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 5fc29ad..65ab9e9 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -631,7 +631,6 @@
 	return found;
 }
 
-/* should become __cpuinit when secondary_cpu_time_init also is */
 void start_cpu_decrementer(void)
 {
 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index 2da67e7..51be8fb 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -112,9 +112,18 @@
 	std	r3, STACK_PARAM(0)(r1)
 	SAVE_NVGPRS(r1)
 
+	/* We need to setup MSR for VSX register save instructions.  Here we
+	 * also clear the MSR RI since when we do the treclaim, we won't have a
+	 * valid kernel pointer for a while.  We clear RI here as it avoids
+	 * adding another mtmsr closer to the treclaim.  This makes the region
+	 * marked as non-recoverable wider than it needs to be but it saves on
+	 * inserting another mtmsrd later.
+	 */
 	mfmsr	r14
 	mr	r15, r14
 	ori	r15, r15, MSR_FP
+	li	r16, MSR_RI
+	andc	r15, r15, r16
 	oris	r15, r15, MSR_VEC@h
 #ifdef CONFIG_VSX
 	BEGIN_FTR_SECTION
@@ -349,9 +358,10 @@
 	mtcr	r5
 	mtxer	r6
 
-	/* MSR and flags:  We don't change CRs, and we don't need to alter
-	 * MSR.
+	/* Clear the MSR RI since we are about to change R1.  EE is already off
 	 */
+	li	r4, 0
+	mtmsrd	r4, 1
 
 	REST_4GPRS(0, r7)			/* GPR0-3 */
 	REST_GPR(4, r7)				/* GPR4-6 */
@@ -377,6 +387,10 @@
 	GET_PACA(r13)
 	GET_SCRATCH0(r1)
 
+	/* R1 is restored, so we are recoverable again.  EE is still off */
+	li	r4, MSR_RI
+	mtmsrd	r4, 1
+
 	REST_NVGPRS(r1)
 
 	addi    r1, r1, TM_FRAME_SIZE
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 071f6e0..bf33c22 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -866,6 +866,10 @@
 		u8 val;
 		u32 shift = 8 * (3 - (pos & 0x3));
 
+		/* if process is 32-bit, clear upper 32 bits of EA */
+		if ((regs->msr & MSR_64BIT) == 0)
+			EA &= 0xFFFFFFFF;
+
 		switch ((instword & PPC_INST_STRING_MASK)) {
 			case PPC_INST_LSWX:
 			case PPC_INST_LSWI:
@@ -1175,6 +1179,16 @@
 	exception_exit(prev_state);
 }
 
+/*
+ * This occurs when running in hypervisor mode on POWER6 or later
+ * and an illegal instruction is encountered.
+ */
+void __kprobes emulation_assist_interrupt(struct pt_regs *regs)
+{
+	regs->msr |= REASON_ILLEGAL;
+	program_check_exception(regs);
+}
+
 void alignment_exception(struct pt_regs *regs)
 {
 	enum ctx_state prev_state = exception_enter();
@@ -1282,25 +1296,50 @@
 	die("Unrecoverable VSX Unavailable Exception", regs, SIGABRT);
 }
 
-void tm_unavailable_exception(struct pt_regs *regs)
+void facility_unavailable_exception(struct pt_regs *regs)
 {
+	static char *facility_strings[] = {
+		"FPU",
+		"VMX/VSX",
+		"DSCR",
+		"PMU SPRs",
+		"BHRB",
+		"TM",
+		"AT",
+		"EBB",
+		"TAR",
+	};
+	char *facility, *prefix;
+	u64 value;
+
+	if (regs->trap == 0xf60) {
+		value = mfspr(SPRN_FSCR);
+		prefix = "";
+	} else {
+		value = mfspr(SPRN_HFSCR);
+		prefix = "Hypervisor ";
+	}
+
+	value = value >> 56;
+
 	/* We restore the interrupt state now */
 	if (!arch_irq_disabled_regs(regs))
 		local_irq_enable();
 
-	/* Currently we never expect a TMU exception.  Catch
-	 * this and kill the process!
-	 */
-	printk(KERN_EMERG "Unexpected TM unavailable exception at %lx "
-	       "(msr %lx)\n",
-	       regs->nip, regs->msr);
+	if (value < ARRAY_SIZE(facility_strings))
+		facility = facility_strings[value];
+	else
+		facility = "unknown";
+
+	pr_err("%sFacility '%s' unavailable, exception at 0x%lx, MSR=%lx\n",
+		prefix, facility, regs->nip, regs->msr);
 
 	if (user_mode(regs)) {
 		_exception(SIGILL, regs, ILL_ILLOPC, regs->nip);
 		return;
 	}
 
-	die("Unexpected TM unavailable exception", regs, SIGABRT);
+	die("Unexpected facility unavailable exception", regs, SIGABRT);
 }
 
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
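As a worked example of the decoding above (the sample status value is hypothetical): the faulting facility number sits in the top byte of FSCR or HFSCR, so shifting right by 56 yields the index into facility_strings[].

	/* e.g. an FSCR status of 0x0700000000000000:
	 *   0x0700000000000000 >> 56 == 7 -> facility_strings[7] == "EBB"
	 * giving: "Facility 'EBB' unavailable, exception at 0x..., MSR=..."
	 */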
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index d4f463a..1d9c926 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -711,7 +711,7 @@
 }
 
 #ifdef CONFIG_PPC64
-int __cpuinit vdso_getcpu_init(void)
+int vdso_getcpu_init(void)
 {
 	unsigned long cpu, node, val;
 
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 5cd7ad0..1a1b511 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -673,7 +673,6 @@
 		ret = s;
 		goto out;
 	}
-	kvmppc_lazy_ee_enable();
 
 	kvm_guest_enter();
 
@@ -699,6 +698,8 @@
 	kvmppc_load_guest_fp(vcpu);
 #endif
 
+	kvmppc_lazy_ee_enable();
+
 	ret = __kvmppc_vcpu_run(kvm_run, vcpu);
 
 	/* No need for kvm_guest_exit. It's done in handle_exit.
diff --git a/arch/powerpc/mm/44x_mmu.c b/arch/powerpc/mm/44x_mmu.c
index 2c9441e..82b1ff7 100644
--- a/arch/powerpc/mm/44x_mmu.c
+++ b/arch/powerpc/mm/44x_mmu.c
@@ -41,7 +41,7 @@
 
 unsigned long tlb_47x_boltmap[1024/8];
 
-static void __cpuinit ppc44x_update_tlb_hwater(void)
+static void ppc44x_update_tlb_hwater(void)
 {
 	extern unsigned int tlb_44x_patch_hwater_D[];
 	extern unsigned int tlb_44x_patch_hwater_I[];
@@ -134,7 +134,7 @@
 /*
  * "Pins" a 256MB TLB entry in AS0 for kernel lowmem for 47x type MMU
  */
-static void __cpuinit ppc47x_pin_tlb(unsigned int virt, unsigned int phys)
+static void ppc47x_pin_tlb(unsigned int virt, unsigned int phys)
 {
 	unsigned int rA;
 	int bolted;
@@ -229,7 +229,7 @@
 }
 
 #ifdef CONFIG_SMP
-void __cpuinit mmu_init_secondary(int cpu)
+void mmu_init_secondary(int cpu)
 {
 	unsigned long addr;
 	unsigned long memstart = memstart_addr & ~(PPC_PIN_SIZE - 1);
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 8452316..6ecc38b 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -807,7 +807,7 @@
 }
 
 #ifdef CONFIG_SMP
-void __cpuinit early_init_mmu_secondary(void)
+void early_init_mmu_secondary(void)
 {
 	/* Initialize hash table for that CPU */
 	if (!firmware_has_feature(FW_FEATURE_LPAR))
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index f2f01fd..5555778 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -536,8 +536,14 @@
 	do {
 		pmd = pmd_offset(pud, addr);
 		next = pmd_addr_end(addr, end);
-		if (pmd_none_or_clear_bad(pmd))
+		if (!is_hugepd(pmd)) {
+			/*
+			 * If it is not a hugepd pointer, the entry should
+			 * already have been cleared.
+			 */
+			WARN_ON(!pmd_none_or_clear_bad(pmd));
 			continue;
+		}
 #ifdef CONFIG_PPC_FSL_BOOK3E
 		/*
 		 * Increment next by the size of the huge mapping since
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 810f8e4..af3d78e 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -332,8 +332,8 @@
 
 #ifdef CONFIG_SMP
 
-static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
-					    unsigned long action, void *hcpu)
+static int mmu_context_cpu_notify(struct notifier_block *self,
+				  unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned int)(long)hcpu;
 
@@ -366,7 +366,7 @@
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata mmu_context_cpu_nb = {
+static struct notifier_block mmu_context_cpu_nb = {
 	.notifier_call	= mmu_context_cpu_notify,
 };
 
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 88c0425..0839721 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -516,7 +516,7 @@
  * Figure out to which domain a cpu belongs and stick it there.
  * Return the id of the domain used.
  */
-static int __cpuinit numa_setup_cpu(unsigned long lcpu)
+static int numa_setup_cpu(unsigned long lcpu)
 {
 	int nid = 0;
 	struct device_node *cpu = of_get_cpu_node(lcpu, NULL);
@@ -538,8 +538,7 @@
 	return nid;
 }
 
-static int __cpuinit cpu_numa_callback(struct notifier_block *nfb,
-			     unsigned long action,
+static int cpu_numa_callback(struct notifier_block *nfb, unsigned long action,
 			     void *hcpu)
 {
 	unsigned long lcpu = (unsigned long)hcpu;
@@ -919,7 +918,7 @@
 	return ret;
 }
 
-static struct notifier_block __cpuinitdata ppc64_numa_nb = {
+static struct notifier_block ppc64_numa_nb = {
 	.notifier_call = cpu_numa_callback,
 	.priority = 1 /* Must run before sched domains notifier. */
 };
@@ -1433,11 +1432,9 @@
 		if (cpu != update->cpu)
 			continue;
 
-		unregister_cpu_under_node(update->cpu, update->old_nid);
 		unmap_cpu_from_node(update->cpu);
 		map_cpu_to_node(update->cpu, update->new_nid);
 		vdso_getcpu_init();
-		register_cpu_under_node(update->cpu, update->new_nid);
 	}
 
 	return 0;
@@ -1485,6 +1482,9 @@
 	stop_machine(update_cpu_topology, &updates[0], &updated_cpus);
 
 	for (ud = &updates[0]; ud; ud = ud->next) {
+		unregister_cpu_under_node(ud->cpu, ud->old_nid);
+		register_cpu_under_node(ud->cpu, ud->new_nid);
+
 		dev = get_cpu_device(ud->cpu);
 		if (dev)
 			kobject_uevent(&dev->kobj, KOBJ_CHANGE);
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 6888cad..41cd68d 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -648,7 +648,7 @@
 	__early_init_mmu(1);
 }
 
-void __cpuinit early_init_mmu_secondary(void)
+void early_init_mmu_secondary(void)
 {
 	__early_init_mmu(0);
 }
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 29c6482..a3985ae 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -75,6 +75,11 @@
 
 #define MMCR0_FCHV		0
 #define MMCR0_PMCjCE		MMCR0_PMCnCE
+#define MMCR0_FC56		0
+#define MMCR0_PMAO		0
+#define MMCR0_EBE		0
+#define MMCR0_PMCC		0
+#define MMCR0_PMCC_U6		0
 
 #define SPRN_MMCRA		SPRN_MMCR2
 #define MMCRA_SAMPLE_ENABLE	0
@@ -102,6 +107,15 @@
 	return 1;
 }
 
+static bool is_ebb_event(struct perf_event *event) { return false; }
+static int ebb_event_check(struct perf_event *event) { return 0; }
+static void ebb_event_add(struct perf_event *event) { }
+static void ebb_switch_out(unsigned long mmcr0) { }
+static unsigned long ebb_switch_in(bool ebb, unsigned long mmcr0)
+{
+	return mmcr0;
+}
+
 static inline void power_pmu_bhrb_enable(struct perf_event *event) {}
 static inline void power_pmu_bhrb_disable(struct perf_event *event) {}
 void power_pmu_flush_branch_stack(void) {}
@@ -462,6 +476,89 @@
 	return;
 }
 
+static bool is_ebb_event(struct perf_event *event)
+{
+	/*
+	 * This could be a per-PMU callback, but we'd rather avoid the cost. We
+	 * check that the PMU supports EBB, meaning those that don't can still
+	 * use bit 63 of the event code for something else if they wish.
+	 */
+	return (ppmu->flags & PPMU_EBB) &&
+	       ((event->attr.config >> EVENT_CONFIG_EBB_SHIFT) & 1);
+}
+
+static int ebb_event_check(struct perf_event *event)
+{
+	struct perf_event *leader = event->group_leader;
+
+	/* Event and group leader must agree on EBB */
+	if (is_ebb_event(leader) != is_ebb_event(event))
+		return -EINVAL;
+
+	if (is_ebb_event(event)) {
+		if (!(event->attach_state & PERF_ATTACH_TASK))
+			return -EINVAL;
+
+		if (!leader->attr.pinned || !leader->attr.exclusive)
+			return -EINVAL;
+
+		if (event->attr.inherit || event->attr.sample_period ||
+		    event->attr.enable_on_exec || event->attr.freq)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void ebb_event_add(struct perf_event *event)
+{
+	if (!is_ebb_event(event) || current->thread.used_ebb)
+		return;
+
+	/*
+	 * IFF this is the first time we've added an EBB event, set
+	 * PMXE in the user MMCR0 so we can detect when it's cleared by
+	 * userspace. We need this so that we can context switch while
+	 * userspace is in the EBB handler (where PMXE is 0).
+	 */
+	current->thread.used_ebb = 1;
+	current->thread.mmcr0 |= MMCR0_PMXE;
+}
+
+static void ebb_switch_out(unsigned long mmcr0)
+{
+	if (!(mmcr0 & MMCR0_EBE))
+		return;
+
+	current->thread.siar  = mfspr(SPRN_SIAR);
+	current->thread.sier  = mfspr(SPRN_SIER);
+	current->thread.sdar  = mfspr(SPRN_SDAR);
+	current->thread.mmcr0 = mmcr0 & MMCR0_USER_MASK;
+	current->thread.mmcr2 = mfspr(SPRN_MMCR2) & MMCR2_USER_MASK;
+}
+
+static unsigned long ebb_switch_in(bool ebb, unsigned long mmcr0)
+{
+	if (!ebb)
+		goto out;
+
+	/* Enable EBB and read/write to all 6 PMCs for userspace */
+	mmcr0 |= MMCR0_EBE | MMCR0_PMCC_U6;
+
+	/* Add any bits from the user reg, FC or PMAO */
+	mmcr0 |= current->thread.mmcr0;
+
+	/* Be careful not to set PMXE if userspace had it cleared */
+	if (!(current->thread.mmcr0 & MMCR0_PMXE))
+		mmcr0 &= ~MMCR0_PMXE;
+
+	mtspr(SPRN_SIAR, current->thread.siar);
+	mtspr(SPRN_SIER, current->thread.sier);
+	mtspr(SPRN_SDAR, current->thread.sdar);
+	mtspr(SPRN_MMCR2, current->thread.mmcr2);
+out:
+	return mmcr0;
+}
 #endif /* CONFIG_PPC64 */
 
 static void perf_event_interrupt(struct pt_regs *regs);
@@ -732,6 +829,13 @@
 
 	if (!event->hw.idx)
 		return;
+
+	if (is_ebb_event(event)) {
+		val = read_pmc(event->hw.idx);
+		local64_set(&event->hw.prev_count, val);
+		return;
+	}
+
 	/*
 	 * Performance monitor interrupts come even when interrupts
 	 * are soft-disabled, as long as interrupts are hard-enabled.
@@ -852,7 +956,7 @@
 static void power_pmu_disable(struct pmu *pmu)
 {
 	struct cpu_hw_events *cpuhw;
-	unsigned long flags;
+	unsigned long flags, mmcr0, val;
 
 	if (!ppmu)
 		return;
@@ -860,9 +964,6 @@
 	cpuhw = &__get_cpu_var(cpu_hw_events);
 
 	if (!cpuhw->disabled) {
-		cpuhw->disabled = 1;
-		cpuhw->n_added = 0;
-
 		/*
 		 * Check if we ever enabled the PMU on this cpu.
 		 */
@@ -872,6 +973,21 @@
 		}
 
 		/*
+		 * Set the 'freeze counters' bit, clear EBE/PMCC/PMAO/FC56.
+		 */
+		val  = mmcr0 = mfspr(SPRN_MMCR0);
+		val |= MMCR0_FC;
+		val &= ~(MMCR0_EBE | MMCR0_PMCC | MMCR0_PMAO | MMCR0_FC56);
+
+		/*
+		 * The barrier is to make sure the mtspr has been
+		 * executed and the PMU has frozen the events etc.
+		 * before we return.
+		 */
+		write_mmcr0(cpuhw, val);
+		mb();
+
+		/*
 		 * Disable instruction sampling if it was enabled
 		 */
 		if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
@@ -880,15 +996,12 @@
 			mb();
 		}
 
-		/*
-		 * Set the 'freeze counters' bit.
-		 * The barrier is to make sure the mtspr has been
-		 * executed and the PMU has frozen the events
-		 * before we return.
-		 */
-		write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC);
-		mb();
+		cpuhw->disabled = 1;
+		cpuhw->n_added = 0;
+
+		ebb_switch_out(mmcr0);
 	}
+
 	local_irq_restore(flags);
 }
 
@@ -903,23 +1016,36 @@
 	struct cpu_hw_events *cpuhw;
 	unsigned long flags;
 	long i;
-	unsigned long val;
+	unsigned long val, mmcr0;
 	s64 left;
 	unsigned int hwc_index[MAX_HWEVENTS];
 	int n_lim;
 	int idx;
+	bool ebb;
 
 	if (!ppmu)
 		return;
 	local_irq_save(flags);
+
 	cpuhw = &__get_cpu_var(cpu_hw_events);
-	if (!cpuhw->disabled) {
-		local_irq_restore(flags);
-		return;
+	if (!cpuhw->disabled)
+		goto out;
+
+	if (cpuhw->n_events == 0) {
+		ppc_set_pmu_inuse(0);
+		goto out;
 	}
+
 	cpuhw->disabled = 0;
 
 	/*
+	 * EBB requires an exclusive group in which every event either has the
+	 * EBB flag set or has it clear, so we can just check a single event.
+	 * Also we know we have at least one event.
+	 */
+	ebb = is_ebb_event(cpuhw->event[0]);
+
+	/*
 	 * If we didn't change anything, or only removed events,
 	 * no need to recalculate MMCR* settings and reset the PMCs.
 	 * Just reenable the PMU with the current MMCR* settings
@@ -928,8 +1054,6 @@
 	if (!cpuhw->n_added) {
 		mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
 		mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
-		if (cpuhw->n_events == 0)
-			ppc_set_pmu_inuse(0);
 		goto out_enable;
 	}
 
@@ -996,25 +1120,34 @@
 			++n_lim;
 			continue;
 		}
-		val = 0;
-		if (event->hw.sample_period) {
-			left = local64_read(&event->hw.period_left);
-			if (left < 0x80000000L)
-				val = 0x80000000L - left;
+
+		if (ebb)
+			val = local64_read(&event->hw.prev_count);
+		else {
+			val = 0;
+			if (event->hw.sample_period) {
+				left = local64_read(&event->hw.period_left);
+				if (left < 0x80000000L)
+					val = 0x80000000L - left;
+			}
+			local64_set(&event->hw.prev_count, val);
 		}
-		local64_set(&event->hw.prev_count, val);
+
 		event->hw.idx = idx;
 		if (event->hw.state & PERF_HES_STOPPED)
 			val = 0;
 		write_pmc(idx, val);
+
 		perf_event_update_userpage(event);
 	}
 	cpuhw->n_limited = n_lim;
 	cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE;
 
  out_enable:
+	mmcr0 = ebb_switch_in(ebb, cpuhw->mmcr[0]);
+
 	mb();
-	write_mmcr0(cpuhw, cpuhw->mmcr[0]);
+	write_mmcr0(cpuhw, mmcr0);
 
 	/*
 	 * Enable instruction sampling if necessary
@@ -1112,6 +1245,8 @@
 	event->hw.config = cpuhw->events[n0];
 
 nocheck:
+	ebb_event_add(event);
+
 	++cpuhw->n_events;
 	++cpuhw->n_added;
 
@@ -1472,6 +1607,11 @@
 		}
 	}
 
+	/* Extra checks for EBB */
+	err = ebb_event_check(event);
+	if (err)
+		return err;
+
 	/*
 	 * If this is in a group, check if it can go on with all the
 	 * other hardware events in the group.  We assume the event
@@ -1511,6 +1651,13 @@
 	local64_set(&event->hw.period_left, event->hw.last_period);
 
 	/*
+	 * For EBB events we just context switch the PMC value, we don't do any
+	 * of the sample_period logic. We use hw.prev_count for this.
+	 */
+	if (is_ebb_event(event))
+		local64_set(&event->hw.prev_count, 0);
+
+	/*
 	 * See if we need to reserve the PMU.
 	 * If no events are currently in use, then we have to take a
 	 * mutex to ensure that we don't race with another task doing
@@ -1786,7 +1933,7 @@
 	cpuhw->mmcr[0] = MMCR0_FC;
 }
 
-static int __cpuinit
+static int
 power_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (long)hcpu;
@@ -1803,7 +1950,7 @@
 	return NOTIFY_OK;
 }
 
-int __cpuinit register_power_pmu(struct power_pmu *pmu)
+int register_power_pmu(struct power_pmu *pmu)
 {
 	if (ppmu)
 		return -EBUSY;		/* something's already registered */
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index f7d1c4f..96a64d6 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -31,9 +31,9 @@
  *
  *        60        56        52        48        44        40        36        32
  * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
- *                                     [      thresh_cmp     ]   [  thresh_ctl   ]
- *                                                                       |
- *                                       thresh start/stop OR FAB match -*
+ *   |                                 [      thresh_cmp     ]   [  thresh_ctl   ]
+ *   |                                                                   |
+ *   *- EBB (Linux)                      thresh start/stop OR FAB match -*
  *
  *        28        24        20        16        12         8         4         0
  * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
@@ -85,6 +85,7 @@
  *
  */
 
+#define EVENT_EBB_MASK		1ull
 #define EVENT_THR_CMP_SHIFT	40	/* Threshold CMP value */
 #define EVENT_THR_CMP_MASK	0x3ff
 #define EVENT_THR_CTL_SHIFT	32	/* Threshold control value (start/stop) */
@@ -109,6 +110,17 @@
 #define EVENT_IS_MARKED		(EVENT_MARKED_MASK << EVENT_MARKED_SHIFT)
 #define EVENT_PSEL_MASK		0xff	/* PMCxSEL value */
 
+#define EVENT_VALID_MASK	\
+	((EVENT_THRESH_MASK    << EVENT_THRESH_SHIFT)		|	\
+	 (EVENT_SAMPLE_MASK    << EVENT_SAMPLE_SHIFT)		|	\
+	 (EVENT_CACHE_SEL_MASK << EVENT_CACHE_SEL_SHIFT)	|	\
+	 (EVENT_PMC_MASK       << EVENT_PMC_SHIFT)		|	\
+	 (EVENT_UNIT_MASK      << EVENT_UNIT_SHIFT)		|	\
+	 (EVENT_COMBINE_MASK   << EVENT_COMBINE_SHIFT)		|	\
+	 (EVENT_MARKED_MASK    << EVENT_MARKED_SHIFT)		|	\
+	 (EVENT_EBB_MASK       << EVENT_CONFIG_EBB_SHIFT)	|	\
+	  EVENT_PSEL_MASK)
+
 /* MMCRA IFM bits - POWER8 */
 #define	POWER8_MMCRA_IFM1		0x0000000040000000UL
 #define	POWER8_MMCRA_IFM2		0x0000000080000000UL
@@ -130,10 +142,10 @@
  *
  *        28        24        20        16        12         8         4         0
  * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
- *                       [ ]   [  sample ]   [     ]   [6] [5]   [4] [3]   [2] [1]
- *                        |                     |
- *      L1 I/D qualifier -*                     |      Count of events for each PMC.
- *                                              |        p1, p2, p3, p4, p5, p6.
+ *                   |   [ ]   [  sample ]   [     ]   [6] [5]   [4] [3]   [2] [1]
+ *              EBB -*    |                     |
+ *                        |                     |      Count of events for each PMC.
+ *      L1 I/D qualifier -*                     |        p1, p2, p3, p4, p5, p6.
  *                     nc - number of counters -*
  *
  * The PMC fields P1..P6, and NC, are adder fields. As we accumulate constraints
@@ -149,6 +161,9 @@
 #define CNST_THRESH_VAL(v)	(((v) & EVENT_THRESH_MASK) << 32)
 #define CNST_THRESH_MASK	CNST_THRESH_VAL(EVENT_THRESH_MASK)
 
+#define CNST_EBB_VAL(v)		(((v) & EVENT_EBB_MASK) << 24)
+#define CNST_EBB_MASK		CNST_EBB_VAL(EVENT_EBB_MASK)
+
 #define CNST_L1_QUAL_VAL(v)	(((v) & 3) << 22)
 #define CNST_L1_QUAL_MASK	CNST_L1_QUAL_VAL(3)
 
@@ -207,14 +222,21 @@
 
 static int power8_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
 {
-	unsigned int unit, pmc, cache;
+	unsigned int unit, pmc, cache, ebb;
 	unsigned long mask, value;
 
 	mask = value = 0;
 
-	pmc   = (event >> EVENT_PMC_SHIFT)       & EVENT_PMC_MASK;
-	unit  = (event >> EVENT_UNIT_SHIFT)      & EVENT_UNIT_MASK;
-	cache = (event >> EVENT_CACHE_SEL_SHIFT) & EVENT_CACHE_SEL_MASK;
+	if (event & ~EVENT_VALID_MASK)
+		return -1;
+
+	pmc   = (event >> EVENT_PMC_SHIFT)        & EVENT_PMC_MASK;
+	unit  = (event >> EVENT_UNIT_SHIFT)       & EVENT_UNIT_MASK;
+	cache = (event >> EVENT_CACHE_SEL_SHIFT)  & EVENT_CACHE_SEL_MASK;
+	ebb   = (event >> EVENT_CONFIG_EBB_SHIFT) & EVENT_EBB_MASK;
+
+	/* Clear the EBB bit in the event, so event checks work below */
+	event &= ~(EVENT_EBB_MASK << EVENT_CONFIG_EBB_SHIFT);
 
 	if (pmc) {
 		if (pmc > 6)
@@ -284,6 +306,18 @@
 		value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT);
 	}
 
+	if (!pmc && ebb)
+		/* EBB events must specify the PMC */
+		return -1;
+
+	/*
+	 * All events must agree on EBB, either all request it or none.
+	 * EBB events are pinned & exclusive, so this should never actually
+	 * hit, but we leave it as a fallback in case.
+	 */
+	mask  |= CNST_EBB_VAL(ebb);
+	value |= CNST_EBB_MASK;
+
 	*maskp = mask;
 	*valp = value;
 
@@ -378,6 +412,10 @@
 	if (pmc_inuse & 0x7c)
 		mmcr[0] |= MMCR0_PMCjCE;
 
+	/* If we're not using PMC 5 or 6, freeze them */
+	if (!(pmc_inuse & 0x60))
+		mmcr[0] |= MMCR0_FC56;
+
 	mmcr[1] = mmcr1;
 	mmcr[2] = mmcra;
 
@@ -574,7 +612,7 @@
 	.get_constraint		= power8_get_constraint,
 	.get_alternatives	= power8_get_alternatives,
 	.disable_pmc		= power8_disable_pmc,
-	.flags			= PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_BHRB,
+	.flags			= PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_BHRB | PPMU_EBB,
 	.n_generic		= ARRAY_SIZE(power8_generic_events),
 	.generic_events		= power8_generic_events,
 	.attr_groups		= power8_pmu_attr_groups,
diff --git a/arch/powerpc/platforms/44x/currituck.c b/arch/powerpc/platforms/44x/currituck.c
index c52e1b3..7f1b71a 100644
--- a/arch/powerpc/platforms/44x/currituck.c
+++ b/arch/powerpc/platforms/44x/currituck.c
@@ -91,12 +91,12 @@
 }
 
 #ifdef CONFIG_SMP
-static void __cpuinit smp_ppc47x_setup_cpu(int cpu)
+static void smp_ppc47x_setup_cpu(int cpu)
 {
 	mpic_setup_this_cpu();
 }
 
-static int __cpuinit smp_ppc47x_kick_cpu(int cpu)
+static int smp_ppc47x_kick_cpu(int cpu)
 {
 	struct device_node *cpunode = of_get_cpu_node(cpu, NULL);
 	const u64 *spin_table_addr_prop;
diff --git a/arch/powerpc/platforms/44x/iss4xx.c b/arch/powerpc/platforms/44x/iss4xx.c
index a28a862..4241bc8 100644
--- a/arch/powerpc/platforms/44x/iss4xx.c
+++ b/arch/powerpc/platforms/44x/iss4xx.c
@@ -81,12 +81,12 @@
 }
 
 #ifdef CONFIG_SMP
-static void __cpuinit smp_iss4xx_setup_cpu(int cpu)
+static void smp_iss4xx_setup_cpu(int cpu)
 {
 	mpic_setup_this_cpu();
 }
 
-static int __cpuinit smp_iss4xx_kick_cpu(int cpu)
+static int smp_iss4xx_kick_cpu(int cpu)
 {
 	struct device_node *cpunode = of_get_cpu_node(cpu, NULL);
 	const u64 *spin_table_addr_prop;
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 6a175993..5ced4f5 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -99,7 +99,7 @@
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-static void __cpuinit smp_85xx_mach_cpu_die(void)
+static void smp_85xx_mach_cpu_die(void)
 {
 	unsigned int cpu = smp_processor_id();
 	u32 tmp;
@@ -141,7 +141,7 @@
 	return in_be32(&((struct epapr_spin_table *)spin_table)->addr_l);
 }
 
-static int __cpuinit smp_85xx_kick_cpu(int nr)
+static int smp_85xx_kick_cpu(int nr)
 {
 	unsigned long flags;
 	const u64 *cpu_rel_addr;
@@ -362,7 +362,7 @@
 }
 #endif /* CONFIG_KEXEC */
 
-static void __cpuinit smp_85xx_setup_cpu(int cpu_nr)
+static void smp_85xx_setup_cpu(int cpu_nr)
 {
 	if (smp_85xx_ops.probe == smp_mpic_probe)
 		mpic_setup_this_cpu();
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index bdb738a..49c9f95 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -885,7 +885,7 @@
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata smp_core99_cpu_nb = {
+static struct notifier_block smp_core99_cpu_nb = {
 	.notifier_call	= smp_core99_cpu_notify,
 };
 #endif /* CONFIG_HOTPLUG_CPU */
diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c
index 84f3036..0cd1c4a 100644
--- a/arch/powerpc/platforms/powernv/eeh-ioda.c
+++ b/arch/powerpc/platforms/powernv/eeh-ioda.c
@@ -132,7 +132,7 @@
 					    &ioda_eeh_dbgfs_ops);
 #endif
 
-		phb->eeh_enabled = 1;
+		phb->eeh_state |= PNV_EEH_STATE_ENABLED;
 	}
 
 	return 0;
@@ -815,7 +815,7 @@
 		 * removed, we needn't take care of it any more.
 		 */
 		phb = hose->private_data;
-		if (phb->removed)
+		if (phb->eeh_state & PNV_EEH_STATE_REMOVED)
 			continue;
 
 		rc = opal_pci_next_error(phb->opal_id,
@@ -850,14 +850,17 @@
 				list_for_each_entry_safe(hose, tmp,
 						&hose_list, list_node) {
 					phb = hose->private_data;
-					phb->removed = 1;
+					phb->eeh_state |= PNV_EEH_STATE_REMOVED;
 				}
 
-				WARN(1, "EEH: dead IOC detected\n");
+				pr_err("EEH: dead IOC detected\n");
 				ret = 4;
 				goto out;
-			} else if (severity == OPAL_EEH_SEV_INF)
+			} else if (severity == OPAL_EEH_SEV_INF) {
+				pr_info("EEH: IOC informative error "
+					"detected\n");
 				ioda_eeh_hub_diag(hose);
+			}
 
 			break;
 		case OPAL_EEH_PHB_ERROR:
@@ -865,29 +868,33 @@
 				if (ioda_eeh_get_phb_pe(hose, pe))
 					break;
 
-				WARN(1, "EEH: dead PHB#%x detected\n",
-				     hose->global_number);
-				phb->removed = 1;
+				pr_err("EEH: dead PHB#%x detected\n",
+					hose->global_number);
+				phb->eeh_state |= PNV_EEH_STATE_REMOVED;
 				ret = 3;
 				goto out;
 			} else if (severity == OPAL_EEH_SEV_PHB_FENCED) {
 				if (ioda_eeh_get_phb_pe(hose, pe))
 					break;
 
-				WARN(1, "EEH: fenced PHB#%x detected\n",
-				     hose->global_number);
+				pr_err("EEH: fenced PHB#%x detected\n",
+					hose->global_number);
 				ret = 2;
 				goto out;
-			} else if (severity == OPAL_EEH_SEV_INF)
+			} else if (severity == OPAL_EEH_SEV_INF) {
+				pr_info("EEH: PHB#%x informative error "
+					"detected\n",
+					hose->global_number);
 				ioda_eeh_phb_diag(hose);
+			}
 
 			break;
 		case OPAL_EEH_PE_ERROR:
 			if (ioda_eeh_get_pe(hose, frozen_pe_no, pe))
 				break;
 
-			WARN(1, "EEH: Frozen PE#%x on PHB#%x detected\n",
-			     (*pe)->addr, (*pe)->phb->global_number);
+			pr_err("EEH: Frozen PE#%x on PHB#%x detected\n",
+				(*pe)->addr, (*pe)->phb->global_number);
 			ret = 1;
 			goto out;
 		}
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index 9559115..969cce7 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -315,46 +315,6 @@
 }
 
 /**
- * powernv_eeh_read_config - Read PCI config space
- * @dn: device node
- * @where: PCI address
- * @size: size to read
- * @val: return value
- *
- * Read config space from the speicifed device
- */
-static int powernv_eeh_read_config(struct device_node *dn, int where,
-				   int size, u32 *val)
-{
-	struct eeh_dev *edev = of_node_to_eeh_dev(dn);
-	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
-	struct pci_controller *hose = edev->phb;
-
-	return hose->ops->read(dev->bus, dev->devfn, where, size, val);
-}
-
-/**
- * powernv_eeh_write_config - Write PCI config space
- * @dn: device node
- * @where: PCI address
- * @size: size to write
- * @val: value to be written
- *
- * Write config space to the specified device
- */
-static int powernv_eeh_write_config(struct device_node *dn, int where,
-				    int size, u32 val)
-{
-	struct eeh_dev *edev = of_node_to_eeh_dev(dn);
-	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
-	struct pci_controller *hose = edev->phb;
-
-	hose = pci_bus_to_host(dev->bus);
-
-	return hose->ops->write(dev->bus, dev->devfn, where, size, val);
-}
-
-/**
  * powernv_eeh_next_error - Retrieve next EEH error to handle
  * @pe: Affected PE
  *
@@ -389,8 +349,8 @@
 	.wait_state             = powernv_eeh_wait_state,
 	.get_log                = powernv_eeh_get_log,
 	.configure_bridge       = powernv_eeh_configure_bridge,
-	.read_config            = powernv_eeh_read_config,
-	.write_config           = powernv_eeh_write_config,
+	.read_config            = pnv_pci_cfg_read,
+	.write_config           = pnv_pci_cfg_write,
 	.next_error		= powernv_eeh_next_error
 };
 
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index dc4ec79..49b57b9 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -443,6 +443,17 @@
 	set_iommu_table_base(&pdev->dev, &pe->tce32_table);
 }
 
+static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus)
+{
+	struct pci_dev *dev;
+
+	list_for_each_entry(dev, &bus->devices, bus_list) {
+		set_iommu_table_base(&dev->dev, &pe->tce32_table);
+		if (dev->subordinate)
+			pnv_ioda_setup_bus_dma(pe, dev->subordinate);
+	}
+}
+
 static void pnv_pci_ioda1_tce_invalidate(struct iommu_table *tbl,
 					 u64 *startp, u64 *endp)
 {
@@ -599,6 +610,11 @@
 	iommu_init_table(tbl, phb->hose->node);
 	iommu_register_group(tbl, pci_domain_nr(pe->pbus), pe->pe_number);
 
+	if (pe->pdev)
+		set_iommu_table_base(&pe->pdev->dev, tbl);
+	else
+		pnv_ioda_setup_bus_dma(pe, pe->pbus);
+
 	return;
  fail:
 	/* XXX Failure: Try to fallback to 64-bit only ? */
@@ -670,6 +686,11 @@
 	}
 	iommu_init_table(tbl, phb->hose->node);
 
+	if (pe->pdev)
+		set_iommu_table_base(&pe->pdev->dev, tbl);
+	else
+		pnv_ioda_setup_bus_dma(pe, pe->pbus);
+
 	return;
 fail:
 	if (pe->tce32_seg >= 0)
@@ -999,6 +1020,7 @@
 	pnv_pci_ioda_create_dbgfs();
 
 #ifdef CONFIG_EEH
+	eeh_probe_mode_set(EEH_PROBE_MODE_DEV);
 	eeh_addr_cache_build();
 	eeh_init();
 #endif
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index 577cbea..a28d3b5 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -231,47 +231,50 @@
 	spin_unlock_irqrestore(&phb->lock, flags);
 }
 
-static void pnv_pci_config_check_eeh(struct pnv_phb *phb, struct pci_bus *bus,
-				     u32 bdfn)
+static void pnv_pci_config_check_eeh(struct pnv_phb *phb,
+				     struct device_node *dn)
 {
 	s64	rc;
 	u8	fstate;
 	u16	pcierr;
 	u32	pe_no;
 
-	/* Get PE# if we support IODA */
-	pe_no = phb->bdfn_to_pe ? phb->bdfn_to_pe(phb, bus, bdfn & 0xff) : 0;
+	/*
+	 * Get the PE#. During the PCI probe stage, we might not have
+	 * set that up yet, so all ER errors should be mapped to
+	 * PE#0.
+	 */
+	pe_no = PCI_DN(dn)->pe_number;
+	if (pe_no == IODA_INVALID_PE)
+		pe_no = 0;
 
 	/* Read freeze status */
 	rc = opal_pci_eeh_freeze_status(phb->opal_id, pe_no, &fstate, &pcierr,
 					NULL);
 	if (rc) {
-		pr_warning("PCI %d: Failed to read EEH status for PE#%d,"
-			   " err %lld\n", phb->hose->global_number, pe_no, rc);
+		pr_warning("%s: Can't read EEH status (PE#%d) for "
+			   "%s, err %lld\n",
+			   __func__, pe_no, dn->full_name, rc);
 		return;
 	}
-	cfg_dbg(" -> EEH check, bdfn=%04x PE%d fstate=%x\n",
-		bdfn, pe_no, fstate);
+	cfg_dbg(" -> EEH check, bdfn=%04x PE#%d fstate=%x\n",
+		(PCI_DN(dn)->busno << 8) | (PCI_DN(dn)->devfn),
+		pe_no, fstate);
 	if (fstate != 0)
 		pnv_pci_handle_eeh_config(phb, pe_no);
 }
 
-static int pnv_pci_read_config(struct pci_bus *bus,
-			       unsigned int devfn,
-			       int where, int size, u32 *val)
+int pnv_pci_cfg_read(struct device_node *dn,
+		     int where, int size, u32 *val)
 {
-	struct pci_controller *hose = pci_bus_to_host(bus);
-	struct pnv_phb *phb = hose->private_data;
+	struct pci_dn *pdn = PCI_DN(dn);
+	struct pnv_phb *phb = pdn->phb->private_data;
+	u32 bdfn = (pdn->busno << 8) | pdn->devfn;
 #ifdef CONFIG_EEH
-	struct device_node *busdn, *dn;
 	struct eeh_pe *phb_pe = NULL;
 #endif
-	u32 bdfn = (((uint64_t)bus->number) << 8) | devfn;
 	s64 rc;
 
-	if (hose == NULL)
-		return PCIBIOS_DEVICE_NOT_FOUND;
-
 	switch (size) {
 	case 1: {
 		u8 v8;
@@ -295,8 +298,8 @@
 	default:
 		return PCIBIOS_FUNC_NOT_SUPPORTED;
 	}
-	cfg_dbg("pnv_pci_read_config bus: %x devfn: %x +%x/%x -> %08x\n",
-		bus->number, devfn, where, size, *val);
+	cfg_dbg("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
+		__func__, pdn->busno, pdn->devfn, where, size, *val);
 
 	/*
 	 * Check if the specified PE has been put into frozen
@@ -305,44 +308,33 @@
 	 * PHB-fatal errors.
 	 */
 #ifdef CONFIG_EEH
-	phb_pe = eeh_phb_pe_get(hose);
+	phb_pe = eeh_phb_pe_get(pdn->phb);
 	if (phb_pe && (phb_pe->state & EEH_PE_ISOLATED))
 		return PCIBIOS_SUCCESSFUL;
 
-	if (phb->eeh_enabled) {
-		if (*val == EEH_IO_ERROR_VALUE(size)) {
-			busdn = pci_bus_to_OF_node(bus);
-			for (dn = busdn->child; dn; dn = dn->sibling) {
-				struct pci_dn *pdn = PCI_DN(dn);
-
-				if (pdn && pdn->devfn == devfn &&
-				    eeh_dev_check_failure(of_node_to_eeh_dev(dn)))
-					return PCIBIOS_DEVICE_NOT_FOUND;
-			}
-		}
+	if (phb->eeh_state & PNV_EEH_STATE_ENABLED) {
+		if (*val == EEH_IO_ERROR_VALUE(size) &&
+		    eeh_dev_check_failure(of_node_to_eeh_dev(dn)))
+			return PCIBIOS_DEVICE_NOT_FOUND;
 	} else {
-		pnv_pci_config_check_eeh(phb, bus, bdfn);
+		pnv_pci_config_check_eeh(phb, dn);
 	}
 #else
-	pnv_pci_config_check_eeh(phb, bus, bdfn);
+	pnv_pci_config_check_eeh(phb, dn);
 #endif
 
 	return PCIBIOS_SUCCESSFUL;
 }
 
-static int pnv_pci_write_config(struct pci_bus *bus,
-				unsigned int devfn,
-				int where, int size, u32 val)
+int pnv_pci_cfg_write(struct device_node *dn,
+		      int where, int size, u32 val)
 {
-	struct pci_controller *hose = pci_bus_to_host(bus);
-	struct pnv_phb *phb = hose->private_data;
-	u32 bdfn = (((uint64_t)bus->number) << 8) | devfn;
+	struct pci_dn *pdn = PCI_DN(dn);
+	struct pnv_phb *phb = pdn->phb->private_data;
+	u32 bdfn = (pdn->busno << 8) | pdn->devfn;
 
-	if (hose == NULL)
-		return PCIBIOS_DEVICE_NOT_FOUND;
-
-	cfg_dbg("pnv_pci_write_config bus: %x devfn: %x +%x/%x -> %08x\n",
-		bus->number, devfn, where, size, val);
+	cfg_dbg("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
+		pdn->busno, pdn->devfn, where, size, val);
 	switch (size) {
 	case 1:
 		opal_pci_config_write_byte(phb->opal_id, bdfn, where, val);
@@ -359,17 +351,51 @@
 
 	/* Check if the PHB got frozen due to an error (no response) */
 #ifdef CONFIG_EEH
-	if (!phb->eeh_enabled)
-		pnv_pci_config_check_eeh(phb, bus, bdfn);
+	if (!(phb->eeh_state & PNV_EEH_STATE_ENABLED))
+		pnv_pci_config_check_eeh(phb, dn);
 #else
-	pnv_pci_config_check_eeh(phb, bus, bdfn);
+	pnv_pci_config_check_eeh(phb, dn);
 #endif
 
 	return PCIBIOS_SUCCESSFUL;
 }
 
+static int pnv_pci_read_config(struct pci_bus *bus,
+			       unsigned int devfn,
+			       int where, int size, u32 *val)
+{
+	struct device_node *dn, *busdn = pci_bus_to_OF_node(bus);
+	struct pci_dn *pdn;
+
+	for (dn = busdn->child; dn; dn = dn->sibling) {
+		pdn = PCI_DN(dn);
+		if (pdn && pdn->devfn == devfn)
+			return pnv_pci_cfg_read(dn, where, size, val);
+	}
+
+	*val = 0xFFFFFFFF;
+	return PCIBIOS_DEVICE_NOT_FOUND;
+
+}
+
+static int pnv_pci_write_config(struct pci_bus *bus,
+				unsigned int devfn,
+				int where, int size, u32 val)
+{
+	struct device_node *dn, *busdn = pci_bus_to_OF_node(bus);
+	struct pci_dn *pdn;
+
+	for (dn = busdn->child; dn; dn = dn->sibling) {
+		pdn = PCI_DN(dn);
+		if (pdn && pdn->devfn == devfn)
+			return pnv_pci_cfg_write(dn, where, size, val);
+	}
+
+	return PCIBIOS_DEVICE_NOT_FOUND;
+}
+
 struct pci_ops pnv_pci_ops = {
-	.read = pnv_pci_read_config,
+	.read  = pnv_pci_read_config,
 	.write = pnv_pci_write_config,
 };
 
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 43906e3..d633c64 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -78,6 +78,10 @@
 	int (*configure_bridge)(struct eeh_pe *pe);
 	int (*next_error)(struct eeh_pe **pe);
 };
+
+#define PNV_EEH_STATE_ENABLED	(1 << 0)	/* EEH enabled	*/
+#define PNV_EEH_STATE_REMOVED	(1 << 1)	/* PHB removed	*/
+
 #endif /* CONFIG_EEH */
 
 struct pnv_phb {
@@ -92,8 +96,7 @@
 
 #ifdef CONFIG_EEH
 	struct pnv_eeh_ops	*eeh_ops;
-	int			eeh_enabled;
-	int			removed;
+	int			eeh_state;
 #endif
 
 #ifdef CONFIG_DEBUG_FS
@@ -179,6 +182,10 @@
 extern struct pnv_eeh_ops ioda_eeh_ops;
 #endif
 
+int pnv_pci_cfg_read(struct device_node *dn,
+		     int where, int size, u32 *val);
+int pnv_pci_cfg_write(struct device_node *dn,
+		      int where, int size, u32 val);
 extern void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
 				      void *tce_mem, u64 tce_size,
 				      u64 dma_offset);
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index 77784ae..89e3857 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -40,7 +40,7 @@
 #define DBG(fmt...)
 #endif
 
-static void __cpuinit pnv_smp_setup_cpu(int cpu)
+static void pnv_smp_setup_cpu(int cpu)
 {
 	if (cpu != boot_cpuid)
 		xics_setup_cpu();
diff --git a/arch/powerpc/platforms/pseries/io_event_irq.c b/arch/powerpc/platforms/pseries/io_event_irq.c
index ef9d9d8..5ea88d1 100644
--- a/arch/powerpc/platforms/pseries/io_event_irq.c
+++ b/arch/powerpc/platforms/pseries/io_event_irq.c
@@ -115,7 +115,7 @@
  *   by scope or event type alone. For example, Torrent ISR route change
  *   event is reported with scope 0x00 (Not Applicatable) rather than
  *   0x3B (Torrent-hub). It is better to let the clients to identify
- *   who owns the the event.
+ *   who owns the event.
  */
 
 static irqreturn_t ioei_interrupt(int irq, void *dev_id)
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index fd0f2f2..02d6e21 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -71,6 +71,9 @@
 	if (cpu_has_feature(CPU_FTR_ALTIVEC))
 		lppaca_of(cpu).vmxregs_in_use = 1;
 
+	if (cpu_has_feature(CPU_FTR_ARCH_207S))
+		lppaca_of(cpu).ebb_regs_in_use = 1;
+
 	addr = __pa(&lppaca_of(cpu));
 	ret = register_vpa(hwcpu, addr);
 
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index 14cc486..9f8671a 100644
--- a/arch/powerpc/platforms/pseries/nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
@@ -486,7 +486,118 @@
 						NVRAM_RTAS_READ_TIMEOUT);
 }
 
+/* Derived from logfs_compress() */
+static int nvram_compress(const void *in, void *out, size_t inlen,
+							size_t outlen)
+{
+	int err, ret;
+
+	ret = -EIO;
+	err = zlib_deflateInit2(&stream, COMPR_LEVEL, Z_DEFLATED, WINDOW_BITS,
+						MEM_LEVEL, Z_DEFAULT_STRATEGY);
+	if (err != Z_OK)
+		goto error;
+
+	stream.next_in = in;
+	stream.avail_in = inlen;
+	stream.total_in = 0;
+	stream.next_out = out;
+	stream.avail_out = outlen;
+	stream.total_out = 0;
+
+	err = zlib_deflate(&stream, Z_FINISH);
+	if (err != Z_STREAM_END)
+		goto error;
+
+	err = zlib_deflateEnd(&stream);
+	if (err != Z_OK)
+		goto error;
+
+	if (stream.total_out >= stream.total_in)
+		goto error;
+
+	ret = stream.total_out;
+error:
+	return ret;
+}
+
+/* Compress the text from big_oops_buf into oops_buf. */
+static int zip_oops(size_t text_len)
+{
+	struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf;
+	int zipped_len = nvram_compress(big_oops_buf, oops_data, text_len,
+								oops_data_sz);
+	if (zipped_len < 0) {
+		pr_err("nvram: compression failed; returned %d\n", zipped_len);
+		pr_err("nvram: logging uncompressed oops/panic report\n");
+		return -1;
+	}
+	oops_hdr->version = OOPS_HDR_VERSION;
+	oops_hdr->report_length = (u16) zipped_len;
+	oops_hdr->timestamp = get_seconds();
+	return 0;
+}
+
 #ifdef CONFIG_PSTORE
+/* Derived from logfs_uncompress */
+int nvram_decompress(void *in, void *out, size_t inlen, size_t outlen)
+{
+	int err, ret;
+
+	ret = -EIO;
+	err = zlib_inflateInit(&stream);
+	if (err != Z_OK)
+		goto error;
+
+	stream.next_in = in;
+	stream.avail_in = inlen;
+	stream.total_in = 0;
+	stream.next_out = out;
+	stream.avail_out = outlen;
+	stream.total_out = 0;
+
+	err = zlib_inflate(&stream, Z_FINISH);
+	if (err != Z_STREAM_END)
+		goto error;
+
+	err = zlib_inflateEnd(&stream);
+	if (err != Z_OK)
+		goto error;
+
+	ret = stream.total_out;
+error:
+	return ret;
+}
+
+static int unzip_oops(char *oops_buf, char *big_buf)
+{
+	struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf;
+	u64 timestamp = oops_hdr->timestamp;
+	char *big_oops_data = NULL;
+	char *oops_data_buf = NULL;
+	size_t big_oops_data_sz;
+	int unzipped_len;
+
+	big_oops_data = big_buf + sizeof(struct oops_log_info);
+	big_oops_data_sz = big_oops_buf_sz - sizeof(struct oops_log_info);
+	oops_data_buf = oops_buf + sizeof(struct oops_log_info);
+
+	unzipped_len = nvram_decompress(oops_data_buf, big_oops_data,
+					oops_hdr->report_length,
+					big_oops_data_sz);
+
+	if (unzipped_len < 0) {
+		pr_err("nvram: decompression failed; returned %d\n",
+								unzipped_len);
+		return -1;
+	}
+	oops_hdr = (struct oops_log_info *)big_buf;
+	oops_hdr->version = OOPS_HDR_VERSION;
+	oops_hdr->report_length = (u16) unzipped_len;
+	oops_hdr->timestamp = timestamp;
+	return 0;
+}
+
 static int nvram_pstore_open(struct pstore_info *psi)
 {
 	/* Reset the iterator to start reading partitions again */
@@ -502,6 +613,7 @@
  * @part:               pstore writes data to registered buffer in parts,
  *                      part number will indicate the same.
  * @count:              Indicates oops count
+ * @hsize:              Size of header added by pstore
  * @size:               number of bytes written to the registered buffer
  * @psi:                registered pstore_info structure
  *
@@ -512,9 +624,11 @@
 static int nvram_pstore_write(enum pstore_type_id type,
 				enum kmsg_dump_reason reason,
 				u64 *id, unsigned int part, int count,
-				size_t size, struct pstore_info *psi)
+				size_t hsize, size_t size,
+				struct pstore_info *psi)
 {
 	int rc;
+	unsigned int err_type = ERR_TYPE_KERNEL_PANIC;
 	struct oops_log_info *oops_hdr = (struct oops_log_info *) oops_buf;
 
 	/* part 1 has the recent messages from printk buffer */
@@ -525,8 +639,30 @@
 	oops_hdr->version = OOPS_HDR_VERSION;
 	oops_hdr->report_length = (u16) size;
 	oops_hdr->timestamp = get_seconds();
+
+	if (big_oops_buf) {
+		rc = zip_oops(size);
+		/*
+		 * If compression fails copy recent log messages from
+		 * big_oops_buf to oops_data.
+		 */
+		if (rc != 0) {
+			size_t diff = size - oops_data_sz + hsize;
+
+			if (size > oops_data_sz) {
+				memcpy(oops_data, big_oops_buf, hsize);
+				memcpy(oops_data + hsize, big_oops_buf + diff,
+					oops_data_sz - hsize);
+
+				oops_hdr->report_length = (u16) oops_data_sz;
+			} else
+				memcpy(oops_data, big_oops_buf, size);
+		} else
+			err_type = ERR_TYPE_KERNEL_PANIC_GZ;
+	}
+
 	rc = nvram_write_os_partition(&oops_log_partition, oops_buf,
-		(int) (sizeof(*oops_hdr) + size), ERR_TYPE_KERNEL_PANIC,
+		(int) (sizeof(*oops_hdr) + oops_hdr->report_length), err_type,
 		count);
 
 	if (rc != 0)
@@ -548,10 +684,11 @@
 	struct oops_log_info *oops_hdr;
 	unsigned int err_type, id_no, size = 0;
 	struct nvram_os_partition *part = NULL;
-	char *buff = NULL;
-	int sig = 0;
+	char *buff = NULL, *big_buff = NULL;
+	int rc, sig = 0;
 	loff_t p;
 
+read_partition:
 	read_type++;
 
 	switch (nvram_type_ids[read_type]) {
@@ -614,6 +751,25 @@
 	if (nvram_type_ids[read_type] == PSTORE_TYPE_DMESG) {
 		oops_hdr = (struct oops_log_info *)buff;
 		*buf = buff + sizeof(*oops_hdr);
+
+		if (err_type == ERR_TYPE_KERNEL_PANIC_GZ) {
+			big_buff = kmalloc(big_oops_buf_sz, GFP_KERNEL);
+			if (!big_buff)
+				return -ENOMEM;
+
+			rc = unzip_oops(buff, big_buff);
+
+			if (rc != 0) {
+				kfree(buff);
+				kfree(big_buff);
+				goto read_partition;
+			}
+
+			oops_hdr = (struct oops_log_info *)big_buff;
+			*buf = big_buff + sizeof(*oops_hdr);
+			kfree(buff);
+		}
+
 		time->tv_sec = oops_hdr->timestamp;
 		time->tv_nsec = 0;
 		return oops_hdr->report_length;
@@ -635,17 +791,18 @@
 {
 	int rc = 0;
 
-	nvram_pstore_info.buf = oops_data;
-	nvram_pstore_info.bufsize = oops_data_sz;
+	if (big_oops_buf) {
+		nvram_pstore_info.buf = big_oops_buf;
+		nvram_pstore_info.bufsize = big_oops_buf_sz;
+	} else {
+		nvram_pstore_info.buf = oops_data;
+		nvram_pstore_info.bufsize = oops_data_sz;
+	}
 
 	rc = pstore_register(&nvram_pstore_info);
 	if (rc != 0)
 		pr_err("nvram: pstore_register() failed, defaults to "
 				"kmsg_dump; returned %d\n", rc);
-	else
-		/*TODO: Support compression when pstore is configured */
-		pr_info("nvram: Compression of oops text supported only when "
-				"pstore is not configured");
 
 	return rc;
 }
@@ -679,11 +836,6 @@
 	oops_data = oops_buf + sizeof(struct oops_log_info);
 	oops_data_sz = oops_log_partition.size - sizeof(struct oops_log_info);
 
-	rc = nvram_pstore_init();
-
-	if (!rc)
-		return;
-
 	/*
 	 * Figure compression (preceded by elimination of each line's <n>
 	 * severity prefix) will reduce the oops/panic report to at most
@@ -707,6 +859,11 @@
 		stream.workspace = NULL;
 	}
 
+	rc = nvram_pstore_init();
+
+	if (!rc)
+		return;
+
 	rc = kmsg_dump_register(&nvram_kmsg_dumper);
 	if (rc != 0) {
 		pr_err("nvram: kmsg_dump_register() failed; returned %d\n", rc);
@@ -757,58 +914,6 @@
 }
 
 
-/* Derived from logfs_compress() */
-static int nvram_compress(const void *in, void *out, size_t inlen,
-							size_t outlen)
-{
-	int err, ret;
-
-	ret = -EIO;
-	err = zlib_deflateInit2(&stream, COMPR_LEVEL, Z_DEFLATED, WINDOW_BITS,
-						MEM_LEVEL, Z_DEFAULT_STRATEGY);
-	if (err != Z_OK)
-		goto error;
-
-	stream.next_in = in;
-	stream.avail_in = inlen;
-	stream.total_in = 0;
-	stream.next_out = out;
-	stream.avail_out = outlen;
-	stream.total_out = 0;
-
-	err = zlib_deflate(&stream, Z_FINISH);
-	if (err != Z_STREAM_END)
-		goto error;
-
-	err = zlib_deflateEnd(&stream);
-	if (err != Z_OK)
-		goto error;
-
-	if (stream.total_out >= stream.total_in)
-		goto error;
-
-	ret = stream.total_out;
-error:
-	return ret;
-}
-
-/* Compress the text from big_oops_buf into oops_buf. */
-static int zip_oops(size_t text_len)
-{
-	struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf;
-	int zipped_len = nvram_compress(big_oops_buf, oops_data, text_len,
-								oops_data_sz);
-	if (zipped_len < 0) {
-		pr_err("nvram: compression failed; returned %d\n", zipped_len);
-		pr_err("nvram: logging uncompressed oops/panic report\n");
-		return -1;
-	}
-	oops_hdr->version = OOPS_HDR_VERSION;
-	oops_hdr->report_length = (u16) zipped_len;
-	oops_hdr->timestamp = get_seconds();
-	return 0;
-}
-
 /*
  * This is our kmsg_dump callback, called after an oops or panic report
  * has been written to the printk buffer.  We want to capture as much
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 028ac1f..46ac1dd 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -97,22 +97,14 @@
 	return indirect_read_config(bus, devfn, offset, len, val);
 }
 
-static struct pci_ops fsl_indirect_pci_ops =
+#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
+
+static struct pci_ops fsl_indirect_pcie_ops =
 {
 	.read = fsl_indirect_read_config,
 	.write = indirect_write_config,
 };
 
-static void __init fsl_setup_indirect_pci(struct pci_controller* hose,
-					  resource_size_t cfg_addr,
-					  resource_size_t cfg_data, u32 flags)
-{
-	setup_indirect_pci(hose, cfg_addr, cfg_data, flags);
-	hose->ops = &fsl_indirect_pci_ops;
-}
-
-#if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx)
-
 #define MAX_PHYS_ADDR_BITS	40
 static u64 pci64_dma_offset = 1ull << MAX_PHYS_ADDR_BITS;
 
@@ -504,13 +496,15 @@
 	if (!hose->private_data)
 		goto no_bridge;
 
-	fsl_setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4,
-			       PPC_INDIRECT_TYPE_BIG_ENDIAN);
+	setup_indirect_pci(hose, rsrc.start, rsrc.start + 0x4,
+			   PPC_INDIRECT_TYPE_BIG_ENDIAN);
 
 	if (in_be32(&pci->block_rev1) < PCIE_IP_REV_3_0)
 		hose->indirect_type |= PPC_INDIRECT_TYPE_FSL_CFG_REG_LINK;
 
 	if (early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) {
+		/* use fsl_indirect_read_config for PCIe */
+		hose->ops = &fsl_indirect_pcie_ops;
 		/* For PCIE read HEADER_TYPE to identify controler mode */
 		early_read_config_byte(hose, 0, 0, PCI_HEADER_TYPE, &hdr_type);
 		if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE)
@@ -814,8 +808,8 @@
 		if (ret)
 			goto err0;
 	} else {
-		fsl_setup_indirect_pci(hose, rsrc_cfg.start,
-				       rsrc_cfg.start + 4, 0);
+		setup_indirect_pci(hose, rsrc_cfg.start,
+				   rsrc_cfg.start + 4, 0);
 	}
 
 	printk(KERN_INFO "Found FSL PCI host bridge at 0x%016llx. "
diff --git a/arch/s390/include/asm/dma-mapping.h b/arch/s390/include/asm/dma-mapping.h
index 886ac7d..2f8c1ab 100644
--- a/arch/s390/include/asm/dma-mapping.h
+++ b/arch/s390/include/asm/dma-mapping.h
@@ -50,9 +50,10 @@
 {
 	struct dma_map_ops *dma_ops = get_dma_ops(dev);
 
+	debug_dma_mapping_error(dev, dma_addr);
 	if (dma_ops->mapping_error)
 		return dma_ops->mapping_error(dev, dma_addr);
-	return (dma_addr == 0UL);
+	return (dma_addr == DMA_ERROR_CODE);
 }
 
 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index d8a6a38..feb719d 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -754,9 +754,9 @@
 	.write = reipl_fcp_scpdata_write,
 };
 
-DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%016llx\n",
+DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%llx\n",
 		   reipl_block_fcp->ipl_info.fcp.wwpn);
-DEFINE_IPL_ATTR_RW(reipl_fcp, lun, "0x%016llx\n", "%016llx\n",
+DEFINE_IPL_ATTR_RW(reipl_fcp, lun, "0x%016llx\n", "%llx\n",
 		   reipl_block_fcp->ipl_info.fcp.lun);
 DEFINE_IPL_ATTR_RW(reipl_fcp, bootprog, "%lld\n", "%lld\n",
 		   reipl_block_fcp->ipl_info.fcp.bootprog);
@@ -1323,9 +1323,9 @@
 
 /* FCP dump device attributes */
 
-DEFINE_IPL_ATTR_RW(dump_fcp, wwpn, "0x%016llx\n", "%016llx\n",
+DEFINE_IPL_ATTR_RW(dump_fcp, wwpn, "0x%016llx\n", "%llx\n",
 		   dump_block_fcp->ipl_info.fcp.wwpn);
-DEFINE_IPL_ATTR_RW(dump_fcp, lun, "0x%016llx\n", "%016llx\n",
+DEFINE_IPL_ATTR_RW(dump_fcp, lun, "0x%016llx\n", "%llx\n",
 		   dump_block_fcp->ipl_info.fcp.lun);
 DEFINE_IPL_ATTR_RW(dump_fcp, bootprog, "%lld\n", "%lld\n",
 		   dump_block_fcp->ipl_info.fcp.bootprog);
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 408e866..dd3c199 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -312,6 +312,7 @@
 }
 EXPORT_SYMBOL(measurement_alert_subclass_unregister);
 
+#ifdef CONFIG_SMP
 void synchronize_irq(unsigned int irq)
 {
 	/*
@@ -320,6 +321,7 @@
 	 */
 }
 EXPORT_SYMBOL_GPL(synchronize_irq);
+#endif
 
 #ifndef CONFIG_PCI
 
diff --git a/arch/s390/mm/mem_detect.c b/arch/s390/mm/mem_detect.c
index 3cbd3b8..cca3882 100644
--- a/arch/s390/mm/mem_detect.c
+++ b/arch/s390/mm/mem_detect.c
@@ -123,7 +123,8 @@
 			continue;
 		} else if ((addr <= chunk->addr) &&
 			   (addr + size >= chunk->addr + chunk->size)) {
-			memset(chunk, 0 , sizeof(*chunk));
+			memmove(chunk, chunk + 1, (MEMORY_CHUNKS-i-1) * sizeof(*chunk));
+			memset(&mem_chunk[MEMORY_CHUNKS-1], 0, sizeof(*chunk));
 		} else if (addr + size < chunk->addr + chunk->size) {
 			chunk->size =  chunk->addr + chunk->size - addr - size;
 			chunk->addr = addr + size;
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index ff18e3c..7e4a97f 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -6,6 +6,7 @@
 generic-y += div64.h
 generic-y += emergency-restart.h
 generic-y += exec.h
+generic-y += linkage.h
 generic-y += local64.h
 generic-y += mutex.h
 generic-y += irq_regs.h
diff --git a/arch/sparc/include/asm/leon.h b/arch/sparc/include/asm/leon.h
index 15a7169..b836e92 100644
--- a/arch/sparc/include/asm/leon.h
+++ b/arch/sparc/include/asm/leon.h
@@ -135,7 +135,7 @@
 
 #ifdef CONFIG_SMP
 # define LEON3_IRQ_IPI_DEFAULT		13
-# define LEON3_IRQ_TICKER		(leon3_ticker_irq)
+# define LEON3_IRQ_TICKER		(leon3_gptimer_irq)
 # define LEON3_IRQ_CROSS_CALL		15
 #endif
 
diff --git a/arch/sparc/include/asm/leon_amba.h b/arch/sparc/include/asm/leon_amba.h
index f3034ed..24ec48c 100644
--- a/arch/sparc/include/asm/leon_amba.h
+++ b/arch/sparc/include/asm/leon_amba.h
@@ -47,6 +47,7 @@
 #define LEON3_GPTIMER_LD 4
 #define LEON3_GPTIMER_IRQEN 8
 #define LEON3_GPTIMER_SEPIRQ 8
+#define LEON3_GPTIMER_TIMERS 0x7
 
 #define LEON23_REG_TIMER_CONTROL_EN    0x00000001 /* 1 = enable counting */
 /* 0 = hold scalar and counter */
diff --git a/arch/sparc/include/asm/linkage.h b/arch/sparc/include/asm/linkage.h
deleted file mode 100644
index 291c2d0..0000000
--- a/arch/sparc/include/asm/linkage.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_LINKAGE_H
-#define __ASM_LINKAGE_H
-
-/* Nothing to see here... */
-
-#endif
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
index 75bb608..5ef48da 100644
--- a/arch/sparc/kernel/ds.c
+++ b/arch/sparc/kernel/ds.c
@@ -843,7 +843,8 @@
 		unsigned long len;
 
 		strcpy(full_boot_str, "boot ");
-		strcpy(full_boot_str + strlen("boot "), boot_command);
+		strlcpy(full_boot_str + strlen("boot "), boot_command,
+			sizeof(full_boot_str + strlen("boot ")));
 		len = strlen(full_boot_str);
 
 		if (reboot_data_supported) {
diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c
index 7c0231d..b7c6897 100644
--- a/arch/sparc/kernel/leon_kernel.c
+++ b/arch/sparc/kernel/leon_kernel.c
@@ -38,7 +38,6 @@
 
 unsigned long leon3_gptimer_irq; /* interrupt controller irq number */
 unsigned long leon3_gptimer_idx; /* Timer Index (0..6) within Timer Core */
-int leon3_ticker_irq; /* Timer ticker IRQ */
 unsigned int sparc_leon_eirq;
 #define LEON_IMASK(cpu) (&leon3_irqctrl_regs->mask[cpu])
 #define LEON_IACK (&leon3_irqctrl_regs->iclear)
@@ -278,6 +277,9 @@
 
 	leon_clear_profile_irq(cpu);
 
+	if (cpu == boot_cpu_id)
+		timer_interrupt(irq, NULL);
+
 	ce = &per_cpu(sparc32_clockevent, cpu);
 
 	irq_enter();
@@ -299,6 +301,7 @@
 	int icsel;
 	int ampopts;
 	int err;
+	u32 config;
 
 	sparc_config.get_cycles_offset = leon_cycles_offset;
 	sparc_config.cs_period = 1000000 / HZ;
@@ -377,23 +380,6 @@
 	LEON3_BYPASS_STORE_PA(
 			&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, 0);
 
-#ifdef CONFIG_SMP
-	leon3_ticker_irq = leon3_gptimer_irq + 1 + leon3_gptimer_idx;
-
-	if (!(LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->config) &
-	      (1<<LEON3_GPTIMER_SEPIRQ))) {
-		printk(KERN_ERR "timer not configured with separate irqs\n");
-		BUG();
-	}
-
-	LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].val,
-				0);
-	LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].rld,
-				(((1000000/HZ) - 1)));
-	LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].ctrl,
-				0);
-#endif
-
 	/*
 	 * The IRQ controller may (if implemented) consist of multiple
 	 * IRQ controllers, each mapped on a 4Kb boundary.
@@ -416,13 +402,6 @@
 	if (eirq != 0)
 		leon_eirq_setup(eirq);
 
-	irq = _leon_build_device_irq(NULL, leon3_gptimer_irq+leon3_gptimer_idx);
-	err = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL);
-	if (err) {
-		printk(KERN_ERR "unable to attach timer IRQ%d\n", irq);
-		prom_halt();
-	}
-
 #ifdef CONFIG_SMP
 	{
 		unsigned long flags;
@@ -439,30 +418,31 @@
 	}
 #endif
 
+	config = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->config);
+	if (config & (1 << LEON3_GPTIMER_SEPIRQ))
+		leon3_gptimer_irq += leon3_gptimer_idx;
+	else if ((config & LEON3_GPTIMER_TIMERS) > 1)
+		pr_warn("GPTIMER uses shared irqs, using other timers of the same core will fail.\n");
+
+#ifdef CONFIG_SMP
+	/* Install per-cpu IRQ handler for broadcasted ticker */
+	irq = leon_build_device_irq(leon3_gptimer_irq, handle_percpu_irq,
+				    "per-cpu", 0);
+	err = request_irq(irq, leon_percpu_timer_ce_interrupt,
+			  IRQF_PERCPU | IRQF_TIMER, "timer", NULL);
+#else
+	irq = _leon_build_device_irq(NULL, leon3_gptimer_irq);
+	err = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL);
+#endif
+	if (err) {
+		pr_err("Unable to attach timer IRQ%d\n", irq);
+		prom_halt();
+	}
 	LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl,
 			      LEON3_GPTIMER_EN |
 			      LEON3_GPTIMER_RL |
 			      LEON3_GPTIMER_LD |
 			      LEON3_GPTIMER_IRQEN);
-
-#ifdef CONFIG_SMP
-	/* Install per-cpu IRQ handler for broadcasted ticker */
-	irq = leon_build_device_irq(leon3_ticker_irq, handle_percpu_irq,
-				    "per-cpu", 0);
-	err = request_irq(irq, leon_percpu_timer_ce_interrupt,
-			  IRQF_PERCPU | IRQF_TIMER, "ticker",
-			  NULL);
-	if (err) {
-		printk(KERN_ERR "unable to attach ticker IRQ%d\n", irq);
-		prom_halt();
-	}
-
-	LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx+1].ctrl,
-			      LEON3_GPTIMER_EN |
-			      LEON3_GPTIMER_RL |
-			      LEON3_GPTIMER_LD |
-			      LEON3_GPTIMER_IRQEN);
-#endif
 	return;
 bad:
 	printk(KERN_ERR "No Timer/irqctrl found\n");
diff --git a/arch/sparc/kernel/leon_pci_grpci1.c b/arch/sparc/kernel/leon_pci_grpci1.c
index 7739a54..6df26e3 100644
--- a/arch/sparc/kernel/leon_pci_grpci1.c
+++ b/arch/sparc/kernel/leon_pci_grpci1.c
@@ -536,11 +536,9 @@
 
 	/* find device register base address */
 	res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
-	regs = devm_request_and_ioremap(&ofdev->dev, res);
-	if (!regs) {
-		dev_err(&ofdev->dev, "io-regs mapping failed\n");
-		return -EADDRNOTAVAIL;
-	}
+	regs = devm_ioremap_resource(&ofdev->dev, res);
+	if (IS_ERR(regs))
+		return PTR_ERR(regs);
 
 	/*
 	 * check that we're in Host Slot and that we can act as a Host Bridge
diff --git a/arch/sparc/kernel/leon_pmc.c b/arch/sparc/kernel/leon_pmc.c
index bdf53d9..b0b3967 100644
--- a/arch/sparc/kernel/leon_pmc.c
+++ b/arch/sparc/kernel/leon_pmc.c
@@ -47,6 +47,10 @@
 	 * MMU does not get a TLB miss here by using the MMU BYPASS ASI.
 	 */
 	register unsigned int address = (unsigned int)leon3_irqctrl_regs;
+
+	/* Interrupts need to be enabled to not hang the CPU */
+	local_irq_enable();
+
 	__asm__ __volatile__ (
 		"wr	%%g0, %%asr19\n"
 		"lda	[%0] %1, %%g0\n"
@@ -60,6 +64,9 @@
  */
 void pmc_leon_idle(void)
 {
+	/* Interrupts need to be enabled to not hang the CPU */
+	local_irq_enable();
+
 	/* For systems without power-down, this will be no-op */
 	__asm__ __volatile__ ("wr	%g0, %asr19\n\t");
 }
diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
index 9f20566..79cc0d1 100644
--- a/arch/sparc/kernel/prom_common.c
+++ b/arch/sparc/kernel/prom_common.c
@@ -54,6 +54,7 @@
 int of_set_property(struct device_node *dp, const char *name, void *val, int len)
 {
 	struct property **prevp;
+	unsigned long flags;
 	void *new_val;
 	int err;
 
@@ -64,7 +65,7 @@
 	err = -ENODEV;
 
 	mutex_lock(&of_set_property_mutex);
-	raw_spin_lock(&devtree_lock);
+	raw_spin_lock_irqsave(&devtree_lock, flags);
 	prevp = &dp->properties;
 	while (*prevp) {
 		struct property *prop = *prevp;
@@ -91,7 +92,7 @@
 		}
 		prevp = &(*prevp)->next;
 	}
-	raw_spin_unlock(&devtree_lock);
+	raw_spin_unlock_irqrestore(&devtree_lock, flags);
 	mutex_unlock(&of_set_property_mutex);
 
 	/* XXX Upate procfs if necessary... */
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
index 38bf80a..1434526 100644
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -304,7 +304,7 @@
 
 	/* Initialize PROM console and command line. */
 	*cmdline_p = prom_getbootargs();
-	strcpy(boot_command_line, *cmdline_p);
+	strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
 	parse_early_param();
 
 	boot_flags_init(*cmdline_p);
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index 88a127b..1378554 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -555,7 +555,7 @@
 {
 	/* Initialize PROM console and command line. */
 	*cmdline_p = prom_getbootargs();
-	strcpy(boot_command_line, *cmdline_p);
+	strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
 	parse_early_param();
 
 	boot_flags_init(*cmdline_p);
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index a717199..04fd55a 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -1098,7 +1098,14 @@
 		m->size = *val;
 		val = mdesc_get_property(md, node,
 					 "address-congruence-offset", NULL);
-		m->offset = *val;
+
+		/* The address-congruence-offset property is optional.
+		 * Explicitly zero it to identify this.
+		 */
+		if (val)
+			m->offset = *val;
+		else
+			m->offset = 0UL;
 
 		numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n",
 			count - 1, m->base, m->size, m->offset);
diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c
index f828dd3..7a91f28 100644
--- a/arch/sparc/mm/tlb.c
+++ b/arch/sparc/mm/tlb.c
@@ -85,8 +85,8 @@
 	}
 
 	if (!tb->active) {
-		global_flush_tlb_page(mm, vaddr);
 		flush_tsb_user_page(mm, vaddr);
+		global_flush_tlb_page(mm, vaddr);
 		goto out;
 	}
 
diff --git a/arch/sparc/prom/bootstr_32.c b/arch/sparc/prom/bootstr_32.c
index f5ec32e..d2b49d2 100644
--- a/arch/sparc/prom/bootstr_32.c
+++ b/arch/sparc/prom/bootstr_32.c
@@ -23,23 +23,25 @@
 		return barg_buf;
 	}
 
-	switch(prom_vers) {
+	switch (prom_vers) {
 	case PROM_V0:
 		cp = barg_buf;
 		/* Start from 1 and go over fd(0,0,0)kernel */
-		for(iter = 1; iter < 8; iter++) {
+		for (iter = 1; iter < 8; iter++) {
 			arg = (*(romvec->pv_v0bootargs))->argv[iter];
 			if (arg == NULL)
 				break;
-			while(*arg != 0) {
+			while (*arg != 0) {
 				/* Leave place for space and null. */
-				if(cp >= barg_buf + BARG_LEN-2){
+				if (cp >= barg_buf + BARG_LEN - 2)
 					/* We might issue a warning here. */
 					break;
-				}
 				*cp++ = *arg++;
 			}
 			*cp++ = ' ';
+			if (cp >= barg_buf + BARG_LEN - 1)
+				/* We might issue a warning here. */
+				break;
 		}
 		*cp = 0;
 		break;
diff --git a/arch/sparc/prom/tree_64.c b/arch/sparc/prom/tree_64.c
index 92204c3..bd1b2a3 100644
--- a/arch/sparc/prom/tree_64.c
+++ b/arch/sparc/prom/tree_64.c
@@ -39,7 +39,7 @@
 	return prom_node_to_node("child", node);
 }
 
-inline phandle prom_getchild(phandle node)
+phandle prom_getchild(phandle node)
 {
 	phandle cnode;
 
@@ -72,7 +72,7 @@
 	return prom_node_to_node(prom_peer_name, node);
 }
 
-inline phandle prom_getsibling(phandle node)
+phandle prom_getsibling(phandle node)
 {
 	phandle sibnode;
 
@@ -89,7 +89,7 @@
 /* Return the length in bytes of property 'prop' at node 'node'.
  * Return -1 on error.
  */
-inline int prom_getproplen(phandle node, const char *prop)
+int prom_getproplen(phandle node, const char *prop)
 {
 	unsigned long args[6];
 
@@ -113,8 +113,8 @@
  * 'buffer' which has a size of 'bufsize'.  If the acquisition
  * was successful the length will be returned, else -1 is returned.
  */
-inline int prom_getproperty(phandle node, const char *prop,
-			    char *buffer, int bufsize)
+int prom_getproperty(phandle node, const char *prop,
+		     char *buffer, int bufsize)
 {
 	unsigned long args[8];
 	int plen;
@@ -141,7 +141,7 @@
 /* Acquire an integer property and return its value.  Returns -1
  * on failure.
  */
-inline int prom_getint(phandle node, const char *prop)
+int prom_getint(phandle node, const char *prop)
 {
 	int intprop;
 
@@ -235,7 +235,7 @@
 /* Return the first property type for node 'node'.
  * buffer should be at least 32B in length
  */
-inline char *prom_firstprop(phandle node, char *buffer)
+char *prom_firstprop(phandle node, char *buffer)
 {
 	unsigned long args[7];
 
@@ -261,7 +261,7 @@
  * at node 'node' .  Returns NULL string if no more
  * property types for this node.
  */
-inline char *prom_nextprop(phandle node, const char *oprop, char *buffer)
+char *prom_nextprop(phandle node, const char *oprop, char *buffer)
 {
 	unsigned long args[7];
 	char buf[32];
diff --git a/arch/tile/lib/exports.c b/arch/tile/lib/exports.c
index 4385cb6..a93b02a 100644
--- a/arch/tile/lib/exports.c
+++ b/arch/tile/lib/exports.c
@@ -84,4 +84,6 @@
 EXPORT_SYMBOL(__ashrdi3);
 uint64_t __ashldi3(uint64_t, unsigned int);
 EXPORT_SYMBOL(__ashldi3);
+int __ffsdi2(uint64_t);
+EXPORT_SYMBOL(__ffsdi2);
 #endif
diff --git a/arch/um/drivers/mconsole_kern.c b/arch/um/drivers/mconsole_kern.c
index d7d2185..3df3bd5 100644
--- a/arch/um/drivers/mconsole_kern.c
+++ b/arch/um/drivers/mconsole_kern.c
@@ -147,7 +147,7 @@
 	}
 
 	do {
-		loff_t pos;
+		loff_t pos = file->f_pos;
 		mm_segment_t old_fs = get_fs();
 		set_fs(KERNEL_DS);
 		len = vfs_read(file, buf, PAGE_SIZE - 1, &pos);
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 685692c..fe120da 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -2265,6 +2265,7 @@
 config IA32_EMULATION
 	bool "IA32 Emulation"
 	depends on X86_64
+	select BINFMT_ELF
 	select COMPAT_BINFMT_ELF
 	select HAVE_UID16
 	---help---
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index 62fe22c..477e9d7 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -2681,56 +2681,68 @@
 	addq %rcx, KEYP
 
 	movdqa IV, STATE1
-	pxor 0x00(INP), STATE1
+	movdqu 0x00(INP), INC
+	pxor INC, STATE1
 	movdqu IV, 0x00(OUTP)
 
 	_aesni_gf128mul_x_ble()
 	movdqa IV, STATE2
-	pxor 0x10(INP), STATE2
+	movdqu 0x10(INP), INC
+	pxor INC, STATE2
 	movdqu IV, 0x10(OUTP)
 
 	_aesni_gf128mul_x_ble()
 	movdqa IV, STATE3
-	pxor 0x20(INP), STATE3
+	movdqu 0x20(INP), INC
+	pxor INC, STATE3
 	movdqu IV, 0x20(OUTP)
 
 	_aesni_gf128mul_x_ble()
 	movdqa IV, STATE4
-	pxor 0x30(INP), STATE4
+	movdqu 0x30(INP), INC
+	pxor INC, STATE4
 	movdqu IV, 0x30(OUTP)
 
 	call *%r11
 
-	pxor 0x00(OUTP), STATE1
+	movdqu 0x00(OUTP), INC
+	pxor INC, STATE1
 	movdqu STATE1, 0x00(OUTP)
 
 	_aesni_gf128mul_x_ble()
 	movdqa IV, STATE1
-	pxor 0x40(INP), STATE1
+	movdqu 0x40(INP), INC
+	pxor INC, STATE1
 	movdqu IV, 0x40(OUTP)
 
-	pxor 0x10(OUTP), STATE2
+	movdqu 0x10(OUTP), INC
+	pxor INC, STATE2
 	movdqu STATE2, 0x10(OUTP)
 
 	_aesni_gf128mul_x_ble()
 	movdqa IV, STATE2
-	pxor 0x50(INP), STATE2
+	movdqu 0x50(INP), INC
+	pxor INC, STATE2
 	movdqu IV, 0x50(OUTP)
 
-	pxor 0x20(OUTP), STATE3
+	movdqu 0x20(OUTP), INC
+	pxor INC, STATE3
 	movdqu STATE3, 0x20(OUTP)
 
 	_aesni_gf128mul_x_ble()
 	movdqa IV, STATE3
-	pxor 0x60(INP), STATE3
+	movdqu 0x60(INP), INC
+	pxor INC, STATE3
 	movdqu IV, 0x60(OUTP)
 
-	pxor 0x30(OUTP), STATE4
+	movdqu 0x30(OUTP), INC
+	pxor INC, STATE4
 	movdqu STATE4, 0x30(OUTP)
 
 	_aesni_gf128mul_x_ble()
 	movdqa IV, STATE4
-	pxor 0x70(INP), STATE4
+	movdqu 0x70(INP), INC
+	pxor INC, STATE4
 	movdqu IV, 0x70(OUTP)
 
 	_aesni_gf128mul_x_ble()
@@ -2738,16 +2750,20 @@
 
 	call *%r11
 
-	pxor 0x40(OUTP), STATE1
+	movdqu 0x40(OUTP), INC
+	pxor INC, STATE1
 	movdqu STATE1, 0x40(OUTP)
 
-	pxor 0x50(OUTP), STATE2
+	movdqu 0x50(OUTP), INC
+	pxor INC, STATE2
 	movdqu STATE2, 0x50(OUTP)
 
-	pxor 0x60(OUTP), STATE3
+	movdqu 0x60(OUTP), INC
+	pxor INC, STATE3
 	movdqu STATE3, 0x60(OUTP)
 
-	pxor 0x70(OUTP), STATE4
+	movdqu 0x70(OUTP), INC
+	pxor INC, STATE4
 	movdqu STATE4, 0x70(OUTP)
 
 	ret
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index 805078e..52ff81c 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -192,7 +192,7 @@
 	/* struct user */
 	DUMP_WRITE(&dump, sizeof(dump));
 	/* Now dump all of the user data.  Include malloced stuff as well */
-	DUMP_SEEK(PAGE_SIZE);
+	DUMP_SEEK(PAGE_SIZE - sizeof(dump));
 	/* now we start writing out the user space info */
 	set_fs(USER_DS);
 	/* Dump the data area */
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index ba870bb..57873be 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -41,4 +41,9 @@
 
 extern void init_ISA_irqs(void);
 
+#ifdef CONFIG_X86_LOCAL_APIC
+void arch_trigger_all_cpu_backtrace(void);
+#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
+#endif
+
 #endif /* _ASM_X86_IRQ_H */
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
index 6825e2e..6bc3985 100644
--- a/arch/x86/include/asm/microcode.h
+++ b/arch/x86/include/asm/microcode.h
@@ -60,11 +60,11 @@
 #ifdef CONFIG_MICROCODE_EARLY
 #define MAX_UCODE_COUNT 128
 extern void __init load_ucode_bsp(void);
-extern __init void load_ucode_ap(void);
+extern void __cpuinit load_ucode_ap(void);
 extern int __init save_microcode_in_initrd(void);
 #else
 static inline void __init load_ucode_bsp(void) {}
-static inline __init void load_ucode_ap(void) {}
+static inline void __cpuinit load_ucode_ap(void) {}
 static inline int __init save_microcode_in_initrd(void)
 {
 	return 0;
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
index c0fa356..86f9301 100644
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -18,9 +18,7 @@
 			void __user *, size_t *, loff_t *);
 extern int unknown_nmi_panic;
 
-void arch_trigger_all_cpu_backtrace(void);
-#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
-#endif
+#endif /* CONFIG_X86_LOCAL_APIC */
 
 #define NMI_FLAG_FIRST	1
 
diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c
index 31cb9ae..a698d71 100644
--- a/arch/x86/kernel/apic/hw_nmi.c
+++ b/arch/x86/kernel/apic/hw_nmi.c
@@ -9,6 +9,7 @@
  *
  */
 #include <asm/apic.h>
+#include <asm/nmi.h>
 
 #include <linux/cpumask.h>
 #include <linux/kdebug.h>
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c
index 35ffda5..5f90b85 100644
--- a/arch/x86/kernel/cpu/mtrr/cleanup.c
+++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
@@ -714,15 +714,15 @@
 	if (mtrr_tom2)
 		x_remove_size = (mtrr_tom2 >> PAGE_SHIFT) - x_remove_base;
 
-	nr_range = x86_get_mtrr_mem_range(range, 0, x_remove_base, x_remove_size);
 	/*
 	 * [0, 1M) should always be covered by var mtrr with WB
 	 * and fixed mtrrs should take effect before var mtrr for it:
 	 */
-	nr_range = add_range_with_merge(range, RANGE_NUM, nr_range, 0,
+	nr_range = add_range_with_merge(range, RANGE_NUM, 0, 0,
 					1ULL<<(20 - PAGE_SHIFT));
-	/* Sort the ranges: */
-	sort_range(range, nr_range);
+	/* add from var mtrr at last */
+	nr_range = x86_get_mtrr_mem_range(range, nr_range,
+					  x_remove_base, x_remove_size);
 
 	range_sums = sum_ranges(range, nr_range);
 	printk(KERN_INFO "total RAM covered: %ldM\n",
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index f60d41f..a9e2207 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -165,13 +165,13 @@
 	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
 	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
 	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
-	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
 	EVENT_EXTRA_END
 };
 
 static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
 	INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
 	INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
+	INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
 	EVENT_EXTRA_END
 };
 
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 9895a9a..211bce4 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -365,10 +365,14 @@
 	return insn.length;
 }
 
-static void __kprobes arch_copy_kprobe(struct kprobe *p)
+static int __kprobes arch_copy_kprobe(struct kprobe *p)
 {
+	int ret;
+
 	/* Copy an instruction with recovering if other optprobe modifies it.*/
-	__copy_instruction(p->ainsn.insn, p->addr);
+	ret = __copy_instruction(p->ainsn.insn, p->addr);
+	if (!ret)
+		return -EINVAL;
 
 	/*
 	 * __copy_instruction can modify the displacement of the instruction,
@@ -384,6 +388,8 @@
 
 	/* Also, displacement change doesn't affect the first byte */
 	p->opcode = p->ainsn.insn[0];
+
+	return 0;
 }
 
 int __kprobes arch_prepare_kprobe(struct kprobe *p)
@@ -397,8 +403,8 @@
 	p->ainsn.insn = get_insn_slot();
 	if (!p->ainsn.insn)
 		return -ENOMEM;
-	arch_copy_kprobe(p);
-	return 0;
+
+	return arch_copy_kprobe(p);
 }
 
 void __kprobes arch_arm_kprobe(struct kprobe *p)
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index d2c3812..3dd37eb 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -242,6 +242,7 @@
 	if (!mem)
 		return;
 	hv_clock = __va(mem);
+	memset(hv_clock, 0, size);
 
 	if (kvm_register_clock("boot clock")) {
 		hv_clock = NULL;
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 4e7a37f..81a5f5e 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -277,18 +277,6 @@
 }
 #endif
 
-void arch_cpu_idle_prepare(void)
-{
-	/*
-	 * If we're the non-boot CPU, nothing set the stack canary up
-	 * for us.  CPU0 already has it initialized but no harm in
-	 * doing it again.  This is a good place for updating it, as
-	 * we wont ever return from this function (so the invalid
-	 * canaries already on the stack wont ever trigger).
-	 */
-	boot_init_stack_canary();
-}
-
 void arch_cpu_idle_enter(void)
 {
 	local_touch_nmi();
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 9c73b51..bfd348e 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -372,15 +372,15 @@
 
 void __cpuinit set_cpu_sibling_map(int cpu)
 {
-	bool has_mc = boot_cpu_data.x86_max_cores > 1;
 	bool has_smt = smp_num_siblings > 1;
+	bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1;
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
 	struct cpuinfo_x86 *o;
 	int i;
 
 	cpumask_set_cpu(cpu, cpu_sibling_setup_mask);
 
-	if (!has_smt && !has_mc) {
+	if (!has_mp) {
 		cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
 		cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
 		cpumask_set_cpu(cpu, cpu_core_mask(cpu));
@@ -394,7 +394,7 @@
 		if ((i == cpu) || (has_smt && match_smt(c, o)))
 			link_mask(sibling, cpu, i);
 
-		if ((i == cpu) || (has_mc && match_llc(c, o)))
+		if ((i == cpu) || (has_mp && match_llc(c, o)))
 			link_mask(llc_shared, cpu, i);
 
 	}
@@ -406,7 +406,7 @@
 	for_each_cpu(i, cpu_sibling_setup_mask) {
 		o = &cpu_data(i);
 
-		if ((i == cpu) || (has_mc && match_mc(c, o))) {
+		if ((i == cpu) || (has_mp && match_mc(c, o))) {
 			link_mask(core, cpu, i);
 
 			/*
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 094b5d9..e8ba99c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -582,8 +582,6 @@
 	if (index != XCR_XFEATURE_ENABLED_MASK)
 		return 1;
 	xcr0 = xcr;
-	if (kvm_x86_ops->get_cpl(vcpu) != 0)
-		return 1;
 	if (!(xcr0 & XSTATE_FP))
 		return 1;
 	if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE))
@@ -597,7 +595,8 @@
 
 int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 {
-	if (__kvm_set_xcr(vcpu, index, xcr)) {
+	if (kvm_x86_ops->get_cpl(vcpu) != 0 ||
+	    __kvm_set_xcr(vcpu, index, xcr)) {
 		kvm_inject_gp(vcpu, 0);
 		return 1;
 	}
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 5ae2eb0..d2fbced 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -1069,7 +1069,10 @@
 		 * that by attempting to use more space than is available.
 		 */
 		unsigned long dummy_size = remaining_size + 1024;
-		void *dummy = kmalloc(dummy_size, GFP_ATOMIC);
+		void *dummy = kzalloc(dummy_size, GFP_ATOMIC);
+
+		if (!dummy)
+			return EFI_OUT_OF_RESOURCES;
 
 		status = efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID,
 					  EFI_VARIABLE_NON_VOLATILE |
@@ -1089,6 +1092,8 @@
 					 0, dummy);
 		}
 
+		kfree(dummy);
+
 		/*
 		 * The runtime code may now have triggered a garbage collection
 		 * run, so check the variable info again
diff --git a/crypto/algboss.c b/crypto/algboss.c
index 769219b..76fc0b2 100644
--- a/crypto/algboss.c
+++ b/crypto/algboss.c
@@ -45,10 +45,9 @@
 		} nu32;
 	} attrs[CRYPTO_MAX_ATTRS];
 
-	char larval[CRYPTO_MAX_ALG_NAME];
 	char template[CRYPTO_MAX_ALG_NAME];
 
-	struct completion *completion;
+	struct crypto_larval *larval;
 
 	u32 otype;
 	u32 omask;
@@ -87,7 +86,8 @@
 	crypto_tmpl_put(tmpl);
 
 out:
-	complete_all(param->completion);
+	complete_all(&param->larval->completion);
+	crypto_alg_put(&param->larval->alg);
 	kfree(param);
 	module_put_and_exit(0);
 }
@@ -187,18 +187,19 @@
 	param->otype = larval->alg.cra_flags;
 	param->omask = larval->mask;
 
-	memcpy(param->larval, larval->alg.cra_name, CRYPTO_MAX_ALG_NAME);
-
-	param->completion = &larval->completion;
+	crypto_alg_get(&larval->alg);
+	param->larval = larval;
 
 	thread = kthread_run(cryptomgr_probe, param, "cryptomgr_probe");
 	if (IS_ERR(thread))
-		goto err_free_param;
+		goto err_put_larval;
 
 	wait_for_completion_interruptible(&larval->completion);
 
 	return NOTIFY_STOP;
 
+err_put_larval:
+	crypto_alg_put(&larval->alg);
 err_free_param:
 	kfree(param);
 err_put_module:
diff --git a/crypto/api.c b/crypto/api.c
index 033a714..3b61803 100644
--- a/crypto/api.c
+++ b/crypto/api.c
@@ -34,12 +34,6 @@
 BLOCKING_NOTIFIER_HEAD(crypto_chain);
 EXPORT_SYMBOL_GPL(crypto_chain);
 
-static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg)
-{
-	atomic_inc(&alg->cra_refcnt);
-	return alg;
-}
-
 struct crypto_alg *crypto_mod_get(struct crypto_alg *alg)
 {
 	return try_module_get(alg->cra_module) ? crypto_alg_get(alg) : NULL;
diff --git a/crypto/internal.h b/crypto/internal.h
index 9ebedae..bd39bfc 100644
--- a/crypto/internal.h
+++ b/crypto/internal.h
@@ -103,6 +103,12 @@
 int crypto_unregister_notifier(struct notifier_block *nb);
 int crypto_probing_notify(unsigned long val, void *v);
 
+static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg)
+{
+	atomic_inc(&alg->cra_refcnt);
+	return alg;
+}
+
 static inline void crypto_alg_put(struct crypto_alg *alg)
 {
 	if (atomic_dec_and_test(&alg->cra_refcnt) && alg->cra_destroy)
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 652fd5c..cab13f2 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -164,15 +164,24 @@
 	if (dev_desc->clk_required) {
 		ret = register_device_clock(adev, pdata);
 		if (ret) {
-			/*
-			 * Skip the device, but don't terminate the namespace
-			 * scan.
-			 */
-			kfree(pdata);
-			return 0;
+			/* Skip the device, but continue the namespace scan. */
+			ret = 0;
+			goto err_out;
 		}
 	}
 
+	/*
+	 * This works around a known issue in ACPI tables where LPSS devices
+	 * have _PS0 and _PS3 without _PSC (and no power resources), so
+	 * acpi_bus_init_power() will assume that the BIOS has put them into D0.
+	 */
+	ret = acpi_device_fix_up_power(adev);
+	if (ret) {
+		/* Skip the device, but continue the namespace scan. */
+		ret = 0;
+		goto err_out;
+	}
+
 	adev->driver_data = pdata;
 	ret = acpi_create_platform_device(adev, id);
 	if (ret > 0)
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 6d894bf..a9cf960 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -935,7 +935,7 @@
 			   struct timespec *time, char **buf,
 			   struct pstore_info *psi);
 static int erst_writer(enum pstore_type_id type, enum kmsg_dump_reason reason,
-		       u64 *id, unsigned int part, int count,
+		       u64 *id, unsigned int part, int count, size_t hsize,
 		       size_t size, struct pstore_info *psi);
 static int erst_clearer(enum pstore_type_id type, u64 id, int count,
 			struct timespec time, struct pstore_info *psi);
@@ -1055,7 +1055,7 @@
 }
 
 static int erst_writer(enum pstore_type_id type, enum kmsg_dump_reason reason,
-		       u64 *id, unsigned int part, int count,
+		       u64 *id, unsigned int part, int count, size_t hsize,
 		       size_t size, struct pstore_info *psi)
 {
 	struct cper_pstore_record *rcd = (struct cper_pstore_record *)
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 318fa32..31c217a 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -290,6 +290,26 @@
 	return 0;
 }
 
+/**
+ * acpi_device_fix_up_power - Force device with missing _PSC into D0.
+ * @device: Device object whose power state is to be fixed up.
+ *
+ * Devices without power resources and _PSC, but having _PS0 and _PS3 defined,
+ * are assumed to have been put into D0 by the BIOS.  However, in some cases
+ * that assumption does not hold, and this function can then be used to force
+ * the device into D0.
+ */
+int acpi_device_fix_up_power(struct acpi_device *device)
+{
+	int ret = 0;
+
+	if (!device->power.flags.power_resources
+	    && !device->power.flags.explicit_get
+	    && device->power.state == ACPI_STATE_D0)
+		ret = acpi_dev_pm_explicit_set(device, ACPI_STATE_D0);
+
+	return ret;
+}
+
 int acpi_bus_update_power(acpi_handle handle, int *state_p)
 {
 	struct acpi_device *device;
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 4fdea38..14de9f4 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -66,20 +66,21 @@
 	spinlock_t dd_lock;
 	struct mutex hp_lock;
 	struct list_head dependent_devices;
-	struct list_head hotplug_devices;
 
 	struct list_head sibling;
 	struct platform_device *dock_device;
 };
 static LIST_HEAD(dock_stations);
 static int dock_station_count;
+static DEFINE_MUTEX(hotplug_lock);
 
 struct dock_dependent_device {
 	struct list_head list;
-	struct list_head hotplug_list;
 	acpi_handle handle;
-	const struct acpi_dock_ops *ops;
-	void *context;
+	const struct acpi_dock_ops *hp_ops;
+	void *hp_context;
+	unsigned int hp_refcount;
+	void (*hp_release)(void *);
 };
 
 #define DOCK_DOCKING	0x00000001
@@ -111,7 +112,6 @@
 
 	dd->handle = handle;
 	INIT_LIST_HEAD(&dd->list);
-	INIT_LIST_HEAD(&dd->hotplug_list);
 
 	spin_lock(&ds->dd_lock);
 	list_add_tail(&dd->list, &ds->dependent_devices);
@@ -121,35 +121,90 @@
 }
 
 /**
- * dock_add_hotplug_device - associate a hotplug handler with the dock station
- * @ds: The dock station
- * @dd: The dependent device struct
- *
- * Add the dependent device to the dock's hotplug device list
+ * dock_init_hotplug - Initialize a hotplug device on a docking station.
+ * @dd: Dock-dependent device.
+ * @ops: Dock operations to attach to the dependent device.
+ * @context: Data to pass to the @ops callbacks and @release.
+ * @init: Optional initialization routine to run after setting up context.
+ * @release: Optional release routine to run on removal.
  */
-static void
-dock_add_hotplug_device(struct dock_station *ds,
-			struct dock_dependent_device *dd)
+static int dock_init_hotplug(struct dock_dependent_device *dd,
+			     const struct acpi_dock_ops *ops, void *context,
+			     void (*init)(void *), void (*release)(void *))
 {
-	mutex_lock(&ds->hp_lock);
-	list_add_tail(&dd->hotplug_list, &ds->hotplug_devices);
-	mutex_unlock(&ds->hp_lock);
+	int ret = 0;
+
+	mutex_lock(&hotplug_lock);
+
+	if (dd->hp_context) {
+		ret = -EEXIST;
+	} else {
+		dd->hp_refcount = 1;
+		dd->hp_ops = ops;
+		dd->hp_context = context;
+		dd->hp_release = release;
+	}
+
+	if (!WARN_ON(ret) && init)
+		init(context);
+
+	mutex_unlock(&hotplug_lock);
+	return ret;
 }
 
 /**
- * dock_del_hotplug_device - remove a hotplug handler from the dock station
- * @ds: The dock station
- * @dd: the dependent device struct
+ * dock_release_hotplug - Decrement hotplug reference counter of dock device.
+ * @dd: Dock-dependent device.
  *
- * Delete the dependent device from the dock's hotplug device list
+ * Decrement the reference counter of @dd and, if it reaches 0, detach its
+ * hotplug operations, reset its context pointer and run the optional release
+ * routine if one is present.
  */
-static void
-dock_del_hotplug_device(struct dock_station *ds,
-			struct dock_dependent_device *dd)
+static void dock_release_hotplug(struct dock_dependent_device *dd)
 {
-	mutex_lock(&ds->hp_lock);
-	list_del(&dd->hotplug_list);
-	mutex_unlock(&ds->hp_lock);
+	void (*release)(void *) = NULL;
+	void *context = NULL;
+
+	mutex_lock(&hotplug_lock);
+
+	if (dd->hp_context && !--dd->hp_refcount) {
+		dd->hp_ops = NULL;
+		context = dd->hp_context;
+		dd->hp_context = NULL;
+		release = dd->hp_release;
+		dd->hp_release = NULL;
+	}
+
+	if (release && context)
+		release(context);
+
+	mutex_unlock(&hotplug_lock);
+}
+
+static void dock_hotplug_event(struct dock_dependent_device *dd, u32 event,
+			       bool uevent)
+{
+	acpi_notify_handler cb = NULL;
+	bool run = false;
+
+	mutex_lock(&hotplug_lock);
+
+	if (dd->hp_context) {
+		run = true;
+		dd->hp_refcount++;
+		if (dd->hp_ops)
+			cb = uevent ? dd->hp_ops->uevent : dd->hp_ops->handler;
+	}
+
+	mutex_unlock(&hotplug_lock);
+
+	if (!run)
+		return;
+
+	if (cb)
+		cb(dd->handle, event, dd->hp_context);
+
+	dock_release_hotplug(dd);
 }
 
 /**
@@ -360,9 +415,8 @@
 	/*
 	 * First call driver specific hotplug functions
 	 */
-	list_for_each_entry(dd, &ds->hotplug_devices, hotplug_list)
-		if (dd->ops && dd->ops->handler)
-			dd->ops->handler(dd->handle, event, dd->context);
+	list_for_each_entry(dd, &ds->dependent_devices, list)
+		dock_hotplug_event(dd, event, false);
 
 	/*
 	 * Now make sure that an acpi_device is created for each
@@ -398,9 +452,8 @@
 	if (num == DOCK_EVENT)
 		kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
 
-	list_for_each_entry(dd, &ds->hotplug_devices, hotplug_list)
-		if (dd->ops && dd->ops->uevent)
-			dd->ops->uevent(dd->handle, event, dd->context);
+	list_for_each_entry(dd, &ds->dependent_devices, list)
+		dock_hotplug_event(dd, event, true);
 
 	if (num != DOCK_EVENT)
 		kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, envp);
@@ -570,19 +623,24 @@
  * @handle: the handle of the device
  * @ops: handlers to call after docking
  * @context: device specific data
+ * @init: Optional initialization routine to run after registration
+ * @release: Optional release routine to run on unregistration
  *
  * If a driver would like to perform a hotplug operation after a dock
  * event, they can register an acpi_notifiy_handler to be called by
  * the dock driver after _DCK is executed.
  */
-int
-register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
-			     void *context)
+int register_hotplug_dock_device(acpi_handle handle,
+				 const struct acpi_dock_ops *ops, void *context,
+				 void (*init)(void *), void (*release)(void *))
 {
 	struct dock_dependent_device *dd;
 	struct dock_station *dock_station;
 	int ret = -EINVAL;
 
+	if (WARN_ON(!context))
+		return -EINVAL;
+
 	if (!dock_station_count)
 		return -ENODEV;
 
@@ -597,12 +655,8 @@
 		 * ops
 		 */
 		dd = find_dock_dependent_device(dock_station, handle);
-		if (dd) {
-			dd->ops = ops;
-			dd->context = context;
-			dock_add_hotplug_device(dock_station, dd);
+		if (dd && !dock_init_hotplug(dd, ops, context, init, release))
 			ret = 0;
-		}
 	}
 
 	return ret;
@@ -624,7 +678,7 @@
 	list_for_each_entry(dock_station, &dock_stations, sibling) {
 		dd = find_dock_dependent_device(dock_station, handle);
 		if (dd)
-			dock_del_hotplug_device(dock_station, dd);
+			dock_release_hotplug(dd);
 	}
 }
 EXPORT_SYMBOL_GPL(unregister_hotplug_dock_device);
@@ -868,8 +922,10 @@
 	if (!count)
 		return -EINVAL;
 
+	acpi_scan_lock_acquire();
 	begin_undock(dock_station);
 	ret = handle_eject_request(dock_station, ACPI_NOTIFY_EJECT_REQUEST);
+	acpi_scan_lock_release();
 	return ret ? ret: count;
 }
 static DEVICE_ATTR(undock, S_IWUSR, NULL, write_undock);
@@ -951,7 +1007,6 @@
 	mutex_init(&dock_station->hp_lock);
 	spin_lock_init(&dock_station->dd_lock);
 	INIT_LIST_HEAD(&dock_station->sibling);
-	INIT_LIST_HEAD(&dock_station->hotplug_devices);
 	ATOMIC_INIT_NOTIFIER_HEAD(&dock_notifier_list);
 	INIT_LIST_HEAD(&dock_station->dependent_devices);
 
@@ -992,30 +1047,6 @@
 }
 
 /**
- * dock_remove - free up resources related to the dock station
- */
-static int dock_remove(struct dock_station *ds)
-{
-	struct dock_dependent_device *dd, *tmp;
-	struct platform_device *dock_device = ds->dock_device;
-
-	if (!dock_station_count)
-		return 0;
-
-	/* remove dependent devices */
-	list_for_each_entry_safe(dd, tmp, &ds->dependent_devices, list)
-		kfree(dd);
-
-	list_del(&ds->sibling);
-
-	/* cleanup sysfs */
-	sysfs_remove_group(&dock_device->dev.kobj, &dock_attribute_group);
-	platform_device_unregister(dock_device);
-
-	return 0;
-}
-
-/**
  * find_dock_and_bay - look for dock stations and bays
  * @handle: acpi handle of a device
  * @lvl: unused
@@ -1033,7 +1064,7 @@
 	return AE_OK;
 }
 
-static int __init dock_init(void)
+int __init acpi_dock_init(void)
 {
 	if (acpi_disabled)
 		return 0;
@@ -1052,19 +1083,3 @@
 		ACPI_DOCK_DRIVER_DESCRIPTION, dock_station_count);
 	return 0;
 }
-
-static void __exit dock_exit(void)
-{
-	struct dock_station *tmp, *dock_station;
-
-	unregister_acpi_bus_notifier(&dock_acpi_notifier);
-	list_for_each_entry_safe(dock_station, tmp, &dock_stations, sibling)
-		dock_remove(dock_station);
-}
-
-/*
- * Must be called before drivers of devices in dock, otherwise we can't know
- * which devices are in a dock
- */
-subsys_initcall(dock_init);
-module_exit(dock_exit);
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 297cbf4..c610a76 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -40,6 +40,11 @@
 #else
 static inline void acpi_container_init(void) {}
 #endif
+#ifdef CONFIG_ACPI_DOCK
+void acpi_dock_init(void);
+#else
+static inline void acpi_dock_init(void) {}
+#endif
 #ifdef CONFIG_ACPI_HOTPLUG_MEMORY
 void acpi_memory_hotplug_init(void);
 #else
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index f962047..288bb27 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -885,6 +885,7 @@
 				ACPI_STA_DEFAULT);
 	mutex_init(&resource->resource_lock);
 	INIT_LIST_HEAD(&resource->dependent);
+	INIT_LIST_HEAD(&resource->list_node);
 	resource->name = device->pnp.bus_id;
 	strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME);
 	strcpy(acpi_device_class(device), ACPI_POWER_CLASS);
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index a3868f6..3322b47 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -304,7 +304,8 @@
 }
 
 static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
-				     u8 triggering, u8 polarity, u8 shareable)
+				     u8 triggering, u8 polarity, u8 shareable,
+				     bool legacy)
 {
 	int irq, p, t;
 
@@ -317,14 +318,19 @@
 	 * In IO-APIC mode, use overrided attribute. Two reasons:
 	 * 1. BIOS bug in DSDT
 	 * 2. BIOS uses IO-APIC mode Interrupt Source Override
+	 *
+	 * We do this only if we are dealing with an IRQ() or IRQNoFlags()
+	 * resource (the legacy ISA resources). With modern ACPI 5 devices
+	 * using extended IRQ descriptors we take the IRQ configuration
+	 * from _CRS directly.
 	 */
-	if (!acpi_get_override_irq(gsi, &t, &p)) {
+	if (legacy && !acpi_get_override_irq(gsi, &t, &p)) {
 		u8 trig = t ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
 		u8 pol = p ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
 
 		if (triggering != trig || polarity != pol) {
 			pr_warning("ACPI: IRQ %d override to %s, %s\n", gsi,
-				   t ? "edge" : "level", p ? "low" : "high");
+				   t ? "level" : "edge", p ? "low" : "high");
 			triggering = trig;
 			polarity = pol;
 		}
@@ -373,7 +379,7 @@
 		}
 		acpi_dev_get_irqresource(res, irq->interrupts[index],
 					 irq->triggering, irq->polarity,
-					 irq->sharable);
+					 irq->sharable, true);
 		break;
 	case ACPI_RESOURCE_TYPE_EXTENDED_IRQ:
 		ext_irq = &ares->data.extended_irq;
@@ -383,7 +389,7 @@
 		}
 		acpi_dev_get_irqresource(res, ext_irq->interrupts[index],
 					 ext_irq->triggering, ext_irq->polarity,
-					 ext_irq->sharable);
+					 ext_irq->sharable, false);
 		break;
 	default:
 		return false;
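
Editorial note: the resource.c hunk above also corrects an inverted log string; t selects level-sensitive triggering, so the override message must print "level" when t is set. A minimal standalone sketch of the corrected mapping (illustrative values only, not driver code):

	#include <stdio.h>

	int main(void)
	{
		int t = 1;	/* 1 = level-sensitive, 0 = edge-sensitive */
		int p = 1;	/* 1 = active-low, 0 = active-high */

		/* the old message used t ? "edge" : "level", i.e. the opposite sense */
		printf("ACPI: IRQ override to %s, %s\n",
		       t ? "level" : "edge", p ? "low" : "high");
		return 0;
	}
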
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index b14ac46..27da630 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -2042,6 +2042,7 @@
 	acpi_lpss_init();
 	acpi_container_init();
 	acpi_memory_hotplug_init();
+	acpi_dock_init();
 
 	mutex_lock(&acpi_scan_lock);
 	/*
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index 87f2f39..cf4e702 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -156,8 +156,10 @@
 
 	spin_unlock_irqrestore(ap->lock, flags);
 
-	if (wait)
+	if (wait) {
 		ata_port_wait_eh(ap);
+		flush_work(&ap->hotplug_task.work);
+	}
 }
 
 static void ata_acpi_dev_notify_dock(acpi_handle handle, u32 event, void *data)
@@ -214,6 +216,39 @@
 	.uevent = ata_acpi_ap_uevent,
 };
 
+void ata_acpi_hotplug_init(struct ata_host *host)
+{
+	int i;
+
+	for (i = 0; i < host->n_ports; i++) {
+		struct ata_port *ap = host->ports[i];
+		acpi_handle handle;
+		struct ata_device *dev;
+
+		if (!ap)
+			continue;
+
+		handle = ata_ap_acpi_handle(ap);
+		if (handle) {
+			/* we might be on a docking station */
+			register_hotplug_dock_device(handle,
+						     &ata_acpi_ap_dock_ops, ap,
+						     NULL, NULL);
+		}
+
+		ata_for_each_dev(dev, &ap->link, ALL) {
+			handle = ata_dev_acpi_handle(dev);
+			if (!handle)
+				continue;
+
+			/* we might be on a docking station */
+			register_hotplug_dock_device(handle,
+						     &ata_acpi_dev_dock_ops,
+						     dev, NULL, NULL);
+		}
+	}
+}
+
 /**
  * ata_acpi_dissociate - dissociate ATA host from ACPI objects
  * @host: target ATA host
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index f218427..adf002a3 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -6148,6 +6148,8 @@
 	if (rc)
 		goto err_tadd;
 
+	ata_acpi_hotplug_init(host);
+
 	/* set cable, sata_spd_limit and report */
 	for (i = 0; i < host->n_ports; i++) {
 		struct ata_port *ap = host->ports[i];
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index c949dd3..577d902 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -122,6 +122,7 @@
 extern void ata_acpi_unregister(void);
 extern void ata_acpi_bind(struct ata_device *dev);
 extern void ata_acpi_unbind(struct ata_device *dev);
+extern void ata_acpi_hotplug_init(struct ata_host *host);
 #else
 static inline void ata_acpi_dissociate(struct ata_host *host) { }
 static inline int ata_acpi_on_suspend(struct ata_port *ap) { return 0; }
@@ -134,6 +135,7 @@
 static inline void ata_acpi_unregister(void) { }
 static inline void ata_acpi_bind(struct ata_device *dev) { }
 static inline void ata_acpi_unbind(struct ata_device *dev) { }
+static inline void ata_acpi_hotplug_init(struct ata_host *host) {}
 #endif
 
 /* libata-scsi.c */
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 4b1f926..01e2103 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -450,8 +450,18 @@
 {
 	struct firmware_buf *buf = fw_priv->buf;
 
+	/*
+	 * There is a small window in which the user can write to 'loading'
+	 * between the load completing and the 'loading' attribute disappearing.
+	 */
+	if (test_bit(FW_STATUS_DONE, &buf->status))
+		return;
+
 	set_bit(FW_STATUS_ABORT, &buf->status);
 	complete_all(&buf->completion);
+
+	/* avoid user action after loading abort */
+	fw_priv->buf = NULL;
 }
 
 #define is_fw_load_aborted(buf)	\
@@ -528,7 +538,12 @@
 				     struct device_attribute *attr, char *buf)
 {
 	struct firmware_priv *fw_priv = to_firmware_priv(dev);
-	int loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status);
+	int loading = 0;
+
+	mutex_lock(&fw_lock);
+	if (fw_priv->buf)
+		loading = test_bit(FW_STATUS_LOADING, &fw_priv->buf->status);
+	mutex_unlock(&fw_lock);
 
 	return sprintf(buf, "%d\n", loading);
 }
@@ -570,12 +585,12 @@
 				      const char *buf, size_t count)
 {
 	struct firmware_priv *fw_priv = to_firmware_priv(dev);
-	struct firmware_buf *fw_buf = fw_priv->buf;
+	struct firmware_buf *fw_buf;
 	int loading = simple_strtol(buf, NULL, 10);
 	int i;
 
 	mutex_lock(&fw_lock);
-
+	fw_buf = fw_priv->buf;
 	if (!fw_buf)
 		goto out;
 
@@ -777,10 +792,6 @@
 			struct firmware_priv, timeout_work.work);
 
 	mutex_lock(&fw_lock);
-	if (test_bit(FW_STATUS_DONE, &(fw_priv->buf->status))) {
-		mutex_unlock(&fw_lock);
-		return;
-	}
 	fw_load_abort(fw_priv);
 	mutex_unlock(&fw_lock);
 }
@@ -861,8 +872,6 @@
 
 	cancel_delayed_work_sync(&fw_priv->timeout_work);
 
-	fw_priv->buf = NULL;
-
 	device_remove_file(f_dev, &dev_attr_loading);
 err_del_bin_attr:
 	device_remove_bin_file(f_dev, &firmware_attr_data);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 3063452..aff789d 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1036,12 +1036,16 @@
 	char *name;
 	u64 segment;
 	int ret;
+	char *name_format;
 
 	name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
 	if (!name)
 		return NULL;
 	segment = offset >> rbd_dev->header.obj_order;
-	ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, "%s.%012llx",
+	name_format = "%s.%012llx";
+	if (rbd_dev->image_format == 2)
+		name_format = "%s.%016llx";
+	ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, name_format,
 			rbd_dev->header.object_prefix, segment);
 	if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) {
 		pr_err("error formatting segment name for #%llu (%d)\n",
@@ -2248,13 +2252,17 @@
 					obj_request->pages, length,
 					offset & ~PAGE_MASK, false, false);
 
+		/*
+		 * set obj_request->img_request before formatting
+		 * the osd_request so that it gets the right snapc
+		 */
+		rbd_img_obj_request_add(img_request, obj_request);
 		if (write_request)
 			rbd_osd_req_format_write(obj_request);
 		else
 			rbd_osd_req_format_read(obj_request);
 
 		obj_request->img_offset = img_offset;
-		rbd_img_obj_request_add(img_request, obj_request);
 
 		img_offset += length;
 		resid -= length;
@@ -4239,6 +4247,10 @@
 
 	down_write(&rbd_dev->header_rwsem);
 
+	ret = rbd_dev_v2_image_size(rbd_dev);
+	if (ret)
+		goto out;
+
 	if (first_time) {
 		ret = rbd_dev_v2_header_onetime(rbd_dev);
 		if (ret)
@@ -4272,10 +4284,6 @@
 					"is EXPERIMENTAL!");
 	}
 
-	ret = rbd_dev_v2_image_size(rbd_dev);
-	if (ret)
-		goto out;
-
 	if (rbd_dev->spec->snap_id == CEPH_NOSNAP)
 		if (rbd_dev->mapping.size != rbd_dev->header.image_size)
 			rbd_dev->mapping.size = rbd_dev->header.image_size;
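
Editorial note: the rbd hunk above switches format 2 images to a 16-hex-digit segment suffix while format 1 keeps 12 digits. A standalone sketch of the two name layouts (the prefix string and segment index are made up for illustration):

	#include <stdio.h>

	int main(void)
	{
		unsigned long long segment = 0x2a;	/* hypothetical segment index */

		/* format 1 object name: 12 hex digits */
		printf("prefix.%012llx\n", segment);
		/* format 2 object name: 16 hex digits */
		printf("prefix.%016llx\n", segment);
		return 0;
	}
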
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index 3a4343b..9a9f518 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -498,6 +498,10 @@
 		add_wait_queue(&thread->wait_q, &wait);
 
 		set_current_state(TASK_INTERRUPTIBLE);
+		if (kthread_should_stop()) {
+			BT_DBG("main_thread: break from main thread");
+			break;
+		}
 
 		if (adapter->wakeup_tries ||
 				((!adapter->int_count) &&
@@ -513,11 +517,6 @@
 
 		BT_DBG("main_thread woke up");
 
-		if (kthread_should_stop()) {
-			BT_DBG("main_thread: break from main thread");
-			break;
-		}
-
 		spin_lock_irqsave(&priv->driver_lock, flags);
 		if (adapter->int_count) {
 			adapter->int_count = 0;
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 934cfd1..1144e8c 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1955,6 +1955,7 @@
 		/* XXX the notifier code should handle this better */
 		if (!cn->notifier_head.head) {
 			srcu_cleanup_notifier_head(&cn->notifier_head);
+			list_del(&cn->node);
 			kfree(cn);
 		}
 
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index 5c97e75..22d7699 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -155,7 +155,7 @@
 
 /* list of all parent clock list */
 PNAME(mout_apll_p)	= { "fin_pll", "fout_apll", };
-PNAME(mout_cpu_p)	= { "mout_apll", "mout_mpll", };
+PNAME(mout_cpu_p)	= { "mout_apll", "sclk_mpll", };
 PNAME(mout_mpll_fout_p)	= { "fout_mplldiv2", "fout_mpll" };
 PNAME(mout_mpll_p)	= { "fin_pll", "mout_mpll_fout" };
 PNAME(mout_bpll_fout_p)	= { "fout_bplldiv2", "fout_bpll" };
@@ -208,10 +208,10 @@
 };
 
 struct samsung_mux_clock exynos5250_mux_clks[] __initdata = {
-	MUX(none, "mout_apll", mout_apll_p, SRC_CPU, 0, 1),
-	MUX(none, "mout_cpu", mout_cpu_p, SRC_CPU, 16, 1),
+	MUX_A(none, "mout_apll", mout_apll_p, SRC_CPU, 0, 1, "mout_apll"),
+	MUX_A(none, "mout_cpu", mout_cpu_p, SRC_CPU, 16, 1, "mout_cpu"),
 	MUX(none, "mout_mpll_fout", mout_mpll_fout_p, PLL_DIV2_SEL, 4, 1),
-	MUX(none, "sclk_mpll", mout_mpll_p, SRC_CORE1, 8, 1),
+	MUX_A(none, "sclk_mpll", mout_mpll_p, SRC_CORE1, 8, 1, "mout_mpll"),
 	MUX(none, "mout_bpll_fout", mout_bpll_fout_p, PLL_DIV2_SEL, 0, 1),
 	MUX(none, "sclk_bpll", mout_bpll_p, SRC_CDREX, 0, 1),
 	MUX(none, "mout_vpllsrc", mout_vpllsrc_p, SRC_TOP2, 0, 1),
@@ -378,7 +378,7 @@
 	GATE(hsi2c3, "hsi2c3", "aclk66", GATE_IP_PERIC, 31, 0, 0),
 	GATE(chipid, "chipid", "aclk66", GATE_IP_PERIS, 0, 0, 0),
 	GATE(sysreg, "sysreg", "aclk66", GATE_IP_PERIS, 1, 0, 0),
-	GATE(pmu, "pmu", "aclk66", GATE_IP_PERIS, 2, 0, 0),
+	GATE(pmu, "pmu", "aclk66", GATE_IP_PERIS, 2, CLK_IGNORE_UNUSED, 0),
 	GATE(tzpc0, "tzpc0", "aclk66", GATE_IP_PERIS, 6, 0, 0),
 	GATE(tzpc1, "tzpc1", "aclk66", GATE_IP_PERIS, 7, 0, 0),
 	GATE(tzpc2, "tzpc2", "aclk66", GATE_IP_PERIS, 8, 0, 0),
diff --git a/drivers/clk/samsung/clk-pll.c b/drivers/clk/samsung/clk-pll.c
index 89135f6..362f12d 100644
--- a/drivers/clk/samsung/clk-pll.c
+++ b/drivers/clk/samsung/clk-pll.c
@@ -111,7 +111,8 @@
 				unsigned long parent_rate)
 {
 	struct samsung_clk_pll36xx *pll = to_clk_pll36xx(hw);
-	u32 mdiv, pdiv, sdiv, kdiv, pll_con0, pll_con1;
+	u32 mdiv, pdiv, sdiv, pll_con0, pll_con1;
+	s16 kdiv;
 	u64 fvco = parent_rate;
 
 	pll_con0 = __raw_readl(pll->con_reg);
@@ -119,7 +120,7 @@
 	mdiv = (pll_con0 >> PLL36XX_MDIV_SHIFT) & PLL36XX_MDIV_MASK;
 	pdiv = (pll_con0 >> PLL36XX_PDIV_SHIFT) & PLL36XX_PDIV_MASK;
 	sdiv = (pll_con0 >> PLL36XX_SDIV_SHIFT) & PLL36XX_SDIV_MASK;
-	kdiv = pll_con1 & PLL36XX_KDIV_MASK;
+	kdiv = (s16)(pll_con1 & PLL36XX_KDIV_MASK);
 
 	fvco *= (mdiv << 16) + kdiv;
 	do_div(fvco, (pdiv << sdiv));
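
Editorial note: the s16 cast above matters because the 16-bit KDIV field is treated as a signed fractional part of the multiplier (hence the cast); reading it as unsigned turns a negative K into a large positive one and inflates the computed rate. A standalone sketch of the difference (register value is made up):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t pll_con1 = 0x0000ff00u;	/* low 16 bits hold K = -256 */
		uint32_t k_raw = pll_con1 & 0xffff;	/* 65280 when read as unsigned */
		int16_t k_signed = (int16_t)(pll_con1 & 0xffff);	/* -256, the intended value */

		printf("unsigned K = %u, signed K = %d\n", k_raw, k_signed);
		return 0;
	}
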
diff --git a/drivers/clk/spear/spear3xx_clock.c b/drivers/clk/spear/spear3xx_clock.c
index f9ec43f..080c3c5 100644
--- a/drivers/clk/spear/spear3xx_clock.c
+++ b/drivers/clk/spear/spear3xx_clock.c
@@ -369,7 +369,7 @@
 	clk_register_clkdev(clk, NULL, "60100000.serial");
 }
 #else
-static inline void spear320_clk_init(void) { }
+static inline void spear320_clk_init(void __iomem *soc_config_base) { }
 #endif
 
 void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_base)
diff --git a/drivers/clk/tegra/clk-tegra30.c b/drivers/clk/tegra/clk-tegra30.c
index c6921f5..ba99e38 100644
--- a/drivers/clk/tegra/clk-tegra30.c
+++ b/drivers/clk/tegra/clk-tegra30.c
@@ -1598,6 +1598,12 @@
 	clk_register_clkdev(clk, "afi", "tegra-pcie");
 	clks[afi] = clk;
 
+	/* pciex */
+	clk = tegra_clk_register_periph_gate("pciex", "pll_e", 0, clk_base, 0,
+				    74, &periph_u_regs, periph_clk_enb_refcnt);
+	clk_register_clkdev(clk, "pciex", "tegra-pcie");
+	clks[pciex] = clk;
+
 	/* kfuse */
 	clk = tegra_clk_register_periph_gate("kfuse", "clk_m",
 				    TEGRA_PERIPH_ON_APB,
@@ -1716,11 +1722,6 @@
 				1, 0, &cml_lock);
 	clk_register_clkdev(clk, "cml1", NULL);
 	clks[cml1] = clk;
-
-	/* pciex */
-	clk = clk_register_fixed_rate(NULL, "pciex", "pll_e", 0, 100000000);
-	clk_register_clkdev(clk, "pciex", NULL);
-	clks[pciex] = clk;
 }
 
 static void __init tegra30_osc_clk_init(void)
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 4b9bb5d..93eb5cb 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -47,6 +47,8 @@
 static struct cpufreq_governor cpufreq_gov_ondemand;
 #endif
 
+static unsigned int default_powersave_bias;
+
 static void ondemand_powersave_bias_init_cpu(int cpu)
 {
 	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
@@ -543,7 +545,7 @@
 
 	tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
 	tuners->ignore_nice = 0;
-	tuners->powersave_bias = 0;
+	tuners->powersave_bias = default_powersave_bias;
 	tuners->io_is_busy = should_io_be_busy();
 
 	dbs_data->tuners = tuners;
@@ -585,6 +587,7 @@
 	unsigned int cpu;
 	cpumask_t done;
 
+	default_powersave_bias = powersave_bias;
 	cpumask_clear(&done);
 
 	get_online_cpus();
@@ -593,11 +596,17 @@
 			continue;
 
 		policy = per_cpu(od_cpu_dbs_info, cpu).cdbs.cur_policy;
-		dbs_data = policy->governor_data;
-		od_tuners = dbs_data->tuners;
-		od_tuners->powersave_bias = powersave_bias;
+		if (!policy)
+			continue;
 
 		cpumask_or(&done, &done, policy->cpus);
+
+		if (policy->governor != &cpufreq_gov_ondemand)
+			continue;
+
+		dbs_data = policy->governor_data;
+		od_tuners = dbs_data->tuners;
+		od_tuners->powersave_bias = default_powersave_bias;
 	}
 	put_online_cpus();
 }
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
index 202d2c8..452800e0 100644
--- a/drivers/firmware/efi/efi-pstore.c
+++ b/drivers/firmware/efi/efi-pstore.c
@@ -104,7 +104,7 @@
 
 static int efi_pstore_write(enum pstore_type_id type,
 		enum kmsg_dump_reason reason, u64 *id,
-		unsigned int part, int count, size_t size,
+		unsigned int part, int count, size_t hsize, size_t size,
 		struct pstore_info *psi)
 {
 	char name[DUMP_NAME_LEN];
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index d3f7d2d..4a43036 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -1094,6 +1094,9 @@
 	const struct omap_gpio_platform_data *pdata;
 	struct resource *res;
 	struct gpio_bank *bank;
+#ifdef CONFIG_ARCH_OMAP1
+	int irq_base;
+#endif
 
 	match = of_match_device(of_match_ptr(omap_gpio_match), dev);
 
@@ -1135,11 +1138,28 @@
 				pdata->get_context_loss_count;
 	}
 
+#ifdef CONFIG_ARCH_OMAP1
+	/*
+	 * REVISIT: Once we have OMAP1 supporting SPARSE_IRQ, we can drop
+	 * irq_alloc_descs() and irq_domain_add_legacy() and just use a
+	 * linear IRQ domain mapping for all OMAP platforms.
+	 */
+	irq_base = irq_alloc_descs(-1, 0, bank->width, 0);
+	if (irq_base < 0) {
+		dev_err(dev, "Couldn't allocate IRQ numbers\n");
+		return -ENODEV;
+	}
 
+	bank->domain = irq_domain_add_legacy(node, bank->width, irq_base,
+					     0, &irq_domain_simple_ops, NULL);
+#else
 	bank->domain = irq_domain_add_linear(node, bank->width,
 					     &irq_domain_simple_ops, NULL);
-	if (!bank->domain)
+#endif
+	if (!bank->domain) {
+		dev_err(dev, "Couldn't register an IRQ domain\n");
 		return -ENODEV;
+	}
 
 	if (bank->regs->set_dataout && bank->regs->clr_dataout)
 		bank->set_dataout = _set_gpio_dataout_reg;
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index dcde352..5b7b911 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -190,8 +190,7 @@
 		if (ret)
 			return ERR_PTR(ret);
 	}
-	return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size,
-			      0600);
+	return dma_buf_export(obj, &drm_gem_prime_dmabuf_ops, obj->size, flags);
 }
 EXPORT_SYMBOL(drm_gem_prime_export);
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b9d00dc..9669a0b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1697,6 +1697,8 @@
 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
 				struct drm_gem_object *gem_obj, int flags);
 
+void i915_gem_restore_fences(struct drm_device *dev);
+
 /* i915_gem_context.c */
 void i915_gem_context_init(struct drm_device *dev);
 void i915_gem_context_fini(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 970ad17..9e35daf 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1801,7 +1801,14 @@
 			gfp |= __GFP_NORETRY | __GFP_NOWARN | __GFP_NO_KSWAPD;
 			gfp &= ~(__GFP_IO | __GFP_WAIT);
 		}
-
+#ifdef CONFIG_SWIOTLB
+		if (swiotlb_nr_tbl()) {
+			st->nents++;
+			sg_set_page(sg, page, PAGE_SIZE, 0);
+			sg = sg_next(sg);
+			continue;
+		}
+#endif
 		if (!i || page_to_pfn(page) != last_pfn + 1) {
 			if (i)
 				sg = sg_next(sg);
@@ -1812,8 +1819,10 @@
 		}
 		last_pfn = page_to_pfn(page);
 	}
-
-	sg_mark_end(sg);
+#ifdef CONFIG_SWIOTLB
+	if (!swiotlb_nr_tbl())
+#endif
+		sg_mark_end(sg);
 	obj->pages = st;
 
 	if (i915_gem_object_needs_bit17_swizzle(obj))
@@ -2117,25 +2126,15 @@
 	}
 }
 
-static void i915_gem_reset_fences(struct drm_device *dev)
+void i915_gem_restore_fences(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int i;
 
 	for (i = 0; i < dev_priv->num_fence_regs; i++) {
 		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
-
-		if (reg->obj)
-			i915_gem_object_fence_lost(reg->obj);
-
-		i915_gem_write_fence(dev, i, NULL);
-
-		reg->pin_count = 0;
-		reg->obj = NULL;
-		INIT_LIST_HEAD(&reg->lru_list);
+		i915_gem_write_fence(dev, i, reg->obj);
 	}
-
-	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
 }
 
 void i915_gem_reset(struct drm_device *dev)
@@ -2158,8 +2157,7 @@
 		obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
 	}
 
-	/* The fence registers are invalidated so clear them out */
-	i915_gem_reset_fences(dev);
+	i915_gem_restore_fences(dev);
 }
 
 /**
@@ -3865,8 +3863,6 @@
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		i915_gem_evict_everything(dev);
 
-	i915_gem_reset_fences(dev);
-
 	/* Hack!  Don't let anybody do execbuf while we don't control the chip.
 	 * We need to replace this with a semaphore, or something.
 	 * And not confound mm.suspended!
@@ -4193,7 +4189,8 @@
 		dev_priv->num_fence_regs = 8;
 
 	/* Initialize fence registers to zero */
-	i915_gem_reset_fences(dev);
+	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
+	i915_gem_restore_fences(dev);
 
 	i915_gem_detect_bit_6_swizzle(dev);
 	init_waitqueue_head(&dev_priv->pending_flip_queue);
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 41f0fde..369b3d8 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -384,6 +384,7 @@
 
 	mutex_lock(&dev->struct_mutex);
 
+	i915_gem_restore_fences(dev);
 	i915_restore_display(dev);
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index a4b71b2..a30f294 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -171,6 +171,11 @@
 		if (user_cmd.command_size > PAGE_SIZE - sizeof(union qxl_release_info))
 			return -EINVAL;
 
+		if (!access_ok(VERIFY_READ,
+			       (void *)(unsigned long)user_cmd.command,
+			       user_cmd.command_size))
+			return -EFAULT;
+
 		ret = qxl_alloc_release_reserved(qdev,
 						 sizeof(union qxl_release_info) +
 						 user_cmd.command_size,
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 0e53416..6948eb8 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2687,6 +2687,9 @@
 int r600_uvd_init(struct radeon_device *rdev)
 {
 	int i, j, r;
+	/* disable byte swapping */
+	u32 lmi_swap_cntl = 0;
+	u32 mp_swap_cntl = 0;
 
 	/* raise clocks while booting up the VCPU */
 	radeon_set_uvd_clocks(rdev, 53300, 40000);
@@ -2711,9 +2714,13 @@
 	WREG32(UVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
 			     (1 << 21) | (1 << 9) | (1 << 20));
 
-	/* disable byte swapping */
-	WREG32(UVD_LMI_SWAP_CNTL, 0);
-	WREG32(UVD_MP_SWAP_CNTL, 0);
+#ifdef __BIG_ENDIAN
+	/* swap (8 in 32) RB and IB */
+	lmi_swap_cntl = 0xa;
+	mp_swap_cntl = 0;
+#endif
+	WREG32(UVD_LMI_SWAP_CNTL, lmi_swap_cntl);
+	WREG32(UVD_MP_SWAP_CNTL, mp_swap_cntl);
 
 	WREG32(UVD_MPC_SET_MUXA0, 0x40c2040);
 	WREG32(UVD_MPC_SET_MUXA1, 0x0);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 1899738..b0dc0b6 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -244,16 +244,6 @@
  */
 void radeon_wb_disable(struct radeon_device *rdev)
 {
-	int r;
-
-	if (rdev->wb.wb_obj) {
-		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
-		if (unlikely(r != 0))
-			return;
-		radeon_bo_kunmap(rdev->wb.wb_obj);
-		radeon_bo_unpin(rdev->wb.wb_obj);
-		radeon_bo_unreserve(rdev->wb.wb_obj);
-	}
 	rdev->wb.enabled = false;
 }
 
@@ -269,6 +259,11 @@
 {
 	radeon_wb_disable(rdev);
 	if (rdev->wb.wb_obj) {
+		if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
+			radeon_bo_kunmap(rdev->wb.wb_obj);
+			radeon_bo_unpin(rdev->wb.wb_obj);
+			radeon_bo_unreserve(rdev->wb.wb_obj);
+		}
 		radeon_bo_unref(&rdev->wb.wb_obj);
 		rdev->wb.wb = NULL;
 		rdev->wb.wb_obj = NULL;
@@ -295,26 +290,26 @@
 			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
 			return r;
 		}
-	}
-	r = radeon_bo_reserve(rdev->wb.wb_obj, false);
-	if (unlikely(r != 0)) {
-		radeon_wb_fini(rdev);
-		return r;
-	}
-	r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
-			  &rdev->wb.gpu_addr);
-	if (r) {
+		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+		if (unlikely(r != 0)) {
+			radeon_wb_fini(rdev);
+			return r;
+		}
+		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
+				&rdev->wb.gpu_addr);
+		if (r) {
+			radeon_bo_unreserve(rdev->wb.wb_obj);
+			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
+			radeon_wb_fini(rdev);
+			return r;
+		}
+		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
 		radeon_bo_unreserve(rdev->wb.wb_obj);
-		dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
-		radeon_wb_fini(rdev);
-		return r;
-	}
-	r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
-	radeon_bo_unreserve(rdev->wb.wb_obj);
-	if (r) {
-		dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
-		radeon_wb_fini(rdev);
-		return r;
+		if (r) {
+			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
+			radeon_wb_fini(rdev);
+			return r;
+		}
 	}
 
 	/* clear wb memory */
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 5b937df..ddb8f8e 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -63,7 +63,9 @@
 {
 	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
 	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
-		*drv->cpu_addr = cpu_to_le32(seq);
+		if (drv->cpu_addr) {
+			*drv->cpu_addr = cpu_to_le32(seq);
+		}
 	} else {
 		WREG32(drv->scratch_reg, seq);
 	}
@@ -84,7 +86,11 @@
 	u32 seq = 0;
 
 	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
-		seq = le32_to_cpu(*drv->cpu_addr);
+		if (drv->cpu_addr) {
+			seq = le32_to_cpu(*drv->cpu_addr);
+		} else {
+			seq = lower_32_bits(atomic64_read(&drv->last_seq));
+		}
 	} else {
 		seq = RREG32(drv->scratch_reg);
 	}
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 2c1341f..43ec4a4 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -1197,11 +1197,13 @@
 int radeon_vm_bo_rmv(struct radeon_device *rdev,
 		     struct radeon_bo_va *bo_va)
 {
-	int r;
+	int r = 0;
 
 	mutex_lock(&rdev->vm_manager.lock);
 	mutex_lock(&bo_va->vm->mutex);
-	r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL);
+	if (bo_va->soffset) {
+		r = radeon_vm_bo_update_pte(rdev, bo_va->vm, bo_va->bo, NULL);
+	}
 	mutex_unlock(&rdev->vm_manager.lock);
 	list_del(&bo_va->vm_list);
 	mutex_unlock(&bo_va->vm->mutex);
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index e17faa7..82434018 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -402,6 +402,13 @@
 		return -ENOMEM;
 	/* Align requested size with padding so unlock_commit can
 	 * pad safely */
+	radeon_ring_free_size(rdev, ring);
+	if (ring->ring_free_dw == (ring->ring_size / 4)) {
+		/* The ring is empty; update the lockup info to avoid a
+		 * false positive.
+		 */
+		radeon_ring_lockup_update(ring);
+	}
 	ndw = (ndw + ring->align_mask) & ~ring->align_mask;
 	while (ndw > (ring->ring_free_dw - 1)) {
 		radeon_ring_free_size(rdev, ring);
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 906e5c0..cad735d 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -159,7 +159,17 @@
 	if (!r) {
 		radeon_bo_kunmap(rdev->uvd.vcpu_bo);
 		radeon_bo_unpin(rdev->uvd.vcpu_bo);
+		rdev->uvd.cpu_addr = NULL;
+		if (!radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_CPU, NULL)) {
+			radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
+		}
 		radeon_bo_unreserve(rdev->uvd.vcpu_bo);
+
+		if (rdev->uvd.cpu_addr) {
+			radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
+		} else {
+			rdev->fence_drv[R600_RING_TYPE_UVD_INDEX].cpu_addr = NULL;
+		}
 	}
 	return r;
 }
@@ -178,6 +188,10 @@
 		return r;
 	}
 
+	/* The bo was pinned to the CPU domain during suspend; unmap and
+	 * unpin it before moving it back to VRAM.
+	 */
+	radeon_bo_kunmap(rdev->uvd.vcpu_bo);
+	radeon_bo_unpin(rdev->uvd.vcpu_bo);
+
 	r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
 			  &rdev->uvd.gpu_addr);
 	if (r) {
@@ -613,19 +627,19 @@
 	}
 
 	/* stitch together an UVD create msg */
-	msg[0] = 0x00000de4;
-	msg[1] = 0x00000000;
-	msg[2] = handle;
-	msg[3] = 0x00000000;
-	msg[4] = 0x00000000;
-	msg[5] = 0x00000000;
-	msg[6] = 0x00000000;
-	msg[7] = 0x00000780;
-	msg[8] = 0x00000440;
-	msg[9] = 0x00000000;
-	msg[10] = 0x01b37000;
+	msg[0] = cpu_to_le32(0x00000de4);
+	msg[1] = cpu_to_le32(0x00000000);
+	msg[2] = cpu_to_le32(handle);
+	msg[3] = cpu_to_le32(0x00000000);
+	msg[4] = cpu_to_le32(0x00000000);
+	msg[5] = cpu_to_le32(0x00000000);
+	msg[6] = cpu_to_le32(0x00000000);
+	msg[7] = cpu_to_le32(0x00000780);
+	msg[8] = cpu_to_le32(0x00000440);
+	msg[9] = cpu_to_le32(0x00000000);
+	msg[10] = cpu_to_le32(0x01b37000);
 	for (i = 11; i < 1024; ++i)
-		msg[i] = 0x0;
+		msg[i] = cpu_to_le32(0x0);
 
 	radeon_bo_kunmap(bo);
 	radeon_bo_unreserve(bo);
@@ -659,12 +673,12 @@
 	}
 
 	/* stitch together an UVD destroy msg */
-	msg[0] = 0x00000de4;
-	msg[1] = 0x00000002;
-	msg[2] = handle;
-	msg[3] = 0x00000000;
+	msg[0] = cpu_to_le32(0x00000de4);
+	msg[1] = cpu_to_le32(0x00000002);
+	msg[2] = cpu_to_le32(handle);
+	msg[3] = cpu_to_le32(0x00000000);
 	for (i = 4; i < 1024; ++i)
-		msg[i] = 0x0;
+		msg[i] = cpu_to_le32(0x0);
 
 	radeon_bo_kunmap(bo);
 	radeon_bo_unreserve(bo);
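
Editorial note: the cpu_to_le32() wrappers above keep the UVD message words little-endian regardless of host byte order. A standalone sketch of the same idea with an explicit byte store (word values copied from the patch, buffer layout simplified):

	#include <stdio.h>
	#include <stdint.h>

	/* Store a CPU-order 32-bit value as little-endian bytes. */
	static void put_le32(uint8_t *dst, uint32_t v)
	{
		dst[0] = (uint8_t)(v & 0xff);
		dst[1] = (uint8_t)((v >> 8) & 0xff);
		dst[2] = (uint8_t)((v >> 16) & 0xff);
		dst[3] = (uint8_t)((v >> 24) & 0xff);
	}

	int main(void)
	{
		uint8_t msg[8];

		put_le32(msg, 0x00000de4);	/* first message word from the patch */
		put_le32(msg + 4, 0x00000000);	/* second message word */
		printf("%02x %02x %02x %02x\n", msg[0], msg[1], msg[2], msg[3]);
		return 0;
	}
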
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index d6cbfe9..fa061d4 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -137,7 +137,7 @@
 	{ 0x0738, 0x4540, "Mad Catz Beat Pad", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
 	{ 0x0738, 0x4556, "Mad Catz Lynx Wireless Controller", 0, XTYPE_XBOX },
 	{ 0x0738, 0x4716, "Mad Catz Wired Xbox 360 Controller", 0, XTYPE_XBOX360 },
-	{ 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", XTYPE_XBOX360 },
+	{ 0x0738, 0x4728, "Mad Catz Street Fighter IV FightPad", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
 	{ 0x0738, 0x4738, "Mad Catz Wired Xbox 360 Controller (SFIV)", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
 	{ 0x0738, 0x6040, "Mad Catz Beat Pad Pro", MAP_DPAD_TO_BUTTONS, XTYPE_XBOX },
 	{ 0x0738, 0xbeef, "Mad Catz JOYTECH NEO SE Advanced GamePad", XTYPE_XBOX360 },
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 62a2c0e..7ac9c98 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -431,6 +431,7 @@
 
 config KEYBOARD_OPENCORES
 	tristate "OpenCores Keyboard Controller"
+	depends on HAS_IOMEM
 	help
 	  Say Y here if you want to use the OpenCores Keyboard Controller
 	  http://www.opencores.org/project,keyboardcontroller
diff --git a/drivers/input/serio/Kconfig b/drivers/input/serio/Kconfig
index aebfe3e..1bda828 100644
--- a/drivers/input/serio/Kconfig
+++ b/drivers/input/serio/Kconfig
@@ -205,6 +205,7 @@
 
 config SERIO_ALTERA_PS2
 	tristate "Altera UP PS/2 controller"
+	depends on HAS_IOMEM
 	help
 	  Say Y here if you have Altera University Program PS/2 ports.
 
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 518282d..384fbcd 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -363,6 +363,7 @@
 		case 0x140802: /* Intuos4/5 13HD/24HD Classic Pen */
 		case 0x160802: /* Cintiq 13HD Pro Pen */
 		case 0x180802: /* DTH2242 Pen */
+		case 0x100802: /* Intuos4/5 13HD/24HD General Pen */
 			wacom->tool[idx] = BTN_TOOL_PEN;
 			break;
 
@@ -401,6 +402,7 @@
 		case 0x10080c: /* Intuos4/5 13HD/24HD Art Pen Eraser */
 		case 0x16080a: /* Cintiq 13HD Pro Pen Eraser */
 		case 0x18080a: /* DTH2242 Eraser */
+		case 0x10080a: /* Intuos4/5 13HD/24HD General Pen Eraser */
 			wacom->tool[idx] = BTN_TOOL_RUBBER;
 			break;
 
diff --git a/drivers/input/touchscreen/cyttsp_core.c b/drivers/input/touchscreen/cyttsp_core.c
index 8e60437..ae89d26 100644
--- a/drivers/input/touchscreen/cyttsp_core.c
+++ b/drivers/input/touchscreen/cyttsp_core.c
@@ -116,6 +116,15 @@
 	return ttsp_write_block_data(ts, CY_REG_BASE, sizeof(cmd), &cmd);
 }
 
+static int cyttsp_handshake(struct cyttsp *ts)
+{
+	if (ts->pdata->use_hndshk)
+		return ttsp_send_command(ts,
+				ts->xy_data.hst_mode ^ CY_HNDSHK_BIT);
+
+	return 0;
+}
+
 static int cyttsp_load_bl_regs(struct cyttsp *ts)
 {
 	memset(&ts->bl_data, 0, sizeof(ts->bl_data));
@@ -133,7 +142,7 @@
 	memcpy(bl_cmd, bl_command, sizeof(bl_command));
 	if (ts->pdata->bl_keys)
 		memcpy(&bl_cmd[sizeof(bl_command) - CY_NUM_BL_KEYS],
-			ts->pdata->bl_keys, sizeof(bl_command));
+			ts->pdata->bl_keys, CY_NUM_BL_KEYS);
 
 	error = ttsp_write_block_data(ts, CY_REG_BASE,
 				      sizeof(bl_cmd), bl_cmd);
@@ -167,6 +176,10 @@
 	if (error)
 		return error;
 
+	error = cyttsp_handshake(ts);
+	if (error)
+		return error;
+
 	return ts->xy_data.act_dist == CY_ACT_DIST_DFLT ? -EIO : 0;
 }
 
@@ -188,6 +201,10 @@
 	if (error)
 		return error;
 
+	error = cyttsp_handshake(ts);
+	if (error)
+		return error;
+
 	if (!ts->sysinfo_data.tts_verh && !ts->sysinfo_data.tts_verl)
 		return -EIO;
 
@@ -344,12 +361,9 @@
 		goto out;
 
 	/* provide flow control handshake */
-	if (ts->pdata->use_hndshk) {
-		error = ttsp_send_command(ts,
-				ts->xy_data.hst_mode ^ CY_HNDSHK_BIT);
-		if (error)
-			goto out;
-	}
+	error = cyttsp_handshake(ts);
+	if (error)
+		goto out;
 
 	if (unlikely(ts->state == CY_IDLE_STATE))
 		goto out;
diff --git a/drivers/input/touchscreen/cyttsp_core.h b/drivers/input/touchscreen/cyttsp_core.h
index 1aa3c69..f1ebde3 100644
--- a/drivers/input/touchscreen/cyttsp_core.h
+++ b/drivers/input/touchscreen/cyttsp_core.h
@@ -67,8 +67,8 @@
 /* TTSP System Information interface definition */
 struct cyttsp_sysinfo_data {
 	u8 hst_mode;
-	u8 mfg_cmd;
 	u8 mfg_stat;
+	u8 mfg_cmd;
 	u8 cid[3];
 	u8 tt_undef1;
 	u8 uid[8];
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 1760ceb6..19ceaa6 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -705,7 +705,7 @@
 static int __cpuinit gic_secondary_init(struct notifier_block *nfb,
 					unsigned long action, void *hcpu)
 {
-	if (action == CPU_STARTING)
+	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
 		gic_cpu_init(&gic_data[0]);
 	return NOTIFY_OK;
 }
diff --git a/drivers/macintosh/adb.c b/drivers/macintosh/adb.c
index b026896..04a5049 100644
--- a/drivers/macintosh/adb.c
+++ b/drivers/macintosh/adb.c
@@ -697,7 +697,7 @@
 	int ret = 0;
 	struct adbdev_state *state = file->private_data;
 	struct adb_request *req;
-	wait_queue_t wait = __WAITQUEUE_INITIALIZER(wait,current);
+	DECLARE_WAITQUEUE(wait,current);
 	unsigned long flags;
 
 	if (count < 2)
diff --git a/drivers/macintosh/mac_hid.c b/drivers/macintosh/mac_hid.c
index 6a82388..80d30e8 100644
--- a/drivers/macintosh/mac_hid.c
+++ b/drivers/macintosh/mac_hid.c
@@ -181,7 +181,7 @@
 	mac_hid_destroy_emumouse();
 }
 
-static int mac_hid_toggle_emumouse(ctl_table *table, int write,
+static int mac_hid_toggle_emumouse(struct ctl_table *table, int write,
 				   void __user *buffer, size_t *lenp,
 				   loff_t *ppos)
 {
@@ -214,7 +214,7 @@
 }
 
 /* file(s) in /proc/sys/dev/mac_hid */
-static ctl_table mac_hid_files[] = {
+static struct ctl_table mac_hid_files[] = {
 	{
 		.procname	= "mouse_button_emulation",
 		.data		= &mouse_emulate_buttons,
@@ -240,7 +240,7 @@
 };
 
 /* dir in /proc/sys/dev */
-static ctl_table mac_hid_dir[] = {
+static struct ctl_table mac_hid_dir[] = {
 	{
 		.procname	= "mac_hid",
 		.maxlen		= 0,
@@ -251,7 +251,7 @@
 };
 
 /* /proc/sys/dev itself, in case that is not there yet */
-static ctl_table mac_hid_root_dir[] = {
+static struct ctl_table mac_hid_root_dir[] = {
 	{
 		.procname	= "dev",
 		.maxlen		= 0,
diff --git a/drivers/macintosh/via-cuda.c b/drivers/macintosh/via-cuda.c
index 86511c5..d61f271 100644
--- a/drivers/macintosh/via-cuda.c
+++ b/drivers/macintosh/via-cuda.c
@@ -259,7 +259,7 @@
     } while (0)
 
 static int
-cuda_init_via(void)
+__init cuda_init_via(void)
 {
     out_8(&via[DIRB], (in_8(&via[DIRB]) | TACK | TIP) & ~TREQ);	/* TACK & TIP out */
     out_8(&via[B], in_8(&via[B]) | TACK | TIP);			/* negate them */
diff --git a/drivers/macintosh/windfarm_pm121.c b/drivers/macintosh/windfarm_pm121.c
index af605e9..7fe58b0 100644
--- a/drivers/macintosh/windfarm_pm121.c
+++ b/drivers/macintosh/windfarm_pm121.c
@@ -276,6 +276,7 @@
 
 static unsigned int pm121_failure_state;
 static int pm121_readjust, pm121_skipping;
+static bool pm121_overtemp;
 static s32 average_power;
 
 struct pm121_correction {
@@ -847,6 +848,7 @@
 	if (new_failure & FAILURE_OVERTEMP) {
 		wf_set_overtemp();
 		pm121_skipping = 2;
+		pm121_overtemp = true;
 	}
 
 	/* We only clear the overtemp condition if overtemp is cleared
@@ -855,8 +857,10 @@
 	 * the control loop levels, but we don't want to keep it clear
 	 * here in this case
 	 */
-	if (new_failure == 0 && last_failure & FAILURE_OVERTEMP)
+	if (!pm121_failure_state && pm121_overtemp) {
 		wf_clear_overtemp();
+		pm121_overtemp = false;
+	}
 }
 
 
diff --git a/drivers/macintosh/windfarm_pm81.c b/drivers/macintosh/windfarm_pm81.c
index f84933f..2a5e1b1 100644
--- a/drivers/macintosh/windfarm_pm81.c
+++ b/drivers/macintosh/windfarm_pm81.c
@@ -149,6 +149,7 @@
 
 static unsigned int wf_smu_failure_state;
 static int wf_smu_readjust, wf_smu_skipping;
+static bool wf_smu_overtemp;
 
 /*
  * ****** System Fans Control Loop ******
@@ -593,6 +594,7 @@
 	if (new_failure & FAILURE_OVERTEMP) {
 		wf_set_overtemp();
 		wf_smu_skipping = 2;
+		wf_smu_overtemp = true;
 	}
 
 	/* We only clear the overtemp condition if overtemp is cleared
@@ -601,8 +603,10 @@
 	 * the control loop levels, but we don't want to keep it clear
 	 * here in this case
 	 */
-	if (new_failure == 0 && last_failure & FAILURE_OVERTEMP)
+	if (!wf_smu_failure_state && wf_smu_overtemp) {
 		wf_clear_overtemp();
+		wf_smu_overtemp = false;
+	}
 }
 
 static void wf_smu_new_control(struct wf_control *ct)
diff --git a/drivers/macintosh/windfarm_pm91.c b/drivers/macintosh/windfarm_pm91.c
index 2eb484f..a8ac66c 100644
--- a/drivers/macintosh/windfarm_pm91.c
+++ b/drivers/macintosh/windfarm_pm91.c
@@ -76,6 +76,7 @@
 
 /* Set to kick the control loop into life */
 static int wf_smu_all_controls_ok, wf_smu_all_sensors_ok, wf_smu_started;
+static bool wf_smu_overtemp;
 
 /* Failure handling.. could be nicer */
 #define FAILURE_FAN		0x01
@@ -517,6 +518,7 @@
 	if (new_failure & FAILURE_OVERTEMP) {
 		wf_set_overtemp();
 		wf_smu_skipping = 2;
+		wf_smu_overtemp = true;
 	}
 
 	/* We only clear the overtemp condition if overtemp is cleared
@@ -525,8 +527,10 @@
 	 * the control loop levels, but we don't want to keep it clear
 	 * here in this case
 	 */
-	if (new_failure == 0 && last_failure & FAILURE_OVERTEMP)
+	if (!wf_smu_failure_state && wf_smu_overtemp) {
 		wf_clear_overtemp();
+		wf_smu_overtemp = false;
+	}
 }
 
 
diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c
index d87f5ee..ad6223e 100644
--- a/drivers/macintosh/windfarm_smu_sat.c
+++ b/drivers/macintosh/windfarm_smu_sat.c
@@ -343,7 +343,6 @@
 		wf_unregister_sensor(&sens->sens);
 	}
 	sat->i2c = NULL;
-	i2c_set_clientdata(client, NULL);
 	kref_put(&sat->ref, wf_sat_release);
 
 	return 0;
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index 7f5a7ca..8270388 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -136,9 +136,9 @@
 
 # This Kconfig option is used by both PCI and USB drivers
 config TTPCI_EEPROM
-        tristate
-        depends on I2C
-        default n
+	tristate
+	depends on I2C
+	default n
 
 source "drivers/media/dvb-core/Kconfig"
 
@@ -189,6 +189,12 @@
 
 	  If unsure say Y.
 
+config MEDIA_ATTACH
+	bool
+	depends on MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT || MEDIA_RADIO_SUPPORT
+	depends on MODULES
+	default MODULES
+
 source "drivers/media/i2c/Kconfig"
 source "drivers/media/tuners/Kconfig"
 source "drivers/media/dvb-frontends/Kconfig"
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
index cb52438..9eac531 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
@@ -956,7 +956,7 @@
 
 	if (fie->pad != OIF_SOURCE_PAD)
 		return -EINVAL;
-	if (fie->index > ARRAY_SIZE(s5c73m3_intervals))
+	if (fie->index >= ARRAY_SIZE(s5c73m3_intervals))
 		return -EINVAL;
 
 	mutex_lock(&state->lock);
diff --git a/drivers/media/pci/cx88/cx88-alsa.c b/drivers/media/pci/cx88/cx88-alsa.c
index 27d6262..aba5b1c 100644
--- a/drivers/media/pci/cx88/cx88-alsa.c
+++ b/drivers/media/pci/cx88/cx88-alsa.c
@@ -615,7 +615,7 @@
 	int changed = 0;
 	u32 old;
 
-	if (core->board.audio_chip == V4L2_IDENT_WM8775)
+	if (core->sd_wm8775)
 		snd_cx88_wm8775_volume_put(kcontrol, value);
 
 	left = value->value.integer.value[0] & 0x3f;
@@ -682,8 +682,7 @@
 		vol ^= bit;
 		cx_swrite(SHADOW_AUD_VOL_CTL, AUD_VOL_CTL, vol);
 		/* Pass mute onto any WM8775 */
-		if ((core->board.audio_chip == V4L2_IDENT_WM8775) &&
-		    ((1<<6) == bit))
+		if (core->sd_wm8775 && ((1<<6) == bit))
 			wm8775_s_ctrl(core, V4L2_CID_AUDIO_MUTE, 0 != (vol & bit));
 		ret = 1;
 	}
@@ -903,7 +902,7 @@
 		goto error;
 
 	/* If there's a wm8775 then add a Line-In ALC switch */
-	if (core->board.audio_chip == V4L2_IDENT_WM8775)
+	if (core->sd_wm8775)
 		snd_ctl_add(card, snd_ctl_new1(&snd_cx88_alc_switch, chip));
 
 	strcpy (card->driver, "CX88x");
diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
index 1b00615..c7a9be1 100644
--- a/drivers/media/pci/cx88/cx88-video.c
+++ b/drivers/media/pci/cx88/cx88-video.c
@@ -385,8 +385,7 @@
 		/* The wm8775 module has the "2" route hardwired into
 		   the initialization. Some boards may use different
 		   routes for different inputs. HVR-1300 surely does */
-		if (core->board.audio_chip &&
-		    core->board.audio_chip == V4L2_IDENT_WM8775) {
+		if (core->sd_wm8775) {
 			call_all(core, audio, s_routing,
 				 INPUT(input).audioroute, 0, 0);
 		}
@@ -771,8 +770,7 @@
 		cx_write(MO_GP1_IO, core->board.radio.gpio1);
 		cx_write(MO_GP2_IO, core->board.radio.gpio2);
 		if (core->board.radio.audioroute) {
-			if(core->board.audio_chip &&
-				core->board.audio_chip == V4L2_IDENT_WM8775) {
+			if (core->sd_wm8775) {
 				call_all(core, audio, s_routing,
 					core->board.radio.audioroute, 0, 0);
 			}
@@ -959,7 +957,7 @@
 	u32 value,mask;
 
 	/* Pass changes onto any WM8775 */
-	if (core->board.audio_chip == V4L2_IDENT_WM8775) {
+	if (core->sd_wm8775) {
 		switch (ctrl->id) {
 		case V4L2_CID_AUDIO_MUTE:
 			wm8775_s_ctrl(core, ctrl->id, ctrl->val);
diff --git a/drivers/media/platform/coda.c b/drivers/media/platform/coda.c
index 48b8d7a..9d1481a 100644
--- a/drivers/media/platform/coda.c
+++ b/drivers/media/platform/coda.c
@@ -576,6 +576,14 @@
 	return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
 }
 
+static int vidioc_create_bufs(struct file *file, void *priv,
+			      struct v4l2_create_buffers *create)
+{
+	struct coda_ctx *ctx = fh_to_ctx(priv);
+
+	return v4l2_m2m_create_bufs(file, ctx->m2m_ctx, create);
+}
+
 static int vidioc_streamon(struct file *file, void *priv,
 			   enum v4l2_buf_type type)
 {
@@ -610,6 +618,7 @@
 
 	.vidioc_qbuf		= vidioc_qbuf,
 	.vidioc_dqbuf		= vidioc_dqbuf,
+	.vidioc_create_bufs	= vidioc_create_bufs,
 
 	.vidioc_streamon	= vidioc_streamon,
 	.vidioc_streamoff	= vidioc_streamoff,
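With the coda hunk above wiring vidioc_create_bufs through to the m2m core (the v4l2_m2m_create_bufs() helper itself is added later in this diff), applications can allocate extra buffers on top of an existing REQBUFS allocation. A hedged userspace sketch, assuming an already-open video node fd and the usual <linux/videodev2.h>, <sys/ioctl.h> and <stdio.h> includes:

	/* Allocate two additional MMAP capture buffers in the current format. */
	struct v4l2_create_buffers create = {
		.count = 2,
		.memory = V4L2_MEMORY_MMAP,
	};

	create.format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	ioctl(fd, VIDIOC_G_FMT, &create.format);	/* reuse current format */
	if (ioctl(fd, VIDIOC_CREATE_BUFS, &create) == 0)
		printf("first new buffer has index %u\n", create.index);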
diff --git a/drivers/media/platform/davinci/vpbe_display.c b/drivers/media/platform/davinci/vpbe_display.c
index 1802f11..d0b375c 100644
--- a/drivers/media/platform/davinci/vpbe_display.c
+++ b/drivers/media/platform/davinci/vpbe_display.c
@@ -916,6 +916,21 @@
 	other video window */
 
 	layer->pix_fmt = *pixfmt;
+	if (pixfmt->pixelformat == V4L2_PIX_FMT_NV12) {
+		struct vpbe_layer *otherlayer;
+
+		otherlayer = _vpbe_display_get_other_win_layer(disp_dev, layer);
+		/* If the other layer is available, only claim it;
+		 * do not configure it.
+		 */
+		ret = osd_device->ops.request_layer(osd_device,
+						    otherlayer->layer_info.id);
+		if (ret < 0) {
+			v4l2_err(&vpbe_dev->v4l2_dev,
+				 "Display Manager failed to allocate layer\n");
+			return -EBUSY;
+		}
+	}
 
 	/* Get osd layer config */
 	osd_device->ops.get_layer_config(osd_device,
diff --git a/drivers/media/platform/davinci/vpfe_capture.c b/drivers/media/platform/davinci/vpfe_capture.c
index 8c50d30..9360909 100644
--- a/drivers/media/platform/davinci/vpfe_capture.c
+++ b/drivers/media/platform/davinci/vpfe_capture.c
@@ -1837,7 +1837,7 @@
 	if (NULL == ccdc_cfg) {
 		v4l2_err(pdev->dev.driver,
 			 "Memory allocation failed for ccdc_cfg\n");
-		goto probe_free_lock;
+		goto probe_free_dev_mem;
 	}
 
 	mutex_lock(&ccdc_lock);
@@ -1991,7 +1991,6 @@
 	free_irq(vpfe_dev->ccdc_irq0, vpfe_dev);
 probe_free_ccdc_cfg_mem:
 	kfree(ccdc_cfg);
-probe_free_lock:
 	mutex_unlock(&ccdc_lock);
 probe_free_dev_mem:
 	kfree(vpfe_dev);
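The vpfe_capture fix above repairs the probe error path: when the ccdc_cfg allocation fails the ccdc_lock mutex has not been taken yet, so jumping to a label that unlocks it was wrong. Error labels should undo only what was actually set up, in reverse order. A small sketch of that ordering, with all demo_* names being placeholders:

	static int demo_probe(void)
	{
		struct demo_dev *dev;
		struct demo_cfg *cfg;
		int ret;

		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
		if (!dev)
			return -ENOMEM;

		cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
		if (!cfg) {
			ret = -ENOMEM;
			goto free_dev;		/* lock not taken yet */
		}

		mutex_lock(&demo_lock);
		ret = demo_setup(dev, cfg);
		mutex_unlock(&demo_lock);
		if (ret)
			goto free_cfg;
		return 0;

	free_cfg:
		kfree(cfg);
	free_dev:
		kfree(dev);
		return ret;
	}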
diff --git a/drivers/media/platform/exynos4-is/fimc-is-regs.c b/drivers/media/platform/exynos4-is/fimc-is-regs.c
index b0ff67b..d05eaa2 100644
--- a/drivers/media/platform/exynos4-is/fimc-is-regs.c
+++ b/drivers/media/platform/exynos4-is/fimc-is-regs.c
@@ -174,7 +174,7 @@
 		HIC_CAPTURE_STILL, HIC_CAPTURE_VIDEO,
 	};
 
-	if (WARN_ON(is->config_index > ARRAY_SIZE(cmd)))
+	if (WARN_ON(is->config_index >= ARRAY_SIZE(cmd)))
 		return -EINVAL;
 
 	mcuctl_write(cmd[is->config_index], is, MCUCTL_REG_ISSR(0));
diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c
index 47c6363..0741945 100644
--- a/drivers/media/platform/exynos4-is/fimc-is.c
+++ b/drivers/media/platform/exynos4-is/fimc-is.c
@@ -48,7 +48,6 @@
 	[ISS_CLK_LITE0]			= "lite0",
 	[ISS_CLK_LITE1]			= "lite1",
 	[ISS_CLK_MPLL]			= "mpll",
-	[ISS_CLK_SYSREG]		= "sysreg",
 	[ISS_CLK_ISP]			= "isp",
 	[ISS_CLK_DRC]			= "drc",
 	[ISS_CLK_FD]			= "fd",
@@ -71,7 +70,6 @@
 	for (i = 0; i < ISS_CLKS_MAX; i++) {
 		if (IS_ERR(is->clocks[i]))
 			continue;
-		clk_unprepare(is->clocks[i]);
 		clk_put(is->clocks[i]);
 		is->clocks[i] = ERR_PTR(-EINVAL);
 	}
@@ -90,12 +88,6 @@
 			ret = PTR_ERR(is->clocks[i]);
 			goto err;
 		}
-		ret = clk_prepare(is->clocks[i]);
-		if (ret < 0) {
-			clk_put(is->clocks[i]);
-			is->clocks[i] = ERR_PTR(-EINVAL);
-			goto err;
-		}
 	}
 
 	return 0;
@@ -103,7 +95,7 @@
 	fimc_is_put_clocks(is);
 	dev_err(&is->pdev->dev, "failed to get clock: %s\n",
 		fimc_is_clocks[i]);
-	return -ENXIO;
+	return ret;
 }
 
 static int fimc_is_setup_clocks(struct fimc_is *is)
@@ -144,7 +136,7 @@
 	for (i = 0; i < ISS_GATE_CLKS_MAX; i++) {
 		if (IS_ERR(is->clocks[i]))
 			continue;
-		ret = clk_enable(is->clocks[i]);
+		ret = clk_prepare_enable(is->clocks[i]);
 		if (ret < 0) {
 			dev_err(&is->pdev->dev, "clock %s enable failed\n",
 				fimc_is_clocks[i]);
@@ -163,7 +155,7 @@
 
 	for (i = 0; i < ISS_GATE_CLKS_MAX; i++) {
 		if (!IS_ERR(is->clocks[i])) {
-			clk_disable(is->clocks[i]);
+			clk_disable_unprepare(is->clocks[i]);
 			pr_debug("disabled clock: %s\n", fimc_is_clocks[i]);
 		}
 	}
@@ -326,6 +318,11 @@
 	struct device *dev = &is->pdev->dev;
 	int ret;
 
+	if (is->fw.f_w == NULL) {
+		dev_err(dev, "firmware is not loaded\n");
+		return -EINVAL;
+	}
+
 	memcpy(is->memory.vaddr, is->fw.f_w->data, is->fw.f_w->size);
 	wmb();
 
@@ -837,23 +834,11 @@
 		goto err_clk;
 	}
 	pm_runtime_enable(dev);
-	/*
-	 * Enable only the ISP power domain, keep FIMC-IS clocks off until
-	 * the whole clock tree is configured. The ISP power domain needs
-	 * be active in order to acces any CMU_ISP clock registers.
-	 */
+
 	ret = pm_runtime_get_sync(dev);
 	if (ret < 0)
 		goto err_irq;
 
-	ret = fimc_is_setup_clocks(is);
-	pm_runtime_put_sync(dev);
-
-	if (ret < 0)
-		goto err_irq;
-
-	is->clk_init = true;
-
 	is->alloc_ctx = vb2_dma_contig_init_ctx(dev);
 	if (IS_ERR(is->alloc_ctx)) {
 		ret = PTR_ERR(is->alloc_ctx);
@@ -875,6 +860,8 @@
 	if (ret < 0)
 		goto err_dfs;
 
+	pm_runtime_put_sync(dev);
+
 	dev_dbg(dev, "FIMC-IS registered successfully\n");
 	return 0;
 
@@ -894,9 +881,11 @@
 static int fimc_is_runtime_resume(struct device *dev)
 {
 	struct fimc_is *is = dev_get_drvdata(dev);
+	int ret;
 
-	if (!is->clk_init)
-		return 0;
+	ret = fimc_is_setup_clocks(is);
+	if (ret)
+		return ret;
 
 	return fimc_is_enable_clocks(is);
 }
@@ -905,9 +894,7 @@
 {
 	struct fimc_is *is = dev_get_drvdata(dev);
 
-	if (is->clk_init)
-		fimc_is_disable_clocks(is);
-
+	fimc_is_disable_clocks(is);
 	return 0;
 }
 
@@ -941,7 +928,8 @@
 	vb2_dma_contig_cleanup_ctx(is->alloc_ctx);
 	fimc_is_put_clocks(is);
 	fimc_is_debugfs_remove(is);
-	release_firmware(is->fw.f_w);
+	if (is->fw.f_w)
+		release_firmware(is->fw.f_w);
 	fimc_is_free_cpu_memory(is);
 
 	return 0;
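The fimc-is changes above drop the separate clk_prepare() done at lookup time and switch the runtime-PM path to clk_prepare_enable()/clk_disable_unprepare(). Since clk_prepare() may sleep, pairing it with enable in the runtime-PM callbacks (process context) is the usual idiom. A minimal sketch, with gate_clk standing in for the driver's clock array:

	static int demo_runtime_resume(struct device *dev)
	{
		/* prepare (may sleep) and enable in one call */
		return clk_prepare_enable(gate_clk);
	}

	static int demo_runtime_suspend(struct device *dev)
	{
		clk_disable_unprepare(gate_clk);
		return 0;
	}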
diff --git a/drivers/media/platform/exynos4-is/fimc-is.h b/drivers/media/platform/exynos4-is/fimc-is.h
index f5275a5..d7db133 100644
--- a/drivers/media/platform/exynos4-is/fimc-is.h
+++ b/drivers/media/platform/exynos4-is/fimc-is.h
@@ -73,7 +73,6 @@
 	ISS_CLK_LITE0,
 	ISS_CLK_LITE1,
 	ISS_CLK_MPLL,
-	ISS_CLK_SYSREG,
 	ISS_CLK_ISP,
 	ISS_CLK_DRC,
 	ISS_CLK_FD,
@@ -265,7 +264,6 @@
 	spinlock_t			slock;
 
 	struct clk			*clocks[ISS_CLKS_MAX];
-	bool				clk_init;
 	void __iomem			*regs;
 	void __iomem			*pmu_regs;
 	int				irq;
diff --git a/drivers/media/platform/exynos4-is/fimc-isp.c b/drivers/media/platform/exynos4-is/fimc-isp.c
index d63947f..7ede30b 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp.c
+++ b/drivers/media/platform/exynos4-is/fimc-isp.c
@@ -138,7 +138,7 @@
 		return 0;
 	}
 
-	mf->colorspace = V4L2_COLORSPACE_JPEG;
+	mf->colorspace = V4L2_COLORSPACE_SRGB;
 
 	mutex_lock(&isp->subdev_lock);
 	__is_get_frame_size(is, &cur_fmt);
@@ -194,7 +194,7 @@
 	v4l2_dbg(1, debug, sd, "%s: pad%d: code: 0x%x, %dx%d\n",
 		 __func__, fmt->pad, mf->code, mf->width, mf->height);
 
-	mf->colorspace = V4L2_COLORSPACE_JPEG;
+	mf->colorspace = V4L2_COLORSPACE_SRGB;
 
 	mutex_lock(&isp->subdev_lock);
 	__isp_subdev_try_format(isp, fmt);
diff --git a/drivers/media/platform/exynos4-is/mipi-csis.c b/drivers/media/platform/exynos4-is/mipi-csis.c
index a2eda9d..254d70f 100644
--- a/drivers/media/platform/exynos4-is/mipi-csis.c
+++ b/drivers/media/platform/exynos4-is/mipi-csis.c
@@ -746,7 +746,7 @@
 	node = v4l2_of_get_next_endpoint(node, NULL);
 	if (!node) {
 		dev_err(&pdev->dev, "No port node at %s\n",
-					node->full_name);
+				pdev->dev.of_node->full_name);
 		return -EINVAL;
 	}
 	/* Get port node and validate MIPI-CSI channel id. */
diff --git a/drivers/media/platform/s3c-camif/camif-core.h b/drivers/media/platform/s3c-camif/camif-core.h
index 261134b..35d2fcd 100644
--- a/drivers/media/platform/s3c-camif/camif-core.h
+++ b/drivers/media/platform/s3c-camif/camif-core.h
@@ -229,7 +229,7 @@
 	unsigned int		state;
 	u16			fmt_flags;
 	u8			id;
-	u8			rotation;
+	u16			rotation;
 	u8			hflip;
 	u8			vflip;
 	unsigned int		offset;
diff --git a/drivers/media/platform/s5p-jpeg/Makefile b/drivers/media/platform/s5p-jpeg/Makefile
index ddc2900..d18cb5e 100644
--- a/drivers/media/platform/s5p-jpeg/Makefile
+++ b/drivers/media/platform/s5p-jpeg/Makefile
@@ -1,2 +1,2 @@
 s5p-jpeg-objs := jpeg-core.o
-obj-$(CONFIG_VIDEO_SAMSUNG_S5P_JPEG) := s5p-jpeg.o
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_JPEG) += s5p-jpeg.o
diff --git a/drivers/media/platform/s5p-mfc/Makefile b/drivers/media/platform/s5p-mfc/Makefile
index 379008c..15f59b3 100644
--- a/drivers/media/platform/s5p-mfc/Makefile
+++ b/drivers/media/platform/s5p-mfc/Makefile
@@ -1,4 +1,4 @@
-obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MFC) := s5p-mfc.o
+obj-$(CONFIG_VIDEO_SAMSUNG_S5P_MFC) += s5p-mfc.o
 s5p-mfc-y += s5p_mfc.o s5p_mfc_intr.o
 s5p-mfc-y += s5p_mfc_dec.o s5p_mfc_enc.o
 s5p-mfc-y += s5p_mfc_ctrl.o s5p_mfc_pm.o
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index 01f9ae0..d12faa6 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -397,7 +397,7 @@
 leave_handle_frame:
 	spin_unlock_irqrestore(&dev->irqlock, flags);
 	if ((ctx->src_queue_cnt == 0 && ctx->state != MFCINST_FINISHING)
-				    || ctx->dst_queue_cnt < ctx->dpb_count)
+				    || ctx->dst_queue_cnt < ctx->pb_count)
 		clear_work_bit(ctx);
 	s5p_mfc_hw_call(dev->mfc_ops, clear_int_flags, dev);
 	wake_up_ctx(ctx, reason, err);
@@ -473,7 +473,7 @@
 
 		s5p_mfc_hw_call(dev->mfc_ops, dec_calc_dpb_size, ctx);
 
-		ctx->dpb_count = s5p_mfc_hw_call(dev->mfc_ops, get_dpb_count,
+		ctx->pb_count = s5p_mfc_hw_call(dev->mfc_ops, get_dpb_count,
 				dev);
 		ctx->mv_count = s5p_mfc_hw_call(dev->mfc_ops, get_mv_count,
 				dev);
@@ -562,7 +562,7 @@
 	struct s5p_mfc_dev *dev = ctx->dev;
 	struct s5p_mfc_buf *mb_entry;
 
-	mfc_debug(2, "Stream completed");
+	mfc_debug(2, "Stream completed\n");
 
 	s5p_mfc_clear_int_flags(dev);
 	ctx->int_type = reason;
@@ -1362,7 +1362,6 @@
 	.port_num	= MFC_NUM_PORTS,
 	.buf_size	= &buf_size_v5,
 	.buf_align	= &mfc_buf_align_v5,
-	.mclk_name	= "sclk_mfc",
 	.fw_name	= "s5p-mfc.fw",
 };
 
@@ -1389,7 +1388,6 @@
 	.port_num	= MFC_NUM_PORTS_V6,
 	.buf_size	= &buf_size_v6,
 	.buf_align	= &mfc_buf_align_v6,
-	.mclk_name      = "aclk_333",
 	.fw_name        = "s5p-mfc-v6.fw",
 };
 
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
index 202d1d7..ef4074c 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_common.h
@@ -138,6 +138,7 @@
 	MFCINST_INIT = 100,
 	MFCINST_GOT_INST,
 	MFCINST_HEAD_PARSED,
+	MFCINST_HEAD_PRODUCED,
 	MFCINST_BUFS_SET,
 	MFCINST_RUNNING,
 	MFCINST_FINISHING,
@@ -231,7 +232,6 @@
 	unsigned int port_num;
 	struct s5p_mfc_buf_size *buf_size;
 	struct s5p_mfc_buf_align *buf_align;
-	char	*mclk_name;
 	char	*fw_name;
 };
 
@@ -438,7 +438,7 @@
 	u32 rc_framerate_num;
 	u32 rc_framerate_denom;
 
-	union {
+	struct {
 		struct s5p_mfc_h264_enc_params h264;
 		struct s5p_mfc_mpeg4_enc_params mpeg4;
 	} codec;
@@ -602,7 +602,7 @@
 	int after_packed_pb;
 	int sei_fp_parse;
 
-	int dpb_count;
+	int pb_count;
 	int total_dpb_count;
 	int mv_count;
 	/* Buffers */
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
index 2e5f30b..dc1fc94 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
@@ -38,7 +38,7 @@
 	dev->fw_virt_addr = dma_alloc_coherent(dev->mem_dev_l, dev->fw_size,
 					&dev->bank1, GFP_KERNEL);
 
-	if (IS_ERR(dev->fw_virt_addr)) {
+	if (IS_ERR_OR_NULL(dev->fw_virt_addr)) {
 		dev->fw_virt_addr = NULL;
 		mfc_err("Allocating bitprocessor buffer failed\n");
 		return -ENOMEM;
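The check above is widened because dma_alloc_coherent() reports failure with a NULL return rather than an ERR_PTR(), so IS_ERR() alone never catches it. A plain NULL test is the common idiom; the hunk keeps IS_ERR_OR_NULL(), which also covers that case. Sketch:

	/* dma_alloc_coherent() returns NULL on failure, never an ERR_PTR. */
	fw_virt = dma_alloc_coherent(dev, fw_size, &fw_dma, GFP_KERNEL);
	if (!fw_virt)
		return -ENOMEM;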
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_debug.h b/drivers/media/platform/s5p-mfc/s5p_mfc_debug.h
index bd5cd4a..8e608f5 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_debug.h
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_debug.h
@@ -30,8 +30,8 @@
 #define mfc_debug(level, fmt, args...)
 #endif
 
-#define mfc_debug_enter() mfc_debug(5, "enter")
-#define mfc_debug_leave() mfc_debug(5, "leave")
+#define mfc_debug_enter() mfc_debug(5, "enter\n")
+#define mfc_debug_leave() mfc_debug(5, "leave\n")
 
 #define mfc_err(fmt, args...)				\
 	do {						\
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
index 4af53bd..00b0703 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_dec.c
@@ -210,11 +210,11 @@
 	/* Context is to decode a frame */
 	if (ctx->src_queue_cnt >= 1 &&
 	    ctx->state == MFCINST_RUNNING &&
-	    ctx->dst_queue_cnt >= ctx->dpb_count)
+	    ctx->dst_queue_cnt >= ctx->pb_count)
 		return 1;
 	/* Context is to return last frame */
 	if (ctx->state == MFCINST_FINISHING &&
-	    ctx->dst_queue_cnt >= ctx->dpb_count)
+	    ctx->dst_queue_cnt >= ctx->pb_count)
 		return 1;
 	/* Context is to set buffers */
 	if (ctx->src_queue_cnt >= 1 &&
@@ -224,7 +224,7 @@
 	/* Resolution change */
 	if ((ctx->state == MFCINST_RES_CHANGE_INIT ||
 		ctx->state == MFCINST_RES_CHANGE_FLUSH) &&
-		ctx->dst_queue_cnt >= ctx->dpb_count)
+		ctx->dst_queue_cnt >= ctx->pb_count)
 		return 1;
 	if (ctx->state == MFCINST_RES_CHANGE_END &&
 		ctx->src_queue_cnt >= 1)
@@ -537,7 +537,7 @@
 			mfc_err("vb2_reqbufs on capture failed\n");
 			return ret;
 		}
-		if (reqbufs->count < ctx->dpb_count) {
+		if (reqbufs->count < ctx->pb_count) {
 			mfc_err("Not enough buffers allocated\n");
 			reqbufs->count = 0;
 			s5p_mfc_clock_on();
@@ -751,7 +751,7 @@
 	case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
 		if (ctx->state >= MFCINST_HEAD_PARSED &&
 		    ctx->state < MFCINST_ABORT) {
-			ctrl->val = ctx->dpb_count;
+			ctrl->val = ctx->pb_count;
 			break;
 		} else if (ctx->state != MFCINST_INIT) {
 			v4l2_err(&dev->v4l2_dev, "Decoding not initialised\n");
@@ -763,7 +763,7 @@
 				S5P_MFC_R2H_CMD_SEQ_DONE_RET, 0);
 		if (ctx->state >= MFCINST_HEAD_PARSED &&
 		    ctx->state < MFCINST_ABORT) {
-			ctrl->val = ctx->dpb_count;
+			ctrl->val = ctx->pb_count;
 		} else {
 			v4l2_err(&dev->v4l2_dev, "Decoding not initialised\n");
 			return -EINVAL;
@@ -924,10 +924,10 @@
 		/* Output plane count is 2 - one for Y and one for CbCr */
 		*plane_count = 2;
 		/* Setup buffer count */
-		if (*buf_count < ctx->dpb_count)
-			*buf_count = ctx->dpb_count;
-		if (*buf_count > ctx->dpb_count + MFC_MAX_EXTRA_DPB)
-			*buf_count = ctx->dpb_count + MFC_MAX_EXTRA_DPB;
+		if (*buf_count < ctx->pb_count)
+			*buf_count = ctx->pb_count;
+		if (*buf_count > ctx->pb_count + MFC_MAX_EXTRA_DPB)
+			*buf_count = ctx->pb_count + MFC_MAX_EXTRA_DPB;
 		if (*buf_count > MFC_MAX_BUFFERS)
 			*buf_count = MFC_MAX_BUFFERS;
 	} else {
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
index 4f6b553..2549967 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_enc.c
@@ -592,7 +592,7 @@
 		return 1;
 	/* context is ready to encode a frame */
 	if ((ctx->state == MFCINST_RUNNING ||
-		ctx->state == MFCINST_HEAD_PARSED) &&
+		ctx->state == MFCINST_HEAD_PRODUCED) &&
 		ctx->src_queue_cnt >= 1 && ctx->dst_queue_cnt >= 1)
 		return 1;
 	/* context is ready to encode remaining frames */
@@ -649,6 +649,7 @@
 	struct s5p_mfc_enc_params *p = &ctx->enc_params;
 	struct s5p_mfc_buf *dst_mb;
 	unsigned long flags;
+	unsigned int enc_pb_count;
 
 	if (p->seq_hdr_mode == V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE) {
 		spin_lock_irqsave(&dev->irqlock, flags);
@@ -661,18 +662,19 @@
 		vb2_buffer_done(dst_mb->b, VB2_BUF_STATE_DONE);
 		spin_unlock_irqrestore(&dev->irqlock, flags);
 	}
-	if (IS_MFCV6(dev)) {
-		ctx->state = MFCINST_HEAD_PARSED; /* for INIT_BUFFER cmd */
-	} else {
+
+	if (!IS_MFCV6(dev)) {
 		ctx->state = MFCINST_RUNNING;
 		if (s5p_mfc_ctx_ready(ctx))
 			set_work_bit_irqsave(ctx);
 		s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
-	}
-
-	if (IS_MFCV6(dev))
-		ctx->dpb_count = s5p_mfc_hw_call(dev->mfc_ops,
+	} else {
+		enc_pb_count = s5p_mfc_hw_call(dev->mfc_ops,
 				get_enc_dpb_count, dev);
+		if (ctx->pb_count < enc_pb_count)
+			ctx->pb_count = enc_pb_count;
+		ctx->state = MFCINST_HEAD_PRODUCED;
+	}
 
 	return 0;
 }
@@ -717,9 +719,9 @@
 
 	slice_type = s5p_mfc_hw_call(dev->mfc_ops, get_enc_slice_type, dev);
 	strm_size = s5p_mfc_hw_call(dev->mfc_ops, get_enc_strm_size, dev);
-	mfc_debug(2, "Encoded slice type: %d", slice_type);
-	mfc_debug(2, "Encoded stream size: %d", strm_size);
-	mfc_debug(2, "Display order: %d",
+	mfc_debug(2, "Encoded slice type: %d\n", slice_type);
+	mfc_debug(2, "Encoded stream size: %d\n", strm_size);
+	mfc_debug(2, "Display order: %d\n",
 		  mfc_read(dev, S5P_FIMV_ENC_SI_PIC_CNT));
 	spin_lock_irqsave(&dev->irqlock, flags);
 	if (slice_type >= 0) {
@@ -1055,15 +1057,13 @@
 		}
 		ctx->capture_state = QUEUE_BUFS_REQUESTED;
 
-		if (!IS_MFCV6(dev)) {
-			ret = s5p_mfc_hw_call(ctx->dev->mfc_ops,
-					alloc_codec_buffers, ctx);
-			if (ret) {
-				mfc_err("Failed to allocate encoding buffers\n");
-				reqbufs->count = 0;
-				ret = vb2_reqbufs(&ctx->vq_dst, reqbufs);
-				return -ENOMEM;
-			}
+		ret = s5p_mfc_hw_call(ctx->dev->mfc_ops,
+				alloc_codec_buffers, ctx);
+		if (ret) {
+			mfc_err("Failed to allocate encoding buffers\n");
+			reqbufs->count = 0;
+			ret = vb2_reqbufs(&ctx->vq_dst, reqbufs);
+			return -ENOMEM;
 		}
 	} else if (reqbufs->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
 		if (ctx->output_state != QUEUE_FREE) {
@@ -1071,6 +1071,19 @@
 							ctx->output_state);
 			return -EINVAL;
 		}
+
+		if (IS_MFCV6(dev)) {
+			/* Check for min encoder buffers */
+			if (ctx->pb_count &&
+				(reqbufs->count < ctx->pb_count)) {
+				reqbufs->count = ctx->pb_count;
+				mfc_debug(2, "Minimum %d output buffers needed\n",
+						ctx->pb_count);
+			} else {
+				ctx->pb_count = reqbufs->count;
+			}
+		}
+
 		ret = vb2_reqbufs(&ctx->vq_src, reqbufs);
 		if (ret != 0) {
 			mfc_err("error in vb2_reqbufs() for E(S)\n");
@@ -1533,14 +1546,14 @@
 
 		spin_lock_irqsave(&dev->irqlock, flags);
 		if (list_empty(&ctx->src_queue)) {
-			mfc_debug(2, "EOS: empty src queue, entering finishing state");
+			mfc_debug(2, "EOS: empty src queue, entering finishing state\n");
 			ctx->state = MFCINST_FINISHING;
 			if (s5p_mfc_ctx_ready(ctx))
 				set_work_bit_irqsave(ctx);
 			spin_unlock_irqrestore(&dev->irqlock, flags);
 			s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
 		} else {
-			mfc_debug(2, "EOS: marking last buffer of stream");
+			mfc_debug(2, "EOS: marking last buffer of stream\n");
 			buf = list_entry(ctx->src_queue.prev,
 						struct s5p_mfc_buf, list);
 			if (buf->flags & MFC_BUF_FLAG_USED)
@@ -1609,9 +1622,9 @@
 			mfc_err("failed to get plane cookie\n");
 			return -EINVAL;
 		}
-		mfc_debug(2, "index: %d, plane[%d] cookie: 0x%08zx",
-				vb->v4l2_buf.index, i,
-				vb2_dma_contig_plane_dma_addr(vb, i));
+		mfc_debug(2, "index: %d, plane[%d] cookie: 0x%08zx\n",
+			  vb->v4l2_buf.index, i,
+			  vb2_dma_contig_plane_dma_addr(vb, i));
 	}
 	return 0;
 }
@@ -1760,11 +1773,27 @@
 	struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv);
 	struct s5p_mfc_dev *dev = ctx->dev;
 
-	v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
+	if (IS_MFCV6(dev) && (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)) {
+
+		if ((ctx->state == MFCINST_GOT_INST) &&
+			(dev->curr_ctx == ctx->num) && dev->hw_lock) {
+			s5p_mfc_wait_for_done_ctx(ctx,
+						S5P_MFC_R2H_CMD_SEQ_DONE_RET,
+						0);
+		}
+
+		if (ctx->src_bufs_cnt < ctx->pb_count) {
+			mfc_err("Need minimum %d OUTPUT buffers\n",
+					ctx->pb_count);
+			return -EINVAL;
+		}
+	}
+
 	/* If context is ready then dev = work->data;schedule it to run */
 	if (s5p_mfc_ctx_ready(ctx))
 		set_work_bit_irqsave(ctx);
 	s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
+
 	return 0;
 }
 
@@ -1920,6 +1949,7 @@
 		if (controls[i].is_volatile && ctx->ctrls[i])
 			ctx->ctrls[i]->flags |= V4L2_CTRL_FLAG_VOLATILE;
 	}
+	v4l2_ctrl_handler_setup(&ctx->ctrl_handler);
 	return 0;
 }
 
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
index 0af05a2..368582b 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
@@ -1275,8 +1275,8 @@
 	spin_unlock_irqrestore(&dev->irqlock, flags);
 	dev->curr_ctx = ctx->num;
 	s5p_mfc_clean_ctx_int_flags(ctx);
-	mfc_debug(2, "encoding buffer with index=%d state=%d",
-			src_mb ? src_mb->b->v4l2_buf.index : -1, ctx->state);
+	mfc_debug(2, "encoding buffer with index=%d state=%d\n",
+		  src_mb ? src_mb->b->v4l2_buf.index : -1, ctx->state);
 	s5p_mfc_encode_one_frame_v5(ctx);
 	return 0;
 }
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
index 7e76fce..66f0d04 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_opr_v6.c
@@ -62,12 +62,6 @@
 	/* NOP */
 }
 
-static int s5p_mfc_get_dec_status_v6(struct s5p_mfc_dev *dev)
-{
-	/* NOP */
-	return -1;
-}
-
 /* Allocate codec buffers */
 static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx)
 {
@@ -167,7 +161,7 @@
 				S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6);
 		ctx->bank1.size =
 			ctx->scratch_buf_size + ctx->tmv_buffer_size +
-			(ctx->dpb_count * (ctx->luma_dpb_size +
+			(ctx->pb_count * (ctx->luma_dpb_size +
 			ctx->chroma_dpb_size + ctx->me_buffer_size));
 		ctx->bank2.size = 0;
 		break;
@@ -181,7 +175,7 @@
 				S5P_FIMV_SCRATCH_BUFFER_ALIGN_V6);
 		ctx->bank1.size =
 			ctx->scratch_buf_size + ctx->tmv_buffer_size +
-			(ctx->dpb_count * (ctx->luma_dpb_size +
+			(ctx->pb_count * (ctx->luma_dpb_size +
 			ctx->chroma_dpb_size + ctx->me_buffer_size));
 		ctx->bank2.size = 0;
 		break;
@@ -198,7 +192,6 @@
 		}
 		BUG_ON(ctx->bank1.dma & ((1 << MFC_BANK1_ALIGN_ORDER) - 1));
 	}
-
 	return 0;
 }
 
@@ -449,8 +442,8 @@
 	WRITEL(addr, S5P_FIMV_E_STREAM_BUFFER_ADDR_V6); /* 16B align */
 	WRITEL(size, S5P_FIMV_E_STREAM_BUFFER_SIZE_V6);
 
-	mfc_debug(2, "stream buf addr: 0x%08lx, size: 0x%d",
-		addr, size);
+	mfc_debug(2, "stream buf addr: 0x%08lx, size: 0x%d\n",
+		  addr, size);
 
 	return 0;
 }
@@ -463,8 +456,8 @@
 	WRITEL(y_addr, S5P_FIMV_E_SOURCE_LUMA_ADDR_V6); /* 256B align */
 	WRITEL(c_addr, S5P_FIMV_E_SOURCE_CHROMA_ADDR_V6);
 
-	mfc_debug(2, "enc src y buf addr: 0x%08lx", y_addr);
-	mfc_debug(2, "enc src c buf addr: 0x%08lx", c_addr);
+	mfc_debug(2, "enc src y buf addr: 0x%08lx\n", y_addr);
+	mfc_debug(2, "enc src c buf addr: 0x%08lx\n", c_addr);
 }
 
 static void s5p_mfc_get_enc_frame_buffer_v6(struct s5p_mfc_ctx *ctx,
@@ -479,8 +472,8 @@
 	enc_recon_y_addr = READL(S5P_FIMV_E_RECON_LUMA_DPB_ADDR_V6);
 	enc_recon_c_addr = READL(S5P_FIMV_E_RECON_CHROMA_DPB_ADDR_V6);
 
-	mfc_debug(2, "recon y addr: 0x%08lx", enc_recon_y_addr);
-	mfc_debug(2, "recon c addr: 0x%08lx", enc_recon_c_addr);
+	mfc_debug(2, "recon y addr: 0x%08lx\n", enc_recon_y_addr);
+	mfc_debug(2, "recon c addr: 0x%08lx\n", enc_recon_c_addr);
 }
 
 /* Set encoding ref & codec buffer */
@@ -497,7 +490,7 @@
 
 	mfc_debug(2, "Buf1: %p (%d)\n", (void *)buf_addr1, buf_size1);
 
-	for (i = 0; i < ctx->dpb_count; i++) {
+	for (i = 0; i < ctx->pb_count; i++) {
 		WRITEL(buf_addr1, S5P_FIMV_E_LUMA_DPB_V6 + (4 * i));
 		buf_addr1 += ctx->luma_dpb_size;
 		WRITEL(buf_addr1, S5P_FIMV_E_CHROMA_DPB_V6 + (4 * i));
@@ -520,7 +513,7 @@
 	buf_size1 -= ctx->tmv_buffer_size;
 
 	mfc_debug(2, "Buf1: %u, buf_size1: %d (ref frames %d)\n",
-			buf_addr1, buf_size1, ctx->dpb_count);
+			buf_addr1, buf_size1, ctx->pb_count);
 	if (buf_size1 < 0) {
 		mfc_debug(2, "Not enough memory has been allocated.\n");
 		return -ENOMEM;
@@ -1431,8 +1424,8 @@
 	src_y_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 0);
 	src_c_addr = vb2_dma_contig_plane_dma_addr(src_mb->b, 1);
 
-	mfc_debug(2, "enc src y addr: 0x%08lx", src_y_addr);
-	mfc_debug(2, "enc src c addr: 0x%08lx", src_c_addr);
+	mfc_debug(2, "enc src y addr: 0x%08lx\n", src_y_addr);
+	mfc_debug(2, "enc src c addr: 0x%08lx\n", src_c_addr);
 
 	s5p_mfc_set_enc_frame_buffer_v6(ctx, src_y_addr, src_c_addr);
 
@@ -1522,22 +1515,6 @@
 	struct s5p_mfc_dev *dev = ctx->dev;
 	int ret;
 
-	ret = s5p_mfc_alloc_codec_buffers_v6(ctx);
-	if (ret) {
-		mfc_err("Failed to allocate encoding buffers.\n");
-		return -ENOMEM;
-	}
-
-	/* Header was generated now starting processing
-	 * First set the reference frame buffers
-	 */
-	if (ctx->capture_state != QUEUE_BUFS_REQUESTED) {
-		mfc_err("It seems that destionation buffers were not\n"
-			"requested.MFC requires that header should be generated\n"
-			"before allocating codec buffer.\n");
-		return -EAGAIN;
-	}
-
 	dev->curr_ctx = ctx->num;
 	s5p_mfc_clean_ctx_int_flags(ctx);
 	ret = s5p_mfc_set_enc_ref_buffer_v6(ctx);
@@ -1582,7 +1559,7 @@
 	mfc_debug(1, "Seting new context to %p\n", ctx);
 	/* Got context to run in ctx */
 	mfc_debug(1, "ctx->dst_queue_cnt=%d ctx->dpb_count=%d ctx->src_queue_cnt=%d\n",
-		ctx->dst_queue_cnt, ctx->dpb_count, ctx->src_queue_cnt);
+		ctx->dst_queue_cnt, ctx->pb_count, ctx->src_queue_cnt);
 	mfc_debug(1, "ctx->state=%d\n", ctx->state);
 	/* Last frame has already been sent to MFC
 	 * Now obtaining frames from MFC buffer */
@@ -1647,7 +1624,7 @@
 		case MFCINST_GOT_INST:
 			s5p_mfc_run_init_enc(ctx);
 			break;
-		case MFCINST_HEAD_PARSED: /* Only for MFC6.x */
+		case MFCINST_HEAD_PRODUCED:
 			ret = s5p_mfc_run_init_enc_buffers(ctx);
 			break;
 		default:
@@ -1730,7 +1707,7 @@
 	return mfc_read(dev, S5P_FIMV_D_DISPLAY_STATUS_V6);
 }
 
-static int s5p_mfc_get_decoded_status_v6(struct s5p_mfc_dev *dev)
+static int s5p_mfc_get_dec_status_v6(struct s5p_mfc_dev *dev)
 {
 	return mfc_read(dev, S5P_FIMV_D_DECODED_STATUS_V6);
 }
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
index 6aa38a5..11d5f1d 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_pm.c
@@ -50,19 +50,6 @@
 		goto err_p_ip_clk;
 	}
 
-	pm->clock = clk_get(&dev->plat_dev->dev, dev->variant->mclk_name);
-	if (IS_ERR(pm->clock)) {
-		mfc_err("Failed to get MFC clock\n");
-		ret = PTR_ERR(pm->clock);
-		goto err_g_ip_clk_2;
-	}
-
-	ret = clk_prepare(pm->clock);
-	if (ret) {
-		mfc_err("Failed to prepare MFC clock\n");
-		goto err_p_ip_clk_2;
-	}
-
 	atomic_set(&pm->power, 0);
 #ifdef CONFIG_PM_RUNTIME
 	pm->device = &dev->plat_dev->dev;
@@ -72,10 +59,6 @@
 	atomic_set(&clk_ref, 0);
 #endif
 	return 0;
-err_p_ip_clk_2:
-	clk_put(pm->clock);
-err_g_ip_clk_2:
-	clk_unprepare(pm->clock_gate);
 err_p_ip_clk:
 	clk_put(pm->clock_gate);
 err_g_ip_clk:
@@ -86,8 +69,6 @@
 {
 	clk_unprepare(pm->clock_gate);
 	clk_put(pm->clock_gate);
-	clk_unprepare(pm->clock);
-	clk_put(pm->clock);
 #ifdef CONFIG_PM_RUNTIME
 	pm_runtime_disable(pm->device);
 #endif
@@ -98,7 +79,7 @@
 	int ret;
 #ifdef CLK_DEBUG
 	atomic_inc(&clk_ref);
-	mfc_debug(3, "+ %d", atomic_read(&clk_ref));
+	mfc_debug(3, "+ %d\n", atomic_read(&clk_ref));
 #endif
 	ret = clk_enable(pm->clock_gate);
 	return ret;
@@ -108,7 +89,7 @@
 {
 #ifdef CLK_DEBUG
 	atomic_dec(&clk_ref);
-	mfc_debug(3, "- %d", atomic_read(&clk_ref));
+	mfc_debug(3, "- %d\n", atomic_read(&clk_ref));
 #endif
 	clk_disable(pm->clock_gate);
 }
diff --git a/drivers/media/platform/sh_veu.c b/drivers/media/platform/sh_veu.c
index 0b32cc3..59a9dee 100644
--- a/drivers/media/platform/sh_veu.c
+++ b/drivers/media/platform/sh_veu.c
@@ -905,11 +905,11 @@
 		if (ftmp.fmt.pix.width != pix->width ||
 		    ftmp.fmt.pix.height != pix->height)
 			return -EINVAL;
-		size = pix->bytesperline ? pix->bytesperline * pix->height :
-			pix->width * pix->height * fmt->depth >> 3;
+		size = pix->bytesperline ? pix->bytesperline * pix->height * fmt->depth / fmt->ydepth :
+			pix->width * pix->height * fmt->depth / fmt->ydepth;
 	} else {
 		vfmt = sh_veu_get_vfmt(veu, vq->type);
-		size = vfmt->bytesperline * vfmt->frame.height;
+		size = vfmt->bytesperline * vfmt->frame.height * vfmt->fmt->depth / vfmt->fmt->ydepth;
 	}
 
 	if (count < 2)
@@ -1033,8 +1033,6 @@
 
 	dev_dbg(veu->dev, "Releasing instance %p\n", veu_file);
 
-	pm_runtime_put(veu->dev);
-
 	if (veu_file == veu->capture) {
 		veu->capture = NULL;
 		vb2_queue_release(v4l2_m2m_get_vq(veu->m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE));
@@ -1050,6 +1048,8 @@
 		veu->m2m_ctx = NULL;
 	}
 
+	pm_runtime_put(veu->dev);
+
 	kfree(veu_file);
 
 	return 0;
@@ -1138,10 +1138,7 @@
 
 	veu->xaction++;
 
-	if (!veu->aborting)
-		return IRQ_WAKE_THREAD;
-
-	return IRQ_HANDLED;
+	return IRQ_WAKE_THREAD;
 }
 
 static int sh_veu_probe(struct platform_device *pdev)
diff --git a/drivers/media/platform/soc_camera/soc_camera.c b/drivers/media/platform/soc_camera/soc_camera.c
index eea832c..3a4efbd 100644
--- a/drivers/media/platform/soc_camera/soc_camera.c
+++ b/drivers/media/platform/soc_camera/soc_camera.c
@@ -643,9 +643,9 @@
 
 		if (ici->ops->init_videobuf2)
 			vb2_queue_release(&icd->vb2_vidq);
-		ici->ops->remove(icd);
-
 		__soc_camera_power_off(icd);
+
+		ici->ops->remove(icd);
 	}
 
 	if (icd->streamer == file)
diff --git a/drivers/media/radio/Kconfig b/drivers/media/radio/Kconfig
index c0beee2..d529ba7 100644
--- a/drivers/media/radio/Kconfig
+++ b/drivers/media/radio/Kconfig
@@ -22,6 +22,7 @@
 	tristate "Silicon Laboratories Si476x I2C FM Radio"
 	depends on I2C && VIDEO_V4L2
 	depends on MFD_SI476X_CORE
+	depends on SND_SOC
 	select SND_SOC_SI476X
 	---help---
 	  Choose Y here if you have this FM radio chip.
diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
index 9430c6a..9dc8baf 100644
--- a/drivers/media/radio/radio-si476x.c
+++ b/drivers/media/radio/radio-si476x.c
@@ -44,7 +44,7 @@
 
 #define FREQ_MUL (10000000 / 625)
 
-#define SI476X_PHDIV_STATUS_LINK_LOCKED(status) (0b10000000 & (status))
+#define SI476X_PHDIV_STATUS_LINK_LOCKED(status) (0x80 & (status))
 
 #define DRIVER_NAME "si476x-radio"
 #define DRIVER_CARD "SI476x AM/FM Receiver"
diff --git a/drivers/media/tuners/Kconfig b/drivers/media/tuners/Kconfig
index f6768ca..15665de 100644
--- a/drivers/media/tuners/Kconfig
+++ b/drivers/media/tuners/Kconfig
@@ -1,23 +1,3 @@
-config MEDIA_ATTACH
-	bool "Load and attach frontend and tuner driver modules as needed"
-	depends on MEDIA_ANALOG_TV_SUPPORT || MEDIA_DIGITAL_TV_SUPPORT || MEDIA_RADIO_SUPPORT
-	depends on MODULES
-	default y if !EXPERT
-	help
-	  Remove the static dependency of DVB card drivers on all
-	  frontend modules for all possible card variants. Instead,
-	  allow the card drivers to only load the frontend modules
-	  they require.
-
-	  Also, tuner module will automatically load a tuner driver
-	  when needed, for analog mode.
-
-	  This saves several KBytes of memory.
-
-	  Note: You will need module-init-tools v3.2 or later for this feature.
-
-	  If unsure say Y.
-
 # Analog TV tuners, auto-loaded via tuner.ko
 config MEDIA_TUNER
 	tristate
diff --git a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
index 22015fe..2cc8ec7 100644
--- a/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
+++ b/drivers/media/usb/dvb-usb-v2/rtl28xxu.c
@@ -376,7 +376,7 @@
 	struct rtl28xxu_req req_mxl5007t = {0xd9c0, CMD_I2C_RD, 1, buf};
 	struct rtl28xxu_req req_e4000 = {0x02c8, CMD_I2C_RD, 1, buf};
 	struct rtl28xxu_req req_tda18272 = {0x00c0, CMD_I2C_RD, 2, buf};
-	struct rtl28xxu_req req_r820t = {0x0034, CMD_I2C_RD, 5, buf};
+	struct rtl28xxu_req req_r820t = {0x0034, CMD_I2C_RD, 1, buf};
 
 	dev_dbg(&d->udev->dev, "%s:\n", __func__);
 
@@ -481,9 +481,9 @@
 		goto found;
 	}
 
-	/* check R820T by reading tuner stats at I2C addr 0x1a */
+	/* check R820T ID register; reg=00 val=69 */
 	ret = rtl28xxu_ctrl_msg(d, &req_r820t);
-	if (ret == 0) {
+	if (ret == 0 && buf[0] == 0x69) {
 		priv->tuner = TUNER_RTL2832_R820T;
 		priv->tuner_name = "R820T";
 		goto found;
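The rtl28xxu hunk above tightens tuner probing: instead of treating any ACKed I2C transfer as proof of an R820T, it reads one byte from register 0x00 and requires the documented ID value 0x69. The generic shape of that check, with the read helper being a placeholder:

	/* Accept the tuner only when the chip-ID register answers correctly. */
	ret = demo_i2c_read(client, 0x00, &id);
	if (ret == 0 && id == 0x69)
		priv->tuner = TUNER_RTL2832_R820T;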
diff --git a/drivers/media/usb/gspca/sonixb.c b/drivers/media/usb/gspca/sonixb.c
index 3fe207e..d7ff3b9 100644
--- a/drivers/media/usb/gspca/sonixb.c
+++ b/drivers/media/usb/gspca/sonixb.c
@@ -1159,6 +1159,13 @@
 			regs[0x01] = 0x44; /* Select 24 Mhz clock */
 			regs[0x12] = 0x02; /* Set hstart to 2 */
 		}
+		break;
+	case SENSOR_PAS202:
+		/* For some unknown reason we need to increase hstart by 1 on
+		   the sn9c103, otherwise we get wrong colors (bayer shift). */
+		if (sd->bridge == BRIDGE_103)
+			regs[0x12] += 1;
+		break;
 	}
 	/* Disable compression when the raw bayer format has been selected */
 	if (cam->cam_mode[gspca_dev->curr_mode].priv & MODE_RAW)
diff --git a/drivers/media/usb/pwc/pwc.h b/drivers/media/usb/pwc/pwc.h
index 7a6a0d3..81b017a 100644
--- a/drivers/media/usb/pwc/pwc.h
+++ b/drivers/media/usb/pwc/pwc.h
@@ -226,7 +226,7 @@
 	struct list_head queued_bufs;
 	spinlock_t queued_bufs_lock; /* Protects queued_bufs */
 
-	/* Note if taking both locks v4l2_lock must always be locked first! */
+	/* If taking both locks, vb_queue_lock must always be locked first! */
 	struct mutex v4l2_lock;      /* Protects everything else */
 	struct mutex vb_queue_lock;  /* Protects vb_queue and capt_file */
 
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index ebb8e48..fccd08b 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -1835,6 +1835,8 @@
 {
 	if (V4L2_CTRL_ID2CLASS(ctrl->id) == V4L2_CTRL_CLASS_FM_TX)
 		return true;
+	if (V4L2_CTRL_ID2CLASS(ctrl->id) == V4L2_CTRL_CLASS_FM_RX)
+		return true;
 	switch (ctrl->id) {
 	case V4L2_CID_AUDIO_MUTE:
 	case V4L2_CID_AUDIO_VOLUME:
diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
index f81bda1..7658586 100644
--- a/drivers/media/v4l2-core/v4l2-ioctl.c
+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
@@ -243,7 +243,6 @@
 	const struct v4l2_vbi_format *vbi;
 	const struct v4l2_sliced_vbi_format *sliced;
 	const struct v4l2_window *win;
-	const struct v4l2_clip *clip;
 	unsigned i;
 
 	pr_cont("type=%s", prt_names(p->type, v4l2_type_names));
@@ -253,7 +252,7 @@
 		pix = &p->fmt.pix;
 		pr_cont(", width=%u, height=%u, "
 			"pixelformat=%c%c%c%c, field=%s, "
-			"bytesperline=%u sizeimage=%u, colorspace=%d\n",
+			"bytesperline=%u, sizeimage=%u, colorspace=%d\n",
 			pix->width, pix->height,
 			(pix->pixelformat & 0xff),
 			(pix->pixelformat >>  8) & 0xff,
@@ -284,20 +283,14 @@
 	case V4L2_BUF_TYPE_VIDEO_OVERLAY:
 	case V4L2_BUF_TYPE_VIDEO_OUTPUT_OVERLAY:
 		win = &p->fmt.win;
-		pr_cont(", wxh=%dx%d, x,y=%d,%d, field=%s, "
-			"chromakey=0x%08x, bitmap=%p, "
-			"global_alpha=0x%02x\n",
-			win->w.width, win->w.height,
-			win->w.left, win->w.top,
+		/* Note: we can't print the clip list here since the clips
+		 * pointer is a userspace pointer, not a kernelspace
+		 * pointer. */
+		pr_cont(", wxh=%dx%d, x,y=%d,%d, field=%s, chromakey=0x%08x, clipcount=%u, clips=%p, bitmap=%p, global_alpha=0x%02x\n",
+			win->w.width, win->w.height, win->w.left, win->w.top,
 			prt_names(win->field, v4l2_field_names),
-			win->chromakey, win->bitmap, win->global_alpha);
-		clip = win->clips;
-		for (i = 0; i < win->clipcount; i++) {
-			printk(KERN_DEBUG "clip %u: wxh=%dx%d, x,y=%d,%d\n",
-					i, clip->c.width, clip->c.height,
-					clip->c.left, clip->c.top);
-			clip = clip->next;
-		}
+			win->chromakey, win->clipcount, win->clips,
+			win->bitmap, win->global_alpha);
 		break;
 	case V4L2_BUF_TYPE_VBI_CAPTURE:
 	case V4L2_BUF_TYPE_VBI_OUTPUT:
@@ -332,7 +325,7 @@
 
 	pr_cont("capability=0x%x, flags=0x%x, base=0x%p, width=%u, "
 		"height=%u, pixelformat=%c%c%c%c, "
-		"bytesperline=%u sizeimage=%u, colorspace=%d\n",
+		"bytesperline=%u, sizeimage=%u, colorspace=%d\n",
 			p->capability, p->flags, p->base,
 			p->fmt.width, p->fmt.height,
 			(p->fmt.pixelformat & 0xff),
@@ -353,7 +346,7 @@
 	const struct v4l2_modulator *p = arg;
 
 	if (write_only)
-		pr_cont("index=%u, txsubchans=0x%x", p->index, p->txsubchans);
+		pr_cont("index=%u, txsubchans=0x%x\n", p->index, p->txsubchans);
 	else
 		pr_cont("index=%u, name=%.*s, capability=0x%x, "
 			"rangelow=%u, rangehigh=%u, txsubchans=0x%x\n",
@@ -445,13 +438,13 @@
 		for (i = 0; i < p->length; ++i) {
 			plane = &p->m.planes[i];
 			printk(KERN_DEBUG
-				"plane %d: bytesused=%d, data_offset=0x%08x "
+				"plane %d: bytesused=%d, data_offset=0x%08x, "
 				"offset/userptr=0x%lx, length=%d\n",
 				i, plane->bytesused, plane->data_offset,
 				plane->m.userptr, plane->length);
 		}
 	} else {
-		pr_cont("bytesused=%d, offset/userptr=0x%lx, length=%d\n",
+		pr_cont(", bytesused=%d, offset/userptr=0x%lx, length=%d\n",
 			p->bytesused, p->m.userptr, p->length);
 	}
 
@@ -504,6 +497,8 @@
 			c->capability, c->outputmode,
 			c->timeperframe.numerator, c->timeperframe.denominator,
 			c->extendedmode, c->writebuffers);
+	} else {
+		pr_cont("\n");
 	}
 }
 
@@ -734,11 +729,11 @@
 			p->type);
 	switch (p->type) {
 	case V4L2_FRMSIZE_TYPE_DISCRETE:
-		pr_cont(" wxh=%ux%u\n",
+		pr_cont(", wxh=%ux%u\n",
 			p->discrete.width, p->discrete.height);
 		break;
 	case V4L2_FRMSIZE_TYPE_STEPWISE:
-		pr_cont(" min=%ux%u, max=%ux%u, step=%ux%u\n",
+		pr_cont(", min=%ux%u, max=%ux%u, step=%ux%u\n",
 				p->stepwise.min_width,  p->stepwise.min_height,
 				p->stepwise.step_width, p->stepwise.step_height,
 				p->stepwise.max_width,  p->stepwise.max_height);
@@ -764,12 +759,12 @@
 			p->width, p->height, p->type);
 	switch (p->type) {
 	case V4L2_FRMIVAL_TYPE_DISCRETE:
-		pr_cont(" fps=%d/%d\n",
+		pr_cont(", fps=%d/%d\n",
 				p->discrete.numerator,
 				p->discrete.denominator);
 		break;
 	case V4L2_FRMIVAL_TYPE_STEPWISE:
-		pr_cont(" min=%d/%d, max=%d/%d, step=%d/%d\n",
+		pr_cont(", min=%d/%d, max=%d/%d, step=%d/%d\n",
 				p->stepwise.min.numerator,
 				p->stepwise.min.denominator,
 				p->stepwise.max.numerator,
@@ -807,8 +802,8 @@
 			pr_cont("value64=%lld, ", c->value64);
 		else
 			pr_cont("value=%d, ", c->value);
-		pr_cont("flags=0x%x, minimum=%d, maximum=%d, step=%d,"
-				" default_value=%d\n",
+		pr_cont("flags=0x%x, minimum=%d, maximum=%d, step=%d, "
+			"default_value=%d\n",
 			c->flags, c->minimum, c->maximum,
 			c->step, c->default_value);
 		break;
@@ -845,7 +840,7 @@
 	const struct v4l2_frequency_band *p = arg;
 
 	pr_cont("tuner=%u, type=%u, index=%u, capability=0x%x, "
-			"rangelow=%u, rangehigh=%u, modulation=0x%x\n",
+		"rangelow=%u, rangehigh=%u, modulation=0x%x\n",
 			p->tuner, p->type, p->index,
 			p->capability, p->rangelow,
 			p->rangehigh, p->modulation);
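The v4l2-ioctl hunk that removes the clip-list loop does so because win->clips is a userspace pointer; dereferencing it directly in the kernel is not safe, so the debug helper now only prints the pointer value and clip count. If the rectangles ever had to be inspected from kernel code they would first have to be copied in, roughly like this (sketch only):

	struct v4l2_clip clip;

	if (win->clipcount &&
	    copy_from_user(&clip, win->clips, sizeof(clip)) == 0)
		pr_debug("clip 0: %ux%u@%d,%d\n",
			 clip.c.width, clip.c.height, clip.c.left, clip.c.top);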
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index 66f599f..e96497f 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -205,7 +205,7 @@
 static void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx)
 {
 	struct v4l2_m2m_dev *m2m_dev;
-	unsigned long flags_job, flags;
+	unsigned long flags_job, flags_out, flags_cap;
 
 	m2m_dev = m2m_ctx->m2m_dev;
 	dprintk("Trying to schedule a job for m2m_ctx: %p\n", m2m_ctx);
@@ -223,23 +223,26 @@
 		return;
 	}
 
-	spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
+	spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
 	if (list_empty(&m2m_ctx->out_q_ctx.rdy_queue)) {
-		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
+		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
+					flags_out);
 		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
 		dprintk("No input buffers available\n");
 		return;
 	}
-	spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
+	spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
 	if (list_empty(&m2m_ctx->cap_q_ctx.rdy_queue)) {
-		spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
-		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
+		spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock,
+					flags_cap);
+		spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock,
+					flags_out);
 		spin_unlock_irqrestore(&m2m_dev->job_spinlock, flags_job);
 		dprintk("No output buffers available\n");
 		return;
 	}
-	spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags);
-	spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags);
+	spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags_cap);
+	spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags_out);
 
 	if (m2m_dev->m2m_ops->job_ready
 		&& (!m2m_dev->m2m_ops->job_ready(m2m_ctx->priv))) {
@@ -372,6 +375,20 @@
 EXPORT_SYMBOL_GPL(v4l2_m2m_dqbuf);
 
 /**
+ * v4l2_m2m_create_bufs() - create a source or destination buffer, depending
+ * on the type
+ */
+int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+			 struct v4l2_create_buffers *create)
+{
+	struct vb2_queue *vq;
+
+	vq = v4l2_m2m_get_vq(m2m_ctx, create->format.type);
+	return vb2_create_bufs(vq, create);
+}
+EXPORT_SYMBOL_GPL(v4l2_m2m_create_bufs);
+
+/**
  * v4l2_m2m_expbuf() - export a source or destination buffer, depending on
  * the type
  */
@@ -486,8 +503,10 @@
 	if (m2m_ctx->m2m_dev->m2m_ops->unlock)
 		m2m_ctx->m2m_dev->m2m_ops->unlock(m2m_ctx->priv);
 
-	poll_wait(file, &src_q->done_wq, wait);
-	poll_wait(file, &dst_q->done_wq, wait);
+	if (list_empty(&src_q->done_list))
+		poll_wait(file, &src_q->done_wq, wait);
+	if (list_empty(&dst_q->done_list))
+		poll_wait(file, &dst_q->done_wq, wait);
 
 	if (m2m_ctx->m2m_dev->m2m_ops->lock)
 		m2m_ctx->m2m_dev->m2m_ops->lock(m2m_ctx->priv);
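The flags_out/flags_cap split in v4l2_m2m_try_schedule() exists because each spin_lock_irqsave() stores the interrupt state it observed into its flags argument, and the matching spin_unlock_irqrestore() must get exactly that value back. Reusing one variable for a nested lock overwrites the state saved for the outer lock, so the outer restore would put back the wrong (interrupts-disabled) state. Minimal illustration:

	unsigned long flags_out, flags_cap;

	spin_lock_irqsave(&out_lock, flags_out);
	spin_lock_irqsave(&cap_lock, flags_cap);	/* inner lock: own flags */
	/* ... inspect both ready queues ... */
	spin_unlock_irqrestore(&cap_lock, flags_cap);
	spin_unlock_irqrestore(&out_lock, flags_out);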
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 7d833ee..e3bdc3b 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -2014,7 +2014,8 @@
 	if (list_empty(&q->queued_list))
 		return res | POLLERR;
 
-	poll_wait(file, &q->done_wq, wait);
+	if (list_empty(&q->done_list))
+		poll_wait(file, &q->done_wq, wait);
 
 	/*
 	 * Take first buffer available for dequeuing.
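The videobuf2 and mem2mem poll changes above only call poll_wait() when the done list is empty: poll_wait() merely registers the file on the wait queue, and if a finished buffer is already available the poll can report readiness immediately without adding a wait entry (and without the wakeup that entry would later cost). The pattern in isolation, with q as a placeholder queue:

	static unsigned int demo_poll(struct file *file, poll_table *wait)
	{
		unsigned int res = 0;

		if (list_empty(&q->done_list))
			poll_wait(file, &q->done_wq, wait);

		if (!list_empty(&q->done_list))
			res = POLLIN | POLLRDNORM;

		return res;
	}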
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c
index 721b918..4b93ed4 100644
--- a/drivers/mfd/tps6586x.c
+++ b/drivers/mfd/tps6586x.c
@@ -107,7 +107,7 @@
 		.name = "tps6586x-gpio",
 	},
 	{
-		.name = "tps6586x-pmic",
+		.name = "tps6586x-regulator",
 	},
 	{
 		.name = "tps6586x-rtc",
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 713d89f..f580d30 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -197,6 +197,8 @@
 {
 	dev_dbg(&dev->pdev->dev, "stopping the device.\n");
 
+	flush_scheduled_work();
+
 	mutex_lock(&dev->device_lock);
 
 	cancel_delayed_work(&dev->timer_work);
@@ -210,8 +212,6 @@
 
 	mutex_unlock(&dev->device_lock);
 
-	flush_scheduled_work();
-
 	mei_watchdog_unregister(dev);
 }
 EXPORT_SYMBOL_GPL(mei_stop);
diff --git a/drivers/misc/mei/nfc.c b/drivers/misc/mei/nfc.c
index 3adf8a7..d0c6907 100644
--- a/drivers/misc/mei/nfc.c
+++ b/drivers/misc/mei/nfc.c
@@ -142,6 +142,8 @@
 		mei_cl_unlink(ndev->cl_info);
 		kfree(ndev->cl_info);
 	}
+
+	memset(ndev, 0, sizeof(struct mei_nfc_dev));
 }
 
 static int mei_nfc_build_bus_name(struct mei_nfc_dev *ndev)
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index a727464..0f26832 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -325,6 +325,7 @@
 
 	mutex_lock(&dev->device_lock);
 	dev->dev_state = MEI_DEV_POWER_UP;
+	mei_clear_interrupts(dev);
 	mei_reset(dev, 1);
 	mutex_unlock(&dev->device_lock);
 
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 29b846c..f975696 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -764,8 +764,8 @@
 	struct net_device *bond_dev, *vlan_dev, *upper_dev;
 	struct vlan_entry *vlan;
 
-	rcu_read_lock();
 	read_lock(&bond->lock);
+	rcu_read_lock();
 
 	bond_dev = bond->dev;
 
@@ -787,12 +787,19 @@
 		if (vlan_dev)
 			__bond_resend_igmp_join_requests(vlan_dev);
 	}
-
-	if (--bond->igmp_retrans > 0)
-		queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
-
-	read_unlock(&bond->lock);
 	rcu_read_unlock();
+
+	/* We use curr_slave_lock to protect against concurrent access to
+	 * igmp_retrans from multiple running instances of this function and
+	 * bond_change_active_slave
+	 */
+	write_lock_bh(&bond->curr_slave_lock);
+	if (bond->igmp_retrans > 1) {
+		bond->igmp_retrans--;
+		queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5);
+	}
+	write_unlock_bh(&bond->curr_slave_lock);
+	read_unlock(&bond->lock);
 }
 
 static void bond_resend_igmp_join_requests_delayed(struct work_struct *work)
@@ -1957,6 +1964,10 @@
 
 err_undo_flags:
 	bond_compute_features(bond);
+	/* Enslaving the first slave has failed and we need to fix the master's MAC */
+	if (bond->slave_cnt == 0 &&
+	    ether_addr_equal(bond_dev->dev_addr, slave_dev->dev_addr))
+		eth_hw_addr_random(bond_dev);
 
 	return res;
 }
@@ -2402,7 +2413,8 @@
 
 			pr_info("%s: link status definitely up for interface %s, %u Mbps %s duplex.\n",
 				bond->dev->name, slave->dev->name,
-				slave->speed, slave->duplex ? "full" : "half");
+				slave->speed == SPEED_UNKNOWN ? 0 : slave->speed,
+				slave->duplex ? "full" : "half");
 
 			/* notify ad that the link status has changed */
 			if (bond->params.mode == BOND_MODE_8023AD)
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 2baec24..f989e15 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -225,7 +225,7 @@
 	rwlock_t curr_slave_lock;
 	u8	 send_peer_notif;
 	s8	 setup_by_slave;
-	s8       igmp_retrans;
+	u8       igmp_retrans;
 #ifdef CONFIG_PROC_FS
 	struct   proc_dir_entry *proc_entry;
 	char     proc_file_name[IFNAMSIZ];
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index 6e15ef0..cbd388e 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -977,7 +977,7 @@
 	err = usb_8dev_cmd_version(priv, &version);
 	if (err) {
 		netdev_err(netdev, "can't get firmware version\n");
-		goto cleanup_cmd_msg_buffer;
+		goto cleanup_unregister_candev;
 	} else {
 		netdev_info(netdev,
 			 "firmware: %d.%d, hardware: %d.%d\n",
@@ -989,6 +989,9 @@
 
 	return 0;
 
+cleanup_unregister_candev:
+	unregister_netdev(priv->netdev);
+
 cleanup_cmd_msg_buffer:
 	kfree(priv->cmd_msg_buffer);
 
diff --git a/drivers/net/ethernet/atheros/Kconfig b/drivers/net/ethernet/atheros/Kconfig
index 36d6abd..ad6aa1e9 100644
--- a/drivers/net/ethernet/atheros/Kconfig
+++ b/drivers/net/ethernet/atheros/Kconfig
@@ -67,4 +67,22 @@
 	  To compile this driver as a module, choose M here.  The module
 	  will be called atl1c.
 
+config ALX
+	tristate "Qualcomm Atheros AR816x/AR817x support"
+	depends on PCI
+	select CRC32
+	select NET_CORE
+	select MDIO
+	help
+	  This driver supports the Qualcomm Atheros L1F ethernet adapter,
+	  i.e. the following chipsets:
+
+	  1969:1091 - AR8161 Gigabit Ethernet
+	  1969:1090 - AR8162 Fast Ethernet
+	  1969:10A1 - AR8171 Gigabit Ethernet
+	  1969:10A0 - AR8172 Fast Ethernet
+
+	  To compile this driver as a module, choose M here.  The module
+	  will be called alx.
+
 endif # NET_VENDOR_ATHEROS
diff --git a/drivers/net/ethernet/atheros/Makefile b/drivers/net/ethernet/atheros/Makefile
index e7e76fb..5cf1c65 100644
--- a/drivers/net/ethernet/atheros/Makefile
+++ b/drivers/net/ethernet/atheros/Makefile
@@ -6,3 +6,4 @@
 obj-$(CONFIG_ATL2) += atlx/
 obj-$(CONFIG_ATL1E) += atl1e/
 obj-$(CONFIG_ATL1C) += atl1c/
+obj-$(CONFIG_ALX) += alx/
diff --git a/drivers/net/ethernet/atheros/alx/Makefile b/drivers/net/ethernet/atheros/alx/Makefile
new file mode 100644
index 0000000..5901fa4
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_ALX) += alx.o
+alx-objs := main.o ethtool.o hw.o
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/ethernet/atheros/alx/alx.h b/drivers/net/ethernet/atheros/alx/alx.h
new file mode 100644
index 0000000..50b3ae2
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/alx.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
+ *
+ *  This file is free software: you may copy, redistribute and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation, either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ *  This file is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _ALX_H_
+#define _ALX_H_
+
+#include <linux/types.h>
+#include <linux/etherdevice.h>
+#include <linux/dma-mapping.h>
+#include <linux/spinlock.h>
+#include "hw.h"
+
+#define ALX_WATCHDOG_TIME   (5 * HZ)
+
+struct alx_buffer {
+	struct sk_buff *skb;
+	DEFINE_DMA_UNMAP_ADDR(dma);
+	DEFINE_DMA_UNMAP_LEN(size);
+};
+
+struct alx_rx_queue {
+	struct alx_rrd *rrd;
+	dma_addr_t rrd_dma;
+
+	struct alx_rfd *rfd;
+	dma_addr_t rfd_dma;
+
+	struct alx_buffer *bufs;
+
+	u16 write_idx, read_idx;
+	u16 rrd_read_idx;
+};
+#define ALX_RX_ALLOC_THRESH	32
+
+struct alx_tx_queue {
+	struct alx_txd *tpd;
+	dma_addr_t tpd_dma;
+	struct alx_buffer *bufs;
+	u16 write_idx, read_idx;
+};
+
+#define ALX_DEFAULT_TX_WORK 128
+
+enum alx_device_quirks {
+	ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG = BIT(0),
+};
+
+struct alx_priv {
+	struct net_device *dev;
+
+	struct alx_hw hw;
+
+	/* all descriptor memory */
+	struct {
+		dma_addr_t dma;
+		void *virt;
+		int size;
+	} descmem;
+
+	/* protect int_mask updates */
+	spinlock_t irq_lock;
+	u32 int_mask;
+
+	int tx_ringsz;
+	int rx_ringsz;
+	int rxbuf_size;
+
+	struct napi_struct napi;
+	struct alx_tx_queue txq;
+	struct alx_rx_queue rxq;
+
+	struct work_struct link_check_wk;
+	struct work_struct reset_wk;
+
+	u16 msg_enable;
+
+	bool msi;
+};
+
+extern const struct ethtool_ops alx_ethtool_ops;
+extern const char alx_drv_name[];
+
+#endif
diff --git a/drivers/net/ethernet/atheros/alx/ethtool.c b/drivers/net/ethernet/atheros/alx/ethtool.c
new file mode 100644
index 0000000..6fa2aec
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/ethtool.c
@@ -0,0 +1,272 @@
+/*
+ * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
+ *
+ *  This file is free software: you may copy, redistribute and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation, either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ *  This file is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mdio.h>
+#include <linux/interrupt.h>
+#include <asm/byteorder.h>
+
+#include "alx.h"
+#include "reg.h"
+#include "hw.h"
+
+
+static int alx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+{
+	struct alx_priv *alx = netdev_priv(netdev);
+	struct alx_hw *hw = &alx->hw;
+
+	ecmd->supported = SUPPORTED_10baseT_Half |
+			  SUPPORTED_10baseT_Full |
+			  SUPPORTED_100baseT_Half |
+			  SUPPORTED_100baseT_Full |
+			  SUPPORTED_Autoneg |
+			  SUPPORTED_TP |
+			  SUPPORTED_Pause;
+	if (alx_hw_giga(hw))
+		ecmd->supported |= SUPPORTED_1000baseT_Full;
+
+	ecmd->advertising = ADVERTISED_TP;
+	if (hw->adv_cfg & ADVERTISED_Autoneg)
+		ecmd->advertising |= hw->adv_cfg;
+
+	ecmd->port = PORT_TP;
+	ecmd->phy_address = 0;
+	if (hw->adv_cfg & ADVERTISED_Autoneg)
+		ecmd->autoneg = AUTONEG_ENABLE;
+	else
+		ecmd->autoneg = AUTONEG_DISABLE;
+	ecmd->transceiver = XCVR_INTERNAL;
+
+	if (hw->flowctrl & ALX_FC_ANEG && hw->adv_cfg & ADVERTISED_Autoneg) {
+		if (hw->flowctrl & ALX_FC_RX) {
+			ecmd->advertising |= ADVERTISED_Pause;
+
+			if (!(hw->flowctrl & ALX_FC_TX))
+				ecmd->advertising |= ADVERTISED_Asym_Pause;
+		} else if (hw->flowctrl & ALX_FC_TX) {
+			ecmd->advertising |= ADVERTISED_Asym_Pause;
+		}
+	}
+
+	if (hw->link_speed != SPEED_UNKNOWN) {
+		ethtool_cmd_speed_set(ecmd,
+				      hw->link_speed - hw->link_speed % 10);
+		ecmd->duplex = hw->link_speed % 10;
+	} else {
+		ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+		ecmd->duplex = DUPLEX_UNKNOWN;
+	}
+
+	return 0;
+}
+
+static int alx_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
+{
+	struct alx_priv *alx = netdev_priv(netdev);
+	struct alx_hw *hw = &alx->hw;
+	u32 adv_cfg;
+
+	ASSERT_RTNL();
+
+	if (ecmd->autoneg == AUTONEG_ENABLE) {
+		if (ecmd->advertising & ADVERTISED_1000baseT_Half)
+			return -EINVAL;
+		adv_cfg = ecmd->advertising | ADVERTISED_Autoneg;
+	} else {
+		int speed = ethtool_cmd_speed(ecmd);
+
+		switch (speed + ecmd->duplex) {
+		case SPEED_10 + DUPLEX_HALF:
+			adv_cfg = ADVERTISED_10baseT_Half;
+			break;
+		case SPEED_10 + DUPLEX_FULL:
+			adv_cfg = ADVERTISED_10baseT_Full;
+			break;
+		case SPEED_100 + DUPLEX_HALF:
+			adv_cfg = ADVERTISED_100baseT_Half;
+			break;
+		case SPEED_100 + DUPLEX_FULL:
+			adv_cfg = ADVERTISED_100baseT_Full;
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+
+	hw->adv_cfg = adv_cfg;
+	return alx_setup_speed_duplex(hw, adv_cfg, hw->flowctrl);
+}
+
+static void alx_get_pauseparam(struct net_device *netdev,
+			       struct ethtool_pauseparam *pause)
+{
+	struct alx_priv *alx = netdev_priv(netdev);
+	struct alx_hw *hw = &alx->hw;
+
+	if (hw->flowctrl & ALX_FC_ANEG &&
+	    hw->adv_cfg & ADVERTISED_Autoneg)
+		pause->autoneg = AUTONEG_ENABLE;
+	else
+		pause->autoneg = AUTONEG_DISABLE;
+
+	if (hw->flowctrl & ALX_FC_TX)
+		pause->tx_pause = 1;
+	else
+		pause->tx_pause = 0;
+
+	if (hw->flowctrl & ALX_FC_RX)
+		pause->rx_pause = 1;
+	else
+		pause->rx_pause = 0;
+}
+
+
+static int alx_set_pauseparam(struct net_device *netdev,
+			      struct ethtool_pauseparam *pause)
+{
+	struct alx_priv *alx = netdev_priv(netdev);
+	struct alx_hw *hw = &alx->hw;
+	int err = 0;
+	bool reconfig_phy = false;
+	u8 fc = 0;
+
+	if (pause->tx_pause)
+		fc |= ALX_FC_TX;
+	if (pause->rx_pause)
+		fc |= ALX_FC_RX;
+	if (pause->autoneg)
+		fc |= ALX_FC_ANEG;
+
+	ASSERT_RTNL();
+
+	/* restart auto-neg for auto-mode */
+	if (hw->adv_cfg & ADVERTISED_Autoneg) {
+		if (!((fc ^ hw->flowctrl) & ALX_FC_ANEG))
+			reconfig_phy = true;
+		if (fc & hw->flowctrl & ALX_FC_ANEG &&
+		    (fc ^ hw->flowctrl) & (ALX_FC_RX | ALX_FC_TX))
+			reconfig_phy = true;
+	}
+
+	if (reconfig_phy) {
+		err = alx_setup_speed_duplex(hw, hw->adv_cfg, fc);
+		return err;
+	}
+
+	/* flow control on mac */
+	if ((fc ^ hw->flowctrl) & (ALX_FC_RX | ALX_FC_TX))
+		alx_cfg_mac_flowcontrol(hw, fc);
+
+	hw->flowctrl = fc;
+
+	return 0;
+}
+
+static u32 alx_get_msglevel(struct net_device *netdev)
+{
+	struct alx_priv *alx = netdev_priv(netdev);
+
+	return alx->msg_enable;
+}
+
+static void alx_set_msglevel(struct net_device *netdev, u32 data)
+{
+	struct alx_priv *alx = netdev_priv(netdev);
+
+	alx->msg_enable = data;
+}
+
+static void alx_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+	struct alx_priv *alx = netdev_priv(netdev);
+	struct alx_hw *hw = &alx->hw;
+
+	wol->supported = WAKE_MAGIC | WAKE_PHY;
+	wol->wolopts = 0;
+
+	if (hw->sleep_ctrl & ALX_SLEEP_WOL_MAGIC)
+		wol->wolopts |= WAKE_MAGIC;
+	if (hw->sleep_ctrl & ALX_SLEEP_WOL_PHY)
+		wol->wolopts |= WAKE_PHY;
+}
+
+static int alx_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+{
+	struct alx_priv *alx = netdev_priv(netdev);
+	struct alx_hw *hw = &alx->hw;
+
+	if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE |
+			    WAKE_UCAST | WAKE_BCAST | WAKE_MCAST))
+		return -EOPNOTSUPP;
+
+	hw->sleep_ctrl = 0;
+
+	if (wol->wolopts & WAKE_MAGIC)
+		hw->sleep_ctrl |= ALX_SLEEP_WOL_MAGIC;
+	if (wol->wolopts & WAKE_PHY)
+		hw->sleep_ctrl |= ALX_SLEEP_WOL_PHY;
+
+	device_set_wakeup_enable(&alx->hw.pdev->dev, hw->sleep_ctrl);
+
+	return 0;
+}
+
+static void alx_get_drvinfo(struct net_device *netdev,
+			    struct ethtool_drvinfo *drvinfo)
+{
+	struct alx_priv *alx = netdev_priv(netdev);
+
+	strlcpy(drvinfo->driver, alx_drv_name, sizeof(drvinfo->driver));
+	strlcpy(drvinfo->bus_info, pci_name(alx->hw.pdev),
+		sizeof(drvinfo->bus_info));
+}
+
+const struct ethtool_ops alx_ethtool_ops = {
+	.get_settings	= alx_get_settings,
+	.set_settings	= alx_set_settings,
+	.get_pauseparam	= alx_get_pauseparam,
+	.set_pauseparam	= alx_set_pauseparam,
+	.get_drvinfo	= alx_get_drvinfo,
+	.get_msglevel	= alx_get_msglevel,
+	.set_msglevel	= alx_set_msglevel,
+	.get_wol	= alx_get_wol,
+	.set_wol	= alx_set_wol,
+	.get_link	= ethtool_op_get_link,
+};
diff --git a/drivers/net/ethernet/atheros/alx/hw.c b/drivers/net/ethernet/atheros/alx/hw.c
new file mode 100644
index 0000000..220a16a
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/hw.c
@@ -0,0 +1,1226 @@
+/*
+ * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
+ *
+ *  This file is free software: you may copy, redistribute and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation, either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ *  This file is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/mdio.h>
+#include "reg.h"
+#include "hw.h"
+
+static inline bool alx_is_rev_a(u8 rev)
+{
+	return rev == ALX_REV_A0 || rev == ALX_REV_A1;
+}
+
+static int alx_wait_mdio_idle(struct alx_hw *hw)
+{
+	u32 val;
+	int i;
+
+	for (i = 0; i < ALX_MDIO_MAX_AC_TO; i++) {
+		val = alx_read_mem32(hw, ALX_MDIO);
+		if (!(val & ALX_MDIO_BUSY))
+			return 0;
+		udelay(10);
+	}
+
+	return -ETIMEDOUT;
+}
+
+static int alx_read_phy_core(struct alx_hw *hw, bool ext, u8 dev,
+			     u16 reg, u16 *phy_data)
+{
+	u32 val, clk_sel;
+	int err;
+
+	*phy_data = 0;
+
+	/* use the slow clock when the PHY is in hibernation */
+	clk_sel = hw->link_speed != SPEED_UNKNOWN ?
+			ALX_MDIO_CLK_SEL_25MD4 :
+			ALX_MDIO_CLK_SEL_25MD128;
+
+	if (ext) {
+		val = dev << ALX_MDIO_EXTN_DEVAD_SHIFT |
+		      reg << ALX_MDIO_EXTN_REG_SHIFT;
+		alx_write_mem32(hw, ALX_MDIO_EXTN, val);
+
+		val = ALX_MDIO_SPRES_PRMBL | ALX_MDIO_START |
+		      ALX_MDIO_MODE_EXT | ALX_MDIO_OP_READ |
+		      clk_sel << ALX_MDIO_CLK_SEL_SHIFT;
+	} else {
+		val = ALX_MDIO_SPRES_PRMBL |
+		      clk_sel << ALX_MDIO_CLK_SEL_SHIFT |
+		      reg << ALX_MDIO_REG_SHIFT |
+		      ALX_MDIO_START | ALX_MDIO_OP_READ;
+	}
+	alx_write_mem32(hw, ALX_MDIO, val);
+
+	err = alx_wait_mdio_idle(hw);
+	if (err)
+		return err;
+	val = alx_read_mem32(hw, ALX_MDIO);
+	*phy_data = ALX_GET_FIELD(val, ALX_MDIO_DATA);
+	return 0;
+}
+
+static int alx_write_phy_core(struct alx_hw *hw, bool ext, u8 dev,
+			      u16 reg, u16 phy_data)
+{
+	u32 val, clk_sel;
+
+	/* use the slow clock when the PHY is in hibernation */
+	clk_sel = hw->link_speed != SPEED_UNKNOWN ?
+			ALX_MDIO_CLK_SEL_25MD4 :
+			ALX_MDIO_CLK_SEL_25MD128;
+
+	if (ext) {
+		val = dev << ALX_MDIO_EXTN_DEVAD_SHIFT |
+		      reg << ALX_MDIO_EXTN_REG_SHIFT;
+		alx_write_mem32(hw, ALX_MDIO_EXTN, val);
+
+		val = ALX_MDIO_SPRES_PRMBL |
+		      clk_sel << ALX_MDIO_CLK_SEL_SHIFT |
+		      phy_data << ALX_MDIO_DATA_SHIFT |
+		      ALX_MDIO_START | ALX_MDIO_MODE_EXT;
+	} else {
+		val = ALX_MDIO_SPRES_PRMBL |
+		      clk_sel << ALX_MDIO_CLK_SEL_SHIFT |
+		      reg << ALX_MDIO_REG_SHIFT |
+		      phy_data << ALX_MDIO_DATA_SHIFT |
+		      ALX_MDIO_START;
+	}
+	alx_write_mem32(hw, ALX_MDIO, val);
+
+	return alx_wait_mdio_idle(hw);
+}
+
+static int __alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data)
+{
+	return alx_read_phy_core(hw, false, 0, reg, phy_data);
+}
+
+static int __alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data)
+{
+	return alx_write_phy_core(hw, false, 0, reg, phy_data);
+}
+
+static int __alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata)
+{
+	return alx_read_phy_core(hw, true, dev, reg, pdata);
+}
+
+static int __alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data)
+{
+	return alx_write_phy_core(hw, true, dev, reg, data);
+}
+
+static int __alx_read_phy_dbg(struct alx_hw *hw, u16 reg, u16 *pdata)
+{
+	int err;
+
+	err = __alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, reg);
+	if (err)
+		return err;
+
+	return __alx_read_phy_reg(hw, ALX_MII_DBG_DATA, pdata);
+}
+
+static int __alx_write_phy_dbg(struct alx_hw *hw, u16 reg, u16 data)
+{
+	int err;
+
+	err = __alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, reg);
+	if (err)
+		return err;
+
+	return __alx_write_phy_reg(hw, ALX_MII_DBG_DATA, data);
+}
+
+int alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data)
+{
+	int err;
+
+	spin_lock(&hw->mdio_lock);
+	err = __alx_read_phy_reg(hw, reg, phy_data);
+	spin_unlock(&hw->mdio_lock);
+
+	return err;
+}
+
+int alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data)
+{
+	int err;
+
+	spin_lock(&hw->mdio_lock);
+	err = __alx_write_phy_reg(hw, reg, phy_data);
+	spin_unlock(&hw->mdio_lock);
+
+	return err;
+}
+
+int alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata)
+{
+	int err;
+
+	spin_lock(&hw->mdio_lock);
+	err = __alx_read_phy_ext(hw, dev, reg, pdata);
+	spin_unlock(&hw->mdio_lock);
+
+	return err;
+}
+
+int alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data)
+{
+	int err;
+
+	spin_lock(&hw->mdio_lock);
+	err = __alx_write_phy_ext(hw, dev, reg, data);
+	spin_unlock(&hw->mdio_lock);
+
+	return err;
+}
+
+static int alx_read_phy_dbg(struct alx_hw *hw, u16 reg, u16 *pdata)
+{
+	int err;
+
+	spin_lock(&hw->mdio_lock);
+	err = __alx_read_phy_dbg(hw, reg, pdata);
+	spin_unlock(&hw->mdio_lock);
+
+	return err;
+}
+
+static int alx_write_phy_dbg(struct alx_hw *hw, u16 reg, u16 data)
+{
+	int err;
+
+	spin_lock(&hw->mdio_lock);
+	err = __alx_write_phy_dbg(hw, reg, data);
+	spin_unlock(&hw->mdio_lock);
+
+	return err;
+}
+
+static u16 alx_get_phy_config(struct alx_hw *hw)
+{
+	u32 val;
+	u16 phy_val;
+
+	val = alx_read_mem32(hw, ALX_PHY_CTRL);
+	/* phy in reset */
+	if ((val & ALX_PHY_CTRL_DSPRST_OUT) == 0)
+		return ALX_DRV_PHY_UNKNOWN;
+
+	val = alx_read_mem32(hw, ALX_DRV);
+	val = ALX_GET_FIELD(val, ALX_DRV_PHY);
+	if (ALX_DRV_PHY_UNKNOWN == val)
+		return ALX_DRV_PHY_UNKNOWN;
+
+	alx_read_phy_reg(hw, ALX_MII_DBG_ADDR, &phy_val);
+	if (ALX_PHY_INITED == phy_val)
+		return val;
+
+	return ALX_DRV_PHY_UNKNOWN;
+}
+
+static bool alx_wait_reg(struct alx_hw *hw, u32 reg, u32 wait, u32 *val)
+{
+	u32 read;
+	int i;
+
+	for (i = 0; i < ALX_SLD_MAX_TO; i++) {
+		read = alx_read_mem32(hw, reg);
+		if ((read & wait) == 0) {
+			if (val)
+				*val = read;
+			return true;
+		}
+		mdelay(1);
+	}
+
+	return false;
+}
+
+static bool alx_read_macaddr(struct alx_hw *hw, u8 *addr)
+{
+	u32 mac0, mac1;
+
+	mac0 = alx_read_mem32(hw, ALX_STAD0);
+	mac1 = alx_read_mem32(hw, ALX_STAD1);
+
+	/* addr should be big-endian */
+	*(__be32 *)(addr + 2) = cpu_to_be32(mac0);
+	*(__be16 *)addr = cpu_to_be16(mac1);
+
+	return is_valid_ether_addr(addr);
+}
+
+int alx_get_perm_macaddr(struct alx_hw *hw, u8 *addr)
+{
+	u32 val;
+
+	/* try to get it from register first */
+	if (alx_read_macaddr(hw, addr))
+		return 0;
+
+	/* try to load from efuse */
+	if (!alx_wait_reg(hw, ALX_SLD, ALX_SLD_STAT | ALX_SLD_START, &val))
+		return -EIO;
+	alx_write_mem32(hw, ALX_SLD, val | ALX_SLD_START);
+	if (!alx_wait_reg(hw, ALX_SLD, ALX_SLD_START, NULL))
+		return -EIO;
+	if (alx_read_macaddr(hw, addr))
+		return 0;
+
+	/* try to load from flash/eeprom (if present) */
+	val = alx_read_mem32(hw, ALX_EFLD);
+	if (val & (ALX_EFLD_F_EXIST | ALX_EFLD_E_EXIST)) {
+		if (!alx_wait_reg(hw, ALX_EFLD,
+				  ALX_EFLD_STAT | ALX_EFLD_START, &val))
+			return -EIO;
+		alx_write_mem32(hw, ALX_EFLD, val | ALX_EFLD_START);
+		if (!alx_wait_reg(hw, ALX_EFLD, ALX_EFLD_START, NULL))
+			return -EIO;
+		if (alx_read_macaddr(hw, addr))
+			return 0;
+	}
+
+	return -EIO;
+}
+
+void alx_set_macaddr(struct alx_hw *hw, const u8 *addr)
+{
+	u32 val;
+
+	/* for example: 00-0B-6A-F6-00-DC => STAD0=6AF600DC, STAD1=000B */
+	val = be32_to_cpu(*(__be32 *)(addr + 2));
+	alx_write_mem32(hw, ALX_STAD0, val);
+	val = be16_to_cpu(*(__be16 *)addr);
+	alx_write_mem32(hw, ALX_STAD1, val);
+}
+
+static void alx_enable_osc(struct alx_hw *hw)
+{
+	u32 val;
+
+	/* rising edge */
+	val = alx_read_mem32(hw, ALX_MISC);
+	alx_write_mem32(hw, ALX_MISC, val & ~ALX_MISC_INTNLOSC_OPEN);
+	alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN);
+}
+
+static void alx_reset_osc(struct alx_hw *hw, u8 rev)
+{
+	u32 val, val2;
+
+	/* clear internal OSC settings; let the hw switch the OSC itself */
+	val = alx_read_mem32(hw, ALX_MISC3);
+	alx_write_mem32(hw, ALX_MISC3,
+			(val & ~ALX_MISC3_25M_BY_SW) |
+			ALX_MISC3_25M_NOTO_INTNL);
+
+	/* the 25M clock from the chipset may be unstable for 1s after PERST
+	 * de-assertion; the driver needs to re-calibrate before entering
+	 * sleep for WoL
+	 */
+	val = alx_read_mem32(hw, ALX_MISC);
+	if (rev >= ALX_REV_B0) {
+		/* restore the over-current protection default value,
+		 * which could be reset by MAC-RST
+		 */
+		ALX_SET_FIELD(val, ALX_MISC_PSW_OCP, ALX_MISC_PSW_OCP_DEF);
+		/* a 0->1 change will update the internal val of osc */
+		val &= ~ALX_MISC_INTNLOSC_OPEN;
+		alx_write_mem32(hw, ALX_MISC, val);
+		alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN);
+		/* hw will automatically disable the OSC after calibration */
+		val2 = alx_read_mem32(hw, ALX_MSIC2);
+		val2 &= ~ALX_MSIC2_CALB_START;
+		alx_write_mem32(hw, ALX_MSIC2, val2);
+		alx_write_mem32(hw, ALX_MSIC2, val2 | ALX_MSIC2_CALB_START);
+	} else {
+		val &= ~ALX_MISC_INTNLOSC_OPEN;
+		/* disable isolation for rev A devices */
+		if (alx_is_rev_a(rev))
+			val &= ~ALX_MISC_ISO_EN;
+
+		alx_write_mem32(hw, ALX_MISC, val | ALX_MISC_INTNLOSC_OPEN);
+		alx_write_mem32(hw, ALX_MISC, val);
+	}
+
+	udelay(20);
+}
+
+static int alx_stop_mac(struct alx_hw *hw)
+{
+	u32 rxq, txq, val;
+	u16 i;
+
+	rxq = alx_read_mem32(hw, ALX_RXQ0);
+	alx_write_mem32(hw, ALX_RXQ0, rxq & ~ALX_RXQ0_EN);
+	txq = alx_read_mem32(hw, ALX_TXQ0);
+	alx_write_mem32(hw, ALX_TXQ0, txq & ~ALX_TXQ0_EN);
+
+	udelay(40);
+
+	hw->rx_ctrl &= ~(ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_TX_EN);
+	alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
+
+	for (i = 0; i < ALX_DMA_MAC_RST_TO; i++) {
+		val = alx_read_mem32(hw, ALX_MAC_STS);
+		if (!(val & ALX_MAC_STS_IDLE))
+			return 0;
+		udelay(10);
+	}
+
+	return -ETIMEDOUT;
+}
+
+int alx_reset_mac(struct alx_hw *hw)
+{
+	u32 val, pmctrl;
+	int i, ret;
+	u8 rev;
+	bool a_cr;
+
+	pmctrl = 0;
+	rev = alx_hw_revision(hw);
+	a_cr = alx_is_rev_a(rev) && alx_hw_with_cr(hw);
+
+	/* disable all interrupts, RXQ/TXQ */
+	alx_write_mem32(hw, ALX_MSIX_MASK, 0xFFFFFFFF);
+	alx_write_mem32(hw, ALX_IMR, 0);
+	alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS);
+
+	ret = alx_stop_mac(hw);
+	if (ret)
+		return ret;
+
+	/* mac reset workaround */
+	alx_write_mem32(hw, ALX_RFD_PIDX, 1);
+
+	/* disable L0s/L1 before mac reset */
+	if (a_cr) {
+		pmctrl = alx_read_mem32(hw, ALX_PMCTRL);
+		if (pmctrl & (ALX_PMCTRL_L1_EN | ALX_PMCTRL_L0S_EN))
+			alx_write_mem32(hw, ALX_PMCTRL,
+					pmctrl & ~(ALX_PMCTRL_L1_EN |
+						   ALX_PMCTRL_L0S_EN));
+	}
+
+	/* reset whole mac safely */
+	val = alx_read_mem32(hw, ALX_MASTER);
+	alx_write_mem32(hw, ALX_MASTER,
+			val | ALX_MASTER_DMA_MAC_RST | ALX_MASTER_OOB_DIS);
+
+	/* make sure it's really idle */
+	udelay(10);
+	for (i = 0; i < ALX_DMA_MAC_RST_TO; i++) {
+		val = alx_read_mem32(hw, ALX_RFD_PIDX);
+		if (val == 0)
+			break;
+		udelay(10);
+	}
+	for (; i < ALX_DMA_MAC_RST_TO; i++) {
+		val = alx_read_mem32(hw, ALX_MASTER);
+		if ((val & ALX_MASTER_DMA_MAC_RST) == 0)
+			break;
+		udelay(10);
+	}
+	if (i == ALX_DMA_MAC_RST_TO)
+		return -EIO;
+	udelay(10);
+
+	if (a_cr) {
+		alx_write_mem32(hw, ALX_MASTER, val | ALX_MASTER_PCLKSEL_SRDS);
+		/* restore l0s / l1 */
+		if (pmctrl & (ALX_PMCTRL_L1_EN | ALX_PMCTRL_L0S_EN))
+			alx_write_mem32(hw, ALX_PMCTRL, pmctrl);
+	}
+
+	alx_reset_osc(hw, rev);
+
+	/* clear internal OSC settings, let the hw switch the OSC itself,
+	 * and disable isolation for rev A devices
+	 */
+	val = alx_read_mem32(hw, ALX_MISC3);
+	alx_write_mem32(hw, ALX_MISC3,
+			(val & ~ALX_MISC3_25M_BY_SW) |
+			ALX_MISC3_25M_NOTO_INTNL);
+	val = alx_read_mem32(hw, ALX_MISC);
+	val &= ~ALX_MISC_INTNLOSC_OPEN;
+	if (alx_is_rev_a(rev))
+		val &= ~ALX_MISC_ISO_EN;
+	alx_write_mem32(hw, ALX_MISC, val);
+	udelay(20);
+
+	/* driver controls speed/duplex, hash-alg */
+	alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
+
+	val = alx_read_mem32(hw, ALX_SERDES);
+	alx_write_mem32(hw, ALX_SERDES,
+			val | ALX_SERDES_MACCLK_SLWDWN |
+			ALX_SERDES_PHYCLK_SLWDWN);
+
+	return 0;
+}
+
+void alx_reset_phy(struct alx_hw *hw)
+{
+	int i;
+	u32 val;
+	u16 phy_val;
+
+	/* (DSP)reset PHY core */
+	val = alx_read_mem32(hw, ALX_PHY_CTRL);
+	val &= ~(ALX_PHY_CTRL_DSPRST_OUT | ALX_PHY_CTRL_IDDQ |
+		 ALX_PHY_CTRL_GATE_25M | ALX_PHY_CTRL_POWER_DOWN |
+		 ALX_PHY_CTRL_CLS);
+	val |= ALX_PHY_CTRL_RST_ANALOG;
+
+	val |= (ALX_PHY_CTRL_HIB_PULSE | ALX_PHY_CTRL_HIB_EN);
+	alx_write_mem32(hw, ALX_PHY_CTRL, val);
+	udelay(10);
+	alx_write_mem32(hw, ALX_PHY_CTRL, val | ALX_PHY_CTRL_DSPRST_OUT);
+
+	for (i = 0; i < ALX_PHY_CTRL_DSPRST_TO; i++)
+		udelay(10);
+
+	/* phy power saving & hib */
+	alx_write_phy_dbg(hw, ALX_MIIDBG_LEGCYPS, ALX_LEGCYPS_DEF);
+	alx_write_phy_dbg(hw, ALX_MIIDBG_SYSMODCTRL,
+			  ALX_SYSMODCTRL_IECHOADJ_DEF);
+	alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_VDRVBIAS,
+			  ALX_VDRVBIAS_DEF);
+
+	/* EEE advertisement */
+	val = alx_read_mem32(hw, ALX_LPI_CTRL);
+	alx_write_mem32(hw, ALX_LPI_CTRL, val & ~ALX_LPI_CTRL_EN);
+	alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_LOCAL_EEEADV, 0);
+
+	/* phy power saving */
+	alx_write_phy_dbg(hw, ALX_MIIDBG_TST10BTCFG, ALX_TST10BTCFG_DEF);
+	alx_write_phy_dbg(hw, ALX_MIIDBG_SRDSYSMOD, ALX_SRDSYSMOD_DEF);
+	alx_write_phy_dbg(hw, ALX_MIIDBG_TST100BTCFG, ALX_TST100BTCFG_DEF);
+	alx_write_phy_dbg(hw, ALX_MIIDBG_ANACTRL, ALX_ANACTRL_DEF);
+	alx_read_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, &phy_val);
+	alx_write_phy_dbg(hw, ALX_MIIDBG_GREENCFG2,
+			  phy_val & ~ALX_GREENCFG2_GATE_DFSE_EN);
+	/* rtl8139c, 120m issue */
+	alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_NLP78,
+			  ALX_MIIEXT_NLP78_120M_DEF);
+	alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_S3DIG10,
+			  ALX_MIIEXT_S3DIG10_DEF);
+
+	if (hw->lnk_patch) {
+		/* Turn off half amplitude */
+		alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL3,
+				 &phy_val);
+		alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL3,
+				  phy_val | ALX_CLDCTRL3_BP_CABLE1TH_DET_GT);
+		/* Turn off Green feature */
+		alx_read_phy_dbg(hw, ALX_MIIDBG_GREENCFG2, &phy_val);
+		alx_write_phy_dbg(hw, ALX_MIIDBG_GREENCFG2,
+				  phy_val | ALX_GREENCFG2_BP_GREEN);
+		/* Turn off half Bias */
+		alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL5,
+				 &phy_val);
+		alx_write_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL5,
+				  phy_val | ALX_CLDCTRL5_BP_VD_HLFBIAS);
+	}
+
+	/* set phy interrupt mask */
+	alx_write_phy_reg(hw, ALX_MII_IER, ALX_IER_LINK_UP | ALX_IER_LINK_DOWN);
+}
+
+#define ALX_PCI_CMD (PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | PCI_COMMAND_IO)
+
+void alx_reset_pcie(struct alx_hw *hw)
+{
+	u8 rev = alx_hw_revision(hw);
+	u32 val;
+	u16 val16;
+
+	/* Workaround for PCI problem when BIOS sets MMRBC incorrectly. */
+	pci_read_config_word(hw->pdev, PCI_COMMAND, &val16);
+	if (!(val16 & ALX_PCI_CMD) || (val16 & PCI_COMMAND_INTX_DISABLE)) {
+		val16 = (val16 | ALX_PCI_CMD) & ~PCI_COMMAND_INTX_DISABLE;
+		pci_write_config_word(hw->pdev, PCI_COMMAND, val16);
+	}
+
+	/* clear WoL setting/status */
+	val = alx_read_mem32(hw, ALX_WOL0);
+	alx_write_mem32(hw, ALX_WOL0, 0);
+
+	val = alx_read_mem32(hw, ALX_PDLL_TRNS1);
+	alx_write_mem32(hw, ALX_PDLL_TRNS1, val & ~ALX_PDLL_TRNS1_D3PLLOFF_EN);
+
+	/* mask some pcie error bits */
+	val = alx_read_mem32(hw, ALX_UE_SVRT);
+	val &= ~(ALX_UE_SVRT_DLPROTERR | ALX_UE_SVRT_FCPROTERR);
+	alx_write_mem32(hw, ALX_UE_SVRT, val);
+
+	/* wol 25M & pclk */
+	val = alx_read_mem32(hw, ALX_MASTER);
+	if (alx_is_rev_a(rev) && alx_hw_with_cr(hw)) {
+		if ((val & ALX_MASTER_WAKEN_25M) == 0 ||
+		    (val & ALX_MASTER_PCLKSEL_SRDS) == 0)
+			alx_write_mem32(hw, ALX_MASTER,
+					val | ALX_MASTER_PCLKSEL_SRDS |
+					ALX_MASTER_WAKEN_25M);
+	} else {
+		if ((val & ALX_MASTER_WAKEN_25M) == 0 ||
+		    (val & ALX_MASTER_PCLKSEL_SRDS) != 0)
+			alx_write_mem32(hw, ALX_MASTER,
+					(val & ~ALX_MASTER_PCLKSEL_SRDS) |
+					ALX_MASTER_WAKEN_25M);
+	}
+
+	/* ASPM setting */
+	alx_enable_aspm(hw, true, true);
+
+	udelay(10);
+}
+
+void alx_start_mac(struct alx_hw *hw)
+{
+	u32 mac, txq, rxq;
+
+	rxq = alx_read_mem32(hw, ALX_RXQ0);
+	alx_write_mem32(hw, ALX_RXQ0, rxq | ALX_RXQ0_EN);
+	txq = alx_read_mem32(hw, ALX_TXQ0);
+	alx_write_mem32(hw, ALX_TXQ0, txq | ALX_TXQ0_EN);
+
+	mac = hw->rx_ctrl;
+	if (hw->link_speed % 10 == DUPLEX_FULL)
+		mac |= ALX_MAC_CTRL_FULLD;
+	else
+		mac &= ~ALX_MAC_CTRL_FULLD;
+	ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED,
+		      hw->link_speed >= SPEED_1000 ? ALX_MAC_CTRL_SPEED_1000 :
+						     ALX_MAC_CTRL_SPEED_10_100);
+	mac |= ALX_MAC_CTRL_TX_EN | ALX_MAC_CTRL_RX_EN;
+	hw->rx_ctrl = mac;
+	alx_write_mem32(hw, ALX_MAC_CTRL, mac);
+}
+
+void alx_cfg_mac_flowcontrol(struct alx_hw *hw, u8 fc)
+{
+	if (fc & ALX_FC_RX)
+		hw->rx_ctrl |= ALX_MAC_CTRL_RXFC_EN;
+	else
+		hw->rx_ctrl &= ~ALX_MAC_CTRL_RXFC_EN;
+
+	if (fc & ALX_FC_TX)
+		hw->rx_ctrl |= ALX_MAC_CTRL_TXFC_EN;
+	else
+		hw->rx_ctrl &= ~ALX_MAC_CTRL_TXFC_EN;
+
+	alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
+}
+
+void alx_enable_aspm(struct alx_hw *hw, bool l0s_en, bool l1_en)
+{
+	u32 pmctrl;
+	u8 rev = alx_hw_revision(hw);
+
+	pmctrl = alx_read_mem32(hw, ALX_PMCTRL);
+
+	ALX_SET_FIELD(pmctrl, ALX_PMCTRL_LCKDET_TIMER,
+		      ALX_PMCTRL_LCKDET_TIMER_DEF);
+	pmctrl |= ALX_PMCTRL_RCVR_WT_1US |
+		  ALX_PMCTRL_L1_CLKSW_EN |
+		  ALX_PMCTRL_L1_SRDSRX_PWD;
+	ALX_SET_FIELD(pmctrl, ALX_PMCTRL_L1REQ_TO, ALX_PMCTRL_L1REG_TO_DEF);
+	ALX_SET_FIELD(pmctrl, ALX_PMCTRL_L1_TIMER, ALX_PMCTRL_L1_TIMER_16US);
+	pmctrl &= ~(ALX_PMCTRL_L1_SRDS_EN |
+		    ALX_PMCTRL_L1_SRDSPLL_EN |
+		    ALX_PMCTRL_L1_BUFSRX_EN |
+		    ALX_PMCTRL_SADLY_EN |
+		    ALX_PMCTRL_HOTRST_WTEN|
+		    ALX_PMCTRL_L0S_EN |
+		    ALX_PMCTRL_L1_EN |
+		    ALX_PMCTRL_ASPM_FCEN |
+		    ALX_PMCTRL_TXL1_AFTER_L0S |
+		    ALX_PMCTRL_RXL1_AFTER_L0S);
+	if (alx_is_rev_a(rev) && alx_hw_with_cr(hw))
+		pmctrl |= ALX_PMCTRL_L1_SRDS_EN | ALX_PMCTRL_L1_SRDSPLL_EN;
+
+	if (l0s_en)
+		pmctrl |= (ALX_PMCTRL_L0S_EN | ALX_PMCTRL_ASPM_FCEN);
+	if (l1_en)
+		pmctrl |= (ALX_PMCTRL_L1_EN | ALX_PMCTRL_ASPM_FCEN);
+
+	alx_write_mem32(hw, ALX_PMCTRL, pmctrl);
+}
+
+
+static u32 ethadv_to_hw_cfg(struct alx_hw *hw, u32 ethadv_cfg)
+{
+	u32 cfg = 0;
+
+	if (ethadv_cfg & ADVERTISED_Autoneg) {
+		cfg |= ALX_DRV_PHY_AUTO;
+		if (ethadv_cfg & ADVERTISED_10baseT_Half)
+			cfg |= ALX_DRV_PHY_10;
+		if (ethadv_cfg & ADVERTISED_10baseT_Full)
+			cfg |= ALX_DRV_PHY_10 | ALX_DRV_PHY_DUPLEX;
+		if (ethadv_cfg & ADVERTISED_100baseT_Half)
+			cfg |= ALX_DRV_PHY_100;
+		if (ethadv_cfg & ADVERTISED_100baseT_Full)
+			cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX;
+		if (ethadv_cfg & ADVERTISED_1000baseT_Half)
+			cfg |= ALX_DRV_PHY_1000;
+		if (ethadv_cfg & ADVERTISED_1000baseT_Full)
+			cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX;
+		if (ethadv_cfg & ADVERTISED_Pause)
+			cfg |= ADVERTISE_PAUSE_CAP;
+		if (ethadv_cfg & ADVERTISED_Asym_Pause)
+			cfg |= ADVERTISE_PAUSE_ASYM;
+	} else {
+		switch (ethadv_cfg) {
+		case ADVERTISED_10baseT_Half:
+			cfg |= ALX_DRV_PHY_10;
+			break;
+		case ADVERTISED_100baseT_Half:
+			cfg |= ALX_DRV_PHY_100;
+			break;
+		case ADVERTISED_10baseT_Full:
+			cfg |= ALX_DRV_PHY_10 | ALX_DRV_PHY_DUPLEX;
+			break;
+		case ADVERTISED_100baseT_Full:
+			cfg |= ALX_DRV_PHY_100 | ALX_DRV_PHY_DUPLEX;
+			break;
+		}
+	}
+
+	return cfg;
+}
+
+int alx_setup_speed_duplex(struct alx_hw *hw, u32 ethadv, u8 flowctrl)
+{
+	u16 adv, giga, cr;
+	u32 val;
+	int err = 0;
+
+	alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, 0);
+	val = alx_read_mem32(hw, ALX_DRV);
+	ALX_SET_FIELD(val, ALX_DRV_PHY, 0);
+
+	if (ethadv & ADVERTISED_Autoneg) {
+		adv = ADVERTISE_CSMA;
+		adv |= ethtool_adv_to_mii_adv_t(ethadv);
+
+		if (flowctrl & ALX_FC_ANEG) {
+			if (flowctrl & ALX_FC_RX) {
+				adv |= ADVERTISED_Pause;
+				if (!(flowctrl & ALX_FC_TX))
+					adv |= ADVERTISED_Asym_Pause;
+			} else if (flowctrl & ALX_FC_TX) {
+				adv |= ADVERTISED_Asym_Pause;
+			}
+		}
+		giga = 0;
+		if (alx_hw_giga(hw))
+			giga = ethtool_adv_to_mii_ctrl1000_t(ethadv);
+
+		cr = BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART;
+
+		if (alx_write_phy_reg(hw, MII_ADVERTISE, adv) ||
+		    alx_write_phy_reg(hw, MII_CTRL1000, giga) ||
+		    alx_write_phy_reg(hw, MII_BMCR, cr))
+			err = -EBUSY;
+	} else {
+		cr = BMCR_RESET;
+		if (ethadv == ADVERTISED_100baseT_Half ||
+		    ethadv == ADVERTISED_100baseT_Full)
+			cr |= BMCR_SPEED100;
+		if (ethadv == ADVERTISED_10baseT_Full ||
+		    ethadv == ADVERTISED_100baseT_Full)
+			cr |= BMCR_FULLDPLX;
+
+		err = alx_write_phy_reg(hw, MII_BMCR, cr);
+	}
+
+	if (!err) {
+		alx_write_phy_reg(hw, ALX_MII_DBG_ADDR, ALX_PHY_INITED);
+		val |= ethadv_to_hw_cfg(hw, ethadv);
+	}
+
+	alx_write_mem32(hw, ALX_DRV, val);
+
+	return err;
+}
+
+
+void alx_post_phy_link(struct alx_hw *hw)
+{
+	u16 phy_val, len, agc;
+	u8 revid = alx_hw_revision(hw);
+	bool adj_th = revid == ALX_REV_B0;
+	int speed;
+
+	if (hw->link_speed == SPEED_UNKNOWN)
+		speed = SPEED_UNKNOWN;
+	else
+		speed = hw->link_speed - hw->link_speed % 10;
+
+	if (revid != ALX_REV_B0 && !alx_is_rev_a(revid))
+		return;
+
+	/* 1000BT/AZ, wrong cable length */
+	if (speed != SPEED_UNKNOWN) {
+		alx_read_phy_ext(hw, ALX_MIIEXT_PCS, ALX_MIIEXT_CLDCTRL6,
+				 &phy_val);
+		len = ALX_GET_FIELD(phy_val, ALX_CLDCTRL6_CAB_LEN);
+		alx_read_phy_dbg(hw, ALX_MIIDBG_AGC, &phy_val);
+		agc = ALX_GET_FIELD(phy_val, ALX_AGC_2_VGA);
+
+		if ((speed == SPEED_1000 &&
+		     (len > ALX_CLDCTRL6_CAB_LEN_SHORT1G ||
+		      (len == 0 && agc > ALX_AGC_LONG1G_LIMT))) ||
+		    (speed == SPEED_100 &&
+		     (len > ALX_CLDCTRL6_CAB_LEN_SHORT100M ||
+		      (len == 0 && agc > ALX_AGC_LONG100M_LIMT)))) {
+			alx_write_phy_dbg(hw, ALX_MIIDBG_AZ_ANADECT,
+					  ALX_AZ_ANADECT_LONG);
+			alx_read_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
+					 &phy_val);
+			alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
+					  phy_val | ALX_AFE_10BT_100M_TH);
+		} else {
+			alx_write_phy_dbg(hw, ALX_MIIDBG_AZ_ANADECT,
+					  ALX_AZ_ANADECT_DEF);
+			alx_read_phy_ext(hw, ALX_MIIEXT_ANEG,
+					 ALX_MIIEXT_AFE, &phy_val);
+			alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
+					  phy_val & ~ALX_AFE_10BT_100M_TH);
+		}
+
+		/* threshold adjust */
+		if (adj_th && hw->lnk_patch) {
+			if (speed == SPEED_100) {
+				alx_write_phy_dbg(hw, ALX_MIIDBG_MSE16DB,
+						  ALX_MSE16DB_UP);
+			} else if (speed == SPEED_1000) {
+				/*
+				 * Gigabit link threshold: raise the noise
+				 * tolerance by 50%
+				 */
+				alx_read_phy_dbg(hw, ALX_MIIDBG_MSE20DB,
+						 &phy_val);
+				ALX_SET_FIELD(phy_val, ALX_MSE20DB_TH,
+					      ALX_MSE20DB_TH_HI);
+				alx_write_phy_dbg(hw, ALX_MIIDBG_MSE20DB,
+						  phy_val);
+			}
+		}
+	} else {
+		alx_read_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
+				 &phy_val);
+		alx_write_phy_ext(hw, ALX_MIIEXT_ANEG, ALX_MIIEXT_AFE,
+				  phy_val & ~ALX_AFE_10BT_100M_TH);
+
+		if (adj_th && hw->lnk_patch) {
+			alx_write_phy_dbg(hw, ALX_MIIDBG_MSE16DB,
+					  ALX_MSE16DB_DOWN);
+			alx_read_phy_dbg(hw, ALX_MIIDBG_MSE20DB, &phy_val);
+			ALX_SET_FIELD(phy_val, ALX_MSE20DB_TH,
+				      ALX_MSE20DB_TH_DEF);
+			alx_write_phy_dbg(hw, ALX_MIIDBG_MSE20DB, phy_val);
+		}
+	}
+}
+
+
+/* NOTE:
+ *    1. the PHY link must be established before calling this function
+ *    2. the WoL options (pattern, magic, link, etc.) must be configured
+ *       before calling it
+ */
+int alx_pre_suspend(struct alx_hw *hw, int speed)
+{
+	u32 master, mac, phy, val;
+	int err = 0;
+
+	master = alx_read_mem32(hw, ALX_MASTER);
+	master &= ~ALX_MASTER_PCLKSEL_SRDS;
+	mac = hw->rx_ctrl;
+	/* 10/100 half */
+	ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED,  ALX_MAC_CTRL_SPEED_10_100);
+	mac &= ~(ALX_MAC_CTRL_FULLD | ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_TX_EN);
+
+	phy = alx_read_mem32(hw, ALX_PHY_CTRL);
+	phy &= ~(ALX_PHY_CTRL_DSPRST_OUT | ALX_PHY_CTRL_CLS);
+	phy |= ALX_PHY_CTRL_RST_ANALOG | ALX_PHY_CTRL_HIB_PULSE |
+	       ALX_PHY_CTRL_HIB_EN;
+
+	/* no wake-up activity configured */
+	if (!(hw->sleep_ctrl & ALX_SLEEP_ACTIVE)) {
+		err = alx_write_phy_reg(hw, ALX_MII_IER, 0);
+		if (err)
+			return err;
+		phy |= ALX_PHY_CTRL_IDDQ | ALX_PHY_CTRL_POWER_DOWN;
+	} else {
+		if (hw->sleep_ctrl & (ALX_SLEEP_WOL_MAGIC | ALX_SLEEP_CIFS))
+			mac |= ALX_MAC_CTRL_RX_EN | ALX_MAC_CTRL_BRD_EN;
+		if (hw->sleep_ctrl & ALX_SLEEP_CIFS)
+			mac |= ALX_MAC_CTRL_TX_EN;
+		if (speed % 10 == DUPLEX_FULL)
+			mac |= ALX_MAC_CTRL_FULLD;
+		if (speed >= SPEED_1000)
+			ALX_SET_FIELD(mac, ALX_MAC_CTRL_SPEED,
+				      ALX_MAC_CTRL_SPEED_1000);
+		phy |= ALX_PHY_CTRL_DSPRST_OUT;
+		err = alx_write_phy_ext(hw, ALX_MIIEXT_ANEG,
+					ALX_MIIEXT_S3DIG10,
+					ALX_MIIEXT_S3DIG10_SL);
+		if (err)
+			return err;
+	}
+
+	alx_enable_osc(hw);
+	hw->rx_ctrl = mac;
+	alx_write_mem32(hw, ALX_MASTER, master);
+	alx_write_mem32(hw, ALX_MAC_CTRL, mac);
+	alx_write_mem32(hw, ALX_PHY_CTRL, phy);
+
+	/* set val of PDLL D3PLLOFF */
+	val = alx_read_mem32(hw, ALX_PDLL_TRNS1);
+	val |= ALX_PDLL_TRNS1_D3PLLOFF_EN;
+	alx_write_mem32(hw, ALX_PDLL_TRNS1, val);
+
+	return 0;
+}
+
+bool alx_phy_configured(struct alx_hw *hw)
+{
+	u32 cfg, hw_cfg;
+
+	cfg = ethadv_to_hw_cfg(hw, hw->adv_cfg);
+	cfg = ALX_GET_FIELD(cfg, ALX_DRV_PHY);
+	hw_cfg = alx_get_phy_config(hw);
+
+	if (hw_cfg == ALX_DRV_PHY_UNKNOWN)
+		return false;
+
+	return cfg == hw_cfg;
+}
+
+int alx_get_phy_link(struct alx_hw *hw, int *speed)
+{
+	struct pci_dev *pdev = hw->pdev;
+	u16 bmsr, giga;
+	int err;
+
+	err = alx_read_phy_reg(hw, MII_BMSR, &bmsr);
+	if (err)
+		return err;
+
+	err = alx_read_phy_reg(hw, MII_BMSR, &bmsr);
+	if (err)
+		return err;
+
+	if (!(bmsr & BMSR_LSTATUS)) {
+		*speed = SPEED_UNKNOWN;
+		return 0;
+	}
+
+	/* speed/duplex result is saved in PHY Specific Status Register */
+	err = alx_read_phy_reg(hw, ALX_MII_GIGA_PSSR, &giga);
+	if (err)
+		return err;
+
+	if (!(giga & ALX_GIGA_PSSR_SPD_DPLX_RESOLVED))
+		goto wrong_speed;
+
+	switch (giga & ALX_GIGA_PSSR_SPEED) {
+	case ALX_GIGA_PSSR_1000MBS:
+		*speed = SPEED_1000;
+		break;
+	case ALX_GIGA_PSSR_100MBS:
+		*speed = SPEED_100;
+		break;
+	case ALX_GIGA_PSSR_10MBS:
+		*speed = SPEED_10;
+		break;
+	default:
+		goto wrong_speed;
+	}
+
+	*speed += (giga & ALX_GIGA_PSSR_DPLX) ? DUPLEX_FULL : DUPLEX_HALF;
+	return 1;
+
+wrong_speed:
+	dev_err(&pdev->dev, "invalid PHY speed/duplex: 0x%x\n", giga);
+	return -EINVAL;
+}
+
+int alx_clear_phy_intr(struct alx_hw *hw)
+{
+	u16 isr;
+
+	/* clear interrupt status by reading it */
+	return alx_read_phy_reg(hw, ALX_MII_ISR, &isr);
+}
+
+int alx_config_wol(struct alx_hw *hw)
+{
+	u32 wol = 0;
+	int err = 0;
+
+	/* turn on magic packet event */
+	if (hw->sleep_ctrl & ALX_SLEEP_WOL_MAGIC)
+		wol |= ALX_WOL0_MAGIC_EN | ALX_WOL0_PME_MAGIC_EN;
+
+	/* turn on link up event */
+	if (hw->sleep_ctrl & ALX_SLEEP_WOL_PHY) {
+		wol |=  ALX_WOL0_LINK_EN | ALX_WOL0_PME_LINK;
+		/* only link up can wake up */
+		err = alx_write_phy_reg(hw, ALX_MII_IER, ALX_IER_LINK_UP);
+	}
+	alx_write_mem32(hw, ALX_WOL0, wol);
+
+	return err;
+}
+
+void alx_disable_rss(struct alx_hw *hw)
+{
+	u32 ctrl = alx_read_mem32(hw, ALX_RXQ0);
+
+	ctrl &= ~ALX_RXQ0_RSS_HASH_EN;
+	alx_write_mem32(hw, ALX_RXQ0, ctrl);
+}
+
+void alx_configure_basic(struct alx_hw *hw)
+{
+	u32 val, raw_mtu, max_payload;
+	u16 val16;
+	u8 chip_rev = alx_hw_revision(hw);
+
+	alx_set_macaddr(hw, hw->mac_addr);
+
+	alx_write_mem32(hw, ALX_CLK_GATE, ALX_CLK_GATE_ALL);
+
+	/* idle timeout to switch clk_125M */
+	if (chip_rev >= ALX_REV_B0)
+		alx_write_mem32(hw, ALX_IDLE_DECISN_TIMER,
+				ALX_IDLE_DECISN_TIMER_DEF);
+
+	alx_write_mem32(hw, ALX_SMB_TIMER, hw->smb_timer * 500UL);
+
+	val = alx_read_mem32(hw, ALX_MASTER);
+	val |= ALX_MASTER_IRQMOD2_EN |
+	       ALX_MASTER_IRQMOD1_EN |
+	       ALX_MASTER_SYSALVTIMER_EN;
+	alx_write_mem32(hw, ALX_MASTER, val);
+	alx_write_mem32(hw, ALX_IRQ_MODU_TIMER,
+			(hw->imt >> 1) << ALX_IRQ_MODU_TIMER1_SHIFT);
+	/* intr re-trig timeout */
+	alx_write_mem32(hw, ALX_INT_RETRIG, ALX_INT_RETRIG_TO);
+	/* tpd threshold to trig int */
+	alx_write_mem32(hw, ALX_TINT_TPD_THRSHLD, hw->ith_tpd);
+	alx_write_mem32(hw, ALX_TINT_TIMER, hw->imt);
+
+	raw_mtu = hw->mtu + ETH_HLEN;
+	alx_write_mem32(hw, ALX_MTU, raw_mtu + 8);
+	if (raw_mtu > ALX_MTU_JUMBO_TH)
+		hw->rx_ctrl &= ~ALX_MAC_CTRL_FAST_PAUSE;
+
+	if ((raw_mtu + 8) < ALX_TXQ1_JUMBO_TSO_TH)
+		val = (raw_mtu + 8 + 7) >> 3;
+	else
+		val = ALX_TXQ1_JUMBO_TSO_TH >> 3;
+	alx_write_mem32(hw, ALX_TXQ1, val | ALX_TXQ1_ERRLGPKT_DROP_EN);
+
+	max_payload = pcie_get_readrq(hw->pdev) >> 8;
+	/*
+	 * if the BIOS has changed the default DMA max read request size,
+	 * restore it to the default value
+	 */
+	if (max_payload < ALX_DEV_CTRL_MAXRRS_MIN)
+		pcie_set_readrq(hw->pdev, 128 << ALX_DEV_CTRL_MAXRRS_MIN);
+
+	val = ALX_TXQ_TPD_BURSTPREF_DEF << ALX_TXQ0_TPD_BURSTPREF_SHIFT |
+	      ALX_TXQ0_MODE_ENHANCE | ALX_TXQ0_LSO_8023_EN |
+	      ALX_TXQ0_SUPT_IPOPT |
+	      ALX_TXQ_TXF_BURST_PREF_DEF << ALX_TXQ0_TXF_BURST_PREF_SHIFT;
+	alx_write_mem32(hw, ALX_TXQ0, val);
+	val = ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q1_NUMPREF_SHIFT |
+	      ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q2_NUMPREF_SHIFT |
+	      ALX_TXQ_TPD_BURSTPREF_DEF << ALX_HQTPD_Q3_NUMPREF_SHIFT |
+	      ALX_HQTPD_BURST_EN;
+	alx_write_mem32(hw, ALX_HQTPD, val);
+
+	/* rxq, flow control */
+	val = alx_read_mem32(hw, ALX_SRAM5);
+	val = ALX_GET_FIELD(val, ALX_SRAM_RXF_LEN) << 3;
+	if (val > ALX_SRAM_RXF_LEN_8K) {
+		val16 = ALX_MTU_STD_ALGN >> 3;
+		val = (val - ALX_RXQ2_RXF_FLOW_CTRL_RSVD) >> 3;
+	} else {
+		val16 = ALX_MTU_STD_ALGN >> 3;
+		val = (val - ALX_MTU_STD_ALGN) >> 3;
+	}
+	alx_write_mem32(hw, ALX_RXQ2,
+			val16 << ALX_RXQ2_RXF_XOFF_THRESH_SHIFT |
+			val << ALX_RXQ2_RXF_XON_THRESH_SHIFT);
+	val = ALX_RXQ0_NUM_RFD_PREF_DEF << ALX_RXQ0_NUM_RFD_PREF_SHIFT |
+	      ALX_RXQ0_RSS_MODE_DIS << ALX_RXQ0_RSS_MODE_SHIFT |
+	      ALX_RXQ0_IDT_TBL_SIZE_DEF << ALX_RXQ0_IDT_TBL_SIZE_SHIFT |
+	      ALX_RXQ0_RSS_HSTYP_ALL | ALX_RXQ0_RSS_HASH_EN |
+	      ALX_RXQ0_IPV6_PARSE_EN;
+
+	if (alx_hw_giga(hw))
+		ALX_SET_FIELD(val, ALX_RXQ0_ASPM_THRESH,
+			      ALX_RXQ0_ASPM_THRESH_100M);
+
+	alx_write_mem32(hw, ALX_RXQ0, val);
+
+	val = alx_read_mem32(hw, ALX_DMA);
+	val = ALX_DMA_RORDER_MODE_OUT << ALX_DMA_RORDER_MODE_SHIFT |
+	      ALX_DMA_RREQ_PRI_DATA |
+	      max_payload << ALX_DMA_RREQ_BLEN_SHIFT |
+	      ALX_DMA_WDLY_CNT_DEF << ALX_DMA_WDLY_CNT_SHIFT |
+	      ALX_DMA_RDLY_CNT_DEF << ALX_DMA_RDLY_CNT_SHIFT |
+	      (hw->dma_chnl - 1) << ALX_DMA_RCHNL_SEL_SHIFT;
+	alx_write_mem32(hw, ALX_DMA, val);
+
+	/* default multi-tx-q weights */
+	val = ALX_WRR_PRI_RESTRICT_NONE << ALX_WRR_PRI_SHIFT |
+	      4 << ALX_WRR_PRI0_SHIFT |
+	      4 << ALX_WRR_PRI1_SHIFT |
+	      4 << ALX_WRR_PRI2_SHIFT |
+	      4 << ALX_WRR_PRI3_SHIFT;
+	alx_write_mem32(hw, ALX_WRR, val);
+}
+
+static inline u32 alx_speed_to_ethadv(int speed)
+{
+	switch (speed) {
+	case SPEED_1000 + DUPLEX_FULL:
+		return ADVERTISED_1000baseT_Full;
+	case SPEED_100 + DUPLEX_FULL:
+		return ADVERTISED_100baseT_Full;
+	case SPEED_100 + DUPLEX_HALF:
+		return ADVERTISED_10baseT_Half;
+	case SPEED_10 + DUPLEX_FULL:
+		return ADVERTISED_10baseT_Full;
+	case SPEED_10 + DUPLEX_HALF:
+		return ADVERTISED_10baseT_Half;
+	default:
+		return 0;
+	}
+}
+
+int alx_select_powersaving_speed(struct alx_hw *hw, int *speed)
+{
+	int i, err, spd;
+	u16 lpa;
+
+	err = alx_get_phy_link(hw, &spd);
+	if (err < 0)
+		return err;
+
+	if (spd == SPEED_UNKNOWN)
+		return 0;
+
+	err = alx_read_phy_reg(hw, MII_LPA, &lpa);
+	if (err)
+		return err;
+
+	if (!(lpa & LPA_LPACK)) {
+		*speed = spd;
+		return 0;
+	}
+
+	if (lpa & LPA_10FULL)
+		*speed = SPEED_10 + DUPLEX_FULL;
+	else if (lpa & LPA_10HALF)
+		*speed = SPEED_10 + DUPLEX_HALF;
+	else if (lpa & LPA_100FULL)
+		*speed = SPEED_100 + DUPLEX_FULL;
+	else
+		*speed = SPEED_100 + DUPLEX_HALF;
+
+	if (*speed != spd) {
+		err = alx_write_phy_reg(hw, ALX_MII_IER, 0);
+		if (err)
+			return err;
+		err = alx_setup_speed_duplex(hw,
+					     alx_speed_to_ethadv(*speed) |
+					     ADVERTISED_Autoneg,
+					     ALX_FC_ANEG | ALX_FC_RX |
+					     ALX_FC_TX);
+		if (err)
+			return err;
+
+		/* wait for linkup */
+		for (i = 0; i < ALX_MAX_SETUP_LNK_CYCLE; i++) {
+			int speed2;
+
+			msleep(100);
+
+			err = alx_get_phy_link(hw, &speed2);
+			if (err < 0)
+				return err;
+			if (speed2 != SPEED_UNKNOWN)
+				break;
+		}
+		if (i == ALX_MAX_SETUP_LNK_CYCLE)
+			return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+bool alx_get_phy_info(struct alx_hw *hw)
+{
+	u16  devs1, devs2;
+
+	if (alx_read_phy_reg(hw, MII_PHYSID1, &hw->phy_id[0]) ||
+	    alx_read_phy_reg(hw, MII_PHYSID2, &hw->phy_id[1]))
+		return false;
+
+	/* since we don't have a PMA/PMD status2 register, we can't
+	 * use the mdio45_probe function for prtad and mmds;
+	 * use fixed MMD3 to get the mmds.
+	 */
+	if (alx_read_phy_ext(hw, 3, MDIO_DEVS1, &devs1) ||
+	    alx_read_phy_ext(hw, 3, MDIO_DEVS2, &devs2))
+		return false;
+	hw->mdio.mmds = devs1 | devs2 << 16;
+
+	return true;
+}
diff --git a/drivers/net/ethernet/atheros/alx/hw.h b/drivers/net/ethernet/atheros/alx/hw.h
new file mode 100644
index 0000000..65e723d
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/hw.h
@@ -0,0 +1,499 @@
+/*
+ * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
+ *
+ *  This file is free software: you may copy, redistribute and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation, either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ *  This file is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef ALX_HW_H_
+#define ALX_HW_H_
+#include <linux/types.h>
+#include <linux/mdio.h>
+#include <linux/pci.h>
+#include "reg.h"
+
+/* Transmit Packet Descriptor, contains 4 32-bit words.
+ *
+ *   31               16               0
+ *   +----------------+----------------+
+ *   |    vlan-tag    |   buf length   |
+ *   +----------------+----------------+
+ *   |              Word 1             |
+ *   +----------------+----------------+
+ *   |      Word 2: buf addr lo        |
+ *   +----------------+----------------+
+ *   |      Word 3: buf addr hi        |
+ *   +----------------+----------------+
+ *
+ * Word 2 and 3 combine to form a 64-bit buffer address
+ *
+ * Word 1 has three forms, depending on the state of bits 8/12/13:
+ * if bit8 == '1', the definition is just for custom checksum offload.
+ * if bit8 == '0' && bit12 == '1' && bit13 == '1', the *FIRST* descriptor
+ *     for the skb is special for LSO V2: Word 2 becomes the total skb length
+ *     and Word 3 is meaningless.
+ * in all other cases, the definition is for a general skb, or for ip/tcp/udp
+ *     checksum or LSO (TSO) offload.
+ *
+ * Here is the depiction:
+ *
+ *   0-+                                  0-+
+ *   1 |                                  1 |
+ *   2 |                                  2 |
+ *   3 |    Payload offset                3 |    L4 header offset
+ *   4 |        (7:0)                     4 |        (7:0)
+ *   5 |                                  5 |
+ *   6 |                                  6 |
+ *   7-+                                  7-+
+ *   8      Custom csum enable = 1        8      Custom csum enable = 0
+ *   9      General IPv4 checksum         9      General IPv4 checksum
+ *   10     General TCP checksum          10     General TCP checksum
+ *   11     General UDP checksum          11     General UDP checksum
+ *   12     Large Send Segment enable     12     Large Send Segment enable
+ *   13     Large Send Segment type       13     Large Send Segment type
+ *   14     VLAN tagged                   14     VLAN tagged
+ *   15     Insert VLAN tag               15     Insert VLAN tag
+ *   16     IPv4 packet                   16     IPv4 packet
+ *   17     Ethernet frame type           17     Ethernet frame type
+ *   18-+                                 18-+
+ *   19 |                                 19 |
+ *   20 |                                 20 |
+ *   21 |   Custom csum offset            21 |
+ *   22 |       (25:18)                   22 |
+ *   23 |                                 23 |   MSS (30:18)
+ *   24 |                                 24 |
+ *   25-+                                 25 |
+ *   26-+                                 26 |
+ *   27 |                                 27 |
+ *   28 |   Reserved                      28 |
+ *   29 |                                 29 |
+ *   30-+                                 30-+
+ *   31     End of packet                 31     End of packet
+ */
+struct alx_txd {
+	__le16 len;
+	__le16 vlan_tag;
+	__le32 word1;
+	union {
+		__le64 addr;
+		struct {
+			__le32 pkt_len;
+			__le32 resvd;
+		} l;
+	} adrl;
+} __packed;
+
+/* tpd word 1 */
+#define TPD_CXSUMSTART_MASK		0x00FF
+#define TPD_CXSUMSTART_SHIFT		0
+#define TPD_L4HDROFFSET_MASK		0x00FF
+#define TPD_L4HDROFFSET_SHIFT		0
+#define TPD_CXSUM_EN_MASK		0x0001
+#define TPD_CXSUM_EN_SHIFT		8
+#define TPD_IP_XSUM_MASK		0x0001
+#define TPD_IP_XSUM_SHIFT		9
+#define TPD_TCP_XSUM_MASK		0x0001
+#define TPD_TCP_XSUM_SHIFT		10
+#define TPD_UDP_XSUM_MASK		0x0001
+#define TPD_UDP_XSUM_SHIFT		11
+#define TPD_LSO_EN_MASK			0x0001
+#define TPD_LSO_EN_SHIFT		12
+#define TPD_LSO_V2_MASK			0x0001
+#define TPD_LSO_V2_SHIFT		13
+#define TPD_VLTAGGED_MASK		0x0001
+#define TPD_VLTAGGED_SHIFT		14
+#define TPD_INS_VLTAG_MASK		0x0001
+#define TPD_INS_VLTAG_SHIFT		15
+#define TPD_IPV4_MASK			0x0001
+#define TPD_IPV4_SHIFT			16
+#define TPD_ETHTYPE_MASK		0x0001
+#define TPD_ETHTYPE_SHIFT		17
+#define TPD_CXSUMOFFSET_MASK		0x00FF
+#define TPD_CXSUMOFFSET_SHIFT		18
+#define TPD_MSS_MASK			0x1FFF
+#define TPD_MSS_SHIFT			18
+#define TPD_EOP_MASK			0x0001
+#define TPD_EOP_SHIFT			31
+
+#define DESC_GET(_x, _name) ((_x) >> _name##SHIFT & _name##MASK)
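+
+/* Editorial usage sketch (not part of the original patch): DESC_GET() is
+ * handed the field name including its trailing underscore so the macro can
+ * paste on SHIFT/MASK. For example, pulling the MSS and end-of-packet bit
+ * out of a host-order tpd word 1 value (alx_sketch_* names are illustrative
+ * only):
+ */
+static inline void alx_sketch_tpd_decode(u32 word1, u32 *mss, bool *eop)
+{
+	*mss = DESC_GET(word1, TPD_MSS_);	/* bits 30:18 */
+	*eop = DESC_GET(word1, TPD_EOP_);	/* bit 31 */
+}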
+
+/* Receive Free Descriptor */
+struct alx_rfd {
+	__le64 addr;		/* data buffer address, length is
+				 * declared in register --- every
+				 * buffer has the same size
+				 */
+} __packed;
+
+/* Receive Return Descriptor, contains 4 32-bit words.
+ *
+ *   31               16               0
+ *   +----------------+----------------+
+ *   |              Word 0             |
+ *   +----------------+----------------+
+ *   |     Word 1: RSS Hash value      |
+ *   +----------------+----------------+
+ *   |              Word 2             |
+ *   +----------------+----------------+
+ *   |              Word 3             |
+ *   +----------------+----------------+
+ *
+ * Word 0 depiction         &            Word 2 depiction:
+ *
+ *   0--+                                 0--+
+ *   1  |                                 1  |
+ *   2  |                                 2  |
+ *   3  |                                 3  |
+ *   4  |                                 4  |
+ *   5  |                                 5  |
+ *   6  |                                 6  |
+ *   7  |    IP payload checksum          7  |     VLAN tag
+ *   8  |         (15:0)                  8  |      (15:0)
+ *   9  |                                 9  |
+ *   10 |                                 10 |
+ *   11 |                                 11 |
+ *   12 |                                 12 |
+ *   13 |                                 13 |
+ *   14 |                                 14 |
+ *   15-+                                 15-+
+ *   16-+                                 16-+
+ *   17 |     Number of RFDs              17 |
+ *   18 |        (19:16)                  18 |
+ *   19-+                                 19 |     Protocol ID
+ *   20-+                                 20 |      (23:16)
+ *   21 |                                 21 |
+ *   22 |                                 22 |
+ *   23 |                                 23-+
+ *   24 |                                 24 |     Reserved
+ *   25 |     Start index of RFD-ring     25-+
+ *   26 |         (31:20)                 26 |     RSS Q-num (27:25)
+ *   27 |                                 27-+
+ *   28 |                                 28-+
+ *   29 |                                 29 |     RSS Hash algorithm
+ *   30 |                                 30 |      (31:28)
+ *   31-+                                 31-+
+ *
+ * Word 3 depiction:
+ *
+ *   0--+
+ *   1  |
+ *   2  |
+ *   3  |
+ *   4  |
+ *   5  |
+ *   6  |
+ *   7  |    Packet length (include FCS)
+ *   8  |         (13:0)
+ *   9  |
+ *   10 |
+ *   11 |
+ *   12 |
+ *   13-+
+ *   14      L4 Header checksum error
+ *   15      IPv4 checksum error
+ *   16      VLAN tagged
+ *   17-+
+ *   18 |    Protocol ID (19:17)
+ *   19-+
+ *   20      Receive error summary
+ *   21      FCS(CRC) error
+ *   22      Frame alignment error
+ *   23      Truncated packet
+ *   24      Runt packet
+ *   25      Incomplete packet due to insufficient rx-desc
+ *   26      Broadcast packet
+ *   27      Multicast packet
+ *   28      Ethernet type (EII or 802.3)
+ *   29      FIFO overflow
+ *   30      Length error (for 802.3, length field mismatch with actual len)
+ *   31      Updated, indicate to driver that this RRD is refreshed.
+ */
+struct alx_rrd {
+	__le32 word0;
+	__le32 rss_hash;
+	__le32 word2;
+	__le32 word3;
+} __packed;
+
+/* rrd word 0 */
+#define RRD_XSUM_MASK		0xFFFF
+#define RRD_XSUM_SHIFT		0
+#define RRD_NOR_MASK		0x000F
+#define RRD_NOR_SHIFT		16
+#define RRD_SI_MASK		0x0FFF
+#define RRD_SI_SHIFT		20
+
+/* rrd word 2 */
+#define RRD_VLTAG_MASK		0xFFFF
+#define RRD_VLTAG_SHIFT		0
+#define RRD_PID_MASK		0x00FF
+#define RRD_PID_SHIFT		16
+/* non-ip packet */
+#define RRD_PID_NONIP		0
+/* ipv4(only) */
+#define RRD_PID_IPV4		1
+/* tcp/ipv6 */
+#define RRD_PID_IPV6TCP		2
+/* tcp/ipv4 */
+#define RRD_PID_IPV4TCP		3
+/* udp/ipv6 */
+#define RRD_PID_IPV6UDP		4
+/* udp/ipv4 */
+#define RRD_PID_IPV4UDP		5
+/* ipv6(only) */
+#define RRD_PID_IPV6		6
+/* LLDP packet */
+#define RRD_PID_LLDP		7
+/* 1588 packet */
+#define RRD_PID_1588		8
+#define RRD_RSSQ_MASK		0x0007
+#define RRD_RSSQ_SHIFT		25
+#define RRD_RSSALG_MASK		0x000F
+#define RRD_RSSALG_SHIFT	28
+#define RRD_RSSALG_TCPV6	0x1
+#define RRD_RSSALG_IPV6		0x2
+#define RRD_RSSALG_TCPV4	0x4
+#define RRD_RSSALG_IPV4		0x8
+
+/* rrd word 3 */
+#define RRD_PKTLEN_MASK		0x3FFF
+#define RRD_PKTLEN_SHIFT	0
+#define RRD_ERR_L4_MASK		0x0001
+#define RRD_ERR_L4_SHIFT	14
+#define RRD_ERR_IPV4_MASK	0x0001
+#define RRD_ERR_IPV4_SHIFT	15
+#define RRD_VLTAGGED_MASK	0x0001
+#define RRD_VLTAGGED_SHIFT	16
+#define RRD_OLD_PID_MASK	0x0007
+#define RRD_OLD_PID_SHIFT	17
+#define RRD_ERR_RES_MASK	0x0001
+#define RRD_ERR_RES_SHIFT	20
+#define RRD_ERR_FCS_MASK	0x0001
+#define RRD_ERR_FCS_SHIFT	21
+#define RRD_ERR_FAE_MASK	0x0001
+#define RRD_ERR_FAE_SHIFT	22
+#define RRD_ERR_TRUNC_MASK	0x0001
+#define RRD_ERR_TRUNC_SHIFT	23
+#define RRD_ERR_RUNT_MASK	0x0001
+#define RRD_ERR_RUNT_SHIFT	24
+#define RRD_ERR_ICMP_MASK	0x0001
+#define RRD_ERR_ICMP_SHIFT	25
+#define RRD_BCAST_MASK		0x0001
+#define RRD_BCAST_SHIFT		26
+#define RRD_MCAST_MASK		0x0001
+#define RRD_MCAST_SHIFT		27
+#define RRD_ETHTYPE_MASK	0x0001
+#define RRD_ETHTYPE_SHIFT	28
+#define RRD_ERR_FIFOV_MASK	0x0001
+#define RRD_ERR_FIFOV_SHIFT	29
+#define RRD_ERR_LEN_MASK	0x0001
+#define RRD_ERR_LEN_SHIFT	30
+#define RRD_UPDATED_MASK	0x0001
+#define RRD_UPDATED_SHIFT	31
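+
+/* Editorial sketch (not from the original patch): roughly how the RX path
+ * decides an RRD is consumable, using the word 3 fields above on a
+ * host-order value. alx_sketch_rrd_ready() is illustrative only.
+ */
+static inline bool alx_sketch_rrd_ready(u32 word3, u16 *pktlen)
+{
+	if (!DESC_GET(word3, RRD_UPDATED_))	/* hw has not written it yet */
+		return false;
+	if (DESC_GET(word3, RRD_ERR_RES_))	/* receive error summary set */
+		return false;
+	*pktlen = DESC_GET(word3, RRD_PKTLEN_);	/* length includes FCS */
+	return true;
+}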
+
+
+#define ALX_MAX_SETUP_LNK_CYCLE	50
+
+/* for FlowControl */
+#define ALX_FC_RX		0x01
+#define ALX_FC_TX		0x02
+#define ALX_FC_ANEG		0x04
+
+/* for sleep control */
+#define ALX_SLEEP_WOL_PHY	0x00000001
+#define ALX_SLEEP_WOL_MAGIC	0x00000002
+#define ALX_SLEEP_CIFS		0x00000004
+#define ALX_SLEEP_ACTIVE	(ALX_SLEEP_WOL_PHY | \
+				 ALX_SLEEP_WOL_MAGIC | \
+				 ALX_SLEEP_CIFS)
+
+/* for RSS hash type */
+#define ALX_RSS_HASH_TYPE_IPV4		0x1
+#define ALX_RSS_HASH_TYPE_IPV4_TCP	0x2
+#define ALX_RSS_HASH_TYPE_IPV6		0x4
+#define ALX_RSS_HASH_TYPE_IPV6_TCP	0x8
+#define ALX_RSS_HASH_TYPE_ALL		(ALX_RSS_HASH_TYPE_IPV4 | \
+					 ALX_RSS_HASH_TYPE_IPV4_TCP | \
+					 ALX_RSS_HASH_TYPE_IPV6 | \
+					 ALX_RSS_HASH_TYPE_IPV6_TCP)
+#define ALX_DEF_RXBUF_SIZE	1536
+#define ALX_MAX_JUMBO_PKT_SIZE	(9*1024)
+#define ALX_MAX_TSO_PKT_SIZE	(7*1024)
+#define ALX_MAX_FRAME_SIZE	ALX_MAX_JUMBO_PKT_SIZE
+#define ALX_MIN_FRAME_SIZE	68
+#define ALX_RAW_MTU(_mtu)	(_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)
+
+#define ALX_MAX_RX_QUEUES	8
+#define ALX_MAX_TX_QUEUES	4
+#define ALX_MAX_HANDLED_INTRS	5
+
+#define ALX_ISR_MISC		(ALX_ISR_PCIE_LNKDOWN | \
+				 ALX_ISR_DMAW | \
+				 ALX_ISR_DMAR | \
+				 ALX_ISR_SMB | \
+				 ALX_ISR_MANU | \
+				 ALX_ISR_TIMER)
+
+#define ALX_ISR_FATAL		(ALX_ISR_PCIE_LNKDOWN | \
+				 ALX_ISR_DMAW | ALX_ISR_DMAR)
+
+#define ALX_ISR_ALERT		(ALX_ISR_RXF_OV | \
+				 ALX_ISR_TXF_UR | \
+				 ALX_ISR_RFD_UR)
+
+#define ALX_ISR_ALL_QUEUES	(ALX_ISR_TX_Q0 | \
+				 ALX_ISR_TX_Q1 | \
+				 ALX_ISR_TX_Q2 | \
+				 ALX_ISR_TX_Q3 | \
+				 ALX_ISR_RX_Q0 | \
+				 ALX_ISR_RX_Q1 | \
+				 ALX_ISR_RX_Q2 | \
+				 ALX_ISR_RX_Q3 | \
+				 ALX_ISR_RX_Q4 | \
+				 ALX_ISR_RX_Q5 | \
+				 ALX_ISR_RX_Q6 | \
+				 ALX_ISR_RX_Q7)
+
+/* maximum interrupt vectors for msix */
+#define ALX_MAX_MSIX_INTRS	16
+
+#define ALX_GET_FIELD(_data, _field)					\
+	(((_data) >> _field ## _SHIFT) & _field ## _MASK)
+
+#define ALX_SET_FIELD(_data, _field, _value)	do {			\
+		(_data) &= ~(_field ## _MASK << _field ## _SHIFT);	\
+		(_data) |= ((_value) & _field ## _MASK) << _field ## _SHIFT;\
+	} while (0)
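+
+/* Editorial usage sketch (not part of the original patch): ALX_GET_FIELD /
+ * ALX_SET_FIELD take the field name without the _MASK/_SHIFT suffix. This is
+ * essentially how alx_start_mac() selects the MAC speed field; the
+ * alx_sketch_* name is illustrative only.
+ */
+static inline u32 alx_sketch_mac_speed(u32 mac_ctrl, int link_speed)
+{
+	ALX_SET_FIELD(mac_ctrl, ALX_MAC_CTRL_SPEED,
+		      link_speed >= SPEED_1000 ? ALX_MAC_CTRL_SPEED_1000 :
+						 ALX_MAC_CTRL_SPEED_10_100);
+	return mac_ctrl;
+}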
+
+struct alx_hw {
+	struct pci_dev *pdev;
+	u8 __iomem *hw_addr;
+
+	/* current & permanent mac addr */
+	u8 mac_addr[ETH_ALEN];
+	u8 perm_addr[ETH_ALEN];
+
+	u16 mtu;
+	u16 imt;
+	u8 dma_chnl;
+	u8 max_dma_chnl;
+	/* tpd threshold to trig INT */
+	u32 ith_tpd;
+	u32 rx_ctrl;
+	u32 mc_hash[2];
+
+	u32 smb_timer;
+	/* SPEED_* + DUPLEX_*, SPEED_UNKNOWN if link is down */
+	int link_speed;
+
+	/* auto-neg advertisement or force mode config */
+	u32 adv_cfg;
+	u8 flowctrl;
+
+	u32 sleep_ctrl;
+
+	spinlock_t mdio_lock;
+	struct mdio_if_info mdio;
+	u16 phy_id[2];
+
+	/* PHY link patch flag */
+	bool lnk_patch;
+};
+
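+/* Editorial sketch (not part of the original patch): when the link is up,
+ * hw->link_speed packs SPEED_* and DUPLEX_* into one int by addition, so the
+ * SPEED_* part is link_speed - link_speed % 10 and the DUPLEX_* part is
+ * link_speed % 10; this mirrors how alx_get_settings() and alx_start_mac()
+ * split it. The alx_sketch_* helpers are illustrative only.
+ */
+static inline int alx_sketch_speed_part(int link_speed)
+{
+	return link_speed - link_speed % 10;	/* SPEED_10/100/1000 */
+}
+
+static inline int alx_sketch_duplex_part(int link_speed)
+{
+	return link_speed % 10;			/* DUPLEX_HALF(0)/FULL(1) */
+}
+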
+static inline int alx_hw_revision(struct alx_hw *hw)
+{
+	return hw->pdev->revision >> ALX_PCI_REVID_SHIFT;
+}
+
+static inline bool alx_hw_with_cr(struct alx_hw *hw)
+{
+	return hw->pdev->revision & 1;
+}
+
+static inline bool alx_hw_giga(struct alx_hw *hw)
+{
+	return hw->pdev->device & 1;
+}
+
+static inline void alx_write_mem8(struct alx_hw *hw, u32 reg, u8 val)
+{
+	writeb(val, hw->hw_addr + reg);
+}
+
+static inline void alx_write_mem16(struct alx_hw *hw, u32 reg, u16 val)
+{
+	writew(val, hw->hw_addr + reg);
+}
+
+static inline u16 alx_read_mem16(struct alx_hw *hw, u32 reg)
+{
+	return readw(hw->hw_addr + reg);
+}
+
+static inline void alx_write_mem32(struct alx_hw *hw, u32 reg, u32 val)
+{
+	writel(val, hw->hw_addr + reg);
+}
+
+static inline u32 alx_read_mem32(struct alx_hw *hw, u32 reg)
+{
+	return readl(hw->hw_addr + reg);
+}
+
+static inline void alx_post_write(struct alx_hw *hw)
+{
+	readl(hw->hw_addr);
+}
+
+int alx_get_perm_macaddr(struct alx_hw *hw, u8 *addr);
+void alx_reset_phy(struct alx_hw *hw);
+void alx_reset_pcie(struct alx_hw *hw);
+void alx_enable_aspm(struct alx_hw *hw, bool l0s_en, bool l1_en);
+int alx_setup_speed_duplex(struct alx_hw *hw, u32 ethadv, u8 flowctrl);
+void alx_post_phy_link(struct alx_hw *hw);
+int alx_pre_suspend(struct alx_hw *hw, int speed);
+int alx_read_phy_reg(struct alx_hw *hw, u16 reg, u16 *phy_data);
+int alx_write_phy_reg(struct alx_hw *hw, u16 reg, u16 phy_data);
+int alx_read_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 *pdata);
+int alx_write_phy_ext(struct alx_hw *hw, u8 dev, u16 reg, u16 data);
+int alx_get_phy_link(struct alx_hw *hw, int *speed);
+int alx_clear_phy_intr(struct alx_hw *hw);
+int alx_config_wol(struct alx_hw *hw);
+void alx_cfg_mac_flowcontrol(struct alx_hw *hw, u8 fc);
+void alx_start_mac(struct alx_hw *hw);
+int alx_reset_mac(struct alx_hw *hw);
+void alx_set_macaddr(struct alx_hw *hw, const u8 *addr);
+bool alx_phy_configured(struct alx_hw *hw);
+void alx_configure_basic(struct alx_hw *hw);
+void alx_disable_rss(struct alx_hw *hw);
+int alx_select_powersaving_speed(struct alx_hw *hw, int *speed);
+bool alx_get_phy_info(struct alx_hw *hw);
+
+#endif
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
new file mode 100644
index 0000000..418de8b
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -0,0 +1,1625 @@
+/*
+ * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
+ *
+ *  This file is free software: you may copy, redistribute and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation, either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ *  This file is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/if_vlan.h>
+#include <linux/mdio.h>
+#include <linux/aer.h>
+#include <linux/bitops.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <net/ip6_checksum.h>
+#include <linux/crc32.h>
+#include "alx.h"
+#include "hw.h"
+#include "reg.h"
+
+const char alx_drv_name[] = "alx";
+
+
+static void alx_free_txbuf(struct alx_priv *alx, int entry)
+{
+	struct alx_buffer *txb = &alx->txq.bufs[entry];
+
+	if (dma_unmap_len(txb, size)) {
+		dma_unmap_single(&alx->hw.pdev->dev,
+				 dma_unmap_addr(txb, dma),
+				 dma_unmap_len(txb, size),
+				 DMA_TO_DEVICE);
+		dma_unmap_len_set(txb, size, 0);
+	}
+
+	if (txb->skb) {
+		dev_kfree_skb_any(txb->skb);
+		txb->skb = NULL;
+	}
+}
+
+static int alx_refill_rx_ring(struct alx_priv *alx, gfp_t gfp)
+{
+	struct alx_rx_queue *rxq = &alx->rxq;
+	struct sk_buff *skb;
+	struct alx_buffer *cur_buf;
+	dma_addr_t dma;
+	u16 cur, next, count = 0;
+
+	next = cur = rxq->write_idx;
+	if (++next == alx->rx_ringsz)
+		next = 0;
+	cur_buf = &rxq->bufs[cur];
+
+	while (!cur_buf->skb && next != rxq->read_idx) {
+		struct alx_rfd *rfd = &rxq->rfd[cur];
+
+		skb = __netdev_alloc_skb(alx->dev, alx->rxbuf_size, gfp);
+		if (!skb)
+			break;
+		dma = dma_map_single(&alx->hw.pdev->dev,
+				     skb->data, alx->rxbuf_size,
+				     DMA_FROM_DEVICE);
+		if (dma_mapping_error(&alx->hw.pdev->dev, dma)) {
+			dev_kfree_skb(skb);
+			break;
+		}
+
+		/* Unfortunately, RX descriptor buffers must be 4-byte
+		 * aligned, so we can't use IP alignment.
+		 */
+		if (WARN_ON(dma & 3)) {
+			dev_kfree_skb(skb);
+			break;
+		}
+
+		cur_buf->skb = skb;
+		dma_unmap_len_set(cur_buf, size, alx->rxbuf_size);
+		dma_unmap_addr_set(cur_buf, dma, dma);
+		rfd->addr = cpu_to_le64(dma);
+
+		cur = next;
+		if (++next == alx->rx_ringsz)
+			next = 0;
+		cur_buf = &rxq->bufs[cur];
+		count++;
+	}
+
+	if (count) {
+		/* flush all updates before updating hardware */
+		wmb();
+		rxq->write_idx = cur;
+		alx_write_mem16(&alx->hw, ALX_RFD_PIDX, cur);
+	}
+
+	return count;
+}
+
+static inline int alx_tpd_avail(struct alx_priv *alx)
+{
+	struct alx_tx_queue *txq = &alx->txq;
+
+	if (txq->write_idx >= txq->read_idx)
+		return alx->tx_ringsz + txq->read_idx - txq->write_idx - 1;
+	return txq->read_idx - txq->write_idx - 1;
+}
+
+static bool alx_clean_tx_irq(struct alx_priv *alx)
+{
+	struct alx_tx_queue *txq = &alx->txq;
+	u16 hw_read_idx, sw_read_idx;
+	unsigned int total_bytes = 0, total_packets = 0;
+	int budget = ALX_DEFAULT_TX_WORK;
+
+	sw_read_idx = txq->read_idx;
+	hw_read_idx = alx_read_mem16(&alx->hw, ALX_TPD_PRI0_CIDX);
+
+	if (sw_read_idx != hw_read_idx) {
+		while (sw_read_idx != hw_read_idx && budget > 0) {
+			struct sk_buff *skb;
+
+			skb = txq->bufs[sw_read_idx].skb;
+			if (skb) {
+				total_bytes += skb->len;
+				total_packets++;
+				budget--;
+			}
+
+			alx_free_txbuf(alx, sw_read_idx);
+
+			if (++sw_read_idx == alx->tx_ringsz)
+				sw_read_idx = 0;
+		}
+		txq->read_idx = sw_read_idx;
+
+		netdev_completed_queue(alx->dev, total_packets, total_bytes);
+	}
+
+	if (netif_queue_stopped(alx->dev) && netif_carrier_ok(alx->dev) &&
+	    alx_tpd_avail(alx) > alx->tx_ringsz/4)
+		netif_wake_queue(alx->dev);
+
+	return sw_read_idx == hw_read_idx;
+}
+
+static void alx_schedule_link_check(struct alx_priv *alx)
+{
+	schedule_work(&alx->link_check_wk);
+}
+
+static void alx_schedule_reset(struct alx_priv *alx)
+{
+	schedule_work(&alx->reset_wk);
+}
+
+static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
+{
+	struct alx_rx_queue *rxq = &alx->rxq;
+	struct alx_rrd *rrd;
+	struct alx_buffer *rxb;
+	struct sk_buff *skb;
+	u16 length, rfd_cleaned = 0;
+
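+	/* Each returned RRD must reference the next RFD in order and
+	 * cover exactly one buffer; anything else means the rings are
+	 * out of sync and the only recovery is a reset.
+	 */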
+	while (budget > 0) {
+		rrd = &rxq->rrd[rxq->rrd_read_idx];
+		if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT)))
+			break;
+		rrd->word3 &= ~cpu_to_le32(1 << RRD_UPDATED_SHIFT);
+
+		if (ALX_GET_FIELD(le32_to_cpu(rrd->word0),
+				  RRD_SI) != rxq->read_idx ||
+		    ALX_GET_FIELD(le32_to_cpu(rrd->word0),
+				  RRD_NOR) != 1) {
+			alx_schedule_reset(alx);
+			return 0;
+		}
+
+		rxb = &rxq->bufs[rxq->read_idx];
+		dma_unmap_single(&alx->hw.pdev->dev,
+				 dma_unmap_addr(rxb, dma),
+				 dma_unmap_len(rxb, size),
+				 DMA_FROM_DEVICE);
+		dma_unmap_len_set(rxb, size, 0);
+		skb = rxb->skb;
+		rxb->skb = NULL;
+
+		if (rrd->word3 & cpu_to_le32(1 << RRD_ERR_RES_SHIFT) ||
+		    rrd->word3 & cpu_to_le32(1 << RRD_ERR_LEN_SHIFT)) {
+			rrd->word3 = 0;
+			dev_kfree_skb_any(skb);
+			goto next_pkt;
+		}
+
+		length = ALX_GET_FIELD(le32_to_cpu(rrd->word3),
+				       RRD_PKTLEN) - ETH_FCS_LEN;
+		skb_put(skb, length);
+		skb->protocol = eth_type_trans(skb, alx->dev);
+
+		skb_checksum_none_assert(skb);
+		if (alx->dev->features & NETIF_F_RXCSUM &&
+		    !(rrd->word3 & (cpu_to_le32(1 << RRD_ERR_L4_SHIFT) |
+				    cpu_to_le32(1 << RRD_ERR_IPV4_SHIFT)))) {
+			switch (ALX_GET_FIELD(le32_to_cpu(rrd->word2),
+					      RRD_PID)) {
+			case RRD_PID_IPV6UDP:
+			case RRD_PID_IPV4UDP:
+			case RRD_PID_IPV4TCP:
+			case RRD_PID_IPV6TCP:
+				skb->ip_summed = CHECKSUM_UNNECESSARY;
+				break;
+			}
+		}
+
+		napi_gro_receive(&alx->napi, skb);
+		budget--;
+
+next_pkt:
+		if (++rxq->read_idx == alx->rx_ringsz)
+			rxq->read_idx = 0;
+		if (++rxq->rrd_read_idx == alx->rx_ringsz)
+			rxq->rrd_read_idx = 0;
+
+		if (++rfd_cleaned > ALX_RX_ALLOC_THRESH)
+			rfd_cleaned -= alx_refill_rx_ring(alx, GFP_ATOMIC);
+	}
+
+	if (rfd_cleaned)
+		alx_refill_rx_ring(alx, GFP_ATOMIC);
+
+	return budget > 0;
+}
+
+static int alx_poll(struct napi_struct *napi, int budget)
+{
+	struct alx_priv *alx = container_of(napi, struct alx_priv, napi);
+	struct alx_hw *hw = &alx->hw;
+	bool complete = true;
+	unsigned long flags;
+
+	complete = alx_clean_tx_irq(alx) &&
+		   alx_clean_rx_irq(alx, budget);
+
+	if (!complete)
+		return 1;
+
+	napi_complete(&alx->napi);
+
+	/* enable interrupt */
+	spin_lock_irqsave(&alx->irq_lock, flags);
+	alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;
+	alx_write_mem32(hw, ALX_IMR, alx->int_mask);
+	spin_unlock_irqrestore(&alx->irq_lock, flags);
+
+	alx_post_write(hw);
+
+	return 0;
+}
+
+static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
+{
+	struct alx_hw *hw = &alx->hw;
+	bool write_int_mask = false;
+
+	spin_lock(&alx->irq_lock);
+
+	/* ACK interrupt */
+	alx_write_mem32(hw, ALX_ISR, intr | ALX_ISR_DIS);
+	intr &= alx->int_mask;
+
+	if (intr & ALX_ISR_FATAL) {
+		netif_warn(alx, hw, alx->dev,
+			   "fatal interrupt 0x%x, resetting\n", intr);
+		alx_schedule_reset(alx);
+		goto out;
+	}
+
+	if (intr & ALX_ISR_ALERT)
+		netdev_warn(alx->dev, "alert interrupt: 0x%x\n", intr);
+
+	if (intr & ALX_ISR_PHY) {
+		/* Suppress the PHY interrupt: its source is internal to
+		 * the PHY, so the main interrupt status can only be
+		 * cleared once the PHY's internal status has been cleared
+		 * (done from the link-check worker).
+		 */
+		alx->int_mask &= ~ALX_ISR_PHY;
+		write_int_mask = true;
+		alx_schedule_link_check(alx);
+	}
+
+	if (intr & (ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0)) {
+		napi_schedule(&alx->napi);
+		/* mask rx/tx interrupts; re-enable them when NAPI completes */
+		alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
+		write_int_mask = true;
+	}
+
+	if (write_int_mask)
+		alx_write_mem32(hw, ALX_IMR, alx->int_mask);
+
+	alx_write_mem32(hw, ALX_ISR, 0);
+
+ out:
+	spin_unlock(&alx->irq_lock);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t alx_intr_msi(int irq, void *data)
+{
+	struct alx_priv *alx = data;
+
+	return alx_intr_handle(alx, alx_read_mem32(&alx->hw, ALX_ISR));
+}
+
+static irqreturn_t alx_intr_legacy(int irq, void *data)
+{
+	struct alx_priv *alx = data;
+	struct alx_hw *hw = &alx->hw;
+	u32 intr;
+
+	intr = alx_read_mem32(hw, ALX_ISR);
+
+	if (intr & ALX_ISR_DIS || !(intr & alx->int_mask))
+		return IRQ_NONE;
+
+	return alx_intr_handle(alx, intr);
+}
+
+static void alx_init_ring_ptrs(struct alx_priv *alx)
+{
+	struct alx_hw *hw = &alx->hw;
+	u32 addr_hi = ((u64)alx->descmem.dma) >> 32;
+
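+	/* All descriptors come from a single allocation, so the RX and
+	 * TX rings share the same upper 32 address bits.
+	 */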
+	alx->rxq.read_idx = 0;
+	alx->rxq.write_idx = 0;
+	alx->rxq.rrd_read_idx = 0;
+	alx_write_mem32(hw, ALX_RX_BASE_ADDR_HI, addr_hi);
+	alx_write_mem32(hw, ALX_RRD_ADDR_LO, alx->rxq.rrd_dma);
+	alx_write_mem32(hw, ALX_RRD_RING_SZ, alx->rx_ringsz);
+	alx_write_mem32(hw, ALX_RFD_ADDR_LO, alx->rxq.rfd_dma);
+	alx_write_mem32(hw, ALX_RFD_RING_SZ, alx->rx_ringsz);
+	alx_write_mem32(hw, ALX_RFD_BUF_SZ, alx->rxbuf_size);
+
+	alx->txq.read_idx = 0;
+	alx->txq.write_idx = 0;
+	alx_write_mem32(hw, ALX_TX_BASE_ADDR_HI, addr_hi);
+	alx_write_mem32(hw, ALX_TPD_PRI0_ADDR_LO, alx->txq.tpd_dma);
+	alx_write_mem32(hw, ALX_TPD_RING_SZ, alx->tx_ringsz);
+
+	/* load these pointers into the chip */
+	alx_write_mem32(hw, ALX_SRAM9, ALX_SRAM_LOAD_PTR);
+}
+
+static void alx_free_txring_buf(struct alx_priv *alx)
+{
+	struct alx_tx_queue *txq = &alx->txq;
+	int i;
+
+	if (!txq->bufs)
+		return;
+
+	for (i = 0; i < alx->tx_ringsz; i++)
+		alx_free_txbuf(alx, i);
+
+	memset(txq->bufs, 0, alx->tx_ringsz * sizeof(struct alx_buffer));
+	memset(txq->tpd, 0, alx->tx_ringsz * sizeof(struct alx_txd));
+	txq->write_idx = 0;
+	txq->read_idx = 0;
+
+	netdev_reset_queue(alx->dev);
+}
+
+static void alx_free_rxring_buf(struct alx_priv *alx)
+{
+	struct alx_rx_queue *rxq = &alx->rxq;
+	struct alx_buffer *cur_buf;
+	u16 i;
+
+	if (rxq == NULL)
+		return;
+
+	for (i = 0; i < alx->rx_ringsz; i++) {
+		cur_buf = rxq->bufs + i;
+		if (cur_buf->skb) {
+			dma_unmap_single(&alx->hw.pdev->dev,
+					 dma_unmap_addr(cur_buf, dma),
+					 dma_unmap_len(cur_buf, size),
+					 DMA_FROM_DEVICE);
+			dev_kfree_skb(cur_buf->skb);
+			cur_buf->skb = NULL;
+			dma_unmap_len_set(cur_buf, size, 0);
+			dma_unmap_addr_set(cur_buf, dma, 0);
+		}
+	}
+
+	rxq->write_idx = 0;
+	rxq->read_idx = 0;
+	rxq->rrd_read_idx = 0;
+}
+
+static void alx_free_buffers(struct alx_priv *alx)
+{
+	alx_free_txring_buf(alx);
+	alx_free_rxring_buf(alx);
+}
+
+static int alx_reinit_rings(struct alx_priv *alx)
+{
+	alx_free_buffers(alx);
+
+	alx_init_ring_ptrs(alx);
+
+	if (!alx_refill_rx_ring(alx, GFP_KERNEL))
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void alx_add_mc_addr(struct alx_hw *hw, const u8 *addr, u32 *mc_hash)
+{
+	u32 crc32, bit, reg;
+
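+	/* The top CRC bit selects one of the two 32-bit hash registers,
+	 * the next five bits select the bit within it.
+	 */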
+	crc32 = ether_crc(ETH_ALEN, addr);
+	reg = (crc32 >> 31) & 0x1;
+	bit = (crc32 >> 26) & 0x1F;
+
+	mc_hash[reg] |= BIT(bit);
+}
+
+static void __alx_set_rx_mode(struct net_device *netdev)
+{
+	struct alx_priv *alx = netdev_priv(netdev);
+	struct alx_hw *hw = &alx->hw;
+	struct netdev_hw_addr *ha;
+	u32 mc_hash[2] = {};
+
+	if (!(netdev->flags & IFF_ALLMULTI)) {
+		netdev_for_each_mc_addr(ha, netdev)
+			alx_add_mc_addr(hw, ha->addr, mc_hash);
+
+		alx_write_mem32(hw, ALX_HASH_TBL0, mc_hash[0]);
+		alx_write_mem32(hw, ALX_HASH_TBL1, mc_hash[1]);
+	}
+
+	hw->rx_ctrl &= ~(ALX_MAC_CTRL_MULTIALL_EN | ALX_MAC_CTRL_PROMISC_EN);
+	if (netdev->flags & IFF_PROMISC)
+		hw->rx_ctrl |= ALX_MAC_CTRL_PROMISC_EN;
+	if (netdev->flags & IFF_ALLMULTI)
+		hw->rx_ctrl |= ALX_MAC_CTRL_MULTIALL_EN;
+
+	alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
+}
+
+static void alx_set_rx_mode(struct net_device *netdev)
+{
+	__alx_set_rx_mode(netdev);
+}
+
+static int alx_set_mac_address(struct net_device *netdev, void *data)
+{
+	struct alx_priv *alx = netdev_priv(netdev);
+	struct alx_hw *hw = &alx->hw;
+	struct sockaddr *addr = data;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	if (netdev->addr_assign_type & NET_ADDR_RANDOM)
+		netdev->addr_assign_type ^= NET_ADDR_RANDOM;
+
+	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+	memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
+	alx_set_macaddr(hw, hw->mac_addr);
+
+	return 0;
+}
+
+static int alx_alloc_descriptors(struct alx_priv *alx)
+{
+	alx->txq.bufs = kcalloc(alx->tx_ringsz,
+				sizeof(struct alx_buffer),
+				GFP_KERNEL);
+	if (!alx->txq.bufs)
+		return -ENOMEM;
+
+	alx->rxq.bufs = kcalloc(alx->rx_ringsz,
+				sizeof(struct alx_buffer),
+				GFP_KERNEL);
+	if (!alx->rxq.bufs)
+		goto out_free;
+
+	/* physical tx/rx ring descriptors
+	 *
+	 * Allocate them as a single chunk because they must not cross a
+	 * 4G boundary (hardware has a single register for high 32 bits
+	 * of addresses only)
+	 */
+	alx->descmem.size = sizeof(struct alx_txd) * alx->tx_ringsz +
+			    sizeof(struct alx_rrd) * alx->rx_ringsz +
+			    sizeof(struct alx_rfd) * alx->rx_ringsz;
+	alx->descmem.virt = dma_zalloc_coherent(&alx->hw.pdev->dev,
+						alx->descmem.size,
+						&alx->descmem.dma,
+						GFP_KERNEL);
+	if (!alx->descmem.virt)
+		goto out_free;
+
+	alx->txq.tpd = (void *)alx->descmem.virt;
+	alx->txq.tpd_dma = alx->descmem.dma;
+
+	/* alignment requirement for next block */
+	BUILD_BUG_ON(sizeof(struct alx_txd) % 8);
+
+	alx->rxq.rrd =
+		(void *)((u8 *)alx->descmem.virt +
+			 sizeof(struct alx_txd) * alx->tx_ringsz);
+	alx->rxq.rrd_dma = alx->descmem.dma +
+			   sizeof(struct alx_txd) * alx->tx_ringsz;
+
+	/* alignment requirement for next block */
+	BUILD_BUG_ON(sizeof(struct alx_rrd) % 8);
+
+	alx->rxq.rfd =
+		(void *)((u8 *)alx->descmem.virt +
+			 sizeof(struct alx_txd) * alx->tx_ringsz +
+			 sizeof(struct alx_rrd) * alx->rx_ringsz);
+	alx->rxq.rfd_dma = alx->descmem.dma +
+			   sizeof(struct alx_txd) * alx->tx_ringsz +
+			   sizeof(struct alx_rrd) * alx->rx_ringsz;
+
+	return 0;
+out_free:
+	kfree(alx->txq.bufs);
+	kfree(alx->rxq.bufs);
+	return -ENOMEM;
+}
+
+static int alx_alloc_rings(struct alx_priv *alx)
+{
+	int err;
+
+	err = alx_alloc_descriptors(alx);
+	if (err)
+		return err;
+
+	alx->int_mask &= ~ALX_ISR_ALL_QUEUES;
+	alx->int_mask |= ALX_ISR_TX_Q0 | ALX_ISR_RX_Q0;
+
+	netif_napi_add(alx->dev, &alx->napi, alx_poll, 64);
+
+	alx_reinit_rings(alx);
+	return 0;
+}
+
+static void alx_free_rings(struct alx_priv *alx)
+{
+	netif_napi_del(&alx->napi);
+	alx_free_buffers(alx);
+
+	kfree(alx->txq.bufs);
+	kfree(alx->rxq.bufs);
+
+	dma_free_coherent(&alx->hw.pdev->dev,
+			  alx->descmem.size,
+			  alx->descmem.virt,
+			  alx->descmem.dma);
+}
+
+static void alx_config_vector_mapping(struct alx_priv *alx)
+{
+	struct alx_hw *hw = &alx->hw;
+
+	alx_write_mem32(hw, ALX_MSI_MAP_TBL1, 0);
+	alx_write_mem32(hw, ALX_MSI_MAP_TBL2, 0);
+	alx_write_mem32(hw, ALX_MSI_ID_MAP, 0);
+}
+
+static void alx_irq_enable(struct alx_priv *alx)
+{
+	struct alx_hw *hw = &alx->hw;
+
+	/* level-1 interrupt switch */
+	alx_write_mem32(hw, ALX_ISR, 0);
+	alx_write_mem32(hw, ALX_IMR, alx->int_mask);
+	alx_post_write(hw);
+}
+
+static void alx_irq_disable(struct alx_priv *alx)
+{
+	struct alx_hw *hw = &alx->hw;
+
+	alx_write_mem32(hw, ALX_ISR, ALX_ISR_DIS);
+	alx_write_mem32(hw, ALX_IMR, 0);
+	alx_post_write(hw);
+
+	synchronize_irq(alx->hw.pdev->irq);
+}
+
+static int alx_request_irq(struct alx_priv *alx)
+{
+	struct pci_dev *pdev = alx->hw.pdev;
+	struct alx_hw *hw = &alx->hw;
+	int err;
+	u32 msi_ctrl;
+
+	msi_ctrl = (hw->imt >> 1) << ALX_MSI_RETRANS_TM_SHIFT;
+
+	if (!pci_enable_msi(alx->hw.pdev)) {
+		alx->msi = true;
+
+		alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER,
+				msi_ctrl | ALX_MSI_MASK_SEL_LINE);
+		err = request_irq(pdev->irq, alx_intr_msi, 0,
+				  alx->dev->name, alx);
+		if (!err)
+			goto out;
+		/* fall back to legacy interrupt */
+		pci_disable_msi(alx->hw.pdev);
+	}
+
+	alx_write_mem32(hw, ALX_MSI_RETRANS_TIMER, 0);
+	err = request_irq(pdev->irq, alx_intr_legacy, IRQF_SHARED,
+			  alx->dev->name, alx);
+out:
+	if (!err)
+		alx_config_vector_mapping(alx);
+	return err;
+}
+
+static void alx_free_irq(struct alx_priv *alx)
+{
+	struct pci_dev *pdev = alx->hw.pdev;
+
+	free_irq(pdev->irq, alx);
+
+	if (alx->msi) {
+		pci_disable_msi(alx->hw.pdev);
+		alx->msi = false;
+	}
+}
+
+static int alx_identify_hw(struct alx_priv *alx)
+{
+	struct alx_hw *hw = &alx->hw;
+	int rev = alx_hw_revision(hw);
+
+	if (rev > ALX_REV_C0)
+		return -EINVAL;
+
+	hw->max_dma_chnl = rev >= ALX_REV_B0 ? 4 : 2;
+
+	return 0;
+}
+
+static int alx_init_sw(struct alx_priv *alx)
+{
+	struct pci_dev *pdev = alx->hw.pdev;
+	struct alx_hw *hw = &alx->hw;
+	int err;
+
+	err = alx_identify_hw(alx);
+	if (err) {
+		dev_err(&pdev->dev, "unrecognized chip, aborting\n");
+		return err;
+	}
+
+	alx->hw.lnk_patch =
+		pdev->device == ALX_DEV_ID_AR8161 &&
+		pdev->subsystem_vendor == PCI_VENDOR_ID_ATTANSIC &&
+		pdev->subsystem_device == 0x0091 &&
+		pdev->revision == 0;
+
+	hw->smb_timer = 400;
+	hw->mtu = alx->dev->mtu;
+	alx->rxbuf_size = ALIGN(ALX_RAW_MTU(hw->mtu), 8);
+	alx->tx_ringsz = 256;
+	alx->rx_ringsz = 512;
+	hw->sleep_ctrl = ALX_SLEEP_WOL_MAGIC | ALX_SLEEP_WOL_PHY;
+	hw->imt = 200;
+	alx->int_mask = ALX_ISR_MISC;
+	hw->dma_chnl = hw->max_dma_chnl;
+	hw->ith_tpd = alx->tx_ringsz / 3;
+	hw->link_speed = SPEED_UNKNOWN;
+	hw->adv_cfg = ADVERTISED_Autoneg |
+		      ADVERTISED_10baseT_Half |
+		      ADVERTISED_10baseT_Full |
+		      ADVERTISED_100baseT_Full |
+		      ADVERTISED_100baseT_Half |
+		      ADVERTISED_1000baseT_Full;
+	hw->flowctrl = ALX_FC_ANEG | ALX_FC_RX | ALX_FC_TX;
+
+	hw->rx_ctrl = ALX_MAC_CTRL_WOLSPED_SWEN |
+		      ALX_MAC_CTRL_MHASH_ALG_HI5B |
+		      ALX_MAC_CTRL_BRD_EN |
+		      ALX_MAC_CTRL_PCRCE |
+		      ALX_MAC_CTRL_CRCE |
+		      ALX_MAC_CTRL_RXFC_EN |
+		      ALX_MAC_CTRL_TXFC_EN |
+		      7 << ALX_MAC_CTRL_PRMBLEN_SHIFT;
+
+	return err;
+}
+
+static netdev_features_t alx_fix_features(struct net_device *netdev,
+					  netdev_features_t features)
+{
+	if (netdev->mtu > ALX_MAX_TSO_PKT_SIZE)
+		features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+
+	return features;
+}
+
+static void alx_netif_stop(struct alx_priv *alx)
+{
+	alx->dev->trans_start = jiffies;
+	if (netif_carrier_ok(alx->dev)) {
+		netif_carrier_off(alx->dev);
+		netif_tx_disable(alx->dev);
+		napi_disable(&alx->napi);
+	}
+}
+
+static void alx_halt(struct alx_priv *alx)
+{
+	struct alx_hw *hw = &alx->hw;
+
+	alx_netif_stop(alx);
+	hw->link_speed = SPEED_UNKNOWN;
+
+	alx_reset_mac(hw);
+
+	/* disable l0s/l1 */
+	alx_enable_aspm(hw, false, false);
+	alx_irq_disable(alx);
+	alx_free_buffers(alx);
+}
+
+static void alx_configure(struct alx_priv *alx)
+{
+	struct alx_hw *hw = &alx->hw;
+
+	alx_configure_basic(hw);
+	alx_disable_rss(hw);
+	__alx_set_rx_mode(alx->dev);
+
+	alx_write_mem32(hw, ALX_MAC_CTRL, hw->rx_ctrl);
+}
+
+static void alx_activate(struct alx_priv *alx)
+{
+	/* hardware settings were lost, restore them */
+	alx_reinit_rings(alx);
+	alx_configure(alx);
+
+	/* clear old interrupts */
+	alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS);
+
+	alx_irq_enable(alx);
+
+	alx_schedule_link_check(alx);
+}
+
+static void alx_reinit(struct alx_priv *alx)
+{
+	ASSERT_RTNL();
+
+	alx_halt(alx);
+	alx_activate(alx);
+}
+
+static int alx_change_mtu(struct net_device *netdev, int mtu)
+{
+	struct alx_priv *alx = netdev_priv(netdev);
+	int max_frame = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+
+	if ((max_frame < ALX_MIN_FRAME_SIZE) ||
+	    (max_frame > ALX_MAX_FRAME_SIZE))
+		return -EINVAL;
+
+	if (netdev->mtu == mtu)
+		return 0;
+
+	netdev->mtu = mtu;
+	alx->hw.mtu = mtu;
+	alx->rxbuf_size = mtu > ALX_DEF_RXBUF_SIZE ?
+			   ALIGN(max_frame, 8) : ALX_DEF_RXBUF_SIZE;
+	netdev_update_features(netdev);
+	if (netif_running(netdev))
+		alx_reinit(alx);
+	return 0;
+}
+
+static void alx_netif_start(struct alx_priv *alx)
+{
+	netif_tx_wake_all_queues(alx->dev);
+	napi_enable(&alx->napi);
+	netif_carrier_on(alx->dev);
+}
+
+static int __alx_open(struct alx_priv *alx, bool resume)
+{
+	int err;
+
+	if (!resume)
+		netif_carrier_off(alx->dev);
+
+	err = alx_alloc_rings(alx);
+	if (err)
+		return err;
+
+	alx_configure(alx);
+
+	err = alx_request_irq(alx);
+	if (err)
+		goto out_free_rings;
+
+	/* clear old interrupts */
+	alx_write_mem32(&alx->hw, ALX_ISR, ~(u32)ALX_ISR_DIS);
+
+	alx_irq_enable(alx);
+
+	if (!resume)
+		netif_tx_start_all_queues(alx->dev);
+
+	alx_schedule_link_check(alx);
+	return 0;
+
+out_free_rings:
+	alx_free_rings(alx);
+	return err;
+}
+
+static void __alx_stop(struct alx_priv *alx)
+{
+	alx_halt(alx);
+	alx_free_irq(alx);
+	alx_free_rings(alx);
+}
+
+static const char *alx_speed_desc(u16 speed)
+{
+	switch (speed) {
+	case SPEED_1000 + DUPLEX_FULL:
+		return "1 Gbps Full";
+	case SPEED_100 + DUPLEX_FULL:
+		return "100 Mbps Full";
+	case SPEED_100 + DUPLEX_HALF:
+		return "100 Mbps Half";
+	case SPEED_10 + DUPLEX_FULL:
+		return "10 Mbps Full";
+	case SPEED_10 + DUPLEX_HALF:
+		return "10 Mbps Half";
+	default:
+		return "Unknown speed";
+	}
+}
+
+static void alx_check_link(struct alx_priv *alx)
+{
+	struct alx_hw *hw = &alx->hw;
+	unsigned long flags;
+	int speed, old_speed;
+	int err;
+
+	/* clear PHY internal interrupt status, otherwise the main
+	 * interrupt status will be asserted forever
+	 */
+	alx_clear_phy_intr(hw);
+
+	err = alx_get_phy_link(hw, &speed);
+	if (err < 0)
+		goto reset;
+
+	spin_lock_irqsave(&alx->irq_lock, flags);
+	alx->int_mask |= ALX_ISR_PHY;
+	alx_write_mem32(hw, ALX_IMR, alx->int_mask);
+	spin_unlock_irqrestore(&alx->irq_lock, flags);
+
+	old_speed = hw->link_speed;
+
+	if (old_speed == speed)
+		return;
+	hw->link_speed = speed;
+
+	if (speed != SPEED_UNKNOWN) {
+		netif_info(alx, link, alx->dev,
+			   "NIC Up: %s\n", alx_speed_desc(speed));
+		alx_post_phy_link(hw);
+		alx_enable_aspm(hw, true, true);
+		alx_start_mac(hw);
+
+		if (old_speed == SPEED_UNKNOWN)
+			alx_netif_start(alx);
+	} else {
+		/* link is now down */
+		alx_netif_stop(alx);
+		netif_info(alx, link, alx->dev, "Link Down\n");
+		err = alx_reset_mac(hw);
+		if (err)
+			goto reset;
+		alx_irq_disable(alx);
+
+		/* MAC reset causes all HW settings to be lost, restore all */
+		err = alx_reinit_rings(alx);
+		if (err)
+			goto reset;
+		alx_configure(alx);
+		alx_enable_aspm(hw, false, true);
+		alx_post_phy_link(hw);
+		alx_irq_enable(alx);
+	}
+
+	return;
+
+reset:
+	alx_schedule_reset(alx);
+}
+
+static int alx_open(struct net_device *netdev)
+{
+	return __alx_open(netdev_priv(netdev), false);
+}
+
+static int alx_stop(struct net_device *netdev)
+{
+	__alx_stop(netdev_priv(netdev));
+	return 0;
+}
+
+static int __alx_shutdown(struct pci_dev *pdev, bool *wol_en)
+{
+	struct alx_priv *alx = pci_get_drvdata(pdev);
+	struct net_device *netdev = alx->dev;
+	struct alx_hw *hw = &alx->hw;
+	int err, speed;
+
+	netif_device_detach(netdev);
+
+	if (netif_running(netdev))
+		__alx_stop(alx);
+
+#ifdef CONFIG_PM_SLEEP
+	err = pci_save_state(pdev);
+	if (err)
+		return err;
+#endif
+
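+	/* Switch the link to a power-saving speed and program the
+	 * MAC/PHY wake-on-LAN configuration before the device loses
+	 * power.
+	 */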
+	err = alx_select_powersaving_speed(hw, &speed);
+	if (err)
+		return err;
+	err = alx_clear_phy_intr(hw);
+	if (err)
+		return err;
+	err = alx_pre_suspend(hw, speed);
+	if (err)
+		return err;
+	err = alx_config_wol(hw);
+	if (err)
+		return err;
+
+	*wol_en = false;
+	if (hw->sleep_ctrl & ALX_SLEEP_ACTIVE) {
+		netif_info(alx, wol, netdev,
+			   "wol: ctrl=%X, speed=%X\n",
+			   hw->sleep_ctrl, speed);
+		device_set_wakeup_enable(&pdev->dev, true);
+		*wol_en = true;
+	}
+
+	pci_disable_device(pdev);
+
+	return 0;
+}
+
+static void alx_shutdown(struct pci_dev *pdev)
+{
+	int err;
+	bool wol_en;
+
+	err = __alx_shutdown(pdev, &wol_en);
+	if (!err) {
+		pci_wake_from_d3(pdev, wol_en);
+		pci_set_power_state(pdev, PCI_D3hot);
+	} else {
+		dev_err(&pdev->dev, "shutdown fail %d\n", err);
+	}
+}
+
+static void alx_link_check(struct work_struct *work)
+{
+	struct alx_priv *alx;
+
+	alx = container_of(work, struct alx_priv, link_check_wk);
+
+	rtnl_lock();
+	alx_check_link(alx);
+	rtnl_unlock();
+}
+
+static void alx_reset(struct work_struct *work)
+{
+	struct alx_priv *alx = container_of(work, struct alx_priv, reset_wk);
+
+	rtnl_lock();
+	alx_reinit(alx);
+	rtnl_unlock();
+}
+
+static int alx_tx_csum(struct sk_buff *skb, struct alx_txd *first)
+{
+	u8 cso, css;
+
+	if (skb->ip_summed != CHECKSUM_PARTIAL)
+		return 0;
+
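+	/* The checksum start/offset fields are programmed in 16-bit
+	 * word units, so an odd checksum start offset cannot be
+	 * expressed.
+	 */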
+	cso = skb_checksum_start_offset(skb);
+	if (cso & 1)
+		return -EINVAL;
+
+	css = cso + skb->csum_offset;
+	first->word1 |= cpu_to_le32((cso >> 1) << TPD_CXSUMSTART_SHIFT);
+	first->word1 |= cpu_to_le32((css >> 1) << TPD_CXSUMOFFSET_SHIFT);
+	first->word1 |= cpu_to_le32(1 << TPD_CXSUM_EN_SHIFT);
+
+	return 0;
+}
+
+static int alx_map_tx_skb(struct alx_priv *alx, struct sk_buff *skb)
+{
+	struct alx_tx_queue *txq = &alx->txq;
+	struct alx_txd *tpd, *first_tpd;
+	dma_addr_t dma;
+	int maplen, f, first_idx = txq->write_idx;
+
+	first_tpd = &txq->tpd[txq->write_idx];
+	tpd = first_tpd;
+
+	maplen = skb_headlen(skb);
+	dma = dma_map_single(&alx->hw.pdev->dev, skb->data, maplen,
+			     DMA_TO_DEVICE);
+	if (dma_mapping_error(&alx->hw.pdev->dev, dma))
+		goto err_dma;
+
+	dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
+	dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);
+
+	tpd->adrl.addr = cpu_to_le64(dma);
+	tpd->len = cpu_to_le16(maplen);
+
+	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) {
+		struct skb_frag_struct *frag;
+
+		frag = &skb_shinfo(skb)->frags[f];
+
+		if (++txq->write_idx == alx->tx_ringsz)
+			txq->write_idx = 0;
+		tpd = &txq->tpd[txq->write_idx];
+
+		tpd->word1 = first_tpd->word1;
+
+		maplen = skb_frag_size(frag);
+		dma = skb_frag_dma_map(&alx->hw.pdev->dev, frag, 0,
+				       maplen, DMA_TO_DEVICE);
+		if (dma_mapping_error(&alx->hw.pdev->dev, dma))
+			goto err_dma;
+		dma_unmap_len_set(&txq->bufs[txq->write_idx], size, maplen);
+		dma_unmap_addr_set(&txq->bufs[txq->write_idx], dma, dma);
+
+		tpd->adrl.addr = cpu_to_le64(dma);
+		tpd->len = cpu_to_le16(maplen);
+	}
+
+	/* last TPD, set EOP flag and store skb */
+	tpd->word1 |= cpu_to_le32(1 << TPD_EOP_SHIFT);
+	txq->bufs[txq->write_idx].skb = skb;
+
+	if (++txq->write_idx == alx->tx_ringsz)
+		txq->write_idx = 0;
+
+	return 0;
+
+err_dma:
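+	/* a mapping failed: undo the DMA mappings set up for this skb */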
+	f = first_idx;
+	while (f != txq->write_idx) {
+		alx_free_txbuf(alx, f);
+		if (++f == alx->tx_ringsz)
+			f = 0;
+	}
+	return -ENOMEM;
+}
+
+static netdev_tx_t alx_start_xmit(struct sk_buff *skb,
+				  struct net_device *netdev)
+{
+	struct alx_priv *alx = netdev_priv(netdev);
+	struct alx_tx_queue *txq = &alx->txq;
+	struct alx_txd *first;
+	int tpdreq = skb_shinfo(skb)->nr_frags + 1;
+
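+	/* One TPD is needed for the linear head plus one per page
+	 * fragment; stop the queue and drop the skb if that many are
+	 * not free.
+	 */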
+	if (alx_tpd_avail(alx) < tpdreq) {
+		netif_stop_queue(alx->dev);
+		goto drop;
+	}
+
+	first = &txq->tpd[txq->write_idx];
+	memset(first, 0, sizeof(*first));
+
+	if (alx_tx_csum(skb, first))
+		goto drop;
+
+	if (alx_map_tx_skb(alx, skb) < 0)
+		goto drop;
+
+	netdev_sent_queue(alx->dev, skb->len);
+
+	/* flush updates before updating hardware */
+	wmb();
+	alx_write_mem16(&alx->hw, ALX_TPD_PRI0_PIDX, txq->write_idx);
+
+	if (alx_tpd_avail(alx) < alx->tx_ringsz/8)
+		netif_stop_queue(alx->dev);
+
+	return NETDEV_TX_OK;
+
+drop:
+	dev_kfree_skb(skb);
+	return NETDEV_TX_OK;
+}
+
+static void alx_tx_timeout(struct net_device *dev)
+{
+	struct alx_priv *alx = netdev_priv(dev);
+
+	alx_schedule_reset(alx);
+}
+
+static int alx_mdio_read(struct net_device *netdev,
+			 int prtad, int devad, u16 addr)
+{
+	struct alx_priv *alx = netdev_priv(netdev);
+	struct alx_hw *hw = &alx->hw;
+	u16 val;
+	int err;
+
+	if (prtad != hw->mdio.prtad)
+		return -EINVAL;
+
+	if (devad == MDIO_DEVAD_NONE)
+		err = alx_read_phy_reg(hw, addr, &val);
+	else
+		err = alx_read_phy_ext(hw, devad, addr, &val);
+
+	if (err)
+		return err;
+	return val;
+}
+
+static int alx_mdio_write(struct net_device *netdev,
+			  int prtad, int devad, u16 addr, u16 val)
+{
+	struct alx_priv *alx = netdev_priv(netdev);
+	struct alx_hw *hw = &alx->hw;
+
+	if (prtad != hw->mdio.prtad)
+		return -EINVAL;
+
+	if (devad == MDIO_DEVAD_NONE)
+		return alx_write_phy_reg(hw, addr, val);
+
+	return alx_write_phy_ext(hw, devad, addr, val);
+}
+
+static int alx_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+	struct alx_priv *alx = netdev_priv(netdev);
+
+	if (!netif_running(netdev))
+		return -EAGAIN;
+
+	return mdio_mii_ioctl(&alx->hw.mdio, if_mii(ifr), cmd);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void alx_poll_controller(struct net_device *netdev)
+{
+	struct alx_priv *alx = netdev_priv(netdev);
+
+	if (alx->msi)
+		alx_intr_msi(0, alx);
+	else
+		alx_intr_legacy(0, alx);
+}
+#endif
+
+static const struct net_device_ops alx_netdev_ops = {
+	.ndo_open               = alx_open,
+	.ndo_stop               = alx_stop,
+	.ndo_start_xmit         = alx_start_xmit,
+	.ndo_set_rx_mode        = alx_set_rx_mode,
+	.ndo_validate_addr      = eth_validate_addr,
+	.ndo_set_mac_address    = alx_set_mac_address,
+	.ndo_change_mtu         = alx_change_mtu,
+	.ndo_do_ioctl           = alx_ioctl,
+	.ndo_tx_timeout         = alx_tx_timeout,
+	.ndo_fix_features	= alx_fix_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller    = alx_poll_controller,
+#endif
+};
+
+static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct net_device *netdev;
+	struct alx_priv *alx;
+	struct alx_hw *hw;
+	bool phy_configured;
+	int bars, pm_cap, err;
+
+	err = pci_enable_device_mem(pdev);
+	if (err)
+		return err;
+
+	/* The alx chip can DMA to 64-bit addresses, but it uses a single
+	 * shared register for the high 32 bits, so only a single, aligned,
+	 * 4 GB physical address range can be used for descriptors.
+	 */
+	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
+	    !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+		dev_dbg(&pdev->dev, "DMA to 64-BIT addresses\n");
+	} else {
+		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+		if (err) {
+			err = dma_set_coherent_mask(&pdev->dev,
+						    DMA_BIT_MASK(32));
+			if (err) {
+				dev_err(&pdev->dev,
+					"No usable DMA config, aborting\n");
+				goto out_pci_disable;
+			}
+		}
+	}
+
+	bars = pci_select_bars(pdev, IORESOURCE_MEM);
+	err = pci_request_selected_regions(pdev, bars, alx_drv_name);
+	if (err) {
+		dev_err(&pdev->dev,
+			"pci_request_selected_regions failed(bars:%d)\n", bars);
+		goto out_pci_disable;
+	}
+
+	pci_enable_pcie_error_reporting(pdev);
+	pci_set_master(pdev);
+
+	pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
+	if (pm_cap == 0) {
+		dev_err(&pdev->dev,
+			"Can't find power management capability, aborting\n");
+		err = -EIO;
+		goto out_pci_release;
+	}
+
+	err = pci_set_power_state(pdev, PCI_D0);
+	if (err)
+		goto out_pci_release;
+
+	netdev = alloc_etherdev(sizeof(*alx));
+	if (!netdev) {
+		err = -ENOMEM;
+		goto out_pci_release;
+	}
+
+	SET_NETDEV_DEV(netdev, &pdev->dev);
+	alx = netdev_priv(netdev);
+	alx->dev = netdev;
+	alx->hw.pdev = pdev;
+	alx->msg_enable = NETIF_MSG_LINK | NETIF_MSG_HW | NETIF_MSG_IFUP |
+			  NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR | NETIF_MSG_WOL;
+	hw = &alx->hw;
+	pci_set_drvdata(pdev, alx);
+
+	hw->hw_addr = pci_ioremap_bar(pdev, 0);
+	if (!hw->hw_addr) {
+		dev_err(&pdev->dev, "cannot map device registers\n");
+		err = -EIO;
+		goto out_free_netdev;
+	}
+
+	netdev->netdev_ops = &alx_netdev_ops;
+	SET_ETHTOOL_OPS(netdev, &alx_ethtool_ops);
+	netdev->irq = pdev->irq;
+	netdev->watchdog_timeo = ALX_WATCHDOG_TIME;
+
+	if (ent->driver_data & ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG)
+		pdev->dev_flags |= PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG;
+
+	err = alx_init_sw(alx);
+	if (err) {
+		dev_err(&pdev->dev, "net device private data init failed\n");
+		goto out_unmap;
+	}
+
+	alx_reset_pcie(hw);
+
+	phy_configured = alx_phy_configured(hw);
+
+	if (!phy_configured)
+		alx_reset_phy(hw);
+
+	err = alx_reset_mac(hw);
+	if (err) {
+		dev_err(&pdev->dev, "MAC Reset failed, error = %d\n", err);
+		goto out_unmap;
+	}
+
+	/* setup link to put it in a known good starting state */
+	if (!phy_configured) {
+		err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl);
+		if (err) {
+			dev_err(&pdev->dev,
+				"failed to configure PHY speed/duplex (err=%d)\n",
+				err);
+			goto out_unmap;
+		}
+	}
+
+	netdev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
+
+	if (alx_get_perm_macaddr(hw, hw->perm_addr)) {
+		dev_warn(&pdev->dev,
+			 "Invalid permanent address programmed, using random one\n");
+		eth_hw_addr_random(netdev);
+		memcpy(hw->perm_addr, netdev->dev_addr, netdev->addr_len);
+	}
+
+	memcpy(hw->mac_addr, hw->perm_addr, ETH_ALEN);
+	memcpy(netdev->dev_addr, hw->mac_addr, ETH_ALEN);
+	memcpy(netdev->perm_addr, hw->perm_addr, ETH_ALEN);
+
+	hw->mdio.prtad = 0;
+	hw->mdio.mmds = 0;
+	hw->mdio.dev = netdev;
+	hw->mdio.mode_support = MDIO_SUPPORTS_C45 |
+				MDIO_SUPPORTS_C22 |
+				MDIO_EMULATE_C22;
+	hw->mdio.mdio_read = alx_mdio_read;
+	hw->mdio.mdio_write = alx_mdio_write;
+
+	if (!alx_get_phy_info(hw)) {
+		dev_err(&pdev->dev, "failed to identify PHY\n");
+		err = -EIO;
+		goto out_unmap;
+	}
+
+	INIT_WORK(&alx->link_check_wk, alx_link_check);
+	INIT_WORK(&alx->reset_wk, alx_reset);
+	spin_lock_init(&alx->hw.mdio_lock);
+	spin_lock_init(&alx->irq_lock);
+
+	netif_carrier_off(netdev);
+
+	err = register_netdev(netdev);
+	if (err) {
+		dev_err(&pdev->dev, "register netdevice failed\n");
+		goto out_unmap;
+	}
+
+	device_set_wakeup_enable(&pdev->dev, hw->sleep_ctrl);
+
+	netdev_info(netdev,
+		    "Qualcomm Atheros AR816x/AR817x Ethernet [%pM]\n",
+		    netdev->dev_addr);
+
+	return 0;
+
+out_unmap:
+	iounmap(hw->hw_addr);
+out_free_netdev:
+	free_netdev(netdev);
+out_pci_release:
+	pci_release_selected_regions(pdev, bars);
+out_pci_disable:
+	pci_disable_device(pdev);
+	return err;
+}
+
+static void alx_remove(struct pci_dev *pdev)
+{
+	struct alx_priv *alx = pci_get_drvdata(pdev);
+	struct alx_hw *hw = &alx->hw;
+
+	cancel_work_sync(&alx->link_check_wk);
+	cancel_work_sync(&alx->reset_wk);
+
+	/* restore permanent mac address */
+	alx_set_macaddr(hw, hw->perm_addr);
+
+	unregister_netdev(alx->dev);
+	iounmap(hw->hw_addr);
+	pci_release_selected_regions(pdev,
+				     pci_select_bars(pdev, IORESOURCE_MEM));
+
+	pci_disable_pcie_error_reporting(pdev);
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+
+	free_netdev(alx->dev);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int alx_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	int err;
+	bool wol_en;
+
+	err = __alx_shutdown(pdev, &wol_en);
+	if (err) {
+		dev_err(&pdev->dev, "shutdown fail in suspend %d\n", err);
+		return err;
+	}
+
+	if (wol_en) {
+		pci_prepare_to_sleep(pdev);
+	} else {
+		pci_wake_from_d3(pdev, false);
+		pci_set_power_state(pdev, PCI_D3hot);
+	}
+
+	return 0;
+}
+
+static int alx_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct alx_priv *alx = pci_get_drvdata(pdev);
+	struct net_device *netdev = alx->dev;
+	struct alx_hw *hw = &alx->hw;
+	int err;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	pci_save_state(pdev);
+
+	pci_enable_wake(pdev, PCI_D3hot, 0);
+	pci_enable_wake(pdev, PCI_D3cold, 0);
+
+	hw->link_speed = SPEED_UNKNOWN;
+	alx->int_mask = ALX_ISR_MISC;
+
+	alx_reset_pcie(hw);
+	alx_reset_phy(hw);
+
+	err = alx_reset_mac(hw);
+	if (err) {
+		netif_err(alx, hw, alx->dev,
+			  "resume:reset_mac fail %d\n", err);
+		return -EIO;
+	}
+
+	err = alx_setup_speed_duplex(hw, hw->adv_cfg, hw->flowctrl);
+	if (err) {
+		netif_err(alx, hw, alx->dev,
+			  "resume:setup_speed_duplex fail %d\n", err);
+		return -EIO;
+	}
+
+	if (netif_running(netdev)) {
+		err = __alx_open(alx, true);
+		if (err)
+			return err;
+	}
+
+	netif_device_attach(netdev);
+
+	return err;
+}
+#endif
+
+static pci_ers_result_t alx_pci_error_detected(struct pci_dev *pdev,
+					       pci_channel_state_t state)
+{
+	struct alx_priv *alx = pci_get_drvdata(pdev);
+	struct net_device *netdev = alx->dev;
+	pci_ers_result_t rc = PCI_ERS_RESULT_NEED_RESET;
+
+	dev_info(&pdev->dev, "pci error detected\n");
+
+	rtnl_lock();
+
+	if (netif_running(netdev)) {
+		netif_device_detach(netdev);
+		alx_halt(alx);
+	}
+
+	if (state == pci_channel_io_perm_failure)
+		rc = PCI_ERS_RESULT_DISCONNECT;
+	else
+		pci_disable_device(pdev);
+
+	rtnl_unlock();
+
+	return rc;
+}
+
+static pci_ers_result_t alx_pci_error_slot_reset(struct pci_dev *pdev)
+{
+	struct alx_priv *alx = pci_get_drvdata(pdev);
+	struct alx_hw *hw = &alx->hw;
+	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
+
+	dev_info(&pdev->dev, "pci error slot reset\n");
+
+	rtnl_lock();
+
+	if (pci_enable_device(pdev)) {
+		dev_err(&pdev->dev, "Failed to re-enable PCI device after reset\n");
+		goto out;
+	}
+
+	pci_set_master(pdev);
+	pci_enable_wake(pdev, PCI_D3hot, 0);
+	pci_enable_wake(pdev, PCI_D3cold, 0);
+
+	alx_reset_pcie(hw);
+	if (!alx_reset_mac(hw))
+		rc = PCI_ERS_RESULT_RECOVERED;
+out:
+	pci_cleanup_aer_uncorrect_error_status(pdev);
+
+	rtnl_unlock();
+
+	return rc;
+}
+
+static void alx_pci_error_resume(struct pci_dev *pdev)
+{
+	struct alx_priv *alx = pci_get_drvdata(pdev);
+	struct net_device *netdev = alx->dev;
+
+	dev_info(&pdev->dev, "pci error resume\n");
+
+	rtnl_lock();
+
+	if (netif_running(netdev)) {
+		alx_activate(alx);
+		netif_device_attach(netdev);
+	}
+
+	rtnl_unlock();
+}
+
+static const struct pci_error_handlers alx_err_handlers = {
+	.error_detected = alx_pci_error_detected,
+	.slot_reset     = alx_pci_error_slot_reset,
+	.resume         = alx_pci_error_resume,
+};
+
+#ifdef CONFIG_PM_SLEEP
+static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
+#define ALX_PM_OPS      (&alx_pm_ops)
+#else
+#define ALX_PM_OPS      NULL
+#endif
+
+static DEFINE_PCI_DEVICE_TABLE(alx_pci_tbl) = {
+	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8161),
+	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
+	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2200),
+	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
+	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162),
+	  .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
+	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) },
+	{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8172) },
+	{}
+};
+
+static struct pci_driver alx_driver = {
+	.name        = alx_drv_name,
+	.id_table    = alx_pci_tbl,
+	.probe       = alx_probe,
+	.remove      = alx_remove,
+	.shutdown    = alx_shutdown,
+	.err_handler = &alx_err_handlers,
+	.driver.pm   = ALX_PM_OPS,
+};
+
+module_pci_driver(alx_driver);
+MODULE_DEVICE_TABLE(pci, alx_pci_tbl);
+MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>");
+MODULE_AUTHOR("Qualcomm Corporation, <nic-devel@qualcomm.com>");
+MODULE_DESCRIPTION(
+	"Qualcomm Atheros(R) AR816x/AR817x PCI-E Ethernet Network Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/atheros/alx/reg.h b/drivers/net/ethernet/atheros/alx/reg.h
new file mode 100644
index 0000000..e4358c9
--- /dev/null
+++ b/drivers/net/ethernet/atheros/alx/reg.h
@@ -0,0 +1,810 @@
+/*
+ * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
+ *
+ *  This file is free software: you may copy, redistribute and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation, either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ *  This file is distributed in the hope that it will be useful, but
+ *  WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Copyright (c) 2012 Qualcomm Atheros, Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef ALX_REG_H
+#define ALX_REG_H
+
+#define ALX_DEV_ID_AR8161				0x1091
+#define ALX_DEV_ID_E2200				0xe091
+#define ALX_DEV_ID_AR8162				0x1090
+#define ALX_DEV_ID_AR8171				0x10A1
+#define ALX_DEV_ID_AR8172				0x10A0
+
+/* rev definition,
+ * bit(0): with xD support
+ * bit(1): with Card Reader function
+ * bit(7:2): real revision
+ */
+#define ALX_PCI_REVID_SHIFT				3
+#define ALX_REV_A0					0
+#define ALX_REV_A1					1
+#define ALX_REV_B0					2
+#define ALX_REV_C0					3
+
+#define ALX_DEV_CTRL					0x0060
+#define ALX_DEV_CTRL_MAXRRS_MIN				2
+
+#define ALX_MSIX_MASK					0x0090
+
+#define ALX_UE_SVRT					0x010C
+#define ALX_UE_SVRT_FCPROTERR				BIT(13)
+#define ALX_UE_SVRT_DLPROTERR				BIT(4)
+
+/* eeprom & flash load register */
+#define ALX_EFLD					0x0204
+#define ALX_EFLD_F_EXIST				BIT(10)
+#define ALX_EFLD_E_EXIST				BIT(9)
+#define ALX_EFLD_STAT					BIT(5)
+#define ALX_EFLD_START					BIT(0)
+
+/* eFuse load register */
+#define ALX_SLD						0x0218
+#define ALX_SLD_STAT					BIT(12)
+#define ALX_SLD_START					BIT(11)
+#define ALX_SLD_MAX_TO					100
+
+#define ALX_PDLL_TRNS1					0x1104
+#define ALX_PDLL_TRNS1_D3PLLOFF_EN			BIT(11)
+
+#define ALX_PMCTRL					0x12F8
+#define ALX_PMCTRL_HOTRST_WTEN				BIT(31)
+/* bit30: L0s/L1 controlled by MAC based on throughput (setting in 15A0) */
+#define ALX_PMCTRL_ASPM_FCEN				BIT(30)
+#define ALX_PMCTRL_SADLY_EN				BIT(29)
+#define ALX_PMCTRL_LCKDET_TIMER_MASK			0xF
+#define ALX_PMCTRL_LCKDET_TIMER_SHIFT			24
+#define ALX_PMCTRL_LCKDET_TIMER_DEF			0xC
+/* bit[23:20]: if the pm_request_l1 time exceeds this value, enter L0s instead of L1 */
+#define ALX_PMCTRL_L1REQ_TO_MASK			0xF
+#define ALX_PMCTRL_L1REQ_TO_SHIFT			20
+#define ALX_PMCTRL_L1REG_TO_DEF				0xF
+#define ALX_PMCTRL_TXL1_AFTER_L0S			BIT(19)
+#define ALX_PMCTRL_L1_TIMER_MASK			0x7
+#define ALX_PMCTRL_L1_TIMER_SHIFT			16
+#define ALX_PMCTRL_L1_TIMER_16US			4
+#define ALX_PMCTRL_RCVR_WT_1US				BIT(15)
+/* bit13: enable pcie clk switch in L1 state */
+#define ALX_PMCTRL_L1_CLKSW_EN				BIT(13)
+#define ALX_PMCTRL_L0S_EN				BIT(12)
+#define ALX_PMCTRL_RXL1_AFTER_L0S			BIT(11)
+#define ALX_PMCTRL_L1_BUFSRX_EN				BIT(7)
+/* bit6: power down serdes RX */
+#define ALX_PMCTRL_L1_SRDSRX_PWD			BIT(6)
+#define ALX_PMCTRL_L1_SRDSPLL_EN			BIT(5)
+#define ALX_PMCTRL_L1_SRDS_EN				BIT(4)
+#define ALX_PMCTRL_L1_EN				BIT(3)
+
+/*******************************************************/
+/* following registers are mapped only to memory space */
+/*******************************************************/
+
+#define ALX_MASTER					0x1400
+/* bit12: 1:always select pclk from serdes, don't switch to 25M */
+#define ALX_MASTER_PCLKSEL_SRDS				BIT(12)
+/* bit11: irq moderation for rx */
+#define ALX_MASTER_IRQMOD2_EN				BIT(11)
+/* bit10: irq moderation for tx/rx */
+#define ALX_MASTER_IRQMOD1_EN				BIT(10)
+#define ALX_MASTER_SYSALVTIMER_EN			BIT(7)
+#define ALX_MASTER_OOB_DIS				BIT(6)
+/* bit5: wakeup without pcie clk */
+#define ALX_MASTER_WAKEN_25M				BIT(5)
+/* bit0: MAC & DMA reset */
+#define ALX_MASTER_DMA_MAC_RST				BIT(0)
+#define ALX_DMA_MAC_RST_TO				50
+
+#define ALX_IRQ_MODU_TIMER				0x1408
+#define ALX_IRQ_MODU_TIMER1_MASK			0xFFFF
+#define ALX_IRQ_MODU_TIMER1_SHIFT			0
+
+#define ALX_PHY_CTRL					0x140C
+#define ALX_PHY_CTRL_100AB_EN				BIT(17)
+/* bit14: affects MAC & PHY, enter low-power state */
+#define ALX_PHY_CTRL_POWER_DOWN				BIT(14)
+/* bit13: 1:PLL always on, 0:can be switched off in low-power mode */
+#define ALX_PHY_CTRL_PLL_ON				BIT(13)
+#define ALX_PHY_CTRL_RST_ANALOG				BIT(12)
+#define ALX_PHY_CTRL_HIB_PULSE				BIT(11)
+#define ALX_PHY_CTRL_HIB_EN				BIT(10)
+#define ALX_PHY_CTRL_IDDQ				BIT(7)
+#define ALX_PHY_CTRL_GATE_25M				BIT(5)
+#define ALX_PHY_CTRL_LED_MODE				BIT(2)
+/* bit0: out of dsp RST state */
+#define ALX_PHY_CTRL_DSPRST_OUT				BIT(0)
+#define ALX_PHY_CTRL_DSPRST_TO				80
+#define ALX_PHY_CTRL_CLS	(ALX_PHY_CTRL_LED_MODE | \
+				 ALX_PHY_CTRL_100AB_EN | \
+				 ALX_PHY_CTRL_PLL_ON)
+
+#define ALX_MAC_STS					0x1410
+#define ALX_MAC_STS_TXQ_BUSY				BIT(3)
+#define ALX_MAC_STS_RXQ_BUSY				BIT(2)
+#define ALX_MAC_STS_TXMAC_BUSY				BIT(1)
+#define ALX_MAC_STS_RXMAC_BUSY				BIT(0)
+#define ALX_MAC_STS_IDLE	(ALX_MAC_STS_TXQ_BUSY | \
+				 ALX_MAC_STS_RXQ_BUSY | \
+				 ALX_MAC_STS_TXMAC_BUSY | \
+				 ALX_MAC_STS_RXMAC_BUSY)
+
+#define ALX_MDIO					0x1414
+#define ALX_MDIO_MODE_EXT				BIT(30)
+#define ALX_MDIO_BUSY					BIT(27)
+#define ALX_MDIO_CLK_SEL_MASK				0x7
+#define ALX_MDIO_CLK_SEL_SHIFT				24
+#define ALX_MDIO_CLK_SEL_25MD4				0
+#define ALX_MDIO_CLK_SEL_25MD128			7
+#define ALX_MDIO_START					BIT(23)
+#define ALX_MDIO_SPRES_PRMBL				BIT(22)
+/* bit21: 1:read,0:write */
+#define ALX_MDIO_OP_READ				BIT(21)
+#define ALX_MDIO_REG_MASK				0x1F
+#define ALX_MDIO_REG_SHIFT				16
+#define ALX_MDIO_DATA_MASK				0xFFFF
+#define ALX_MDIO_DATA_SHIFT				0
+#define ALX_MDIO_MAX_AC_TO				120
+
+#define ALX_MDIO_EXTN					0x1448
+#define ALX_MDIO_EXTN_DEVAD_MASK			0x1F
+#define ALX_MDIO_EXTN_DEVAD_SHIFT			16
+#define ALX_MDIO_EXTN_REG_MASK				0xFFFF
+#define ALX_MDIO_EXTN_REG_SHIFT				0
+
+#define ALX_SERDES					0x1424
+#define ALX_SERDES_PHYCLK_SLWDWN			BIT(18)
+#define ALX_SERDES_MACCLK_SLWDWN			BIT(17)
+
+#define ALX_LPI_CTRL					0x1440
+#define ALX_LPI_CTRL_EN					BIT(0)
+
+/* for B0+, bit[13..] for C0+ */
+#define ALX_HRTBT_EXT_CTRL				0x1AD0
+#define L1F_HRTBT_EXT_CTRL_PERIOD_HIGH_MASK		0x3F
+#define L1F_HRTBT_EXT_CTRL_PERIOD_HIGH_SHIFT		24
+#define L1F_HRTBT_EXT_CTRL_SWOI_STARTUP_PKT_EN		BIT(23)
+#define L1F_HRTBT_EXT_CTRL_IOAC_2_FRAGMENTED		BIT(22)
+#define L1F_HRTBT_EXT_CTRL_IOAC_1_FRAGMENTED		BIT(21)
+#define L1F_HRTBT_EXT_CTRL_IOAC_1_KEEPALIVE_EN		BIT(20)
+#define L1F_HRTBT_EXT_CTRL_IOAC_1_HAS_VLAN		BIT(19)
+#define L1F_HRTBT_EXT_CTRL_IOAC_1_IS_8023		BIT(18)
+#define L1F_HRTBT_EXT_CTRL_IOAC_1_IS_IPV6		BIT(17)
+#define L1F_HRTBT_EXT_CTRL_IOAC_2_KEEPALIVE_EN		BIT(16)
+#define L1F_HRTBT_EXT_CTRL_IOAC_2_HAS_VLAN		BIT(15)
+#define L1F_HRTBT_EXT_CTRL_IOAC_2_IS_8023		BIT(14)
+#define L1F_HRTBT_EXT_CTRL_IOAC_2_IS_IPV6		BIT(13)
+#define ALX_HRTBT_EXT_CTRL_NS_EN			BIT(12)
+#define ALX_HRTBT_EXT_CTRL_FRAG_LEN_MASK		0xFF
+#define ALX_HRTBT_EXT_CTRL_FRAG_LEN_SHIFT		4
+#define ALX_HRTBT_EXT_CTRL_IS_8023			BIT(3)
+#define ALX_HRTBT_EXT_CTRL_IS_IPV6			BIT(2)
+#define ALX_HRTBT_EXT_CTRL_WAKEUP_EN			BIT(1)
+#define ALX_HRTBT_EXT_CTRL_ARP_EN			BIT(0)
+
+#define ALX_HRTBT_REM_IPV4_ADDR				0x1AD4
+#define ALX_HRTBT_HOST_IPV4_ADDR			0x1478
+#define ALX_HRTBT_REM_IPV6_ADDR3			0x1AD8
+#define ALX_HRTBT_REM_IPV6_ADDR2			0x1ADC
+#define ALX_HRTBT_REM_IPV6_ADDR1			0x1AE0
+#define ALX_HRTBT_REM_IPV6_ADDR0			0x1AE4
+
+/* 1B8C ~ 1B94 for C0+ */
+#define ALX_SWOI_ACER_CTRL				0x1B8C
+#define ALX_SWOI_ORIG_ACK_NAK_EN			BIT(20)
+#define ALX_SWOI_ORIG_ACK_NAK_PKT_LEN_MASK		0XFF
+#define ALX_SWOI_ORIG_ACK_NAK_PKT_LEN_SHIFT		12
+#define ALX_SWOI_ORIG_ACK_ADDR_MASK			0XFFF
+#define ALX_SWOI_ORIG_ACK_ADDR_SHIFT			0
+
+#define ALX_SWOI_IOAC_CTRL_2				0x1B90
+#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_FRAG_LEN_MASK	0xFF
+#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_FRAG_LEN_SHIFT	24
+#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_PKT_LEN_MASK	0xFFF
+#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_PKT_LEN_SHIFT	12
+#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_HDR_ADDR_MASK	0xFFF
+#define ALX_SWOI_IOAC_CTRL_2_SWOI_1_HDR_ADDR_SHIFT	0
+
+#define ALX_SWOI_IOAC_CTRL_3				0x1B94
+#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_FRAG_LEN_MASK	0xFF
+#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_FRAG_LEN_SHIFT	24
+#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_PKT_LEN_MASK	0xFFF
+#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_PKT_LEN_SHIFT	12
+#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_HDR_ADDR_MASK	0xFFF
+#define ALX_SWOI_IOAC_CTRL_3_SWOI_2_HDR_ADDR_SHIFT	0
+
+/* for B0 */
+#define ALX_IDLE_DECISN_TIMER				0x1474
+/* 1ms */
+#define ALX_IDLE_DECISN_TIMER_DEF			0x400
+
+#define ALX_MAC_CTRL					0x1480
+#define ALX_MAC_CTRL_FAST_PAUSE				BIT(31)
+#define ALX_MAC_CTRL_WOLSPED_SWEN			BIT(30)
+/* bit29: 1:legacy(hi5b), 0:marvl(lo5b)*/
+#define ALX_MAC_CTRL_MHASH_ALG_HI5B			BIT(29)
+#define ALX_MAC_CTRL_BRD_EN				BIT(26)
+#define ALX_MAC_CTRL_MULTIALL_EN			BIT(25)
+#define ALX_MAC_CTRL_SPEED_MASK				0x3
+#define ALX_MAC_CTRL_SPEED_SHIFT			20
+#define ALX_MAC_CTRL_SPEED_10_100			1
+#define ALX_MAC_CTRL_SPEED_1000				2
+#define ALX_MAC_CTRL_PROMISC_EN				BIT(15)
+#define ALX_MAC_CTRL_VLANSTRIP				BIT(14)
+#define ALX_MAC_CTRL_PRMBLEN_MASK			0xF
+#define ALX_MAC_CTRL_PRMBLEN_SHIFT			10
+#define ALX_MAC_CTRL_PCRCE				BIT(7)
+#define ALX_MAC_CTRL_CRCE				BIT(6)
+#define ALX_MAC_CTRL_FULLD				BIT(5)
+#define ALX_MAC_CTRL_RXFC_EN				BIT(3)
+#define ALX_MAC_CTRL_TXFC_EN				BIT(2)
+#define ALX_MAC_CTRL_RX_EN				BIT(1)
+#define ALX_MAC_CTRL_TX_EN				BIT(0)
+
+#define ALX_STAD0					0x1488
+#define ALX_STAD1					0x148C
+
+#define ALX_HASH_TBL0					0x1490
+#define ALX_HASH_TBL1					0x1494
+
+#define ALX_MTU						0x149C
+#define ALX_MTU_JUMBO_TH				1514
+#define ALX_MTU_STD_ALGN				1536
+
+#define ALX_SRAM5					0x1524
+#define ALX_SRAM_RXF_LEN_MASK				0xFFF
+#define ALX_SRAM_RXF_LEN_SHIFT				0
+#define ALX_SRAM_RXF_LEN_8K				(8*1024)
+
+#define ALX_SRAM9					0x1534
+#define ALX_SRAM_LOAD_PTR				BIT(0)
+
+#define ALX_RX_BASE_ADDR_HI				0x1540
+
+#define ALX_TX_BASE_ADDR_HI				0x1544
+
+#define ALX_RFD_ADDR_LO					0x1550
+#define ALX_RFD_RING_SZ					0x1560
+#define ALX_RFD_BUF_SZ					0x1564
+
+#define ALX_RRD_ADDR_LO					0x1568
+#define ALX_RRD_RING_SZ					0x1578
+
+/* pri3: highest, pri0: lowest */
+#define ALX_TPD_PRI3_ADDR_LO				0x14E4
+#define ALX_TPD_PRI2_ADDR_LO				0x14E0
+#define ALX_TPD_PRI1_ADDR_LO				0x157C
+#define ALX_TPD_PRI0_ADDR_LO				0x1580
+
+/* producer index is 16bit */
+#define ALX_TPD_PRI3_PIDX				0x1618
+#define ALX_TPD_PRI2_PIDX				0x161A
+#define ALX_TPD_PRI1_PIDX				0x15F0
+#define ALX_TPD_PRI0_PIDX				0x15F2
+
+/* consumer index is 16bit */
+#define ALX_TPD_PRI3_CIDX				0x161C
+#define ALX_TPD_PRI2_CIDX				0x161E
+#define ALX_TPD_PRI1_CIDX				0x15F4
+#define ALX_TPD_PRI0_CIDX				0x15F6
+
+#define ALX_TPD_RING_SZ					0x1584
+
+#define ALX_TXQ0					0x1590
+#define ALX_TXQ0_TXF_BURST_PREF_MASK			0xFFFF
+#define ALX_TXQ0_TXF_BURST_PREF_SHIFT			16
+#define ALX_TXQ_TXF_BURST_PREF_DEF			0x200
+#define ALX_TXQ0_LSO_8023_EN				BIT(7)
+#define ALX_TXQ0_MODE_ENHANCE				BIT(6)
+#define ALX_TXQ0_EN					BIT(5)
+#define ALX_TXQ0_SUPT_IPOPT				BIT(4)
+#define ALX_TXQ0_TPD_BURSTPREF_MASK			0xF
+#define ALX_TXQ0_TPD_BURSTPREF_SHIFT			0
+#define ALX_TXQ_TPD_BURSTPREF_DEF			5
+
+#define ALX_TXQ1					0x1594
+/* bit11:  drop large packet, len > (rfd buf) */
+#define ALX_TXQ1_ERRLGPKT_DROP_EN			BIT(11)
+#define ALX_TXQ1_JUMBO_TSO_TH				(7*1024)
+
+#define ALX_RXQ0					0x15A0
+#define ALX_RXQ0_EN					BIT(31)
+#define ALX_RXQ0_RSS_HASH_EN				BIT(29)
+#define ALX_RXQ0_RSS_MODE_MASK				0x3
+#define ALX_RXQ0_RSS_MODE_SHIFT				26
+#define ALX_RXQ0_RSS_MODE_DIS				0
+#define ALX_RXQ0_RSS_MODE_MQMI				3
+#define ALX_RXQ0_NUM_RFD_PREF_MASK			0x3F
+#define ALX_RXQ0_NUM_RFD_PREF_SHIFT			20
+#define ALX_RXQ0_NUM_RFD_PREF_DEF			8
+#define ALX_RXQ0_IDT_TBL_SIZE_MASK			0x1FF
+#define ALX_RXQ0_IDT_TBL_SIZE_SHIFT			8
+#define ALX_RXQ0_IDT_TBL_SIZE_DEF			0x100
+#define ALX_RXQ0_IDT_TBL_SIZE_NORMAL			128
+#define ALX_RXQ0_IPV6_PARSE_EN				BIT(7)
+#define ALX_RXQ0_RSS_HSTYP_MASK				0xF
+#define ALX_RXQ0_RSS_HSTYP_SHIFT			2
+#define ALX_RXQ0_RSS_HSTYP_IPV6_TCP_EN			BIT(5)
+#define ALX_RXQ0_RSS_HSTYP_IPV6_EN			BIT(4)
+#define ALX_RXQ0_RSS_HSTYP_IPV4_TCP_EN			BIT(3)
+#define ALX_RXQ0_RSS_HSTYP_IPV4_EN			BIT(2)
+#define ALX_RXQ0_RSS_HSTYP_ALL		(ALX_RXQ0_RSS_HSTYP_IPV6_TCP_EN | \
+					 ALX_RXQ0_RSS_HSTYP_IPV4_TCP_EN | \
+					 ALX_RXQ0_RSS_HSTYP_IPV6_EN | \
+					 ALX_RXQ0_RSS_HSTYP_IPV4_EN)
+#define ALX_RXQ0_ASPM_THRESH_MASK			0x3
+#define ALX_RXQ0_ASPM_THRESH_SHIFT			0
+#define ALX_RXQ0_ASPM_THRESH_100M			3
+
+#define ALX_RXQ2					0x15A8
+#define ALX_RXQ2_RXF_XOFF_THRESH_MASK			0xFFF
+#define ALX_RXQ2_RXF_XOFF_THRESH_SHIFT			16
+#define ALX_RXQ2_RXF_XON_THRESH_MASK			0xFFF
+#define ALX_RXQ2_RXF_XON_THRESH_SHIFT			0
+/* Size = tx-packet(1522) + IPG(12) + SOF(8) + 64(Pause) + IPG(12) + SOF(8) +
+ *        rx-packet(1522) + delay-of-link(64)
+ *      = 3212.
+ */
+#define ALX_RXQ2_RXF_FLOW_CTRL_RSVD			3212
+
+#define ALX_DMA						0x15C0
+#define ALX_DMA_RCHNL_SEL_MASK				0x3
+#define ALX_DMA_RCHNL_SEL_SHIFT				26
+#define ALX_DMA_WDLY_CNT_MASK				0xF
+#define ALX_DMA_WDLY_CNT_SHIFT				16
+#define ALX_DMA_WDLY_CNT_DEF				4
+#define ALX_DMA_RDLY_CNT_MASK				0x1F
+#define ALX_DMA_RDLY_CNT_SHIFT				11
+#define ALX_DMA_RDLY_CNT_DEF				15
+/* bit10: 0:tpd with pri, 1: data */
+#define ALX_DMA_RREQ_PRI_DATA				BIT(10)
+#define ALX_DMA_RREQ_BLEN_MASK				0x7
+#define ALX_DMA_RREQ_BLEN_SHIFT				4
+#define ALX_DMA_RORDER_MODE_MASK			0x7
+#define ALX_DMA_RORDER_MODE_SHIFT			0
+#define ALX_DMA_RORDER_MODE_OUT				4
+
+#define ALX_WOL0					0x14A0
+#define ALX_WOL0_PME_LINK				BIT(5)
+#define ALX_WOL0_LINK_EN				BIT(4)
+#define ALX_WOL0_PME_MAGIC_EN				BIT(3)
+#define ALX_WOL0_MAGIC_EN				BIT(2)
+
+#define ALX_RFD_PIDX					0x15E0
+
+#define ALX_RFD_CIDX					0x15F8
+
+/* MIB */
+#define ALX_MIB_BASE					0x1700
+#define ALX_MIB_RX_OK					(ALX_MIB_BASE + 0)
+#define ALX_MIB_RX_ERRADDR				(ALX_MIB_BASE + 92)
+#define ALX_MIB_TX_OK					(ALX_MIB_BASE + 96)
+#define ALX_MIB_TX_MCCNT				(ALX_MIB_BASE + 192)
+
+#define ALX_RX_STATS_BIN				ALX_MIB_RX_OK
+#define ALX_RX_STATS_END				ALX_MIB_RX_ERRADDR
+#define ALX_TX_STATS_BIN				ALX_MIB_TX_OK
+#define ALX_TX_STATS_END				ALX_MIB_TX_MCCNT
+
+#define ALX_ISR						0x1600
+#define ALX_ISR_DIS					BIT(31)
+#define ALX_ISR_RX_Q7					BIT(30)
+#define ALX_ISR_RX_Q6					BIT(29)
+#define ALX_ISR_RX_Q5					BIT(28)
+#define ALX_ISR_RX_Q4					BIT(27)
+#define ALX_ISR_PCIE_LNKDOWN				BIT(26)
+#define ALX_ISR_RX_Q3					BIT(19)
+#define ALX_ISR_RX_Q2					BIT(18)
+#define ALX_ISR_RX_Q1					BIT(17)
+#define ALX_ISR_RX_Q0					BIT(16)
+#define ALX_ISR_TX_Q0					BIT(15)
+#define ALX_ISR_PHY					BIT(12)
+#define ALX_ISR_DMAW					BIT(10)
+#define ALX_ISR_DMAR					BIT(9)
+#define ALX_ISR_TXF_UR					BIT(8)
+#define ALX_ISR_TX_Q3					BIT(7)
+#define ALX_ISR_TX_Q2					BIT(6)
+#define ALX_ISR_TX_Q1					BIT(5)
+#define ALX_ISR_RFD_UR					BIT(4)
+#define ALX_ISR_RXF_OV					BIT(3)
+#define ALX_ISR_MANU					BIT(2)
+#define ALX_ISR_TIMER					BIT(1)
+#define ALX_ISR_SMB					BIT(0)
+
+#define ALX_IMR						0x1604
+
+/* re-send assert message if SW does not respond */
+#define ALX_INT_RETRIG					0x1608
+/* 40ms */
+#define ALX_INT_RETRIG_TO				20000
+
+#define ALX_SMB_TIMER					0x15C4
+
+#define ALX_TINT_TPD_THRSHLD				0x15C8
+
+#define ALX_TINT_TIMER					0x15CC
+
+#define ALX_CLK_GATE					0x1814
+#define ALX_CLK_GATE_RXMAC				BIT(5)
+#define ALX_CLK_GATE_TXMAC				BIT(4)
+#define ALX_CLK_GATE_RXQ				BIT(3)
+#define ALX_CLK_GATE_TXQ				BIT(2)
+#define ALX_CLK_GATE_DMAR				BIT(1)
+#define ALX_CLK_GATE_DMAW				BIT(0)
+#define ALX_CLK_GATE_ALL		(ALX_CLK_GATE_RXMAC | \
+					 ALX_CLK_GATE_TXMAC | \
+					 ALX_CLK_GATE_RXQ | \
+					 ALX_CLK_GATE_TXQ | \
+					 ALX_CLK_GATE_DMAR | \
+					 ALX_CLK_GATE_DMAW)
+
+/* interop between drivers */
+#define ALX_DRV						0x1804
+#define ALX_DRV_PHY_AUTO				BIT(28)
+#define ALX_DRV_PHY_1000				BIT(27)
+#define ALX_DRV_PHY_100					BIT(26)
+#define ALX_DRV_PHY_10					BIT(25)
+#define ALX_DRV_PHY_DUPLEX				BIT(24)
+/* bit23: adv Pause */
+#define ALX_DRV_PHY_PAUSE				BIT(23)
+/* bit22: adv Asym Pause */
+#define ALX_DRV_PHY_MASK				0xFF
+#define ALX_DRV_PHY_SHIFT				21
+#define ALX_DRV_PHY_UNKNOWN				0
+
+/* flag of phy inited */
+#define ALX_PHY_INITED					0x003F
+
+/* reg 1830 ~ 186C for C0+, 16 bit map patterns and wake packet detection */
+#define ALX_WOL_CTRL2					0x1830
+#define ALX_WOL_CTRL2_DATA_STORE			BIT(3)
+#define ALX_WOL_CTRL2_PTRN_EVT				BIT(2)
+#define ALX_WOL_CTRL2_PME_PTRN_EN			BIT(1)
+#define ALX_WOL_CTRL2_PTRN_EN				BIT(0)
+
+#define ALX_WOL_CTRL3					0x1834
+#define ALX_WOL_CTRL3_PTRN_ADDR_MASK			0xFFFFF
+#define ALX_WOL_CTRL3_PTRN_ADDR_SHIFT			0
+
+#define ALX_WOL_CTRL4					0x1838
+#define ALX_WOL_CTRL4_PT15_MATCH			BIT(31)
+#define ALX_WOL_CTRL4_PT14_MATCH			BIT(30)
+#define ALX_WOL_CTRL4_PT13_MATCH			BIT(29)
+#define ALX_WOL_CTRL4_PT12_MATCH			BIT(28)
+#define ALX_WOL_CTRL4_PT11_MATCH			BIT(27)
+#define ALX_WOL_CTRL4_PT10_MATCH			BIT(26)
+#define ALX_WOL_CTRL4_PT9_MATCH				BIT(25)
+#define ALX_WOL_CTRL4_PT8_MATCH				BIT(24)
+#define ALX_WOL_CTRL4_PT7_MATCH				BIT(23)
+#define ALX_WOL_CTRL4_PT6_MATCH				BIT(22)
+#define ALX_WOL_CTRL4_PT5_MATCH				BIT(21)
+#define ALX_WOL_CTRL4_PT4_MATCH				BIT(20)
+#define ALX_WOL_CTRL4_PT3_MATCH				BIT(19)
+#define ALX_WOL_CTRL4_PT2_MATCH				BIT(18)
+#define ALX_WOL_CTRL4_PT1_MATCH				BIT(17)
+#define ALX_WOL_CTRL4_PT0_MATCH				BIT(16)
+#define ALX_WOL_CTRL4_PT15_EN				BIT(15)
+#define ALX_WOL_CTRL4_PT14_EN				BIT(14)
+#define ALX_WOL_CTRL4_PT13_EN				BIT(13)
+#define ALX_WOL_CTRL4_PT12_EN				BIT(12)
+#define ALX_WOL_CTRL4_PT11_EN				BIT(11)
+#define ALX_WOL_CTRL4_PT10_EN				BIT(10)
+#define ALX_WOL_CTRL4_PT9_EN				BIT(9)
+#define ALX_WOL_CTRL4_PT8_EN				BIT(8)
+#define ALX_WOL_CTRL4_PT7_EN				BIT(7)
+#define ALX_WOL_CTRL4_PT6_EN				BIT(6)
+#define ALX_WOL_CTRL4_PT5_EN				BIT(5)
+#define ALX_WOL_CTRL4_PT4_EN				BIT(4)
+#define ALX_WOL_CTRL4_PT3_EN				BIT(3)
+#define ALX_WOL_CTRL4_PT2_EN				BIT(2)
+#define ALX_WOL_CTRL4_PT1_EN				BIT(1)
+#define ALX_WOL_CTRL4_PT0_EN				BIT(0)
+
+#define ALX_WOL_CTRL5					0x183C
+#define ALX_WOL_CTRL5_PT3_LEN_MASK			0xFF
+#define ALX_WOL_CTRL5_PT3_LEN_SHIFT			24
+#define ALX_WOL_CTRL5_PT2_LEN_MASK			0xFF
+#define ALX_WOL_CTRL5_PT2_LEN_SHIFT			16
+#define ALX_WOL_CTRL5_PT1_LEN_MASK			0xFF
+#define ALX_WOL_CTRL5_PT1_LEN_SHIFT			8
+#define ALX_WOL_CTRL5_PT0_LEN_MASK			0xFF
+#define ALX_WOL_CTRL5_PT0_LEN_SHIFT			0
+
+#define ALX_WOL_CTRL6					0x1840
+#define ALX_WOL_CTRL5_PT7_LEN_MASK			0xFF
+#define ALX_WOL_CTRL5_PT7_LEN_SHIFT			24
+#define ALX_WOL_CTRL5_PT6_LEN_MASK			0xFF
+#define ALX_WOL_CTRL5_PT6_LEN_SHIFT			16
+#define ALX_WOL_CTRL5_PT5_LEN_MASK			0xFF
+#define ALX_WOL_CTRL5_PT5_LEN_SHIFT			8
+#define ALX_WOL_CTRL5_PT4_LEN_MASK			0xFF
+#define ALX_WOL_CTRL5_PT4_LEN_SHIFT			0
+
+#define ALX_WOL_CTRL7					0x1844
+#define ALX_WOL_CTRL5_PT11_LEN_MASK			0xFF
+#define ALX_WOL_CTRL5_PT11_LEN_SHIFT			24
+#define ALX_WOL_CTRL5_PT10_LEN_MASK			0xFF
+#define ALX_WOL_CTRL5_PT10_LEN_SHIFT			16
+#define ALX_WOL_CTRL5_PT9_LEN_MASK			0xFF
+#define ALX_WOL_CTRL5_PT9_LEN_SHIFT			8
+#define ALX_WOL_CTRL5_PT8_LEN_MASK			0xFF
+#define ALX_WOL_CTRL5_PT8_LEN_SHIFT			0
+
+#define ALX_WOL_CTRL8					0x1848
+#define ALX_WOL_CTRL5_PT15_LEN_MASK			0xFF
+#define ALX_WOL_CTRL5_PT15_LEN_SHIFT			24
+#define ALX_WOL_CTRL5_PT14_LEN_MASK			0xFF
+#define ALX_WOL_CTRL5_PT14_LEN_SHIFT			16
+#define ALX_WOL_CTRL5_PT13_LEN_MASK			0xFF
+#define ALX_WOL_CTRL5_PT13_LEN_SHIFT			8
+#define ALX_WOL_CTRL5_PT12_LEN_MASK			0xFF
+#define ALX_WOL_CTRL5_PT12_LEN_SHIFT			0
+
+#define ALX_ACER_FIXED_PTN0				0x1850
+#define ALX_ACER_FIXED_PTN0_MASK			0xFFFFFFFF
+#define ALX_ACER_FIXED_PTN0_SHIFT			0
+
+#define ALX_ACER_FIXED_PTN1				0x1854
+#define ALX_ACER_FIXED_PTN1_MASK			0xFFFF
+#define ALX_ACER_FIXED_PTN1_SHIFT			0
+
+#define ALX_ACER_RANDOM_NUM0				0x1858
+#define ALX_ACER_RANDOM_NUM0_MASK			0xFFFFFFFF
+#define ALX_ACER_RANDOM_NUM0_SHIFT			0
+
+#define ALX_ACER_RANDOM_NUM1				0x185C
+#define ALX_ACER_RANDOM_NUM1_MASK			0xFFFFFFFF
+#define ALX_ACER_RANDOM_NUM1_SHIFT			0
+
+#define ALX_ACER_RANDOM_NUM2				0x1860
+#define ALX_ACER_RANDOM_NUM2_MASK			0xFFFFFFFF
+#define ALX_ACER_RANDOM_NUM2_SHIFT			0
+
+#define ALX_ACER_RANDOM_NUM3				0x1864
+#define ALX_ACER_RANDOM_NUM3_MASK			0xFFFFFFFF
+#define ALX_ACER_RANDOM_NUM3_SHIFT			0
+
+#define ALX_ACER_MAGIC					0x1868
+#define ALX_ACER_MAGIC_EN				BIT(31)
+#define ALX_ACER_MAGIC_PME_EN				BIT(30)
+#define ALX_ACER_MAGIC_MATCH				BIT(29)
+#define ALX_ACER_MAGIC_FF_CHECK				BIT(10)
+#define ALX_ACER_MAGIC_RAN_LEN_MASK			0x1F
+#define ALX_ACER_MAGIC_RAN_LEN_SHIFT			5
+#define ALX_ACER_MAGIC_FIX_LEN_MASK			0x1F
+#define ALX_ACER_MAGIC_FIX_LEN_SHIFT			0
+
+#define ALX_ACER_TIMER					0x186C
+#define ALX_ACER_TIMER_EN				BIT(31)
+#define ALX_ACER_TIMER_PME_EN				BIT(30)
+#define ALX_ACER_TIMER_MATCH				BIT(29)
+#define ALX_ACER_TIMER_THRES_MASK			0x1FFFF
+#define ALX_ACER_TIMER_THRES_SHIFT			0
+#define ALX_ACER_TIMER_THRES_DEF			1
+
+/* RSS definitions */
+#define ALX_RSS_KEY0					0x14B0
+#define ALX_RSS_KEY1					0x14B4
+#define ALX_RSS_KEY2					0x14B8
+#define ALX_RSS_KEY3					0x14BC
+#define ALX_RSS_KEY4					0x14C0
+#define ALX_RSS_KEY5					0x14C4
+#define ALX_RSS_KEY6					0x14C8
+#define ALX_RSS_KEY7					0x14CC
+#define ALX_RSS_KEY8					0x14D0
+#define ALX_RSS_KEY9					0x14D4
+
+#define ALX_RSS_IDT_TBL0				0x1B00
+
+#define ALX_MSI_MAP_TBL1				0x15D0
+#define ALX_MSI_MAP_TBL1_TXQ1_SHIFT			20
+#define ALX_MSI_MAP_TBL1_TXQ0_SHIFT			16
+#define ALX_MSI_MAP_TBL1_RXQ3_SHIFT			12
+#define ALX_MSI_MAP_TBL1_RXQ2_SHIFT			8
+#define ALX_MSI_MAP_TBL1_RXQ1_SHIFT			4
+#define ALX_MSI_MAP_TBL1_RXQ0_SHIFT			0
+
+#define ALX_MSI_MAP_TBL2				0x15D8
+#define ALX_MSI_MAP_TBL2_TXQ3_SHIFT			20
+#define ALX_MSI_MAP_TBL2_TXQ2_SHIFT			16
+#define ALX_MSI_MAP_TBL2_RXQ7_SHIFT			12
+#define ALX_MSI_MAP_TBL2_RXQ6_SHIFT			8
+#define ALX_MSI_MAP_TBL2_RXQ5_SHIFT			4
+#define ALX_MSI_MAP_TBL2_RXQ4_SHIFT			0
+
+#define ALX_MSI_ID_MAP					0x15D4
+
+#define ALX_MSI_RETRANS_TIMER				0x1920
+/* bit16: 1:line,0:standard */
+#define ALX_MSI_MASK_SEL_LINE				BIT(16)
+#define ALX_MSI_RETRANS_TM_MASK				0xFFFF
+#define ALX_MSI_RETRANS_TM_SHIFT			0
+
+/* CR DMA ctrl */
+
+/* TX QoS */
+#define ALX_WRR						0x1938
+#define ALX_WRR_PRI_MASK				0x3
+#define ALX_WRR_PRI_SHIFT				29
+#define ALX_WRR_PRI_RESTRICT_NONE			3
+#define ALX_WRR_PRI3_MASK				0x1F
+#define ALX_WRR_PRI3_SHIFT				24
+#define ALX_WRR_PRI2_MASK				0x1F
+#define ALX_WRR_PRI2_SHIFT				16
+#define ALX_WRR_PRI1_MASK				0x1F
+#define ALX_WRR_PRI1_SHIFT				8
+#define ALX_WRR_PRI0_MASK				0x1F
+#define ALX_WRR_PRI0_SHIFT				0
+
+#define ALX_HQTPD					0x193C
+#define ALX_HQTPD_BURST_EN				BIT(31)
+#define ALX_HQTPD_Q3_NUMPREF_MASK			0xF
+#define ALX_HQTPD_Q3_NUMPREF_SHIFT			8
+#define ALX_HQTPD_Q2_NUMPREF_MASK			0xF
+#define ALX_HQTPD_Q2_NUMPREF_SHIFT			4
+#define ALX_HQTPD_Q1_NUMPREF_MASK			0xF
+#define ALX_HQTPD_Q1_NUMPREF_SHIFT			0
+
+#define ALX_MISC					0x19C0
+#define ALX_MISC_PSW_OCP_MASK				0x7
+#define ALX_MISC_PSW_OCP_SHIFT				21
+#define ALX_MISC_PSW_OCP_DEF				0x7
+#define ALX_MISC_ISO_EN					BIT(12)
+#define ALX_MISC_INTNLOSC_OPEN				BIT(3)
+
+#define ALX_MSIC2					0x19C8
+#define ALX_MSIC2_CALB_START				BIT(0)
+
+#define ALX_MISC3					0x19CC
+/* bit1: 1:Software control 25M */
+#define ALX_MISC3_25M_BY_SW				BIT(1)
+/* bit0: 25M switch to intnl OSC */
+#define ALX_MISC3_25M_NOTO_INTNL			BIT(0)
+
+/* MSIX tbl in memory space */
+#define ALX_MSIX_ENTRY_BASE				0x2000
+
+/********************* PHY regs definition ***************************/
+
+/* PHY Specific Status Register */
+#define ALX_MII_GIGA_PSSR				0x11
+#define ALX_GIGA_PSSR_SPD_DPLX_RESOLVED			0x0800
+#define ALX_GIGA_PSSR_DPLX				0x2000
+#define ALX_GIGA_PSSR_SPEED				0xC000
+#define ALX_GIGA_PSSR_10MBS				0x0000
+#define ALX_GIGA_PSSR_100MBS				0x4000
+#define ALX_GIGA_PSSR_1000MBS				0x8000
+
+/* PHY Interrupt Enable Register */
+#define ALX_MII_IER					0x12
+#define ALX_IER_LINK_UP					0x0400
+#define ALX_IER_LINK_DOWN				0x0800
+
+/* PHY Interrupt Status Register */
+#define ALX_MII_ISR					0x13
+
+#define ALX_MII_DBG_ADDR				0x1D
+#define ALX_MII_DBG_DATA				0x1E
+
+/***************************** debug port *************************************/
+
+#define ALX_MIIDBG_ANACTRL				0x00
+#define ALX_ANACTRL_DEF					0x02EF
+
+#define ALX_MIIDBG_SYSMODCTRL				0x04
+/* en half bias */
+#define ALX_SYSMODCTRL_IECHOADJ_DEF			0xBB8B
+
+#define ALX_MIIDBG_SRDSYSMOD				0x05
+#define ALX_SRDSYSMOD_DEEMP_EN				0x0040
+#define ALX_SRDSYSMOD_DEF				0x2C46
+
+#define ALX_MIIDBG_HIBNEG				0x0B
+#define ALX_HIBNEG_PSHIB_EN				0x8000
+#define ALX_HIBNEG_HIB_PSE				0x1000
+#define ALX_HIBNEG_DEF					0xBC40
+#define ALX_HIBNEG_NOHIB	(ALX_HIBNEG_DEF & \
+				 ~(ALX_HIBNEG_PSHIB_EN | ALX_HIBNEG_HIB_PSE))
+
+#define ALX_MIIDBG_TST10BTCFG				0x12
+#define ALX_TST10BTCFG_DEF				0x4C04
+
+#define ALX_MIIDBG_AZ_ANADECT				0x15
+#define ALX_AZ_ANADECT_DEF				0x3220
+#define ALX_AZ_ANADECT_LONG				0x3210
+
+#define ALX_MIIDBG_MSE16DB				0x18
+#define ALX_MSE16DB_UP					0x05EA
+#define ALX_MSE16DB_DOWN				0x02EA
+
+#define ALX_MIIDBG_MSE20DB				0x1C
+#define ALX_MSE20DB_TH_MASK				0x7F
+#define ALX_MSE20DB_TH_SHIFT				2
+#define ALX_MSE20DB_TH_DEF				0x2E
+#define ALX_MSE20DB_TH_HI				0x54
+
+#define ALX_MIIDBG_AGC					0x23
+#define ALX_AGC_2_VGA_MASK				0x3FU
+#define ALX_AGC_2_VGA_SHIFT				8
+#define ALX_AGC_LONG1G_LIMT				40
+#define ALX_AGC_LONG100M_LIMT				44
+
+#define ALX_MIIDBG_LEGCYPS				0x29
+#define ALX_LEGCYPS_EN					0x8000
+#define ALX_LEGCYPS_DEF					0x129D
+
+#define ALX_MIIDBG_TST100BTCFG				0x36
+#define ALX_TST100BTCFG_DEF				0xE12C
+
+#define ALX_MIIDBG_GREENCFG				0x3B
+#define ALX_GREENCFG_DEF				0x7078
+
+#define ALX_MIIDBG_GREENCFG2				0x3D
+#define ALX_GREENCFG2_BP_GREEN				0x8000
+#define ALX_GREENCFG2_GATE_DFSE_EN			0x0080
+
+/******* dev 3 *********/
+#define ALX_MIIEXT_PCS					3
+
+#define ALX_MIIEXT_CLDCTRL3				0x8003
+#define ALX_CLDCTRL3_BP_CABLE1TH_DET_GT			0x8000
+
+#define ALX_MIIEXT_CLDCTRL5				0x8005
+#define ALX_CLDCTRL5_BP_VD_HLFBIAS			0x4000
+
+#define ALX_MIIEXT_CLDCTRL6				0x8006
+#define ALX_CLDCTRL6_CAB_LEN_MASK			0xFF
+#define ALX_CLDCTRL6_CAB_LEN_SHIFT			0
+#define ALX_CLDCTRL6_CAB_LEN_SHORT1G			116
+#define ALX_CLDCTRL6_CAB_LEN_SHORT100M			152
+
+#define ALX_MIIEXT_VDRVBIAS				0x8062
+#define ALX_VDRVBIAS_DEF				0x3
+
+/********* dev 7 **********/
+#define ALX_MIIEXT_ANEG					7
+
+#define ALX_MIIEXT_LOCAL_EEEADV				0x3C
+#define ALX_LOCAL_EEEADV_1000BT				0x0004
+#define ALX_LOCAL_EEEADV_100BT				0x0002
+
+#define ALX_MIIEXT_AFE					0x801A
+#define ALX_AFE_10BT_100M_TH				0x0040
+
+#define ALX_MIIEXT_S3DIG10				0x8023
+/* bit0: 1:bypass 10BT rx fifo, 0:original 10BT rx */
+#define ALX_MIIEXT_S3DIG10_SL				0x0001
+#define ALX_MIIEXT_S3DIG10_DEF				0
+
+#define ALX_MIIEXT_NLP78				0x8027
+#define ALX_MIIEXT_NLP78_120M_DEF			0x8A05
+
+#endif
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 0f493c8..a13463e 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -744,6 +744,9 @@
 		status = tg3_ape_read32(tp, gnt + off);
 		if (status == bit)
 			break;
+		if (pci_channel_offline(tp->pdev))
+			break;
+
 		udelay(10);
 	}
 
@@ -1635,6 +1638,9 @@
 	for (i = 0; i < delay_cnt; i++) {
 		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
 			break;
+		if (pci_channel_offline(tp->pdev))
+			break;
+
 		udelay(8);
 	}
 }
@@ -1800,6 +1806,9 @@
 	int i;
 	u32 val;
 
+	if (tg3_flag(tp, NO_FWARE_REPORTED))
+		return 0;
+
 	if (tg3_flag(tp, IS_SSB_CORE)) {
 		/* We don't use firmware. */
 		return 0;
@@ -1810,6 +1819,9 @@
 		for (i = 0; i < 200; i++) {
 			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
 				return 0;
+			if (pci_channel_offline(tp->pdev))
+				return -ENODEV;
+
 			udelay(100);
 		}
 		return -ENODEV;
@@ -1820,6 +1832,15 @@
 		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
 		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
 			break;
+		if (pci_channel_offline(tp->pdev)) {
+			if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
+				tg3_flag_set(tp, NO_FWARE_REPORTED);
+				netdev_info(tp->dev, "No firmware running\n");
+			}
+
+			break;
+		}
+
 		udelay(10);
 	}
 
@@ -3517,6 +3538,8 @@
 		tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
 		if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
 			break;
+		if (pci_channel_offline(tp->pdev))
+			return -EBUSY;
 	}
 
 	return (i == iters) ? -EBUSY : 0;
@@ -8586,6 +8609,14 @@
 	tw32_f(ofs, val);
 
 	for (i = 0; i < MAX_WAIT_CNT; i++) {
+		if (pci_channel_offline(tp->pdev)) {
+			dev_err(&tp->pdev->dev,
+				"tg3_stop_block device offline, "
+				"ofs=%lx enable_bit=%x\n",
+				ofs, enable_bit);
+			return -ENODEV;
+		}
+
 		udelay(100);
 		val = tr32(ofs);
 		if ((val & enable_bit) == 0)
@@ -8609,6 +8640,13 @@
 
 	tg3_disable_ints(tp);
 
+	if (pci_channel_offline(tp->pdev)) {
+		tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
+		tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
+		err = -ENODEV;
+		goto err_no_dev;
+	}
+
 	tp->rx_mode &= ~RX_MODE_ENABLE;
 	tw32_f(MAC_RX_MODE, tp->rx_mode);
 	udelay(10);
@@ -8657,6 +8695,7 @@
 	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
 	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
 
+err_no_dev:
 	for (i = 0; i < tp->irq_cnt; i++) {
 		struct tg3_napi *tnapi = &tp->napi[i];
 		if (tnapi->hw_status)
@@ -10404,6 +10443,13 @@
  */
 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
 {
+	/* Chip may have been just powered on. If so, the boot code may still
+	 * be running initialization. Wait for it to finish to avoid races in
+	 * accessing the hardware.
+	 */
+	tg3_enable_register_access(tp);
+	tg3_poll_fw(tp);
+
 	tg3_switch_clocks(tp);
 
 	tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
index 6e8bc9d..94d957d 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
@@ -244,7 +244,7 @@
 		file->f_pos += offset;
 		break;
 	case 2:
-		file->f_pos = debug->buffer_len - offset;
+		file->f_pos = debug->buffer_len + offset;
 		break;
 	default:
 		return -EINVAL;
diff --git a/drivers/net/ethernet/dec/tulip/interrupt.c b/drivers/net/ethernet/dec/tulip/interrupt.c
index 28a5e42..92306b3 100644
--- a/drivers/net/ethernet/dec/tulip/interrupt.c
+++ b/drivers/net/ethernet/dec/tulip/interrupt.c
@@ -76,6 +76,12 @@
 
 			mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
 						 PCI_DMA_FROMDEVICE);
+			if (dma_mapping_error(&tp->pdev->dev, mapping)) {
+				dev_kfree_skb(skb);
+				tp->rx_buffers[entry].skb = NULL;
+				break;
+			}
+
 			tp->rx_buffers[entry].mapping = mapping;
 
 			tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 8bc1b21..a0b4be5 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4262,6 +4262,9 @@
 		netdev->features |= NETIF_F_HIGHDMA;
 	} else {
 		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+		if (!status)
+			status = dma_set_coherent_mask(&pdev->dev,
+						       DMA_BIT_MASK(32));
 		if (status) {
 			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
 			goto free_netdev;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index a667015..d48099f 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -516,6 +516,7 @@
 	/* Set MII speed */
 	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
 
+#if !defined(CONFIG_M5272)
 	/* set RX checksum */
 	val = readl(fep->hwp + FEC_RACC);
 	if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
@@ -523,6 +524,7 @@
 	else
 		val &= ~FEC_RACC_OPTIONS;
 	writel(val, fep->hwp + FEC_RACC);
+#endif
 
 	/*
 	 * The phy interface and speed need to get configured
@@ -575,6 +577,7 @@
 #endif
 	}
 
+#if !defined(CONFIG_M5272)
 	/* enable pause frame*/
 	if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
 	    ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
@@ -592,6 +595,7 @@
 	} else {
 		rcntl &= ~FEC_ENET_FCE;
 	}
+#endif /* !defined(CONFIG_M5272) */
 
 	writel(rcntl, fep->hwp + FEC_R_CNTRL);
 
@@ -1205,7 +1209,9 @@
 	/* mask with MAC supported features */
 	if (id_entry->driver_data & FEC_QUIRK_HAS_GBIT) {
 		phy_dev->supported &= PHY_GBIT_FEATURES;
+#if !defined(CONFIG_M5272)
 		phy_dev->supported |= SUPPORTED_Pause;
+#endif
 	}
 	else
 		phy_dev->supported &= PHY_BASIC_FEATURES;
@@ -1390,6 +1396,8 @@
 	}
 }
 
+#if !defined(CONFIG_M5272)
+
 static void fec_enet_get_pauseparam(struct net_device *ndev,
 				    struct ethtool_pauseparam *pause)
 {
@@ -1436,9 +1444,13 @@
 	return 0;
 }
 
+#endif /* !defined(CONFIG_M5272) */
+
 static const struct ethtool_ops fec_enet_ethtool_ops = {
+#if !defined(CONFIG_M5272)
 	.get_pauseparam		= fec_enet_get_pauseparam,
 	.set_pauseparam		= fec_enet_set_pauseparam,
+#endif
 	.get_settings		= fec_enet_get_settings,
 	.set_settings		= fec_enet_set_settings,
 	.get_drvinfo		= fec_enet_get_drvinfo,
@@ -1874,10 +1886,12 @@
 	/* setup board info structure */
 	fep = netdev_priv(ndev);
 
+#if !defined(CONFIG_M5272)
 	/* default enable pause frame auto negotiation */
 	if (pdev->id_entry &&
 	    (pdev->id_entry->driver_data & FEC_QUIRK_HAS_GBIT))
 		fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
+#endif
 
 	fep->hwp = devm_request_and_ioremap(&pdev->dev, r);
 	fep->pdev = pdev;
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 2ad1494..d1cbfb1 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1757,7 +1757,7 @@
 	memset(rxq->rx_desc_area, 0, size);
 
 	rxq->rx_desc_area_size = size;
-	rxq->rx_skb = kmalloc_array(rxq->rx_ring_size, sizeof(*rxq->rx_skb),
+	rxq->rx_skb = kcalloc(rxq->rx_ring_size, sizeof(*rxq->rx_skb),
 				    GFP_KERNEL);
 	if (rxq->rx_skb == NULL)
 		goto out_free;
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 339bb32..1c8af8b 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1015,7 +1015,7 @@
 	int rx_desc_num = pep->rx_ring_size;
 
 	/* Allocate RX skb rings */
-	pep->rx_skb = kmalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size,
+	pep->rx_skb = kzalloc(sizeof(*pep->rx_skb) * pep->rx_ring_size,
 			     GFP_KERNEL);
 	if (!pep->rx_skb)
 		return -ENOMEM;
@@ -1076,7 +1076,7 @@
 	int size = 0, i = 0;
 	int tx_desc_num = pep->tx_ring_size;
 
-	pep->tx_skb = kmalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size,
+	pep->tx_skb = kzalloc(sizeof(*pep->tx_skb) * pep->tx_ring_size,
 			     GFP_KERNEL);
 	if (!pep->tx_skb)
 		return -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 2f4a260..8a43499 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -632,6 +632,9 @@
 		dev->caps.cqe_size   = 32;
 	}
 
+	dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
+	mlx4_warn(dev, "Timestamping is not supported in slave mode.\n");
+
 	slave_adjust_steering_mode(dev, &dev_cap, &hca_param);
 
 	return 0;
diff --git a/drivers/net/ethernet/octeon/octeon_mgmt.c b/drivers/net/ethernet/octeon/octeon_mgmt.c
index 921729f..91a8a5d 100644
--- a/drivers/net/ethernet/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/octeon/octeon_mgmt.c
@@ -46,17 +46,25 @@
 union mgmt_port_ring_entry {
 	u64 d64;
 	struct {
-		u64    reserved_62_63:2;
-		/* Length of the buffer/packet in bytes */
-		u64    len:14;
-		/* For TX, signals that the packet should be timestamped */
-		u64    tstamp:1;
-		/* The RX error code */
-		u64    code:7;
 #define RING_ENTRY_CODE_DONE 0xf
 #define RING_ENTRY_CODE_MORE 0x10
+#ifdef __BIG_ENDIAN_BITFIELD
+		u64 reserved_62_63:2;
+		/* Length of the buffer/packet in bytes */
+		u64 len:14;
+		/* For TX, signals that the packet should be timestamped */
+		u64 tstamp:1;
+		/* The RX error code */
+		u64 code:7;
 		/* Physical address of the buffer */
-		u64    addr:40;
+		u64 addr:40;
+#else
+		u64 addr:40;
+		u64 code:7;
+		u64 tstamp:1;
+		u64 len:14;
+		u64 reserved_62_63:2;
+#endif
 	} s;
 };
 
@@ -1141,10 +1149,13 @@
 		/* For compensation state to lock. */
 		ndelay(1040 * NS_PER_PHY_CLK);
 
-		/* Some Ethernet switches cannot handle standard
-		 * Interframe Gap, increase to 16 bytes.
+		/* Default Interframe Gaps are too small.  Recommended
+		 * workaround is:
+		 *
+		 * AGL_GMX_TX_IFG[IFG1]=14
+		 * AGL_GMX_TX_IFG[IFG2]=10
 		 */
-		cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0x88);
+		cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0xae);
 	}
 
 	octeon_mgmt_rx_fill_ring(netdev);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index 43562c2..6acf82b 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -642,7 +642,7 @@
 				qlcnic_83xx_config_intrpt(adapter, 0);
 		}
 		/* Allow dma queues to drain after context reset */
-		msleep(20);
+		mdelay(20);
 	}
 }
 
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index b4479b5..e29fe8d 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -380,8 +380,9 @@
 	.eesipr_value	= 0x01ff009f,
 
 	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
-	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
-			  EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
+	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
+			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
+			  EESR_ECI,
 	.tx_error_check	= EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
 
 	.apr		= 1,
@@ -427,8 +428,9 @@
 	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x01ff009f,
 
 	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
-	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
-			  EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
+	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
+			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
+			  EESR_ECI,
 	.tx_error_check	= EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
 
 	.apr		= 1,
@@ -478,8 +480,9 @@
 	.rmcr_value	= 0x00000001,
 
 	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
-	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RDE |
-			  EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI,
+	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
+			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
+			  EESR_ECI,
 	.tx_error_check	= EESR_TWB | EESR_TABT | EESR_TDE | EESR_TFE,
 
 	.apr		= 1,
@@ -592,9 +595,9 @@
 	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
 
 	.tx_check	= EESR_TC1 | EESR_FTC,
-	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
-			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
-			  EESR_ECI,
+	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
+			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
+			  EESR_TDE | EESR_ECI,
 	.tx_error_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
 			  EESR_TFE,
 	.fdr_value	= 0x0000072f,
@@ -674,9 +677,9 @@
 	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
 
 	.tx_check	= EESR_TC1 | EESR_FTC,
-	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
-			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
-			  EESR_ECI,
+	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
+			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
+			  EESR_TDE | EESR_ECI,
 	.tx_error_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
 			  EESR_TFE,
 
@@ -811,9 +814,9 @@
 	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
 
 	.tx_check	= EESR_TC1 | EESR_FTC,
-	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT | \
-			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | \
-			  EESR_ECI,
+	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
+			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
+			  EESR_TDE | EESR_ECI,
 	.tx_error_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_TDE | \
 			  EESR_TFE,
 
@@ -1401,16 +1404,23 @@
 		desc_status = edmac_to_cpu(mdp, rxdesc->status);
 		pkt_len = rxdesc->frame_length;
 
-#if defined(CONFIG_ARCH_R8A7740)
-		desc_status >>= 16;
-#endif
-
 		if (--boguscnt < 0)
 			break;
 
 		if (!(desc_status & RDFEND))
 			ndev->stats.rx_length_errors++;
 
+#if defined(CONFIG_ARCH_R8A7740)
+		/*
+		 * On almost all GETHER/ETHER controllers, the Receive Frame
+		 * State (RFS) bits in Receive Descriptor 0 occupy bits 9 to
+		 * 0. On the R8A7740's GETHER, however, the RFS bits occupy
+		 * bits 25 to 16, so the driver needs to shift the status
+		 * right by 16.
+		 */
+		desc_status >>= 16;
+#endif
+
 		if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
 				   RD_RFS5 | RD_RFS6 | RD_RFS10)) {
 			ndev->stats.rx_errors++;
@@ -1542,11 +1552,12 @@
 
 ignore_link:
 	if (intr_status & EESR_TWB) {
-		/* Write buck end. unused write back interrupt */
-		if (intr_status & EESR_TABT)	/* Transmit Abort int */
+		/* Unused write back interrupt */
+		if (intr_status & EESR_TABT) {	/* Transmit Abort int */
 			ndev->stats.tx_aborted_errors++;
 			if (netif_msg_tx_err(mdp))
 				dev_err(&ndev->dev, "Transmit Abort\n");
+		}
 	}
 
 	if (intr_status & EESR_RABT) {
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 1ddc9f2..62689a5 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -253,7 +253,7 @@
 
 #define DEFAULT_TX_CHECK	(EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | \
 				 EESR_RTO)
-#define DEFAULT_EESR_ERR_CHECK	(EESR_TWB | EESR_TABT | EESR_RABT | \
+#define DEFAULT_EESR_ERR_CHECK	(EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | \
 				 EESR_RDE | EESR_RFRMER | EESR_ADE | \
 				 EESR_TFE | EESR_TDE | EESR_ECI)
 #define DEFAULT_TX_ERROR_CHECK	(EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | \
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 39e4cb3..4a14a94 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -2139,7 +2139,7 @@
 	struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
 	return sprintf(buf, "%d\n", efx->phy_type);
 }
-static DEVICE_ATTR(phy_type, 0644, show_phy_type, NULL);
+static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
 
 static int efx_register_netdev(struct efx_nic *efx)
 {
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 7788fbe..9517697 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -297,8 +297,8 @@
 #define MAC_RNABLE_RX		0x00000004	/* Receiver Enable */
 
 /* Default LPI timers */
-#define STMMAC_DEFAULT_LIT_LS_TIMER	0x3E8
-#define STMMAC_DEFAULT_TWT_LS_TIMER	0x0
+#define STMMAC_DEFAULT_LIT_LS	0x3E8
+#define STMMAC_DEFAULT_TWT_LS	0x0
 
 #define STMMAC_CHAIN_MODE	0x1
 #define STMMAC_RING_MODE	0x2
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 618446a..e9eab29 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -130,7 +130,7 @@
 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
 module_param(eee_timer, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
-#define STMMAC_LPI_TIMER(x) (jiffies + msecs_to_jiffies(x))
+#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
 
 /* By default the driver will use the ring mode to manage tx and rx descriptors
  * but passing this value so user can force to use the chain instead of the ring
@@ -288,7 +288,7 @@
 	struct stmmac_priv *priv = (struct stmmac_priv *)arg;
 
 	stmmac_enable_eee_mode(priv);
-	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_TIMER(eee_timer));
+	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
 }
 
 /**
@@ -304,22 +304,34 @@
 {
 	bool ret = false;
 
+	/* When using PCS we cannot deal with the PHY registers at this
+	 * stage, so we do not support extra features like EEE.
+	 */
+	if ((priv->pcs == STMMAC_PCS_RGMII) || (priv->pcs == STMMAC_PCS_TBI) ||
+	    (priv->pcs == STMMAC_PCS_RTBI))
+		goto out;
+
 	/* MAC core supports the EEE feature. */
 	if (priv->dma_cap.eee) {
 		/* Check if the PHY supports EEE */
 		if (phy_init_eee(priv->phydev, 1))
 			goto out;
 
-		priv->eee_active = 1;
-		init_timer(&priv->eee_ctrl_timer);
-		priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer;
-		priv->eee_ctrl_timer.data = (unsigned long)priv;
-		priv->eee_ctrl_timer.expires = STMMAC_LPI_TIMER(eee_timer);
-		add_timer(&priv->eee_ctrl_timer);
+		if (!priv->eee_active) {
+			priv->eee_active = 1;
+			init_timer(&priv->eee_ctrl_timer);
+			priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer;
+			priv->eee_ctrl_timer.data = (unsigned long)priv;
+			priv->eee_ctrl_timer.expires = STMMAC_LPI_T(eee_timer);
+			add_timer(&priv->eee_ctrl_timer);
 
-		priv->hw->mac->set_eee_timer(priv->ioaddr,
-					     STMMAC_DEFAULT_LIT_LS_TIMER,
-					     priv->tx_lpi_timer);
+			priv->hw->mac->set_eee_timer(priv->ioaddr,
+						     STMMAC_DEFAULT_LIT_LS,
+						     priv->tx_lpi_timer);
+		} else
+			/* Set HW EEE according to the speed */
+			priv->hw->mac->set_eee_pls(priv->ioaddr,
+						   priv->phydev->link);
 
 		pr_info("stmmac: Energy-Efficient Ethernet initialized\n");
 
@@ -329,20 +341,6 @@
 	return ret;
 }
 
-/**
- * stmmac_eee_adjust: adjust HW EEE according to the speed
- * @priv: driver private structure
- * Description:
- *	When the EEE has been already initialised we have to
- *	modify the PLS bit in the LPI ctrl & status reg according
- *	to the PHY link status. For this reason.
- */
-static void stmmac_eee_adjust(struct stmmac_priv *priv)
-{
-	if (priv->eee_enabled)
-		priv->hw->mac->set_eee_pls(priv->ioaddr, priv->phydev->link);
-}
-
 /* stmmac_get_tx_hwtstamp: get HW TX timestamps
  * @priv: driver private structure
  * @entry : descriptor index to be used.
@@ -769,7 +767,10 @@
 	if (new_state && netif_msg_link(priv))
 		phy_print_status(phydev);
 
-	stmmac_eee_adjust(priv);
+	/* At this stage it may be necessary to set up EEE or adjust some
+	 * MAC-related HW registers.
+	 */
+	priv->eee_enabled = stmmac_eee_init(priv);
 
 	spin_unlock_irqrestore(&priv->lock, flags);
 
@@ -1277,7 +1278,7 @@
 
 	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
 		stmmac_enable_eee_mode(priv);
-		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_TIMER(eee_timer));
+		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
 	}
 	spin_unlock(&priv->tx_lock);
 }
@@ -1671,14 +1672,9 @@
 	if (priv->phydev)
 		phy_start(priv->phydev);
 
-	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS_TIMER;
+	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
 
-	/* Using PCS we cannot dial with the phy registers at this stage
-	 * so we do not support extra feature like EEE.
-	 */
-	if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
-	    priv->pcs != STMMAC_PCS_RTBI)
-		priv->eee_enabled = stmmac_eee_init(priv);
+	priv->eee_enabled = stmmac_eee_init(priv);
 
 	stmmac_init_tx_coalesce(priv);
 
@@ -1899,7 +1895,7 @@
 
 #ifdef STMMAC_XMIT_DEBUG
 	if (netif_msg_pktdata(priv)) {
-		pr_info("%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d"
+		pr_info("%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d",
 			__func__, (priv->cur_tx % txsize),
 			(priv->dirty_tx % txsize), entry, first, nfrags);
 		if (priv->extend_desc)
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 21a5b29..d1a769f 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1679,7 +1679,7 @@
 	priv->rx_packet_max = max(rx_packet_max, 128);
 	priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL);
 	priv->irq_enabled = true;
-	if (!ndev) {
+	if (!priv->cpts) {
 		pr_err("error allocating cpts\n");
 		goto clean_ndev_ret;
 	}
@@ -1973,9 +1973,12 @@
 {
 	struct platform_device	*pdev = to_platform_device(dev);
 	struct net_device	*ndev = platform_get_drvdata(pdev);
+	struct cpsw_priv	*priv = netdev_priv(ndev);
 
 	if (netif_running(ndev))
 		cpsw_ndo_stop(ndev);
+	soft_reset("sliver 0", &priv->slaves[0].sliver->soft_reset);
+	soft_reset("sliver 1", &priv->slaves[1].sliver->soft_reset);
 	pm_runtime_put_sync(&pdev->dev);
 
 	return 0;
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 49dfd59..053c84f 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -705,6 +705,13 @@
 	}
 
 	buffer = dma_map_single(ctlr->dev, data, len, chan->dir);
+	ret = dma_mapping_error(ctlr->dev, buffer);
+	if (ret) {
+		cpdma_desc_free(ctlr->pool, desc, 1);
+		ret = -EINVAL;
+		goto unlock_ret;
+	}
+
 	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
 	cpdma_desc_to_port(chan, mode, directed);
 
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index b2275d1..c47f0db 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -459,15 +459,12 @@
 static int davinci_mdio_resume(struct device *dev)
 {
 	struct davinci_mdio_data *data = dev_get_drvdata(dev);
-	u32 ctrl;
 
 	pm_runtime_get_sync(data->dev);
 
 	spin_lock(&data->lock);
 	/* restart the scan state machine */
-	ctrl = __raw_readl(&data->regs->control);
-	ctrl |= CONTROL_ENABLE;
-	__raw_writel(ctrl, &data->regs->control);
+	__davinci_mdio_reset(data);
 
 	data->suspended = false;
 	spin_unlock(&data->lock);
@@ -476,8 +473,8 @@
 }
 
 static const struct dev_pm_ops davinci_mdio_pm_ops = {
-	.suspend	= davinci_mdio_suspend,
-	.resume		= davinci_mdio_resume,
+	.suspend_late	= davinci_mdio_suspend,
+	.resume_early	= davinci_mdio_resume,
 };
 
 static const struct of_device_id davinci_mdio_of_mtable[] = {
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index ab2307b..4dccead 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -285,7 +285,9 @@
 
 	skb->protocol = eth_type_trans(skb, net);
 	skb->ip_summed = CHECKSUM_NONE;
-	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), packet->vlan_tci);
+	if (packet->vlan_tci & VLAN_TAG_PRESENT)
+		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+				       packet->vlan_tci);
 
 	net->stats.rx_packets++;
 	net->stats.rx_bytes += packet->total_data_buflen;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 1c502bb..6e91931 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -853,18 +853,24 @@
 		struct nlattr *tb[], struct nlattr *data[])
 {
 	struct macvlan_dev *vlan = netdev_priv(dev);
-	if (data && data[IFLA_MACVLAN_MODE])
-		vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
+
 	if (data && data[IFLA_MACVLAN_FLAGS]) {
 		__u16 flags = nla_get_u16(data[IFLA_MACVLAN_FLAGS]);
 		bool promisc = (flags ^ vlan->flags) & MACVLAN_FLAG_NOPROMISC;
+		if (vlan->port->passthru && promisc) {
+			int err;
 
-		if (promisc && (flags & MACVLAN_FLAG_NOPROMISC))
-			dev_set_promiscuity(vlan->lowerdev, -1);
-		else if (promisc && !(flags & MACVLAN_FLAG_NOPROMISC))
-			dev_set_promiscuity(vlan->lowerdev, 1);
+			if (flags & MACVLAN_FLAG_NOPROMISC)
+				err = dev_set_promiscuity(vlan->lowerdev, -1);
+			else
+				err = dev_set_promiscuity(vlan->lowerdev, 1);
+			if (err < 0)
+				return err;
+		}
 		vlan->flags = flags;
 	}
+	if (data && data[IFLA_MACVLAN_MODE])
+		vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
 	return 0;
 }
 
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 59e9605..b6dd6a7 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -524,8 +524,10 @@
 			return -EMSGSIZE;
 		num_pages = get_user_pages_fast(base, size, 0, &page[i]);
 		if (num_pages != size) {
-			for (i = 0; i < num_pages; i++)
-				put_page(page[i]);
+			int j;
+
+			for (j = 0; j < num_pages; j++)
+				put_page(page[i + j]);
 			return -EFAULT;
 		}
 		truesize = size * PAGE_SIZE;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index bfa9bb4..9c61f87 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1010,8 +1010,10 @@
 			return -EMSGSIZE;
 		num_pages = get_user_pages_fast(base, size, 0, &page[i]);
 		if (num_pages != size) {
-			for (i = 0; i < num_pages; i++)
-				put_page(page[i]);
+			int j;
+
+			for (j = 0; j < num_pages; j++)
+				put_page(page[i + j]);
 			return -EFAULT;
 		}
 		truesize = size * PAGE_SIZE;
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index d095d0d..5645921 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -590,7 +590,13 @@
 	{QMI_GOBI1K_DEVICE(0x03f0, 0x1f1d)},	/* HP un2400 Gobi Modem Device */
 	{QMI_GOBI1K_DEVICE(0x04da, 0x250d)},	/* Panasonic Gobi Modem device */
 	{QMI_GOBI1K_DEVICE(0x413c, 0x8172)},	/* Dell Gobi Modem device */
-	{QMI_GOBI1K_DEVICE(0x1410, 0xa001)},	/* Novatel Gobi Modem device */
+	{QMI_GOBI1K_DEVICE(0x1410, 0xa001)},	/* Novatel/Verizon USB-1000 */
+	{QMI_GOBI1K_DEVICE(0x1410, 0xa002)},	/* Novatel Gobi Modem device */
+	{QMI_GOBI1K_DEVICE(0x1410, 0xa003)},	/* Novatel Gobi Modem device */
+	{QMI_GOBI1K_DEVICE(0x1410, 0xa004)},	/* Novatel Gobi Modem device */
+	{QMI_GOBI1K_DEVICE(0x1410, 0xa005)},	/* Novatel Gobi Modem device */
+	{QMI_GOBI1K_DEVICE(0x1410, 0xa006)},	/* Novatel Gobi Modem device */
+	{QMI_GOBI1K_DEVICE(0x1410, 0xa007)},	/* Novatel Gobi Modem device */
 	{QMI_GOBI1K_DEVICE(0x0b05, 0x1776)},	/* Asus Gobi Modem device */
 	{QMI_GOBI1K_DEVICE(0x19d2, 0xfff3)},	/* ONDA Gobi Modem device */
 	{QMI_GOBI1K_DEVICE(0x05c6, 0x9001)},	/* Generic Gobi Modem device */
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 3b1d2ee..57325f3 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -565,18 +565,22 @@
 
 /* Watch incoming packets to learn mapping between Ethernet address
  * and Tunnel endpoint.
+ * Return true if packet is bogus and should be dropped.
  */
-static void vxlan_snoop(struct net_device *dev,
+static bool vxlan_snoop(struct net_device *dev,
 			__be32 src_ip, const u8 *src_mac)
 {
 	struct vxlan_dev *vxlan = netdev_priv(dev);
 	struct vxlan_fdb *f;
-	int err;
 
 	f = vxlan_find_mac(vxlan, src_mac);
 	if (likely(f)) {
 		if (likely(f->remote.remote_ip == src_ip))
-			return;
+			return false;
+
+		/* Don't migrate static entries, drop packets */
+		if (f->state & NUD_NOARP)
+			return true;
 
 		if (net_ratelimit())
 			netdev_info(dev,
@@ -588,14 +592,19 @@
 	} else {
 		/* learned new entry */
 		spin_lock(&vxlan->hash_lock);
-		err = vxlan_fdb_create(vxlan, src_mac, src_ip,
-				       NUD_REACHABLE,
-				       NLM_F_EXCL|NLM_F_CREATE,
-				       vxlan->dst_port,
-				       vxlan->default_dst.remote_vni,
-				       0, NTF_SELF);
+
+		/* close off race between vxlan_flush and incoming packets */
+		if (netif_running(dev))
+			vxlan_fdb_create(vxlan, src_mac, src_ip,
+					 NUD_REACHABLE,
+					 NLM_F_EXCL|NLM_F_CREATE,
+					 vxlan->dst_port,
+					 vxlan->default_dst.remote_vni,
+					 0, NTF_SELF);
 		spin_unlock(&vxlan->hash_lock);
 	}
+
+	return false;
 }
 
 
@@ -727,8 +736,9 @@
 			       vxlan->dev->dev_addr) == 0)
 		goto drop;
 
-	if (vxlan->flags & VXLAN_F_LEARN)
-		vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source);
+	if ((vxlan->flags & VXLAN_F_LEARN) &&
+	    vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source))
+		goto drop;
 
 	__skb_tunnel_rx(skb, vxlan->dev);
 	skb_reset_network_header(skb);
@@ -1151,9 +1161,11 @@
 		struct sk_buff *skb1;
 
 		skb1 = skb_clone(skb, GFP_ATOMIC);
-		rc1 = vxlan_xmit_one(skb1, dev, rdst, did_rsc);
-		if (rc == NETDEV_TX_OK)
-			rc = rc1;
+		if (skb1) {
+			rc1 = vxlan_xmit_one(skb1, dev, rdst, did_rsc);
+			if (rc == NETDEV_TX_OK)
+				rc = rc1;
+		}
 	}
 
 	rc1 = vxlan_xmit_one(skb, dev, rdst0, did_rsc);
diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c
index 147614e..6a8a382 100644
--- a/drivers/net/wan/dlci.c
+++ b/drivers/net/wan/dlci.c
@@ -384,21 +384,37 @@
 	struct frad_local	*flp;
 	struct net_device	*master, *slave;
 	int			err;
+	bool			found = false;
+
+	rtnl_lock();
 
 	/* validate slave device */
 	master = __dev_get_by_name(&init_net, dlci->devname);
-	if (!master)
-		return -ENODEV;
+	if (!master) {
+		err = -ENODEV;
+		goto out;
+	}
+
+	list_for_each_entry(dlp, &dlci_devs, list) {
+		if (dlp->master == master) {
+			found = true;
+			break;
+		}
+	}
+	if (!found) {
+		err = -ENODEV;
+		goto out;
+	}
 
 	if (netif_running(master)) {
-		return -EBUSY;
+		err = -EBUSY;
+		goto out;
 	}
 
 	dlp = netdev_priv(master);
 	slave = dlp->slave;
 	flp = netdev_priv(slave);
 
-	rtnl_lock();
 	err = (*flp->deassoc)(slave, master);
 	if (!err) {
 		list_del(&dlp->list);
@@ -407,8 +423,8 @@
 
 		dev_put(slave);
 	}
+out:
 	rtnl_unlock();
-
 	return err;
 }
 
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 0743a47..62f1b76 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1174,7 +1174,7 @@
 		mutex_lock(&priv->htc_pm_lock);
 
 		priv->ps_idle = !!(conf->flags & IEEE80211_CONF_IDLE);
-		if (priv->ps_idle)
+		if (!priv->ps_idle)
 			chip_reset = true;
 
 		mutex_unlock(&priv->htc_pm_lock);
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 1c9b1ba..83ab6be 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1570,6 +1570,8 @@
 	    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
 		return;
 
+	rcu_read_lock();
+
 	ac = list_first_entry(&txq->axq_acq, struct ath_atx_ac, list);
 	last_ac = list_entry(txq->axq_acq.prev, struct ath_atx_ac, list);
 
@@ -1608,8 +1610,10 @@
 
 		if (ac == last_ac ||
 		    txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH)
-			return;
+			break;
 	}
+
+	rcu_read_unlock();
 }
 
 /***********/
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
index b98f223..2c59357 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
@@ -930,6 +930,10 @@
 			brcmf_fws_del_interface(ifp);
 			brcmf_fws_deinit(drvr);
 		}
+		if (drvr->iflist[0]) {
+			free_netdev(ifp->ndev);
+			drvr->iflist[0] = NULL;
+		}
 		if (p2p_ifp) {
 			free_netdev(p2p_ifp->ndev);
 			drvr->iflist[1] = NULL;
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index 28e7aee..9fd6f2f 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -3074,21 +3074,8 @@
  */
 static bool brcms_c_ps_allowed(struct brcms_c_info *wlc)
 {
-	/* disallow PS when one of the following global conditions meets */
-	if (!wlc->pub->associated)
-		return false;
-
-	/* disallow PS when one of these meets when not scanning */
-	if (wlc->filter_flags & FIF_PROMISC_IN_BSS)
-		return false;
-
-	if (wlc->bsscfg->type == BRCMS_TYPE_AP)
-		return false;
-
-	if (wlc->bsscfg->type == BRCMS_TYPE_ADHOC)
-		return false;
-
-	return true;
+	/* not supporting PS so always return false for now */
+	return false;
 }
 
 static void brcms_c_statsupd(struct brcms_c_info *wlc)
diff --git a/drivers/net/wireless/iwlegacy/3945-rs.c b/drivers/net/wireless/iwlegacy/3945-rs.c
index c9f197d..fe31590 100644
--- a/drivers/net/wireless/iwlegacy/3945-rs.c
+++ b/drivers/net/wireless/iwlegacy/3945-rs.c
@@ -816,6 +816,7 @@
 		rs_sta->last_txrate_idx = idx;
 		info->control.rates[0].idx = rs_sta->last_txrate_idx;
 	}
+	info->control.rates[0].count = 1;
 
 	D_RATE("leave: %d\n", idx);
 }
diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c
index 1fc0b227..ed3c42a 100644
--- a/drivers/net/wireless/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/iwlegacy/4965-rs.c
@@ -2268,7 +2268,7 @@
 		info->control.rates[0].flags = 0;
 	}
 	info->control.rates[0].idx = rate_idx;
-
+	info->control.rates[0].count = 1;
 }
 
 static void *
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c
index 907bd6e..10fbb17 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.c
@@ -2799,7 +2799,7 @@
 		info->control.rates[0].flags = 0;
 	}
 	info->control.rates[0].idx = rate_idx;
-
+	info->control.rates[0].count = 1;
 }
 
 static void *rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c
index 707446f..cd1ad001 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rxon.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c
@@ -1378,7 +1378,7 @@
 	struct iwl_chain_noise_data *data = &priv->chain_noise_data;
 	int ret;
 
-	if (!(priv->calib_disabled & IWL_CHAIN_NOISE_CALIB_DISABLED))
+	if (priv->calib_disabled & IWL_CHAIN_NOISE_CALIB_DISABLED)
 		return;
 
 	if ((data->state == IWL_CHAIN_NOISE_ALIVE) &&
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 39aad98..40fed1f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -1000,10 +1000,12 @@
 	 */
 	if (load_module) {
 		err = request_module("%s", op->name);
+#ifdef CONFIG_IWLWIFI_OPMODE_MODULAR
 		if (err)
 			IWL_ERR(drv,
 				"failed to load module %s (error %d), is dynamic loading enabled?\n",
 				op->name, err);
+#endif
 	}
 	return;
 
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index 55334d5..b99fe31 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -2546,6 +2546,7 @@
 		info->control.rates[0].flags = 0;
 	}
 	info->control.rates[0].idx = rate_idx;
+	info->control.rates[0].count = 1;
 }
 
 static void *rs_alloc_sta(void *mvm_rate, struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index f212f16..48c1891 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -180,7 +180,8 @@
 		tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
 		return;
 	} else if (ieee80211_is_back_req(fc)) {
-		tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
+		tx_cmd->tx_flags |=
+			cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR);
 	}
 
 	/* HT rate doesn't make sense for a non data frame */
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index b52d70c..72f32e5 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -3027,19 +3027,26 @@
 	 * TODO: we do not use +6 dBm option to do not increase power beyond
 	 * regulatory limit, however this could be utilized for devices with
 	 * CAPABILITY_POWER_LIMIT.
+	 *
+	 * TODO: add different temperature compensation code for RT3290 & RT5390
+	 * so that BBP_R1 can be used for those chips.
 	 */
-	rt2800_bbp_read(rt2x00dev, 1, &r1);
-	if (delta <= -12) {
-		power_ctrl = 2;
-		delta += 12;
-	} else if (delta <= -6) {
-		power_ctrl = 1;
-		delta += 6;
-	} else {
-		power_ctrl = 0;
+	if (!rt2x00_rt(rt2x00dev, RT3290) &&
+	    !rt2x00_rt(rt2x00dev, RT5390)) {
+		rt2800_bbp_read(rt2x00dev, 1, &r1);
+		if (delta <= -12) {
+			power_ctrl = 2;
+			delta += 12;
+		} else if (delta <= -6) {
+			power_ctrl = 1;
+			delta += 6;
+		} else {
+			power_ctrl = 0;
+		}
+		rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, power_ctrl);
+		rt2800_bbp_write(rt2x00dev, 1, r1);
 	}
-	rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, power_ctrl);
-	rt2800_bbp_write(rt2x00dev, 1, r1);
+
 	offset = TX_PWR_CFG_0;
 
 	for (i = 0; i < EEPROM_TXPOWER_BYRATE_SIZE; i += 2) {
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 37984e6..8c20935 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -662,7 +662,7 @@
 {
 	struct xenvif *vif = NULL, *tmp;
 	s8 status;
-	u16 irq, flags;
+	u16 flags;
 	struct xen_netif_rx_response *resp;
 	struct sk_buff_head rxq;
 	struct sk_buff *skb;
@@ -771,13 +771,13 @@
 					 sco->meta_slots_used);
 
 		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);
-		irq = vif->irq;
-		if (ret && list_empty(&vif->notify_list))
-			list_add_tail(&vif->notify_list, &notify);
 
 		xenvif_notify_tx_completion(vif);
 
-		xenvif_put(vif);
+		if (ret && list_empty(&vif->notify_list))
+			list_add_tail(&vif->notify_list, &notify);
+		else
+			xenvif_put(vif);
 		npo.meta_cons += sco->meta_slots_used;
 		dev_kfree_skb(skb);
 	}
@@ -785,6 +785,7 @@
 	list_for_each_entry_safe(vif, tmp, &notify, notify_list) {
 		notify_remote_via_irq(vif->irq);
 		list_del_init(&vif->notify_list);
+		xenvif_put(vif);
 	}
 
 	/* More work to do? */
diff --git a/drivers/of/base.c b/drivers/of/base.c
index f53b992..a6f584a 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -192,14 +192,15 @@
 struct device_node *of_find_all_nodes(struct device_node *prev)
 {
 	struct device_node *np;
+	unsigned long flags;
 
-	raw_spin_lock(&devtree_lock);
+	raw_spin_lock_irqsave(&devtree_lock, flags);
 	np = prev ? prev->allnext : of_allnodes;
 	for (; np != NULL; np = np->allnext)
 		if (of_node_get(np))
 			break;
 	of_node_put(prev);
-	raw_spin_unlock(&devtree_lock);
+	raw_spin_unlock_irqrestore(&devtree_lock, flags);
 	return np;
 }
 EXPORT_SYMBOL(of_find_all_nodes);
@@ -421,8 +422,9 @@
 	struct device_node *prev)
 {
 	struct device_node *next;
+	unsigned long flags;
 
-	raw_spin_lock(&devtree_lock);
+	raw_spin_lock_irqsave(&devtree_lock, flags);
 	next = prev ? prev->sibling : node->child;
 	for (; next; next = next->sibling) {
 		if (!__of_device_is_available(next))
@@ -431,7 +433,7 @@
 			break;
 	}
 	of_node_put(prev);
-	raw_spin_unlock(&devtree_lock);
+	raw_spin_unlock_irqrestore(&devtree_lock, flags);
 	return next;
 }
 EXPORT_SYMBOL(of_get_next_available_child);
@@ -735,13 +737,14 @@
 struct device_node *of_find_node_by_phandle(phandle handle)
 {
 	struct device_node *np;
+	unsigned long flags;
 
-	raw_spin_lock(&devtree_lock);
+	raw_spin_lock_irqsave(&devtree_lock, flags);
 	for (np = of_allnodes; np; np = np->allnext)
 		if (np->phandle == handle)
 			break;
 	of_node_get(np);
-	raw_spin_unlock(&devtree_lock);
+	raw_spin_unlock_irqrestore(&devtree_lock, flags);
 	return np;
 }
 EXPORT_SYMBOL(of_find_node_by_phandle);
diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c
index 9544cdc..e79e006 100644
--- a/drivers/parisc/iosapic.c
+++ b/drivers/parisc/iosapic.c
@@ -811,6 +811,70 @@
 	return pcidev->irq;
 }
 
+static struct iosapic_info *first_isi = NULL;
+
+#ifdef CONFIG_64BIT
+int iosapic_serial_irq(int num)
+{
+	struct iosapic_info *isi = first_isi;
+	struct irt_entry *irte = NULL;  /* only used if PAT PDC */
+	struct vector_info *vi;
+	int isi_line;	/* line used by device */
+
+	/* lookup IRT entry for isi/slot/pin set */
+	irte = &irt_cell[num];
+
+	DBG_IRT("iosapic_serial_irq(): irte %p %x %x %x %x %x %x %x %x\n",
+		irte,
+		irte->entry_type,
+		irte->entry_length,
+		irte->polarity_trigger,
+		irte->src_bus_irq_devno,
+		irte->src_bus_id,
+		irte->src_seg_id,
+		irte->dest_iosapic_intin,
+		(u32) irte->dest_iosapic_addr);
+	isi_line = irte->dest_iosapic_intin;
+
+	/* get vector info for this input line */
+	vi = isi->isi_vector + isi_line;
+	DBG_IRT("iosapic_serial_irq:  line %d vi 0x%p\n", isi_line, vi);
+
+	/* If this IRQ line has already been setup, skip it */
+	if (vi->irte)
+		goto out;
+
+	vi->irte = irte;
+
+	/*
+	 * Allocate processor IRQ
+	 *
+	 * XXX/FIXME The txn_alloc_irq() code and related code should be
+	 * moved to enable_irq(). That way we only allocate processor IRQ
+	 * bits for devices that actually have drivers claiming them.
+	 * Right now we assign an IRQ to every PCI device present,
+	 * regardless of whether it's used or not.
+	 */
+	vi->txn_irq = txn_alloc_irq(8);
+
+	if (vi->txn_irq < 0)
+		panic("I/O sapic: couldn't get TXN IRQ\n");
+
+	/* enable_irq() will use txn_* to program IRdT */
+	vi->txn_addr = txn_alloc_addr(vi->txn_irq);
+	vi->txn_data = txn_alloc_data(vi->txn_irq);
+
+	vi->eoi_addr = isi->addr + IOSAPIC_REG_EOI;
+	vi->eoi_data = cpu_to_le32(vi->txn_data);
+
+	cpu_claim_irq(vi->txn_irq, &iosapic_interrupt_type, vi);
+
+ out:
+
+	return vi->txn_irq;
+}
+#endif
+
 
 /*
 ** squirrel away the I/O Sapic Version
@@ -877,6 +941,8 @@
 		vip->irqline = (unsigned char) cnt;
 		vip->iosapic = isi;
 	}
+	if (!first_isi)
+		first_isi = isi;
 	return isi;
 }
 
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 716aa93..59df857 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -61,6 +61,7 @@
 static void handle_hotplug_event_bridge (acpi_handle, u32, void *);
 static void acpiphp_sanitize_bus(struct pci_bus *bus);
 static void acpiphp_set_hpp_values(struct pci_bus *bus);
+static void hotplug_event_func(acpi_handle handle, u32 type, void *context);
 static void handle_hotplug_event_func(acpi_handle handle, u32 type, void *context);
 static void free_bridge(struct kref *kref);
 
@@ -147,7 +148,7 @@
 
 
 static const struct acpi_dock_ops acpiphp_dock_ops = {
-	.handler = handle_hotplug_event_func,
+	.handler = hotplug_event_func,
 };
 
 /* Check whether the PCI device is managed by native PCIe hotplug driver */
@@ -179,6 +180,20 @@
 	return true;
 }
 
+static void acpiphp_dock_init(void *data)
+{
+	struct acpiphp_func *func = data;
+
+	get_bridge(func->slot->bridge);
+}
+
+static void acpiphp_dock_release(void *data)
+{
+	struct acpiphp_func *func = data;
+
+	put_bridge(func->slot->bridge);
+}
+
 /* callback routine to register each ACPI PCI slot object */
 static acpi_status
 register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
@@ -298,7 +313,8 @@
 		 */
 		newfunc->flags &= ~FUNC_HAS_EJ0;
 		if (register_hotplug_dock_device(handle,
-			&acpiphp_dock_ops, newfunc))
+			&acpiphp_dock_ops, newfunc,
+			acpiphp_dock_init, acpiphp_dock_release))
 			dbg("failed to register dock device\n");
 
 		/* we need to be notified when dock events happen
@@ -670,6 +686,7 @@
 	struct pci_bus *bus = slot->bridge->pci_bus;
 	struct acpiphp_func *func;
 	int num, max, pass;
+	LIST_HEAD(add_list);
 
 	if (slot->flags & SLOT_ENABLED)
 		goto err_exit;
@@ -694,13 +711,15 @@
 				max = pci_scan_bridge(bus, dev, max, pass);
 				if (pass && dev->subordinate) {
 					check_hotplug_bridge(slot, dev);
-					pci_bus_size_bridges(dev->subordinate);
+					pcibios_resource_survey_bus(dev->subordinate);
+					__pci_bus_size_bridges(dev->subordinate,
+							       &add_list);
 				}
 			}
 		}
 	}
 
-	pci_bus_assign_resources(bus);
+	__pci_bus_assign_resources(bus, &add_list, NULL);
 	acpiphp_sanitize_bus(bus);
 	acpiphp_set_hpp_values(bus);
 	acpiphp_set_acpi_region(slot);
@@ -1065,22 +1084,12 @@
 	alloc_acpi_hp_work(handle, type, context, _handle_hotplug_event_bridge);
 }
 
-static void _handle_hotplug_event_func(struct work_struct *work)
+static void hotplug_event_func(acpi_handle handle, u32 type, void *context)
 {
-	struct acpiphp_func *func;
+	struct acpiphp_func *func = context;
 	char objname[64];
 	struct acpi_buffer buffer = { .length = sizeof(objname),
 				      .pointer = objname };
-	struct acpi_hp_work *hp_work;
-	acpi_handle handle;
-	u32 type;
-
-	hp_work = container_of(work, struct acpi_hp_work, work);
-	handle = hp_work->handle;
-	type = hp_work->type;
-	func = (struct acpiphp_func *)hp_work->context;
-
-	acpi_scan_lock_acquire();
 
 	acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
 
@@ -1113,6 +1122,18 @@
 		warn("notify_handler: unknown event type 0x%x for %s\n", type, objname);
 		break;
 	}
+}
+
+static void _handle_hotplug_event_func(struct work_struct *work)
+{
+	struct acpi_hp_work *hp_work;
+	struct acpiphp_func *func;
+
+	hp_work = container_of(work, struct acpi_hp_work, work);
+	func = hp_work->context;
+	acpi_scan_lock_acquire();
+
+	hotplug_event_func(hp_work->handle, hp_work->type, func);
 
 	acpi_scan_lock_release();
 	kfree(hp_work); /* allocated in handle_hotplug_event_func */
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 68678ed..d1182c4 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -202,6 +202,11 @@
 		    struct resource *res, unsigned int reg);
 int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type);
 void pci_configure_ari(struct pci_dev *dev);
+void __ref __pci_bus_size_bridges(struct pci_bus *bus,
+			struct list_head *realloc_head);
+void __ref __pci_bus_assign_resources(const struct pci_bus *bus,
+				      struct list_head *realloc_head,
+				      struct list_head *fail_head);
 
 /**
  * pci_ari_enabled - query ARI forwarding status
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 16abaaa..d254e23 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -1044,7 +1044,7 @@
 	;
 }
 
-static void __ref __pci_bus_size_bridges(struct pci_bus *bus,
+void __ref __pci_bus_size_bridges(struct pci_bus *bus,
 			struct list_head *realloc_head)
 {
 	struct pci_dev *dev;
@@ -1115,9 +1115,9 @@
 }
 EXPORT_SYMBOL(pci_bus_size_bridges);
 
-static void __ref __pci_bus_assign_resources(const struct pci_bus *bus,
-					 struct list_head *realloc_head,
-					 struct list_head *fail_head)
+void __ref __pci_bus_assign_resources(const struct pci_bus *bus,
+				      struct list_head *realloc_head,
+				      struct list_head *fail_head)
 {
 	struct pci_bus *b;
 	struct pci_dev *dev;
diff --git a/drivers/regulator/tps6586x-regulator.c b/drivers/regulator/tps6586x-regulator.c
index d8fa37d..2c9155b 100644
--- a/drivers/regulator/tps6586x-regulator.c
+++ b/drivers/regulator/tps6586x-regulator.c
@@ -439,7 +439,7 @@
 
 static struct platform_driver tps6586x_regulator_driver = {
 	.driver	= {
-		.name	= "tps6586x-pmic",
+		.name	= "tps6586x-regulator",
 		.owner	= THIS_MODULE,
 	},
 	.probe		= tps6586x_regulator_probe,
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 4ffa66c..9ca3996 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -2040,6 +2040,7 @@
 			   netiucv_setup_netdevice);
 	if (!dev)
 		return NULL;
+	rtnl_lock();
 	if (dev_alloc_name(dev, dev->name) < 0)
 		goto out_netdev;
 
@@ -2061,6 +2062,7 @@
 out_fsm:
 	kfree_fsm(privptr->fsm);
 out_netdev:
+	rtnl_unlock();
 	free_netdev(dev);
 	return NULL;
 }
@@ -2100,6 +2102,7 @@
 
 	rc = netiucv_register_device(dev);
 	if (rc) {
+		rtnl_unlock();
 		IUCV_DBF_TEXT_(setup, 2,
 			"ret %d from netiucv_register_device\n", rc);
 		goto out_free_ndev;
@@ -2109,7 +2112,8 @@
 	priv = netdev_priv(dev);
 	SET_NETDEV_DEV(dev, priv->dev);
 
-	rc = register_netdev(dev);
+	rc = register_netdevice(dev);
+	rtnl_unlock();
 	if (rc)
 		goto out_unreg;
 
diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
index 439c012..b63d534 100644
--- a/drivers/scsi/bfa/bfad_debugfs.c
+++ b/drivers/scsi/bfa/bfad_debugfs.c
@@ -186,7 +186,7 @@
 		file->f_pos += offset;
 		break;
 	case 2:
-		file->f_pos = debug->buffer_len - offset;
+		file->f_pos = debug->buffer_len + offset;
 		break;
 	default:
 		return -EINVAL;
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 292b24f..32ae6c6 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -1656,9 +1656,12 @@
 
 	if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN &&
 	    fcoe->realdev->features & NETIF_F_HW_VLAN_CTAG_TX) {
-		skb->vlan_tci = VLAN_TAG_PRESENT |
-				vlan_dev_vlan_id(fcoe->netdev);
+		/* must set skb->dev before calling vlan_put_tag */
 		skb->dev = fcoe->realdev;
+		skb = __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+					     vlan_dev_vlan_id(fcoe->netdev));
+		if (!skb)
+			return -ENOMEM;
 	} else
 		skb->dev = fcoe->netdev;
 
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index cd743c5..795843d 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -1548,9 +1548,6 @@
 {
 	struct fcoe_fcf *fcf;
 	struct fcoe_fcf *best = fip->sel_fcf;
-	struct fcoe_fcf *first;
-
-	first = list_first_entry(&fip->fcfs, struct fcoe_fcf, list);
 
 	list_for_each_entry(fcf, &fip->fcfs, list) {
 		LIBFCOE_FIP_DBG(fip, "consider FCF fab %16.16llx "
@@ -1568,17 +1565,15 @@
 					"" : "un");
 			continue;
 		}
-		if (fcf->fabric_name != first->fabric_name ||
-		    fcf->vfid != first->vfid ||
-		    fcf->fc_map != first->fc_map) {
+		if (!best || fcf->pri < best->pri || best->flogi_sent)
+			best = fcf;
+		if (fcf->fabric_name != best->fabric_name ||
+		    fcf->vfid != best->vfid ||
+		    fcf->fc_map != best->fc_map) {
 			LIBFCOE_FIP_DBG(fip, "Conflicting fabric, VFID, "
 					"or FC-MAP\n");
 			return NULL;
 		}
-		if (fcf->flogi_sent)
-			continue;
-		if (!best || fcf->pri < best->pri || best->flogi_sent)
-			best = fcf;
 	}
 	fip->sel_fcf = best;
 	if (best) {
diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c
index adc1f7f..85e1ffd 100644
--- a/drivers/scsi/fnic/fnic_debugfs.c
+++ b/drivers/scsi/fnic/fnic_debugfs.c
@@ -174,7 +174,7 @@
 		pos = file->f_pos + offset;
 		break;
 	case 2:
-		pos = fnic_dbg_prt->buffer_len - offset;
+		pos = fnic_dbg_prt->buffer_len + offset;
 	}
 	return (pos < 0 || pos > fnic_dbg_prt->buffer_len) ?
 			  -EINVAL : (file->f_pos = pos);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 82a3c1e..6c4cedb 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -8980,19 +8980,6 @@
 	if (!ioa_cfg->res_entries)
 		goto out;
 
-	if (ioa_cfg->sis64) {
-		ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) *
-					      BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
-		ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) *
-					     BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
-		ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
-					    BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
-
-		if (!ioa_cfg->target_ids || !ioa_cfg->array_ids
-			|| !ioa_cfg->vset_ids)
-			goto out_free_res_entries;
-	}
-
 	for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
 		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
 		ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
@@ -9089,9 +9076,6 @@
 			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
 out_free_res_entries:
 	kfree(ioa_cfg->res_entries);
-	kfree(ioa_cfg->target_ids);
-	kfree(ioa_cfg->array_ids);
-	kfree(ioa_cfg->vset_ids);
 	goto out;
 }
 
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index a1fb840..07a85ce 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1440,9 +1440,9 @@
 	/*
 	 * Bitmaps for SIS64 generated target values
 	 */
-	unsigned long *target_ids;
-	unsigned long *array_ids;
-	unsigned long *vset_ids;
+	unsigned long target_ids[BITS_TO_LONGS(IPR_MAX_SIS64_DEVS)];
+	unsigned long array_ids[BITS_TO_LONGS(IPR_MAX_SIS64_DEVS)];
+	unsigned long vset_ids[BITS_TO_LONGS(IPR_MAX_SIS64_DEVS)];
 
 	u16 type; /* CCIN of the card */
 
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index c772d8d..8b928c6 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -463,13 +463,7 @@
 	fc_exch_release(ep);	/* drop hold for exch in mp */
 }
 
-/**
- * fc_seq_send() - Send a frame using existing sequence/exchange pair
- * @lport: The local port that the exchange will be sent on
- * @sp:	   The sequence to be sent
- * @fp:	   The frame to be sent on the exchange
- */
-static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
+static int fc_seq_send_locked(struct fc_lport *lport, struct fc_seq *sp,
 		       struct fc_frame *fp)
 {
 	struct fc_exch *ep;
@@ -479,7 +473,7 @@
 	u8 fh_type = fh->fh_type;
 
 	ep = fc_seq_exch(sp);
-	WARN_ON((ep->esb_stat & ESB_ST_SEQ_INIT) != ESB_ST_SEQ_INIT);
+	WARN_ON(!(ep->esb_stat & ESB_ST_SEQ_INIT));
 
 	f_ctl = ntoh24(fh->fh_f_ctl);
 	fc_exch_setup_hdr(ep, fp, f_ctl);
@@ -502,17 +496,34 @@
 	error = lport->tt.frame_send(lport, fp);
 
 	if (fh_type == FC_TYPE_BLS)
-		return error;
+		goto out;
 
 	/*
 	 * Update the exchange and sequence flags,
 	 * assuming all frames for the sequence have been sent.
 	 * We can only be called to send once for each sequence.
 	 */
-	spin_lock_bh(&ep->ex_lock);
 	ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ;	/* not first seq */
 	if (f_ctl & FC_FC_SEQ_INIT)
 		ep->esb_stat &= ~ESB_ST_SEQ_INIT;
+out:
+	return error;
+}
+
+/**
+ * fc_seq_send() - Send a frame using existing sequence/exchange pair
+ * @lport: The local port that the exchange will be sent on
+ * @sp:	   The sequence to be sent
+ * @fp:	   The frame to be sent on the exchange
+ */
+static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
+		       struct fc_frame *fp)
+{
+	struct fc_exch *ep;
+	int error;
+	ep = fc_seq_exch(sp);
+	spin_lock_bh(&ep->ex_lock);
+	error = fc_seq_send_locked(lport, sp, fp);
 	spin_unlock_bh(&ep->ex_lock);
 	return error;
 }
@@ -629,7 +640,7 @@
 	if (fp) {
 		fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid,
 			       FC_TYPE_BLS, FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
-		error = fc_seq_send(ep->lp, sp, fp);
+		error = fc_seq_send_locked(ep->lp, sp, fp);
 	} else
 		error = -ENOBUFS;
 	return error;
@@ -1132,7 +1143,7 @@
 	f_ctl = FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
 	f_ctl |= ep->f_ctl;
 	fc_fill_fc_hdr(fp, rctl, ep->did, ep->sid, fh_type, f_ctl, 0);
-	fc_seq_send(ep->lp, sp, fp);
+	fc_seq_send_locked(ep->lp, sp, fp);
 }
 
 /**
@@ -1307,8 +1318,8 @@
 		ap->ba_low_seq_cnt = htons(sp->cnt);
 	}
 	sp = fc_seq_start_next_locked(sp);
-	spin_unlock_bh(&ep->ex_lock);
 	fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS);
+	spin_unlock_bh(&ep->ex_lock);
 	fc_frame_free(rx_fp);
 	return;
 
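
The libfc hunks above split fc_seq_send() into a fc_seq_send_locked() helper so that callers already holding ep->ex_lock (the ABTS and last-sequence paths) no longer take the lock a second time, and the BA_ACC send now happens before the lock is dropped. A small userspace sketch of the general locked/unlocked wrapper pattern, using a pthread mutex and made-up names rather than libfc structures:

/*
 * Userspace sketch of the locked/unlocked wrapper pattern: the real work
 * lives in a *_locked() helper that assumes the caller holds the lock, and
 * the public function is a thin wrapper that takes the lock itself.
 * Callers that are already under the lock call the _locked variant
 * directly instead of deadlocking on a second acquisition.  The mutex and
 * counter below are illustrative only.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ex_lock = PTHREAD_MUTEX_INITIALIZER;
static int frames_sent;

static int seq_send_locked(int frame)		/* caller holds ex_lock */
{
	frames_sent += frame;
	return 0;
}

static int seq_send(int frame)			/* public entry point */
{
	int error;

	pthread_mutex_lock(&ex_lock);
	error = seq_send_locked(frame);
	pthread_mutex_unlock(&ex_lock);
	return error;
}

int main(void)
{
	seq_send(1);				/* ordinary caller */

	pthread_mutex_lock(&ex_lock);		/* caller already holds the lock */
	seq_send_locked(1);
	pthread_mutex_unlock(&ex_lock);

	printf("%d frames sent\n", frames_sent);
	return 0;
}
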
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index d518d17..6bbb944 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -1962,7 +1962,7 @@
 		rdata->flags |= FC_RP_FLAGS_RETRY;
 	rdata->supported_classes = FC_COS_CLASS3;
 
-	if (!(lport->service_params & FC_RPORT_ROLE_FCP_INITIATOR))
+	if (!(lport->service_params & FCP_SPPF_INIT_FCN))
 		return 0;
 
 	spp->spp_flags |= rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index f63f5ff..f525ecb 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1178,7 +1178,7 @@
 		pos = file->f_pos + off;
 		break;
 	case 2:
-		pos = debug->len - off;
+		pos = debug->len + off;
 	}
 	return (pos < 0 || pos > debug->len) ? -EINVAL : (file->f_pos = pos);
 }
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 98ab921..0a5c895 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -278,3 +278,14 @@
 
 	set_bit(HOST_RAMP_UP_QUEUE_DEPTH, &vha->dpc_flags);
 }
+
+static inline void
+qla2x00_handle_mbx_completion(struct qla_hw_data *ha, int status)
+{
+	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
+	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
+		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
+		clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
+		complete(&ha->mbx_intr_comp);
+	}
+}
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 259d920..d2a4c75 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -104,14 +104,9 @@
 			RD_REG_WORD(&reg->hccr);
 		}
 	}
+	qla2x00_handle_mbx_completion(ha, status);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
-	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
-	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
-		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
-		complete(&ha->mbx_intr_comp);
-	}
-
 	return (IRQ_HANDLED);
 }
 
@@ -221,14 +216,9 @@
 		WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
 		RD_REG_WORD_RELAXED(&reg->hccr);
 	}
+	qla2x00_handle_mbx_completion(ha, status);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
-	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
-	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
-		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
-		complete(&ha->mbx_intr_comp);
-	}
-
 	return (IRQ_HANDLED);
 }
 
@@ -2613,14 +2603,9 @@
 		if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
 			ndelay(3500);
 	}
+	qla2x00_handle_mbx_completion(ha, status);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
-	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
-	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
-		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
-		complete(&ha->mbx_intr_comp);
-	}
-
 	return IRQ_HANDLED;
 }
 
@@ -2763,13 +2748,9 @@
 		}
 		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
 	} while (0);
+	qla2x00_handle_mbx_completion(ha, status);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
-	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
-	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
-		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
-		complete(&ha->mbx_intr_comp);
-	}
 	return IRQ_HANDLED;
 }
 
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 9e5d89d..3587ec2 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -179,8 +179,6 @@
 
 		wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ);
 
-		clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
-
 	} else {
 		ql_dbg(ql_dbg_mbx, vha, 0x1011,
 		    "Cmd=%x Polling Mode.\n", command);
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index 937fed8..a6df558 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -148,9 +148,6 @@
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
 		wait_for_completion_timeout(&ha->mbx_intr_comp, mcp->tov * HZ);
-
-		clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags);
-
 	} else {
 		ql_dbg(ql_dbg_mbx, vha, 0x112c,
 		    "Cmd=%x Polling Mode.\n", command);
@@ -2934,13 +2931,10 @@
 		QLAFX00_CLR_INTR_REG(ha, clr_intr);
 		QLAFX00_RD_INTR_REG(ha);
 	}
+
+	qla2x00_handle_mbx_completion(ha, status);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
-	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
-	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
-		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
-		complete(&ha->mbx_intr_comp);
-	}
 	return IRQ_HANDLED;
 }
 
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 10754f51..cce0cd0 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -2074,9 +2074,6 @@
 		}
 		WRT_REG_DWORD(&reg->host_int, 0);
 	}
-	spin_unlock_irqrestore(&ha->hardware_lock, flags);
-	if (!ha->flags.msi_enabled)
-		qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
 
 #ifdef QL_DEBUG_LEVEL_17
 	if (!irq && ha->flags.eeh_busy)
@@ -2085,11 +2082,12 @@
 		    status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
 #endif
 
-	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
-	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
-		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
-		complete(&ha->mbx_intr_comp);
-	}
+	qla2x00_handle_mbx_completion(ha, status);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	if (!ha->flags.msi_enabled)
+		qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);
+
 	return IRQ_HANDLED;
 }
 
@@ -2149,8 +2147,6 @@
 		WRT_REG_DWORD(&reg->host_int, 0);
 	} while (0);
 
-	spin_unlock_irqrestore(&ha->hardware_lock, flags);
-
 #ifdef QL_DEBUG_LEVEL_17
 	if (!irq && ha->flags.eeh_busy)
 		ql_log(ql_log_warn, vha, 0x5044,
@@ -2158,11 +2154,9 @@
 		    status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat);
 #endif
 
-	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
-		(status & MBX_INTERRUPT) && ha->flags.mbox_int) {
-			set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
-			complete(&ha->mbx_intr_comp);
-	}
+	qla2x00_handle_mbx_completion(ha, status);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
 	return IRQ_HANDLED;
 }
 
@@ -3345,7 +3339,7 @@
 		ha->flags.mbox_busy = 0;
 		ql_log(ql_log_warn, vha, 0x6010,
 		    "Doing premature completion of mbx command.\n");
-		if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags))
+		if (test_and_clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags))
 			complete(&ha->mbx_intr_comp);
 	}
 }
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 7a3870f..66b0b26 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -688,8 +688,12 @@
 		 * For FCP_READ with CHECK_CONDITION status, clear cmd->bufflen
 		 * for qla_tgt_xmit_response LLD code
 		 */
+		if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+			se_cmd->se_cmd_flags &= ~SCF_OVERFLOW_BIT;
+			se_cmd->residual_count = 0;
+		}
 		se_cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
-		se_cmd->residual_count = se_cmd->data_length;
+		se_cmd->residual_count += se_cmd->data_length;
 
 		cmd->bufflen = 0;
 	}
diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c
index c735c5a..6427600 100644
--- a/drivers/spi/spi-pxa2xx-dma.c
+++ b/drivers/spi/spi-pxa2xx-dma.c
@@ -59,7 +59,7 @@
 		int ret;
 
 		sg_free_table(sgt);
-		ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
+		ret = sg_alloc_table(sgt, nents, GFP_ATOMIC);
 		if (ret)
 			return ret;
 	}
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index f5d84d6..48b396f 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -1075,7 +1075,7 @@
 	    acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev))
 		return NULL;
 
-	pdata = devm_kzalloc(&pdev->dev, sizeof(*ssp), GFP_KERNEL);
+	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
 	if (!pdata) {
 		dev_err(&pdev->dev,
 			"failed to allocate memory for platform data\n");
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index 5000586..71cc3e6 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -444,7 +444,7 @@
 	}
 
 	ret = pm_runtime_get_sync(&sdd->pdev->dev);
-	if (ret != 0) {
+	if (ret < 0) {
 		dev_err(dev, "Failed to enable device: %d\n", ret);
 		goto out_tx;
 	}
diff --git a/drivers/staging/media/davinci_vpfe/Kconfig b/drivers/staging/media/davinci_vpfe/Kconfig
index 2e4a28b..12f321d 100644
--- a/drivers/staging/media/davinci_vpfe/Kconfig
+++ b/drivers/staging/media/davinci_vpfe/Kconfig
@@ -1,6 +1,6 @@
 config VIDEO_DM365_VPFE
 	tristate "DM365 VPFE Media Controller Capture Driver"
-	depends on VIDEO_V4L2 && ARCH_DAVINCI_DM365 && !VIDEO_VPFE_CAPTURE
+	depends on VIDEO_V4L2 && ARCH_DAVINCI_DM365 && !VIDEO_DM365_ISIF
 	select VIDEOBUF2_DMA_CONTIG
 	help
 	  Support for DM365 VPFE based Media Controller Capture driver.
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c b/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c
index b88e1dd..d8ce20d 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_mc_capture.c
@@ -639,7 +639,8 @@
 	if (ret)
 		goto probe_free_dev_mem;
 
-	if (vpfe_initialize_modules(vpfe_dev, pdev))
+	ret = vpfe_initialize_modules(vpfe_dev, pdev);
+	if (ret)
 		goto probe_disable_clock;
 
 	vpfe_dev->media_dev.dev = vpfe_dev->pdev;
@@ -663,7 +664,8 @@
 	/* set the driver data in platform device */
 	platform_set_drvdata(pdev, vpfe_dev);
 	/* register subdevs/entities */
-	if (vpfe_register_entities(vpfe_dev))
+	ret = vpfe_register_entities(vpfe_dev);
+	if (ret)
 		goto probe_out_v4l2_unregister;
 
 	ret = vpfe_attach_irq(vpfe_dev);
diff --git a/drivers/staging/media/solo6x10/Kconfig b/drivers/staging/media/solo6x10/Kconfig
index df6569b..34f3b6d 100644
--- a/drivers/staging/media/solo6x10/Kconfig
+++ b/drivers/staging/media/solo6x10/Kconfig
@@ -5,6 +5,7 @@
 	select VIDEOBUF2_DMA_SG
 	select VIDEOBUF2_DMA_CONTIG
 	select SND_PCM
+	select FONT_8x16
 	---help---
 	  This driver supports the Softlogic based MPEG-4 and h.264 codec
 	  cards.
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index 13e9e71..8d8b3ff 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -155,7 +155,7 @@
 	struct iscsi_tpg_np *tpg_np_iser = NULL;
 	char *endptr;
 	u32 op;
-	int rc;
+	int rc = 0;
 
 	op = simple_strtoul(page, &endptr, 0);
 	if ((op != 1) && (op != 0)) {
@@ -174,31 +174,32 @@
 		return -EINVAL;
 
 	if (op) {
-		int rc = request_module("ib_isert");
-		if (rc != 0)
+		rc = request_module("ib_isert");
+		if (rc != 0) {
 			pr_warn("Unable to request_module for ib_isert\n");
+			rc = 0;
+		}
 
 		tpg_np_iser = iscsit_tpg_add_network_portal(tpg, &np->np_sockaddr,
 				np->np_ip, tpg_np, ISCSI_INFINIBAND);
-		if (!tpg_np_iser || IS_ERR(tpg_np_iser))
+		if (IS_ERR(tpg_np_iser)) {
+			rc = PTR_ERR(tpg_np_iser);
 			goto out;
+		}
 	} else {
 		tpg_np_iser = iscsit_tpg_locate_child_np(tpg_np, ISCSI_INFINIBAND);
-		if (!tpg_np_iser)
-			goto out;
-
-		rc = iscsit_tpg_del_network_portal(tpg, tpg_np_iser);
-		if (rc < 0)
-			goto out;
+		if (tpg_np_iser) {
+			rc = iscsit_tpg_del_network_portal(tpg, tpg_np_iser);
+			if (rc < 0)
+				goto out;
+		}
 	}
 
-	printk("lio_target_np_store_iser() done, op: %d\n", op);
-
 	iscsit_put_tpg(tpg);
 	return count;
 out:
 	iscsit_put_tpg(tpg);
-	return -EINVAL;
+	return rc;
 }
 
 TF_NP_BASE_ATTR(lio_target, iser, S_IRUGO | S_IWUSR);
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index 8e6298c..dcb199d 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -842,11 +842,11 @@
 		return 0;
 
 	sess->time2retain_timer_flags |= ISCSI_TF_STOP;
-	spin_unlock_bh(&se_tpg->session_lock);
+	spin_unlock(&se_tpg->session_lock);
 
 	del_timer_sync(&sess->time2retain_timer);
 
-	spin_lock_bh(&se_tpg->session_lock);
+	spin_lock(&se_tpg->session_lock);
 	sess->time2retain_timer_flags &= ~ISCSI_TF_RUNNING;
 	pr_debug("Stopped Time2Retain Timer for SID: %u\n",
 			sess->sid);
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index bb5d5c5..3402241 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -984,8 +984,6 @@
 	}
 
 	np->np_transport = t;
-	printk("Set np->np_transport to %p -> %s\n", np->np_transport,
-				np->np_transport->name);
 	return 0;
 }
 
@@ -1002,7 +1000,6 @@
 
 	conn->sock = new_sock;
 	conn->login_family = np->np_sockaddr.ss_family;
-	printk("iSCSI/TCP: Setup conn->sock from new_sock: %p\n", new_sock);
 
 	if (np->np_sockaddr.ss_family == AF_INET6) {
 		memset(&sock_in6, 0, sizeof(struct sockaddr_in6));
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 7ad9120..cd5018f 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -721,9 +721,6 @@
 
 		start += strlen(key) + strlen(value) + 2;
 	}
-
-	printk("i_buf: %s, s_buf: %s, t_buf: %s\n", i_buf, s_buf, t_buf);
-
 	/*
 	 * See 5.3.  Login Phase.
 	 */
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index 59bfaec..abfd990 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -244,14 +244,9 @@
 
 static int pty_open(struct tty_struct *tty, struct file *filp)
 {
-	int	retval = -ENODEV;
-
 	if (!tty || !tty->link)
-		goto out;
+		return -ENODEV;
 
-	set_bit(TTY_IO_ERROR, &tty->flags);
-
-	retval = -EIO;
 	if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
 		goto out;
 	if (test_bit(TTY_PTY_LOCK, &tty->link->flags))
@@ -262,9 +257,11 @@
 	clear_bit(TTY_IO_ERROR, &tty->flags);
 	clear_bit(TTY_OTHER_CLOSED, &tty->link->flags);
 	set_bit(TTY_THROTTLED, &tty->flags);
-	retval = 0;
+	return 0;
+
 out:
-	return retval;
+	set_bit(TTY_IO_ERROR, &tty->flags);
+	return -EIO;
 }
 
 static void pty_set_termios(struct tty_struct *tty,
diff --git a/drivers/tty/serial/8250/8250_gsc.c b/drivers/tty/serial/8250/8250_gsc.c
index 097dff9..bb91b47 100644
--- a/drivers/tty/serial/8250/8250_gsc.c
+++ b/drivers/tty/serial/8250/8250_gsc.c
@@ -30,6 +30,12 @@
 	unsigned long address;
 	int err;
 
+#ifdef CONFIG_64BIT
+	extern int iosapic_serial_irq(int cellnum);
+	if (!dev->irq && (dev->id.sversion == 0xad))
+		dev->irq = iosapic_serial_irq(dev->mod_index-1);
+#endif
+
 	if (!dev->irq) {
 		/* We find some unattached serial ports by walking native
 		 * busses.  These should be silently ignored.  Otherwise,
@@ -51,7 +57,8 @@
 	memset(&uart, 0, sizeof(uart));
 	uart.port.iotype	= UPIO_MEM;
 	/* 7.272727MHz on Lasi.  Assumed the same for Dino, Wax and Timi. */
-	uart.port.uartclk	= 7272727;
+	uart.port.uartclk	= (dev->id.sversion != 0xad) ?
+					7272727 : 1843200;
 	uart.port.mapbase	= address;
 	uart.port.membase	= ioremap_nocache(address, 16);
 	uart.port.irq	= dev->irq;
@@ -73,6 +80,7 @@
 	{ HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00075 },
 	{ HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008c },
 	{ HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008d },
+	{ HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x000ad },
 	{ 0 }
 };
 
diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
index fc2c06c..2bd78e2 100644
--- a/drivers/tty/vt/vt_ioctl.c
+++ b/drivers/tty/vt/vt_ioctl.c
@@ -289,13 +289,10 @@
 	struct vc_data *vc = NULL;
 	int ret = 0;
 
-	if (!vc_num)
-		return 0;
-
 	console_lock();
 	if (VT_BUSY(vc_num))
 		ret = -EBUSY;
-	else
+	else if (vc_num)
 		vc = vc_deallocate(vc_num);
 	console_unlock();
 
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 49b098b..475c9c1 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -276,8 +276,9 @@
 
 		ci_role_stop(ci);
 		ci_role_start(ci, role);
-		enable_irq(ci->irq);
 	}
+
+	enable_irq(ci->irq);
 }
 
 static irqreturn_t ci_irq(int irq, void *data)
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 519ead2..b501346 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1678,8 +1678,11 @@
 
 	ci->gadget.ep0 = &ci->ep0in->ep;
 
-	if (ci->global_phy)
+	if (ci->global_phy) {
 		ci->transceiver = usb_get_phy(USB_PHY_TYPE_USB2);
+		if (IS_ERR(ci->transceiver))
+			ci->transceiver = NULL;
+	}
 
 	if (ci->platdata->flags & CI13XXX_REQUIRE_TRANSCEIVER) {
 		if (ci->transceiver == NULL) {
@@ -1694,7 +1697,7 @@
 			goto put_transceiver;
 	}
 
-	if (!IS_ERR_OR_NULL(ci->transceiver)) {
+	if (ci->transceiver) {
 		retval = otg_set_peripheral(ci->transceiver->otg,
 						&ci->gadget);
 		if (retval)
@@ -1711,7 +1714,7 @@
 	return retval;
 
 remove_trans:
-	if (!IS_ERR_OR_NULL(ci->transceiver)) {
+	if (ci->transceiver) {
 		otg_set_peripheral(ci->transceiver->otg, NULL);
 		if (ci->global_phy)
 			usb_put_phy(ci->transceiver);
@@ -1719,7 +1722,7 @@
 
 	dev_err(dev, "error = %i\n", retval);
 put_transceiver:
-	if (!IS_ERR_OR_NULL(ci->transceiver) && ci->global_phy)
+	if (ci->transceiver && ci->global_phy)
 		usb_put_phy(ci->transceiver);
 destroy_eps:
 	destroy_eps(ci);
@@ -1747,7 +1750,7 @@
 	dma_pool_destroy(ci->td_pool);
 	dma_pool_destroy(ci->qh_pool);
 
-	if (!IS_ERR_OR_NULL(ci->transceiver)) {
+	if (ci->transceiver) {
 		otg_set_peripheral(ci->transceiver->otg, NULL);
 		if (ci->global_phy)
 			usb_put_phy(ci->transceiver);
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 7ef3eb8..2311b1e 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -4,11 +4,17 @@
 menuconfig USB_PHY
 	bool "USB Physical Layer drivers"
 	help
-	  USB controllers (those which are host, device or DRD) need a
-	  device to handle the physical layer signalling, commonly called
-	  a PHY.
+	  Most USB controllers have the physical layer signalling part
+	  (commonly called a PHY) built in.  However, dual-role devices
+	  (a.k.a. USB on-the-go) which support being USB master or slave
+	  with the same connector often use an external PHY.
 
-	  The following drivers add support for such PHY devices.
+	  The drivers in this submenu add support for such PHY devices.
+	  They are not needed for standard master-only (or the vast
+	  majority of slave-only) USB interfaces.
+
+	  If you're not sure if this applies to you, it probably doesn't;
+	  say N here.
 
 if USB_PHY
 
diff --git a/drivers/usb/serial/f81232.c b/drivers/usb/serial/f81232.c
index 090b411..7d8dd5a 100644
--- a/drivers/usb/serial/f81232.c
+++ b/drivers/usb/serial/f81232.c
@@ -165,11 +165,12 @@
 	/* FIXME - Stubbed out for now */
 
 	/* Don't change anything if nothing has changed */
-	if (!tty_termios_hw_change(&tty->termios, old_termios))
+	if (old_termios && !tty_termios_hw_change(&tty->termios, old_termios))
 		return;
 
 	/* Do the real work here... */
-	tty_termios_copy_hw(&tty->termios, old_termios);
+	if (old_termios)
+		tty_termios_copy_hw(&tty->termios, old_termios);
 }
 
 static int f81232_tiocmget(struct tty_struct *tty)
@@ -187,12 +188,11 @@
 
 static int f81232_open(struct tty_struct *tty, struct usb_serial_port *port)
 {
-	struct ktermios tmp_termios;
 	int result;
 
 	/* Setup termios */
 	if (tty)
-		f81232_set_termios(tty, port, &tmp_termios);
+		f81232_set_termios(tty, port, NULL);
 
 	result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
 	if (result) {
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 7151659..048cd44 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -284,7 +284,7 @@
 	   serial settings even to the same values as before. Thus
 	   we actually need to filter in this specific case */
 
-	if (!tty_termios_hw_change(&tty->termios, old_termios))
+	if (old_termios && !tty_termios_hw_change(&tty->termios, old_termios))
 		return;
 
 	cflag = tty->termios.c_cflag;
@@ -293,7 +293,8 @@
 	if (!buf) {
 		dev_err(&port->dev, "%s - out of memory.\n", __func__);
 		/* Report back no change occurred */
-		tty->termios = *old_termios;
+		if (old_termios)
+			tty->termios = *old_termios;
 		return;
 	}
 
@@ -433,7 +434,7 @@
 	control = priv->line_control;
 	if ((cflag & CBAUD) == B0)
 		priv->line_control &= ~(CONTROL_DTR | CONTROL_RTS);
-	else if ((old_termios->c_cflag & CBAUD) == B0)
+	else if (old_termios && (old_termios->c_cflag & CBAUD) == B0)
 		priv->line_control |= (CONTROL_DTR | CONTROL_RTS);
 	if (control != priv->line_control) {
 		control = priv->line_control;
@@ -492,7 +493,6 @@
 
 static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
 {
-	struct ktermios tmp_termios;
 	struct usb_serial *serial = port->serial;
 	struct pl2303_serial_private *spriv = usb_get_serial_data(serial);
 	int result;
@@ -508,7 +508,7 @@
 
 	/* Setup termios */
 	if (tty)
-		pl2303_set_termios(tty, port, &tmp_termios);
+		pl2303_set_termios(tty, port, NULL);
 
 	result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
 	if (result) {
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index cf3df79..ddf6c47 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -291,7 +291,6 @@
 	struct spcp8x5_private *priv = usb_get_serial_port_data(port);
 	unsigned long flags;
 	unsigned int cflag = tty->termios.c_cflag;
-	unsigned int old_cflag = old_termios->c_cflag;
 	unsigned short uartdata;
 	unsigned char buf[2] = {0, 0};
 	int baud;
@@ -299,15 +298,15 @@
 	u8 control;
 
 	/* check that they really want us to change something */
-	if (!tty_termios_hw_change(&tty->termios, old_termios))
+	if (old_termios && !tty_termios_hw_change(&tty->termios, old_termios))
 		return;
 
 	/* set DTR/RTS active */
 	spin_lock_irqsave(&priv->lock, flags);
 	control = priv->line_control;
-	if ((old_cflag & CBAUD) == B0) {
+	if (old_termios && (old_termios->c_cflag & CBAUD) == B0) {
 		priv->line_control |= MCR_DTR;
-		if (!(old_cflag & CRTSCTS))
+		if (!(old_termios->c_cflag & CRTSCTS))
 			priv->line_control |= MCR_RTS;
 	}
 	if (control != priv->line_control) {
@@ -394,7 +393,6 @@
 
 static int spcp8x5_open(struct tty_struct *tty, struct usb_serial_port *port)
 {
-	struct ktermios tmp_termios;
 	struct usb_serial *serial = port->serial;
 	struct spcp8x5_private *priv = usb_get_serial_port_data(port);
 	int ret;
@@ -411,7 +409,7 @@
 	spcp8x5_set_ctrl_line(port, priv->line_control);
 
 	if (tty)
-		spcp8x5_set_termios(tty, port, &tmp_termios);
+		spcp8x5_set_termios(tty, port, NULL);
 
 	port->port.drain_delay = 256;
 
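
The f81232, pl2303 and spcp8x5 hunks above all switch open() to calling set_termios() with a NULL old_termios instead of an uninitialized stack ktermios, so set_termios() must now treat old_termios as optional and program the hardware unconditionally on first open. A hedged userspace sketch of that convention; the structure and helper names below are invented for illustration:

/*
 * Userspace sketch of the "old_termios may be NULL" convention adopted
 * above.  struct fake_termios and hw_change() are invented for the
 * example; the point is only the NULL guard before the early return.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_termios {
	unsigned int c_cflag;
};

static bool hw_change(const struct fake_termios *a, const struct fake_termios *b)
{
	return a->c_cflag != b->c_cflag;
}

static void set_termios(struct fake_termios *new, const struct fake_termios *old)
{
	/* skip reprogramming only when we have an "old" state to compare */
	if (old && !hw_change(new, old))
		return;

	printf("programming hardware: cflag=%#x\n", new->c_cflag);
}

int main(void)
{
	struct fake_termios t = { .c_cflag = 0x4b0 };

	set_termios(&t, NULL);	/* first open: always program */
	set_termios(&t, &t);	/* nothing changed: skipped */
	return 0;
}
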
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index c92c5ed..e581c25 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -172,7 +172,8 @@
 	{ USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) },
 	{ USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) },
 	{ USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) },
-	{ USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) },
+	{ USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STEREO_PLUG_ID) },
+	{ USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) },
 	{ USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) },
 };
 
diff --git a/drivers/usb/serial/ti_usb_3410_5052.h b/drivers/usb/serial/ti_usb_3410_5052.h
index b353e7e..4a2423e 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.h
+++ b/drivers/usb/serial/ti_usb_3410_5052.h
@@ -52,7 +52,9 @@
 
 /* Abbott Diabetics vendor and product ids */
 #define ABBOTT_VENDOR_ID		0x1a61
-#define ABBOTT_PRODUCT_ID		0x3410
+#define ABBOTT_STEREO_PLUG_ID		0x3410
+#define ABBOTT_PRODUCT_ID		ABBOTT_STEREO_PLUG_ID
+#define ABBOTT_STRIP_PORT_ID		0x3420
 
 /* Commands */
 #define TI_GET_VERSION			0x01
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index e7b3cb5..b8b60b6 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2859,8 +2859,8 @@
 	btrfs_free_qgroup_config(fs_info);
 fail_trans_kthread:
 	kthread_stop(fs_info->transaction_kthread);
-	del_fs_roots(fs_info);
 	btrfs_cleanup_transaction(fs_info->tree_root);
+	del_fs_roots(fs_info);
 fail_cleaner:
 	kthread_stop(fs_info->cleaner_kthread);
 
@@ -3512,15 +3512,15 @@
 		       percpu_counter_sum(&fs_info->delalloc_bytes));
 	}
 
-	free_root_pointers(fs_info, 1);
-
 	btrfs_free_block_groups(fs_info);
 
+	btrfs_stop_all_workers(fs_info);
+
 	del_fs_roots(fs_info);
 
-	iput(fs_info->btree_inode);
+	free_root_pointers(fs_info, 1);
 
-	btrfs_stop_all_workers(fs_info);
+	iput(fs_info->btree_inode);
 
 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
 	if (btrfs_test_opt(root, CHECK_INTEGRITY))
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index af978f7..17f3064 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -8012,6 +8012,9 @@
 {
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 
+	if (root == NULL)
+		return 1;
+
 	/* the snap/subvol tree is on deleting */
 	if (btrfs_root_refs(&root->root_item) == 0 &&
 	    root != root->fs_info->tree_root)
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 395b820..4febca4 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -4082,7 +4082,7 @@
 	return inode;
 }
 
-static struct reloc_control *alloc_reloc_control(void)
+static struct reloc_control *alloc_reloc_control(struct btrfs_fs_info *fs_info)
 {
 	struct reloc_control *rc;
 
@@ -4093,7 +4093,8 @@
 	INIT_LIST_HEAD(&rc->reloc_roots);
 	backref_cache_init(&rc->backref_cache);
 	mapping_tree_init(&rc->reloc_root_tree);
-	extent_io_tree_init(&rc->processed_blocks, NULL);
+	extent_io_tree_init(&rc->processed_blocks,
+			    fs_info->btree_inode->i_mapping);
 	return rc;
 }
 
@@ -4110,7 +4111,7 @@
 	int rw = 0;
 	int err = 0;
 
-	rc = alloc_reloc_control();
+	rc = alloc_reloc_control(fs_info);
 	if (!rc)
 		return -ENOMEM;
 
@@ -4311,7 +4312,7 @@
 	if (list_empty(&reloc_roots))
 		goto out;
 
-	rc = alloc_reloc_control();
+	rc = alloc_reloc_control(root->fs_info);
 	if (!rc) {
 		err = -ENOMEM;
 		goto out;
diff --git a/fs/exec.c b/fs/exec.c
index 6430195..ffd7a81 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1135,13 +1135,6 @@
 			set_dumpable(current->mm, suid_dumpable);
 	}
 
-	/*
-	 * Flush performance counters when crossing a
-	 * security domain:
-	 */
-	if (!get_dumpable(current->mm))
-		perf_event_exit_task(current);
-
 	/* An exec changes our domain. We are no longer part of the thread
 	   group */
 
@@ -1205,6 +1198,15 @@
 
 	commit_creds(bprm->cred);
 	bprm->cred = NULL;
+
+	/*
+	 * Disable monitoring for regular users
+	 * when executing setuid binaries. Must
+	 * wait until new credentials are committed
+	 * by commit_creds() above
+	 */
+	if (get_dumpable(current->mm) != SUID_DUMP_USER)
+		perf_event_exit_task(current);
 	/*
 	 * cred_guard_mutex must be held at least to this point to prevent
 	 * ptrace_attach() from altering our determination of the task's
diff --git a/fs/file_table.c b/fs/file_table.c
index cd4d87a..485dc0e 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -306,17 +306,18 @@
 {
 	if (atomic_long_dec_and_test(&file->f_count)) {
 		struct task_struct *task = current;
+		unsigned long flags;
+
 		file_sb_list_del(file);
-		if (unlikely(in_interrupt() || task->flags & PF_KTHREAD)) {
-			unsigned long flags;
-			spin_lock_irqsave(&delayed_fput_lock, flags);
-			list_add(&file->f_u.fu_list, &delayed_fput_list);
-			schedule_work(&delayed_fput_work);
-			spin_unlock_irqrestore(&delayed_fput_lock, flags);
-			return;
+		if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
+			init_task_work(&file->f_u.fu_rcuhead, ____fput);
+			if (!task_work_add(task, &file->f_u.fu_rcuhead, true))
+				return;
 		}
-		init_task_work(&file->f_u.fu_rcuhead, ____fput);
-		task_work_add(task, &file->f_u.fu_rcuhead, true);
+		spin_lock_irqsave(&delayed_fput_lock, flags);
+		list_add(&file->f_u.fu_list, &delayed_fput_list);
+		schedule_work(&delayed_fput_work);
+		spin_unlock_irqrestore(&delayed_fput_lock, flags);
 	}
 }
 
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index e570081..35f2810 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -2470,13 +2470,16 @@
 		.mode = mode
 	};
 	int err;
+	bool lock_inode = !(mode & FALLOC_FL_KEEP_SIZE) ||
+			   (mode & FALLOC_FL_PUNCH_HOLE);
 
 	if (fc->no_fallocate)
 		return -EOPNOTSUPP;
 
-	if (mode & FALLOC_FL_PUNCH_HOLE) {
+	if (lock_inode) {
 		mutex_lock(&inode->i_mutex);
-		fuse_set_nowrite(inode);
+		if (mode & FALLOC_FL_PUNCH_HOLE)
+			fuse_set_nowrite(inode);
 	}
 
 	req = fuse_get_req_nopages(fc);
@@ -2511,8 +2514,9 @@
 	fuse_invalidate_attr(inode);
 
 out:
-	if (mode & FALLOC_FL_PUNCH_HOLE) {
-		fuse_release_nowrite(inode);
+	if (lock_inode) {
+		if (mode & FALLOC_FL_PUNCH_HOLE)
+			fuse_release_nowrite(inode);
 		mutex_unlock(&inode->i_mutex);
 	}
 
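
The fuse hunk above takes i_mutex whenever the fallocate operation can change i_size or punches a hole, instead of only for hole punching. As a reminder of the userspace modes involved, here is a small fallocate(2) example; the file name is a placeholder, and FALLOC_FL_PUNCH_HOLE must be combined with FALLOC_FL_KEEP_SIZE per the syscall's contract:

/*
 * Userspace reminder of the fallocate(2) modes the fuse change keys on:
 * plain allocation (mode 0) can grow the file size, while hole punching
 * must be combined with FALLOC_FL_KEEP_SIZE and leaves the size alone.
 * "sparse.dat" is just a placeholder file name.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("sparse.dat", O_RDWR | O_CREAT, 0644);

	if (fd < 0)
		return 1;

	/* extend the file to 1 MiB; may change the file size */
	if (fallocate(fd, 0, 0, 1 << 20))
		perror("fallocate");

	/* punch a 4 KiB hole at offset 64 KiB; size stays the same */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      64 << 10, 4 << 10))
		perror("fallocate punch");

	close(fd);
	return 0;
}
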
diff --git a/fs/internal.h b/fs/internal.h
index eaa75f7..6812158 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -132,6 +132,12 @@
 extern ssize_t __kernel_write(struct file *, const char *, size_t, loff_t *);
 
 /*
+ * splice.c
+ */
+extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
+		loff_t *opos, size_t len, unsigned int flags);
+
+/*
  * pipe.c
  */
 extern const struct file_operations pipefifo_fops;
diff --git a/fs/namei.c b/fs/namei.c
index 85e40d1..9ed9361 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1976,7 +1976,7 @@
 		err = complete_walk(nd);
 
 	if (!err && nd->flags & LOOKUP_DIRECTORY) {
-		if (!nd->inode->i_op->lookup) {
+		if (!can_lookup(nd->inode)) {
 			path_put(&nd->path);
 			err = -ENOTDIR;
 		}
@@ -2850,7 +2850,7 @@
 	if ((open_flag & O_CREAT) && S_ISDIR(nd->inode->i_mode))
 		goto out;
 	error = -ENOTDIR;
-	if ((nd->flags & LOOKUP_DIRECTORY) && !nd->inode->i_op->lookup)
+	if ((nd->flags & LOOKUP_DIRECTORY) && !can_lookup(nd->inode))
 		goto out;
 	audit_inode(name, nd->path.dentry, 0);
 finish_open:
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
index 8163260..6792ce1 100644
--- a/fs/ncpfs/dir.c
+++ b/fs/ncpfs/dir.c
@@ -1029,15 +1029,6 @@
 	DPRINTK("ncp_rmdir: removing %s/%s\n",
 		dentry->d_parent->d_name.name, dentry->d_name.name);
 
-	/*
-	 * fail with EBUSY if there are still references to this
-	 * directory.
-	 */
-	dentry_unhash(dentry);
-	error = -EBUSY;
-	if (!d_unhashed(dentry))
-		goto out;
-
 	len = sizeof(__name);
 	error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
 			   dentry->d_name.len, !ncp_preserve_case(dir));
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index 86d1038..4637ec4 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -159,7 +159,7 @@
 			break;
 
 		ret = psinfo->write(PSTORE_TYPE_DMESG, reason, &id, part,
-				    oopscount, hsize + len, psinfo);
+				    oopscount, hsize, hsize + len, psinfo);
 		if (ret == 0 && reason == KMSG_DUMP_OOPS && pstore_is_mounted())
 			pstore_new_entry = 1;
 
@@ -196,7 +196,7 @@
 			spin_lock_irqsave(&psinfo->buf_lock, flags);
 		}
 		memcpy(psinfo->buf, s, c);
-		psinfo->write(PSTORE_TYPE_CONSOLE, 0, &id, 0, 0, c, psinfo);
+		psinfo->write(PSTORE_TYPE_CONSOLE, 0, &id, 0, 0, 0, c, psinfo);
 		spin_unlock_irqrestore(&psinfo->buf_lock, flags);
 		s += c;
 		c = e - s;
@@ -221,9 +221,11 @@
 static int pstore_write_compat(enum pstore_type_id type,
 			       enum kmsg_dump_reason reason,
 			       u64 *id, unsigned int part, int count,
-			       size_t size, struct pstore_info *psi)
+			       size_t hsize, size_t size,
+			       struct pstore_info *psi)
 {
-	return psi->write_buf(type, reason, id, part, psinfo->buf, size, psi);
+	return psi->write_buf(type, reason, id, part, psinfo->buf, hsize,
+			     size, psi);
 }
 
 /*
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index 1376e5a..c6bb77c 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -195,7 +195,8 @@
 static int notrace ramoops_pstore_write_buf(enum pstore_type_id type,
 					    enum kmsg_dump_reason reason,
 					    u64 *id, unsigned int part,
-					    const char *buf, size_t size,
+					    const char *buf,
+					    size_t hsize, size_t size,
 					    struct pstore_info *psi)
 {
 	struct ramoops_context *cxt = psi->data;
diff --git a/fs/read_write.c b/fs/read_write.c
index 0343000..2cefa41 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -1064,6 +1064,7 @@
 	struct fd in, out;
 	struct inode *in_inode, *out_inode;
 	loff_t pos;
+	loff_t out_pos;
 	ssize_t retval;
 	int fl;
 
@@ -1077,12 +1078,14 @@
 	if (!(in.file->f_mode & FMODE_READ))
 		goto fput_in;
 	retval = -ESPIPE;
-	if (!ppos)
-		ppos = &in.file->f_pos;
-	else
+	if (!ppos) {
+		pos = in.file->f_pos;
+	} else {
+		pos = *ppos;
 		if (!(in.file->f_mode & FMODE_PREAD))
 			goto fput_in;
-	retval = rw_verify_area(READ, in.file, ppos, count);
+	}
+	retval = rw_verify_area(READ, in.file, &pos, count);
 	if (retval < 0)
 		goto fput_in;
 	count = retval;
@@ -1099,7 +1102,8 @@
 	retval = -EINVAL;
 	in_inode = file_inode(in.file);
 	out_inode = file_inode(out.file);
-	retval = rw_verify_area(WRITE, out.file, &out.file->f_pos, count);
+	out_pos = out.file->f_pos;
+	retval = rw_verify_area(WRITE, out.file, &out_pos, count);
 	if (retval < 0)
 		goto fput_out;
 	count = retval;
@@ -1107,7 +1111,6 @@
 	if (!max)
 		max = min(in_inode->i_sb->s_maxbytes, out_inode->i_sb->s_maxbytes);
 
-	pos = *ppos;
 	if (unlikely(pos + count > max)) {
 		retval = -EOVERFLOW;
 		if (pos >= max)
@@ -1126,18 +1129,23 @@
 	if (in.file->f_flags & O_NONBLOCK)
 		fl = SPLICE_F_NONBLOCK;
 #endif
-	retval = do_splice_direct(in.file, ppos, out.file, count, fl);
+	retval = do_splice_direct(in.file, &pos, out.file, &out_pos, count, fl);
 
 	if (retval > 0) {
 		add_rchar(current, retval);
 		add_wchar(current, retval);
 		fsnotify_access(in.file);
 		fsnotify_modify(out.file);
+		out.file->f_pos = out_pos;
+		if (ppos)
+			*ppos = pos;
+		else
+			in.file->f_pos = pos;
 	}
 
 	inc_syscr(current);
 	inc_syscw(current);
-	if (*ppos > max)
+	if (pos > max)
 		retval = -EOVERFLOW;
 
 fput_out:
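
The do_sendfile() rework above keeps the input and output positions in local variables and only copies them back to f_pos after a successful transfer, matching the long-standing sendfile(2) contract: a non-NULL offset pointer is read and updated while the input file's own position is left untouched. A minimal userspace illustration (file names are placeholders):

/*
 * Userspace view of the semantics preserved above: with a non-NULL offset,
 * sendfile(2) reads from *offset, advances it, and leaves the input file's
 * own position untouched; the output descriptor's position moves by the
 * number of bytes written.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/sendfile.h>
#include <unistd.h>

int main(void)
{
	int in_fd = open("input.dat", O_RDONLY);
	int out_fd = open("output.dat", O_WRONLY | O_CREAT | O_TRUNC, 0644);
	off_t off = 0;
	ssize_t n;

	if (in_fd < 0 || out_fd < 0)
		return 1;

	n = sendfile(out_fd, in_fd, &off, 4096);
	if (n > 0)
		printf("copied %zd bytes, input offset now %lld\n",
		       n, (long long)off);

	close(in_fd);
	close(out_fd);
	return 0;
}
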
diff --git a/fs/splice.c b/fs/splice.c
index e6b2559..d37431d 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -1274,7 +1274,7 @@
 {
 	struct file *file = sd->u.file;
 
-	return do_splice_from(pipe, file, &file->f_pos, sd->total_len,
+	return do_splice_from(pipe, file, sd->opos, sd->total_len,
 			      sd->flags);
 }
 
@@ -1283,6 +1283,7 @@
  * @in:		file to splice from
  * @ppos:	input file offset
  * @out:	file to splice to
+ * @opos:	output file offset
  * @len:	number of bytes to splice
  * @flags:	splice modifier flags
  *
@@ -1294,7 +1295,7 @@
  *
  */
 long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
-		      size_t len, unsigned int flags)
+		      loff_t *opos, size_t len, unsigned int flags)
 {
 	struct splice_desc sd = {
 		.len		= len,
@@ -1302,6 +1303,7 @@
 		.flags		= flags,
 		.pos		= *ppos,
 		.u.file		= out,
+		.opos		= opos,
 	};
 	long ret;
 
@@ -1325,7 +1327,7 @@
 {
 	struct pipe_inode_info *ipipe;
 	struct pipe_inode_info *opipe;
-	loff_t offset, *off;
+	loff_t offset;
 	long ret;
 
 	ipipe = get_pipe_info(in);
@@ -1356,13 +1358,15 @@
 				return -EINVAL;
 			if (copy_from_user(&offset, off_out, sizeof(loff_t)))
 				return -EFAULT;
-			off = &offset;
-		} else
-			off = &out->f_pos;
+		} else {
+			offset = out->f_pos;
+		}
 
-		ret = do_splice_from(ipipe, out, off, len, flags);
+		ret = do_splice_from(ipipe, out, &offset, len, flags);
 
-		if (off_out && copy_to_user(off_out, off, sizeof(loff_t)))
+		if (!off_out)
+			out->f_pos = offset;
+		else if (copy_to_user(off_out, &offset, sizeof(loff_t)))
 			ret = -EFAULT;
 
 		return ret;
@@ -1376,13 +1380,15 @@
 				return -EINVAL;
 			if (copy_from_user(&offset, off_in, sizeof(loff_t)))
 				return -EFAULT;
-			off = &offset;
-		} else
-			off = &in->f_pos;
+		} else {
+			offset = in->f_pos;
+		}
 
-		ret = do_splice_to(in, off, opipe, len, flags);
+		ret = do_splice_to(in, &offset, opipe, len, flags);
 
-		if (off_in && copy_to_user(off_in, off, sizeof(loff_t)))
+		if (!off_in)
+			in->f_pos = offset;
+		else if (copy_to_user(off_in, &offset, sizeof(loff_t)))
 			ret = -EFAULT;
 
 		return ret;
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index de08c92f..605af51 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -349,31 +349,50 @@
 static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir)
 {
 	int err, over = 0;
+	loff_t pos = file->f_pos;
 	struct qstr nm;
 	union ubifs_key key;
 	struct ubifs_dent_node *dent;
 	struct inode *dir = file_inode(file);
 	struct ubifs_info *c = dir->i_sb->s_fs_info;
 
-	dbg_gen("dir ino %lu, f_pos %#llx", dir->i_ino, file->f_pos);
+	dbg_gen("dir ino %lu, f_pos %#llx", dir->i_ino, pos);
 
-	if (file->f_pos > UBIFS_S_KEY_HASH_MASK || file->f_pos == 2)
+	if (pos > UBIFS_S_KEY_HASH_MASK || pos == 2)
 		/*
 		 * The directory was seek'ed to a senseless position or there
 		 * are no more entries.
 		 */
 		return 0;
 
+	if (file->f_version == 0) {
+		/*
+		 * The file was seek'ed, which means that @file->private_data
+		 * is now invalid. This may also be just the first
+		 * 'ubifs_readdir()' invocation, in which case
+		 * @file->private_data is NULL, and the below code is
+		 * basically a no-op.
+		 */
+		kfree(file->private_data);
+		file->private_data = NULL;
+	}
+
+	/*
+	 * 'generic_file_llseek()' unconditionally sets @file->f_version to
+	 * zero, and we use this for detecting whether the file was seek'ed.
+	 */
+	file->f_version = 1;
+
 	/* File positions 0 and 1 correspond to "." and ".." */
-	if (file->f_pos == 0) {
+	if (pos == 0) {
 		ubifs_assert(!file->private_data);
 		over = filldir(dirent, ".", 1, 0, dir->i_ino, DT_DIR);
 		if (over)
 			return 0;
-		file->f_pos = 1;
+		file->f_pos = pos = 1;
 	}
 
-	if (file->f_pos == 1) {
+	if (pos == 1) {
 		ubifs_assert(!file->private_data);
 		over = filldir(dirent, "..", 2, 1,
 			       parent_ino(file->f_path.dentry), DT_DIR);
@@ -389,7 +408,7 @@
 			goto out;
 		}
 
-		file->f_pos = key_hash_flash(c, &dent->key);
+		file->f_pos = pos = key_hash_flash(c, &dent->key);
 		file->private_data = dent;
 	}
 
@@ -397,17 +416,16 @@
 	if (!dent) {
 		/*
 		 * The directory was seek'ed to and is now readdir'ed.
-		 * Find the entry corresponding to @file->f_pos or the
-		 * closest one.
+		 * Find the entry corresponding to @pos or the closest one.
 		 */
-		dent_key_init_hash(c, &key, dir->i_ino, file->f_pos);
+		dent_key_init_hash(c, &key, dir->i_ino, pos);
 		nm.name = NULL;
 		dent = ubifs_tnc_next_ent(c, &key, &nm);
 		if (IS_ERR(dent)) {
 			err = PTR_ERR(dent);
 			goto out;
 		}
-		file->f_pos = key_hash_flash(c, &dent->key);
+		file->f_pos = pos = key_hash_flash(c, &dent->key);
 		file->private_data = dent;
 	}
 
@@ -419,7 +437,7 @@
 			     ubifs_inode(dir)->creat_sqnum);
 
 		nm.len = le16_to_cpu(dent->nlen);
-		over = filldir(dirent, dent->name, nm.len, file->f_pos,
+		over = filldir(dirent, dent->name, nm.len, pos,
 			       le64_to_cpu(dent->inum),
 			       vfs_dent_type(dent->type));
 		if (over)
@@ -435,9 +453,17 @@
 		}
 
 		kfree(file->private_data);
-		file->f_pos = key_hash_flash(c, &dent->key);
+		file->f_pos = pos = key_hash_flash(c, &dent->key);
 		file->private_data = dent;
 		cond_resched();
+
+		if (file->f_version == 0)
+			/*
+			 * The file was seek'ed meanwhile, let's return and start
+			 * reading direntries from the new position on the next
+			 * invocation.
+			 */
+			return 0;
 	}
 
 out:
@@ -448,15 +474,13 @@
 
 	kfree(file->private_data);
 	file->private_data = NULL;
+	/* 2 is a special value indicating that there are no more direntries */
 	file->f_pos = 2;
 	return 0;
 }
 
-/* If a directory is seeked, we have to free saved readdir() state */
 static loff_t ubifs_dir_llseek(struct file *file, loff_t offset, int whence)
 {
-	kfree(file->private_data);
-	file->private_data = NULL;
 	return generic_file_llseek(file, offset, whence);
 }
 
diff --git a/fs/xfs/xfs_attr_leaf.h b/fs/xfs/xfs_attr_leaf.h
index f9d7846..444a770 100644
--- a/fs/xfs/xfs_attr_leaf.h
+++ b/fs/xfs/xfs_attr_leaf.h
@@ -128,6 +128,7 @@
 	__u8			holes;
 	__u8			pad1;
 	struct xfs_attr_leaf_map freemap[XFS_ATTR_LEAF_MAPSIZE];
+	__be32			pad2;		/* 64 bit alignment */
 };
 
 #define XFS_ATTR3_LEAF_CRC_OFF	(offsetof(struct xfs_attr3_leaf_hdr, info.crc))
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c
index 8804b8a..0903960 100644
--- a/fs/xfs/xfs_btree.c
+++ b/fs/xfs/xfs_btree.c
@@ -2544,7 +2544,17 @@
 	if (error)
 		goto error0;
 
+	/*
+	 * we can't just memcpy() the root in for CRC enabled btree blocks.
+	 * In that case we also have to ensure the blkno remains correct.
+	 */
 	memcpy(cblock, block, xfs_btree_block_len(cur));
+	if (cur->bc_flags & XFS_BTREE_CRC_BLOCKS) {
+		if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
+			cblock->bb_u.l.bb_blkno = cpu_to_be64(cbp->b_bn);
+		else
+			cblock->bb_u.s.bb_blkno = cpu_to_be64(cbp->b_bn);
+	}
 
 	be16_add_cpu(&block->bb_level, 1);
 	xfs_btree_set_numrecs(block, 1);
diff --git a/fs/xfs/xfs_dir2_format.h b/fs/xfs/xfs_dir2_format.h
index 995f1f5..7826782b 100644
--- a/fs/xfs/xfs_dir2_format.h
+++ b/fs/xfs/xfs_dir2_format.h
@@ -266,6 +266,7 @@
 struct xfs_dir3_data_hdr {
 	struct xfs_dir3_blk_hdr	hdr;
 	xfs_dir2_data_free_t	best_free[XFS_DIR2_DATA_FD_COUNT];
+	__be32			pad;	/* 64 bit alignment */
 };
 
 #define XFS_DIR3_DATA_CRC_OFF  offsetof(struct xfs_dir3_data_hdr, hdr.crc)
@@ -477,7 +478,7 @@
 	struct xfs_da3_blkinfo	info;		/* header for da routines */
 	__be16			count;		/* count of entries */
 	__be16			stale;		/* count of stale entries */
-	__be32			pad;
+	__be32			pad;		/* 64 bit alignment */
 };
 
 struct xfs_dir3_icleaf_hdr {
@@ -715,7 +716,7 @@
 	__be32			firstdb;	/* db of first entry */
 	__be32			nvalid;		/* count of valid entries */
 	__be32			nused;		/* count of used entries */
-	__be32			pad;		/* 64 bit alignment. */
+	__be32			pad;		/* 64 bit alignment */
 };
 
 struct xfs_dir3_free {
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 45a85ff..7cf5e4e 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1845,7 +1845,13 @@
 	xfs_agino_t		*buffer_nextp;
 
 	trace_xfs_log_recover_buf_inode_buf(mp->m_log, buf_f);
-	bp->b_ops = &xfs_inode_buf_ops;
+
+	/*
+	 * Post recovery validation only works properly on CRC enabled
+	 * filesystems.
+	 */
+	if (xfs_sb_version_hascrc(&mp->m_sb))
+		bp->b_ops = &xfs_inode_buf_ops;
 
 	inodes_per_buf = BBTOB(bp->b_io_length) >> mp->m_sb.sb_inodelog;
 	for (i = 0; i < inodes_per_buf; i++) {
@@ -2205,7 +2211,16 @@
 	/* Shouldn't be any more regions */
 	ASSERT(i == item->ri_total);
 
-	xlog_recovery_validate_buf_type(mp, bp, buf_f);
+	/*
+	 * We can only do post recovery validation on items on CRC enabled
+	 * filesystems as we need to know when the buffer was written to be able
+	 * to determine if we should have replayed the item. If we replay old
+	 * metadata over a newer buffer, then it will enter a temporarily
+	 * inconsistent state resulting in verification failures. Hence for now
+	 * just avoid the verification stage for non-CRC filesystems.
+	 */
+	if (xfs_sb_version_hascrc(&mp->m_sb))
+		xlog_recovery_validate_buf_type(mp, bp, buf_f);
 }
 
 /*
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index f6bfbd7..e8e310c 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -314,7 +314,8 @@
 xfs_mount_validate_sb(
 	xfs_mount_t	*mp,
 	xfs_sb_t	*sbp,
-	bool		check_inprogress)
+	bool		check_inprogress,
+	bool		check_version)
 {
 
 	/*
@@ -337,9 +338,10 @@
 
 	/*
 	 * Version 5 superblock feature mask validation. Reject combinations the
-	 * kernel cannot support up front before checking anything else.
+	 * kernel cannot support up front before checking anything else. For
+	 * write validation, we don't need to check feature masks.
 	 */
-	if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) {
+	if (check_version && XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) {
 		xfs_alert(mp,
 "Version 5 superblock detected. This kernel has EXPERIMENTAL support enabled!\n"
 "Use of these features in this kernel is at your own risk!");
@@ -675,7 +677,8 @@
 
 static int
 xfs_sb_verify(
-	struct xfs_buf	*bp)
+	struct xfs_buf	*bp,
+	bool		check_version)
 {
 	struct xfs_mount *mp = bp->b_target->bt_mount;
 	struct xfs_sb	sb;
@@ -686,7 +689,8 @@
 	 * Only check the in progress field for the primary superblock as
 	 * mkfs.xfs doesn't clear it from secondary superblocks.
 	 */
-	return xfs_mount_validate_sb(mp, &sb, bp->b_bn == XFS_SB_DADDR);
+	return xfs_mount_validate_sb(mp, &sb, bp->b_bn == XFS_SB_DADDR,
+				     check_version);
 }
 
 /*
@@ -719,7 +723,7 @@
 			goto out_error;
 		}
 	}
-	error = xfs_sb_verify(bp);
+	error = xfs_sb_verify(bp, true);
 
 out_error:
 	if (error) {
@@ -758,7 +762,7 @@
 	struct xfs_buf_log_item	*bip = bp->b_fspriv;
 	int			error;
 
-	error = xfs_sb_verify(bp);
+	error = xfs_sb_verify(bp, false);
 	if (error) {
 		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
 		xfs_buf_ioerror(bp, error);
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 636c59f..c13c919 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -382,6 +382,7 @@
 int acpi_device_get_power(struct acpi_device *device, int *state);
 int acpi_device_set_power(struct acpi_device *device, int state);
 int acpi_bus_init_power(struct acpi_device *device);
+int acpi_device_fix_up_power(struct acpi_device *device);
 int acpi_bus_update_power(acpi_handle handle, int *state_p);
 bool acpi_bus_power_manageable(acpi_handle handle);
 
diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
index e6168a2..b420939 100644
--- a/include/acpi/acpi_drivers.h
+++ b/include/acpi/acpi_drivers.h
@@ -123,7 +123,9 @@
 extern void unregister_dock_notifier(struct notifier_block *nb);
 extern int register_hotplug_dock_device(acpi_handle handle,
 					const struct acpi_dock_ops *ops,
-					void *context);
+					void *context,
+					void (*init)(void *),
+					void (*release)(void *));
 extern void unregister_hotplug_dock_device(acpi_handle handle);
 #else
 static inline int is_dock_device(acpi_handle handle)
@@ -139,7 +141,9 @@
 }
 static inline int register_hotplug_dock_device(acpi_handle handle,
 					       const struct acpi_dock_ops *ops,
-					       void *context)
+					       void *context,
+					       void (*init)(void *),
+					       void (*release)(void *))
 {
 	return -ENODEV;
 }
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index 365f4a6..fc09d7b 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -3,6 +3,7 @@
 
 #include <linux/sched.h>
 #include <linux/percpu.h>
+#include <linux/vtime.h>
 #include <asm/ptrace.h>
 
 struct context_tracking {
@@ -19,6 +20,26 @@
 	} state;
 };
 
+static inline void __guest_enter(void)
+{
+	/*
+	 * This is running in ioctl context so we can avoid
+	 * the call to vtime_account() with its unnecessary idle check.
+	 */
+	vtime_account_system(current);
+	current->flags |= PF_VCPU;
+}
+
+static inline void __guest_exit(void)
+{
+	/*
+	 * This is running in ioctl context so we can avoid
+	 * the call to vtime_account() with its unnecessary idle check.
+	 */
+	vtime_account_system(current);
+	current->flags &= ~PF_VCPU;
+}
+
 #ifdef CONFIG_CONTEXT_TRACKING
 DECLARE_PER_CPU(struct context_tracking, context_tracking);
 
@@ -35,6 +56,9 @@
 extern void user_enter(void);
 extern void user_exit(void);
 
+extern void guest_enter(void);
+extern void guest_exit(void);
+
 static inline enum ctx_state exception_enter(void)
 {
 	enum ctx_state prev_ctx;
@@ -57,6 +81,17 @@
 static inline bool context_tracking_in_user(void) { return false; }
 static inline void user_enter(void) { }
 static inline void user_exit(void) { }
+
+static inline void guest_enter(void)
+{
+	__guest_enter();
+}
+
+static inline void guest_exit(void)
+{
+	__guest_exit();
+}
+
 static inline enum ctx_state exception_enter(void) { return 0; }
 static inline void exception_exit(enum ctx_state prev_ctx) { }
 static inline void context_tracking_task_switch(struct task_struct *prev,
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 43db02e..65c2be2 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2414,8 +2414,6 @@
 		struct file *, loff_t *, size_t, unsigned int);
 extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe,
 		struct file *out, loff_t *, size_t len, unsigned int flags);
-extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
-		size_t len, unsigned int flags);
 
 extern void
 file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index cc276d2..e2dbefb 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -58,11 +58,12 @@
 
 #define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
 #define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define HPAGE_PMD_SHIFT PMD_SHIFT
 #define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
 #define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 extern bool is_vma_temporary_stack(struct vm_area_struct *vma);
 
 #define transparent_hugepage_enabled(__vma)				\
@@ -180,6 +181,9 @@
 				unsigned long addr, pmd_t pmd, pmd_t *pmdp);
 
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
+#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
+#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
+#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })
 
 #define hpage_nr_pages(x) 1
 
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 52bd03b..637fa71d 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -44,7 +44,7 @@
  *	struct vlan_ethhdr - vlan ethernet header (ethhdr + vlan_hdr)
  *	@h_dest: destination ethernet address
  *	@h_source: source ethernet address
- *	@h_vlan_proto: ethernet protocol (always 0x8100)
+ *	@h_vlan_proto: ethernet protocol
  *	@h_vlan_TCI: priority and VLAN ID
  *	@h_vlan_encapsulated_proto: packet type ID or len
  */
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index f0eea07..8db53cf 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -23,6 +23,7 @@
 #include <linux/ratelimit.h>
 #include <linux/err.h>
 #include <linux/irqflags.h>
+#include <linux/context_tracking.h>
 #include <asm/signal.h>
 
 #include <linux/kvm.h>
@@ -760,42 +761,6 @@
 }
 #endif
 
-static inline void __guest_enter(void)
-{
-	/*
-	 * This is running in ioctl context so we can avoid
-	 * the call to vtime_account() with its unnecessary idle check.
-	 */
-	vtime_account_system(current);
-	current->flags |= PF_VCPU;
-}
-
-static inline void __guest_exit(void)
-{
-	/*
-	 * This is running in ioctl context so we can avoid
-	 * the call to vtime_account() with its unnecessary idle check.
-	 */
-	vtime_account_system(current);
-	current->flags &= ~PF_VCPU;
-}
-
-#ifdef CONFIG_CONTEXT_TRACKING
-extern void guest_enter(void);
-extern void guest_exit(void);
-
-#else /* !CONFIG_CONTEXT_TRACKING */
-static inline void guest_enter(void)
-{
-	__guest_enter();
-}
-
-static inline void guest_exit(void)
-{
-	__guest_exit();
-}
-#endif /* !CONFIG_CONTEXT_TRACKING */
-
 static inline void kvm_guest_enter(void)
 {
 	unsigned long flags;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 60584b1..96e4c21 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1695,6 +1695,7 @@
 extern struct net_device	*dev_get_by_index(struct net *net, int ifindex);
 extern struct net_device	*__dev_get_by_index(struct net *net, int ifindex);
 extern struct net_device	*dev_get_by_index_rcu(struct net *net, int ifindex);
+extern int		netdev_get_name(struct net *net, char *name, int ifindex);
 extern int		dev_restart(struct net_device *dev);
 #ifdef CONFIG_NETPOLL_TRAP
 extern int		netpoll_trap(void);
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index f463a46..c5b6dbf 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -389,8 +389,7 @@
 	/* mmap bits */
 	struct mutex			mmap_mutex;
 	atomic_t			mmap_count;
-	int				mmap_locked;
-	struct user_struct		*mmap_user;
+
 	struct ring_buffer		*rb;
 	struct list_head		rb_entry;
 
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 87a03c7..f5d4723 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -33,9 +33,25 @@
 		preempt_schedule(); \
 } while (0)
 
+#ifdef CONFIG_CONTEXT_TRACKING
+
+void preempt_schedule_context(void);
+
+#define preempt_check_resched_context() \
+do { \
+	if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
+		preempt_schedule_context(); \
+} while (0)
+#else
+
+#define preempt_check_resched_context() preempt_check_resched()
+
+#endif /* CONFIG_CONTEXT_TRACKING */
+
 #else /* !CONFIG_PREEMPT */
 
 #define preempt_check_resched()		do { } while (0)
+#define preempt_check_resched_context()	do { } while (0)
 
 #endif /* CONFIG_PREEMPT */
 
@@ -88,7 +104,7 @@
 do { \
 	preempt_enable_no_resched_notrace(); \
 	barrier(); \
-	preempt_check_resched(); \
+	preempt_check_resched_context(); \
 } while (0)
 
 #else /* !CONFIG_PREEMPT_COUNT */
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index 656699f..4aa80ba 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -58,12 +58,12 @@
 			struct pstore_info *psi);
 	int		(*write)(enum pstore_type_id type,
 			enum kmsg_dump_reason reason, u64 *id,
-			unsigned int part, int count, size_t size,
-			struct pstore_info *psi);
+			unsigned int part, int count, size_t hsize,
+			size_t size, struct pstore_info *psi);
 	int		(*write_buf)(enum pstore_type_id type,
 			enum kmsg_dump_reason reason, u64 *id,
-			unsigned int part, const char *buf, size_t size,
-			struct pstore_info *psi);
+			unsigned int part, const char *buf, size_t hsize,
+			size_t size, struct pstore_info *psi);
 	int		(*erase)(enum pstore_type_id type, u64 id,
 			int count, struct timespec time,
 			struct pstore_info *psi);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 9c676eae..dec1748 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -627,6 +627,7 @@
 }
 
 extern void kfree_skb(struct sk_buff *skb);
+extern void kfree_skb_list(struct sk_buff *segs);
 extern void skb_tx_error(struct sk_buff *skb);
 extern void consume_skb(struct sk_buff *skb);
 extern void	       __kfree_skb(struct sk_buff *skb);
diff --git a/include/linux/smp.h b/include/linux/smp.h
index e6564c1..c848876 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -11,6 +11,7 @@
 #include <linux/list.h>
 #include <linux/cpumask.h>
 #include <linux/init.h>
+#include <linux/irqflags.h>
 
 extern void cpu_idle(void);
 
@@ -139,13 +140,17 @@
 }
 #define smp_call_function(func, info, wait) \
 			(up_smp_call_function(func, info))
-#define on_each_cpu(func,info,wait)		\
-	({					\
-		local_irq_disable();		\
-		func(info);			\
-		local_irq_enable();		\
-		0;				\
-	})
+
+static inline int on_each_cpu(smp_call_func_t func, void *info, int wait)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	func(info);
+	local_irq_restore(flags);
+	return 0;
+}
+
 /*
  * Note we still need to test the mask even for UP
  * because we actually can get an empty mask from
diff --git a/include/linux/splice.h b/include/linux/splice.h
index 09a545a..74575cb 100644
--- a/include/linux/splice.h
+++ b/include/linux/splice.h
@@ -35,6 +35,7 @@
 		void *data;		/* cookie */
 	} u;
 	loff_t pos;			/* file position */
+	loff_t *opos;			/* sendfile: output position */
 	size_t num_spliced;		/* number of bytes already spliced */
 	bool need_wakeup;		/* need to wake up writer */
 };
diff --git a/include/linux/vtime.h b/include/linux/vtime.h
index 71a5782..b1dd2db 100644
--- a/include/linux/vtime.h
+++ b/include/linux/vtime.h
@@ -34,7 +34,7 @@
 }
 extern void vtime_guest_enter(struct task_struct *tsk);
 extern void vtime_guest_exit(struct task_struct *tsk);
-extern void vtime_init_idle(struct task_struct *tsk);
+extern void vtime_init_idle(struct task_struct *tsk, int cpu);
 #else
 static inline void vtime_account_irq_exit(struct task_struct *tsk)
 {
@@ -45,7 +45,7 @@
 static inline void vtime_user_exit(struct task_struct *tsk) { }
 static inline void vtime_guest_enter(struct task_struct *tsk) { }
 static inline void vtime_guest_exit(struct task_struct *tsk) { }
-static inline void vtime_init_idle(struct task_struct *tsk) { }
+static inline void vtime_init_idle(struct task_struct *tsk, int cpu) { }
 #endif
 
 #ifdef CONFIG_IRQ_TIME_ACCOUNTING
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index d3eef01..0f4555b 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -110,6 +110,8 @@
 		  struct v4l2_buffer *buf);
 int v4l2_m2m_dqbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 		   struct v4l2_buffer *buf);
+int v4l2_m2m_create_bufs(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+			 struct v4l2_create_buffers *create);
 
 int v4l2_m2m_expbuf(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
 		   struct v4l2_exportbuffer *eb);
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 4b6f0b2..09b1360 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -95,10 +95,10 @@
 int ip_tunnel_init(struct net_device *dev);
 void ip_tunnel_uninit(struct net_device *dev);
 void  ip_tunnel_dellink(struct net_device *dev, struct list_head *head);
-int __net_init ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
-				  struct rtnl_link_ops *ops, char *devname);
+int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
+		       struct rtnl_link_ops *ops, char *devname);
 
-void __net_exit ip_tunnel_delete_net(struct ip_tunnel_net *itn);
+void ip_tunnel_delete_net(struct ip_tunnel_net *itn);
 
 void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 		    const struct iphdr *tnl_params);
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index ab5d499..bdc6e87 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -261,6 +261,7 @@
 header-y += net_tstamp.h
 header-y += netconf.h
 header-y += netdevice.h
+header-y += netlink_diag.h
 header-y += netfilter.h
 header-y += netfilter_arp.h
 header-y += netfilter_bridge.h
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index 65349f0..383f823 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -15,7 +15,6 @@
  */
 
 #include <linux/context_tracking.h>
-#include <linux/kvm_host.h>
 #include <linux/rcupdate.h>
 #include <linux/sched.h>
 #include <linux/hardirq.h>
@@ -71,6 +70,46 @@
 	local_irq_restore(flags);
 }
 
+#ifdef CONFIG_PREEMPT
+/**
+ * preempt_schedule_context - preempt_schedule called by tracing
+ *
+ * The tracing infrastructure uses preempt_enable_notrace to prevent
+ * recursion and tracing preempt enabling caused by the tracing
+ * infrastructure itself. But as tracing can happen in areas coming
+ * from userspace or just about to enter userspace, a preempt enable
+ * can occur before user_exit() is called. This will cause the scheduler
+ * to be called when the system is still in usermode.
+ *
+ * To prevent this, the preempt_enable_notrace will use this function
+ * instead of preempt_schedule() to exit user context if needed before
+ * calling the scheduler.
+ */
+void __sched notrace preempt_schedule_context(void)
+{
+	struct thread_info *ti = current_thread_info();
+	enum ctx_state prev_ctx;
+
+	if (likely(ti->preempt_count || irqs_disabled()))
+		return;
+
+	/*
+	 * Need to disable preemption in case user_exit() is traced
+	 * and the tracer calls preempt_enable_notrace() causing
+	 * an infinite recursion.
+	 */
+	preempt_disable_notrace();
+	prev_ctx = exception_enter();
+	preempt_enable_no_resched_notrace();
+
+	preempt_schedule();
+
+	preempt_disable_notrace();
+	exception_exit(prev_ctx);
+	preempt_enable_notrace();
+}
+EXPORT_SYMBOL_GPL(preempt_schedule_context);
+#endif /* CONFIG_PREEMPT */
 
 /**
  * user_exit - Inform the context tracking that the CPU is
diff --git a/kernel/cpu/idle.c b/kernel/cpu/idle.c
index d5585f5..e695c0a 100644
--- a/kernel/cpu/idle.c
+++ b/kernel/cpu/idle.c
@@ -5,6 +5,7 @@
 #include <linux/cpu.h>
 #include <linux/tick.h>
 #include <linux/mm.h>
+#include <linux/stackprotector.h>
 
 #include <asm/tlb.h>
 
@@ -58,6 +59,7 @@
 void __weak arch_cpu_idle(void)
 {
 	cpu_idle_force_poll = 1;
+	local_irq_enable();
 }
 
 /*
@@ -112,6 +114,21 @@
 
 void cpu_startup_entry(enum cpuhp_state state)
 {
+	/*
+	 * This #ifdef needs to die, but it's too late in the cycle to
+	 * make this generic (arm and sh have never invoked the canary
+	 * init for the non-boot CPUs!). Will be fixed in 3.11
+	 */
+#ifdef CONFIG_X86
+	/*
+	 * If we're the non-boot CPU, nothing set the stack canary up
+	 * for us. The boot CPU already has it initialized but no harm
+	 * in doing it again. This is a good place for updating it, as
+	 * we won't ever return from this function (so the invalid
+	 * canaries already on the stack won't ever trigger).
+	 */
+	boot_init_stack_canary();
+#endif
 	current_set_polling();
 	arch_cpu_idle_prepare();
 	cpu_idle_loop();
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 9dc297f..b391907 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -196,9 +196,6 @@
 static void update_context_time(struct perf_event_context *ctx);
 static u64 perf_event_time(struct perf_event *event);
 
-static void ring_buffer_attach(struct perf_event *event,
-			       struct ring_buffer *rb);
-
 void __weak perf_event_print_debug(void)	{ }
 
 extern __weak const char *perf_pmu_name(void)
@@ -2918,6 +2915,7 @@
 }
 
 static void ring_buffer_put(struct ring_buffer *rb);
+static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb);
 
 static void free_event(struct perf_event *event)
 {
@@ -2942,15 +2940,30 @@
 		if (has_branch_stack(event)) {
 			static_key_slow_dec_deferred(&perf_sched_events);
 			/* is system-wide event */
-			if (!(event->attach_state & PERF_ATTACH_TASK))
+			if (!(event->attach_state & PERF_ATTACH_TASK)) {
 				atomic_dec(&per_cpu(perf_branch_stack_events,
 						    event->cpu));
+			}
 		}
 	}
 
 	if (event->rb) {
-		ring_buffer_put(event->rb);
-		event->rb = NULL;
+		struct ring_buffer *rb;
+
+		/*
+		 * Can happen when we close an event with re-directed output.
+		 *
+		 * Since we have a 0 refcount, perf_mmap_close() will skip
+		 * over us, possibly making our ring_buffer_put() the last.
+		 */
+		mutex_lock(&event->mmap_mutex);
+		rb = event->rb;
+		if (rb) {
+			rcu_assign_pointer(event->rb, NULL);
+			ring_buffer_detach(event, rb);
+			ring_buffer_put(rb); /* could be last */
+		}
+		mutex_unlock(&event->mmap_mutex);
 	}
 
 	if (is_cgroup_event(event))
@@ -3188,30 +3201,13 @@
 	unsigned int events = POLL_HUP;
 
 	/*
-	 * Race between perf_event_set_output() and perf_poll(): perf_poll()
-	 * grabs the rb reference but perf_event_set_output() overrides it.
-	 * Here is the timeline for two threads T1, T2:
-	 * t0: T1, rb = rcu_dereference(event->rb)
-	 * t1: T2, old_rb = event->rb
-	 * t2: T2, event->rb = new rb
-	 * t3: T2, ring_buffer_detach(old_rb)
-	 * t4: T1, ring_buffer_attach(rb1)
-	 * t5: T1, poll_wait(event->waitq)
-	 *
-	 * To avoid this problem, we grab mmap_mutex in perf_poll()
-	 * thereby ensuring that the assignment of the new ring buffer
-	 * and the detachment of the old buffer appear atomic to perf_poll()
+	 * Pin the event->rb by taking event->mmap_mutex; otherwise
+	 * perf_event_set_output() can swizzle our rb and make us miss wakeups.
 	 */
 	mutex_lock(&event->mmap_mutex);
-
-	rcu_read_lock();
-	rb = rcu_dereference(event->rb);
-	if (rb) {
-		ring_buffer_attach(event, rb);
+	rb = event->rb;
+	if (rb)
 		events = atomic_xchg(&rb->poll, 0);
-	}
-	rcu_read_unlock();
-
 	mutex_unlock(&event->mmap_mutex);
 
 	poll_wait(file, &event->waitq, wait);
@@ -3521,16 +3517,12 @@
 		return;
 
 	spin_lock_irqsave(&rb->event_lock, flags);
-	if (!list_empty(&event->rb_entry))
-		goto unlock;
-
-	list_add(&event->rb_entry, &rb->event_list);
-unlock:
+	if (list_empty(&event->rb_entry))
+		list_add(&event->rb_entry, &rb->event_list);
 	spin_unlock_irqrestore(&rb->event_lock, flags);
 }
 
-static void ring_buffer_detach(struct perf_event *event,
-			       struct ring_buffer *rb)
+static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb)
 {
 	unsigned long flags;
 
@@ -3549,13 +3541,10 @@
 
 	rcu_read_lock();
 	rb = rcu_dereference(event->rb);
-	if (!rb)
-		goto unlock;
-
-	list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
-		wake_up_all(&event->waitq);
-
-unlock:
+	if (rb) {
+		list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
+			wake_up_all(&event->waitq);
+	}
 	rcu_read_unlock();
 }
 
@@ -3584,18 +3573,10 @@
 
 static void ring_buffer_put(struct ring_buffer *rb)
 {
-	struct perf_event *event, *n;
-	unsigned long flags;
-
 	if (!atomic_dec_and_test(&rb->refcount))
 		return;
 
-	spin_lock_irqsave(&rb->event_lock, flags);
-	list_for_each_entry_safe(event, n, &rb->event_list, rb_entry) {
-		list_del_init(&event->rb_entry);
-		wake_up_all(&event->waitq);
-	}
-	spin_unlock_irqrestore(&rb->event_lock, flags);
+	WARN_ON_ONCE(!list_empty(&rb->event_list));
 
 	call_rcu(&rb->rcu_head, rb_free_rcu);
 }
@@ -3605,26 +3586,100 @@
 	struct perf_event *event = vma->vm_file->private_data;
 
 	atomic_inc(&event->mmap_count);
+	atomic_inc(&event->rb->mmap_count);
 }
 
+/*
+ * A buffer can be mmap()ed multiple times, either directly through the same
+ * event, or through other events by use of perf_event_set_output().
+ *
+ * In order to undo the VM accounting done by perf_mmap() we need to destroy
+ * the buffer here, where we still have a VM context. This means we need
+ * to detach all events redirecting to us.
+ */
 static void perf_mmap_close(struct vm_area_struct *vma)
 {
 	struct perf_event *event = vma->vm_file->private_data;
 
-	if (atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) {
-		unsigned long size = perf_data_size(event->rb);
-		struct user_struct *user = event->mmap_user;
-		struct ring_buffer *rb = event->rb;
+	struct ring_buffer *rb = event->rb;
+	struct user_struct *mmap_user = rb->mmap_user;
+	int mmap_locked = rb->mmap_locked;
+	unsigned long size = perf_data_size(rb);
 
-		atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
-		vma->vm_mm->pinned_vm -= event->mmap_locked;
-		rcu_assign_pointer(event->rb, NULL);
-		ring_buffer_detach(event, rb);
-		mutex_unlock(&event->mmap_mutex);
+	atomic_dec(&rb->mmap_count);
 
-		ring_buffer_put(rb);
-		free_uid(user);
+	if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
+		return;
+
+	/* Detach current event from the buffer. */
+	rcu_assign_pointer(event->rb, NULL);
+	ring_buffer_detach(event, rb);
+	mutex_unlock(&event->mmap_mutex);
+
+	/* If there are still other mmap()s of this buffer, we're done. */
+	if (atomic_read(&rb->mmap_count)) {
+		ring_buffer_put(rb); /* can't be last */
+		return;
 	}
+
+	/*
+	 * No other mmap()s; detach from all other events that might redirect
+	 * into the now unreachable buffer. Somewhat complicated by the
+	 * fact that rb::event_lock otherwise nests inside mmap_mutex.
+	 */
+again:
+	rcu_read_lock();
+	list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
+		if (!atomic_long_inc_not_zero(&event->refcount)) {
+			/*
+			 * This event is en-route to free_event() which will
+			 * detach it and remove it from the list.
+			 */
+			continue;
+		}
+		rcu_read_unlock();
+
+		mutex_lock(&event->mmap_mutex);
+		/*
+		 * Check we didn't race with perf_event_set_output() which can
+		 * swizzle the rb from under us while we were waiting to
+		 * acquire mmap_mutex.
+		 *
+		 * If we find a different rb, ignore this event; a later
+		 * iteration will no longer find it on the list. We still
+		 * have to restart the iteration to make sure we're not
+		 * now iterating the wrong list.
+		 */
+		if (event->rb == rb) {
+			rcu_assign_pointer(event->rb, NULL);
+			ring_buffer_detach(event, rb);
+			ring_buffer_put(rb); /* can't be last, we still have one */
+		}
+		mutex_unlock(&event->mmap_mutex);
+		put_event(event);
+
+		/*
+		 * Restart the iteration; either we're on the wrong list or
+		 * we've destroyed its integrity by doing a deletion.
+		 */
+		goto again;
+	}
+	rcu_read_unlock();
+
+	/*
+	 * It could be there are still a few 0-ref events on the list; they'll
+	 * get cleaned up by free_event() -- they'll also still have their
+	 * ref on the rb and will free it whenever they are done with it.
+	 *
+	 * Aside from that, this buffer is 'fully' detached and unmapped,
+	 * undo the VM accounting.
+	 */
+
+	atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm);
+	vma->vm_mm->pinned_vm -= mmap_locked;
+	free_uid(mmap_user);
+
+	ring_buffer_put(rb); /* could be last */
 }
 
 static const struct vm_operations_struct perf_mmap_vmops = {
@@ -3674,12 +3729,24 @@
 		return -EINVAL;
 
 	WARN_ON_ONCE(event->ctx->parent_ctx);
+again:
 	mutex_lock(&event->mmap_mutex);
 	if (event->rb) {
-		if (event->rb->nr_pages == nr_pages)
-			atomic_inc(&event->rb->refcount);
-		else
+		if (event->rb->nr_pages != nr_pages) {
 			ret = -EINVAL;
+			goto unlock;
+		}
+
+		if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
+			/*
+			 * Raced against perf_mmap_close() through
+			 * perf_event_set_output(). Try again, hope for better
+			 * luck.
+			 */
+			mutex_unlock(&event->mmap_mutex);
+			goto again;
+		}
+
 		goto unlock;
 	}
 
@@ -3720,12 +3787,16 @@
 		ret = -ENOMEM;
 		goto unlock;
 	}
-	rcu_assign_pointer(event->rb, rb);
+
+	atomic_set(&rb->mmap_count, 1);
+	rb->mmap_locked = extra;
+	rb->mmap_user = get_current_user();
 
 	atomic_long_add(user_extra, &user->locked_vm);
-	event->mmap_locked = extra;
-	event->mmap_user = get_current_user();
-	vma->vm_mm->pinned_vm += event->mmap_locked;
+	vma->vm_mm->pinned_vm += extra;
+
+	ring_buffer_attach(event, rb);
+	rcu_assign_pointer(event->rb, rb);
 
 	perf_event_update_userpage(event);
 
@@ -3734,7 +3805,11 @@
 		atomic_inc(&event->mmap_count);
 	mutex_unlock(&event->mmap_mutex);
 
-	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+	/*
+	 * Since pinned accounting is per vm we cannot allow fork() to copy our
+	 * vma.
+	 */
+	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
 	vma->vm_ops = &perf_mmap_vmops;
 
 	return ret;
@@ -6412,6 +6487,8 @@
 	if (atomic_read(&event->mmap_count))
 		goto unlock;
 
+	old_rb = event->rb;
+
 	if (output_event) {
 		/* get the rb we want to redirect to */
 		rb = ring_buffer_get(output_event);
@@ -6419,16 +6496,28 @@
 			goto unlock;
 	}
 
-	old_rb = event->rb;
-	rcu_assign_pointer(event->rb, rb);
 	if (old_rb)
 		ring_buffer_detach(event, old_rb);
+
+	if (rb)
+		ring_buffer_attach(event, rb);
+
+	rcu_assign_pointer(event->rb, rb);
+
+	if (old_rb) {
+		ring_buffer_put(old_rb);
+		/*
+		 * Since we detached the old rb before setting the new one
+		 * (so that we could attach the new rb), we could have missed
+		 * a wakeup. Provide it now.
+		 */
+		wake_up_all(&event->waitq);
+	}
+
 	ret = 0;
 unlock:
 	mutex_unlock(&event->mmap_mutex);
 
-	if (old_rb)
-		ring_buffer_put(old_rb);
 out:
 	return ret;
 }
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index a64f8ae..20185ea 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -120,7 +120,7 @@
 	list_for_each_entry(iter, &bp_task_head, hw.bp_list) {
 		if (iter->hw.bp_target == tsk &&
 		    find_slot_idx(iter) == type &&
-		    cpu == iter->cpu)
+		    (iter->cpu < 0 || cpu == iter->cpu))
 			count += hw_breakpoint_weight(iter);
 	}
 
@@ -149,7 +149,7 @@
 		return;
 	}
 
-	for_each_online_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		unsigned int nr;
 
 		nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
@@ -235,7 +235,7 @@
 	if (cpu >= 0) {
 		toggle_bp_task_slot(bp, cpu, enable, type, weight);
 	} else {
-		for_each_online_cpu(cpu)
+		for_each_possible_cpu(cpu)
 			toggle_bp_task_slot(bp, cpu, enable, type, weight);
 	}
 
diff --git a/kernel/events/internal.h b/kernel/events/internal.h
index eb675c4..ca65997 100644
--- a/kernel/events/internal.h
+++ b/kernel/events/internal.h
@@ -31,6 +31,10 @@
 	spinlock_t			event_lock;
 	struct list_head		event_list;
 
+	atomic_t			mmap_count;
+	unsigned long			mmap_locked;
+	struct user_struct		*mmap_user;
+
 	struct perf_event_mmap_page	*user_page;
 	void				*data_pages[0];
 };
diff --git a/kernel/exit.c b/kernel/exit.c
index af2eb3c..7bb73f9 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -649,7 +649,6 @@
 	 *	jobs, send them a SIGHUP and then a SIGCONT.  (POSIX 3.2.2.2)
 	 */
 	forget_original_parent(tsk);
-	exit_task_namespaces(tsk);
 
 	write_lock_irq(&tasklist_lock);
 	if (group_dead)
@@ -795,6 +794,7 @@
 	exit_shm(tsk);
 	exit_files(tsk);
 	exit_fs(tsk);
+	exit_task_namespaces(tsk);
 	exit_task_work(tsk);
 	check_stack_usage();
 	exit_thread();
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 3fed7f0..bddf3b2 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -467,6 +467,7 @@
 /* Optimization staging list, protected by kprobe_mutex */
 static LIST_HEAD(optimizing_list);
 static LIST_HEAD(unoptimizing_list);
+static LIST_HEAD(freeing_list);
 
 static void kprobe_optimizer(struct work_struct *work);
 static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
@@ -504,7 +505,7 @@
  * Unoptimize (replace a jump with a breakpoint and remove the breakpoint
  * if need) kprobes listed on unoptimizing_list.
  */
-static __kprobes void do_unoptimize_kprobes(struct list_head *free_list)
+static __kprobes void do_unoptimize_kprobes(void)
 {
 	struct optimized_kprobe *op, *tmp;
 
@@ -515,9 +516,9 @@
 	/* Ditto to do_optimize_kprobes */
 	get_online_cpus();
 	mutex_lock(&text_mutex);
-	arch_unoptimize_kprobes(&unoptimizing_list, free_list);
+	arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
 	/* Loop free_list for disarming */
-	list_for_each_entry_safe(op, tmp, free_list, list) {
+	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
 		/* Disarm probes if marked disabled */
 		if (kprobe_disabled(&op->kp))
 			arch_disarm_kprobe(&op->kp);
@@ -536,11 +537,11 @@
 }
 
 /* Reclaim all kprobes on the free_list */
-static __kprobes void do_free_cleaned_kprobes(struct list_head *free_list)
+static __kprobes void do_free_cleaned_kprobes(void)
 {
 	struct optimized_kprobe *op, *tmp;
 
-	list_for_each_entry_safe(op, tmp, free_list, list) {
+	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
 		BUG_ON(!kprobe_unused(&op->kp));
 		list_del_init(&op->list);
 		free_aggr_kprobe(&op->kp);
@@ -556,8 +557,6 @@
 /* Kprobe jump optimizer */
 static __kprobes void kprobe_optimizer(struct work_struct *work)
 {
-	LIST_HEAD(free_list);
-
 	mutex_lock(&kprobe_mutex);
 	/* Lock modules while optimizing kprobes */
 	mutex_lock(&module_mutex);
@@ -566,7 +565,7 @@
 	 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
 	 * kprobes before waiting for quiesence period.
 	 */
-	do_unoptimize_kprobes(&free_list);
+	do_unoptimize_kprobes();
 
 	/*
 	 * Step 2: Wait for quiesence period to ensure all running interrupts
@@ -581,7 +580,7 @@
 	do_optimize_kprobes();
 
 	/* Step 4: Free cleaned kprobes after quiesence period */
-	do_free_cleaned_kprobes(&free_list);
+	do_free_cleaned_kprobes();
 
 	mutex_unlock(&module_mutex);
 	mutex_unlock(&kprobe_mutex);
@@ -723,8 +722,19 @@
 	if (!list_empty(&op->list))
 		/* Dequeue from the (un)optimization queue */
 		list_del_init(&op->list);
-
 	op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
+
+	if (kprobe_unused(p)) {
+		/* Enqueue if it is unused */
+		list_add(&op->list, &freeing_list);
+		/*
+		 * Remove unused probes from the hash list. After waiting
+		 * for synchronization, this probe is reclaimed.
+		 * (reclaiming is done by do_free_cleaned_kprobes().)
+		 */
+		hlist_del_rcu(&op->kp.hlist);
+	}
+
 	/* Don't touch the code, because it is already freed. */
 	arch_remove_optimized_kprobe(op);
 }
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index aed981a..335a7ae 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -665,20 +665,22 @@
 		if (unlikely(is_compat_task())) {
 			compat_siginfo_t __user *uinfo = compat_ptr(data);
 
-			ret = copy_siginfo_to_user32(uinfo, &info);
-			ret |= __put_user(info.si_code, &uinfo->si_code);
+			if (copy_siginfo_to_user32(uinfo, &info) ||
+			    __put_user(info.si_code, &uinfo->si_code)) {
+				ret = -EFAULT;
+				break;
+			}
+
 		} else
 #endif
 		{
 			siginfo_t __user *uinfo = (siginfo_t __user *) data;
 
-			ret = copy_siginfo_to_user(uinfo, &info);
-			ret |= __put_user(info.si_code, &uinfo->si_code);
-		}
-
-		if (ret) {
-			ret = -EFAULT;
-			break;
+			if (copy_siginfo_to_user(uinfo, &info) ||
+			    __put_user(info.si_code, &uinfo->si_code)) {
+				ret = -EFAULT;
+				break;
+			}
 		}
 
 		data += sizeof(siginfo_t);
diff --git a/kernel/range.c b/kernel/range.c
index eb911db..322ea8e 100644
--- a/kernel/range.c
+++ b/kernel/range.c
@@ -4,7 +4,7 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/sort.h>
-
+#include <linux/string.h>
 #include <linux/range.h>
 
 int add_range(struct range *range, int az, int nr_range, u64 start, u64 end)
@@ -32,9 +32,8 @@
 	if (start >= end)
 		return nr_range;
 
-	/* Try to merge it with old one: */
+	/* get new start/end: */
 	for (i = 0; i < nr_range; i++) {
-		u64 final_start, final_end;
 		u64 common_start, common_end;
 
 		if (!range[i].end)
@@ -45,14 +44,16 @@
 		if (common_start > common_end)
 			continue;
 
-		final_start = min(range[i].start, start);
-		final_end = max(range[i].end, end);
+		/* new start/end, will add it back at the end */
+		start = min(range[i].start, start);
+		end = max(range[i].end, end);
 
-		/* clear it and add it back for further merge */
-		range[i].start = 0;
-		range[i].end =  0;
-		return add_range_with_merge(range, az, nr_range,
-			final_start, final_end);
+		memmove(&range[i], &range[i + 1],
+			(nr_range - (i + 1)) * sizeof(range[i]));
+		range[nr_range - 1].start = 0;
+		range[nr_range - 1].end   = 0;
+		nr_range--;
+		i--;
 	}
 
 	/* Need to add it: */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 58453b8..e8b3350 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -633,7 +633,19 @@
 static inline bool got_nohz_idle_kick(void)
 {
 	int cpu = smp_processor_id();
-	return idle_cpu(cpu) && test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
+
+	if (!test_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu)))
+		return false;
+
+	if (idle_cpu(cpu) && !need_resched())
+		return true;
+
+	/*
+	 * We can't run the idle load balance on this CPU at this time,
+	 * so we cancel it and clear NOHZ_BALANCE_KICK.
+	 */
+	clear_bit(NOHZ_BALANCE_KICK, nohz_flags(cpu));
+	return false;
 }
 
 #else /* CONFIG_NO_HZ_COMMON */
@@ -1393,8 +1405,9 @@
 
 void scheduler_ipi(void)
 {
-	if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick()
-	    && !tick_nohz_full_cpu(smp_processor_id()))
+	if (llist_empty(&this_rq()->wake_list)
+			&& !tick_nohz_full_cpu(smp_processor_id())
+			&& !got_nohz_idle_kick())
 		return;
 
 	/*
@@ -1417,7 +1430,7 @@
 	/*
 	 * Check if someone kicked us for doing the nohz idle load balance.
 	 */
-	if (unlikely(got_nohz_idle_kick() && !need_resched())) {
+	if (unlikely(got_nohz_idle_kick())) {
 		this_rq()->idle_balance = 1;
 		raise_softirq_irqoff(SCHED_SOFTIRQ);
 	}
@@ -4745,7 +4758,7 @@
 	 */
 	idle->sched_class = &idle_sched_class;
 	ftrace_graph_init_idle_task(idle, cpu);
-	vtime_init_idle(idle);
+	vtime_init_idle(idle, cpu);
 #if defined(CONFIG_SMP)
 	sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
 #endif
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index cc2dc3e..b5ccba2 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -747,17 +747,17 @@
 
 	write_seqlock(&current->vtime_seqlock);
 	current->vtime_snap_whence = VTIME_SYS;
-	current->vtime_snap = sched_clock();
+	current->vtime_snap = sched_clock_cpu(smp_processor_id());
 	write_sequnlock(&current->vtime_seqlock);
 }
 
-void vtime_init_idle(struct task_struct *t)
+void vtime_init_idle(struct task_struct *t, int cpu)
 {
 	unsigned long flags;
 
 	write_seqlock_irqsave(&t->vtime_seqlock, flags);
 	t->vtime_snap_whence = VTIME_SYS;
-	t->vtime_snap = sched_clock();
+	t->vtime_snap = sched_clock_cpu(cpu);
 	write_sequnlock_irqrestore(&t->vtime_seqlock, flags);
 }
 
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 0c73942..20d6fba 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -599,8 +599,6 @@
 	} else {
 		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
 			clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
-			if (dev->next_event.tv64 == KTIME_MAX)
-				goto out;
 			/*
 			 * The cpu which was handling the broadcast
 			 * timer marked this cpu in the broadcast
@@ -615,6 +613,11 @@
 				goto out;
 
 			/*
+			 * Bail out if there is no next event.
+			 */
+			if (dev->next_event.tv64 == KTIME_MAX)
+				goto out;
+			/*
 			 * If the pending bit is not set, then we are
 			 * either the CPU handling the broadcast
 			 * interrupt or we got woken by something else.
@@ -698,10 +701,6 @@
 
 		bc->event_handler = tick_handle_oneshot_broadcast;
 
-		/* Take the do_timer update */
-		if (!tick_nohz_full_cpu(cpu))
-			tick_do_timer_cpu = cpu;
-
 		/*
 		 * We must be careful here. There might be other CPUs
 		 * waiting for periodic broadcast. We need to set the
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index f420813..0cf1c14 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -306,7 +306,7 @@
 		 * we can't safely shutdown that CPU.
 		 */
 		if (have_nohz_full_mask && tick_do_timer_cpu == cpu)
-			return -EINVAL;
+			return NOTIFY_BAD;
 		break;
 	}
 	return NOTIFY_OK;
diff --git a/mm/slab_common.c b/mm/slab_common.c
index ff3218a..2d41450 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -373,8 +373,10 @@
 {
 	int index;
 
-	if (WARN_ON_ONCE(size > KMALLOC_MAX_SIZE))
+	if (size > KMALLOC_MAX_SIZE) {
+		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
 		return NULL;
+	}
 
 	if (size <= 192) {
 		if (!size)
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 071f288..f680ee1 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -29,6 +29,21 @@
 #include "bat_algo.h"
 #include "network-coding.h"
 
+/**
+ * enum batadv_dup_status - duplicate status
+ * @BATADV_NO_DUP: the packet is not a duplicate
+ * @BATADV_ORIG_DUP: OGM is a duplicate in the originator (but not for the
+ *  neighbor)
+ * @BATADV_NEIGH_DUP: OGM is a duplicate for the neighbor
+ * @BATADV_PROTECTED: originator is currently protected (after reboot)
+ */
+enum batadv_dup_status {
+	BATADV_NO_DUP = 0,
+	BATADV_ORIG_DUP,
+	BATADV_NEIGH_DUP,
+	BATADV_PROTECTED,
+};
+
 static struct batadv_neigh_node *
 batadv_iv_ogm_neigh_new(struct batadv_hard_iface *hard_iface,
 			const uint8_t *neigh_addr,
@@ -650,7 +665,7 @@
 			  const struct batadv_ogm_packet *batadv_ogm_packet,
 			  struct batadv_hard_iface *if_incoming,
 			  const unsigned char *tt_buff,
-			  int is_duplicate)
+			  enum batadv_dup_status dup_status)
 {
 	struct batadv_neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
 	struct batadv_neigh_node *router = NULL;
@@ -676,7 +691,7 @@
 			continue;
 		}
 
-		if (is_duplicate)
+		if (dup_status != BATADV_NO_DUP)
 			continue;
 
 		spin_lock_bh(&tmp_neigh_node->lq_update_lock);
@@ -718,7 +733,7 @@
 	neigh_node->tq_avg = batadv_ring_buffer_avg(neigh_node->tq_recv);
 	spin_unlock_bh(&neigh_node->lq_update_lock);
 
-	if (!is_duplicate) {
+	if (dup_status == BATADV_NO_DUP) {
 		orig_node->last_ttl = batadv_ogm_packet->header.ttl;
 		neigh_node->last_ttl = batadv_ogm_packet->header.ttl;
 	}
@@ -902,15 +917,16 @@
 	return ret;
 }
 
-/* processes a batman packet for all interfaces, adjusts the sequence number and
- * finds out whether it is a duplicate.
- * returns:
- *   1 the packet is a duplicate
- *   0 the packet has not yet been received
- *  -1 the packet is old and has been received while the seqno window
- *     was protected. Caller should drop it.
+/**
+ * batadv_iv_ogm_update_seqnos - process a batman packet for all interfaces,
+ *  adjust the sequence number and find out whether it is a duplicate
+ * @ethhdr: ethernet header of the packet
+ * @batadv_ogm_packet: OGM packet to be considered
+ * @if_incoming: interface on which the OGM packet was received
+ *
+ * Returns duplicate status as enum batadv_dup_status
  */
-static int
+static enum batadv_dup_status
 batadv_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
 			    const struct batadv_ogm_packet *batadv_ogm_packet,
 			    const struct batadv_hard_iface *if_incoming)
@@ -918,17 +934,18 @@
 	struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
 	struct batadv_orig_node *orig_node;
 	struct batadv_neigh_node *tmp_neigh_node;
-	int is_duplicate = 0;
+	int is_dup;
 	int32_t seq_diff;
 	int need_update = 0;
-	int set_mark, ret = -1;
+	int set_mark;
+	enum batadv_dup_status ret = BATADV_NO_DUP;
 	uint32_t seqno = ntohl(batadv_ogm_packet->seqno);
 	uint8_t *neigh_addr;
 	uint8_t packet_count;
 
 	orig_node = batadv_get_orig_node(bat_priv, batadv_ogm_packet->orig);
 	if (!orig_node)
-		return 0;
+		return BATADV_NO_DUP;
 
 	spin_lock_bh(&orig_node->ogm_cnt_lock);
 	seq_diff = seqno - orig_node->last_real_seqno;
@@ -936,22 +953,29 @@
 	/* signalize caller that the packet is to be dropped. */
 	if (!hlist_empty(&orig_node->neigh_list) &&
 	    batadv_window_protected(bat_priv, seq_diff,
-				    &orig_node->batman_seqno_reset))
+				    &orig_node->batman_seqno_reset)) {
+		ret = BATADV_PROTECTED;
 		goto out;
+	}
 
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(tmp_neigh_node,
 				 &orig_node->neigh_list, list) {
-		is_duplicate |= batadv_test_bit(tmp_neigh_node->real_bits,
-						orig_node->last_real_seqno,
-						seqno);
-
 		neigh_addr = tmp_neigh_node->addr;
+		is_dup = batadv_test_bit(tmp_neigh_node->real_bits,
+					 orig_node->last_real_seqno,
+					 seqno);
+
 		if (batadv_compare_eth(neigh_addr, ethhdr->h_source) &&
-		    tmp_neigh_node->if_incoming == if_incoming)
+		    tmp_neigh_node->if_incoming == if_incoming) {
 			set_mark = 1;
-		else
+			if (is_dup)
+				ret = BATADV_NEIGH_DUP;
+		} else {
 			set_mark = 0;
+			if (is_dup && (ret != BATADV_NEIGH_DUP))
+				ret = BATADV_ORIG_DUP;
+		}
 
 		/* if the window moved, set the update flag. */
 		need_update |= batadv_bit_get_packet(bat_priv,
@@ -971,8 +995,6 @@
 		orig_node->last_real_seqno = seqno;
 	}
 
-	ret = is_duplicate;
-
 out:
 	spin_unlock_bh(&orig_node->ogm_cnt_lock);
 	batadv_orig_node_free_ref(orig_node);
@@ -994,7 +1016,8 @@
 	int is_broadcast = 0, is_bidirect;
 	bool is_single_hop_neigh = false;
 	bool is_from_best_next_hop = false;
-	int is_duplicate, sameseq, simlar_ttl;
+	int sameseq, similar_ttl;
+	enum batadv_dup_status dup_status;
 	uint32_t if_incoming_seqno;
 	uint8_t *prev_sender;
 
@@ -1138,10 +1161,10 @@
 	if (!orig_node)
 		return;
 
-	is_duplicate = batadv_iv_ogm_update_seqnos(ethhdr, batadv_ogm_packet,
-						   if_incoming);
+	dup_status = batadv_iv_ogm_update_seqnos(ethhdr, batadv_ogm_packet,
+						 if_incoming);
 
-	if (is_duplicate == -1) {
+	if (dup_status == BATADV_PROTECTED) {
 		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
 			   "Drop packet: packet within seqno protection time (sender: %pM)\n",
 			   ethhdr->h_source);
@@ -1211,11 +1234,12 @@
 	 * seqno and similar ttl as the non-duplicate
 	 */
 	sameseq = orig_node->last_real_seqno == ntohl(batadv_ogm_packet->seqno);
-	simlar_ttl = orig_node->last_ttl - 3 <= batadv_ogm_packet->header.ttl;
-	if (is_bidirect && (!is_duplicate || (sameseq && simlar_ttl)))
+	similar_ttl = orig_node->last_ttl - 3 <= batadv_ogm_packet->header.ttl;
+	if (is_bidirect && ((dup_status == BATADV_NO_DUP) ||
+			    (sameseq && similar_ttl)))
 		batadv_iv_ogm_orig_update(bat_priv, orig_node, ethhdr,
 					  batadv_ogm_packet, if_incoming,
-					  tt_buff, is_duplicate);
+					  tt_buff, dup_status);
 
 	/* is single hop (direct) neighbor */
 	if (is_single_hop_neigh) {
@@ -1236,7 +1260,7 @@
 		goto out_neigh;
 	}
 
-	if (is_duplicate) {
+	if (dup_status == BATADV_NEIGH_DUP) {
 		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
 			   "Drop packet: duplicate packet received\n");
 		goto out_neigh;
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 379061c..de27b31 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -1067,6 +1067,10 @@
 	group = htons(crc16(0, primary_if->net_dev->dev_addr, ETH_ALEN));
 	bat_priv->bla.claim_dest.group = group;
 
+	/* purge everything when bridge loop avoidance is turned off */
+	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
+		oldif = NULL;
+
 	if (!oldif) {
 		batadv_bla_purge_claims(bat_priv, NULL, 1);
 		batadv_bla_purge_backbone_gw(bat_priv, 1);
diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c
index 15a22ef..929e304 100644
--- a/net/batman-adv/sysfs.c
+++ b/net/batman-adv/sysfs.c
@@ -582,10 +582,7 @@
 	    (strncmp(hard_iface->soft_iface->name, buff, IFNAMSIZ) == 0))
 		goto out;
 
-	if (!rtnl_trylock()) {
-		ret = -ERESTARTSYS;
-		goto out;
-	}
+	rtnl_lock();
 
 	if (status_tmp == BATADV_IF_NOT_IN_USE) {
 		batadv_hardif_disable_interface(hard_iface,
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index d817c93..ace5e55 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -341,7 +341,6 @@
 
 static void bredr_setup(struct hci_request *req)
 {
-	struct hci_cp_delete_stored_link_key cp;
 	__le16 param;
 	__u8 flt_type;
 
@@ -365,10 +364,6 @@
 	param = __constant_cpu_to_le16(0x7d00);
 	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
 
-	bacpy(&cp.bdaddr, BDADDR_ANY);
-	cp.delete_all = 0x01;
-	hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
-
 	/* Read page scan parameters */
 	if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
 		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
@@ -602,6 +597,16 @@
 	struct hci_dev *hdev = req->hdev;
 	u8 p;
 
+	/* Only send HCI_Delete_Stored_Link_Key if it is supported */
+	if (hdev->commands[6] & 0x80) {
+		struct hci_cp_delete_stored_link_key cp;
+
+		bacpy(&cp.bdaddr, BDADDR_ANY);
+		cp.delete_all = 0x01;
+		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
+			    sizeof(cp), &cp);
+	}
+
 	if (hdev->commands[5] & 0x10)
 		hci_setup_link_policy(req);
 
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 24bee07..68843a2 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -2852,6 +2852,9 @@
 	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
 	       conn, code, ident, dlen);
 
+	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
+		return NULL;
+
 	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
 	count = min_t(unsigned int, conn->mtu, len);
 
@@ -4330,7 +4333,7 @@
 	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
 	u16 type, result;
 
-	if (cmd_len != sizeof(*rsp))
+	if (cmd_len < sizeof(*rsp))
 		return -EPROTO;
 
 	type   = __le16_to_cpu(rsp->type);
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 81f2389..d6448e3 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -465,8 +465,9 @@
 	skb_set_transport_header(skb, skb->len);
 	mldq = (struct mld_msg *) icmp6_hdr(skb);
 
-	interval = ipv6_addr_any(group) ? br->multicast_last_member_interval :
-					  br->multicast_query_response_interval;
+	interval = ipv6_addr_any(group) ?
+			br->multicast_query_response_interval :
+			br->multicast_last_member_interval;
 
 	mldq->mld_type = ICMPV6_MGM_QUERY;
 	mldq->mld_code = 0;
diff --git a/net/core/dev.c b/net/core/dev.c
index fc1e289..faebb39 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -792,6 +792,40 @@
 EXPORT_SYMBOL(dev_get_by_index);
 
 /**
+ *	netdev_get_name - get a netdevice name, knowing its ifindex.
+ *	@net: network namespace
+ *	@name: a pointer to the buffer where the name will be stored.
+ *	@ifindex: the ifindex of the interface to get the name from.
+ *
+ *	The use of raw_seqcount_begin() and cond_resched() before
+ *	retrying is required as we want to give the writers a chance
+ *	to complete when CONFIG_PREEMPT is not set.
+ */
+int netdev_get_name(struct net *net, char *name, int ifindex)
+{
+	struct net_device *dev;
+	unsigned int seq;
+
+retry:
+	seq = raw_seqcount_begin(&devnet_rename_seq);
+	rcu_read_lock();
+	dev = dev_get_by_index_rcu(net, ifindex);
+	if (!dev) {
+		rcu_read_unlock();
+		return -ENODEV;
+	}
+
+	strcpy(name, dev->name);
+	rcu_read_unlock();
+	if (read_seqcount_retry(&devnet_rename_seq, seq)) {
+		cond_resched();
+		goto retry;
+	}
+
+	return 0;
+}
+
+/**
  *	dev_getbyhwaddr_rcu - find a device by its hardware address
  *	@net: the applicable net namespace
  *	@type: media type of device
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index 6cc0481..5b7d0e1 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -19,9 +19,8 @@
 
 static int dev_ifname(struct net *net, struct ifreq __user *arg)
 {
-	struct net_device *dev;
 	struct ifreq ifr;
-	unsigned seq;
+	int error;
 
 	/*
 	 *	Fetch the caller's info block.
@@ -30,19 +29,9 @@
 	if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
 		return -EFAULT;
 
-retry:
-	seq = read_seqcount_begin(&devnet_rename_seq);
-	rcu_read_lock();
-	dev = dev_get_by_index_rcu(net, ifr.ifr_ifindex);
-	if (!dev) {
-		rcu_read_unlock();
-		return -ENODEV;
-	}
-
-	strcpy(ifr.ifr_name, dev->name);
-	rcu_read_unlock();
-	if (read_seqcount_retry(&devnet_rename_seq, seq))
-		goto retry;
+	error = netdev_get_name(net, ifr.ifr_name, ifr.ifr_ifindex);
+	if (error)
+		return error;
 
 	if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
 		return -EFAULT;
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 22efdaa..ce91766 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -60,10 +60,10 @@
 	[NETIF_F_IPV6_CSUM_BIT] =        "tx-checksum-ipv6",
 	[NETIF_F_HIGHDMA_BIT] =          "highdma",
 	[NETIF_F_FRAGLIST_BIT] =         "tx-scatter-gather-fraglist",
-	[NETIF_F_HW_VLAN_CTAG_TX_BIT] =  "tx-vlan-ctag-hw-insert",
+	[NETIF_F_HW_VLAN_CTAG_TX_BIT] =  "tx-vlan-hw-insert",
 
-	[NETIF_F_HW_VLAN_CTAG_RX_BIT] =  "rx-vlan-ctag-hw-parse",
-	[NETIF_F_HW_VLAN_CTAG_FILTER_BIT] = "rx-vlan-ctag-filter",
+	[NETIF_F_HW_VLAN_CTAG_RX_BIT] =  "rx-vlan-hw-parse",
+	[NETIF_F_HW_VLAN_CTAG_FILTER_BIT] = "rx-vlan-filter",
 	[NETIF_F_HW_VLAN_STAG_TX_BIT] =  "tx-vlan-stag-hw-insert",
 	[NETIF_F_HW_VLAN_STAG_RX_BIT] =  "rx-vlan-stag-hw-parse",
 	[NETIF_F_HW_VLAN_STAG_FILTER_BIT] = "rx-vlan-stag-filter",
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index cfd777b..1c1738c 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -483,15 +483,8 @@
 
 static void skb_drop_list(struct sk_buff **listp)
 {
-	struct sk_buff *list = *listp;
-
+	kfree_skb_list(*listp);
 	*listp = NULL;
-
-	do {
-		struct sk_buff *this = list;
-		list = list->next;
-		kfree_skb(this);
-	} while (list);
 }
 
 static inline void skb_drop_fraglist(struct sk_buff *skb)
@@ -651,6 +644,17 @@
 }
 EXPORT_SYMBOL(kfree_skb);
 
+void kfree_skb_list(struct sk_buff *segs)
+{
+	while (segs) {
+		struct sk_buff *next = segs->next;
+
+		kfree_skb(segs);
+		segs = next;
+	}
+}
+EXPORT_SYMBOL(kfree_skb_list);
+
 /**
  *	skb_tx_error - report an sk_buff xmit error
  *	@skb: buffer that triggered an error
diff --git a/net/core/sock.c b/net/core/sock.c
index 88868a9..d6d024c 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -571,9 +571,7 @@
 	int ret = -ENOPROTOOPT;
 #ifdef CONFIG_NETDEVICES
 	struct net *net = sock_net(sk);
-	struct net_device *dev;
 	char devname[IFNAMSIZ];
-	unsigned seq;
 
 	if (sk->sk_bound_dev_if == 0) {
 		len = 0;
@@ -584,20 +582,9 @@
 	if (len < IFNAMSIZ)
 		goto out;
 
-retry:
-	seq = read_seqcount_begin(&devnet_rename_seq);
-	rcu_read_lock();
-	dev = dev_get_by_index_rcu(net, sk->sk_bound_dev_if);
-	ret = -ENODEV;
-	if (!dev) {
-		rcu_read_unlock();
+	ret = netdev_get_name(net, devname, sk->sk_bound_dev_if);
+	if (ret)
 		goto out;
-	}
-
-	strcpy(devname, dev->name);
-	rcu_read_unlock();
-	if (read_seqcount_retry(&devnet_rename_seq, seq))
-		goto retry;
 
 	len = strlen(devname) + 1;
 
diff --git a/net/ipv4/gre.c b/net/ipv4/gre.c
index b2e805a..7856d16 100644
--- a/net/ipv4/gre.c
+++ b/net/ipv4/gre.c
@@ -178,7 +178,7 @@
 
 				err = __skb_linearize(skb);
 				if (err) {
-					kfree_skb(segs);
+					kfree_skb_list(segs);
 					segs = ERR_PTR(err);
 					goto out;
 				}
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index be2f8da..7fa8f08 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -853,7 +853,7 @@
 }
 EXPORT_SYMBOL_GPL(ip_tunnel_dellink);
 
-int __net_init ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
+int ip_tunnel_init_net(struct net *net, int ip_tnl_net_id,
 				  struct rtnl_link_ops *ops, char *devname)
 {
 	struct ip_tunnel_net *itn = net_generic(net, ip_tnl_net_id);
@@ -899,7 +899,7 @@
 		unregister_netdevice_queue(itn->fb_tunnel_dev, head);
 }
 
-void __net_exit ip_tunnel_delete_net(struct ip_tunnel_net *itn)
+void ip_tunnel_delete_net(struct ip_tunnel_net *itn)
 {
 	LIST_HEAD(list);
 
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 9d2bdb2..c118f6b 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -361,8 +361,7 @@
 			tunnel->err_count = 0;
 	}
 
-	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
-			      IPSKB_REROUTED);
+	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
 	skb_dst_drop(skb);
 	skb_dst_set(skb, &rt->dst);
 	nf_reset(skb);
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
index ff4b781..32b0e97 100644
--- a/net/ipv4/netfilter/ipt_ULOG.c
+++ b/net/ipv4/netfilter/ipt_ULOG.c
@@ -125,15 +125,16 @@
 /* timer function to flush queue in flushtimeout time */
 static void ulog_timer(unsigned long data)
 {
+	unsigned int groupnum = *((unsigned int *)data);
 	struct ulog_net *ulog = container_of((void *)data,
 					     struct ulog_net,
-					     nlgroup[*(unsigned int *)data]);
+					     nlgroup[groupnum]);
 	pr_debug("timer function called, calling ulog_send\n");
 
 	/* lock to protect against somebody modifying our structure
 	 * from ipt_ulog_target at the same time */
 	spin_lock_bh(&ulog->lock);
-	ulog_send(ulog, data);
+	ulog_send(ulog, groupnum);
 	spin_unlock_bh(&ulog->lock);
 }
 
@@ -407,8 +408,11 @@
 
 	spin_lock_init(&ulog->lock);
 	/* initialize ulog_buffers */
-	for (i = 0; i < ULOG_MAXNLGROUPS; i++)
-		setup_timer(&ulog->ulog_buffers[i].timer, ulog_timer, i);
+	for (i = 0; i < ULOG_MAXNLGROUPS; i++) {
+		ulog->nlgroup[i] = i;
+		setup_timer(&ulog->ulog_buffers[i].timer, ulog_timer,
+			    (unsigned long)&ulog->nlgroup[i]);
+	}
 
 	ulog->nflognl = netlink_kernel_create(net, NETLINK_NFLOG, &cfg);
 	if (!ulog->nflognl)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 7196523..7999fc5 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1003,7 +1003,7 @@
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct tcp_md5sig_info *md5sig;
 
-	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET);
+	key = tcp_md5_do_lookup(sk, addr, family);
 	if (key) {
 		/* Pre-existing entry - just update that one. */
 		memcpy(key->key, newkey, newkeylen);
@@ -1048,7 +1048,7 @@
 	struct tcp_md5sig_key *key;
 	struct tcp_md5sig_info *md5sig;
 
-	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET);
+	key = tcp_md5_do_lookup(sk, addr, family);
 	if (!key)
 		return -ENOENT;
 	hlist_del_rcu(&key->node);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 1bbf744..4ab4c38 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2655,6 +2655,9 @@
 			if (sp_ifa->flags & (IFA_F_DADFAILED | IFA_F_TENTATIVE))
 				continue;
 
+			if (sp_ifa->rt)
+				continue;
+
 			sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0);
 
 			/* Failure cases are ignored */
@@ -4303,6 +4306,7 @@
 	struct inet6_ifaddr *ifp;
 	struct net_device *dev = idev->dev;
 	bool update_rs = false;
+	struct in6_addr ll_addr;
 
 	if (token == NULL)
 		return -EINVAL;
@@ -4322,11 +4326,9 @@
 
 	write_unlock_bh(&idev->lock);
 
-	if (!idev->dead && (idev->if_flags & IF_READY)) {
-		struct in6_addr ll_addr;
-
-		ipv6_get_lladdr(dev, &ll_addr, IFA_F_TENTATIVE |
-				IFA_F_OPTIMISTIC);
+	if (!idev->dead && (idev->if_flags & IF_READY) &&
+	    !ipv6_get_lladdr(dev, &ll_addr, IFA_F_TENTATIVE |
+			     IFA_F_OPTIMISTIC)) {
 
 		/* If we're not ready, then normal ifup will take care
 		 * of this. Otherwise, we need to request our rs here.
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index dae1949..d5d20cd 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -381,9 +381,8 @@
 	 *	cannot be fragmented, because there is no warranty
 	 *	that different fragments will go along one path. --ANK
 	 */
-	if (opt->ra) {
-		u8 *ptr = skb_network_header(skb) + opt->ra;
-		if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3]))
+	if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) {
+		if (ip6_call_ra_chain(skb, ntohs(opt->ra)))
 			return 0;
 	}
 
@@ -822,11 +821,17 @@
 					  const struct flowi6 *fl6)
 {
 	struct ipv6_pinfo *np = inet6_sk(sk);
-	struct rt6_info *rt = (struct rt6_info *)dst;
+	struct rt6_info *rt;
 
 	if (!dst)
 		goto out;
 
+	if (dst->ops->family != AF_INET6) {
+		dst_release(dst);
+		return NULL;
+	}
+
+	rt = (struct rt6_info *)dst;
 	/* Yes, checking route validity in not connected
 	 * case is not very simple. Take into account,
 	 * that we do not support routing by source, TOS,
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 2712ab2..ca4ffcc 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1493,7 +1493,7 @@
 	 */
 
 	if (ha)
-		ndisc_fill_addr_option(skb, ND_OPT_TARGET_LL_ADDR, ha);
+		ndisc_fill_addr_option(buff, ND_OPT_TARGET_LL_ADDR, ha);
 
 	/*
 	 *	build redirect option and copy skb over to the new packet.
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
index 97bcf2b..c9b6a6e 100644
--- a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -204,7 +204,7 @@
 		if (ct != NULL && !nf_ct_is_untracked(ct)) {
 			help = nfct_help(ct);
 			if ((help && help->helper) || !nf_ct_is_confirmed(ct)) {
-				nf_conntrack_get_reasm(skb);
+				nf_conntrack_get_reasm(reasm);
 				NF_HOOK_THRESH(NFPROTO_IPV6, hooknum, reasm,
 					       (struct net_device *)in,
 					       (struct net_device *)out,
diff --git a/net/key/af_key.c b/net/key/af_key.c
index c5fbd75..9da8620 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1710,6 +1710,7 @@
 	hdr->sadb_msg_version = PF_KEY_V2;
 	hdr->sadb_msg_errno = (uint8_t) 0;
 	hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
+	hdr->sadb_msg_reserved = 0;
 
 	pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
 
@@ -2699,6 +2700,7 @@
 	hdr->sadb_msg_errno = (uint8_t) 0;
 	hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;
 	hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));
+	hdr->sadb_msg_reserved = 0;
 	pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);
 	return 0;
 
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 637a341..8dec687 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -346,19 +346,19 @@
 	skb_put(skb, 2);
 
 	/* Copy user data into skb */
-	error = memcpy_fromiovec(skb->data, m->msg_iov, total_len);
+	error = memcpy_fromiovec(skb_put(skb, total_len), m->msg_iov,
+				 total_len);
 	if (error < 0) {
 		kfree_skb(skb);
 		goto error_put_sess_tun;
 	}
-	skb_put(skb, total_len);
 
 	l2tp_xmit_skb(session, skb, session->hdr_len);
 
 	sock_put(ps->tunnel_sock);
 	sock_put(sk);
 
-	return error;
+	return total_len;
 
 error_put_sess_tun:
 	sock_put(ps->tunnel_sock);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 1a89c80..4fdb306e 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1057,6 +1057,12 @@
 	clear_bit(SDATA_STATE_OFFCHANNEL_BEACON_STOPPED, &sdata->state);
 	ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED);
 
+	if (sdata->wdev.cac_started) {
+		cancel_delayed_work_sync(&sdata->dfs_cac_timer_work);
+		cfg80211_cac_event(sdata->dev, NL80211_RADAR_CAC_ABORTED,
+				   GFP_KERNEL);
+	}
+
 	drv_stop_ap(sdata->local, sdata);
 
 	/* free all potentially still buffered bcast frames */
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 44be28c..9ca8e32 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1497,10 +1497,11 @@
 	ieee80211_tx_skb_tid(sdata, skb, 7);
 }
 
-u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, bool action,
+u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
 			       struct ieee802_11_elems *elems,
 			       u64 filter, u32 crc);
-static inline void ieee802_11_parse_elems(u8 *start, size_t len, bool action,
+static inline void ieee802_11_parse_elems(const u8 *start, size_t len,
+					  bool action,
 					  struct ieee802_11_elems *elems)
 {
 	ieee802_11_parse_elems_crc(start, len, action, elems, 0, 0);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index a8c2130..741448b 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -2522,8 +2522,11 @@
 	u16 capab_info, aid;
 	struct ieee802_11_elems elems;
 	struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf;
+	const struct cfg80211_bss_ies *bss_ies = NULL;
+	struct ieee80211_mgd_assoc_data *assoc_data = ifmgd->assoc_data;
 	u32 changed = 0;
 	int err;
+	bool ret;
 
 	/* AssocResp and ReassocResp have identical structure */
 
@@ -2555,21 +2558,86 @@
 	ifmgd->aid = aid;
 
 	/*
+	 * Some APs are erroneously not including some information in their
+	 * (re)association response frames. Try to recover by using the data
+	 * from the beacon or probe response. This seems to afflict mobile
+	 * 2G/3G/4G wifi routers; reported models include the "Onda PN51T",
+	 * "Vodafone PocketWiFi 2", "ZTE MF60" and a similar T-Mobile device.
+	 */
+	if ((assoc_data->wmm && !elems.wmm_param) ||
+	    (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT) &&
+	     (!elems.ht_cap_elem || !elems.ht_operation)) ||
+	    (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT) &&
+	     (!elems.vht_cap_elem || !elems.vht_operation))) {
+		const struct cfg80211_bss_ies *ies;
+		struct ieee802_11_elems bss_elems;
+
+		rcu_read_lock();
+		ies = rcu_dereference(cbss->ies);
+		if (ies)
+			bss_ies = kmemdup(ies, sizeof(*ies) + ies->len,
+					  GFP_ATOMIC);
+		rcu_read_unlock();
+		if (!bss_ies)
+			return false;
+
+		ieee802_11_parse_elems(bss_ies->data, bss_ies->len,
+				       false, &bss_elems);
+		if (assoc_data->wmm &&
+		    !elems.wmm_param && bss_elems.wmm_param) {
+			elems.wmm_param = bss_elems.wmm_param;
+			sdata_info(sdata,
+				   "AP bug: WMM param missing from AssocResp\n");
+		}
+
+		/*
+		 * Also check if we requested HT/VHT; otherwise the AP doesn't
+		 * have to include the IEs in the (re)association response.
+		 */
+		if (!elems.ht_cap_elem && bss_elems.ht_cap_elem &&
+		    !(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) {
+			elems.ht_cap_elem = bss_elems.ht_cap_elem;
+			sdata_info(sdata,
+				   "AP bug: HT capability missing from AssocResp\n");
+		}
+		if (!elems.ht_operation && bss_elems.ht_operation &&
+		    !(ifmgd->flags & IEEE80211_STA_DISABLE_HT)) {
+			elems.ht_operation = bss_elems.ht_operation;
+			sdata_info(sdata,
+				   "AP bug: HT operation missing from AssocResp\n");
+		}
+		if (!elems.vht_cap_elem && bss_elems.vht_cap_elem &&
+		    !(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) {
+			elems.vht_cap_elem = bss_elems.vht_cap_elem;
+			sdata_info(sdata,
+				   "AP bug: VHT capa missing from AssocResp\n");
+		}
+		if (!elems.vht_operation && bss_elems.vht_operation &&
+		    !(ifmgd->flags & IEEE80211_STA_DISABLE_VHT)) {
+			elems.vht_operation = bss_elems.vht_operation;
+			sdata_info(sdata,
+				   "AP bug: VHT operation missing from AssocResp\n");
+		}
+	}
+
+	/*
 	 * We previously checked these in the beacon/probe response, so
 	 * they should be present here. This is just a safety net.
 	 */
 	if (!(ifmgd->flags & IEEE80211_STA_DISABLE_HT) &&
 	    (!elems.wmm_param || !elems.ht_cap_elem || !elems.ht_operation)) {
 		sdata_info(sdata,
-			   "HT AP is missing WMM params or HT capability/operation in AssocResp\n");
-		return false;
+			   "HT AP is missing WMM params or HT capability/operation\n");
+		ret = false;
+		goto out;
 	}
 
 	if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT) &&
 	    (!elems.vht_cap_elem || !elems.vht_operation)) {
 		sdata_info(sdata,
-			   "VHT AP is missing VHT capability/operation in AssocResp\n");
-		return false;
+			   "VHT AP is missing VHT capability/operation\n");
+		ret = false;
+		goto out;
 	}
 
 	mutex_lock(&sdata->local->sta_mtx);
@@ -2580,7 +2648,8 @@
 	sta = sta_info_get(sdata, cbss->bssid);
 	if (WARN_ON(!sta)) {
 		mutex_unlock(&sdata->local->sta_mtx);
-		return false;
+		ret = false;
+		goto out;
 	}
 
 	sband = local->hw.wiphy->bands[ieee80211_get_sdata_band(sdata)];
@@ -2633,7 +2702,8 @@
 			   sta->sta.addr);
 		WARN_ON(__sta_info_destroy(sta));
 		mutex_unlock(&sdata->local->sta_mtx);
-		return false;
+		ret = false;
+		goto out;
 	}
 
 	mutex_unlock(&sdata->local->sta_mtx);
@@ -2673,7 +2743,10 @@
 	ieee80211_sta_rx_notify(sdata, (struct ieee80211_hdr *)mgmt);
 	ieee80211_sta_reset_beacon_monitor(sdata);
 
-	return true;
+	ret = true;
+ out:
+	kfree(bss_ies);
+	return ret;
 }
 
 static enum rx_mgmt_action __must_check
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index d3f414f..a02bef3 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -615,7 +615,7 @@
 		if (rates[i].idx < 0)
 			break;
 
-		rate_idx_match_mask(&rates[i], sband, mask, chan_width,
+		rate_idx_match_mask(&rates[i], sband, chan_width, mask,
 				    mcs_mask);
 	}
 }
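The one-line rate.c fix reorders two arguments so the call matches the parameter order of rate_idx_match_mask() (channel width before the legacy mask). Because both arguments have integer-compatible types, the compiler accepts either order, which is how this kind of bug slips in silently. A tiny illustration of the hazard (hypothetical function, not the kernel's):

    #include <stdio.h>

    /* Width first, mask second: nothing stops a caller from swapping them. */
    static unsigned int clamp_rate(unsigned int chan_width, unsigned int legacy_mask)
    {
        return legacy_mask & (chan_width > 20 ? 0xffu : 0x0fu);
    }

    int main(void)
    {
        unsigned int mask = 0xffu, width = 40;

        printf("correct: %#x\n", clamp_rate(width, mask)); /* 0xff */
        printf("swapped: %#x\n", clamp_rate(mask, width)); /* 0x28: compiles, silently wrong */
        return 0;
    }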
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 27e0715..72e6292 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -661,12 +661,12 @@
 }
 EXPORT_SYMBOL(ieee80211_queue_delayed_work);
 
-u32 ieee802_11_parse_elems_crc(u8 *start, size_t len, bool action,
+u32 ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
 			       struct ieee802_11_elems *elems,
 			       u64 filter, u32 crc)
 {
 	size_t left = len;
-	u8 *pos = start;
+	const u8 *pos = start;
 	bool calc_crc = filter != 0;
 	DECLARE_BITMAP(seen_elems, 256);
 	const u8 *ie;
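The util.c change const-qualifies the input buffer of ieee802_11_parse_elems_crc(): the parser only reads the element data, so advertising that in the prototype lets callers such as the AssocResp fallback above pass pointers derived from the RCU-protected, read-only cached IEs without casting. A minimal sketch of a read-only information-element walker under the same convention (illustrative, not the kernel's parser):

    #include <stddef.h>
    #include <stdint.h>

    /* A parser that never writes through its input can say so in the
     * prototype; read-only callers then need no cast. */
    static size_t count_elements(const uint8_t *buf, size_t len)
    {
        size_t n = 0;

        while (len >= 2 && len >= 2u + buf[1]) {  /* id, length, payload */
            len -= 2u + buf[1];
            buf += 2u + buf[1];
            n++;
        }
        return n;
    }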
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 05565d2..23b8eb5 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1442,7 +1442,8 @@
 
 	/* do the statistics and put it back */
 	ip_vs_in_stats(cp, skb);
-	if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol)
+	if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol ||
+	    IPPROTO_SCTP == cih->protocol)
 		offset += 2 * sizeof(__u16);
 	verdict = ip_vs_icmp_xmit(skb, cp, pp, offset, hooknum, &ciph);
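The ip_vs_core.c change extends the ICMP-error path to SCTP: like TCP and UDP, an SCTP header begins with a 16-bit source port and a 16-bit destination port, so the offset used for the subsequent checksum fixup can be advanced past the port pair for all three protocols. A small sketch of that shared-prefix assumption (constants and names are illustrative):

    #include <stdint.h>
    #include <stddef.h>

    /* TCP, UDP and SCTP all start with a 16-bit source and destination port,
     * so an ICMP-error handler can step over them uniformly. */
    struct transport_ports {
        uint16_t source;
        uint16_t dest;
    };

    static size_t payload_offset(uint8_t proto, size_t l4_offset)
    {
        /* 6 = TCP, 17 = UDP, 132 = SCTP */
        if (proto == 6 || proto == 17 || proto == 132)
            return l4_offset + 2 * sizeof(uint16_t); /* skip the port pair */
        return l4_offset;
    }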
 
diff --git a/net/netfilter/nf_conntrack_labels.c b/net/netfilter/nf_conntrack_labels.c
index 8fe2e99..355d2ef 100644
--- a/net/netfilter/nf_conntrack_labels.c
+++ b/net/netfilter/nf_conntrack_labels.c
@@ -45,7 +45,7 @@
 	if (test_bit(bit, labels->bits))
 		return 0;
 
-	if (test_and_set_bit(bit, labels->bits))
+	if (!test_and_set_bit(bit, labels->bits))
 		nf_conntrack_event_cache(IPCT_LABEL, ct);
 
 	return 0;
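The nf_conntrack_labels.c fix inverts the condition: test_and_set_bit() returns the bit's previous value, so a label event should fire when it returns 0 (the bit was newly set), not when the label was already present. The ctnetlink hunk below complements this by adding IPCT_LABEL to the reported event mask. A user-space sketch of the return-value convention (a simplified, non-atomic stand-in for test_and_set_bit()):

    #include <stdbool.h>
    #include <stdio.h>

    /* Returns the previous state of the bit, mirroring test_and_set_bit(). */
    static bool test_and_set(unsigned long *word, unsigned int bit)
    {
        bool old = (*word >> bit) & 1ul;

        *word |= 1ul << bit;
        return old;
    }

    int main(void)
    {
        unsigned long labels = 0;

        if (!test_and_set(&labels, 3))   /* old value was 0: bit newly set, emit event */
            puts("label added, emit event");
        if (!test_and_set(&labels, 3))   /* second call: old value was 1, stay quiet */
            puts("never printed");
        return 0;
    }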
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 6d0f8a1..ecf065f 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1825,6 +1825,7 @@
 			nf_conntrack_eventmask_report((1 << IPCT_REPLY) |
 						      (1 << IPCT_ASSURED) |
 						      (1 << IPCT_HELPER) |
+						      (1 << IPCT_LABEL) |
 						      (1 << IPCT_PROTOINFO) |
 						      (1 << IPCT_NATSEQADJ) |
 						      (1 << IPCT_MARK),
diff --git a/net/netfilter/nf_nat_sip.c b/net/netfilter/nf_nat_sip.c
index 96ccdf7..dac11f7 100644
--- a/net/netfilter/nf_nat_sip.c
+++ b/net/netfilter/nf_nat_sip.c
@@ -230,9 +230,10 @@
 					&ct->tuplehash[!dir].tuple.src.u3,
 					false);
 			if (!mangle_packet(skb, protoff, dataoff, dptr, datalen,
-					   poff, plen, buffer, buflen))
+					   poff, plen, buffer, buflen)) {
 				nf_ct_helper_log(skb, ct, "cannot mangle received");
 				return NF_DROP;
+			}
 		}
 
 		/* The rport= parameter (RFC 3581) contains the port number
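The nf_nat_sip.c hunk adds the braces that the indentation always implied: without them, only the nf_ct_helper_log() call was conditional and the NF_DROP return executed unconditionally. A compact sketch of that dangling-statement bug and its fix (plain ints stand in for netfilter verdicts):

    #include <stdio.h>

    /* Without braces only the log line is conditional; the return always runs. */
    static int mangle_buggy(int ok)
    {
        if (!ok)
            printf("cannot mangle\n");
            return 0;                 /* indentation lies: executes even when ok != 0 */
        return 1;                     /* unreachable */
    }

    /* The braces scope both statements to the failure case. */
    static int mangle_fixed(int ok)
    {
        if (!ok) {
            printf("cannot mangle\n");
            return 0;
        }
        return 1;
    }

    int main(void)
    {
        return (mangle_buggy(1) == 0 && mangle_fixed(1) == 1) ? 0 : 1;
    }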
diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c
index afaebc7..7011c71 100644
--- a/net/netfilter/xt_TCPMSS.c
+++ b/net/netfilter/xt_TCPMSS.c
@@ -45,17 +45,22 @@
 
 static int
 tcpmss_mangle_packet(struct sk_buff *skb,
-		     const struct xt_tcpmss_info *info,
+		     const struct xt_action_param *par,
 		     unsigned int in_mtu,
 		     unsigned int tcphoff,
 		     unsigned int minlen)
 {
+	const struct xt_tcpmss_info *info = par->targinfo;
 	struct tcphdr *tcph;
 	unsigned int tcplen, i;
 	__be16 oldval;
 	u16 newmss;
 	u8 *opt;
 
+	/* This is a fragment, no TCP header is available */
+	if (par->fragoff != 0)
+		return XT_CONTINUE;
+
 	if (!skb_make_writable(skb, skb->len))
 		return -1;
 
@@ -125,11 +130,17 @@
 
 	skb_put(skb, TCPOLEN_MSS);
 
-	/* RFC 879 states that the default MSS is 536 without specific
-	 * knowledge that the destination host is prepared to accept larger.
-	 * Since no MSS was provided, we MUST NOT set a value > 536.
+	/*
+	 * IPv4: RFC 1122 states "If an MSS option is not received at
+	 * connection setup, TCP MUST assume a default send MSS of 536".
+	 * IPv6: RFC 2460 states IPv6 has a minimum MTU of 1280 and a minimum
+	 * length IPv6 header of 60, ergo the default MSS value is 1220
+	 * Since no MSS was provided, we must use the default values
 	 */
-	newmss = min(newmss, (u16)536);
+	if (par->family == NFPROTO_IPV4)
+		newmss = min(newmss, (u16)536);
+	else
+		newmss = min(newmss, (u16)1220);
 
 	opt = (u_int8_t *)tcph + sizeof(struct tcphdr);
 	memmove(opt + TCPOLEN_MSS, opt, tcplen - sizeof(struct tcphdr));
@@ -188,7 +199,7 @@
 	__be16 newlen;
 	int ret;
 
-	ret = tcpmss_mangle_packet(skb, par->targinfo,
+	ret = tcpmss_mangle_packet(skb, par,
 				   tcpmss_reverse_mtu(skb, PF_INET),
 				   iph->ihl * 4,
 				   sizeof(*iph) + sizeof(struct tcphdr));
@@ -217,7 +228,7 @@
 	tcphoff = ipv6_skip_exthdr(skb, sizeof(*ipv6h), &nexthdr, &frag_off);
 	if (tcphoff < 0)
 		return NF_DROP;
-	ret = tcpmss_mangle_packet(skb, par->targinfo,
+	ret = tcpmss_mangle_packet(skb, par,
 				   tcpmss_reverse_mtu(skb, PF_INET6),
 				   tcphoff,
 				   sizeof(*ipv6h) + sizeof(struct tcphdr));
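Two behavioral changes in xt_TCPMSS.c: the target now passes the whole xt_action_param so it can return XT_CONTINUE for non-first fragments (which carry no TCP header), and when the peer sent no MSS option the clamp value becomes family-specific, 536 for IPv4 per RFC 1122 and 1220 for IPv6 (the 1280-byte minimum MTU minus the 40-byte IPv6 header and 20-byte TCP header). A sketch of the per-family default (the NFPROTO constants here are illustrative stand-ins):

    #include <stdint.h>

    #define NFPROTO_IPV4_SKETCH 2
    #define NFPROTO_IPV6_SKETCH 10

    /* Default MSS when no MSS option was received: 536 for IPv4 (RFC 1122),
     * 1220 for IPv6 (1280 minimum MTU - 40 IPv6 header - 20 TCP header). */
    static uint16_t clamp_default_mss(uint16_t newmss, int family)
    {
        uint16_t cap = (family == NFPROTO_IPV4_SKETCH) ? 536 : 1220;

        return newmss < cap ? newmss : cap;
    }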
diff --git a/net/netfilter/xt_TCPOPTSTRIP.c b/net/netfilter/xt_TCPOPTSTRIP.c
index 1eb1a44..b68fa19 100644
--- a/net/netfilter/xt_TCPOPTSTRIP.c
+++ b/net/netfilter/xt_TCPOPTSTRIP.c
@@ -48,11 +48,13 @@
 		return NF_DROP;
 
 	len = skb->len - tcphoff;
-	if (len < (int)sizeof(struct tcphdr) ||
-	    tcp_hdr(skb)->doff * 4 > len)
+	if (len < (int)sizeof(struct tcphdr))
 		return NF_DROP;
 
 	tcph = (struct tcphdr *)(skb_network_header(skb) + tcphoff);
+	if (tcph->doff * 4 > len)
+		return NF_DROP;
+
 	opt  = (u_int8_t *)tcph;
 
 	/*
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 8ec1bca..20a1bd0 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2851,12 +2851,11 @@
 		return -EOPNOTSUPP;
 
 	uaddr->sa_family = AF_PACKET;
+	memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
 	rcu_read_lock();
 	dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
 	if (dev)
-		strncpy(uaddr->sa_data, dev->name, 14);
-	else
-		memset(uaddr->sa_data, 0, 14);
+		strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
 	rcu_read_unlock();
 	*uaddr_len = sizeof(*uaddr);
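The af_packet.c change zeroes sa_data up front and switches from strncpy() to strlcpy(): strncpy() does not NUL-terminate when the source fills the buffer, and the old code only cleared the buffer in the no-device branch, so a maximum-length interface name could be copied to userspace unterminated. A small sketch of the always-terminating copy (copy_name is a hypothetical strlcpy-style helper, since strlcpy is not part of standard C):

    #include <stdio.h>
    #include <string.h>

    /* Minimal strlcpy-style copy: always NUL-terminates when size > 0. */
    static size_t copy_name(char *dst, const char *src, size_t size)
    {
        size_t len = strlen(src);

        if (size) {
            size_t n = len < size - 1 ? len : size - 1;

            memcpy(dst, src, n);
            dst[n] = '\0';
        }
        return len;
    }

    int main(void)
    {
        char sa_data[14];

        memset(sa_data, 0, sizeof(sa_data));              /* defensive zeroing first */
        copy_name(sa_data, "verylongname13x", sizeof(sa_data));
        printf("%s\n", sa_data);                          /* truncated but terminated */
        return 0;
    }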
 
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index 32a4625..be35e2d 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -206,6 +206,8 @@
  */
 void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
 {
+	memset(q, 0, sizeof(struct sctp_outq));
+
 	q->asoc = asoc;
 	INIT_LIST_HEAD(&q->out_chunk_list);
 	INIT_LIST_HEAD(&q->control_chunk_list);
@@ -213,11 +215,7 @@
 	INIT_LIST_HEAD(&q->sacked);
 	INIT_LIST_HEAD(&q->abandoned);
 
-	q->fast_rtx = 0;
-	q->outstanding_bytes = 0;
 	q->empty = 1;
-	q->cork  = 0;
-	q->out_qlen = 0;
 }
 
 /* Free the outqueue structure and any related pending chunks.
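The sctp_outq_init() change replaces field-by-field zeroing with a single memset over the structure, so any field added to struct sctp_outq in the future starts out zeroed as well; only the list heads and the one non-zero default (empty = 1) still need explicit initialization. A sketch of that pattern with an illustrative structure:

    #include <string.h>

    struct outq_sketch {
        int fast_rtx;
        int outstanding_bytes;
        int cork;
        int out_qlen;
        int empty;
    };

    /* Zero the whole structure once, then set only the non-zero defaults;
     * fields added later are implicitly initialized to zero. */
    static void outq_init_sketch(struct outq_sketch *q)
    {
        memset(q, 0, sizeof(*q));
        q->empty = 1;
    }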
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index d5aed3b..b14b7e3 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -1564,12 +1564,17 @@
 	struct cfg80211_registered_device *dev;
 	s64 filter_wiphy = -1;
 	bool split = false;
-	struct nlattr **tb = nl80211_fam.attrbuf;
+	struct nlattr **tb;
 	int res;
 
+	/* will be zeroed in nlmsg_parse() */
+	tb = kmalloc(sizeof(*tb) * (NL80211_ATTR_MAX + 1), GFP_KERNEL);
+	if (!tb)
+		return -ENOMEM;
+
 	mutex_lock(&cfg80211_mutex);
 	res = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize,
-			  tb, nl80211_fam.maxattr, nl80211_policy);
+			  tb, NL80211_ATTR_MAX, nl80211_policy);
 	if (res == 0) {
 		split = tb[NL80211_ATTR_SPLIT_WIPHY_DUMP];
 		if (tb[NL80211_ATTR_WIPHY])
@@ -1583,6 +1588,7 @@
 			netdev = dev_get_by_index(sock_net(skb->sk), ifidx);
 			if (!netdev) {
 				mutex_unlock(&cfg80211_mutex);
+				kfree(tb);
 				return -ENODEV;
 			}
 			if (netdev->ieee80211_ptr) {
@@ -1593,6 +1599,7 @@
 			dev_put(netdev);
 		}
 	}
+	kfree(tb);
 
 	list_for_each_entry(dev, &cfg80211_rdev_list, list) {
 		if (!net_eq(wiphy_net(&dev->wiphy), sock_net(skb->sk)))
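The nl80211.c change stops the wiphy dump from parsing into the family-wide nl80211_fam.attrbuf, which nlmsg_parse() zeroes, and instead allocates a private table of NL80211_ATTR_MAX + 1 attribute pointers per dump, freeing it on every exit path; concurrent dumps therefore no longer clobber each other's parsed attributes. A user-space sketch of the per-call scratch table (sizes and names are illustrative):

    #include <stdlib.h>

    #define ATTR_MAX_SKETCH 16

    /* Each dump gets its own attribute table; a shared static buffer would be
     * silently rewritten by whichever parse ran last. */
    static int run_dump(void)
    {
        void **tb = calloc(ATTR_MAX_SKETCH + 1, sizeof(*tb));
        int err = 0;

        if (!tb)
            return -1;                    /* -ENOMEM in the kernel */

        /* ... parse the request into tb[] and walk the registered devices ... */

        free(tb);                         /* freed on every return path */
        return err;
    }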
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 8337663..f97869f 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -149,7 +149,7 @@
 
 ld_flags       = $(LDFLAGS) $(ldflags-y)
 
-dtc_cpp_flags  = -Wp,-MD,$(depfile).pre -nostdinc                        \
+dtc_cpp_flags  = -Wp,-MD,$(depfile).pre.tmp -nostdinc                    \
 		 -I$(srctree)/arch/$(SRCARCH)/boot/dts                   \
 		 -I$(srctree)/arch/$(SRCARCH)/boot/dts/include           \
 		 -undef -D__DTS__
@@ -265,13 +265,13 @@
 cmd_dtc = $(CPP) $(dtc_cpp_flags) -x assembler-with-cpp -o $(dtc-tmp) $< ; \
 	$(objtree)/scripts/dtc/dtc -O dtb -o $@ -b 0 \
 		-i $(dir $<) $(DTC_FLAGS) \
-		-d $(depfile).dtc $(dtc-tmp) ; \
-	cat $(depfile).pre $(depfile).dtc > $(depfile)
+		-d $(depfile).dtc.tmp $(dtc-tmp) ; \
+	cat $(depfile).pre.tmp $(depfile).dtc.tmp > $(depfile)
 
 $(obj)/%.dtb: $(src)/%.dts FORCE
 	$(call if_changed_dep,dtc)
 
-dtc-tmp = $(subst $(comma),_,$(dot-target).dts)
+dtc-tmp = $(subst $(comma),_,$(dot-target).dts.tmp)
 
 # Bzip2
 # ---------------------------------------------------------------------------
diff --git a/scripts/dtc/dtc-lexer.l b/scripts/dtc/dtc-lexer.l
index 254d5af..3b41bfc 100644
--- a/scripts/dtc/dtc-lexer.l
+++ b/scripts/dtc/dtc-lexer.l
@@ -71,7 +71,7 @@
 			push_input_file(name);
 		}
 
-<*>^"#"(line)?{WS}+[0-9]+{WS}+{STRING}({WS}+[0-9]+)? {
+<*>^"#"(line)?[ \t]+[0-9]+[ \t]+{STRING}([ \t]+[0-9]+)? {
 			char *line, *tmp, *fn;
 			/* skip text before line # */
 			line = yytext;
diff --git a/scripts/dtc/dtc-lexer.lex.c_shipped b/scripts/dtc/dtc-lexer.lex.c_shipped
index a6c5fcd..2d30f41 100644
--- a/scripts/dtc/dtc-lexer.lex.c_shipped
+++ b/scripts/dtc/dtc-lexer.lex.c_shipped
@@ -405,19 +405,19 @@
 static yyconst flex_int32_t yy_ec[256] =
     {   0,
         1,    1,    1,    1,    1,    1,    1,    1,    2,    3,
-        2,    2,    2,    1,    1,    1,    1,    1,    1,    1,
+        4,    4,    4,    1,    1,    1,    1,    1,    1,    1,
         1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
-        1,    2,    4,    5,    6,    1,    1,    7,    8,    1,
-        1,    9,   10,   10,   11,   10,   12,   13,   14,   15,
-       15,   15,   15,   15,   15,   15,   15,   16,    1,   17,
-       18,   19,   10,   10,   20,   20,   20,   20,   20,   20,
-       21,   21,   21,   21,   21,   22,   21,   21,   21,   21,
-       21,   21,   21,   21,   23,   21,   21,   24,   21,   21,
-        1,   25,   26,    1,   21,    1,   20,   27,   28,   29,
+        1,    2,    5,    6,    7,    1,    1,    8,    9,    1,
+        1,   10,   11,   11,   12,   11,   13,   14,   15,   16,
+       16,   16,   16,   16,   16,   16,   16,   17,    1,   18,
+       19,   20,   11,   11,   21,   21,   21,   21,   21,   21,
+       22,   22,   22,   22,   22,   23,   22,   22,   22,   22,
+       22,   22,   22,   22,   24,   22,   22,   25,   22,   22,
+        1,   26,   27,    1,   22,    1,   21,   28,   29,   30,
 
-       30,   20,   21,   21,   31,   21,   21,   32,   33,   34,
-       35,   36,   21,   37,   38,   39,   40,   41,   21,   24,
-       42,   21,   43,   44,   45,    1,    1,    1,    1,    1,
+       31,   21,   22,   22,   32,   22,   22,   33,   34,   35,
+       36,   37,   22,   38,   39,   40,   41,   42,   22,   25,
+       43,   22,   44,   45,   46,    1,    1,    1,    1,    1,
         1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
         1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
         1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
@@ -434,36 +434,36 @@
         1,    1,    1,    1,    1
     } ;
 
-static yyconst flex_int32_t yy_meta[46] =
+static yyconst flex_int32_t yy_meta[47] =
     {   0,
-        1,    1,    1,    1,    1,    2,    3,    1,    2,    2,
-        2,    4,    5,    5,    5,    6,    1,    1,    1,    7,
-        8,    8,    8,    8,    1,    1,    7,    7,    7,    7,
-        8,    8,    8,    8,    8,    8,    8,    8,    8,    8,
-        8,    8,    3,    1,    1
+        1,    1,    1,    1,    1,    1,    2,    3,    1,    2,
+        2,    2,    4,    5,    5,    5,    6,    1,    1,    1,
+        7,    8,    8,    8,    8,    1,    1,    7,    7,    7,
+        7,    8,    8,    8,    8,    8,    8,    8,    8,    8,
+        8,    8,    8,    3,    1,    1
     } ;
 
 static yyconst flex_int16_t yy_base[175] =
     {   0,
-        0,  388,  381,   40,   41,  386,   71,  385,   34,   44,
-      390,  395,   60,   62,  371,  112,  111,  111,  111,  104,
-      370,  106,  371,  342,  124,  119,    0,  144,  395,    0,
-      123,    0,  159,  153,  165,  167,  395,  130,  395,  382,
-      395,    0,  372,  122,  395,  157,  374,  379,  350,   21,
-      346,  349,  395,  395,  395,  395,  395,  362,  395,  395,
-      181,  346,  342,  395,  359,    0,  191,  343,  190,  351,
-      350,    0,    0,    0,  173,  362,  177,  367,  357,  329,
-      335,  328,  337,  331,  206,  329,  334,  327,  395,  338,
-      170,  314,  346,  345,  318,  325,  343,  158,  316,  212,
+        0,  385,  378,   40,   41,  383,   72,  382,   34,   44,
+      388,  393,   61,  117,  368,  116,  115,  115,  115,   48,
+      367,  107,  368,  339,  127,  120,    0,  147,  393,    0,
+      127,    0,  133,  156,  168,  153,  393,  125,  393,  380,
+      393,    0,  369,  127,  393,  160,  371,  377,  347,   21,
+      343,  346,  393,  393,  393,  393,  393,  359,  393,  393,
+      183,  343,  339,  393,  356,    0,  183,  340,  187,  348,
+      347,    0,    0,    0,  178,  359,  195,  365,  354,  326,
+      332,  325,  334,  328,  204,  326,  331,  324,  393,  335,
+      150,  311,  343,  342,  315,  322,  340,  179,  313,  207,
 
-      322,  319,  320,  395,  340,  336,  308,  305,  314,  304,
-      295,  138,  208,  220,  395,  292,  305,  265,  264,  254,
-      201,  222,  285,  275,  273,  270,  236,  235,  225,  115,
-      395,  395,  252,  216,  216,  217,  214,  230,  209,  220,
-      213,  239,  211,  217,  216,  209,  229,  395,  240,  225,
-      206,  169,  395,  395,  116,  106,   99,   54,  395,  395,
-      254,  260,  268,  272,  276,  282,  289,  293,  301,  309,
-      313,  319,  327,  335
+      319,  316,  317,  393,  337,  333,  305,  302,  311,  301,
+      310,  190,  338,  337,  393,  307,  322,  301,  305,  277,
+      208,  311,  307,  278,  271,  270,  248,  246,  213,  130,
+      393,  393,  263,  235,  207,  221,  218,  229,  213,  213,
+      206,  234,  218,  210,  208,  193,  219,  393,  223,  204,
+      176,  157,  393,  393,  120,  106,   97,  119,  393,  393,
+      245,  251,  259,  263,  267,  273,  280,  284,  292,  300,
+      304,  310,  318,  326
     } ;
 
 static yyconst flex_int16_t yy_def[175] =
@@ -489,108 +489,108 @@
       160,  160,  160,  160
     } ;
 
-static yyconst flex_int16_t yy_nxt[441] =
+static yyconst flex_int16_t yy_nxt[440] =
     {   0,
-       12,   13,   14,   15,   16,   12,   17,   18,   12,   12,
-       12,   19,   12,   12,   12,   12,   20,   21,   22,   23,
-       23,   23,   23,   23,   12,   12,   23,   23,   23,   23,
+       12,   13,   14,   13,   15,   16,   12,   17,   18,   12,
+       12,   12,   19,   12,   12,   12,   12,   20,   21,   22,
+       23,   23,   23,   23,   23,   12,   12,   23,   23,   23,
        23,   23,   23,   23,   23,   23,   23,   23,   23,   23,
-       23,   23,   12,   24,   12,   25,   34,   35,   35,   25,
-       81,   26,   26,   27,   27,   27,   34,   35,   35,   82,
-       28,   36,   36,   36,   36,  159,   29,   28,   28,   28,
-       28,   12,   13,   14,   15,   16,   30,   17,   18,   30,
-       30,   30,   26,   30,   30,   30,   12,   20,   21,   22,
-       31,   31,   31,   31,   31,   32,   12,   31,   31,   31,
+       23,   23,   23,   12,   24,   12,   25,   34,   35,   35,
+       25,   81,   26,   26,   27,   27,   27,   34,   35,   35,
+       82,   28,   36,   36,   36,   53,   54,   29,   28,   28,
+       28,   28,   12,   13,   14,   13,   15,   16,   30,   17,
+       18,   30,   30,   30,   26,   30,   30,   30,   12,   20,
+       21,   22,   31,   31,   31,   31,   31,   32,   12,   31,
 
        31,   31,   31,   31,   31,   31,   31,   31,   31,   31,
-       31,   31,   31,   12,   24,   12,   39,   41,   45,   47,
-       53,   54,   48,   56,   57,   61,   61,   47,   66,   45,
-       48,   66,   66,   66,   39,   46,   40,   49,   59,   50,
-      158,   51,  122,   52,  157,   49,   46,   50,  136,   63,
-      137,   52,  156,   43,   40,   62,   65,   65,   65,   59,
-       61,   61,  123,   65,   75,   69,   69,   69,   36,   36,
-       65,   65,   65,   65,   70,   71,   72,   69,   69,   69,
-       45,   46,   61,   61,  109,   77,   70,   71,   93,  110,
-       68,   70,   71,   85,   85,   85,   66,   46,  155,   66,
+       31,   31,   31,   31,   31,   12,   24,   12,   36,   36,
+       36,   39,   41,   45,   47,   56,   57,   48,   61,   47,
+       39,  159,   48,   66,   61,   45,   66,   66,   66,  158,
+       46,   40,   49,   59,   50,  157,   51,   49,   52,   50,
+       40,   63,   46,   52,   36,   36,   36,  156,   43,   62,
+       65,   65,   65,   59,  136,   68,  137,   65,   75,   69,
+       69,   69,   70,   71,   65,   65,   65,   65,   70,   71,
+       72,   69,   69,   69,   61,   46,   45,  155,  154,   66,
+       70,   71,   66,   66,   66,  122,   85,   85,   85,   59,
 
-       66,   66,   69,   69,   69,  122,   59,  100,  100,   61,
-       61,   70,   71,  100,  100,  148,  112,  154,   85,   85,
-       85,   61,   61,  129,  129,  123,  129,  129,  135,  135,
-      135,  142,  142,  148,  143,  149,  153,  135,  135,  135,
-      142,  142,  160,  143,  152,  151,  150,  146,  145,  144,
-      141,  140,  139,  149,   38,   38,   38,   38,   38,   38,
-       38,   38,   42,  138,  134,  133,   42,   42,   44,   44,
-       44,   44,   44,   44,   44,   44,   58,   58,   58,   58,
-       64,  132,   64,   66,  131,  130,   66,  160,   66,   66,
-       67,  128,  127,   67,   67,   67,   67,   73,  126,   73,
+       69,   69,   69,   46,   77,  100,  109,   93,  100,   70,
+       71,  110,  112,  122,  129,  123,  153,   85,   85,   85,
+      135,  135,  135,  148,  148,  160,  135,  135,  135,  152,
+      142,  142,  142,  123,  143,  142,  142,  142,  151,  143,
+      150,  146,  145,  149,  149,   38,   38,   38,   38,   38,
+       38,   38,   38,   42,  144,  141,  140,   42,   42,   44,
+       44,   44,   44,   44,   44,   44,   44,   58,   58,   58,
+       58,   64,  139,   64,   66,  138,  134,   66,  133,   66,
+       66,   67,  132,  131,   67,   67,   67,   67,   73,  130,
+       73,   73,   76,   76,   76,   76,   76,   76,   76,   76,
 
-       73,   76,   76,   76,   76,   76,   76,   76,   76,   78,
-       78,   78,   78,   78,   78,   78,   78,   91,  125,   91,
-       92,  124,   92,   92,  120,   92,   92,  121,  121,  121,
-      121,  121,  121,  121,  121,  147,  147,  147,  147,  147,
-      147,  147,  147,  119,  118,  117,  116,  115,   47,  114,
-      110,  113,  111,  108,  107,  106,   48,  105,  104,   89,
-      103,  102,  101,   99,   98,   97,   96,   95,   94,   79,
-       77,   90,   89,   88,   59,   87,   86,   59,   84,   83,
-       80,   79,   77,   74,  160,   60,   59,   55,   37,  160,
-       33,   25,   26,   25,   11,  160,  160,  160,  160,  160,
+       78,   78,   78,   78,   78,   78,   78,   78,   91,  160,
+       91,   92,  129,   92,   92,  128,   92,   92,  121,  121,
+      121,  121,  121,  121,  121,  121,  147,  147,  147,  147,
+      147,  147,  147,  147,  127,  126,  125,  124,   61,   61,
+      120,  119,  118,  117,  116,  115,   47,  114,  110,  113,
+      111,  108,  107,  106,   48,  105,  104,   89,  103,  102,
+      101,   99,   98,   97,   96,   95,   94,   79,   77,   90,
+       89,   88,   59,   87,   86,   59,   84,   83,   80,   79,
+       77,   74,  160,   60,   59,   55,   37,  160,   33,   25,
+       26,   25,   11,  160,  160,  160,  160,  160,  160,  160,
 
       160,  160,  160,  160,  160,  160,  160,  160,  160,  160,
       160,  160,  160,  160,  160,  160,  160,  160,  160,  160,
       160,  160,  160,  160,  160,  160,  160,  160,  160,  160,
-      160,  160,  160,  160,  160,  160,  160,  160,  160,  160
+      160,  160,  160,  160,  160,  160,  160,  160,  160
     } ;
 
-static yyconst flex_int16_t yy_chk[441] =
+static yyconst flex_int16_t yy_chk[440] =
     {   0,
         1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
         1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
         1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
         1,    1,    1,    1,    1,    1,    1,    1,    1,    1,
-        1,    1,    1,    1,    1,    4,    9,    9,    9,   10,
-       50,    4,    5,    5,    5,    5,   10,   10,   10,   50,
-        5,   13,   13,   14,   14,  158,    5,    5,    5,    5,
-        5,    7,    7,    7,    7,    7,    7,    7,    7,    7,
+        1,    1,    1,    1,    1,    1,    4,    9,    9,    9,
+       10,   50,    4,    5,    5,    5,    5,   10,   10,   10,
+       50,    5,   13,   13,   13,   20,   20,    5,    5,    5,
+        5,    5,    7,    7,    7,    7,    7,    7,    7,    7,
         7,    7,    7,    7,    7,    7,    7,    7,    7,    7,
         7,    7,    7,    7,    7,    7,    7,    7,    7,    7,
 
         7,    7,    7,    7,    7,    7,    7,    7,    7,    7,
-        7,    7,    7,    7,    7,    7,   16,   17,   18,   19,
-       20,   20,   19,   22,   22,   25,   25,   26,   31,   44,
-       26,   31,   31,   31,   38,   18,   16,   19,   31,   19,
-      157,   19,  112,   19,  156,   26,   44,   26,  130,   26,
-      130,   26,  155,   17,   38,   25,   28,   28,   28,   28,
-       33,   33,  112,   28,   46,   34,   34,   34,   36,   36,
-       28,   28,   28,   28,   34,   34,   34,   35,   35,   35,
-       75,   46,   61,   61,   98,   77,   35,   35,   77,   98,
-       33,   91,   91,   61,   61,   61,   67,   75,  152,   67,
+        7,    7,    7,    7,    7,    7,    7,    7,   14,   14,
+       14,   16,   17,   18,   19,   22,   22,   19,   25,   26,
+       38,  158,   26,   31,   33,   44,   31,   31,   31,  157,
+       18,   16,   19,   31,   19,  156,   19,   26,   19,   26,
+       38,   26,   44,   26,   36,   36,   36,  155,   17,   25,
+       28,   28,   28,   28,  130,   33,  130,   28,   46,   34,
+       34,   34,   91,   91,   28,   28,   28,   28,   34,   34,
+       34,   35,   35,   35,   61,   46,   75,  152,  151,   67,
+       35,   35,   67,   67,   67,  112,   61,   61,   61,   67,
 
-       67,   67,   69,   69,   69,  121,   67,   85,   85,  113,
-      113,   69,   69,  100,  100,  143,  100,  151,   85,   85,
-       85,  114,  114,  122,  122,  121,  129,  129,  135,  135,
-      135,  138,  138,  147,  138,  143,  150,  129,  129,  129,
-      142,  142,  149,  142,  146,  145,  144,  141,  140,  139,
-      137,  136,  134,  147,  161,  161,  161,  161,  161,  161,
-      161,  161,  162,  133,  128,  127,  162,  162,  163,  163,
-      163,  163,  163,  163,  163,  163,  164,  164,  164,  164,
-      165,  126,  165,  166,  125,  124,  166,  123,  166,  166,
-      167,  120,  119,  167,  167,  167,  167,  168,  118,  168,
+       69,   69,   69,   75,   77,   85,   98,   77,  100,   69,
+       69,   98,  100,  121,  129,  112,  150,   85,   85,   85,
+      135,  135,  135,  143,  147,  149,  129,  129,  129,  146,
+      138,  138,  138,  121,  138,  142,  142,  142,  145,  142,
+      144,  141,  140,  143,  147,  161,  161,  161,  161,  161,
+      161,  161,  161,  162,  139,  137,  136,  162,  162,  163,
+      163,  163,  163,  163,  163,  163,  163,  164,  164,  164,
+      164,  165,  134,  165,  166,  133,  128,  166,  127,  166,
+      166,  167,  126,  125,  167,  167,  167,  167,  168,  124,
+      168,  168,  169,  169,  169,  169,  169,  169,  169,  169,
 
-      168,  169,  169,  169,  169,  169,  169,  169,  169,  170,
-      170,  170,  170,  170,  170,  170,  170,  171,  117,  171,
-      172,  116,  172,  172,  111,  172,  172,  173,  173,  173,
-      173,  173,  173,  173,  173,  174,  174,  174,  174,  174,
-      174,  174,  174,  110,  109,  108,  107,  106,  105,  103,
-      102,  101,   99,   97,   96,   95,   94,   93,   92,   90,
-       88,   87,   86,   84,   83,   82,   81,   80,   79,   78,
-       76,   71,   70,   68,   65,   63,   62,   58,   52,   51,
-       49,   48,   47,   43,   40,   24,   23,   21,   15,   11,
-        8,    6,    3,    2,  160,  160,  160,  160,  160,  160,
+      170,  170,  170,  170,  170,  170,  170,  170,  171,  123,
+      171,  172,  122,  172,  172,  120,  172,  172,  173,  173,
+      173,  173,  173,  173,  173,  173,  174,  174,  174,  174,
+      174,  174,  174,  174,  119,  118,  117,  116,  114,  113,
+      111,  110,  109,  108,  107,  106,  105,  103,  102,  101,
+       99,   97,   96,   95,   94,   93,   92,   90,   88,   87,
+       86,   84,   83,   82,   81,   80,   79,   78,   76,   71,
+       70,   68,   65,   63,   62,   58,   52,   51,   49,   48,
+       47,   43,   40,   24,   23,   21,   15,   11,    8,    6,
+        3,    2,  160,  160,  160,  160,  160,  160,  160,  160,
 
       160,  160,  160,  160,  160,  160,  160,  160,  160,  160,
       160,  160,  160,  160,  160,  160,  160,  160,  160,  160,
       160,  160,  160,  160,  160,  160,  160,  160,  160,  160,
-      160,  160,  160,  160,  160,  160,  160,  160,  160,  160
+      160,  160,  160,  160,  160,  160,  160,  160,  160
     } ;
 
 static yy_state_type yy_last_accepting_state;
diff --git a/scripts/dtc/dtc-parser.tab.c_shipped b/scripts/dtc/dtc-parser.tab.c_shipped
index 4af5590..ee1d8c3 100644
--- a/scripts/dtc/dtc-parser.tab.c_shipped
+++ b/scripts/dtc/dtc-parser.tab.c_shipped
@@ -1,10 +1,8 @@
+/* A Bison parser, made by GNU Bison 2.5.  */
 
-/* A Bison parser, made by GNU Bison 2.4.1.  */
-
-/* Skeleton implementation for Bison's Yacc-like parsers in C
+/* Bison implementation for Yacc-like parsers in C
    
-      Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003, 2004, 2005, 2006
-   Free Software Foundation, Inc.
+      Copyright (C) 1984, 1989-1990, 2000-2011 Free Software Foundation, Inc.
    
    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
@@ -46,7 +44,7 @@
 #define YYBISON 1
 
 /* Bison version.  */
-#define YYBISON_VERSION "2.4.1"
+#define YYBISON_VERSION "2.5"
 
 /* Skeleton name.  */
 #define YYSKELETON_NAME "yacc.c"
@@ -67,7 +65,7 @@
 
 /* Copy the first part of user declarations.  */
 
-/* Line 189 of yacc.c  */
+/* Line 268 of yacc.c  */
 #line 21 "dtc-parser.y"
 
 #include <stdio.h>
@@ -88,8 +86,8 @@
 static unsigned char eval_char_literal(const char *s);
 
 
-/* Line 189 of yacc.c  */
-#line 93 "dtc-parser.tab.c"
+/* Line 268 of yacc.c  */
+#line 91 "dtc-parser.tab.c"
 
 /* Enabling traces.  */
 #ifndef YYDEBUG
@@ -147,7 +145,7 @@
 typedef union YYSTYPE
 {
 
-/* Line 214 of yacc.c  */
+/* Line 293 of yacc.c  */
 #line 40 "dtc-parser.y"
 
 	char *propnodename;
@@ -171,8 +169,8 @@
 
 
 
-/* Line 214 of yacc.c  */
-#line 176 "dtc-parser.tab.c"
+/* Line 293 of yacc.c  */
+#line 174 "dtc-parser.tab.c"
 } YYSTYPE;
 # define YYSTYPE_IS_TRIVIAL 1
 # define yystype YYSTYPE /* obsolescent; will be withdrawn */
@@ -183,8 +181,8 @@
 /* Copy the second part of user declarations.  */
 
 
-/* Line 264 of yacc.c  */
-#line 188 "dtc-parser.tab.c"
+/* Line 343 of yacc.c  */
+#line 186 "dtc-parser.tab.c"
 
 #ifdef short
 # undef short
@@ -234,7 +232,7 @@
 #define YYSIZE_MAXIMUM ((YYSIZE_T) -1)
 
 #ifndef YY_
-# if YYENABLE_NLS
+# if defined YYENABLE_NLS && YYENABLE_NLS
 #  if ENABLE_NLS
 #   include <libintl.h> /* INFRINGES ON USER NAME SPACE */
 #   define YY_(msgid) dgettext ("bison-runtime", msgid)
@@ -287,11 +285,11 @@
 #    define alloca _alloca
 #   else
 #    define YYSTACK_ALLOC alloca
-#    if ! defined _ALLOCA_H && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \
+#    if ! defined _ALLOCA_H && ! defined EXIT_SUCCESS && (defined __STDC__ || defined __C99__FUNC__ \
      || defined __cplusplus || defined _MSC_VER)
 #     include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
-#     ifndef _STDLIB_H
-#      define _STDLIB_H 1
+#     ifndef EXIT_SUCCESS
+#      define EXIT_SUCCESS 0
 #     endif
 #    endif
 #   endif
@@ -314,24 +312,24 @@
 #  ifndef YYSTACK_ALLOC_MAXIMUM
 #   define YYSTACK_ALLOC_MAXIMUM YYSIZE_MAXIMUM
 #  endif
-#  if (defined __cplusplus && ! defined _STDLIB_H \
+#  if (defined __cplusplus && ! defined EXIT_SUCCESS \
        && ! ((defined YYMALLOC || defined malloc) \
 	     && (defined YYFREE || defined free)))
 #   include <stdlib.h> /* INFRINGES ON USER NAME SPACE */
-#   ifndef _STDLIB_H
-#    define _STDLIB_H 1
+#   ifndef EXIT_SUCCESS
+#    define EXIT_SUCCESS 0
 #   endif
 #  endif
 #  ifndef YYMALLOC
 #   define YYMALLOC malloc
-#   if ! defined malloc && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \
+#   if ! defined malloc && ! defined EXIT_SUCCESS && (defined __STDC__ || defined __C99__FUNC__ \
      || defined __cplusplus || defined _MSC_VER)
 void *malloc (YYSIZE_T); /* INFRINGES ON USER NAME SPACE */
 #   endif
 #  endif
 #  ifndef YYFREE
 #   define YYFREE free
-#   if ! defined free && ! defined _STDLIB_H && (defined __STDC__ || defined __C99__FUNC__ \
+#   if ! defined free && ! defined EXIT_SUCCESS && (defined __STDC__ || defined __C99__FUNC__ \
      || defined __cplusplus || defined _MSC_VER)
 void free (void *); /* INFRINGES ON USER NAME SPACE */
 #   endif
@@ -360,23 +358,7 @@
      ((N) * (sizeof (yytype_int16) + sizeof (YYSTYPE)) \
       + YYSTACK_GAP_MAXIMUM)
 
-/* Copy COUNT objects from FROM to TO.  The source and destination do
-   not overlap.  */
-# ifndef YYCOPY
-#  if defined __GNUC__ && 1 < __GNUC__
-#   define YYCOPY(To, From, Count) \
-      __builtin_memcpy (To, From, (Count) * sizeof (*(From)))
-#  else
-#   define YYCOPY(To, From, Count)		\
-      do					\
-	{					\
-	  YYSIZE_T yyi;				\
-	  for (yyi = 0; yyi < (Count); yyi++)	\
-	    (To)[yyi] = (From)[yyi];		\
-	}					\
-      while (YYID (0))
-#  endif
-# endif
+# define YYCOPY_NEEDED 1
 
 /* Relocate STACK from its old location to the new one.  The
    local variables YYSIZE and YYSTACKSIZE give the old and new number of
@@ -396,6 +378,26 @@
 
 #endif
 
+#if defined YYCOPY_NEEDED && YYCOPY_NEEDED
+/* Copy COUNT objects from FROM to TO.  The source and destination do
+   not overlap.  */
+# ifndef YYCOPY
+#  if defined __GNUC__ && 1 < __GNUC__
+#   define YYCOPY(To, From, Count) \
+      __builtin_memcpy (To, From, (Count) * sizeof (*(From)))
+#  else
+#   define YYCOPY(To, From, Count)		\
+      do					\
+	{					\
+	  YYSIZE_T yyi;				\
+	  for (yyi = 0; yyi < (Count); yyi++)	\
+	    (To)[yyi] = (From)[yyi];		\
+	}					\
+      while (YYID (0))
+#  endif
+# endif
+#endif /* !YYCOPY_NEEDED */
+
 /* YYFINAL -- State number of the termination state.  */
 #define YYFINAL  4
 /* YYLAST -- Last index in YYTABLE.  */
@@ -571,8 +573,8 @@
        2,     0,     2,     2,     0,     2,     2,     2,     3,     2
 };
 
-/* YYDEFACT[STATE-NAME] -- Default rule to reduce with in state
-   STATE-NUM when YYTABLE doesn't specify something else to do.  Zero
+/* YYDEFACT[STATE-NAME] -- Default reduction number in state STATE-NUM.
+   Performed when YYTABLE doesn't specify something else to do.  Zero
    means the default is an error.  */
 static const yytype_uint8 yydefact[] =
 {
@@ -633,8 +635,7 @@
 
 /* YYTABLE[YYPACT[STATE-NUM]].  What to do in state STATE-NUM.  If
    positive, shift that token.  If negative, reduce the rule which
-   number is the opposite.  If zero, do what YYDEFACT says.
-   If YYTABLE_NINF, syntax error.  */
+   number is the opposite.  If YYTABLE_NINF, syntax error.  */
 #define YYTABLE_NINF -1
 static const yytype_uint8 yytable[] =
 {
@@ -654,6 +655,12 @@
      137,     0,    73,   139
 };
 
+#define yypact_value_is_default(yystate) \
+  ((yystate) == (-78))
+
+#define yytable_value_is_error(yytable_value) \
+  YYID (0)
+
 static const yytype_int16 yycheck[] =
 {
        5,    38,    39,    17,    18,    19,    12,    12,    17,    18,
@@ -705,9 +712,18 @@
 
 /* Like YYERROR except do call yyerror.  This remains here temporarily
    to ease the transition to the new meaning of YYERROR, for GCC.
-   Once GCC version 2 has supplanted version 1, this can go.  */
+   Once GCC version 2 has supplanted version 1, this can go.  However,
+   YYFAIL appears to be in use.  Nevertheless, it is formally deprecated
+   in Bison 2.4.2's NEWS entry, where a plan to phase it out is
+   discussed.  */
 
 #define YYFAIL		goto yyerrlab
+#if defined YYFAIL
+  /* This is here to suppress warnings from the GCC cpp's
+     -Wunused-macros.  Normally we don't worry about that warning, but
+     some users do, and we want to make it easy for users to remove
+     YYFAIL uses, which will produce warnings from Bison 2.5.  */
+#endif
 
 #define YYRECOVERING()  (!!yyerrstatus)
 
@@ -717,7 +733,6 @@
     {								\
       yychar = (Token);						\
       yylval = (Value);						\
-      yytoken = YYTRANSLATE (yychar);				\
       YYPOPSTACK (1);						\
       goto yybackup;						\
     }								\
@@ -759,19 +774,10 @@
 #endif
 
 
-/* YY_LOCATION_PRINT -- Print the location on the stream.
-   This macro was not mandated originally: define only if we know
-   we won't break user code: when these are the locations we know.  */
+/* This macro is provided for backward compatibility. */
 
 #ifndef YY_LOCATION_PRINT
-# if YYLTYPE_IS_TRIVIAL
-#  define YY_LOCATION_PRINT(File, Loc)			\
-     fprintf (File, "%d.%d-%d.%d",			\
-	      (Loc).first_line, (Loc).first_column,	\
-	      (Loc).last_line,  (Loc).last_column)
-# else
-#  define YY_LOCATION_PRINT(File, Loc) ((void) 0)
-# endif
+# define YY_LOCATION_PRINT(File, Loc) ((void) 0)
 #endif
 
 
@@ -963,7 +969,6 @@
 # define YYMAXDEPTH 10000
 #endif
 
-
 
 #if YYERROR_VERBOSE
 
@@ -1066,115 +1071,142 @@
 }
 # endif
 
-/* Copy into YYRESULT an error message about the unexpected token
-   YYCHAR while in state YYSTATE.  Return the number of bytes copied,
-   including the terminating null byte.  If YYRESULT is null, do not
-   copy anything; just return the number of bytes that would be
-   copied.  As a special case, return 0 if an ordinary "syntax error"
-   message will do.  Return YYSIZE_MAXIMUM if overflow occurs during
-   size calculation.  */
-static YYSIZE_T
-yysyntax_error (char *yyresult, int yystate, int yychar)
+/* Copy into *YYMSG, which is of size *YYMSG_ALLOC, an error message
+   about the unexpected token YYTOKEN for the state stack whose top is
+   YYSSP.
+
+   Return 0 if *YYMSG was successfully written.  Return 1 if *YYMSG is
+   not large enough to hold the message.  In that case, also set
+   *YYMSG_ALLOC to the required number of bytes.  Return 2 if the
+   required number of bytes is too large to store.  */
+static int
+yysyntax_error (YYSIZE_T *yymsg_alloc, char **yymsg,
+                yytype_int16 *yyssp, int yytoken)
 {
-  int yyn = yypact[yystate];
+  YYSIZE_T yysize0 = yytnamerr (0, yytname[yytoken]);
+  YYSIZE_T yysize = yysize0;
+  YYSIZE_T yysize1;
+  enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 };
+  /* Internationalized format string. */
+  const char *yyformat = 0;
+  /* Arguments of yyformat. */
+  char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM];
+  /* Number of reported tokens (one for the "unexpected", one per
+     "expected"). */
+  int yycount = 0;
 
-  if (! (YYPACT_NINF < yyn && yyn <= YYLAST))
-    return 0;
-  else
+  /* There are many possibilities here to consider:
+     - Assume YYFAIL is not used.  It's too flawed to consider.  See
+       <http://lists.gnu.org/archive/html/bison-patches/2009-12/msg00024.html>
+       for details.  YYERROR is fine as it does not invoke this
+       function.
+     - If this state is a consistent state with a default action, then
+       the only way this function was invoked is if the default action
+       is an error action.  In that case, don't check for expected
+       tokens because there are none.
+     - The only way there can be no lookahead present (in yychar) is if
+       this state is a consistent state with a default action.  Thus,
+       detecting the absence of a lookahead is sufficient to determine
+       that there is no unexpected or expected token to report.  In that
+       case, just report a simple "syntax error".
+     - Don't assume there isn't a lookahead just because this state is a
+       consistent state with a default action.  There might have been a
+       previous inconsistent state, consistent state with a non-default
+       action, or user semantic action that manipulated yychar.
+     - Of course, the expected token list depends on states to have
+       correct lookahead information, and it depends on the parser not
+       to perform extra reductions after fetching a lookahead from the
+       scanner and before detecting a syntax error.  Thus, state merging
+       (from LALR or IELR) and default reductions corrupt the expected
+       token list.  However, the list is correct for canonical LR with
+       one exception: it will still contain any token that will not be
+       accepted due to an error action in a later state.
+  */
+  if (yytoken != YYEMPTY)
     {
-      int yytype = YYTRANSLATE (yychar);
-      YYSIZE_T yysize0 = yytnamerr (0, yytname[yytype]);
-      YYSIZE_T yysize = yysize0;
-      YYSIZE_T yysize1;
-      int yysize_overflow = 0;
-      enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 };
-      char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM];
-      int yyx;
+      int yyn = yypact[*yyssp];
+      yyarg[yycount++] = yytname[yytoken];
+      if (!yypact_value_is_default (yyn))
+        {
+          /* Start YYX at -YYN if negative to avoid negative indexes in
+             YYCHECK.  In other words, skip the first -YYN actions for
+             this state because they are default actions.  */
+          int yyxbegin = yyn < 0 ? -yyn : 0;
+          /* Stay within bounds of both yycheck and yytname.  */
+          int yychecklim = YYLAST - yyn + 1;
+          int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS;
+          int yyx;
 
-# if 0
-      /* This is so xgettext sees the translatable formats that are
-	 constructed on the fly.  */
-      YY_("syntax error, unexpected %s");
-      YY_("syntax error, unexpected %s, expecting %s");
-      YY_("syntax error, unexpected %s, expecting %s or %s");
-      YY_("syntax error, unexpected %s, expecting %s or %s or %s");
-      YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s");
-# endif
-      char *yyfmt;
-      char const *yyf;
-      static char const yyunexpected[] = "syntax error, unexpected %s";
-      static char const yyexpecting[] = ", expecting %s";
-      static char const yyor[] = " or %s";
-      char yyformat[sizeof yyunexpected
-		    + sizeof yyexpecting - 1
-		    + ((YYERROR_VERBOSE_ARGS_MAXIMUM - 2)
-		       * (sizeof yyor - 1))];
-      char const *yyprefix = yyexpecting;
-
-      /* Start YYX at -YYN if negative to avoid negative indexes in
-	 YYCHECK.  */
-      int yyxbegin = yyn < 0 ? -yyn : 0;
-
-      /* Stay within bounds of both yycheck and yytname.  */
-      int yychecklim = YYLAST - yyn + 1;
-      int yyxend = yychecklim < YYNTOKENS ? yychecklim : YYNTOKENS;
-      int yycount = 1;
-
-      yyarg[0] = yytname[yytype];
-      yyfmt = yystpcpy (yyformat, yyunexpected);
-
-      for (yyx = yyxbegin; yyx < yyxend; ++yyx)
-	if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR)
-	  {
-	    if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM)
-	      {
-		yycount = 1;
-		yysize = yysize0;
-		yyformat[sizeof yyunexpected - 1] = '\0';
-		break;
-	      }
-	    yyarg[yycount++] = yytname[yyx];
-	    yysize1 = yysize + yytnamerr (0, yytname[yyx]);
-	    yysize_overflow |= (yysize1 < yysize);
-	    yysize = yysize1;
-	    yyfmt = yystpcpy (yyfmt, yyprefix);
-	    yyprefix = yyor;
-	  }
-
-      yyf = YY_(yyformat);
-      yysize1 = yysize + yystrlen (yyf);
-      yysize_overflow |= (yysize1 < yysize);
-      yysize = yysize1;
-
-      if (yysize_overflow)
-	return YYSIZE_MAXIMUM;
-
-      if (yyresult)
-	{
-	  /* Avoid sprintf, as that infringes on the user's name space.
-	     Don't have undefined behavior even if the translation
-	     produced a string with the wrong number of "%s"s.  */
-	  char *yyp = yyresult;
-	  int yyi = 0;
-	  while ((*yyp = *yyf) != '\0')
-	    {
-	      if (*yyp == '%' && yyf[1] == 's' && yyi < yycount)
-		{
-		  yyp += yytnamerr (yyp, yyarg[yyi++]);
-		  yyf += 2;
-		}
-	      else
-		{
-		  yyp++;
-		  yyf++;
-		}
-	    }
-	}
-      return yysize;
+          for (yyx = yyxbegin; yyx < yyxend; ++yyx)
+            if (yycheck[yyx + yyn] == yyx && yyx != YYTERROR
+                && !yytable_value_is_error (yytable[yyx + yyn]))
+              {
+                if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM)
+                  {
+                    yycount = 1;
+                    yysize = yysize0;
+                    break;
+                  }
+                yyarg[yycount++] = yytname[yyx];
+                yysize1 = yysize + yytnamerr (0, yytname[yyx]);
+                if (! (yysize <= yysize1
+                       && yysize1 <= YYSTACK_ALLOC_MAXIMUM))
+                  return 2;
+                yysize = yysize1;
+              }
+        }
     }
+
+  switch (yycount)
+    {
+# define YYCASE_(N, S)                      \
+      case N:                               \
+        yyformat = S;                       \
+      break
+      YYCASE_(0, YY_("syntax error"));
+      YYCASE_(1, YY_("syntax error, unexpected %s"));
+      YYCASE_(2, YY_("syntax error, unexpected %s, expecting %s"));
+      YYCASE_(3, YY_("syntax error, unexpected %s, expecting %s or %s"));
+      YYCASE_(4, YY_("syntax error, unexpected %s, expecting %s or %s or %s"));
+      YYCASE_(5, YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s"));
+# undef YYCASE_
+    }
+
+  yysize1 = yysize + yystrlen (yyformat);
+  if (! (yysize <= yysize1 && yysize1 <= YYSTACK_ALLOC_MAXIMUM))
+    return 2;
+  yysize = yysize1;
+
+  if (*yymsg_alloc < yysize)
+    {
+      *yymsg_alloc = 2 * yysize;
+      if (! (yysize <= *yymsg_alloc
+             && *yymsg_alloc <= YYSTACK_ALLOC_MAXIMUM))
+        *yymsg_alloc = YYSTACK_ALLOC_MAXIMUM;
+      return 1;
+    }
+
+  /* Avoid sprintf, as that infringes on the user's name space.
+     Don't have undefined behavior even if the translation
+     produced a string with the wrong number of "%s"s.  */
+  {
+    char *yyp = *yymsg;
+    int yyi = 0;
+    while ((*yyp = *yyformat) != '\0')
+      if (*yyp == '%' && yyformat[1] == 's' && yyi < yycount)
+        {
+          yyp += yytnamerr (yyp, yyarg[yyi++]);
+          yyformat += 2;
+        }
+      else
+        {
+          yyp++;
+          yyformat++;
+        }
+  }
+  return 0;
 }
 #endif /* YYERROR_VERBOSE */
-
 
 /*-----------------------------------------------.
 | Release the memory associated to this symbol.  |
@@ -1207,6 +1239,7 @@
     }
 }
 
+
 /* Prevent warnings from -Wmissing-prototypes.  */
 #ifdef YYPARSE_PARAM
 #if defined __STDC__ || defined __cplusplus
@@ -1233,10 +1266,9 @@
 int yynerrs;
 
 
-
-/*-------------------------.
-| yyparse or yypush_parse.  |
-`-------------------------*/
+/*----------.
+| yyparse.  |
+`----------*/
 
 #ifdef YYPARSE_PARAM
 #if (defined __STDC__ || defined __C99__FUNC__ \
@@ -1260,8 +1292,6 @@
 #endif
 #endif
 {
-
-
     int yystate;
     /* Number of tokens to shift before error messages enabled.  */
     int yyerrstatus;
@@ -1416,7 +1446,7 @@
 
   /* First try to decide what to do without reference to lookahead token.  */
   yyn = yypact[yystate];
-  if (yyn == YYPACT_NINF)
+  if (yypact_value_is_default (yyn))
     goto yydefault;
 
   /* Not known => get a lookahead token if don't already have one.  */
@@ -1447,8 +1477,8 @@
   yyn = yytable[yyn];
   if (yyn <= 0)
     {
-      if (yyn == 0 || yyn == YYTABLE_NINF)
-	goto yyerrlab;
+      if (yytable_value_is_error (yyn))
+        goto yyerrlab;
       yyn = -yyn;
       goto yyreduce;
     }
@@ -1503,72 +1533,72 @@
     {
         case 2:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 110 "dtc-parser.y"
     {
 			the_boot_info = build_boot_info((yyvsp[(3) - (4)].re), (yyvsp[(4) - (4)].node),
 							guess_boot_cpuid((yyvsp[(4) - (4)].node)));
-		;}
+		}
     break;
 
   case 3:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 118 "dtc-parser.y"
     {
 			(yyval.re) = NULL;
-		;}
+		}
     break;
 
   case 4:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 122 "dtc-parser.y"
     {
 			(yyval.re) = chain_reserve_entry((yyvsp[(1) - (2)].re), (yyvsp[(2) - (2)].re));
-		;}
+		}
     break;
 
   case 5:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 129 "dtc-parser.y"
     {
 			(yyval.re) = build_reserve_entry((yyvsp[(2) - (4)].integer), (yyvsp[(3) - (4)].integer));
-		;}
+		}
     break;
 
   case 6:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 133 "dtc-parser.y"
     {
 			add_label(&(yyvsp[(2) - (2)].re)->labels, (yyvsp[(1) - (2)].labelref));
 			(yyval.re) = (yyvsp[(2) - (2)].re);
-		;}
+		}
     break;
 
   case 7:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 141 "dtc-parser.y"
     {
 			(yyval.node) = name_node((yyvsp[(2) - (2)].node), "");
-		;}
+		}
     break;
 
   case 8:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 145 "dtc-parser.y"
     {
 			(yyval.node) = merge_nodes((yyvsp[(1) - (3)].node), (yyvsp[(3) - (3)].node));
-		;}
+		}
     break;
 
   case 9:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 149 "dtc-parser.y"
     {
 			struct node *target = get_node_by_ref((yyvsp[(1) - (3)].node), (yyvsp[(2) - (3)].labelref));
@@ -1578,12 +1608,12 @@
 			else
 				print_error("label or path, '%s', not found", (yyvsp[(2) - (3)].labelref));
 			(yyval.node) = (yyvsp[(1) - (3)].node);
-		;}
+		}
     break;
 
   case 10:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 159 "dtc-parser.y"
     {
 			struct node *target = get_node_by_ref((yyvsp[(1) - (4)].node), (yyvsp[(3) - (4)].labelref));
@@ -1594,112 +1624,112 @@
 				delete_node(target);
 
 			(yyval.node) = (yyvsp[(1) - (4)].node);
-		;}
+		}
     break;
 
   case 11:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 173 "dtc-parser.y"
     {
 			(yyval.node) = build_node((yyvsp[(2) - (5)].proplist), (yyvsp[(3) - (5)].nodelist));
-		;}
+		}
     break;
 
   case 12:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 180 "dtc-parser.y"
     {
 			(yyval.proplist) = NULL;
-		;}
+		}
     break;
 
   case 13:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 184 "dtc-parser.y"
     {
 			(yyval.proplist) = chain_property((yyvsp[(2) - (2)].prop), (yyvsp[(1) - (2)].proplist));
-		;}
+		}
     break;
 
   case 14:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 191 "dtc-parser.y"
     {
 			(yyval.prop) = build_property((yyvsp[(1) - (4)].propnodename), (yyvsp[(3) - (4)].data));
-		;}
+		}
     break;
 
   case 15:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 195 "dtc-parser.y"
     {
 			(yyval.prop) = build_property((yyvsp[(1) - (2)].propnodename), empty_data);
-		;}
+		}
     break;
 
   case 16:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 199 "dtc-parser.y"
     {
 			(yyval.prop) = build_property_delete((yyvsp[(2) - (3)].propnodename));
-		;}
+		}
     break;
 
   case 17:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 203 "dtc-parser.y"
     {
 			add_label(&(yyvsp[(2) - (2)].prop)->labels, (yyvsp[(1) - (2)].labelref));
 			(yyval.prop) = (yyvsp[(2) - (2)].prop);
-		;}
+		}
     break;
 
   case 18:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 211 "dtc-parser.y"
     {
 			(yyval.data) = data_merge((yyvsp[(1) - (2)].data), (yyvsp[(2) - (2)].data));
-		;}
+		}
     break;
 
   case 19:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 215 "dtc-parser.y"
     {
 			(yyval.data) = data_merge((yyvsp[(1) - (3)].data), (yyvsp[(2) - (3)].array).data);
-		;}
+		}
     break;
 
   case 20:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 219 "dtc-parser.y"
     {
 			(yyval.data) = data_merge((yyvsp[(1) - (4)].data), (yyvsp[(3) - (4)].data));
-		;}
+		}
     break;
 
   case 21:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 223 "dtc-parser.y"
     {
 			(yyval.data) = data_add_marker((yyvsp[(1) - (2)].data), REF_PATH, (yyvsp[(2) - (2)].labelref));
-		;}
+		}
     break;
 
   case 22:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 227 "dtc-parser.y"
     {
 			FILE *f = srcfile_relative_open((yyvsp[(4) - (9)].data).val, NULL);
@@ -1716,12 +1746,12 @@
 
 			(yyval.data) = data_merge((yyvsp[(1) - (9)].data), d);
 			fclose(f);
-		;}
+		}
     break;
 
   case 23:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 244 "dtc-parser.y"
     {
 			FILE *f = srcfile_relative_open((yyvsp[(4) - (5)].data).val, NULL);
@@ -1731,48 +1761,48 @@
 
 			(yyval.data) = data_merge((yyvsp[(1) - (5)].data), d);
 			fclose(f);
-		;}
+		}
     break;
 
   case 24:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 254 "dtc-parser.y"
     {
 			(yyval.data) = data_add_marker((yyvsp[(1) - (2)].data), LABEL, (yyvsp[(2) - (2)].labelref));
-		;}
+		}
     break;
 
   case 25:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 261 "dtc-parser.y"
     {
 			(yyval.data) = empty_data;
-		;}
+		}
     break;
 
   case 26:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 265 "dtc-parser.y"
     {
 			(yyval.data) = (yyvsp[(1) - (2)].data);
-		;}
+		}
     break;
 
   case 27:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 269 "dtc-parser.y"
     {
 			(yyval.data) = data_add_marker((yyvsp[(1) - (2)].data), LABEL, (yyvsp[(2) - (2)].labelref));
-		;}
+		}
     break;
 
   case 28:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 276 "dtc-parser.y"
     {
 			(yyval.array).data = empty_data;
@@ -1787,22 +1817,22 @@
 					    " are currently supported");
 				(yyval.array).bits = 32;
 			}
-		;}
+		}
     break;
 
   case 29:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 291 "dtc-parser.y"
     {
 			(yyval.array).data = empty_data;
 			(yyval.array).bits = 32;
-		;}
+		}
     break;
 
   case 30:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 296 "dtc-parser.y"
     {
 			if ((yyvsp[(1) - (2)].array).bits < 64) {
@@ -1822,12 +1852,12 @@
 			}
 
 			(yyval.array).data = data_append_integer((yyvsp[(1) - (2)].array).data, (yyvsp[(2) - (2)].integer), (yyvsp[(1) - (2)].array).bits);
-		;}
+		}
     break;
 
   case 31:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 316 "dtc-parser.y"
     {
 			uint64_t val = ~0ULL >> (64 - (yyvsp[(1) - (2)].array).bits);
@@ -1841,288 +1871,299 @@
 					    "arrays with 32-bit elements.");
 
 			(yyval.array).data = data_append_integer((yyvsp[(1) - (2)].array).data, val, (yyvsp[(1) - (2)].array).bits);
-		;}
+		}
     break;
 
   case 32:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 330 "dtc-parser.y"
     {
 			(yyval.array).data = data_add_marker((yyvsp[(1) - (2)].array).data, LABEL, (yyvsp[(2) - (2)].labelref));
-		;}
+		}
     break;
 
   case 33:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 337 "dtc-parser.y"
     {
 			(yyval.integer) = eval_literal((yyvsp[(1) - (1)].literal), 0, 64);
-		;}
+		}
     break;
 
   case 34:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 341 "dtc-parser.y"
     {
 			(yyval.integer) = eval_char_literal((yyvsp[(1) - (1)].literal));
-		;}
+		}
     break;
 
   case 35:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 345 "dtc-parser.y"
     {
 			(yyval.integer) = (yyvsp[(2) - (3)].integer);
-		;}
+		}
     break;
 
   case 38:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 356 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (5)].integer) ? (yyvsp[(3) - (5)].integer) : (yyvsp[(5) - (5)].integer); ;}
+    { (yyval.integer) = (yyvsp[(1) - (5)].integer) ? (yyvsp[(3) - (5)].integer) : (yyvsp[(5) - (5)].integer); }
     break;
 
   case 40:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 361 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) || (yyvsp[(3) - (3)].integer); ;}
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) || (yyvsp[(3) - (3)].integer); }
     break;
 
   case 42:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 366 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) && (yyvsp[(3) - (3)].integer); ;}
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) && (yyvsp[(3) - (3)].integer); }
     break;
 
   case 44:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 371 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) | (yyvsp[(3) - (3)].integer); ;}
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) | (yyvsp[(3) - (3)].integer); }
     break;
 
   case 46:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 376 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) ^ (yyvsp[(3) - (3)].integer); ;}
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) ^ (yyvsp[(3) - (3)].integer); }
     break;
 
   case 48:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 381 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) & (yyvsp[(3) - (3)].integer); ;}
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) & (yyvsp[(3) - (3)].integer); }
     break;
 
   case 50:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 386 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) == (yyvsp[(3) - (3)].integer); ;}
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) == (yyvsp[(3) - (3)].integer); }
     break;
 
   case 51:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 387 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) != (yyvsp[(3) - (3)].integer); ;}
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) != (yyvsp[(3) - (3)].integer); }
     break;
 
   case 53:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 392 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) < (yyvsp[(3) - (3)].integer); ;}
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) < (yyvsp[(3) - (3)].integer); }
     break;
 
   case 54:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 393 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) > (yyvsp[(3) - (3)].integer); ;}
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) > (yyvsp[(3) - (3)].integer); }
     break;
 
   case 55:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 394 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) <= (yyvsp[(3) - (3)].integer); ;}
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) <= (yyvsp[(3) - (3)].integer); }
     break;
 
   case 56:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 395 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) >= (yyvsp[(3) - (3)].integer); ;}
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) >= (yyvsp[(3) - (3)].integer); }
     break;
 
   case 57:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 399 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) << (yyvsp[(3) - (3)].integer); ;}
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) << (yyvsp[(3) - (3)].integer); }
     break;
 
   case 58:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 400 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) >> (yyvsp[(3) - (3)].integer); ;}
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) >> (yyvsp[(3) - (3)].integer); }
     break;
 
   case 60:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 405 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) + (yyvsp[(3) - (3)].integer); ;}
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) + (yyvsp[(3) - (3)].integer); }
     break;
 
   case 61:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 406 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) - (yyvsp[(3) - (3)].integer); ;}
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) - (yyvsp[(3) - (3)].integer); }
     break;
 
   case 63:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 411 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) * (yyvsp[(3) - (3)].integer); ;}
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) * (yyvsp[(3) - (3)].integer); }
     break;
 
   case 64:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 412 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) / (yyvsp[(3) - (3)].integer); ;}
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) / (yyvsp[(3) - (3)].integer); }
     break;
 
   case 65:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 413 "dtc-parser.y"
-    { (yyval.integer) = (yyvsp[(1) - (3)].integer) % (yyvsp[(3) - (3)].integer); ;}
+    { (yyval.integer) = (yyvsp[(1) - (3)].integer) % (yyvsp[(3) - (3)].integer); }
     break;
 
   case 68:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 419 "dtc-parser.y"
-    { (yyval.integer) = -(yyvsp[(2) - (2)].integer); ;}
+    { (yyval.integer) = -(yyvsp[(2) - (2)].integer); }
     break;
 
   case 69:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 420 "dtc-parser.y"
-    { (yyval.integer) = ~(yyvsp[(2) - (2)].integer); ;}
+    { (yyval.integer) = ~(yyvsp[(2) - (2)].integer); }
     break;
 
   case 70:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 421 "dtc-parser.y"
-    { (yyval.integer) = !(yyvsp[(2) - (2)].integer); ;}
+    { (yyval.integer) = !(yyvsp[(2) - (2)].integer); }
     break;
 
   case 71:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 426 "dtc-parser.y"
     {
 			(yyval.data) = empty_data;
-		;}
+		}
     break;
 
   case 72:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 430 "dtc-parser.y"
     {
 			(yyval.data) = data_append_byte((yyvsp[(1) - (2)].data), (yyvsp[(2) - (2)].byte));
-		;}
+		}
     break;
 
   case 73:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 434 "dtc-parser.y"
     {
 			(yyval.data) = data_add_marker((yyvsp[(1) - (2)].data), LABEL, (yyvsp[(2) - (2)].labelref));
-		;}
+		}
     break;
 
   case 74:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 441 "dtc-parser.y"
     {
 			(yyval.nodelist) = NULL;
-		;}
+		}
     break;
 
   case 75:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 445 "dtc-parser.y"
     {
 			(yyval.nodelist) = chain_node((yyvsp[(1) - (2)].node), (yyvsp[(2) - (2)].nodelist));
-		;}
+		}
     break;
 
   case 76:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 449 "dtc-parser.y"
     {
 			print_error("syntax error: properties must precede subnodes");
 			YYERROR;
-		;}
+		}
     break;
 
   case 77:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 457 "dtc-parser.y"
     {
 			(yyval.node) = name_node((yyvsp[(2) - (2)].node), (yyvsp[(1) - (2)].propnodename));
-		;}
+		}
     break;
 
   case 78:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 461 "dtc-parser.y"
     {
 			(yyval.node) = name_node(build_node_delete(), (yyvsp[(2) - (3)].propnodename));
-		;}
+		}
     break;
 
   case 79:
 
-/* Line 1455 of yacc.c  */
+/* Line 1806 of yacc.c  */
 #line 465 "dtc-parser.y"
     {
 			add_label(&(yyvsp[(2) - (2)].node)->labels, (yyvsp[(1) - (2)].labelref));
 			(yyval.node) = (yyvsp[(2) - (2)].node);
-		;}
+		}
     break;
 
 
 
-/* Line 1455 of yacc.c  */
-#line 2124 "dtc-parser.tab.c"
+/* Line 1806 of yacc.c  */
+#line 2154 "dtc-parser.tab.c"
       default: break;
     }
+  /* User semantic actions sometimes alter yychar, and that requires
+     that yytoken be updated with the new translation.  We take the
+     approach of translating immediately before every use of yytoken.
+     One alternative is translating here after every semantic action,
+     but that translation would be missed if the semantic action invokes
+     YYABORT, YYACCEPT, or YYERROR immediately after altering yychar or
+     if it invokes YYBACKUP.  In the case of YYABORT or YYACCEPT, an
+     incorrect destructor might then be invoked immediately.  In the
+     case of YYERROR or YYBACKUP, subsequent parser actions might lead
+     to an incorrect destructor call or verbose syntax error message
+     before the lookahead is translated.  */
   YY_SYMBOL_PRINT ("-> $$ =", yyr1[yyn], &yyval, &yyloc);
 
   YYPOPSTACK (yylen);
@@ -2150,6 +2191,10 @@
 | yyerrlab -- here on detecting error |
 `------------------------------------*/
 yyerrlab:
+  /* Make sure we have latest lookahead translation.  See comments at
+     user semantic actions for why this is necessary.  */
+  yytoken = yychar == YYEMPTY ? YYEMPTY : YYTRANSLATE (yychar);
+
   /* If not already recovering from an error, report this error.  */
   if (!yyerrstatus)
     {
@@ -2157,37 +2202,36 @@
 #if ! YYERROR_VERBOSE
       yyerror (YY_("syntax error"));
 #else
+# define YYSYNTAX_ERROR yysyntax_error (&yymsg_alloc, &yymsg, \
+                                        yyssp, yytoken)
       {
-	YYSIZE_T yysize = yysyntax_error (0, yystate, yychar);
-	if (yymsg_alloc < yysize && yymsg_alloc < YYSTACK_ALLOC_MAXIMUM)
-	  {
-	    YYSIZE_T yyalloc = 2 * yysize;
-	    if (! (yysize <= yyalloc && yyalloc <= YYSTACK_ALLOC_MAXIMUM))
-	      yyalloc = YYSTACK_ALLOC_MAXIMUM;
-	    if (yymsg != yymsgbuf)
-	      YYSTACK_FREE (yymsg);
-	    yymsg = (char *) YYSTACK_ALLOC (yyalloc);
-	    if (yymsg)
-	      yymsg_alloc = yyalloc;
-	    else
-	      {
-		yymsg = yymsgbuf;
-		yymsg_alloc = sizeof yymsgbuf;
-	      }
-	  }
-
-	if (0 < yysize && yysize <= yymsg_alloc)
-	  {
-	    (void) yysyntax_error (yymsg, yystate, yychar);
-	    yyerror (yymsg);
-	  }
-	else
-	  {
-	    yyerror (YY_("syntax error"));
-	    if (yysize != 0)
-	      goto yyexhaustedlab;
-	  }
+        char const *yymsgp = YY_("syntax error");
+        int yysyntax_error_status;
+        yysyntax_error_status = YYSYNTAX_ERROR;
+        if (yysyntax_error_status == 0)
+          yymsgp = yymsg;
+        else if (yysyntax_error_status == 1)
+          {
+            if (yymsg != yymsgbuf)
+              YYSTACK_FREE (yymsg);
+            yymsg = (char *) YYSTACK_ALLOC (yymsg_alloc);
+            if (!yymsg)
+              {
+                yymsg = yymsgbuf;
+                yymsg_alloc = sizeof yymsgbuf;
+                yysyntax_error_status = 2;
+              }
+            else
+              {
+                yysyntax_error_status = YYSYNTAX_ERROR;
+                yymsgp = yymsg;
+              }
+          }
+        yyerror (yymsgp);
+        if (yysyntax_error_status == 2)
+          goto yyexhaustedlab;
       }
+# undef YYSYNTAX_ERROR
 #endif
     }
 
@@ -2246,7 +2290,7 @@
   for (;;)
     {
       yyn = yypact[yystate];
-      if (yyn != YYPACT_NINF)
+      if (!yypact_value_is_default (yyn))
 	{
 	  yyn += YYTERROR;
 	  if (0 <= yyn && yyn <= YYLAST && yycheck[yyn] == YYTERROR)
@@ -2305,8 +2349,13 @@
 
 yyreturn:
   if (yychar != YYEMPTY)
-     yydestruct ("Cleanup: discarding lookahead",
-		 yytoken, &yylval);
+    {
+      /* Make sure we have latest lookahead translation.  See comments at
+         user semantic actions for why this is necessary.  */
+      yytoken = YYTRANSLATE (yychar);
+      yydestruct ("Cleanup: discarding lookahead",
+                  yytoken, &yylval);
+    }
   /* Do not reclaim the symbols of the rule which action triggered
      this YYABORT or YYACCEPT.  */
   YYPOPSTACK (yylen);
@@ -2331,7 +2380,7 @@
 
 
 
-/* Line 1675 of yacc.c  */
+/* Line 2067 of yacc.c  */
 #line 471 "dtc-parser.y"
 
 
diff --git a/scripts/dtc/dtc-parser.tab.h_shipped b/scripts/dtc/dtc-parser.tab.h_shipped
index 9d2dce4..25d3b88 100644
--- a/scripts/dtc/dtc-parser.tab.h_shipped
+++ b/scripts/dtc/dtc-parser.tab.h_shipped
@@ -1,10 +1,8 @@
+/* A Bison parser, made by GNU Bison 2.5.  */
 
-/* A Bison parser, made by GNU Bison 2.4.1.  */
-
-/* Skeleton interface for Bison's Yacc-like parsers in C
+/* Bison interface for Yacc-like parsers in C
    
-      Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003, 2004, 2005, 2006
-   Free Software Foundation, Inc.
+      Copyright (C) 1984, 1989-1990, 2000-2011 Free Software Foundation, Inc.
    
    This program is free software: you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
@@ -70,7 +68,7 @@
 typedef union YYSTYPE
 {
 
-/* Line 1676 of yacc.c  */
+/* Line 2068 of yacc.c  */
 #line 40 "dtc-parser.y"
 
 	char *propnodename;
@@ -94,8 +92,8 @@
 
 
 
-/* Line 1676 of yacc.c  */
-#line 99 "dtc-parser.tab.h"
+/* Line 2068 of yacc.c  */
+#line 97 "dtc-parser.tab.h"
 } YYSTYPE;
 # define YYSTYPE_IS_TRIVIAL 1
 # define yystype YYSTYPE /* obsolescent; will be withdrawn */
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index ccfa383..f928181 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -1649,6 +1649,7 @@
 	}
 	if (!snd_pcm_stream_linked(substream)) {
 		substream->group = group;
+		group = NULL;
 		spin_lock_init(&substream->group->lock);
 		INIT_LIST_HEAD(&substream->group->substreams);
 		list_add_tail(&substream->link_list, &substream->group->substreams);
@@ -1663,8 +1664,7 @@
  _nolock:
 	snd_card_unref(substream1->pcm->card);
 	fput_light(file, fput_needed);
-	if (res < 0)
-		kfree(group);
+	kfree(group);
 	return res;
 }
 
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index bd8d46c..cccaf9c 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -58,6 +58,7 @@
 	CS420X_GPIO_23,
 	CS420X_MBP101,
 	CS420X_MBP81,
+	CS420X_MBA42,
 	CS420X_AUTO,
 	/* aliases */
 	CS420X_IMAC27_122 = CS420X_GPIO_23,
@@ -346,6 +347,7 @@
 	{ .id = CS420X_APPLE, .name = "apple" },
 	{ .id = CS420X_MBP101, .name = "mbp101" },
 	{ .id = CS420X_MBP81, .name = "mbp81" },
+	{ .id = CS420X_MBA42, .name = "mba42" },
 	{}
 };
 
@@ -361,6 +363,7 @@
 	SND_PCI_QUIRK(0x106b, 0x1c00, "MacBookPro 8,1", CS420X_MBP81),
 	SND_PCI_QUIRK(0x106b, 0x2000, "iMac 12,2", CS420X_IMAC27_122),
 	SND_PCI_QUIRK(0x106b, 0x2800, "MacBookPro 10,1", CS420X_MBP101),
+	SND_PCI_QUIRK(0x106b, 0x5b00, "MacBookAir 4,2", CS420X_MBA42),
 	SND_PCI_QUIRK_VENDOR(0x106b, "Apple", CS420X_APPLE),
 	{} /* terminator */
 };
@@ -414,6 +417,20 @@
 	{} /* terminator */
 };
 
+static const struct hda_pintbl mba42_pincfgs[] = {
+	{ 0x09, 0x012b4030 }, /* HP */
+	{ 0x0a, 0x400000f0 },
+	{ 0x0b, 0x90100120 }, /* speaker */
+	{ 0x0c, 0x400000f0 },
+	{ 0x0d, 0x90a00110 }, /* mic */
+	{ 0x0e, 0x400000f0 },
+	{ 0x0f, 0x400000f0 },
+	{ 0x10, 0x400000f0 },
+	{ 0x12, 0x400000f0 },
+	{ 0x15, 0x400000f0 },
+	{} /* terminator */
+};
+
 static void cs420x_fixup_gpio_13(struct hda_codec *codec,
 				 const struct hda_fixup *fix, int action)
 {
@@ -482,6 +499,12 @@
 		.chained = true,
 		.chain_id = CS420X_GPIO_13,
 	},
+	[CS420X_MBA42] = {
+		.type = HDA_FIXUP_PINS,
+		.v.pins = mba42_pincfgs,
+		.chained = true,
+		.chain_id = CS420X_GPIO_13,
+	},
 };
 
 static struct cs_spec *cs_alloc_spec(struct hda_codec *codec, int vendor_nid)
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 02e22b4..403010c 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -3483,6 +3483,7 @@
 	SND_PCI_QUIRK(0x1028, 0x05ca, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x05cb, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x05de, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x1028, 0x05e0, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x05e9, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x05ea, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x05eb, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
@@ -3494,6 +3495,8 @@
 	SND_PCI_QUIRK(0x1028, 0x05f5, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x05f6, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x05f8, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x1028, 0x0606, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x1028, 0x0608, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x0609, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
 	SND_PCI_QUIRK(0x103c, 0x18e6, "HP", ALC269_FIXUP_HP_GPIO_LED),
@@ -3596,6 +3599,8 @@
 	{.id = ALC269_FIXUP_INV_DMIC, .name = "inv-dmic"},
 	{.id = ALC269_FIXUP_LENOVO_DOCK, .name = "lenovo-dock"},
 	{.id = ALC269_FIXUP_HP_GPIO_LED, .name = "hp-gpio-led"},
+	{.id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE, .name = "dell-headset-multi"},
+	{.id = ALC269_FIXUP_DELL2_MIC_NO_PRESENCE, .name = "dell-headset-dock"},
 	{}
 };
 
@@ -4275,6 +4280,7 @@
 	{.id = ALC662_FIXUP_ASUS_MODE7, .name = "asus-mode7"},
 	{.id = ALC662_FIXUP_ASUS_MODE8, .name = "asus-mode8"},
 	{.id = ALC662_FIXUP_INV_DMIC, .name = "inv-dmic"},
+	{.id = ALC668_FIXUP_DELL_MIC_NO_PRESENCE, .name = "dell-headset-multi"},
 	{}
 };
 
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 1a03317..64952e2 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -147,14 +147,32 @@
 		return -EINVAL;
 	}
 
+	alts = &iface->altsetting[0];
+	altsd = get_iface_desc(alts);
+
+	/*
+	 * Android with both accessory and audio interfaces enabled gets the
+	 * interface numbers wrong.
+	 */
+	if ((chip->usb_id == USB_ID(0x18d1, 0x2d04) ||
+	     chip->usb_id == USB_ID(0x18d1, 0x2d05)) &&
+	    interface == 0 &&
+	    altsd->bInterfaceClass == USB_CLASS_VENDOR_SPEC &&
+	    altsd->bInterfaceSubClass == USB_SUBCLASS_VENDOR_SPEC) {
+		interface = 2;
+		iface = usb_ifnum_to_if(dev, interface);
+		if (!iface)
+			return -EINVAL;
+		alts = &iface->altsetting[0];
+		altsd = get_iface_desc(alts);
+	}
+
 	if (usb_interface_claimed(iface)) {
 		snd_printdd(KERN_INFO "%d:%d:%d: skipping, already claimed\n",
 						dev->devnum, ctrlif, interface);
 		return -EINVAL;
 	}
 
-	alts = &iface->altsetting[0];
-	altsd = get_iface_desc(alts);
 	if ((altsd->bInterfaceClass == USB_CLASS_AUDIO ||
 	     altsd->bInterfaceClass == USB_CLASS_VENDOR_SPEC) &&
 	    altsd->bInterfaceSubClass == USB_SUBCLASS_MIDISTREAMING) {
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index e5c7f9f..d543808 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -885,6 +885,7 @@
 
 	case USB_ID(0x046d, 0x0808):
 	case USB_ID(0x046d, 0x0809):
+	case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */
 	case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */
 	case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */
 	case USB_ID(0x046d, 0x0991):