Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-next-2.6 into for-davem
diff --git a/Documentation/ABI/obsolete/proc-pid-oom_adj b/Documentation/ABI/obsolete/proc-pid-oom_adj
new file mode 100644
index 0000000..cf63f26
--- /dev/null
+++ b/Documentation/ABI/obsolete/proc-pid-oom_adj
@@ -0,0 +1,22 @@
+What:	/proc/<pid>/oom_adj
+When:	August 2012
+Why:	/proc/<pid>/oom_adj allows userspace to influence the oom killer's
+	badness heuristic used to determine which task to kill when the kernel
+	is out of memory.
+
+	The badness heuristic has been rewritten since the introduction of
+	this tunable, so its meaning is deprecated.  The value was
+	implemented as a bitshift on a score generated by the badness()
+	function that did not have any precise units of measure.  With the
+	rewrite, the score is given as a proportion of available memory to the
+	task allocating pages; a bitshift, which scales the score
+	exponentially, is therefore impossible to tune with fine granularity.
+
+	A much more powerful interface, /proc/<pid>/oom_score_adj, was
+	introduced with the oom killer rewrite that allows users to increase or
+	decrease the badness() score linearly.  This interface will replace
+	/proc/<pid>/oom_adj.
+
+	A warning will be emitted to the kernel log if an application uses this
+	deprecated interface.  After it is printed once, future warnings will be
+	suppressed until the kernel is rebooted.
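+
+	As a non-normative illustration, a task could bias the heuristic
+	away from itself through the replacement interface; oom_score_adj
+	accepts values from -1000 (never kill) to +1000 (preferred victim):
+
+	#include <stdio.h>
+
+	int main(void)
+	{
+		FILE *f = fopen("/proc/self/oom_score_adj", "w");
+
+		if (!f)
+			return 1;
+		fputs("-500\n", f);	/* bias this task's score down */
+		return fclose(f) ? 1 : 0;
+	}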
diff --git a/Documentation/block/switching-sched.txt b/Documentation/block/switching-sched.txt
index d5af3f6..71cfbdc 100644
--- a/Documentation/block/switching-sched.txt
+++ b/Documentation/block/switching-sched.txt
@@ -16,7 +16,7 @@
 As of the Linux 2.6.10 kernel, it is now possible to change the
 IO scheduler for a given block device on the fly (thus making it possible,
 for instance, to set the CFQ scheduler for the system default, but
-set a specific device to use the anticipatory or noop schedulers - which
+set a specific device to use the deadline or noop schedulers - which
 can improve that device's throughput).
 
 To set a specific scheduler, simply do this:
@@ -31,7 +31,7 @@
 will be displayed, with the currently selected scheduler in brackets:
 
 # cat /sys/block/hda/queue/scheduler
-noop anticipatory deadline [cfq]
-# echo anticipatory > /sys/block/hda/queue/scheduler
+noop deadline [cfq]
+# echo deadline > /sys/block/hda/queue/scheduler
 # cat /sys/block/hda/queue/scheduler
-noop [anticipatory] deadline cfq
+noop [deadline] cfq
diff --git a/Documentation/filesystems/xfs-delayed-logging-design.txt b/Documentation/filesystems/xfs-delayed-logging-design.txt
index 96d0df2..7445bf3 100644
--- a/Documentation/filesystems/xfs-delayed-logging-design.txt
+++ b/Documentation/filesystems/xfs-delayed-logging-design.txt
@@ -794,17 +794,6 @@
 
 Roadmap:
 
-2.6.37 Remove experimental tag from mount option
-	=> should be roughly 6 months after initial merge
-	=> enough time to:
-		=> gain confidence and fix problems reported by early
-		   adopters (a.k.a. guinea pigs)
-		=> address worst performance regressions and undesired
-		   behaviours
-		=> start tuning/optimising code for parallelism
-		=> start tuning/optimising algorithms consuming
-		   excessive CPU time
-
 2.6.39 Switch default mount option to use delayed logging
 	=> should be roughly 12 months after initial merge
 	=> enough time to shake out remaining problems before next round of
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index ed45e98..92e83e5 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -706,7 +706,7 @@
 			arch/x86/kernel/cpu/cpufreq/elanfreq.c.
 
 	elevator=	[IOSCHED]
-			Format: {"anticipatory" | "cfq" | "deadline" | "noop"}
+			Format: {"cfq" | "deadline" | "noop"}
-			See Documentation/block/as-iosched.txt and
-			Documentation/block/deadline-iosched.txt for details.
+			See Documentation/block/deadline-iosched.txt for details.
 
diff --git a/Documentation/leds-class.txt b/Documentation/leds-class.txt
index 8fd5ca2..58b266b 100644
--- a/Documentation/leds-class.txt
+++ b/Documentation/leds-class.txt
@@ -60,15 +60,18 @@
 
 Some LEDs can be programmed to blink without any CPU interaction. To
 support this feature, a LED driver can optionally implement the
-blink_set() function (see <linux/leds.h>). If implemented, triggers can
-attempt to use it before falling back to software timers. The blink_set()
-function should return 0 if the blink setting is supported, or -EINVAL
-otherwise, which means that LED blinking will be handled by software.
+blink_set() function (see <linux/leds.h>). To set an LED to blinking,
+however, it is better to use the API function led_blink_set(),
+as it will check and implement a software fallback if necessary.
 
-The blink_set() function should choose a user friendly blinking
-value if it is called with *delay_on==0 && *delay_off==0 parameters. In
-this case the driver should give back the chosen value through delay_on
-and delay_off parameters to the leds subsystem.
+To turn off blinking again, use the API function led_brightness_set()
+as that will not just set the LED brightness but also stop any software
+timers that may have been required for blinking.
+
+The blink_set() function should choose a user friendly blinking value
+if it is called with *delay_on==0 && *delay_off==0 parameters. In this
+case the driver should give back the chosen value through delay_on and
+delay_off parameters to the leds subsystem.
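+
+A minimal sketch of a driver-side blink_set() implementation following
+these rules (the foo_* helpers are hypothetical stand-ins for real
+hardware accessors, not part of the LED API):
+
+#include <linux/errno.h>
+#include <linux/leds.h>
+
+/* hypothetical hardware accessors, provided elsewhere by the driver */
+extern bool foo_hw_can_blink(unsigned long on, unsigned long off);
+extern void foo_hw_start_blink(struct led_classdev *cdev,
+			       unsigned long on, unsigned long off);
+
+static int foo_blink_set(struct led_classdev *led_cdev,
+			 unsigned long *delay_on,
+			 unsigned long *delay_off)
+{
+	/* Choose a user friendly default and report it back. */
+	if (*delay_on == 0 && *delay_off == 0) {
+		*delay_on = 500;	/* ms */
+		*delay_off = 500;	/* ms */
+	}
+
+	/* Pattern not supported in hardware: a non-zero return makes
+	 * led_blink_set() fall back to software timers. */
+	if (!foo_hw_can_blink(*delay_on, *delay_off))
+		return -EINVAL;
+
+	foo_hw_start_blink(led_cdev, *delay_on, *delay_off);
+	return 0;
+}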
 
 Setting the brightness to zero with brightness_set() callback function
 should completely turn off the LED and cancel the previously programmed
diff --git a/Documentation/leds/leds-lp5521.txt b/Documentation/leds/leds-lp5521.txt
new file mode 100644
index 0000000..c4d8d15
--- /dev/null
+++ b/Documentation/leds/leds-lp5521.txt
@@ -0,0 +1,88 @@
+Kernel driver for lp5521
+========================
+
+* National Semiconductor LP5521 led driver chip
+* Datasheet: http://www.national.com/pf/LP/LP5521.html
+
+Authors: Mathias Nyman, Yuri Zaporozhets, Samu Onkalo
+Contact: Samu Onkalo (samu.p.onkalo-at-nokia.com)
+
+Description
+-----------
+
+LP5521 can drive up to 3 channels. Leds can be controlled directly via
+the led class control interface. Channels have generic names:
+lp5521:channelx, where x is 0 .. 2
+
+All three channels can be also controlled using the engine micro programs.
+More details of the instructions can be found in the public data sheet.
+
+Control interface for the engines:
+x is 1 .. 3
+enginex_mode : disabled, load, run
+enginex_load : store program (visible only in engine load mode)
+
+Example (start to blink the channel 2 led):
+cd   /sys/class/leds/lp5521:channel2/device
+echo "load" > engine3_mode
+echo "037f4d0003ff6000" > engine3_load
+echo "run" > engine3_mode
+
+stop the engine:
+echo "disabled" > engine3_mode
+
+sysfs contains a selftest entry.
+The test communicates with the chip and checks that
+the clock mode is automatically set to the requested one.
+
+Each channel has its own led current settings.
+/sys/class/leds/lp5521:channel0/led_current - RW
+/sys/class/leds/lp5521:channel0/max_current - RO
+Format: 10x mA, i.e. 10 means 1.0 mA
+
+example platform data:
+
+Note: chan_nr can have values between 0 and 2.
+
+static struct lp5521_led_config lp5521_led_config[] = {
+        {
+                .chan_nr        = 0,
+                .led_current    = 50,
+                .max_current    = 130,
+        }, {
+                .chan_nr        = 1,
+                .led_current    = 0,
+                .max_current    = 130,
+        }, {
+                .chan_nr        = 2,
+                .led_current    = 0,
+                .max_current    = 130,
+        }
+};
+
+static int lp5521_setup(void)
+{
+	/* setup HW resources */
+}
+
+static void lp5521_release(void)
+{
+	/* Release HW resources */
+}
+
+static void lp5521_enable(bool state)
+{
+	/* Control of chip enable signal */
+}
+
+static struct lp5521_platform_data lp5521_platform_data = {
+        .led_config     = lp5521_led_config,
+        .num_channels   = ARRAY_SIZE(lp5521_led_config),
+        .clock_mode     = LP5521_CLOCK_EXT,
+        .setup_resources   = lp5521_setup,
+        .release_resources = lp5521_release,
+        .enable            = lp5521_enable,
+};
+
+If the current is set to 0 in the platform data, that channel is
+disabled and is not visible in sysfs.
diff --git a/Documentation/leds/leds-lp5523.txt b/Documentation/leds/leds-lp5523.txt
new file mode 100644
index 0000000..fad2feb
--- /dev/null
+++ b/Documentation/leds/leds-lp5523.txt
@@ -0,0 +1,83 @@
+Kernel driver for lp5523
+========================
+
+* National Semiconductor LP5523 led driver chip
+* Datasheet: http://www.national.com/pf/LP/LP5523.html
+
+Authors: Mathias Nyman, Yuri Zaporozhets, Samu Onkalo
+Contact: Samu Onkalo (samu.p.onkalo-at-nokia.com)
+
+Description
+-----------
+LP5523 can drive up to 9 channels. Leds can be controlled directly via
+the led class control interface. Channels have generic names:
+lp5523:channelx where x is 0...8
+
+The chip provides 3 engines. Each engine can control channels without
+interaction from the main CPU. Details of the micro engine code can be found
+in the public data sheet. Leds can be muxed to different channels.
+
+Control interface for the engines:
+x is 1 .. 3
+enginex_mode : disabled, load, run
+enginex_load : microcode load (visible only in load mode)
+enginex_leds : led mux control (visible only in load mode)
+
+cd /sys/class/leds/lp5523:channel2/device
+echo "load" > engine3_mode
+echo "9d80400004ff05ff437f0000" > engine3_load
+echo "111111111" > engine3_leds
+echo "run" > engine3_mode
+
+sysfs contains a selftest entry. It measures each channel
+voltage level and checks if it looks reasonable. If the level is too high,
+the led is missing; if the level is too low, there is a short circuit.
+
+The selftest always uses the current value from the platform data.
+
+Each channel contains led current settings.
+/sys/class/leds/lp5523:channel2/led_current - RW
+/sys/class/leds/lp5523:channel2/max_current - RO
+Format: 10x mA, i.e. 10 means 1.0 mA
+
+Example platform data:
+
+Note - chan_nr can have values between 0 and 8.
+
+static struct lp5523_led_config lp5523_led_config[] = {
+        {
+                .chan_nr        = 0,
+                .led_current    = 50,
+                .max_current    = 130,
+        },
+...
+        }, {
+                .chan_nr        = 8,
+                .led_current    = 50,
+                .max_current    = 130,
+        }
+};
+
+static int lp5523_setup(void)
+{
+	/* Setup HW resources */
+}
+
+static void lp5523_release(void)
+{
+	/* Release HW resources */
+}
+
+static void lp5523_enable(bool state)
+{
+	/* Control chip enable signal */
+}
+
+static struct lp5523_platform_data lp5523_platform_data = {
+        .led_config     = lp5523_led_config,
+        .num_channels   = ARRAY_SIZE(lp5523_led_config),
+        .clock_mode     = LP5523_CLOCK_EXT,
+        .setup_resources   = lp5523_setup,
+        .release_resources = lp5523_release,
+        .enable            = lp5523_enable,
+};
diff --git a/Documentation/networking/LICENSE.qlcnic b/Documentation/networking/LICENSE.qlcnic
new file mode 100644
index 0000000..29ad4b1
--- /dev/null
+++ b/Documentation/networking/LICENSE.qlcnic
@@ -0,0 +1,327 @@
+Copyright (c) 2009-2010 QLogic Corporation
+QLogic Linux qlcnic NIC Driver
+
+This program includes a device driver for Linux 2.6 that may be
+distributed with QLogic hardware specific firmware binary file.
+You may modify and redistribute the device driver code under the
+GNU General Public License (a copy of which is attached hereto as
+Exhibit A) published by the Free Software Foundation (version 2).
+
+You may redistribute the hardware specific firmware binary file
+under the following terms:
+
+       1. Redistribution of source code (only if applicable),
+          must retain the above copyright notice, this list of
+          conditions and the following disclaimer.
+
+       2. Redistribution in binary form must reproduce the above
+          copyright notice, this list of conditions and the
+          following disclaimer in the documentation and/or other
+          materials provided with the distribution.
+
+       3. The name of QLogic Corporation may not be used to
+          endorse or promote products derived from this software
+          without specific prior written permission
+
+REGARDLESS OF WHAT LICENSING MECHANISM IS USED OR APPLICABLE,
+THIS PROGRAM IS PROVIDED BY QLOGIC CORPORATION "AS IS'' AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR
+BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+USER ACKNOWLEDGES AND AGREES THAT USE OF THIS PROGRAM WILL NOT
+CREATE OR GIVE GROUNDS FOR A LICENSE BY IMPLICATION, ESTOPPEL, OR
+OTHERWISE IN ANY INTELLECTUAL PROPERTY RIGHTS (PATENT, COPYRIGHT,
+TRADE SECRET, MASK WORK, OR OTHER PROPRIETARY RIGHT) EMBODIED IN
+ANY OTHER QLOGIC HARDWARE OR SOFTWARE EITHER SOLELY OR IN
+COMBINATION WITH THIS PROGRAM.
+
+
+EXHIBIT A
+
+                   GNU GENERAL PUBLIC LICENSE
+                      Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+ 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                           Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users.  This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it.  (Some other Free Software Foundation software is covered by
+the GNU Lesser General Public License instead.)  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+  To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have.  You must make sure that they, too, receive or can get the
+source code.  And you must show them these terms so they know their
+rights.
+
+  We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+  Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software.  If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+  Finally, any free program is threatened constantly by software
+patents.  We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary.  To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+                   GNU GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License.  The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language.  (Hereinafter, translation is included without limitation in
+the term "modification".)  Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+  1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+  2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) You must cause the modified files to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    b) You must cause any work that you distribute or publish, that in
+    whole or in part contains or is derived from the Program or any
+    part thereof, to be licensed as a whole at no charge to all third
+    parties under the terms of this License.
+
+    c) If the modified program normally reads commands interactively
+    when run, you must cause it, when started running for such
+    interactive use in the most ordinary way, to print or display an
+    announcement including an appropriate copyright notice and a
+    notice that there is no warranty (or else, saying that you provide
+    a warranty) and that users may redistribute the program under
+    these conditions, and telling the user how to view a copy of this
+    License.  (Exception: if the Program itself is interactive but
+    does not normally print such an announcement, your work based on
+    the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+    a) Accompany it with the complete corresponding machine-readable
+    source code, which must be distributed under the terms of Sections
+    1 and 2 above on a medium customarily used for software interchange; or,
+
+    b) Accompany it with a written offer, valid for at least three
+    years, to give any third party, for a charge no more than your
+    cost of physically performing source distribution, a complete
+    machine-readable copy of the corresponding source code, to be
+    distributed under the terms of Sections 1 and 2 above on a medium
+    customarily used for software interchange; or,
+
+    c) Accompany it with the information you received as to the offer
+    to distribute corresponding source code.  (This alternative is
+    allowed only for noncommercial distribution and only if you
+    received the program in object code or executable form with such
+    an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it.  For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable.  However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License.  Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+  5. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Program or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+  6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+  7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded.  In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+  9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation.  If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+  10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission.  For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this.  Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+                           NO WARRANTY
+
+  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
diff --git a/Documentation/networking/dccp.txt b/Documentation/networking/dccp.txt
index 271d524..b395ca6 100644
--- a/Documentation/networking/dccp.txt
+++ b/Documentation/networking/dccp.txt
@@ -47,6 +47,26 @@
 
 Socket options
 ==============
+DCCP_SOCKOPT_QPOLICY_ID sets the dequeuing policy for outgoing packets. It takes
+a policy ID as argument and can only be set before the connection (i.e. changes
+during an established connection are not supported). Currently, two policies are
+defined: the "simple" policy (DCCPQ_POLICY_SIMPLE), which does nothing special,
+and a priority-based variant (DCCPQ_POLICY_PRIO). The latter allows one to pass
+a u32 priority value as ancillary data to sendmsg(), where higher numbers
+indicate a higher packet priority (similar to SO_PRIORITY). This ancillary data
+needs to be formatted using a cmsg(3) message header filled in as follows:
+	cmsg->cmsg_level = SOL_DCCP;
+	cmsg->cmsg_type	 = DCCP_SCM_PRIORITY;
+	cmsg->cmsg_len	 = CMSG_LEN(sizeof(uint32_t));	/* or CMSG_LEN(4) */
+
+DCCP_SOCKOPT_QPOLICY_TXQLEN sets the maximum length of the output queue. A zero
+value is always interpreted as unbounded queue length. If different from zero,
+the interpretation of this parameter depends on the current dequeuing policy
+(see above): the "simple" policy will enforce a fixed queue size by returning
+EAGAIN, whereas the "prio" policy enforces a fixed queue length by dropping the
+lowest-priority packet first. The default value for this parameter is
+initialised from /proc/sys/net/dccp/default/tx_qlen.
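+
+A minimal sketch putting both options together (assuming SOL_DCCP and
+the DCCPQ_POLICY_* / DCCP_SCM_* constants are available from the kernel
+headers; error handling omitted):
+
+	#include <string.h>
+	#include <stdint.h>
+	#include <sys/socket.h>
+	#include <linux/dccp.h>
+
+	/* Must be called before connect(). */
+	static void set_prio_policy(int sock)
+	{
+		int policy = DCCPQ_POLICY_PRIO;
+		int qlen = 16;	/* bound the TX queue to 16 packets */
+
+		setsockopt(sock, SOL_DCCP, DCCP_SOCKOPT_QPOLICY_ID,
+			   &policy, sizeof(policy));
+		setsockopt(sock, SOL_DCCP, DCCP_SOCKOPT_QPOLICY_TXQLEN,
+			   &qlen, sizeof(qlen));
+	}
+
+	/* Send one packet carrying a per-packet priority. */
+	static ssize_t send_with_prio(int sock, const void *buf, size_t len,
+				      uint32_t prio)
+	{
+		struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
+		union {		/* ensure cmsg buffer alignment */
+			char buf[CMSG_SPACE(sizeof(uint32_t))];
+			struct cmsghdr align;
+		} u;
+		struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
+				      .msg_control = u.buf,
+				      .msg_controllen = sizeof(u.buf) };
+		struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
+
+		cmsg->cmsg_level = SOL_DCCP;
+		cmsg->cmsg_type	 = DCCP_SCM_PRIORITY;
+		cmsg->cmsg_len	 = CMSG_LEN(sizeof(uint32_t));
+		memcpy(CMSG_DATA(cmsg), &prio, sizeof(prio));
+		return sendmsg(sock, &msg, 0);
+	}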
+
 DCCP_SOCKOPT_SERVICE sets the service. The specification mandates use of
 service codes (RFC 4340, sec. 8.1.2); if this socket option is not set,
 the socket will fall back to 0 (which means that no meaningful service code
diff --git a/Documentation/networking/e1000.txt b/Documentation/networking/e1000.txt
index d9271e7..6cb13e9 100644
--- a/Documentation/networking/e1000.txt
+++ b/Documentation/networking/e1000.txt
@@ -79,7 +79,7 @@
 ---------------------
 (not supported on Intel(R) 82542, 82543 or 82544-based adapters)
 Valid Range:   0,1,3,4,100-100000 (0=off, 1=dynamic, 3=dynamic conservative,
-                                   4=simplified balancing)
+                                 4=simplified balancing)
 Default Value: 3
 
 The driver can limit the amount of interrupts per second that the adapter
@@ -124,8 +124,8 @@
 the same as mode 3, the InterruptThrottleRate will be increased stepwise to
 70000 for traffic in class "Lowest latency".
 
-In simplified mode the interrupt rate is based on the ratio of Tx and
-Rx traffic.  If the bytes per second rate is approximately equal, the
+In simplified mode the interrupt rate is based on the ratio of TX and
+RX traffic.  If the bytes per second rate is approximately equal, the
 interrupt rate will drop as low as 2000 interrupts per second.  If the
 traffic is mostly transmit or mostly receive, the interrupt rate could
 be as high as 8000.
@@ -245,7 +245,7 @@
 TxDescriptorStep
 ----------------
 Valid Range:    1 (use every Tx Descriptor)
-		4 (use every 4th Tx Descriptor)
+                4 (use every 4th Tx Descriptor)
 
 Default Value:  1 (use every Tx Descriptor)
 
@@ -312,7 +312,7 @@
 Default Value: 256
 Usage: insmod e1000.ko copybreak=128
 
-Driver copies all packets below or equaling this size to a fresh Rx
+The driver copies all packets at or below this size to a fresh RX
 buffer before handing it up the stack.
 
 This parameter is different than other parameters, in that it is a
diff --git a/Documentation/networking/e1000e.txt b/Documentation/networking/e1000e.txt
index 6aa048b..81a66e6 100644
--- a/Documentation/networking/e1000e.txt
+++ b/Documentation/networking/e1000e.txt
@@ -1,5 +1,5 @@
 Linux* Driver for Intel(R) Network Connection
-===============================================================
+=============================================
 
 Intel Gigabit Linux driver.
 Copyright(c) 1999 - 2010 Intel Corporation.
@@ -61,6 +61,12 @@
 load on the system and can lower CPU utilization under heavy load,
 but will increase latency as packets are not processed as quickly.
 
+The driver's default behaviour previously assumed a static
+InterruptThrottleRate value of 8000, providing a good fallback value for
+all traffic types, but lacking in small packet performance and latency.
+However, the hardware can handle many more small packets per second, and
+for this reason an adaptive interrupt moderation algorithm was
+implemented.
+
 The driver has two adaptive modes (setting 1 or 3) in which
 it dynamically adjusts the InterruptThrottleRate value based on the traffic
 that it receives. After determining the type of incoming traffic in the last
@@ -86,8 +92,8 @@
 the same as mode 3, the InterruptThrottleRate will be increased stepwise to
 70000 for traffic in class "Lowest latency".
 
-In simplified mode the interrupt rate is based on the ratio of Tx and
-Rx traffic.  If the bytes per second rate is approximately equal the
+In simplified mode the interrupt rate is based on the ratio of TX and
+RX traffic.  If the bytes per second rate is approximately equal, the
 interrupt rate will drop as low as 2000 interrupts per second.  If the
 traffic is mostly transmit or mostly receive, the interrupt rate could
 be as high as 8000.
@@ -177,7 +183,7 @@
 Valid Range:   0-xxxxxxx (0=off)
 Default Value: 256
 
-Driver copies all packets below or equaling this size to a fresh Rx
+The driver copies all packets at or below this size to a fresh RX
 buffer before handing it up the stack.
 
 This parameter is different than other parameters, in that it is a
@@ -223,17 +229,17 @@
 
 WriteProtectNVM
 ---------------
-Valid Range: 0-1
-Default Value: 1 (enabled)
+Valid Range: 0,1
+Default Value: 1
 
-Set the hardware to ignore all write/erase cycles to the GbE region in the
-ICHx NVM (non-volatile memory).  This feature can be disabled by the
-WriteProtectNVM module parameter (enabled by default) only after a hardware
-reset, but the machine must be power cycled before trying to enable writes.
-
-Note: the kernel boot option iomem=relaxed may need to be set if the kernel
-config option CONFIG_STRICT_DEVMEM=y, if the root user wants to write the
-NVM from user space via ethtool.
+If set to 1, configure the hardware to ignore all write/erase cycles to the
+GbE region in the ICHx NVM (in order to prevent accidental corruption of the
+NVM). This feature can be disabled by setting the parameter to 0 during
+initial driver load.
+NOTE: The machine must be power cycled (full off/on) when enabling NVM writes
+by setting the parameter to zero. Once the NVM has been locked (the parameter
+set to 1 when the driver loads), it cannot be unlocked except via a power
+cycle.
 
 Additional Configurations
 =========================
@@ -259,7 +265,6 @@
   - Some adapters limit Jumbo Frames sized packets to a maximum of
     4096 bytes and some adapters do not support Jumbo Frames.
 
-
   Ethtool
   -------
   The driver utilizes the ethtool interface for driver configuration and
@@ -283,8 +288,7 @@
   loaded when shutting down or rebooting the system.
 
   In most cases Wake On LAN is only supported on port A for multiple port
-  adapters. To verify if a port supports Wake on LAN run ethtool eth<X>.
-
+  adapters. To verify if a port supports Wake on LAN, run ethtool eth<X>.
 
 Support
 =======
diff --git a/Documentation/networking/igb.txt b/Documentation/networking/igb.txt
index ab2d718..4a5e29c 100644
--- a/Documentation/networking/igb.txt
+++ b/Documentation/networking/igb.txt
@@ -36,6 +36,7 @@
 This parameter adds support for SR-IOV.  It causes the driver to spawn up to
-max_vfs worth of virtual function.
+max_vfs worth of virtual functions.
 
+
 Additional Configurations
 =========================
 
@@ -60,9 +61,10 @@
   Ethtool
   -------
   The driver utilizes the ethtool interface for driver configuration and
-  diagnostics, as well as displaying statistical information.
+  diagnostics, as well as displaying statistical information. The latest
+  version of ethtool can be found at:
 
-  http://sourceforge.net/projects/gkernel.
+  http://ftp.kernel.org/pub/software/network/ethtool/
 
   Enabling Wake on LAN* (WoL)
   ---------------------------
@@ -91,31 +93,6 @@
   REQUIREMENTS: MSI-X support is required for Multiqueue. If MSI-X is not
   found, the system will fallback to MSI or to Legacy interrupts.
 
-  LRO
-  ---
-  Large Receive Offload (LRO) is a technique for increasing inbound throughput
-  of high-bandwidth network connections by reducing CPU overhead. It works by
-  aggregating multiple incoming packets from a single stream into a larger
-  buffer before they are passed higher up the networking stack, thus reducing
-  the number of packets that have to be processed. LRO combines multiple
-  Ethernet frames into a single receive in the stack, thereby potentially
-  decreasing CPU utilization for receives.
-
-  NOTE: You need to have inet_lro enabled via either the CONFIG_INET_LRO or
-  CONFIG_INET_LRO_MODULE kernel config option. Additionally, if
-  CONFIG_INET_LRO_MODULE is used, the inet_lro module needs to be loaded
-  before the igb driver.
-
-  You can verify that the driver is using LRO by looking at these counters in
-  Ethtool:
-
-  lro_aggregated - count of total packets that were combined
-  lro_flushed - counts the number of packets flushed out of LRO
-  lro_no_desc - counts the number of times an LRO descriptor was not available
-  for the LRO packet
-
-  NOTE: IPv6 and UDP are not supported by LRO.
-
 Support
 =======
 
diff --git a/Documentation/networking/igbvf.txt b/Documentation/networking/igbvf.txt
index 0560281..694817b 100644
--- a/Documentation/networking/igbvf.txt
+++ b/Documentation/networking/igbvf.txt
@@ -58,7 +58,9 @@
   Ethtool
   -------
   The driver utilizes the ethtool interface for driver configuration and
-  diagnostics, as well as displaying statistical information.
+  diagnostics, as well as displaying statistical information.  Ethtool
+  version 3.0 or later is required for this functionality, although we
+  strongly recommend downloading the latest version from:
 
   http://sourceforge.net/projects/gkernel.
 
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index c7165f4..2193a5d 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -20,6 +20,15 @@
 min_pmtu - INTEGER
 	default 562 - minimum discovered Path MTU
 
+route/max_size - INTEGER
+	Maximum number of routes allowed in the kernel.  Increase
+	this when using large numbers of interfaces and/or routes.
+
+neigh/default/gc_thresh3 - INTEGER
+	Maximum number of neighbor entries allowed.  Increase this
+	when using large numbers of interfaces and when communicating
+	with large numbers of directly-connected peers.
+
 mtu_expires - INTEGER
 	Time, in seconds, that cached PMTU information is kept.
 
@@ -135,6 +144,7 @@
 	Count buffering overhead as bytes/2^tcp_adv_win_scale
 	(if tcp_adv_win_scale > 0) or bytes-bytes/2^(-tcp_adv_win_scale),
 	if it is <= 0.
+	Possible values are [-31, 31], inclusive.
 	Default: 2
 
 tcp_allowed_congestion_control - STRING
@@ -698,10 +708,28 @@
 	Change the maximum number of multicast groups we can subscribe to.
 	Default: 20
 
-conf/interface/*  changes special settings per interface (where "interface" is
-		  the name of your network interface)
-conf/all/*	  is special, changes the settings for all interfaces
+	The theoretical maximum value is bounded by the requirement to send
+	a membership report in a single datagram (i.e. the report can't span
+	multiple datagrams, at the risk of confusing the switch and leaving
+	groups you don't intend to leave).
+
+	The number of supported groups 'M' is bounded by the number of group
+	report entries you can fit into a single datagram of 65535 bytes.
+
+	M = (65536 - sizeof(IP header)) / sizeof(Group record)
+
+	Group records are variable length, with a minimum of 12 bytes.
+	So net.ipv4.igmp_max_memberships should not be set higher than:
+
+	(65536 - 24) / 12 = 5459
+
+	The value 5459 assumes no IP header options, so in practice
+	this number may be lower.
+
+	conf/interface/*  changes special settings per interface (where
+	"interface" is the name of your network interface)
+
+	conf/all/*	  is special, changes the settings for all interfaces
 
 log_martians - BOOLEAN
 	Log packets with impossible addresses to kernel log.
diff --git a/Documentation/networking/ixgbe.txt b/Documentation/networking/ixgbe.txt
index eeb6868..9ade280 100644
--- a/Documentation/networking/ixgbe.txt
+++ b/Documentation/networking/ixgbe.txt
@@ -1,107 +1,126 @@
 Linux Base Driver for 10 Gigabit PCI Express Intel(R) Network Connection
 ========================================================================
 
-March 10, 2009
-
+Intel 10 Gigabit Linux driver.
+Copyright(c) 1999 - 2010 Intel Corporation.
 
 Contents
 ========
 
-- In This Release
 - Identifying Your Adapter
-- Building and Installation
 - Additional Configurations
+- Performance Tuning
+- Known Issues
 - Support
 
-
-
-In This Release
-===============
-
-This file describes the ixgbe Linux Base Driver for the 10 Gigabit PCI
-Express Intel(R) Network Connection.  This driver includes support for
-Itanium(R)2-based systems.
-
-For questions related to hardware requirements, refer to the documentation
-supplied with your 10 Gigabit adapter.  All hardware requirements listed apply
-to use with Linux.
-
-The following features are available in this kernel:
- - Native VLANs
- - Channel Bonding (teaming)
- - SNMP
- - Generic Receive Offload
- - Data Center Bridging
-
-Channel Bonding documentation can be found in the Linux kernel source:
-/Documentation/networking/bonding.txt
-
-Ethtool, lspci, and ifconfig can be used to display device and driver
-specific information.
-
-
 Identifying Your Adapter
 ========================
 
-This driver supports devices based on the 82598 controller and the 82599
-controller.
+The driver in this release is compatible with 82598 and 82599-based Intel
+Network Connections.
 
-For specific information on identifying which adapter you have, please visit:
+For more information on how to identify your adapter, go to the Adapter &
+Driver ID Guide at:
 
-    http://support.intel.com/support/network/sb/CS-008441.htm
+    http://support.intel.com/support/network/sb/CS-012904.htm
+
+SFP+ Devices with Pluggable Optics
+----------------------------------
+
+82599-BASED ADAPTERS
+
+NOTES: If your 82599-based Intel(R) Network Adapter came with Intel optics, or
+is an Intel(R) Ethernet Server Adapter X520-2, then it only supports Intel
+optics and/or the direct attach cables listed below.
+
+When 82599-based SFP+ devices are connected back to back, they should be set to
+the same Speed setting via Ethtool. Results may vary if you mix speed settings.
+
+Supplier    Type                                             Part Numbers
+
+SR Modules
+Intel       DUAL RATE 1G/10G SFP+ SR (bailed)                FTLX8571D3BCV-IT
+Intel       DUAL RATE 1G/10G SFP+ SR (bailed)                AFBR-703SDDZ-IN1
+Intel       DUAL RATE 1G/10G SFP+ SR (bailed)                AFBR-703SDZ-IN2
+LR Modules
+Intel       DUAL RATE 1G/10G SFP+ LR (bailed)                FTLX1471D3BCV-IT
+Intel       DUAL RATE 1G/10G SFP+ LR (bailed)                AFCT-701SDDZ-IN1
+Intel       DUAL RATE 1G/10G SFP+ LR (bailed)                AFCT-701SDZ-IN2
+
+The following is a list of 3rd party SFP+ modules and direct attach cables that
+have received some testing. Not all modules are applicable to all devices.
+
+Supplier   Type                                              Part Numbers
+
+Finisar    SFP+ SR bailed, 10g single rate                   FTLX8571D3BCL
+Avago      SFP+ SR bailed, 10g single rate                   AFBR-700SDZ
+Finisar    SFP+ LR bailed, 10g single rate                   FTLX1471D3BCL
+
+Finisar    DUAL RATE 1G/10G SFP+ SR (No Bail)                FTLX8571D3QCV-IT
+Avago      DUAL RATE 1G/10G SFP+ SR (No Bail)                AFBR-703SDZ-IN1
+Finisar    DUAL RATE 1G/10G SFP+ LR (No Bail)                FTLX1471D3QCV-IT
+Avago      DUAL RATE 1G/10G SFP+ LR (No Bail)                AFCT-701SDZ-IN1
+Finisar    1000BASE-T SFP                                    FCLF8522P2BTL
+Avago      1000BASE-T SFP                                    ABCU-5710RZ
+
+82599-based adapters support all passive and active limiting direct attach
+cables that comply with SFF-8431 v4.1 and SFF-8472 v10.4 specifications.
+
+Laser turns off for SFP+ when ifconfig down
+-------------------------------------------
+"ifconfig down" turns off the laser for 82599-based SFP+ fiber adapters.
+"ifconfig up" turns on the later.
 
 
-Building and Installation
-=========================
+82598-BASED ADAPTERS
 
-select m for "Intel(R) 10GbE PCI Express adapters support" located at:
-      Location:
-        -> Device Drivers
-          -> Network device support (NETDEVICES [=y])
-            -> Ethernet (10000 Mbit) (NETDEV_10000 [=y])
+NOTES for 82598-Based Adapters:
+- Intel(R) Network Adapters that support removable optical modules only support
+  their original module type (i.e., the Intel(R) 10 Gigabit SR Dual Port
+  Express Module only supports SR optical modules). If you plug in a different
+  type of module, the driver will not load.
+- Hot Swapping/hot plugging optical modules is not supported.
+- Only single speed, 10 gigabit modules are supported.
+- LAN on Motherboard (LOMs) may support DA, SR, or LR modules. Other module
+  types are not supported. Please see your system documentation for details.
 
-1. make modules & make modules_install
+The following is a list of 3rd party SFP+ modules and direct attach cables that
+have received some testing. Not all modules are applicable to all devices.
 
-2. Load the module:
+Supplier   Type                                              Part Numbers
 
-# modprobe ixgbe
+Finisar    SFP+ SR bailed, 10g single rate                   FTLX8571D3BCL
+Avago      SFP+ SR bailed, 10g single rate                   AFBR-700SDZ
+Finisar    SFP+ LR bailed, 10g single rate                   FTLX1471D3BCL
 
-   The insmod command can be used if the full
-   path to the driver module is specified.  For example:
+82598-based adapters support all passive direct attach cables that comply
+with SFF-8431 v4.1 and SFF-8472 v10.4 specifications. Active direct attach
+cables are not supported.
 
-     insmod /lib/modules/<KERNEL VERSION>/kernel/drivers/net/ixgbe/ixgbe.ko
 
-   With 2.6 based kernels also make sure that older ixgbe drivers are
-   removed from the kernel, before loading the new module:
+Flow Control
+------------
+Ethernet Flow Control (IEEE 802.3x) can be configured with ethtool to enable
+receiving and transmitting pause frames for ixgbe. When TX is enabled, PAUSE
+frames are generated when the receive packet buffer crosses a predefined
+threshold.  When RX is enabled, the transmit unit will halt for the time delay
+specified when a PAUSE frame is received.
 
-     rmmod ixgbe; modprobe ixgbe
+Flow control is enabled by default. If you want to disable flow control
+towards a flow control capable link partner, use ethtool:
 
-3. Assign an IP address to the interface by entering the following, where
-   x is the interface number:
+     ethtool -A eth? autoneg off rx off tx off
 
-     ifconfig ethx <IP_address>
-
-4. Verify that the interface works. Enter the following, where <IP_address>
-   is the IP address for another machine on the same subnet as the interface
-   that is being tested:
-
-     ping  <IP_address>
-
+NOTE: For 82598 backplane cards entering 1 gig mode, flow control default
+behavior is changed to off.  Flow control in 1 gig mode on these devices can
+lead to Tx hangs.
 
 Additional Configurations
 =========================
 
-  Viewing Link Messages
-  ---------------------
-  Link messages will not be displayed to the console if the distribution is
-  restricting system messages. In order to see network driver link messages on
-  your console, set dmesg to eight by entering the following:
-
-       dmesg -n 8
-
-  NOTE: This setting is not saved across reboots.
-
-
   Jumbo Frames
   ------------
   The driver supports Jumbo Frames for all adapters. Jumbo Frames support is
@@ -123,13 +142,8 @@
   other protocols besides TCP.  It's also safe to use with configurations that
   are problematic for LRO, namely bridging and iSCSI.
 
-  GRO is enabled by default in the driver.  Future versions of ethtool will
-  support disabling and re-enabling GRO on the fly.
-
-
   Data Center Bridging, aka DCB
   -----------------------------
-
   DCB is a configuration Quality of Service implementation in hardware.
   It uses the VLAN priority tag (802.1p) to filter traffic.  That means
   that there are 8 different priorities that traffic can be filtered into.
@@ -163,24 +177,71 @@
 
         http://e1000.sf.net
 
-
   Ethtool
   -------
   The driver utilizes the ethtool interface for driver configuration and
-  diagnostics, as well as displaying statistical information.  Ethtool
-  version 3.0 or later is required for this functionality.
+  diagnostics, as well as displaying statistical information. The latest
+  ethtool version is required for this functionality.
 
   The latest release of ethtool can be found from
   http://sourceforge.net/projects/gkernel.
 
-
-  NAPI
+  FCoE
   ----
+  This release of the ixgbe driver contains new code to enable users to use
+  Fibre Channel over Ethernet (FCoE) and Data Center Bridging (DCB)
+  functionality that is supported by the 82598-based hardware.  This code has
+  no default effect on the regular driver operation, and configuring DCB and
+  FCoE is outside the scope of this driver README. Refer to
+  http://www.open-fcoe.org/ for FCoE project information and contact
+  e1000-eedc@lists.sourceforge.net for DCB information.
 
-  NAPI (Rx polling mode) is supported in the ixgbe driver.  NAPI is enabled
-  by default in the driver.
+  MAC and VLAN anti-spoofing feature
+  ----------------------------------
+  When a malicious driver attempts to send a spoofed packet, it is dropped by
+  the hardware and not transmitted.  An interrupt is sent to the PF driver
+  notifying it of the spoof attempt.
 
-  See www.cyberus.ca/~hadi/usenix-paper.tgz for more information on NAPI.
+  When a spoofed packet is detected, the PF driver will send the following
+  message to the system log (displayed by the "dmesg" command):
+
+  Spoof event(s) detected on VF (n)
+
+  where n is the number of the VF that attempted the spoofing.
+
+
+Performance Tuning
+==================
+
+An excellent article on performance tuning can be found at:
+
+http://www.redhat.com/promo/summit/2008/downloads/pdf/Thursday/Mark_Wagner.pdf
+
+
+Known Issues
+============
+
+  Enabling SR-IOV in a 32-bit Microsoft* Windows* Server 2008 Guest OS using
+  Intel (R) 82576-based GbE or Intel (R) 82599-based 10GbE controller under KVM
+  -----------------------------------------------------------------------------
+  KVM Hypervisor/VMM supports direct assignment of a PCIe device to a VM.  This
+  includes traditional PCIe devices, as well as SR-IOV-capable devices using
+  Intel 82576-based and 82599-based controllers.
+
+  While direct assignment of a PCIe device or an SR-IOV Virtual Function (VF)
+  to a Linux-based VM running a 2.6.32 or later kernel works fine, there is a
+  known issue with Microsoft Windows Server 2008 VMs that results in a "yellow
+  bang" error. The problem is not within the Intel driver or the SR-IOV logic
+  of the VMM, but within KVM itself: KVM emulates an older CPU model for the
+  guests, and this older CPU model does not support MSI-X interrupts, which is
+  a requirement for Intel SR-IOV.
+
+  If you wish to use the Intel 82576 or 82599-based controllers in SR-IOV mode
+  with KVM and a Microsoft Windows Server 2008 guest, try the following
+  workaround: tell KVM to emulate a different model of CPU when using qemu to
+  create the KVM guest:
+
+       "-cpu qemu64,model=13"
 
 
 Support
diff --git a/Documentation/networking/ixgbevf.txt b/Documentation/networking/ixgbevf.txt
index 21dd5d1..5a91a41 100644
--- a/Documentation/networking/ixgbevf.txt
+++ b/Documentation/networking/ixgbevf.txt
@@ -35,10 +35,6 @@
 Known Issues/Troubleshooting
 ============================
 
-  Unloading Physical Function (PF) Driver Causes System Reboots When VM is
-  Running and VF is Loaded on the VM
-  ------------------------------------------------------------------------
-  Do not unload the PF driver (ixgbe) while VFs are assigned to guests.
 
 Support
 =======
diff --git a/Documentation/networking/stmmac.txt b/Documentation/networking/stmmac.txt
index 7ee770b..80a7a34 100644
--- a/Documentation/networking/stmmac.txt
+++ b/Documentation/networking/stmmac.txt
@@ -7,7 +7,7 @@
 (Synopsys IP blocks); it has been fully tested on STLinux platforms.
 
 Currently this network device driver is for all STM embedded MAC/GMAC
-(7xxx SoCs).
+(7xxx SoCs). Other platforms are starting to use it as well, e.g. ARM SPEAr.
 
 DWC Ether MAC 10/100/1000 Universal version 3.41a and DWC Ether MAC 10/100
 Universal version 4.0 have been used for developing the first code
@@ -95,9 +95,14 @@
 driver's Header file in include/linux directory.
 
 struct plat_stmmacenet_data {
-        int bus_id;
-        int pbl;
-        int has_gmac;
+	int bus_id;
+	int pbl;
+	int clk_csr;
+	int has_gmac;
+	int enh_desc;
+	int tx_coe;
+	int bugged_jumbo;
+	int pmt;
         void (*fix_mac_speed)(void *priv, unsigned int speed);
         void (*bus_setup)(unsigned long ioaddr);
 #ifdef CONFIG_STM_DRIVERS
@@ -114,6 +119,12 @@
   registers (on STM platforms);
 - has_gmac: GMAC core is on board (get it at run-time in the next step);
 - bus_id: bus identifier.
+- tx_coe: core is able to perform the TX csum in HW.
+- enh_desc: if set, the MAC will use the enhanced descriptor structure.
+- clk_csr: CSR clock range selection.
+- bugged_jumbo: some HWs are not able to perform the csum in HW for
+  over-sized frames due to limited buffer sizes. If this flag is set,
+  the csum will be done in SW on jumbo frames.
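+
+A hypothetical board file (values purely illustrative, not taken from a
+real platform) could fill the structure like this:
+
+static struct plat_stmmacenet_data foo_stmmac_data = {
+	.bus_id       = 0,
+	.pbl          = 32,	/* DMA programmable burst length */
+	.clk_csr      = 0,	/* CSR clock range selection */
+	.has_gmac     = 1,	/* GMAC core on board */
+	.enh_desc     = 1,	/* use the enhanced descriptors */
+	.tx_coe       = 1,	/* TX csum done in HW */
+	.bugged_jumbo = 0,	/* HW csum works on jumbo frames */
+	.pmt          = 1,	/* assumed: PMT (wake-up) support */
+};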
 
 struct plat_stmmacphy_data {
         int bus_id;
@@ -131,13 +142,28 @@
 - interface: physical MII interface mode;
 - phy_reset: hook to reset HW function.
 
+SOURCES:
+- Kconfig
+- Makefile
+- stmmac_main.c: main network device driver;
+- stmmac_mdio.c: mdio functions;
+- stmmac_ethtool.c: ethtool support;
+- stmmac_timer.[ch]: timer code used for mitigating the driver dma interrupts.
+  Only tested on ST40-based platforms.
+- stmmac.h: private driver structure;
+- common.h: common definitions and VFTs;
+- descs.h: descriptor structure definitions;
+- dwmac1000_core.c: GMAC core functions;
+- dwmac1000_dma.c:  dma functions for the GMAC chip;
+- dwmac1000.h: specific header file for the GMAC;
+- dwmac100_core.c: MAC 100 core and dma code;
+- dwmac100_dma.c: dma functions for the MAC chip;
+- dwmac100.h: specific header file for the MAC;
+- dwmac_lib.c: generic DMA functions shared among chips
+- enh_desc.c: functions for handling enhanced descriptors
+- norm_desc.c: functions for handling normal descriptors
+
 TODO:
-- Continue to make the driver more generic and suitable for other Synopsys
-  Ethernet controllers used on other architectures (i.e. ARM).
-- 10G controllers are not supported.
-- MAC uses Normal descriptors and GMAC uses enhanced ones.
-  This is a limit that should be reviewed. MAC could want to
-  use the enhanced structure.
-- Checksumming: Rx/Tx csum is done in HW in case of GMAC only.
+- XGMAC controller is not supported.
 - Review the timer optimisation code to use an embedded device that seems to be
   available in new chip generations.
diff --git a/Documentation/rbtree.txt b/Documentation/rbtree.txt
index 221f38b..19f8278 100644
--- a/Documentation/rbtree.txt
+++ b/Documentation/rbtree.txt
@@ -21,8 +21,8 @@
 To quote Linux Weekly News:
 
     There are a number of red-black trees in use in the kernel.
-    The anticipatory, deadline, and CFQ I/O schedulers all employ
-    rbtrees to track requests; the packet CD/DVD driver does the same.
+    The deadline and CFQ I/O schedulers employ rbtrees to
+    track requests; the packet CD/DVD driver does the same.
     The high-resolution timer code uses an rbtree to organize outstanding
     timer requests.  The ext3 filesystem tracks directory entries in a
     red-black tree.  Virtual memory areas (VMAs) are tracked with red-black
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index 3894eaa..209e158 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -28,6 +28,7 @@
 - core_uses_pid
 - ctrl-alt-del
 - dentry-state
+- dmesg_restrict
 - domainname
 - hostname
 - hotplug
@@ -213,6 +214,19 @@
 
 ==============================================================
 
+dmesg_restrict:
+
+This toggle indicates whether unprivileged users are prevented from using
+dmesg(8) to view messages from the kernel's log buffer.  When
+dmesg_restrict is set to (0) there are no restrictions.  When
+dmesg_restrict is set to (1), users must have CAP_SYS_ADMIN to use
+dmesg(8).
+
+The kernel config option CONFIG_SECURITY_DMESG_RESTRICT sets the default
+value of dmesg_restrict.
+
+==============================================================
+
 domainname & hostname:
 
 These files can be used to set the NIS/YP domainname and the
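A minimal userspace sketch of the dmesg_restrict toggle documented above,
assuming only the /proc/sys/kernel/dmesg_restrict file named in that
section (the program itself needs privilege to write there):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/dmesg_restrict", "w");

	if (!f) {
		perror("dmesg_restrict");
		return 1;
	}
	fputs("1\n", f);	/* dmesg(8) now requires CAP_SYS_ADMIN */
	fclose(f);
	return 0;
}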
diff --git a/MAINTAINERS b/MAINTAINERS
index 4d8bde3..f16ce8f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -161,7 +161,7 @@
 L:	linux-serial@vger.kernel.org
 W:	http://serial.sourceforge.net
 S:	Maintained
-T:	quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty-2.6.git
 F:	drivers/serial/8250*
 F:	include/linux/serial_8250.h
 
@@ -1365,7 +1365,7 @@
 
 BONDING DRIVER
 M:	Jay Vosburgh <fubar@us.ibm.com>
-L:	bonding-devel@lists.sourceforge.net
+L:	netdev@vger.kernel.org
 W:	http://sourceforge.net/projects/bonding/
 S:	Supported
 F:	drivers/net/bonding/
@@ -1835,6 +1835,13 @@
 S:	Supported
 F:	drivers/net/cxgb4vf/
 
+STMMAC ETHERNET DRIVER
+M:	Giuseppe Cavallaro <peppe.cavallaro@st.com>
+L:	netdev@vger.kernel.org
+W:	http://www.stlinux.com
+S:	Supported
+F:	drivers/net/stmmac/
+
 CYBERPRO FB DRIVER
 M:	Russell King <linux@arm.linux.org.uk>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -3114,6 +3121,8 @@
 M:	John Ronciak <john.ronciak@intel.com>
 L:	e1000-devel@lists.sourceforge.net
 W:	http://e1000.sourceforge.net/
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-2.6.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next-2.6.git
 S:	Supported
 F:	Documentation/networking/e100.txt
 F:	Documentation/networking/e1000.txt
@@ -5682,7 +5691,7 @@
 
 STAGING SUBSYSTEM
 M:	Greg Kroah-Hartman <gregkh@suse.de>
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging-next-2.6.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging-2.6.git
 L:	devel@driverdev.osuosl.org
 S:	Maintained
 F:	drivers/staging/
@@ -5916,7 +5925,7 @@
 TTY LAYER
 M:	Greg Kroah-Hartman <gregkh@suse.de>
 S:	Maintained
-T:	quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty-2.6.git
 F:	drivers/char/tty_*
 F:	drivers/serial/serial_core.c
 F:	include/linux/serial_core.h
@@ -6239,7 +6248,7 @@
 M:	Greg Kroah-Hartman <gregkh@suse.de>
 L:	linux-usb@vger.kernel.org
 W:	http://www.linux-usb.org
-T:	quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6.git
 S:	Supported
 F:	Documentation/usb/
 F:	drivers/net/usb/
@@ -6604,14 +6613,14 @@
 
 XEN PCI SUBSYSTEM
 M:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-L:	xen-devel@lists.xensource.com
+L:	xen-devel@lists.xensource.com (moderated for non-subscribers)
 S:	Supported
 F:	arch/x86/pci/*xen*
 F:	drivers/pci/*xen*
 
 XEN SWIOTLB SUBSYSTEM
 M:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-L:	xen-devel@lists.xensource.com
+L:	xen-devel@lists.xensource.com (moderated for non-subscribers)
 S:	Supported
 F:	arch/x86/xen/*swiotlb*
 F:	drivers/xen/*swiotlb*
@@ -6619,7 +6628,7 @@
 XEN HYPERVISOR INTERFACE
 M:	Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
 M:	Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-L:	xen-devel@lists.xen.org
+L:	xen-devel@lists.xensource.com (moderated for non-subscribers)
 L:	virtualization@lists.osdl.org
 S:	Supported
 F:	arch/x86/xen/
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index a19a526..8ae3d48 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -6,7 +6,7 @@
 	select HAVE_MEMBLOCK
 	select RTC_LIB
 	select SYS_SUPPORTS_APM_EMULATION
-	select GENERIC_ATOMIC64 if (!CPU_32v6K)
+	select GENERIC_ATOMIC64 if (!CPU_32v6K || !AEABI)
 	select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
 	select HAVE_ARCH_KGDB
 	select HAVE_KPROBES if (!XIP_KERNEL)
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index ada6359..772f95f 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -251,15 +251,16 @@
 		writel(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
 
 	/*
-	 * Set priority on all interrupts.
+	 * Set priority on all global interrupts.
 	 */
-	for (i = 0; i < max_irq; i += 4)
+	for (i = 32; i < max_irq; i += 4)
 		writel(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);
 
 	/*
-	 * Disable all interrupts.
+	 * Disable all interrupts.  Leave the PPI and SGIs alone
+	 * as these enables are banked registers.
 	 */
-	for (i = 0; i < max_irq; i += 32)
+	for (i = 32; i < max_irq; i += 32)
 		writel(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
 
 	/*
@@ -277,11 +278,30 @@
 
 void __cpuinit gic_cpu_init(unsigned int gic_nr, void __iomem *base)
 {
+	void __iomem *dist_base;
+	int i;
+
 	if (gic_nr >= MAX_GIC_NR)
 		BUG();
 
+	dist_base = gic_data[gic_nr].dist_base;
+	BUG_ON(!dist_base);
+
 	gic_data[gic_nr].cpu_base = base;
 
+	/*
+	 * Deal with the banked PPI and SGI interrupts - disable all
+	 * PPI interrupts, ensure all SGI interrupts are enabled.
+	 */
+	writel(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
+	writel(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);
+
+	/*
+	 * Set priority on PPI and SGI interrupts
+	 */
+	for (i = 0; i < 32; i += 4)
+		writel(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4);
+
 	writel(0xf0, base + GIC_CPU_PRIMASK);
 	writel(1, base + GIC_CPU_CTRL);
 }
diff --git a/arch/arm/include/asm/hardware/it8152.h b/arch/arm/include/asm/hardware/it8152.h
index 6700c7f..21fa272 100644
--- a/arch/arm/include/asm/hardware/it8152.h
+++ b/arch/arm/include/asm/hardware/it8152.h
@@ -75,7 +75,7 @@
   IT8152_PD_IRQ(1)  USB (USBR)
   IT8152_PD_IRQ(0)  Audio controller (ACR)
  */
-#define IT8152_IRQ(x)   (IRQ_BOARD_END + (x))
+#define IT8152_IRQ(x)   (IRQ_BOARD_START + (x))
 
 /* IRQ-sources in 3 groups - local devices, LPC (serial), and external PCI */
 #define IT8152_LD_IRQ_COUNT     9
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 54593b0..21e3a4a 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -748,8 +748,7 @@
 		breakpoint_handler(addr, regs);
 		break;
 	case ARM_ENTRY_ASYNC_WATCHPOINT:
-		WARN_ON("Asynchronous watchpoint exception taken. "
-			"Debugging results may be unreliable");
+		WARN(1, "Asynchronous watchpoint exception taken. Debugging results may be unreliable\n");
 	case ARM_ENTRY_SYNC_WATCHPOINT:
 		watchpoint_handler(addr, regs);
 		break;
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 49643b1..07a5035 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -1749,7 +1749,7 @@
 static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc,
 					enum armv7_counters counter)
 {
-	int ret;
+	int ret = 0;
 
 	if (counter == ARMV7_CYCLE_COUNTER)
 		ret = pmnc & ARMV7_FLAG_C;
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
index 20b7411..c2e112e 100644
--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -28,7 +28,7 @@
 
 	/* only go to a higher address on the stack */
 	low = frame->sp;
-	high = ALIGN(low, THREAD_SIZE) + THREAD_SIZE;
+	high = ALIGN(low, THREAD_SIZE);
 
 	/* check current frame pointer is within bounds */
 	if (fp < (low + 12) || fp + 4 >= high)
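Since kernel stacks are THREAD_SIZE aligned, ALIGN(low, THREAD_SIZE) already
yields the top of the current stack; the removed "+ THREAD_SIZE" pushed the
bound one full stack higher and let the frame-pointer check accept addresses
beyond the real stack.  The same correction is applied in unwind.c below.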
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index cda78d5..446aee9 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -53,10 +53,7 @@
 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
 {
 #ifdef CONFIG_KALLSYMS
-	char sym1[KSYM_SYMBOL_LEN], sym2[KSYM_SYMBOL_LEN];
-	sprint_symbol(sym1, where);
-	sprint_symbol(sym2, from);
-	printk("[<%08lx>] (%s) from [<%08lx>] (%s)\n", where, sym1, from, sym2);
+	printk("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
 #else
 	printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
 #endif
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index 2a16176..d2cb0b3 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -279,7 +279,7 @@
 
 	/* only go to a higher address on the stack */
 	low = frame->sp;
-	high = ALIGN(low, THREAD_SIZE) + THREAD_SIZE;
+	high = ALIGN(low, THREAD_SIZE);
 
 	pr_debug("%s(pc = %08lx lr = %08lx sp = %08lx)\n", __func__,
 		 frame->pc, frame->lr, frame->sp);
diff --git a/arch/arm/mach-ep93xx/include/mach/dma.h b/arch/arm/mach-ep93xx/include/mach/dma.h
index 3a5961d..5e31b2b 100644
--- a/arch/arm/mach-ep93xx/include/mach/dma.h
+++ b/arch/arm/mach-ep93xx/include/mach/dma.h
@@ -1,5 +1,13 @@
-/*
- * arch/arm/mach-ep93xx/include/mach/dma.h
+/**
+ * DOC: EP93xx DMA M2P memory to peripheral and peripheral to memory engine
+ *
+ * The EP93xx DMA M2P subsystem handles DMA transfers between memory and
+ * peripherals. DMA M2P channels are available for audio, UARTs and IrDA.
+ * See chapter 10 of the EP93xx users guide for full details on the DMA M2P
+ * engine.
+ *
+ * See sound/soc/ep93xx/ep93xx-pcm.c for an example use of the DMA M2P code.
+ *
  */
 
 #ifndef __ASM_ARCH_DMA_H
@@ -8,12 +16,34 @@
 #include <linux/list.h>
 #include <linux/types.h>
 
+/**
+ * struct ep93xx_dma_buffer - Information about a buffer to be transferred
+ * using the DMA M2P engine
+ *
+ * @list: Entry in DMA buffer list
+ * @bus_addr: Physical address of the buffer
+ * @size: Size of the buffer in bytes
+ */
 struct ep93xx_dma_buffer {
 	struct list_head	list;
 	u32			bus_addr;
 	u16			size;
 };
 
+/**
+ * struct ep93xx_dma_m2p_client - Information about a DMA M2P client
+ *
+ * @name: Unique name for this client
+ * @flags: Client flags
+ * @cookie: User data to pass to callback functions
+ * @buffer_started: Non-NULL function to call when a transfer is started.
+ * 			The arguments are the user data cookie and the DMA
+ *			buffer which is starting.
+ * @buffer_finished: Non-NULL function to call when a transfer is completed.
+ *			The arguments are the user data cookie, the DMA buffer
+ *			which has completed, and a boolean flag indicating if
+ *			the transfer had an error.
+ */
 struct ep93xx_dma_m2p_client {
 	char			*name;
 	u8			flags;
@@ -24,10 +54,11 @@
 					struct ep93xx_dma_buffer *buf,
 					int bytes, int error);
 
-	/* Internal to the DMA code.  */
+	/* private: Internal use only */
 	void			*channel;
 };
 
+/* DMA M2P ports */
 #define EP93XX_DMA_M2P_PORT_I2S1	0x00
 #define EP93XX_DMA_M2P_PORT_I2S2	0x01
 #define EP93XX_DMA_M2P_PORT_AAC1	0x02
@@ -39,18 +70,80 @@
 #define EP93XX_DMA_M2P_PORT_UART3	0x08
 #define EP93XX_DMA_M2P_PORT_IRDA	0x09
 #define EP93XX_DMA_M2P_PORT_MASK	0x0f
-#define EP93XX_DMA_M2P_TX		0x00
-#define EP93XX_DMA_M2P_RX		0x10
-#define EP93XX_DMA_M2P_ABORT_ON_ERROR	0x20
-#define EP93XX_DMA_M2P_IGNORE_ERROR	0x40
-#define EP93XX_DMA_M2P_ERROR_MASK	0x60
 
-int  ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client *m2p);
+/* DMA M2P client flags */
+#define EP93XX_DMA_M2P_TX		0x00	/* Memory to peripheral */
+#define EP93XX_DMA_M2P_RX		0x10	/* Peripheral to memory */
+
+/*
+ * DMA M2P client error handling flags. See the EP93xx users guide
+ * documentation on the DMA M2P CONTROL register for more details
+ */
+#define EP93XX_DMA_M2P_ABORT_ON_ERROR	0x20	/* Abort on peripheral error */
+#define EP93XX_DMA_M2P_IGNORE_ERROR	0x40	/* Ignore peripheral errors */
+#define EP93XX_DMA_M2P_ERROR_MASK	0x60	/* Mask of error bits */
+
+/**
+ * ep93xx_dma_m2p_client_register - Register a client with the DMA M2P
+ * subsystem
+ *
+ * @m2p: Client information to register
+ * Returns 0 on success.
+ *
+ * The DMA M2P subsystem allocates a channel and an interrupt line for the DMA
+ * client.
+ */
+int ep93xx_dma_m2p_client_register(struct ep93xx_dma_m2p_client *m2p);
+
+/**
+ * ep93xx_dma_m2p_client_unregister - Unregister a client from the DMA M2P
+ * subsystem
+ *
+ * @m2p: Client to unregister
+ *
+ * Any transfers currently in progress will be completed in hardware, but
+ * ignored in software.
+ */
 void ep93xx_dma_m2p_client_unregister(struct ep93xx_dma_m2p_client *m2p);
+
+/**
+ * ep93xx_dma_m2p_submit - Submit a DMA M2P transfer
+ *
+ * @m2p: DMA Client to submit the transfer on
+ * @buf: DMA Buffer to submit
+ *
+ * If the current or next transfer positions are free on the M2P client then
+ * the transfer is started immediately. If not, the transfer is added to the
+ * list of pending transfers. This function must not be called from the
+ * buffer_finished callback for an M2P channel.
+ *
+ */
 void ep93xx_dma_m2p_submit(struct ep93xx_dma_m2p_client *m2p,
 			   struct ep93xx_dma_buffer *buf);
+
+/**
+ * ep93xx_dma_m2p_submit_recursive - Put a DMA transfer on the pending list
+ * for an M2P channel
+ *
+ * @m2p: DMA Client to submit the transfer on
+ * @buf: DMA Buffer to submit
+ *
+ * This function must only be called from the buffer_finished callback for an
+ * M2P channel. It is commonly used to add the next transfer in a chained list
+ * of DMA transfers.
+ */
 void ep93xx_dma_m2p_submit_recursive(struct ep93xx_dma_m2p_client *m2p,
 				     struct ep93xx_dma_buffer *buf);
+
+/**
+ * ep93xx_dma_m2p_flush - Flush all pending transfers on a DMA M2P client
+ *
+ * @m2p: DMA client to flush transfers on
+ *
+ * Any transfers currently in progress will be completed in hardware, but
+ * ignored in software.
+ *
+ */
 void ep93xx_dma_m2p_flush(struct ep93xx_dma_m2p_client *m2p);
 
 #endif /* __ASM_ARCH_DMA_H */
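To make the API above concrete, a hedged sketch of a DMA M2P client under
the kernel-doc just added; the port, flags, names and callbacks are
illustrative, not taken from a real driver:

static void example_started(void *cookie, struct ep93xx_dma_buffer *buf)
{
	/* the transfer for 'buf' has just been handed to the hardware */
}

static void example_finished(void *cookie, struct ep93xx_dma_buffer *buf,
			     int bytes, int error)
{
	/* only ep93xx_dma_m2p_submit_recursive() may be called from here */
}

static struct ep93xx_dma_m2p_client example_client = {
	.name		 = "example-i2s",
	.flags		 = EP93XX_DMA_M2P_PORT_I2S1 | EP93XX_DMA_M2P_TX |
			   EP93XX_DMA_M2P_ABORT_ON_ERROR,
	.buffer_started	 = example_started,
	.buffer_finished = example_finished,
};

Registration and a first submission would then look roughly like:

	err = ep93xx_dma_m2p_client_register(&example_client);
	if (!err)
		ep93xx_dma_m2p_submit(&example_client, &first_buf);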
diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
index 51ff23b..3688123 100644
--- a/arch/arm/mach-kirkwood/common.c
+++ b/arch/arm/mach-kirkwood/common.c
@@ -854,10 +854,9 @@
 
 	kirkwood_pcie_id(&dev, &rev);
 
-	if ((dev == MV88F6281_DEV_ID && (rev == MV88F6281_REV_A0 ||
-					rev == MV88F6281_REV_A1)) ||
-	    (dev == MV88F6282_DEV_ID))
-		return 200000000;
+	if (dev == MV88F6281_DEV_ID || dev == MV88F6282_DEV_ID)
+		if (((readl(SAMPLE_AT_RESET) >> 21) & 1) == 0)
+			return 200000000;
 
 	return 166666667;
 }
diff --git a/arch/arm/mach-kirkwood/d2net_v2-setup.c b/arch/arm/mach-kirkwood/d2net_v2-setup.c
index 4aa86e4..a31c949 100644
--- a/arch/arm/mach-kirkwood/d2net_v2-setup.c
+++ b/arch/arm/mach-kirkwood/d2net_v2-setup.c
@@ -225,5 +225,5 @@
 	.init_machine	= d2net_v2_init,
 	.map_io		= kirkwood_map_io,
 	.init_irq	= kirkwood_init_irq,
-	.timer		= &lacie_v2_timer,
+	.timer		= &kirkwood_timer,
 MACHINE_END
diff --git a/arch/arm/mach-kirkwood/lacie_v2-common.c b/arch/arm/mach-kirkwood/lacie_v2-common.c
index d3ea1b6..285edab 100644
--- a/arch/arm/mach-kirkwood/lacie_v2-common.c
+++ b/arch/arm/mach-kirkwood/lacie_v2-common.c
@@ -111,17 +111,3 @@
 			pr_err("Failed to power up HDD%d\n", i + 1);
 	}
 }
-
-/*****************************************************************************
- * Timer
- ****************************************************************************/
-
-static void lacie_v2_timer_init(void)
-{
-	kirkwood_tclk = 166666667;
-	orion_time_init(IRQ_KIRKWOOD_BRIDGE, kirkwood_tclk);
-}
-
-struct sys_timer lacie_v2_timer = {
-	.init = lacie_v2_timer_init,
-};
diff --git a/arch/arm/mach-kirkwood/lacie_v2-common.h b/arch/arm/mach-kirkwood/lacie_v2-common.h
index af52131..fc64f57 100644
--- a/arch/arm/mach-kirkwood/lacie_v2-common.h
+++ b/arch/arm/mach-kirkwood/lacie_v2-common.h
@@ -13,6 +13,4 @@
 void lacie_v2_register_i2c_devices(void);
 void lacie_v2_hdd_power_init(int hdd_num);
 
-extern struct sys_timer lacie_v2_timer;
-
 #endif
diff --git a/arch/arm/mach-kirkwood/mpp.c b/arch/arm/mach-kirkwood/mpp.c
index 065187d..27901f7 100644
--- a/arch/arm/mach-kirkwood/mpp.c
+++ b/arch/arm/mach-kirkwood/mpp.c
@@ -59,7 +59,7 @@
 	}
 	printk("\n");
 
-	while (*mpp_list) {
+	for ( ; *mpp_list; mpp_list++) {
 		unsigned int num = MPP_NUM(*mpp_list);
 		unsigned int sel = MPP_SEL(*mpp_list);
 		int shift, gpio_mode;
@@ -88,8 +88,6 @@
 		if (sel != 0)
 			gpio_mode = 0;
 		orion_gpio_set_valid(num, gpio_mode);
-
-		mpp_list++;
 	}
 
 	printk(KERN_DEBUG "  final MPP regs:");
diff --git a/arch/arm/mach-kirkwood/netspace_v2-setup.c b/arch/arm/mach-kirkwood/netspace_v2-setup.c
index 5ea66f1..65ee21f 100644
--- a/arch/arm/mach-kirkwood/netspace_v2-setup.c
+++ b/arch/arm/mach-kirkwood/netspace_v2-setup.c
@@ -262,7 +262,7 @@
 	.init_machine	= netspace_v2_init,
 	.map_io		= kirkwood_map_io,
 	.init_irq	= kirkwood_init_irq,
-	.timer		= &lacie_v2_timer,
+	.timer		= &kirkwood_timer,
 MACHINE_END
 #endif
 
@@ -272,7 +272,7 @@
 	.init_machine	= netspace_v2_init,
 	.map_io		= kirkwood_map_io,
 	.init_irq	= kirkwood_init_irq,
-	.timer		= &lacie_v2_timer,
+	.timer		= &kirkwood_timer,
 MACHINE_END
 #endif
 
@@ -282,6 +282,6 @@
 	.init_machine	= netspace_v2_init,
 	.map_io		= kirkwood_map_io,
 	.init_irq	= kirkwood_init_irq,
-	.timer		= &lacie_v2_timer,
+	.timer		= &kirkwood_timer,
 MACHINE_END
 #endif
diff --git a/arch/arm/mach-kirkwood/netxbig_v2-setup.c b/arch/arm/mach-kirkwood/netxbig_v2-setup.c
index a1b45d5..93afd3c 100644
--- a/arch/arm/mach-kirkwood/netxbig_v2-setup.c
+++ b/arch/arm/mach-kirkwood/netxbig_v2-setup.c
@@ -403,7 +403,7 @@
 	.init_machine	= netxbig_v2_init,
 	.map_io		= kirkwood_map_io,
 	.init_irq	= kirkwood_init_irq,
-	.timer		= &lacie_v2_timer,
+	.timer		= &kirkwood_timer,
 MACHINE_END
 #endif
 
@@ -413,6 +413,6 @@
 	.init_machine	= netxbig_v2_init,
 	.map_io		= kirkwood_map_io,
 	.init_irq	= kirkwood_init_irq,
-	.timer		= &lacie_v2_timer,
+	.timer		= &kirkwood_timer,
 MACHINE_END
 #endif
diff --git a/arch/arm/mach-kirkwood/ts41x-setup.c b/arch/arm/mach-kirkwood/ts41x-setup.c
index 8be09a0..3587a28 100644
--- a/arch/arm/mach-kirkwood/ts41x-setup.c
+++ b/arch/arm/mach-kirkwood/ts41x-setup.c
@@ -27,6 +27,10 @@
 #include "mpp.h"
 #include "tsx1x-common.h"
 
+/* for the PCIe reset workaround */
+#include <plat/pcie.h>
+
+
 #define QNAP_TS41X_JUMPER_JP1	45
 
 static struct i2c_board_info __initdata qnap_ts41x_i2c_rtc = {
@@ -140,8 +144,16 @@
 
 static int __init ts41x_pci_init(void)
 {
-	if (machine_is_ts41x())
+	if (machine_is_ts41x()) {
+		/*
+		 * Without this explicit reset, the PCIe SATA controller
+		 * (Marvell 88sx7042/sata_mv) is known to stop working
+		 * after a few minutes.
+		 */
+		orion_pcie_reset((void __iomem *)PCIE_VIRT_BASE);
+
 		kirkwood_pcie_init(KW_PCIE0);
+	}
 
 	return 0;
 }
diff --git a/arch/arm/mach-mmp/include/mach/cputype.h b/arch/arm/mach-mmp/include/mach/cputype.h
index f43a68b..8a3b56d 100644
--- a/arch/arm/mach-mmp/include/mach/cputype.h
+++ b/arch/arm/mach-mmp/include/mach/cputype.h
@@ -46,7 +46,8 @@
 #ifdef CONFIG_CPU_MMP2
 static inline int cpu_is_mmp2(void)
 {
-	return (((cpu_readid_id() >> 8) & 0xff) == 0x58);
+	return (((read_cpuid_id() >> 8) & 0xff) == 0x58);
+}
 #else
 #define cpu_is_mmp2()	(0)
 #endif
diff --git a/arch/arm/mach-mv78xx0/mpp.c b/arch/arm/mach-mv78xx0/mpp.c
index 354ac51..84db2df 100644
--- a/arch/arm/mach-mv78xx0/mpp.c
+++ b/arch/arm/mach-mv78xx0/mpp.c
@@ -54,7 +54,7 @@
 	}
 	printk("\n");
 
-	while (*mpp_list) {
+	for ( ; *mpp_list; mpp_list++) {
 		unsigned int num = MPP_NUM(*mpp_list);
 		unsigned int sel = MPP_SEL(*mpp_list);
 		int shift, gpio_mode;
@@ -83,8 +83,6 @@
 		if (sel != 0)
 			gpio_mode = 0;
 		orion_gpio_set_valid(num, gpio_mode);
-
-		mpp_list++;
 	}
 
 	printk(KERN_DEBUG "  final MPP regs:");
diff --git a/arch/arm/mach-orion5x/mpp.c b/arch/arm/mach-orion5x/mpp.c
index bc4c3b9..db485d3 100644
--- a/arch/arm/mach-orion5x/mpp.c
+++ b/arch/arm/mach-orion5x/mpp.c
@@ -127,7 +127,7 @@
 	/* Initialize gpiolib. */
 	orion_gpio_init();
 
-	while (mode->mpp >= 0) {
+	for ( ; mode->mpp >= 0; mode++) {
 		u32 *reg;
 		int num_type;
 		int shift;
@@ -160,8 +160,6 @@
 			orion_gpio_set_unused(mode->mpp);
 
 		orion_gpio_set_valid(mode->mpp, !!(mode->type == MPP_GPIO));
-
-		mode++;
 	}
 
 	writel(mpp_0_7_ctrl, MPP_0_7_CTRL);
diff --git a/arch/arm/mach-orion5x/ts78xx-setup.c b/arch/arm/mach-orion5x/ts78xx-setup.c
index 16f1bd5..c1c1cd0 100644
--- a/arch/arm/mach-orion5x/ts78xx-setup.c
+++ b/arch/arm/mach-orion5x/ts78xx-setup.c
@@ -239,7 +239,7 @@
 static struct resource ts78xx_ts_nand_resources = {
 	.start		= TS_NAND_DATA,
 	.end		= TS_NAND_DATA + 4,
-	.flags		= IORESOURCE_IO,
+	.flags		= IORESOURCE_MEM,
 };
 
 static struct platform_device ts78xx_ts_nand_device = {
diff --git a/arch/arm/mach-pxa/cm-x2xx.c b/arch/arm/mach-pxa/cm-x2xx.c
index ac5598c..d34b99f 100644
--- a/arch/arm/mach-pxa/cm-x2xx.c
+++ b/arch/arm/mach-pxa/cm-x2xx.c
@@ -476,8 +476,6 @@
 
 static void __init cmx2xx_init_irq(void)
 {
-	pxa27x_init_irq();
-
 	if (cpu_is_pxa25x()) {
 		pxa25x_init_irq();
 		cmx2xx_pci_init_irq(CMX255_GPIO_IT8152_IRQ);
diff --git a/arch/arm/mach-pxa/saar.c b/arch/arm/mach-pxa/saar.c
index 4b521e0..ffa50e6 100644
--- a/arch/arm/mach-pxa/saar.c
+++ b/arch/arm/mach-pxa/saar.c
@@ -116,7 +116,7 @@
 	},
 };
 
-#if defined(CONFIG_FB_PXA) || (CONFIG_FB_PXA_MODULE)
+#if defined(CONFIG_FB_PXA) || defined(CONFIG_FB_PXA_MODULE)
 static uint16_t lcd_power_on[] = {
 	/* single frame */
 	SMART_CMD_NOOP,
diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig
index 54b479c..51dcd59 100644
--- a/arch/arm/mach-shmobile/Kconfig
+++ b/arch/arm/mach-shmobile/Kconfig
@@ -116,4 +116,6 @@
 config SH_CLK_CPG
 	bool
 
+source "drivers/sh/Kconfig"
+
 endif
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
index 46ca4d4..32d9e28 100644
--- a/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/arch/arm/mach-shmobile/board-ap4evb.c
@@ -565,12 +565,50 @@
 
 /* FSI */
 #define IRQ_FSI		evt2irq(0x1840)
+
+static int fsi_set_rate(int is_porta, int rate)
+{
+	struct clk *fsib_clk;
+	struct clk *fdiv_clk = &sh7372_fsidivb_clk;
+	int ret;
+
+	/* set_rate is not needed for port A */
+	if (is_porta)
+		return 0;
+
+	fsib_clk = clk_get(NULL, "fsib_clk");
+	if (IS_ERR(fsib_clk))
+		return -EINVAL;
+
+	switch (rate) {
+	case 48000:
+		clk_set_rate(fsib_clk, clk_round_rate(fsib_clk, 85428000));
+		clk_set_rate(fdiv_clk, clk_round_rate(fdiv_clk, 12204000));
+		ret = SH_FSI_ACKMD_256 | SH_FSI_BPFMD_64;
+		break;
+	default:
+		pr_err("unsupported rate in FSI2 port B\n");
+		ret = -EINVAL;
+		break;
+	}
+
+	clk_put(fsib_clk);
+
+	return ret;
+}
+
 static struct sh_fsi_platform_info fsi_info = {
 	.porta_flags = SH_FSI_BRS_INV |
 		       SH_FSI_OUT_SLAVE_MODE |
 		       SH_FSI_IN_SLAVE_MODE |
 		       SH_FSI_OFMT(PCM) |
 		       SH_FSI_IFMT(PCM),
+
+	.portb_flags = SH_FSI_BRS_INV |
+		       SH_FSI_BRM_INV |
+		       SH_FSI_LRS_INV |
+		       SH_FSI_OFMT(SPDIF),
+	.set_rate = fsi_set_rate,
 };
 
 static struct resource fsi_resources[] = {
@@ -634,6 +672,7 @@
 static struct sh_mobile_hdmi_info hdmi_info = {
 	.lcd_chan = &sh_mobile_lcdc1_info.ch[0],
 	.lcd_dev = &lcdc1_device.dev,
+	.flags = HDMI_SND_SRC_SPDIF,
 };
 
 static struct resource hdmi_resources[] = {
@@ -992,6 +1031,7 @@
 
 #define GPIO_PORT9CR	0xE6051009
 #define GPIO_PORT10CR	0xE605100A
+#define USCCR1		0xE6058144
 static void __init ap4evb_init(void)
 {
 	u32 srcr4;
@@ -1062,7 +1102,7 @@
 	/* setup USB phy */
 	__raw_writew(0x8a0a, 0xE6058130);	/* USBCR2 */
 
-	/* enable FSI2 */
+	/* enable FSI2 port A (ak4643) */
 	gpio_request(GPIO_FN_FSIAIBT,	NULL);
 	gpio_request(GPIO_FN_FSIAILR,	NULL);
 	gpio_request(GPIO_FN_FSIAISLD,	NULL);
@@ -1079,6 +1119,10 @@
 	gpio_request(GPIO_PORT41, NULL);
 	gpio_direction_input(GPIO_PORT41);
 
+	/* setup FSI2 port B (HDMI) */
+	gpio_request(GPIO_FN_FSIBCK, NULL);
+	__raw_writew(__raw_readw(USCCR1) & ~(1 << 6), USCCR1); /* use SPDIF */
+
 	/* set SPU2 clock to 119.6 MHz */
 	clk = clk_get(NULL, "spu_clk");
 	if (!IS_ERR(clk)) {
diff --git a/arch/arm/mach-shmobile/clock-sh7372.c b/arch/arm/mach-shmobile/clock-sh7372.c
index 8565aef..7db31e6 100644
--- a/arch/arm/mach-shmobile/clock-sh7372.c
+++ b/arch/arm/mach-shmobile/clock-sh7372.c
@@ -50,6 +50,9 @@
 #define SMSTPCR3	0xe615013c
 #define SMSTPCR4	0xe6150140
 
+#define FSIDIVA		0xFE1F8000
+#define FSIDIVB		0xFE1F8008
+
 /* Platforms must set frequency on their DV_CLKI pin */
 struct clk sh7372_dv_clki_clk = {
 };
@@ -288,6 +291,7 @@
 	.ops		= &pllc2_clk_ops,
 	.parent		= &extal1_div2_clk,
 	.freq_table	= pllc2_freq_table,
+	.nr_freqs	= ARRAY_SIZE(pllc2_freq_table) - 1,
 	.parent_table	= pllc2_parent,
 	.parent_num	= ARRAY_SIZE(pllc2_parent),
 };
@@ -417,6 +421,101 @@
 				      fsibckcr_parent, ARRAY_SIZE(fsibckcr_parent), 6, 2),
 };
 
+/* FSI DIV */
+static unsigned long fsidiv_recalc(struct clk *clk)
+{
+	unsigned long value;
+
+	value = __raw_readl(clk->mapping->base);
+
+	if ((value & 0x3) != 0x3)
+		return 0;
+
+	value >>= 16;
+	if (value < 2)
+		return 0;
+
+	return clk->parent->rate / value;
+}
+
+static long fsidiv_round_rate(struct clk *clk, unsigned long rate)
+{
+	return clk_rate_div_range_round(clk, 2, 0xffff, rate);
+}
+
+static void fsidiv_disable(struct clk *clk)
+{
+	__raw_writel(0, clk->mapping->base);
+}
+
+static int fsidiv_enable(struct clk *clk)
+{
+	unsigned long value;
+
+	value  = __raw_readl(clk->mapping->base) >> 16;
+	if (value < 2) {
+		fsidiv_disable(clk);
+		return -ENOENT;
+	}
+
+	__raw_writel((value << 16) | 0x3, clk->mapping->base);
+
+	return 0;
+}
+
+static int fsidiv_set_rate(struct clk *clk,
+			   unsigned long rate, int algo_id)
+{
+	int idx;
+
+	if (clk->parent->rate == rate) {
+		fsidiv_disable(clk);
+		return 0;
+	}
+
+	idx = (clk->parent->rate / rate) & 0xffff;
+	if (idx < 2)
+		return -ENOENT;
+
+	__raw_writel(idx << 16, clk->mapping->base);
+	return fsidiv_enable(clk);
+}
+
+static struct clk_ops fsidiv_clk_ops = {
+	.recalc		= fsidiv_recalc,
+	.round_rate	= fsidiv_round_rate,
+	.set_rate	= fsidiv_set_rate,
+	.enable		= fsidiv_enable,
+	.disable	= fsidiv_disable,
+};
+
+static struct clk_mapping sh7372_fsidiva_clk_mapping = {
+	.phys	= FSIDIVA,
+	.len	= 8,
+};
+
+struct clk sh7372_fsidiva_clk = {
+	.ops		= &fsidiv_clk_ops,
+	.parent		= &div6_reparent_clks[DIV6_FSIA], /* late install */
+	.mapping	= &sh7372_fsidiva_clk_mapping,
+};
+
+static struct clk_mapping sh7372_fsidivb_clk_mapping = {
+	.phys	= FSIDIVB,
+	.len	= 8,
+};
+
+struct clk sh7372_fsidivb_clk = {
+	.ops		= &fsidiv_clk_ops,
+	.parent		= &div6_reparent_clks[DIV6_FSIB],  /* late install */
+	.mapping	= &sh7372_fsidivb_clk_mapping,
+};
+
+static struct clk *late_main_clks[] = {
+	&sh7372_fsidiva_clk,
+	&sh7372_fsidivb_clk,
+};
+
 enum { MSTP001,
        MSTP131, MSTP130,
        MSTP129, MSTP128, MSTP127, MSTP126, MSTP125,
@@ -585,6 +684,9 @@
 	if (!ret)
 		ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR);
 
+	for (k = 0; !ret && (k < ARRAY_SIZE(late_main_clks)); k++)
+		ret = clk_register(late_main_clks[k]);
+
 	clkdev_add_table(lookups, ARRAY_SIZE(lookups));
 
 	if (!ret)
diff --git a/arch/arm/mach-shmobile/include/mach/gpio.h b/arch/arm/mach-shmobile/include/mach/gpio.h
index 5bc6bd4..2b1bb9e 100644
--- a/arch/arm/mach-shmobile/include/mach/gpio.h
+++ b/arch/arm/mach-shmobile/include/mach/gpio.h
@@ -35,12 +35,12 @@
 
 static inline int gpio_to_irq(unsigned gpio)
 {
-	return -ENOSYS;
+	return __gpio_to_irq(gpio);
 }
 
 static inline int irq_to_gpio(unsigned int irq)
 {
-	return -EINVAL;
+	return -ENOSYS;
 }
 
 #endif /* CONFIG_GPIOLIB */
diff --git a/arch/arm/mach-shmobile/include/mach/sh7372.h b/arch/arm/mach-shmobile/include/mach/sh7372.h
index 147775a..e4f9004 100644
--- a/arch/arm/mach-shmobile/include/mach/sh7372.h
+++ b/arch/arm/mach-shmobile/include/mach/sh7372.h
@@ -464,5 +464,7 @@
 extern struct clk sh7372_pllc2_clk;
 extern struct clk sh7372_fsiack_clk;
 extern struct clk sh7372_fsibck_clk;
+extern struct clk sh7372_fsidiva_clk;
+extern struct clk sh7372_fsidivb_clk;
 
 #endif /* __ASM_SH7372_H__ */
diff --git a/arch/arm/mach-vexpress/ct-ca9x4.c b/arch/arm/mach-vexpress/ct-ca9x4.c
index c2e405a..fd25ccd 100644
--- a/arch/arm/mach-vexpress/ct-ca9x4.c
+++ b/arch/arm/mach-vexpress/ct-ca9x4.c
@@ -54,7 +54,9 @@
 
 static void __init ct_ca9x4_map_io(void)
 {
+#ifdef CONFIG_LOCAL_TIMERS
 	twd_base = MMIO_P2V(A9_MPCORE_TWD);
+#endif
 	v2m_map_io(ct_ca9x4_io_desc, ARRAY_SIZE(ct_ca9x4_io_desc));
 }
 
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index e4dd064..ac6a361 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -198,7 +198,7 @@
 	 * fragmentation of the DMA space, and also prevents allocations
 	 * smaller than a section from crossing a section boundary.
 	 */
-	bit = fls(size - 1) + 1;
+	bit = fls(size - 1);
 	if (bit > SECTION_SHIFT)
 		bit = SECTION_SHIFT;
 	align = 1 << bit;
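For example, a 1 MiB request gives fls(0x100000 - 1) = 20, so align is
exactly 1 MiB; the removed "+ 1" doubled that to 2 MiB and needlessly
fragmented the DMA virtual space.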
diff --git a/arch/arm/plat-omap/devices.c b/arch/arm/plat-omap/devices.c
index 6f42a18..fc81912 100644
--- a/arch/arm/plat-omap/devices.c
+++ b/arch/arm/plat-omap/devices.c
@@ -284,12 +284,14 @@
 	if (!size)
 		return;
 
-	paddr = __memblock_alloc_base(size, SZ_1M, MEMBLOCK_REAL_LIMIT);
+	paddr = memblock_alloc(size, SZ_1M);
 	if (!paddr) {
 		pr_err("%s: failed to reserve %x bytes\n",
 				__func__, size);
 		return;
 	}
+	memblock_free(paddr, size);
+	memblock_remove(paddr, size);
 
 	omap_dsp_phys_mempool_base = paddr;
 }
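The alloc-then-free-and-remove sequence is deliberate: memblock_alloc()
finds a suitably aligned range, and the immediate free + remove erases it
from the memblock map, so the region never reaches the page allocator and
stays reserved for the DSP memory pool.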
diff --git a/arch/arm/plat-orion/include/plat/pcie.h b/arch/arm/plat-orion/include/plat/pcie.h
index 3ebfef7..cc99163 100644
--- a/arch/arm/plat-orion/include/plat/pcie.h
+++ b/arch/arm/plat-orion/include/plat/pcie.h
@@ -11,12 +11,15 @@
 #ifndef __PLAT_PCIE_H
 #define __PLAT_PCIE_H
 
+struct pci_bus;
+
 u32 orion_pcie_dev_id(void __iomem *base);
 u32 orion_pcie_rev(void __iomem *base);
 int orion_pcie_link_up(void __iomem *base);
 int orion_pcie_x4_mode(void __iomem *base);
 int orion_pcie_get_local_bus_nr(void __iomem *base);
 void orion_pcie_set_local_bus_nr(void __iomem *base, int nr);
+void orion_pcie_reset(void __iomem *base);
 void orion_pcie_setup(void __iomem *base,
 		      struct mbus_dram_target_info *dram);
 int orion_pcie_rd_conf(void __iomem *base, struct pci_bus *bus,
diff --git a/arch/arm/plat-orion/pcie.c b/arch/arm/plat-orion/pcie.c
index 779553a..af2d733 100644
--- a/arch/arm/plat-orion/pcie.c
+++ b/arch/arm/plat-orion/pcie.c
@@ -182,11 +182,6 @@
 	u32 mask;
 
 	/*
-	 * soft reset PCIe unit
-	 */
-	orion_pcie_reset(base);
-
-	/*
 	 * Point PCIe unit MBUS decode windows to DRAM space.
 	 */
 	orion_pcie_setup_wins(base, dram);
diff --git a/arch/m68k/include/asm/irqflags.h b/arch/m68k/include/asm/irqflags.h
index 4a5b284..7ef4115 100644
--- a/arch/m68k/include/asm/irqflags.h
+++ b/arch/m68k/include/asm/irqflags.h
@@ -2,7 +2,9 @@
 #define _M68K_IRQFLAGS_H
 
 #include <linux/types.h>
+#ifdef CONFIG_MMU
 #include <linux/hardirq.h>
+#endif
 #include <linux/preempt.h>
 #include <asm/thread_info.h>
 #include <asm/entry.h>
diff --git a/arch/m68k/include/asm/machdep.h b/arch/m68k/include/asm/machdep.h
index 789f3b2..415d548 100644
--- a/arch/m68k/include/asm/machdep.h
+++ b/arch/m68k/include/asm/machdep.h
@@ -40,5 +40,6 @@
 extern irqreturn_t arch_timer_interrupt(int irq, void *dummy);
 
 extern void config_BSP(char *command, int len);
+extern void do_IRQ(int irq, struct pt_regs *fp);
 
 #endif /* _M68K_MACHDEP_H */
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index 428d0e5..b06bdae 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -127,7 +127,7 @@
 
 static void kvm_patch_ins_b(u32 *inst, int addr)
 {
-#ifdef CONFIG_RELOCATABLE
+#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC_BOOK3S)
 	/* On relocatable kernels interrupts handlers and our code
 	   can be in different regions, so we don't patch them */
 
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index 0498469..1cc471f 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -416,7 +416,7 @@
 	lwz	r3, VCPU_PC(r4)
 	mtsrr0	r3
 	lwz	r3, VCPU_SHARED(r4)
-	lwz	r3, VCPU_SHARED_MSR(r3)
+	lwz	r3, (VCPU_SHARED_MSR + 4)(r3)
 	oris	r3, r3, KVMPPC_MSR_MASK@h
 	ori	r3, r3, KVMPPC_MSR_MASK@l
 	mtsrr1	r3
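The shared-area msr field is 64 bits wide, so on this 32-bit big-endian
target the live low word sits at VCPU_SHARED_MSR + 4; an lwz from offset 0
would fetch the upper half instead.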
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index 71750f2..e3768ee 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -138,8 +138,8 @@
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 
 	free_page((unsigned long)vcpu->arch.shared);
-	kvmppc_e500_tlb_uninit(vcpu_e500);
 	kvm_vcpu_uninit(vcpu);
+	kvmppc_e500_tlb_uninit(vcpu_e500);
 	kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
 }
 
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 2f87a16..38f756f 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -617,6 +617,7 @@
 	switch (ioctl) {
 	case KVM_PPC_GET_PVINFO: {
 		struct kvm_ppc_pvinfo pvinfo;
+		memset(&pvinfo, 0, sizeof(pvinfo));
 		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
 		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
 			r = -EFAULT;
diff --git a/arch/powerpc/kvm/timing.c b/arch/powerpc/kvm/timing.c
index 46fa04f..a021f58 100644
--- a/arch/powerpc/kvm/timing.c
+++ b/arch/powerpc/kvm/timing.c
@@ -35,7 +35,6 @@
 	int i;
 
 	/* pause guest execution to avoid concurrent updates */
-	local_irq_disable();
 	mutex_lock(&vcpu->mutex);
 
 	vcpu->arch.last_exit_type = 0xDEAD;
@@ -51,7 +50,6 @@
 	vcpu->arch.timing_last_enter.tv64 = 0;
 
 	mutex_unlock(&vcpu->mutex);
-	local_irq_enable();
 }
 
 static void add_exit_timing(struct kvm_vcpu *vcpu, u64 duration, int type)
diff --git a/arch/s390/include/asm/qeth.h b/arch/s390/include/asm/qeth.h
index 06cbd1e..90efda0 100644
--- a/arch/s390/include/asm/qeth.h
+++ b/arch/s390/include/asm/qeth.h
@@ -28,39 +28,70 @@
 	__u8  reserved2[32];
 } __attribute__ ((packed));
 
+enum qeth_arp_ipaddrtype {
+	QETHARP_IP_ADDR_V4 = 1,
+	QETHARP_IP_ADDR_V6 = 2,
+};
+struct qeth_arp_entrytype {
+	__u8 mac;
+	__u8 ip;
+} __attribute__((packed));
+
+#define QETH_QARP_MEDIASPECIFIC_BYTES 32
+#define QETH_QARP_MACADDRTYPE_BYTES 1
 struct qeth_arp_qi_entry7 {
-	__u8 media_specific[32];
-	__u8 macaddr_type;
-	__u8 ipaddr_type;
+	__u8 media_specific[QETH_QARP_MEDIASPECIFIC_BYTES];
+	struct qeth_arp_entrytype type;
 	__u8 macaddr[6];
 	__u8 ipaddr[4];
 } __attribute__((packed));
 
+struct qeth_arp_qi_entry7_ipv6 {
+	__u8 media_specific[QETH_QARP_MEDIASPECIFIC_BYTES];
+	struct qeth_arp_entrytype type;
+	__u8 macaddr[6];
+	__u8 ipaddr[16];
+} __attribute__((packed));
+
 struct qeth_arp_qi_entry7_short {
-	__u8 macaddr_type;
-	__u8 ipaddr_type;
+	struct qeth_arp_entrytype type;
 	__u8 macaddr[6];
 	__u8 ipaddr[4];
 } __attribute__((packed));
 
+struct qeth_arp_qi_entry7_short_ipv6 {
+	struct qeth_arp_entrytype type;
+	__u8 macaddr[6];
+	__u8 ipaddr[16];
+} __attribute__((packed));
+
 struct qeth_arp_qi_entry5 {
-	__u8 media_specific[32];
-	__u8 macaddr_type;
-	__u8 ipaddr_type;
+	__u8 media_specific[QETH_QARP_MEDIASPECIFIC_BYTES];
+	struct qeth_arp_entrytype type;
 	__u8 ipaddr[4];
 } __attribute__((packed));
 
+struct qeth_arp_qi_entry5_ipv6 {
+	__u8 media_specific[QETH_QARP_MEDIASPECIFIC_BYTES];
+	struct qeth_arp_entrytype type;
+	__u8 ipaddr[16];
+} __attribute__((packed));
+
 struct qeth_arp_qi_entry5_short {
-	__u8 macaddr_type;
-	__u8 ipaddr_type;
+	struct qeth_arp_entrytype type;
 	__u8 ipaddr[4];
 } __attribute__((packed));
 
+struct qeth_arp_qi_entry5_short_ipv6 {
+	struct qeth_arp_entrytype type;
+	__u8 ipaddr[16];
+} __attribute__((packed));
 /*
  * can be set by user if no "media specific information" is wanted
  * -> saves a lot of space in user space buffer
  */
 #define QETH_QARP_STRIP_ENTRIES  0x8000
+#define QETH_QARP_WITH_IPV6	 0x4000
 #define QETH_QARP_REQUEST_MASK   0x00ff
 
 /* data sent to user space as result of query arp ioctl */
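A hedged sketch of how a query-arp ioctl caller might combine the mask bits
defined above; the 'command' value and the surrounding request setup are
assumed, not shown in this hunk:

	/* strip media-specific bytes and include IPv6 entries */
	__u16 request_bits = QETH_QARP_STRIP_ENTRIES | QETH_QARP_WITH_IPV6;

	/* the low byte still selects the ARP subcommand (assumed 'command') */
	request_bits |= command & QETH_QARP_REQUEST_MASK;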
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 5c075f5..7f217b3 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -193,6 +193,7 @@
 config CPU_SH2A
 	bool
 	select CPU_SH2
+	select UNCACHED_MAPPING
 
 config CPU_SH3
 	bool
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index 307b3a4..9c8c6e1 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -133,10 +133,7 @@
 machdir-$(CONFIG_SH_HP6XX)			+= mach-hp6xx
 machdir-$(CONFIG_SH_DREAMCAST)			+= mach-dreamcast
 machdir-$(CONFIG_SH_SH03)			+= mach-sh03
-machdir-$(CONFIG_SH_SECUREEDGE5410)		+= mach-snapgear
 machdir-$(CONFIG_SH_RTS7751R2D)			+= mach-r2d
-machdir-$(CONFIG_SH_7751_SYSTEMH)		+= mach-systemh
-machdir-$(CONFIG_SH_EDOSK7705)			+= mach-edosk7705
 machdir-$(CONFIG_SH_HIGHLANDER)			+= mach-highlander
 machdir-$(CONFIG_SH_MIGOR)			+= mach-migor
 machdir-$(CONFIG_SH_AP325RXA)			+= mach-ap325rxa
diff --git a/arch/sh/boards/Kconfig b/arch/sh/boards/Kconfig
index 9c94711..2018c7e 100644
--- a/arch/sh/boards/Kconfig
+++ b/arch/sh/boards/Kconfig
@@ -81,13 +81,6 @@
 	  Select 7343 SolutionEngine if configuring for a Hitachi
 	  SH7343 (SH-Mobile 3AS) evaluation board.
 
-config SH_7751_SYSTEMH
-	bool "SystemH7751R"
-	depends on CPU_SUBTYPE_SH7751R
-	help
-	  Select SystemH if you are configuring for a Renesas SystemH
-	  7751R evaluation board.
-
 config SH_HP6XX
 	bool "HP6XX"
 	select SYS_SUPPORTS_APM_EMULATION
diff --git a/arch/sh/boards/Makefile b/arch/sh/boards/Makefile
index 38ef655..be7d11d 100644
--- a/arch/sh/boards/Makefile
+++ b/arch/sh/boards/Makefile
@@ -2,10 +2,12 @@
 # Specific board support, not covered by a mach group.
 #
 obj-$(CONFIG_SH_MAGIC_PANEL_R2)	+= board-magicpanelr2.o
+obj-$(CONFIG_SH_SECUREEDGE5410)	+= board-secureedge5410.o
 obj-$(CONFIG_SH_SH2007)		+= board-sh2007.o
 obj-$(CONFIG_SH_SH7785LCR)	+= board-sh7785lcr.o
 obj-$(CONFIG_SH_URQUELL)	+= board-urquell.o
 obj-$(CONFIG_SH_SHMIN)		+= board-shmin.o
+obj-$(CONFIG_SH_EDOSK7705)	+= board-edosk7705.o
 obj-$(CONFIG_SH_EDOSK7760)	+= board-edosk7760.o
 obj-$(CONFIG_SH_ESPT)		+= board-espt.o
 obj-$(CONFIG_SH_POLARIS)	+= board-polaris.o
diff --git a/arch/sh/boards/board-edosk7705.c b/arch/sh/boards/board-edosk7705.c
new file mode 100644
index 0000000..4cb3bb7
--- /dev/null
+++ b/arch/sh/boards/board-edosk7705.c
@@ -0,0 +1,78 @@
+/*
+ * arch/sh/boards/renesas/edosk7705/setup.c
+ *
+ * Copyright (C) 2000  Kazumoto Kojima
+ *
+ * Hitachi SolutionEngine Support.
+ *
+ * Modified for edosk7705 development
+ * board by S. Dunn, 2003.
+ */
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/smc91x.h>
+#include <asm/machvec.h>
+#include <asm/sizes.h>
+
+#define SMC_IOBASE	0xA2000000
+#define SMC_IO_OFFSET	0x300
+#define SMC_IOADDR	(SMC_IOBASE + SMC_IO_OFFSET)
+
+#define ETHERNET_IRQ	0x09
+
+static void __init sh_edosk7705_init_irq(void)
+{
+	make_imask_irq(ETHERNET_IRQ);
+}
+
+/* eth initialization functions */
+static struct smc91x_platdata smc91x_info = {
+	.flags = SMC91X_USE_16BIT | SMC91X_IO_SHIFT_1 | IORESOURCE_IRQ_LOWLEVEL,
+};
+
+static struct resource smc91x_res[] = {
+	[0] = {
+		.start	= SMC_IOADDR,
+		.end	= SMC_IOADDR + SZ_32 - 1,
+		.flags	= IORESOURCE_MEM,
+	},
+	[1] = {
+		.start	= ETHERNET_IRQ,
+		.end	= ETHERNET_IRQ,
+		.flags	= IORESOURCE_IRQ,
+	}
+};
+
+static struct platform_device smc91x_dev = {
+	.name		= "smc91x",
+	.id		= -1,
+	.num_resources	= ARRAY_SIZE(smc91x_res),
+	.resource	= smc91x_res,
+
+	.dev	= {
+		.platform_data	= &smc91x_info,
+	},
+};
+
+/* platform init code */
+static struct platform_device *edosk7705_devices[] __initdata = {
+	&smc91x_dev,
+};
+
+static int __init init_edosk7705_devices(void)
+{
+	return platform_add_devices(edosk7705_devices,
+				    ARRAY_SIZE(edosk7705_devices));
+}
+__initcall(init_edosk7705_devices);
+
+/*
+ * The Machine Vector
+ */
+static struct sh_machine_vector mv_edosk7705 __initmv = {
+	.mv_name		= "EDOSK7705",
+	.mv_nr_irqs		= 80,
+	.mv_init_irq		= sh_edosk7705_init_irq,
+};
diff --git a/arch/sh/boards/mach-snapgear/setup.c b/arch/sh/boards/board-secureedge5410.c
similarity index 70%
rename from arch/sh/boards/mach-snapgear/setup.c
rename to arch/sh/boards/board-secureedge5410.c
index 331745d..32f875e 100644
--- a/arch/sh/boards/mach-snapgear/setup.c
+++ b/arch/sh/boards/board-secureedge5410.c
@@ -1,6 +1,4 @@
 /*
- * linux/arch/sh/boards/snapgear/setup.c
- *
  * Copyright (C) 2002  David McCullough <davidm@snapgear.com>
  * Copyright (C) 2003  Paul Mundt <lethal@linux-sh.org>
  *
@@ -19,18 +17,19 @@
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <asm/machvec.h>
-#include <mach/snapgear.h>
+#include <mach/secureedge5410.h>
 #include <asm/irq.h>
 #include <asm/io.h>
 #include <cpu/timer.h>
 
+unsigned short secureedge5410_ioport;
+
 /*
  * EraseConfig handling functions
  */
-
 static irqreturn_t eraseconfig_interrupt(int irq, void *dev_id)
 {
-	(void)__raw_readb(0xb8000000);	/* dummy read */
+	ctrl_delay();	/* dummy read */
 
 	printk("SnapGear: erase switch interrupt!\n");
 
@@ -39,21 +38,22 @@
 
 static int __init eraseconfig_init(void)
 {
+	unsigned int irq = evt2irq(0x240);
+
 	printk("SnapGear: EraseConfig init\n");
+
 	/* Setup "EraseConfig" switch on external IRQ 0 */
-	if (request_irq(IRL0_IRQ, eraseconfig_interrupt, IRQF_DISABLED,
+	if (request_irq(irq, eraseconfig_interrupt, IRQF_DISABLED,
 				"Erase Config", NULL))
 		printk("SnapGear: failed to register IRQ%d for Reset witch\n",
-				IRL0_IRQ);
+				irq);
 	else
 		printk("SnapGear: registered EraseConfig switch on IRQ%d\n",
-				IRL0_IRQ);
-	return(0);
+				irq);
+	return 0;
 }
-
 module_init(eraseconfig_init);
 
-/****************************************************************************/
 /*
  * Initialize IRQ setting
  *
@@ -62,7 +62,6 @@
  * IRL2 = eth1
  * IRL3 = crypto
  */
-
 static void __init init_snapgear_IRQ(void)
 {
 	printk("Setup SnapGear IRQ/IPR ...\n");
@@ -76,20 +75,5 @@
 static struct sh_machine_vector mv_snapgear __initmv = {
 	.mv_name		= "SnapGear SecureEdge5410",
 	.mv_nr_irqs		= 72,
-
-	.mv_inb			= snapgear_inb,
-	.mv_inw			= snapgear_inw,
-	.mv_inl			= snapgear_inl,
-	.mv_outb		= snapgear_outb,
-	.mv_outw		= snapgear_outw,
-	.mv_outl		= snapgear_outl,
-
-	.mv_inb_p		= snapgear_inb_p,
-	.mv_inw_p		= snapgear_inw,
-	.mv_inl_p		= snapgear_inl,
-	.mv_outb_p		= snapgear_outb_p,
-	.mv_outw_p		= snapgear_outw,
-	.mv_outl_p		= snapgear_outl,
-
 	.mv_init_irq		= init_snapgear_IRQ,
 };
diff --git a/arch/sh/boards/mach-edosk7705/Makefile b/arch/sh/boards/mach-edosk7705/Makefile
deleted file mode 100644
index cd54acb..0000000
--- a/arch/sh/boards/mach-edosk7705/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for the EDOSK7705 specific parts of the kernel
-#
-
-obj-y	 := setup.o io.o
diff --git a/arch/sh/boards/mach-edosk7705/io.c b/arch/sh/boards/mach-edosk7705/io.c
deleted file mode 100644
index 5b9c57c..0000000
--- a/arch/sh/boards/mach-edosk7705/io.c
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * arch/sh/boards/renesas/edosk7705/io.c
- *
- * Copyright (C) 2001  Ian da Silva, Jeremy Siegel
- * Based largely on io_se.c.
- *
- * I/O routines for Hitachi EDOSK7705 board.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/io.h>
-#include <mach/edosk7705.h>
-#include <asm/addrspace.h>
-
-#define SMC_IOADDR	0xA2000000
-
-/* Map the Ethernet addresses as if it is at 0x300 - 0x320 */
-static unsigned long sh_edosk7705_isa_port2addr(unsigned long port)
-{
-	/*
-	 * SMC91C96 registers are 4 byte aligned rather than the
-	 * usual 2 byte!
-	 */
-	if (port >= 0x300 && port < 0x320)
-		return SMC_IOADDR + ((port - 0x300) * 2);
-
-	maybebadio(port);
-	return port;
-}
-
-/* Trying to read / write bytes on odd-byte boundaries to the Ethernet
- * registers causes problems. So we bit-shift the value and read / write
- * in 2 byte chunks. Setting the low byte to 0 does not cause problems
- * now as odd byte writes are only made on the bit mask / interrupt
- * register. This may not be the case in future Mar-2003 SJD
- */
-unsigned char sh_edosk7705_inb(unsigned long port)
-{
-	if (port >= 0x300 && port < 0x320 && port & 0x01)
-		return __raw_readw(port - 1) >> 8;
-
-	return __raw_readb(sh_edosk7705_isa_port2addr(port));
-}
-
-void sh_edosk7705_outb(unsigned char value, unsigned long port)
-{
-	if (port >= 0x300 && port < 0x320 && port & 0x01) {
-		__raw_writew(((unsigned short)value << 8), port - 1);
-		return;
-	}
-
-	__raw_writeb(value, sh_edosk7705_isa_port2addr(port));
-}
-
-void sh_edosk7705_insb(unsigned long port, void *addr, unsigned long count)
-{
-	unsigned char *p = addr;
-
-	while (count--)
-		*p++ = sh_edosk7705_inb(port);
-}
-
-void sh_edosk7705_outsb(unsigned long port, const void *addr, unsigned long count)
-{
-	unsigned char *p = (unsigned char *)addr;
-
-	while (count--)
-		sh_edosk7705_outb(*p++, port);
-}
diff --git a/arch/sh/boards/mach-edosk7705/setup.c b/arch/sh/boards/mach-edosk7705/setup.c
deleted file mode 100644
index d59225e..0000000
--- a/arch/sh/boards/mach-edosk7705/setup.c
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * arch/sh/boards/renesas/edosk7705/setup.c
- *
- * Copyright (C) 2000  Kazumoto Kojima
- *
- * Hitachi SolutionEngine Support.
- *
- * Modified for edosk7705 development
- * board by S. Dunn, 2003.
- */
-#include <linux/init.h>
-#include <linux/irq.h>
-#include <asm/machvec.h>
-#include <mach/edosk7705.h>
-
-static void __init sh_edosk7705_init_irq(void)
-{
-	/* This is the Ethernet interrupt */
-	make_imask_irq(0x09);
-}
-
-/*
- * The Machine Vector
- */
-static struct sh_machine_vector mv_edosk7705 __initmv = {
-	.mv_name		= "EDOSK7705",
-	.mv_nr_irqs		= 80,
-
-	.mv_inb			= sh_edosk7705_inb,
-	.mv_outb		= sh_edosk7705_outb,
-
-	.mv_insb		= sh_edosk7705_insb,
-	.mv_outsb		= sh_edosk7705_outsb,
-
-	.mv_init_irq		= sh_edosk7705_init_irq,
-};
diff --git a/arch/sh/boards/mach-microdev/io.c b/arch/sh/boards/mach-microdev/io.c
index 2960c65..acdafb0 100644
--- a/arch/sh/boards/mach-microdev/io.c
+++ b/arch/sh/boards/mach-microdev/io.c
@@ -54,7 +54,7 @@
 /*
  * map I/O ports to memory-mapped addresses
  */
-static unsigned long microdev_isa_port2addr(unsigned long offset)
+void __iomem *microdev_ioport_map(unsigned long offset, unsigned int len)
 {
 	unsigned long result;
 
@@ -72,16 +72,6 @@
 			 *	Configuration Registers
 			 */
 		result = IO_SUPERIO_PHYS + (offset << 1);
-#if 0
-	} else if (offset == KBD_DATA_REG || offset == KBD_CNTL_REG ||
-		   offset == KBD_STATUS_REG) {
-			/*
-			 *	SMSC FDC37C93xAPM SuperIO chip
-			 *
-			 *	PS/2 Keyboard + Mouse (ports 0x60 and 0x64).
-			 */
-	        result = IO_SUPERIO_PHYS + (offset << 1);
-#endif
 	} else if (((offset >= IO_IDE1_BASE) &&
 		    (offset <  IO_IDE1_BASE + IO_IDE_EXTENT)) ||
 		    (offset == IO_IDE1_MISC)) {
@@ -131,237 +121,5 @@
 		result = PVR;
 	}
 
-	return result;
-}
-
-#define PORT2ADDR(x) (microdev_isa_port2addr(x))
-
-static inline void delay(void)
-{
-#if defined(CONFIG_PCI)
-	/* System board present, just make a dummy SRAM access.  (CS0 will be
-	   mapped to PCI memory, probably good to avoid it.) */
-	__raw_readw(0xa6800000);
-#else
-	/* CS0 will be mapped to flash, ROM etc so safe to access it. */
-	__raw_readw(0xa0000000);
-#endif
-}
-
-unsigned char microdev_inb(unsigned long port)
-{
-#ifdef CONFIG_PCI
-	if (port >= PCIBIOS_MIN_IO)
-		return microdev_pci_inb(port);
-#endif
-	return *(volatile unsigned char*)PORT2ADDR(port);
-}
-
-unsigned short microdev_inw(unsigned long port)
-{
-#ifdef CONFIG_PCI
-	if (port >= PCIBIOS_MIN_IO)
-		return microdev_pci_inw(port);
-#endif
-	return *(volatile unsigned short*)PORT2ADDR(port);
-}
-
-unsigned int microdev_inl(unsigned long port)
-{
-#ifdef CONFIG_PCI
-	if (port >= PCIBIOS_MIN_IO)
-		return microdev_pci_inl(port);
-#endif
-	return *(volatile unsigned int*)PORT2ADDR(port);
-}
-
-void microdev_outw(unsigned short b, unsigned long port)
-{
-#ifdef CONFIG_PCI
-	if (port >= PCIBIOS_MIN_IO) {
-		microdev_pci_outw(b, port);
-		return;
-	}
-#endif
-	*(volatile unsigned short*)PORT2ADDR(port) = b;
-}
-
-void microdev_outb(unsigned char b, unsigned long port)
-{
-#ifdef CONFIG_PCI
-	if (port >= PCIBIOS_MIN_IO) {
-		microdev_pci_outb(b, port);
-		return;
-	}
-#endif
-
-	/*
-	 *	There is a board feature with the current SH4-202 MicroDev in
-	 *	that the 2 byte enables (nBE0 and nBE1) are tied together (and
-	 *	to the Chip Select Line (Ethernet_CS)). Due to this connectivity,
-	 *	it is not possible to safely perform 8-bit writes to the
-	 *	Ethernet registers, as 16-bits will be consumed from the Data
-	 *	lines (corrupting the other byte).  Hence, this function is
-	 *	written to implement 16-bit read/modify/write for all byte-wide
-	 *	accesses.
-	 *
-	 *	Note: there is no problem with byte READS (even or odd).
-	 *
-	 *			Sean McGoogan - 16th June 2003.
-	 */
-	if ((port >= IO_LAN91C111_BASE) &&
-	    (port <  IO_LAN91C111_BASE + IO_LAN91C111_EXTENT)) {
-			/*
-			 * Then are trying to perform a byte-write to the
-			 * LAN91C111.  This needs special care.
-			 */
-		if (port % 2 == 1) {	/* is the port odd ? */
-			/* unset bit-0, i.e. make even */
-			const unsigned long evenPort = port-1;
-			unsigned short word;
-
-			/*
-			 * do a 16-bit read/write to write to 'port',
-			 * preserving even byte.
-			 *
-			 *	Even addresses are bits 0-7
-			 *	Odd  addresses are bits 8-15
-			 */
-			word = microdev_inw(evenPort);
-			word = (word & 0xffu) | (b << 8);
-			microdev_outw(word, evenPort);
-		} else {
-			/* else, we are trying to do an even byte write */
-			unsigned short word;
-
-			/*
-			 * do a 16-bit read/write to write to 'port',
-			 * preserving odd byte.
-			 *
-			 *	Even addresses are bits 0-7
-			 *	Odd  addresses are bits 8-15
-			 */
-			word = microdev_inw(port);
-			word = (word & 0xff00u) | (b);
-			microdev_outw(word, port);
-		}
-	} else {
-		*(volatile unsigned char*)PORT2ADDR(port) = b;
-	}
-}
-
-void microdev_outl(unsigned int b, unsigned long port)
-{
-#ifdef CONFIG_PCI
-	if (port >= PCIBIOS_MIN_IO) {
-		microdev_pci_outl(b, port);
-		return;
-	}
-#endif
-	*(volatile unsigned int*)PORT2ADDR(port) = b;
-}
-
-unsigned char microdev_inb_p(unsigned long port)
-{
-	unsigned char v = microdev_inb(port);
-	delay();
-	return v;
-}
-
-unsigned short microdev_inw_p(unsigned long port)
-{
-	unsigned short v = microdev_inw(port);
-	delay();
-	return v;
-}
-
-unsigned int microdev_inl_p(unsigned long port)
-{
-	unsigned int v = microdev_inl(port);
-	delay();
-	return v;
-}
-
-void microdev_outb_p(unsigned char b, unsigned long port)
-{
-	microdev_outb(b, port);
-	delay();
-}
-
-void microdev_outw_p(unsigned short b, unsigned long port)
-{
-	microdev_outw(b, port);
-	delay();
-}
-
-void microdev_outl_p(unsigned int b, unsigned long port)
-{
-	microdev_outl(b, port);
-	delay();
-}
-
-void microdev_insb(unsigned long port, void *buffer, unsigned long count)
-{
-	volatile unsigned char *port_addr;
-	unsigned char *buf = buffer;
-
-	port_addr = (volatile unsigned char *)PORT2ADDR(port);
-
-	while (count--)
-		*buf++ = *port_addr;
-}
-
-void microdev_insw(unsigned long port, void *buffer, unsigned long count)
-{
-	volatile unsigned short *port_addr;
-	unsigned short *buf = buffer;
-
-	port_addr = (volatile unsigned short *)PORT2ADDR(port);
-
-	while (count--)
-		*buf++ = *port_addr;
-}
-
-void microdev_insl(unsigned long port, void *buffer, unsigned long count)
-{
-	volatile unsigned long *port_addr;
-	unsigned int *buf = buffer;
-
-	port_addr = (volatile unsigned long *)PORT2ADDR(port);
-
-	while (count--)
-		*buf++ = *port_addr;
-}
-
-void microdev_outsb(unsigned long port, const void *buffer, unsigned long count)
-{
-	volatile unsigned char *port_addr;
-	const unsigned char *buf = buffer;
-
-	port_addr = (volatile unsigned char *)PORT2ADDR(port);
-
-	while (count--)
-		*port_addr = *buf++;
-}
-
-void microdev_outsw(unsigned long port, const void *buffer, unsigned long count)
-{
-	volatile unsigned short *port_addr;
-	const unsigned short *buf = buffer;
-
-	port_addr = (volatile unsigned short *)PORT2ADDR(port);
-
-	while (count--)
-		*port_addr = *buf++;
-}
-
-void microdev_outsl(unsigned long port, const void *buffer, unsigned long count)
-{
-	volatile unsigned long *port_addr;
-	const unsigned int *buf = buffer;
-
-	port_addr = (volatile unsigned long *)PORT2ADDR(port);
-
-	while (count--)
-		*port_addr = *buf++;
+	return (void __iomem *)result;
 }
diff --git a/arch/sh/boards/mach-microdev/setup.c b/arch/sh/boards/mach-microdev/setup.c
index d1df2a4..d8a7472 100644
--- a/arch/sh/boards/mach-microdev/setup.c
+++ b/arch/sh/boards/mach-microdev/setup.c
@@ -195,27 +195,6 @@
 static struct sh_machine_vector mv_sh4202_microdev __initmv = {
 	.mv_name		= "SH4-202 MicroDev",
 	.mv_nr_irqs		= 72,
-
-	.mv_inb			= microdev_inb,
-	.mv_inw			= microdev_inw,
-	.mv_inl			= microdev_inl,
-	.mv_outb		= microdev_outb,
-	.mv_outw		= microdev_outw,
-	.mv_outl		= microdev_outl,
-
-	.mv_inb_p		= microdev_inb_p,
-	.mv_inw_p		= microdev_inw_p,
-	.mv_inl_p		= microdev_inl_p,
-	.mv_outb_p		= microdev_outb_p,
-	.mv_outw_p		= microdev_outw_p,
-	.mv_outl_p		= microdev_outl_p,
-
-	.mv_insb		= microdev_insb,
-	.mv_insw		= microdev_insw,
-	.mv_insl		= microdev_insl,
-	.mv_outsb		= microdev_outsb,
-	.mv_outsw		= microdev_outsw,
-	.mv_outsl		= microdev_outsl,
-
+	.mv_ioport_map		= microdev_ioport_map,
 	.mv_init_irq		= init_microdev_irq,
 };
diff --git a/arch/sh/boards/mach-se/7206/Makefile b/arch/sh/boards/mach-se/7206/Makefile
index 63e7ed6..5c9eaa0 100644
--- a/arch/sh/boards/mach-se/7206/Makefile
+++ b/arch/sh/boards/mach-se/7206/Makefile
@@ -2,4 +2,4 @@
 # Makefile for the 7206 SolutionEngine specific parts of the kernel
 #
 
-obj-y	 := setup.o io.o irq.o
+obj-y	 := setup.o irq.o
diff --git a/arch/sh/boards/mach-se/7206/io.c b/arch/sh/boards/mach-se/7206/io.c
deleted file mode 100644
index adadc77..0000000
--- a/arch/sh/boards/mach-se/7206/io.c
+++ /dev/null
@@ -1,104 +0,0 @@
-/* $Id: io.c,v 1.5 2004/02/22 23:08:43 kkojima Exp $
- *
- * linux/arch/sh/boards/se/7206/io.c
- *
- * Copyright (C) 2006 Yoshinori Sato
- *
- * I/O routine for Hitachi 7206 SolutionEngine.
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <asm/io.h>
-#include <mach-se/mach/se7206.h>
-
-
-static inline void delay(void)
-{
-	__raw_readw(0x20000000);  /* P2 ROM Area */
-}
-
-/* MS7750 requires special versions of in*, out* routines, since
-   PC-like io ports are located at upper half byte of 16-bit word which
-   can be accessed only with 16-bit wide.  */
-
-static inline volatile __u16 *
-port2adr(unsigned int port)
-{
-	if (port >= 0x2000 && port < 0x2020)
-		return (volatile __u16 *) (PA_MRSHPC + (port - 0x2000));
-	else if (port >= 0x300 && port < 0x310)
-		return (volatile __u16 *) (PA_SMSC + (port - 0x300));
-
-	return (volatile __u16 *)port;
-}
-
-unsigned char se7206_inb(unsigned long port)
-{
-	return (*port2adr(port)) & 0xff;
-}
-
-unsigned char se7206_inb_p(unsigned long port)
-{
-	unsigned long v;
-
-	v = (*port2adr(port)) & 0xff;
-	delay();
-	return v;
-}
-
-unsigned short se7206_inw(unsigned long port)
-{
-	return *port2adr(port);
-}
-
-void se7206_outb(unsigned char value, unsigned long port)
-{
-	*(port2adr(port)) = value;
-}
-
-void se7206_outb_p(unsigned char value, unsigned long port)
-{
-	*(port2adr(port)) = value;
-	delay();
-}
-
-void se7206_outw(unsigned short value, unsigned long port)
-{
-	*port2adr(port) = value;
-}
-
-void se7206_insb(unsigned long port, void *addr, unsigned long count)
-{
-	volatile __u16 *p = port2adr(port);
-	__u8 *ap = addr;
-
-	while (count--)
-		*ap++ = *p;
-}
-
-void se7206_insw(unsigned long port, void *addr, unsigned long count)
-{
-	volatile __u16 *p = port2adr(port);
-	__u16 *ap = addr;
-	while (count--)
-		*ap++ = *p;
-}
-
-void se7206_outsb(unsigned long port, const void *addr, unsigned long count)
-{
-	volatile __u16 *p = port2adr(port);
-	const __u8 *ap = addr;
-
-	while (count--)
-		*p = *ap++;
-}
-
-void se7206_outsw(unsigned long port, const void *addr, unsigned long count)
-{
-	volatile __u16 *p = port2adr(port);
-	const __u16 *ap = addr;
-	while (count--)
-		*p = *ap++;
-}
diff --git a/arch/sh/boards/mach-se/7206/irq.c b/arch/sh/boards/mach-se/7206/irq.c
index 883b21e..d96194960 100644
--- a/arch/sh/boards/mach-se/7206/irq.c
+++ b/arch/sh/boards/mach-se/7206/irq.c
@@ -139,11 +139,13 @@
 	make_se7206_irq(IRQ0_IRQ); /* SMC91C111 */
 	make_se7206_irq(IRQ1_IRQ); /* ATA */
 	make_se7206_irq(IRQ3_IRQ); /* SLOT / PCM */
-	__raw_writew(inw(INTC_ICR1) | 0x000b ,INTC_ICR1 ) ; /* ICR1 */
+
+	__raw_writew(__raw_readw(INTC_ICR1) | 0x000b, INTC_ICR1); /* ICR1 */
 
 	/* FPGA System register setup*/
 	__raw_writew(0x0000,INTSTS0); /* Clear INTSTS0 */
 	__raw_writew(0x0000,INTSTS1); /* Clear INTSTS1 */
+
 	/* IRQ0=LAN, IRQ1=ATA, IRQ3=SLT,PCM */
 	__raw_writew(0x0001,INTSEL);
 }
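
The corrected line performs a plain read-modify-write on the memory-mapped ICR1 register through the __raw_* accessors instead of routing the read through the (now removed) inw() port hook. The pattern in isolation, as a sketch:

/* Read-modify-write on a 16-bit memory-mapped register; __raw_*
 * accessors bypass the machine-vector port I/O entirely. */
static inline void mmio_set_bits16(unsigned long reg, u16 bits)
{
	__raw_writew(__raw_readw(reg) | bits, reg);
}

/* e.g. mmio_set_bits16(INTC_ICR1, 0x000b); */
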
diff --git a/arch/sh/boards/mach-se/7206/setup.c b/arch/sh/boards/mach-se/7206/setup.c
index 8f5c65d..7f4871c 100644
--- a/arch/sh/boards/mach-se/7206/setup.c
+++ b/arch/sh/boards/mach-se/7206/setup.c
@@ -86,20 +86,5 @@
 static struct sh_machine_vector mv_se __initmv = {
 	.mv_name		= "SolutionEngine",
 	.mv_nr_irqs		= 256,
-	.mv_inb			= se7206_inb,
-	.mv_inw			= se7206_inw,
-	.mv_outb		= se7206_outb,
-	.mv_outw		= se7206_outw,
-
-	.mv_inb_p		= se7206_inb_p,
-	.mv_inw_p		= se7206_inw,
-	.mv_outb_p		= se7206_outb_p,
-	.mv_outw_p		= se7206_outw,
-
-	.mv_insb		= se7206_insb,
-	.mv_insw		= se7206_insw,
-	.mv_outsb		= se7206_outsb,
-	.mv_outsw		= se7206_outsw,
-
 	.mv_init_irq		= init_se7206_IRQ,
 };
diff --git a/arch/sh/boards/mach-se/770x/Makefile b/arch/sh/boards/mach-se/770x/Makefile
index 8e624b0..43ea14f 100644
--- a/arch/sh/boards/mach-se/770x/Makefile
+++ b/arch/sh/boards/mach-se/770x/Makefile
@@ -2,4 +2,4 @@
 # Makefile for the 770x SolutionEngine specific parts of the kernel
 #
 
-obj-y	 := setup.o io.o irq.o
+obj-y	 := setup.o irq.o
diff --git a/arch/sh/boards/mach-se/770x/io.c b/arch/sh/boards/mach-se/770x/io.c
deleted file mode 100644
index 28833c8..0000000
--- a/arch/sh/boards/mach-se/770x/io.c
+++ /dev/null
@@ -1,156 +0,0 @@
-/*
- * Copyright (C) 2000  Kazumoto Kojima
- *
- * I/O routine for Hitachi SolutionEngine.
- */
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <asm/io.h>
-#include <mach-se/mach/se.h>
-
-/* MS7750 requires special versions of in*, out* routines, since
-   PC-like io ports are located at upper half byte of 16-bit word which
-   can be accessed only with 16-bit wide.  */
-
-static inline volatile __u16 *
-port2adr(unsigned int port)
-{
-	if (port & 0xff000000)
-		return ( volatile __u16 *) port;
-	if (port >= 0x2000)
-		return (volatile __u16 *) (PA_MRSHPC + (port - 0x2000));
-	else if (port >= 0x1000)
-		return (volatile __u16 *) (PA_83902 + (port << 1));
-	else
-		return (volatile __u16 *) (PA_SUPERIO + (port << 1));
-}
-
-static inline int
-shifted_port(unsigned long port)
-{
-	/* For IDE registers, value is not shifted */
-	if ((0x1f0 <= port && port < 0x1f8) || port == 0x3f6)
-		return 0;
-	else
-		return 1;
-}
-
-unsigned char se_inb(unsigned long port)
-{
-	if (shifted_port(port))
-		return (*port2adr(port) >> 8);
-	else
-		return (*port2adr(port))&0xff;
-}
-
-unsigned char se_inb_p(unsigned long port)
-{
-	unsigned long v;
-
-	if (shifted_port(port))
-		v = (*port2adr(port) >> 8);
-	else
-		v = (*port2adr(port))&0xff;
-	ctrl_delay();
-	return v;
-}
-
-unsigned short se_inw(unsigned long port)
-{
-	if (port >= 0x2000)
-		return *port2adr(port);
-	else
-		maybebadio(port);
-	return 0;
-}
-
-unsigned int se_inl(unsigned long port)
-{
-	maybebadio(port);
-	return 0;
-}
-
-void se_outb(unsigned char value, unsigned long port)
-{
-	if (shifted_port(port))
-		*(port2adr(port)) = value << 8;
-	else
-		*(port2adr(port)) = value;
-}
-
-void se_outb_p(unsigned char value, unsigned long port)
-{
-	if (shifted_port(port))
-		*(port2adr(port)) = value << 8;
-	else
-		*(port2adr(port)) = value;
-	ctrl_delay();
-}
-
-void se_outw(unsigned short value, unsigned long port)
-{
-	if (port >= 0x2000)
-		*port2adr(port) = value;
-	else
-		maybebadio(port);
-}
-
-void se_outl(unsigned int value, unsigned long port)
-{
-	maybebadio(port);
-}
-
-void se_insb(unsigned long port, void *addr, unsigned long count)
-{
-	volatile __u16 *p = port2adr(port);
-	__u8 *ap = addr;
-
-	if (shifted_port(port)) {
-		while (count--)
-			*ap++ = *p >> 8;
-	} else {
-		while (count--)
-			*ap++ = *p;
-	}
-}
-
-void se_insw(unsigned long port, void *addr, unsigned long count)
-{
-	volatile __u16 *p = port2adr(port);
-	__u16 *ap = addr;
-	while (count--)
-		*ap++ = *p;
-}
-
-void se_insl(unsigned long port, void *addr, unsigned long count)
-{
-	maybebadio(port);
-}
-
-void se_outsb(unsigned long port, const void *addr, unsigned long count)
-{
-	volatile __u16 *p = port2adr(port);
-	const __u8 *ap = addr;
-
-	if (shifted_port(port)) {
-		while (count--)
-			*p = *ap++ << 8;
-	} else {
-		while (count--)
-			*p = *ap++;
-	}
-}
-
-void se_outsw(unsigned long port, const void *addr, unsigned long count)
-{
-	volatile __u16 *p = port2adr(port);
-	const __u16 *ap = addr;
-
-	while (count--)
-		*p = *ap++;
-}
-
-void se_outsl(unsigned long port, const void *addr, unsigned long count)
-{
-	maybebadio(port);
-}
diff --git a/arch/sh/boards/mach-se/770x/setup.c b/arch/sh/boards/mach-se/770x/setup.c
index 66d39d1..31330c6 100644
--- a/arch/sh/boards/mach-se/770x/setup.c
+++ b/arch/sh/boards/mach-se/770x/setup.c
@@ -195,27 +195,5 @@
 #elif defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7712)
 	.mv_nr_irqs             = 104,
 #endif
-
-	.mv_inb			= se_inb,
-	.mv_inw			= se_inw,
-	.mv_inl			= se_inl,
-	.mv_outb		= se_outb,
-	.mv_outw		= se_outw,
-	.mv_outl		= se_outl,
-
-	.mv_inb_p		= se_inb_p,
-	.mv_inw_p		= se_inw,
-	.mv_inl_p		= se_inl,
-	.mv_outb_p		= se_outb_p,
-	.mv_outw_p		= se_outw,
-	.mv_outl_p		= se_outl,
-
-	.mv_insb		= se_insb,
-	.mv_insw		= se_insw,
-	.mv_insl		= se_insl,
-	.mv_outsb		= se_outsb,
-	.mv_outsw		= se_outsw,
-	.mv_outsl		= se_outsl,
-
 	.mv_init_irq		= init_se_IRQ,
 };
diff --git a/arch/sh/boards/mach-se/7751/Makefile b/arch/sh/boards/mach-se/7751/Makefile
index e6f4341..a338fd9 100644
--- a/arch/sh/boards/mach-se/7751/Makefile
+++ b/arch/sh/boards/mach-se/7751/Makefile
@@ -2,4 +2,4 @@
 # Makefile for the 7751 SolutionEngine specific parts of the kernel
 #
 
-obj-y	 := setup.o io.o irq.o
+obj-y	 := setup.o irq.o
diff --git a/arch/sh/boards/mach-se/7751/io.c b/arch/sh/boards/mach-se/7751/io.c
deleted file mode 100644
index 6e75bd4..0000000
--- a/arch/sh/boards/mach-se/7751/io.c
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- * Copyright (C) 2001  Ian da Silva, Jeremy Siegel
- * Based largely on io_se.c.
- *
- * I/O routine for Hitachi 7751 SolutionEngine.
- *
- * Initial version only to support LAN access; some
- * placeholder code from io_se.c left in with the
- * expectation of later SuperIO and PCMCIA access.
- */
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <asm/io.h>
-#include <mach-se/mach/se7751.h>
-#include <asm/addrspace.h>
-
-static inline volatile u16 *port2adr(unsigned int port)
-{
-	if (port >= 0x2000)
-		return (volatile __u16 *) (PA_MRSHPC + (port - 0x2000));
-	maybebadio((unsigned long)port);
-	return (volatile __u16*)port;
-}
-
-/*
- * General outline: remap really low stuff [eventually] to SuperIO,
- * stuff in PCI IO space (at or above window at pci.h:PCIBIOS_MIN_IO)
- * is mapped through the PCI IO window.  Stuff with high bits (PXSEG)
- * should be way beyond the window, and is used  w/o translation for
- * compatibility.
- */
-unsigned char sh7751se_inb(unsigned long port)
-{
-	if (PXSEG(port))
-		return *(volatile unsigned char *)port;
-	else
-		return (*port2adr(port)) & 0xff;
-}
-
-unsigned char sh7751se_inb_p(unsigned long port)
-{
-	unsigned char v;
-
-        if (PXSEG(port))
-                v = *(volatile unsigned char *)port;
-	else
-		v = (*port2adr(port)) & 0xff;
-	ctrl_delay();
-	return v;
-}
-
-unsigned short sh7751se_inw(unsigned long port)
-{
-        if (PXSEG(port))
-                return *(volatile unsigned short *)port;
-	else if (port >= 0x2000)
-		return *port2adr(port);
-	else
-		maybebadio(port);
-	return 0;
-}
-
-unsigned int sh7751se_inl(unsigned long port)
-{
-        if (PXSEG(port))
-                return *(volatile unsigned long *)port;
-	else if (port >= 0x2000)
-		return *port2adr(port);
-	else
-		maybebadio(port);
-	return 0;
-}
-
-void sh7751se_outb(unsigned char value, unsigned long port)
-{
-
-        if (PXSEG(port))
-                *(volatile unsigned char *)port = value;
-	else
-		*(port2adr(port)) = value;
-}
-
-void sh7751se_outb_p(unsigned char value, unsigned long port)
-{
-        if (PXSEG(port))
-                *(volatile unsigned char *)port = value;
-	else
-		*(port2adr(port)) = value;
-	ctrl_delay();
-}
-
-void sh7751se_outw(unsigned short value, unsigned long port)
-{
-        if (PXSEG(port))
-                *(volatile unsigned short *)port = value;
-	else if (port >= 0x2000)
-		*port2adr(port) = value;
-	else
-		maybebadio(port);
-}
-
-void sh7751se_outl(unsigned int value, unsigned long port)
-{
-        if (PXSEG(port))
-                *(volatile unsigned long *)port = value;
-	else
-		maybebadio(port);
-}
-
-void sh7751se_insl(unsigned long port, void *addr, unsigned long count)
-{
-	maybebadio(port);
-}
-
-void sh7751se_outsl(unsigned long port, const void *addr, unsigned long count)
-{
-	maybebadio(port);
-}
diff --git a/arch/sh/boards/mach-se/7751/setup.c b/arch/sh/boards/mach-se/7751/setup.c
index 5057251..9fbc51b 100644
--- a/arch/sh/boards/mach-se/7751/setup.c
+++ b/arch/sh/boards/mach-se/7751/setup.c
@@ -56,23 +56,5 @@
 static struct sh_machine_vector mv_7751se __initmv = {
 	.mv_name		= "7751 SolutionEngine",
 	.mv_nr_irqs		= 72,
-
-	.mv_inb			= sh7751se_inb,
-	.mv_inw			= sh7751se_inw,
-	.mv_inl			= sh7751se_inl,
-	.mv_outb		= sh7751se_outb,
-	.mv_outw		= sh7751se_outw,
-	.mv_outl		= sh7751se_outl,
-
-	.mv_inb_p		= sh7751se_inb_p,
-	.mv_inw_p		= sh7751se_inw,
-	.mv_inl_p		= sh7751se_inl,
-	.mv_outb_p		= sh7751se_outb_p,
-	.mv_outw_p		= sh7751se_outw,
-	.mv_outl_p		= sh7751se_outl,
-
-	.mv_insl		= sh7751se_insl,
-	.mv_outsl		= sh7751se_outsl,
-
 	.mv_init_irq		= init_7751se_IRQ,
 };
diff --git a/arch/sh/boards/mach-snapgear/Makefile b/arch/sh/boards/mach-snapgear/Makefile
deleted file mode 100644
index d2d2f4b..0000000
--- a/arch/sh/boards/mach-snapgear/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for the SnapGear specific parts of the kernel
-#
-
-obj-y	 := setup.o io.o
diff --git a/arch/sh/boards/mach-snapgear/io.c b/arch/sh/boards/mach-snapgear/io.c
deleted file mode 100644
index 476650e..0000000
--- a/arch/sh/boards/mach-snapgear/io.c
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright (C) 2002  David McCullough <davidm@snapgear.com>
- * Copyright (C) 2001  Ian da Silva, Jeremy Siegel
- * Based largely on io_se.c.
- *
- * I/O routine for Hitachi 7751 SolutionEngine.
- *
- * Initial version only to support LAN access; some
- * placeholder code from io_se.c left in with the
- * expectation of later SuperIO and PCMCIA access.
- */
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <asm/io.h>
-#include <asm/addrspace.h>
-
-#ifdef CONFIG_SH_SECUREEDGE5410
-unsigned short secureedge5410_ioport;
-#endif
-
-static inline volatile __u16 *port2adr(unsigned int port)
-{
-	maybebadio((unsigned long)port);
-	return (volatile __u16*)port;
-}
-
-/*
- * General outline: remap really low stuff [eventually] to SuperIO,
- * stuff in PCI IO space (at or above window at pci.h:PCIBIOS_MIN_IO)
- * is mapped through the PCI IO window.  Stuff with high bits (PXSEG)
- * should be way beyond the window, and is used  w/o translation for
- * compatibility.
- */
-unsigned char snapgear_inb(unsigned long port)
-{
-	if (PXSEG(port))
-		return *(volatile unsigned char *)port;
-	else
-		return (*port2adr(port)) & 0xff;
-}
-
-unsigned char snapgear_inb_p(unsigned long port)
-{
-	unsigned char v;
-
-	if (PXSEG(port))
-		v = *(volatile unsigned char *)port;
-	else
-		v = (*port2adr(port))&0xff;
-	ctrl_delay();
-	return v;
-}
-
-unsigned short snapgear_inw(unsigned long port)
-{
-	if (PXSEG(port))
-		return *(volatile unsigned short *)port;
-	else if (port >= 0x2000)
-		return *port2adr(port);
-	else
-		maybebadio(port);
-	return 0;
-}
-
-unsigned int snapgear_inl(unsigned long port)
-{
-	if (PXSEG(port))
-		return *(volatile unsigned long *)port;
-	else if (port >= 0x2000)
-		return *port2adr(port);
-	else
-		maybebadio(port);
-	return 0;
-}
-
-void snapgear_outb(unsigned char value, unsigned long port)
-{
-
-	if (PXSEG(port))
-		*(volatile unsigned char *)port = value;
-	else
-		*(port2adr(port)) = value;
-}
-
-void snapgear_outb_p(unsigned char value, unsigned long port)
-{
-	if (PXSEG(port))
-		*(volatile unsigned char *)port = value;
-	else
-		*(port2adr(port)) = value;
-	ctrl_delay();
-}
-
-void snapgear_outw(unsigned short value, unsigned long port)
-{
-	if (PXSEG(port))
-		*(volatile unsigned short *)port = value;
-	else if (port >= 0x2000)
-		*port2adr(port) = value;
-	else
-		maybebadio(port);
-}
-
-void snapgear_outl(unsigned int value, unsigned long port)
-{
-	if (PXSEG(port))
-		*(volatile unsigned long *)port = value;
-	else
-		maybebadio(port);
-}
-
-void snapgear_insl(unsigned long port, void *addr, unsigned long count)
-{
-	maybebadio(port);
-}
-
-void snapgear_outsl(unsigned long port, const void *addr, unsigned long count)
-{
-	maybebadio(port);
-}
diff --git a/arch/sh/boards/mach-systemh/Makefile b/arch/sh/boards/mach-systemh/Makefile
deleted file mode 100644
index 2cc6a23..0000000
--- a/arch/sh/boards/mach-systemh/Makefile
+++ /dev/null
@@ -1,13 +0,0 @@
-#
-# Makefile for the SystemH specific parts of the kernel
-#
-
-obj-y	 := setup.o irq.o io.o
-
-# XXX: This wants to be consolidated in arch/sh/drivers/pci, and more
-# importantly, with the generic sh7751_pcic_init() code. For now, we'll
-# just abuse the hell out of kbuild, because we can..
-
-obj-$(CONFIG_PCI) += pci.o
-pci-y := ../../se/7751/pci.o
-
diff --git a/arch/sh/boards/mach-systemh/io.c b/arch/sh/boards/mach-systemh/io.c
deleted file mode 100644
index 15577ff..0000000
--- a/arch/sh/boards/mach-systemh/io.c
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * linux/arch/sh/boards/renesas/systemh/io.c
- *
- * Copyright (C) 2001  Ian da Silva, Jeremy Siegel
- * Based largely on io_se.c.
- *
- * I/O routine for Hitachi 7751 Systemh.
- */
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <mach/systemh7751.h>
-#include <asm/addrspace.h>
-#include <asm/io.h>
-
-#define ETHER_IOMAP(adr) (0xB3000000 + (adr)) /*map to 16bits access area
-                                                of smc lan chip*/
-static inline volatile __u16 *
-port2adr(unsigned int port)
-{
-	if (port >= 0x2000)
-		return (volatile __u16 *) (PA_MRSHPC + (port - 0x2000));
-	maybebadio((unsigned long)port);
-	return (volatile __u16*)port;
-}
-
-/*
- * General outline: remap really low stuff [eventually] to SuperIO,
- * stuff in PCI IO space (at or above window at pci.h:PCIBIOS_MIN_IO)
- * is mapped through the PCI IO window.  Stuff with high bits (PXSEG)
- * should be way beyond the window, and is used  w/o translation for
- * compatibility.
- */
-unsigned char sh7751systemh_inb(unsigned long port)
-{
-	if (PXSEG(port))
-		return *(volatile unsigned char *)port;
-	else if (port <= 0x3F1)
-		return *(volatile unsigned char *)ETHER_IOMAP(port);
-	else
-		return (*port2adr(port))&0xff;
-}
-
-unsigned char sh7751systemh_inb_p(unsigned long port)
-{
-	unsigned char v;
-
-        if (PXSEG(port))
-                v = *(volatile unsigned char *)port;
-	else if (port <= 0x3F1)
-		v = *(volatile unsigned char *)ETHER_IOMAP(port);
-	else
-		v = (*port2adr(port))&0xff;
-	ctrl_delay();
-	return v;
-}
-
-unsigned short sh7751systemh_inw(unsigned long port)
-{
-        if (PXSEG(port))
-                return *(volatile unsigned short *)port;
-	else if (port >= 0x2000)
-		return *port2adr(port);
-	else if (port <= 0x3F1)
-		return *(volatile unsigned int *)ETHER_IOMAP(port);
-	else
-		maybebadio(port);
-	return 0;
-}
-
-unsigned int sh7751systemh_inl(unsigned long port)
-{
-        if (PXSEG(port))
-                return *(volatile unsigned long *)port;
-	else if (port >= 0x2000)
-		return *port2adr(port);
-	else if (port <= 0x3F1)
-		return *(volatile unsigned int *)ETHER_IOMAP(port);
-	else
-		maybebadio(port);
-	return 0;
-}
-
-void sh7751systemh_outb(unsigned char value, unsigned long port)
-{
-
-        if (PXSEG(port))
-                *(volatile unsigned char *)port = value;
-	else if (port <= 0x3F1)
-		*(volatile unsigned char *)ETHER_IOMAP(port) = value;
-	else
-		*(port2adr(port)) = value;
-}
-
-void sh7751systemh_outb_p(unsigned char value, unsigned long port)
-{
-        if (PXSEG(port))
-                *(volatile unsigned char *)port = value;
-	else if (port <= 0x3F1)
-		*(volatile unsigned char *)ETHER_IOMAP(port) = value;
-	else
-		*(port2adr(port)) = value;
-	ctrl_delay();
-}
-
-void sh7751systemh_outw(unsigned short value, unsigned long port)
-{
-        if (PXSEG(port))
-                *(volatile unsigned short *)port = value;
-	else if (port >= 0x2000)
-		*port2adr(port) = value;
-	else if (port <= 0x3F1)
-		*(volatile unsigned short *)ETHER_IOMAP(port) = value;
-	else
-		maybebadio(port);
-}
-
-void sh7751systemh_outl(unsigned int value, unsigned long port)
-{
-        if (PXSEG(port))
-                *(volatile unsigned long *)port = value;
-	else
-		maybebadio(port);
-}
-
-void sh7751systemh_insb(unsigned long port, void *addr, unsigned long count)
-{
-	unsigned char *p = addr;
-	while (count--) *p++ = sh7751systemh_inb(port);
-}
-
-void sh7751systemh_insw(unsigned long port, void *addr, unsigned long count)
-{
-	unsigned short *p = addr;
-	while (count--) *p++ = sh7751systemh_inw(port);
-}
-
-void sh7751systemh_insl(unsigned long port, void *addr, unsigned long count)
-{
-	maybebadio(port);
-}
-
-void sh7751systemh_outsb(unsigned long port, const void *addr, unsigned long count)
-{
-	unsigned char *p = (unsigned char*)addr;
-	while (count--) sh7751systemh_outb(*p++, port);
-}
-
-void sh7751systemh_outsw(unsigned long port, const void *addr, unsigned long count)
-{
-	unsigned short *p = (unsigned short*)addr;
-	while (count--) sh7751systemh_outw(*p++, port);
-}
-
-void sh7751systemh_outsl(unsigned long port, const void *addr, unsigned long count)
-{
-	maybebadio(port);
-}
diff --git a/arch/sh/boards/mach-systemh/irq.c b/arch/sh/boards/mach-systemh/irq.c
deleted file mode 100644
index e5ee13a..0000000
--- a/arch/sh/boards/mach-systemh/irq.c
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * linux/arch/sh/boards/renesas/systemh/irq.c
- *
- * Copyright (C) 2000  Kazumoto Kojima
- *
- * Hitachi SystemH Support.
- *
- * Modified for 7751 SystemH by
- * Jonathan Short.
- */
-
-#include <linux/init.h>
-#include <linux/irq.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-
-#include <mach/systemh7751.h>
-#include <asm/smc37c93x.h>
-
-/* address of external interrupt mask register
- * address must be set prior to use these (maybe in init_XXX_irq())
- * XXX : is it better to use .config than specifying it in code? */
-static unsigned long *systemh_irq_mask_register = (unsigned long *)0xB3F10004;
-static unsigned long *systemh_irq_request_register = (unsigned long *)0xB3F10000;
-
-static void disable_systemh_irq(struct irq_data *data)
-{
-	unsigned long val, mask = 0x01 << 1;
-
-	/* Clear the "irq"th bit in the mask and set it in the request */
-	val = __raw_readl((unsigned long)systemh_irq_mask_register);
-	val &= ~mask;
-	__raw_writel(val, (unsigned long)systemh_irq_mask_register);
-
-	val = __raw_readl((unsigned long)systemh_irq_request_register);
-	val |= mask;
-	__raw_writel(val, (unsigned long)systemh_irq_request_register);
-}
-
-static void enable_systemh_irq(struct irq_data *data)
-{
-	unsigned long val, mask = 0x01 << 1;
-
-	/* Set "irq"th bit in the mask register */
-	val = __raw_readl((unsigned long)systemh_irq_mask_register);
-	val |= mask;
-	__raw_writel(val, (unsigned long)systemh_irq_mask_register);
-}
-
-static struct irq_chip systemh_irq_type = {
-	.name		= "SystemH Register",
-	.irq_unmask	= enable_systemh_irq,
-	.irq_mask	= disable_systemh_irq,
-};
-
-void make_systemh_irq(unsigned int irq)
-{
-	disable_irq_nosync(irq);
-	set_irq_chip_and_handler(irq, &systemh_irq_type, handle_level_irq);
-	disable_systemh_irq(irq_get_irq_data(irq));
-}
diff --git a/arch/sh/boards/mach-systemh/setup.c b/arch/sh/boards/mach-systemh/setup.c
deleted file mode 100644
index 219fd80..0000000
--- a/arch/sh/boards/mach-systemh/setup.c
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * linux/arch/sh/boards/renesas/systemh/setup.c
- *
- * Copyright (C) 2000  Kazumoto Kojima
- * Copyright (C) 2003  Paul Mundt
- *
- * Hitachi SystemH Support.
- *
- * Modified for 7751 SystemH by Jonathan Short.
- *
- * Rewritten for 2.6 by Paul Mundt.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/init.h>
-#include <asm/machvec.h>
-#include <mach/systemh7751.h>
-
-extern void make_systemh_irq(unsigned int irq);
-
-/*
- * Initialize IRQ setting
- */
-static void __init sh7751systemh_init_irq(void)
-{
-	make_systemh_irq(0xb);	/* Ethernet interrupt */
-}
-
-static struct sh_machine_vector mv_7751systemh __initmv = {
-	.mv_name		= "7751 SystemH",
-	.mv_nr_irqs		= 72,
-
-	.mv_inb			= sh7751systemh_inb,
-	.mv_inw			= sh7751systemh_inw,
-	.mv_inl			= sh7751systemh_inl,
-	.mv_outb		= sh7751systemh_outb,
-	.mv_outw		= sh7751systemh_outw,
-	.mv_outl		= sh7751systemh_outl,
-
-	.mv_inb_p		= sh7751systemh_inb_p,
-	.mv_inw_p		= sh7751systemh_inw,
-	.mv_inl_p		= sh7751systemh_inl,
-	.mv_outb_p		= sh7751systemh_outb_p,
-	.mv_outw_p		= sh7751systemh_outw,
-	.mv_outl_p		= sh7751systemh_outl,
-
-	.mv_insb		= sh7751systemh_insb,
-	.mv_insw		= sh7751systemh_insw,
-	.mv_insl		= sh7751systemh_insl,
-	.mv_outsb		= sh7751systemh_outsb,
-	.mv_outsw		= sh7751systemh_outsw,
-	.mv_outsl		= sh7751systemh_outsl,
-
-	.mv_init_irq		= sh7751systemh_init_irq,
-};
diff --git a/arch/sh/configs/snapgear_defconfig b/arch/sh/configs/secureedge5410_defconfig
similarity index 100%
rename from arch/sh/configs/snapgear_defconfig
rename to arch/sh/configs/secureedge5410_defconfig
diff --git a/arch/sh/configs/systemh_defconfig b/arch/sh/configs/systemh_defconfig
deleted file mode 100644
index b58dfc5..0000000
--- a/arch/sh/configs/systemh_defconfig
+++ /dev/null
@@ -1,28 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_BLK_DEV_INITRD=y
-# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
-# CONFIG_SYSCTL_SYSCALL is not set
-# CONFIG_HOTPLUG is not set
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_CPU_SUBTYPE_SH7751R=y
-CONFIG_MEMORY_START=0x0c000000
-CONFIG_MEMORY_SIZE=0x00400000
-CONFIG_FLATMEM_MANUAL=y
-CONFIG_SH_7751_SYSTEMH=y
-CONFIG_PREEMPT=y
-# CONFIG_STANDALONE is not set
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=1024
-# CONFIG_INPUT is not set
-# CONFIG_SERIO_SERPORT is not set
-# CONFIG_VT is not set
-CONFIG_HW_RANDOM=y
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_CRAMFS=y
-CONFIG_ROMFS_FS=y
-# CONFIG_RCU_CPU_STALL_DETECTOR is not set
diff --git a/arch/sh/include/asm/addrspace.h b/arch/sh/include/asm/addrspace.h
index 446b383..3d1ae2b 100644
--- a/arch/sh/include/asm/addrspace.h
+++ b/arch/sh/include/asm/addrspace.h
@@ -44,10 +44,10 @@
 /*
  * These will never work in 32-bit, don't even bother.
  */
-#define P1SEGADDR(a)	__futile_remapping_attempt
-#define P2SEGADDR(a)	__futile_remapping_attempt
-#define P3SEGADDR(a)	__futile_remapping_attempt
-#define P4SEGADDR(a)	__futile_remapping_attempt
+#define P1SEGADDR(a)	({ (void)(a); BUG(); NULL; })
+#define P2SEGADDR(a)	({ (void)(a); BUG(); NULL; })
+#define P3SEGADDR(a)	({ (void)(a); BUG(); NULL; })
+#define P4SEGADDR(a)	({ (void)(a); BUG(); NULL; })
 #endif
 #endif /* P1SEG */
 
diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h
index a15f105..083ea06 100644
--- a/arch/sh/include/asm/pgtable.h
+++ b/arch/sh/include/asm/pgtable.h
@@ -66,7 +66,6 @@
 #define PHYS_ADDR_MASK29		0x1fffffff
 #define PHYS_ADDR_MASK32		0xffffffff
 
-#ifdef CONFIG_PMB
 static inline unsigned long phys_addr_mask(void)
 {
 	/* Is the MMU in 29bit mode? */
@@ -75,17 +74,6 @@
 
 	return PHYS_ADDR_MASK32;
 }
-#elif defined(CONFIG_32BIT)
-static inline unsigned long phys_addr_mask(void)
-{
-	return PHYS_ADDR_MASK32;
-}
-#else
-static inline unsigned long phys_addr_mask(void)
-{
-	return PHYS_ADDR_MASK29;
-}
-#endif
 
 #define PTE_PHYS_MASK		(phys_addr_mask() & PAGE_MASK)
 #define PTE_FLAGS_MASK		(~(PTE_PHYS_MASK) << PAGE_SHIFT)
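
With the #ifdef variants folded away, phys_addr_mask() picks the 29-bit or 32-bit mask at run time, and the derived PTE masks follow from it. An illustrative use of the result (example_pte_to_phys is a hypothetical helper, not kernel API):

/* PTE_PHYS_MASK already folds in PAGE_MASK, so this yields the
 * page-aligned physical address in either addressing mode. */
static inline unsigned long example_pte_to_phys(pte_t pte)
{
	return pte_val(pte) & PTE_PHYS_MASK;
}
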
diff --git a/arch/sh/include/asm/system.h b/arch/sh/include/asm/system.h
index 1f1af5a..10c8b18 100644
--- a/arch/sh/include/asm/system.h
+++ b/arch/sh/include/asm/system.h
@@ -10,6 +10,7 @@
 #include <linux/compiler.h>
 #include <linux/linkage.h>
 #include <asm/types.h>
+#include <asm/uncached.h>
 
 #define AT_VECTOR_SIZE_ARCH 5 /* entries in ARCH_DLINFO */
 
@@ -137,9 +138,6 @@
 #define instruction_size(insn)	(4)
 #endif
 
-extern unsigned long cached_to_uncached;
-extern unsigned long uncached_size;
-
 void per_cpu_trap_init(void);
 void default_idle(void);
 void cpu_idle_wait(void);
diff --git a/arch/sh/include/asm/system_32.h b/arch/sh/include/asm/system_32.h
index c941b27..a4ad1cd 100644
--- a/arch/sh/include/asm/system_32.h
+++ b/arch/sh/include/asm/system_32.h
@@ -145,42 +145,6 @@
 		__restore_dsp(prev);				\
 } while (0)
 
-/*
- * Jump to uncached area.
- * When handling TLB or caches, we need to do it from an uncached area.
- */
-#define jump_to_uncached()			\
-do {						\
-	unsigned long __dummy;			\
-						\
-	__asm__ __volatile__(			\
-		"mova	1f, %0\n\t"		\
-		"add	%1, %0\n\t"		\
-		"jmp	@%0\n\t"		\
-		" nop\n\t"			\
-		".balign 4\n"			\
-		"1:"				\
-		: "=&z" (__dummy)		\
-		: "r" (cached_to_uncached));	\
-} while (0)
-
-/*
- * Back to cached area.
- */
-#define back_to_cached()				\
-do {							\
-	unsigned long __dummy;				\
-	ctrl_barrier();					\
-	__asm__ __volatile__(				\
-		"mov.l	1f, %0\n\t"			\
-		"jmp	@%0\n\t"			\
-		" nop\n\t"				\
-		".balign 4\n"				\
-		"1:	.long 2f\n"			\
-		"2:"					\
-		: "=&r" (__dummy));			\
-} while (0)
-
 #ifdef CONFIG_CPU_HAS_SR_RB
 #define lookup_exception_vector()	\
 ({					\
diff --git a/arch/sh/include/asm/system_64.h b/arch/sh/include/asm/system_64.h
index 3633864..8593bc8d 100644
--- a/arch/sh/include/asm/system_64.h
+++ b/arch/sh/include/asm/system_64.h
@@ -34,9 +34,6 @@
 			      &next->thread);			\
 } while (0)
 
-#define jump_to_uncached()	do { } while (0)
-#define back_to_cached()	do { } while (0)
-
 #define __icbi(addr)	__asm__ __volatile__ ( "icbi %0, 0\n\t" : : "r" (addr))
 #define __ocbp(addr)	__asm__ __volatile__ ( "ocbp %0, 0\n\t" : : "r" (addr))
 #define __ocbi(addr)	__asm__ __volatile__ ( "ocbi %0, 0\n\t" : : "r" (addr))
diff --git a/arch/sh/include/asm/uncached.h b/arch/sh/include/asm/uncached.h
index e3419f9..6f8816b 100644
--- a/arch/sh/include/asm/uncached.h
+++ b/arch/sh/include/asm/uncached.h
@@ -4,15 +4,55 @@
 #include <linux/bug.h>
 
 #ifdef CONFIG_UNCACHED_MAPPING
+extern unsigned long cached_to_uncached;
+extern unsigned long uncached_size;
 extern unsigned long uncached_start, uncached_end;
 
 extern int virt_addr_uncached(unsigned long kaddr);
 extern void uncached_init(void);
 extern void uncached_resize(unsigned long size);
+
+/*
+ * Jump to uncached area.
+ * When handling TLB or caches, we need to do it from an uncached area.
+ */
+#define jump_to_uncached()			\
+do {						\
+	unsigned long __dummy;			\
+						\
+	__asm__ __volatile__(			\
+		"mova	1f, %0\n\t"		\
+		"add	%1, %0\n\t"		\
+		"jmp	@%0\n\t"		\
+		" nop\n\t"			\
+		".balign 4\n"			\
+		"1:"				\
+		: "=&z" (__dummy)		\
+		: "r" (cached_to_uncached));	\
+} while (0)
+
+/*
+ * Back to cached area.
+ */
+#define back_to_cached()				\
+do {							\
+	unsigned long __dummy;				\
+	ctrl_barrier();					\
+	__asm__ __volatile__(				\
+		"mov.l	1f, %0\n\t"			\
+		"jmp	@%0\n\t"			\
+		" nop\n\t"				\
+		".balign 4\n"				\
+		"1:	.long 2f\n"			\
+		"2:"					\
+		: "=&r" (__dummy));			\
+} while (0)
 #else
 #define virt_addr_uncached(kaddr)	(0)
 #define uncached_init()			do { } while (0)
 #define uncached_resize(size)		BUG()
+#define jump_to_uncached()		do { } while (0)
+#define back_to_cached()		do { } while (0)
 #endif
 
 #endif /* __ASM_SH_UNCACHED_H */
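
jump_to_uncached()/back_to_cached() bracket code that must execute from the uncached alias, typically while rewriting cache or TLB state. A sketch of the usual calling pattern; the CCR write is a placeholder for whatever control register the caller actually pokes:

/* Sketch: the write to CCR stands in for work that must not
 * execute (or fetch) through the cached mapping. */
static void example_cache_sequence(unsigned long value)
{
	local_irq_disable();
	jump_to_uncached();
	__raw_writel(value, CCR);	/* CCR: assumed register macro */
	back_to_cached();
	local_irq_enable();
}
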
diff --git a/arch/sh/include/mach-common/mach/edosk7705.h b/arch/sh/include/mach-common/mach/edosk7705.h
deleted file mode 100644
index efc43b32..0000000
--- a/arch/sh/include/mach-common/mach/edosk7705.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef __ASM_SH_EDOSK7705_H
-#define __ASM_SH_EDOSK7705_H
-
-#define __IO_PREFIX sh_edosk7705
-#include <asm/io_generic.h>
-
-#endif /* __ASM_SH_EDOSK7705_H */
diff --git a/arch/sh/include/mach-common/mach/microdev.h b/arch/sh/include/mach-common/mach/microdev.h
index 1aed158..dcb05fa 100644
--- a/arch/sh/include/mach-common/mach/microdev.h
+++ b/arch/sh/include/mach-common/mach/microdev.h
@@ -68,13 +68,4 @@
 #define __IO_PREFIX microdev
 #include <asm/io_generic.h>
 
-#if defined(CONFIG_PCI)
-unsigned char  microdev_pci_inb(unsigned long port);
-unsigned short microdev_pci_inw(unsigned long port);
-unsigned long  microdev_pci_inl(unsigned long port);
-void           microdev_pci_outb(unsigned char  data, unsigned long port);
-void           microdev_pci_outw(unsigned short data, unsigned long port);
-void           microdev_pci_outl(unsigned long  data, unsigned long port);
-#endif
-
 #endif /* __ASM_SH_MICRODEV_H */
diff --git a/arch/sh/include/mach-common/mach/snapgear.h b/arch/sh/include/mach-common/mach/secureedge5410.h
similarity index 79%
rename from arch/sh/include/mach-common/mach/snapgear.h
rename to arch/sh/include/mach-common/mach/secureedge5410.h
index 042d95f..3653b9a4 100644
--- a/arch/sh/include/mach-common/mach/snapgear.h
+++ b/arch/sh/include/mach-common/mach/secureedge5410.h
@@ -12,30 +12,9 @@
 #ifndef _ASM_SH_IO_SNAPGEAR_H
 #define _ASM_SH_IO_SNAPGEAR_H
 
-#if defined(CONFIG_CPU_SH4)
-/*
- * The external interrupt lines, these take up ints 0 - 15 inclusive
- * depending on the priority for the interrupt.  In fact the priority
- * is the interrupt :-)
- */
-
-#define IRL0_IRQ	2
-#define IRL0_PRIORITY	13
-
-#define IRL1_IRQ	5
-#define IRL1_PRIORITY	10
-
-#define IRL2_IRQ	8
-#define IRL2_PRIORITY	7
-
-#define IRL3_IRQ	11
-#define IRL3_PRIORITY	4
-#endif
-
 #define __IO_PREFIX	snapgear
 #include <asm/io_generic.h>
 
-#ifdef CONFIG_SH_SECUREEDGE5410
 /*
  * We need to remember what was written to the ioport as some bits
  * are shared with other functions and you cannot read back what was
@@ -66,6 +45,5 @@
 			((secureedge5410_ioport & ~(mask)) | ((val) & (mask)))))
 #define SECUREEDGE_READ_IOPORT() \
 	 ((*SECUREEDGE_IOPORT_ADDR&0x0817) | (secureedge5410_ioport&~0x0817))
-#endif
 
 #endif /* _ASM_SH_IO_SNAPGEAR_H */
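
SECUREEDGE_WRITE_IOPORT() implements a shadow-register pattern: some bits of the ioport cannot be read back, so a software copy (secureedge5410_ioport) remembers the last value written and masked updates merge into it. For example (argument order assumed from the macro body shown above):

/* Set bit 0 without disturbing the unreadable shared bits; the
 * shadow copy keeps them at their last written values. */
SECUREEDGE_WRITE_IOPORT(0x0001, 0x0001);
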
diff --git a/arch/sh/include/mach-common/mach/systemh7751.h b/arch/sh/include/mach-common/mach/systemh7751.h
deleted file mode 100644
index 4161122..0000000
--- a/arch/sh/include/mach-common/mach/systemh7751.h
+++ /dev/null
@@ -1,71 +0,0 @@
-#ifndef __ASM_SH_SYSTEMH_7751SYSTEMH_H
-#define __ASM_SH_SYSTEMH_7751SYSTEMH_H
-
-/*
- * linux/include/asm-sh/systemh/7751systemh.h
- *
- * Copyright (C) 2000  Kazumoto Kojima
- *
- * Hitachi SystemH support
-
- * Modified for 7751 SystemH by
- * Jonathan Short, 2002.
- */
-
-/* Box specific addresses.  */
-
-#define PA_ROM		0x00000000	/* EPROM */
-#define PA_ROM_SIZE	0x00400000	/* EPROM size 4M byte */
-#define PA_FROM		0x01000000	/* EPROM */
-#define PA_FROM_SIZE	0x00400000	/* EPROM size 4M byte */
-#define PA_EXT1		0x04000000
-#define PA_EXT1_SIZE	0x04000000
-#define PA_EXT2		0x08000000
-#define PA_EXT2_SIZE	0x04000000
-#define PA_SDRAM	0x0c000000
-#define PA_SDRAM_SIZE	0x04000000
-
-#define PA_EXT4		0x12000000
-#define PA_EXT4_SIZE	0x02000000
-#define PA_EXT5		0x14000000
-#define PA_EXT5_SIZE	0x04000000
-#define PA_PCIC		0x18000000	/* MR-SHPC-01 PCMCIA */
-
-#define PA_DIPSW0	0xb9000000	/* Dip switch 5,6 */
-#define PA_DIPSW1	0xb9000002	/* Dip switch 7,8 */
-#define PA_LED		0xba000000	/* LED */
-#define	PA_BCR		0xbb000000	/* FPGA on the MS7751SE01 */
-
-#define PA_MRSHPC	0xb83fffe0	/* MR-SHPC-01 PCMCIA controller */
-#define PA_MRSHPC_MW1	0xb8400000	/* MR-SHPC-01 memory window base */
-#define PA_MRSHPC_MW2	0xb8500000	/* MR-SHPC-01 attribute window base */
-#define PA_MRSHPC_IO	0xb8600000	/* MR-SHPC-01 I/O window base */
-#define MRSHPC_MODE     (PA_MRSHPC + 4)
-#define MRSHPC_OPTION   (PA_MRSHPC + 6)
-#define MRSHPC_CSR      (PA_MRSHPC + 8)
-#define MRSHPC_ISR      (PA_MRSHPC + 10)
-#define MRSHPC_ICR      (PA_MRSHPC + 12)
-#define MRSHPC_CPWCR    (PA_MRSHPC + 14)
-#define MRSHPC_MW0CR1   (PA_MRSHPC + 16)
-#define MRSHPC_MW1CR1   (PA_MRSHPC + 18)
-#define MRSHPC_IOWCR1   (PA_MRSHPC + 20)
-#define MRSHPC_MW0CR2   (PA_MRSHPC + 22)
-#define MRSHPC_MW1CR2   (PA_MRSHPC + 24)
-#define MRSHPC_IOWCR2   (PA_MRSHPC + 26)
-#define MRSHPC_CDCR     (PA_MRSHPC + 28)
-#define MRSHPC_PCIC_INFO (PA_MRSHPC + 30)
-
-#define BCR_ILCRA	(PA_BCR + 0)
-#define BCR_ILCRB	(PA_BCR + 2)
-#define BCR_ILCRC	(PA_BCR + 4)
-#define BCR_ILCRD	(PA_BCR + 6)
-#define BCR_ILCRE	(PA_BCR + 8)
-#define BCR_ILCRF	(PA_BCR + 10)
-#define BCR_ILCRG	(PA_BCR + 12)
-
-#define IRQ_79C973	13
-
-#define __IO_PREFIX	sh7751systemh
-#include <asm/io_generic.h>
-
-#endif  /* __ASM_SH_SYSTEMH_7751SYSTEMH_H */
diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
index 2d9700c..0fe2e93 100644
--- a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
+++ b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c
@@ -48,7 +48,7 @@
  * Default rate for the root input clock, reset this with clk_set_rate()
  * from the platform code.
  */
-struct clk extal_clk = {
+static struct clk extal_clk = {
 	.rate		= 33333333,
 };
 
@@ -111,7 +111,7 @@
 	.parent		= &pll_clk,
 };
 
-struct clk *main_clks[] = {
+static struct clk *main_clks[] = {
 	&r_clk,
 	&extal_clk,
 	&fll_clk,
@@ -156,7 +156,7 @@
 
 enum { DIV6_V, DIV6_FA, DIV6_FB, DIV6_I, DIV6_S, DIV6_NR };
 
-struct clk div6_clks[DIV6_NR] = {
+static struct clk div6_clks[DIV6_NR] = {
 	[DIV6_V] = SH_CLK_DIV6(&div3_clk, VCLKCR, 0),
 	[DIV6_FA] = SH_CLK_DIV6(&div3_clk, FCLKACR, 0),
 	[DIV6_FB] = SH_CLK_DIV6(&div3_clk, FCLKBCR, 0),
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 0937039..c3e61b3 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -79,7 +79,7 @@
 
 config 32BIT
 	bool
-	default y if CPU_SH5
+	default y if CPU_SH5 || !MMU
 
 config PMB
 	bool "Support 32-bit physical addressing through PMB"
diff --git a/arch/sh/mm/consistent.c b/arch/sh/mm/consistent.c
index 0387932..40733a9 100644
--- a/arch/sh/mm/consistent.c
+++ b/arch/sh/mm/consistent.c
@@ -79,21 +79,20 @@
 void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		    enum dma_data_direction direction)
 {
-#if defined(CONFIG_CPU_SH5) || defined(CONFIG_PMB)
-	void *p1addr = vaddr;
-#else
-	void *p1addr = (void*) P1SEGADDR((unsigned long)vaddr);
-#endif
+	void *addr;
+
+	addr = __in_29bit_mode() ?
+	       (void *)P1SEGADDR((unsigned long)vaddr) : vaddr;
 
 	switch (direction) {
 	case DMA_FROM_DEVICE:		/* invalidate only */
-		__flush_invalidate_region(p1addr, size);
+		__flush_invalidate_region(addr, size);
 		break;
 	case DMA_TO_DEVICE:		/* writeback only */
-		__flush_wback_region(p1addr, size);
+		__flush_wback_region(addr, size);
 		break;
 	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
-		__flush_purge_region(p1addr, size);
+		__flush_purge_region(addr, size);
 		break;
 	default:
 		BUG();
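
The rewrite replaces the compile-time #ifdef choice with a runtime test: in 29-bit mode the flush helpers want the P1 (cached) alias of the buffer, otherwise the virtual address is used as-is. The selection logic in isolation, as a sketch:

/* Runtime equivalent of the old #ifdef block. */
static void *cache_op_address(void *vaddr)
{
	if (__in_29bit_mode())
		return (void *)P1SEGADDR((unsigned long)vaddr);
	return vaddr;
}
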
diff --git a/arch/sh/mm/uncached.c b/arch/sh/mm/uncached.c
index 8a4eca5..a7767da 100644
--- a/arch/sh/mm/uncached.c
+++ b/arch/sh/mm/uncached.c
@@ -28,7 +28,7 @@
 
 void __init uncached_init(void)
 {
-#ifdef CONFIG_29BIT
+#if defined(CONFIG_29BIT) || !defined(CONFIG_MMU)
 	uncached_start = P2SEG;
 #else
 	uncached_start = memory_end;
diff --git a/arch/sh/tools/mach-types b/arch/sh/tools/mach-types
index 9f56eb9..0e68465 100644
--- a/arch/sh/tools/mach-types
+++ b/arch/sh/tools/mach-types
@@ -26,7 +26,6 @@
 7724SE			SH_7724_SOLUTION_ENGINE
 7751SE			SH_7751_SOLUTION_ENGINE
 7780SE			SH_7780_SOLUTION_ENGINE
-7751SYSTEMH		SH_7751_SYSTEMH
 HP6XX			SH_HP6XX
 DREAMCAST		SH_DREAMCAST
 SNAPGEAR		SH_SECUREEDGE5410
diff --git a/arch/tile/include/asm/highmem.h b/arch/tile/include/asm/highmem.h
index e0f7ee18..b2a6c5d 100644
--- a/arch/tile/include/asm/highmem.h
+++ b/arch/tile/include/asm/highmem.h
@@ -23,7 +23,6 @@
 
 #include <linux/interrupt.h>
 #include <linux/threads.h>
-#include <asm/kmap_types.h>
 #include <asm/tlbflush.h>
 #include <asm/homecache.h>
 
diff --git a/arch/tile/include/asm/kmap_types.h b/arch/tile/include/asm/kmap_types.h
index 1480106..3d0f202 100644
--- a/arch/tile/include/asm/kmap_types.h
+++ b/arch/tile/include/asm/kmap_types.h
@@ -16,28 +16,42 @@
 #define _ASM_TILE_KMAP_TYPES_H
 
 /*
- * In TILE Linux each set of four of these uses another 16MB chunk of
- * address space, given 64 tiles and 64KB pages, so we only enable
- * ones that are required by the kernel configuration.
+ * In 32-bit TILE Linux we have to balance the desire to have a lot of
+ * nested atomic mappings with the fact that large page sizes and many
+ * processors chew up address space quickly.  In a typical
+ * 64-processor, 64KB-page layout build, making KM_TYPE_NR one larger
+ * adds 4MB of required address-space.  For now we leave KM_TYPE_NR
+ * set to depth 8.
  */
 enum km_type {
+	KM_TYPE_NR = 8
+};
+
+/*
+ * We provide dummy definitions of all the stray values that used to be
+ * required for kmap_atomic() and no longer are.
+ */
+enum {
 	KM_BOUNCE_READ,
 	KM_SKB_SUNRPC_DATA,
 	KM_SKB_DATA_SOFTIRQ,
 	KM_USER0,
 	KM_USER1,
 	KM_BIO_SRC_IRQ,
+	KM_BIO_DST_IRQ,
+	KM_PTE0,
+	KM_PTE1,
 	KM_IRQ0,
 	KM_IRQ1,
 	KM_SOFTIRQ0,
 	KM_SOFTIRQ1,
-	KM_MEMCPY0,
-	KM_MEMCPY1,
-#if defined(CONFIG_HIGHPTE)
-	KM_PTE0,
-	KM_PTE1,
-#endif
-	KM_TYPE_NR
+	KM_SYNC_ICACHE,
+	KM_SYNC_DCACHE,
+	KM_UML_USERCOPY,
+	KM_IRQ_PTE,
+	KM_NMI,
+	KM_NMI_PTE,
+	KM_KDB
 };
 
 #endif /* _ASM_TILE_KMAP_TYPES_H */
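
KM_TYPE_NR now only sizes the per-CPU fixmap window; individual slots come from the kmap_atomic_idx_push()/kmap_atomic_idx_pop() stack rather than from fixed enum values. A sketch of the slot arithmetic, following the generic stack-based kmap_atomic scheme (the PTE installation is elided):

/* Sketch: allocate the next free depth slot and compute its fixmap
 * index; the caller installs a PTE there and pops on unmap. */
void *example_atomic_slot(void)
{
	int type, idx;

	pagefault_disable();
	type = kmap_atomic_idx_push();
	idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
	return (void *)__fix_to_virt(idx);
}
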
diff --git a/arch/tile/include/asm/pgtable.h b/arch/tile/include/asm/pgtable.h
index dc4ccdd..a6604e9 100644
--- a/arch/tile/include/asm/pgtable.h
+++ b/arch/tile/include/asm/pgtable.h
@@ -344,10 +344,8 @@
 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
 
 #if defined(CONFIG_HIGHPTE)
-extern pte_t *_pte_offset_map(pmd_t *, unsigned long address, enum km_type);
-#define pte_offset_map(dir, address) \
-	_pte_offset_map(dir, address, KM_PTE0)
-#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
+extern pte_t *pte_offset_map(pmd_t *, unsigned long address);
+#define pte_unmap(pte) kunmap_atomic(pte)
 #else
 #define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
 #define pte_unmap(pte) do { } while (0)
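
With the km_type argument gone, a page-table walk pairs pte_offset_map() directly with pte_unmap(), and the atomic-slot bookkeeping happens underneath. Usage sketch (example_read_pte is hypothetical):

/* Map the PTE page, read one entry, unmap. */
static pte_t example_read_pte(pmd_t *pmd, unsigned long address)
{
	pte_t *ptep = pte_offset_map(pmd, address);
	pte_t pte = *ptep;

	pte_unmap(ptep);
	return pte;
}
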
diff --git a/arch/tile/include/asm/stat.h b/arch/tile/include/asm/stat.h
index 3dc90fa..b16e5db 100644
--- a/arch/tile/include/asm/stat.h
+++ b/arch/tile/include/asm/stat.h
@@ -1 +1,4 @@
+#ifdef CONFIG_COMPAT
+#define __ARCH_WANT_STAT64	/* Used for compat_sys_stat64() etc. */
+#endif
 #include <asm-generic/stat.h>
diff --git a/arch/tile/include/asm/unistd.h b/arch/tile/include/asm/unistd.h
index f2e3ff4..b35c2db 100644
--- a/arch/tile/include/asm/unistd.h
+++ b/arch/tile/include/asm/unistd.h
@@ -41,6 +41,7 @@
 #ifdef CONFIG_COMPAT
 #define __ARCH_WANT_SYS_LLSEEK
 #endif
+#define __ARCH_WANT_SYS_NEWFSTATAT
 #endif
 
 #endif /* _ASM_TILE_UNISTD_H */
diff --git a/arch/tile/kernel/compat.c b/arch/tile/kernel/compat.c
index 77739cd..67617a0 100644
--- a/arch/tile/kernel/compat.c
+++ b/arch/tile/kernel/compat.c
@@ -148,11 +148,11 @@
 #define compat_sys_readahead sys32_readahead
 #define compat_sys_sync_file_range compat_sys_sync_file_range2
 
-/* The native 64-bit "struct stat" matches the 32-bit "struct stat64". */
-#define compat_sys_stat64 sys_newstat
-#define compat_sys_lstat64 sys_newlstat
-#define compat_sys_fstat64 sys_newfstat
-#define compat_sys_fstatat64 sys_newfstatat
+/* We leverage the "struct stat64" type for 32-bit time_t/nsec. */
+#define compat_sys_stat64 sys_stat64
+#define compat_sys_lstat64 sys_lstat64
+#define compat_sys_fstat64 sys_fstat64
+#define compat_sys_fstatat64 sys_fstatat64
 
 /* The native sys_ptrace dynamically handles compat binaries. */
 #define compat_sys_ptrace sys_ptrace
diff --git a/arch/tile/kernel/early_printk.c b/arch/tile/kernel/early_printk.c
index 2c54fd4..493a0e6 100644
--- a/arch/tile/kernel/early_printk.c
+++ b/arch/tile/kernel/early_printk.c
@@ -54,7 +54,7 @@
 void early_panic(const char *fmt, ...)
 {
 	va_list ap;
-	raw_local_irq_disable_all();
+	arch_local_irq_disable_all();
 	va_start(ap, fmt);
 	early_printk("Kernel panic - not syncing: ");
 	early_vprintk(fmt, ap);
diff --git a/arch/tile/kernel/hardwall.c b/arch/tile/kernel/hardwall.c
index 1e54a78..e910530 100644
--- a/arch/tile/kernel/hardwall.c
+++ b/arch/tile/kernel/hardwall.c
@@ -151,12 +151,12 @@
 
 static void enable_firewall_interrupts(void)
 {
-	raw_local_irq_unmask_now(INT_UDN_FIREWALL);
+	arch_local_irq_unmask_now(INT_UDN_FIREWALL);
 }
 
 static void disable_firewall_interrupts(void)
 {
-	raw_local_irq_mask_now(INT_UDN_FIREWALL);
+	arch_local_irq_mask_now(INT_UDN_FIREWALL);
 }
 
 /* Set up hardwall on this cpu based on the passed hardwall_info. */
@@ -768,13 +768,13 @@
 }
 
 static const struct file_operations dev_hardwall_fops = {
+	.open           = nonseekable_open,
 	.unlocked_ioctl = hardwall_ioctl,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl   = hardwall_compat_ioctl,
 #endif
 	.flush          = hardwall_flush,
 	.release        = hardwall_release,
-	.llseek		= noop_llseek,
 };
 
 static struct cdev hardwall_dev;
diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c
index e639176..128805e 100644
--- a/arch/tile/kernel/irq.c
+++ b/arch/tile/kernel/irq.c
@@ -26,7 +26,7 @@
 #define IS_HW_CLEARED 1
 
 /*
- * The set of interrupts we enable for raw_local_irq_enable().
+ * The set of interrupts we enable for arch_local_irq_enable().
  * This is initialized to have just a single interrupt that the kernel
  * doesn't actually use as a sentinel.  During kernel init,
  * interrupts are added as the kernel gets prepared to support them.
@@ -225,7 +225,7 @@
 	/* Enable interrupt delivery. */
 	unmask_irqs(~0UL);
 #if CHIP_HAS_IPI()
-	raw_local_irq_unmask(INT_IPI_K);
+	arch_local_irq_unmask(INT_IPI_K);
 #endif
 }
 
diff --git a/arch/tile/kernel/machine_kexec.c b/arch/tile/kernel/machine_kexec.c
index ba7a265..0d8b9e9 100644
--- a/arch/tile/kernel/machine_kexec.c
+++ b/arch/tile/kernel/machine_kexec.c
@@ -182,13 +182,13 @@
 
 		if ((entry & IND_SOURCE)) {
 			void *va =
-				kmap_atomic_pfn(entry >> PAGE_SHIFT, KM_USER0);
+				kmap_atomic_pfn(entry >> PAGE_SHIFT);
 			r = kexec_bn2cl(va);
 			if (r) {
 				command_line = r;
 				break;
 			}
-			kunmap_atomic(va, KM_USER0);
+			kunmap_atomic(va);
 		}
 	}
 
@@ -198,7 +198,7 @@
 
 		hverr = hv_set_command_line(
 			(HV_VirtAddr) command_line, strlen(command_line));
-		kunmap_atomic(command_line, KM_USER0);
+		kunmap_atomic(command_line);
 	} else {
 		pr_info("%s: no command line found; making empty\n",
 		       __func__);
diff --git a/arch/tile/kernel/messaging.c b/arch/tile/kernel/messaging.c
index 997e393..0858ee6 100644
--- a/arch/tile/kernel/messaging.c
+++ b/arch/tile/kernel/messaging.c
@@ -34,7 +34,7 @@
 		panic("hv_register_message_state: error %d", rc);
 
 	/* Make sure downcall interrupts will be enabled. */
-	raw_local_irq_unmask(INT_INTCTRL_K);
+	arch_local_irq_unmask(INT_INTCTRL_K);
 }
 
 void hv_message_intr(struct pt_regs *regs, int intnum)
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c
index 9cd2988..e92e405 100644
--- a/arch/tile/kernel/ptrace.c
+++ b/arch/tile/kernel/ptrace.c
@@ -50,10 +50,10 @@
 {
 	unsigned long __user *datap = (long __user __force *)data;
 	unsigned long tmp;
-	int i;
 	long ret = -EIO;
-	unsigned long *childregs;
 	char *childreg;
+	struct pt_regs copyregs;
+	int ex1_offset;
 
 	switch (request) {
 
@@ -80,6 +80,16 @@
 		if (addr >= PTREGS_SIZE)
 			break;
 		childreg = (char *)task_pt_regs(child) + addr;
+
+		/* Guard against overwrites of the privilege level. */
+		ex1_offset = PTREGS_OFFSET_EX1;
+#if defined(CONFIG_COMPAT) && defined(__BIG_ENDIAN)
+		if (is_compat_task())   /* point at low word */
+			ex1_offset += sizeof(compat_long_t);
+#endif
+		if (addr == ex1_offset)
+			data = PL_ICS_EX1(USER_PL, EX1_ICS(data));
+
 #ifdef CONFIG_COMPAT
 		if (is_compat_task()) {
 			if (addr & (sizeof(compat_long_t)-1))
@@ -96,26 +106,19 @@
 		break;
 
 	case PTRACE_GETREGS:  /* Get all registers from the child. */
-		if (!access_ok(VERIFY_WRITE, datap, PTREGS_SIZE))
-			break;
-		childregs = (long *)task_pt_regs(child);
-		for (i = 0; i < sizeof(struct pt_regs)/sizeof(unsigned long);
-				++i) {
-			ret = __put_user(childregs[i], &datap[i]);
-			if (ret != 0)
-				break;
+		if (copy_to_user(datap, task_pt_regs(child),
+				 sizeof(struct pt_regs)) == 0) {
+			ret = 0;
 		}
 		break;
 
 	case PTRACE_SETREGS:  /* Set all registers in the child. */
-		if (!access_ok(VERIFY_READ, datap, PTREGS_SIZE))
-			break;
-		childregs = (long *)task_pt_regs(child);
-		for (i = 0; i < sizeof(struct pt_regs)/sizeof(unsigned long);
-				++i) {
-			ret = __get_user(childregs[i], &datap[i]);
-			if (ret != 0)
-				break;
+		if (copy_from_user(&copyregs, datap,
+				   sizeof(struct pt_regs)) == 0) {
+			copyregs.ex1 =
+				PL_ICS_EX1(USER_PL, EX1_ICS(copyregs.ex1));
+			*task_pt_regs(child) = copyregs;
+			ret = 0;
 		}
 		break;
 
diff --git a/arch/tile/kernel/reboot.c b/arch/tile/kernel/reboot.c
index acd86d2..baa3d90 100644
--- a/arch/tile/kernel/reboot.c
+++ b/arch/tile/kernel/reboot.c
@@ -27,7 +27,7 @@
 void machine_halt(void)
 {
 	warn_early_printk();
-	raw_local_irq_disable_all();
+	arch_local_irq_disable_all();
 	smp_send_stop();
 	hv_halt();
 }
@@ -35,14 +35,14 @@
 void machine_power_off(void)
 {
 	warn_early_printk();
-	raw_local_irq_disable_all();
+	arch_local_irq_disable_all();
 	smp_send_stop();
 	hv_power_off();
 }
 
 void machine_restart(char *cmd)
 {
-	raw_local_irq_disable_all();
+	arch_local_irq_disable_all();
 	smp_send_stop();
 	hv_restart((HV_VirtAddr) "vmlinux", (HV_VirtAddr) cmd);
 }
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index ae51cad..fb0b3cb 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -868,14 +868,14 @@
 
 	/* Allow asynchronous TLB interrupts. */
 #if CHIP_HAS_TILE_DMA()
-	raw_local_irq_unmask(INT_DMATLB_MISS);
-	raw_local_irq_unmask(INT_DMATLB_ACCESS);
+	arch_local_irq_unmask(INT_DMATLB_MISS);
+	arch_local_irq_unmask(INT_DMATLB_ACCESS);
 #endif
 #if CHIP_HAS_SN_PROC()
-	raw_local_irq_unmask(INT_SNITLB_MISS);
+	arch_local_irq_unmask(INT_SNITLB_MISS);
 #endif
 #ifdef __tilegx__
-	raw_local_irq_unmask(INT_SINGLE_STEP_K);
+	arch_local_irq_unmask(INT_SINGLE_STEP_K);
 #endif
 
 	/*
diff --git a/arch/tile/kernel/signal.c b/arch/tile/kernel/signal.c
index fb28e85..687719d 100644
--- a/arch/tile/kernel/signal.c
+++ b/arch/tile/kernel/signal.c
@@ -71,6 +71,9 @@
 	for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i)
 		err |= __get_user(regs->regs[i], &sc->gregs[i]);
 
+	/* Ensure that the PL is always set to USER_PL. */
+	regs->ex1 = PL_ICS_EX1(USER_PL, EX1_ICS(regs->ex1));
+
 	regs->faultnum = INT_SWINT_1_SIGRETURN;
 
 	err |= __get_user(*pr0, &sc->gregs[0]);
@@ -330,7 +333,7 @@
 			current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
 		}
 
-		return;
+		goto done;
 	}
 
 	/* Did we come from a system call? */
@@ -358,4 +361,8 @@
 		current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
 		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
 	}
+
+done:
+	/* Avoid double syscall restart if there are nested signals. */
+	regs->faultnum = INT_SWINT_1_SIGRETURN;
 }
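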
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c
index 75255d9..9575b37 100644
--- a/arch/tile/kernel/smp.c
+++ b/arch/tile/kernel/smp.c
@@ -115,7 +115,7 @@
 static void smp_stop_cpu_interrupt(void)
 {
 	set_cpu_online(smp_processor_id(), 0);
-	raw_local_irq_disable_all();
+	arch_local_irq_disable_all();
 	for (;;)
 		asm("nap");
 }
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index 6bed820..f2e156e 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -132,7 +132,7 @@
 {
 	BUG_ON(ticks > MAX_TICK);
 	__insn_mtspr(SPR_TILE_TIMER_CONTROL, ticks);
-	raw_local_irq_unmask_now(INT_TILE_TIMER);
+	arch_local_irq_unmask_now(INT_TILE_TIMER);
 	return 0;
 }
 
@@ -143,7 +143,7 @@
 static void tile_timer_set_mode(enum clock_event_mode mode,
 				struct clock_event_device *evt)
 {
-	raw_local_irq_mask_now(INT_TILE_TIMER);
+	arch_local_irq_mask_now(INT_TILE_TIMER);
 }
 
 /*
@@ -172,7 +172,7 @@
 	evt->cpumask = cpumask_of(smp_processor_id());
 
 	/* Start out with timer not firing. */
-	raw_local_irq_mask_now(INT_TILE_TIMER);
+	arch_local_irq_mask_now(INT_TILE_TIMER);
 
 	/* Register tile timer. */
 	clockevents_register_device(evt);
@@ -188,7 +188,7 @@
 	 * Mask the timer interrupt here, since we are a oneshot timer
 	 * and there are now by definition no events pending.
 	 */
-	raw_local_irq_mask(INT_TILE_TIMER);
+	arch_local_irq_mask(INT_TILE_TIMER);
 
 	/* Track time spent here in an interrupt context */
 	irq_enter();
diff --git a/arch/tile/lib/memcpy_tile64.c b/arch/tile/lib/memcpy_tile64.c
index dfedea7..f7d4a6a 100644
--- a/arch/tile/lib/memcpy_tile64.c
+++ b/arch/tile/lib/memcpy_tile64.c
@@ -54,7 +54,7 @@
  * we must run with interrupts disabled to avoid the risk of some
  * other code seeing the incoherent data in our cache.  (Recall that
  * our cache is indexed by PA, so even if the other code doesn't use
- * our KM_MEMCPY virtual addresses, they'll still hit in cache using
+ * our kmap_atomic virtual addresses, they'll still hit in cache using
  * the normal VAs that aren't supposed to hit in cache.)
  */
 static void memcpy_multicache(void *dest, const void *source,
@@ -64,6 +64,7 @@
 	unsigned long flags, newsrc, newdst;
 	pmd_t *pmdp;
 	pte_t *ptep;
+	int type0, type1;
 	int cpu = get_cpu();
 
 	/*
@@ -77,7 +78,8 @@
 	sim_allow_multiple_caching(1);
 
 	/* Set up the new dest mapping */
-	idx = FIX_KMAP_BEGIN + (KM_TYPE_NR * cpu) + KM_MEMCPY0;
+	type0 = kmap_atomic_idx_push();
+	idx = FIX_KMAP_BEGIN + (KM_TYPE_NR * cpu) + type0;
 	newdst = __fix_to_virt(idx) + ((unsigned long)dest & (PAGE_SIZE-1));
 	pmdp = pmd_offset(pud_offset(pgd_offset_k(newdst), newdst), newdst);
 	ptep = pte_offset_kernel(pmdp, newdst);
@@ -87,7 +89,8 @@
 	}
 
 	/* Set up the new source mapping */
-	idx += (KM_MEMCPY0 - KM_MEMCPY1);
+	type1 = kmap_atomic_idx_push();
+	idx += (type0 - type1);
 	src_pte = hv_pte_set_nc(src_pte);
 	src_pte = hv_pte_clear_writable(src_pte);  /* be paranoid */
 	newsrc = __fix_to_virt(idx) + ((unsigned long)source & (PAGE_SIZE-1));
@@ -119,6 +122,8 @@
 	 * We're done: notify the simulator that all is back to normal,
 	 * and re-enable interrupts and pre-emption.
 	 */
+	kmap_atomic_idx_pop();
+	kmap_atomic_idx_pop();
 	sim_allow_multiple_caching(0);
 	local_irq_restore(flags);
 	put_cpu();
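
The two pushes above must be matched by two pops, in LIFO order, before interrupts are re-enabled; the slot indices are only stable while the section stays atomic. In miniature:

/* Per-CPU slot stack: pops mirror pushes in LIFO order, and the
 * whole window must remain atomic (IRQs are off here). */
int t0 = kmap_atomic_idx_push();
int t1 = kmap_atomic_idx_push();
/* ... use fixmap slots t0 and t1 ... */
kmap_atomic_idx_pop();		/* releases t1 */
kmap_atomic_idx_pop();		/* releases t0 */
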
diff --git a/arch/tile/mm/highmem.c b/arch/tile/mm/highmem.c
index abb5733..31dbbd9 100644
--- a/arch/tile/mm/highmem.c
+++ b/arch/tile/mm/highmem.c
@@ -227,7 +227,7 @@
 void *__kmap_atomic(struct page *page)
 {
 	/* PAGE_NONE is a magic value that tells us to check immutability. */
-	return kmap_atomic_prot(page, type, PAGE_NONE);
+	return kmap_atomic_prot(page, PAGE_NONE);
 }
 EXPORT_SYMBOL(__kmap_atomic);
 
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index 78e1982..0b9ce69 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -988,8 +988,12 @@
 /* Select whether to free (1) or mark unusable (0) the __init pages. */
 static int __init set_initfree(char *str)
 {
-	strict_strtol(str, 0, &initfree);
-	pr_info("initfree: %s free init pages\n", initfree ? "will" : "won't");
+	long val;
+	if (strict_strtol(str, 0, &val) == 0) {
+		initfree = val;
+		pr_info("initfree: %s free init pages\n",
+			initfree ? "will" : "won't");
+	}
 	return 1;
 }
 __setup("initfree=", set_initfree);
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index 335c246..1f5430c 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -134,9 +134,9 @@
 }
 
 #if defined(CONFIG_HIGHPTE)
-pte_t *_pte_offset_map(pmd_t *dir, unsigned long address, enum km_type type)
+pte_t *pte_offset_map(pmd_t *dir, unsigned long address)
 {
-	pte_t *pte = kmap_atomic(pmd_page(*dir), type) +
+	pte_t *pte = kmap_atomic(pmd_page(*dir)) +
 		(pmd_ptfn(*dir) << HV_LOG2_PAGE_TABLE_ALIGN) & ~PAGE_MASK;
 	return &pte[pte_index(address)];
 }
diff --git a/arch/um/include/asm/ptrace-generic.h b/arch/um/include/asm/ptrace-generic.h
index 2cd899f..b7c5bab 100644
--- a/arch/um/include/asm/ptrace-generic.h
+++ b/arch/um/include/asm/ptrace-generic.h
@@ -38,8 +38,8 @@
 
 struct task_struct;
 
-extern long subarch_ptrace(struct task_struct *child, long request, long addr,
-			   long data);
+extern long subarch_ptrace(struct task_struct *child, long request,
+	unsigned long addr, unsigned long data);
 extern unsigned long getreg(struct task_struct *child, int regno);
 extern int putreg(struct task_struct *child, int regno, unsigned long value);
 extern int get_fpregs(struct user_i387_struct __user *buf,
diff --git a/arch/um/kernel/ptrace.c b/arch/um/kernel/ptrace.c
index a5e33f2..701b672 100644
--- a/arch/um/kernel/ptrace.c
+++ b/arch/um/kernel/ptrace.c
@@ -122,7 +122,7 @@
 		break;
 
 	case PTRACE_SET_THREAD_AREA:
-		ret = ptrace_set_thread_area(child, addr, datavp);
+		ret = ptrace_set_thread_area(child, addr, vp);
 		break;
 
 	case PTRACE_FAULTINFO: {
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 286de34..f6ce0bd 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -141,13 +141,13 @@
 
 static inline u32 native_apic_msr_read(u32 reg)
 {
-	u32 low, high;
+	u64 msr;
 
 	if (reg == APIC_DFR)
 		return -1;
 
-	rdmsr(APIC_BASE_MSR + (reg >> 4), low, high);
-	return low;
+	rdmsrl(APIC_BASE_MSR + (reg >> 4), msr);
+	return (u32)msr;
 }
 
 static inline void native_x2apic_wait_icr_idle(void)
@@ -181,12 +181,12 @@
 extern void x2apic_icr_write(u32 low, u32 id);
 static inline int x2apic_enabled(void)
 {
-	int msr, msr2;
+	u64 msr;
 
 	if (!cpu_has_x2apic)
 		return 0;
 
-	rdmsr(MSR_IA32_APICBASE, msr, msr2);
+	rdmsrl(MSR_IA32_APICBASE, msr);
 	if (msr & X2APIC_ENABLE)
 		return 1;
 	return 0;
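
rdmsr() splits a 64-bit MSR into two u32 halves, while rdmsrl() reads the whole value into a single u64; truncating the latter reproduces the old low half, which is all these callers need. Side by side:

u32 low, high;
u64 msr;

rdmsr(MSR_IA32_APICBASE, low, high);	/* two 32-bit halves */
rdmsrl(MSR_IA32_APICBASE, msr);		/* one 64-bit read */
/* (u32)msr == low and (u32)(msr >> 32) == high */
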
diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h
index b2f2d2e..6d90adf 100644
--- a/arch/x86/include/asm/uv/uv_mmrs.h
+++ b/arch/x86/include/asm/uv/uv_mmrs.h
@@ -806,6 +806,78 @@
 };
 
 /* ========================================================================= */
+/*                 UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR                  */
+/* ========================================================================= */
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR 0x16000c8UL
+
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_SHFT 24
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_BASE_MASK 0x00000000ff000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_SHFT 48
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_SHFT 63
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR_ENABLE_MASK 0x8000000000000000UL
+
+union uvh_rh_gam_alias210_overlay_config_0_mmr_u {
+    unsigned long	v;
+    struct uvh_rh_gam_alias210_overlay_config_0_mmr_s {
+	unsigned long	rsvd_0_23: 24;  /*    */
+	unsigned long	base    :  8;  /* RW */
+	unsigned long	rsvd_32_47: 16;  /*    */
+	unsigned long	m_alias :  5;  /* RW */
+	unsigned long	rsvd_53_62: 10;  /*    */
+	unsigned long	enable  :  1;  /* RW */
+    } s;
+};
+
+/* ========================================================================= */
+/*                 UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR                  */
+/* ========================================================================= */
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR 0x16000d8UL
+
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_SHFT 24
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_BASE_MASK 0x00000000ff000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_SHFT 48
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_SHFT 63
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR_ENABLE_MASK 0x8000000000000000UL
+
+union uvh_rh_gam_alias210_overlay_config_1_mmr_u {
+    unsigned long	v;
+    struct uvh_rh_gam_alias210_overlay_config_1_mmr_s {
+	unsigned long	rsvd_0_23: 24;  /*    */
+	unsigned long	base    :  8;  /* RW */
+	unsigned long	rsvd_32_47: 16;  /*    */
+	unsigned long	m_alias :  5;  /* RW */
+	unsigned long	rsvd_53_62: 10;  /*    */
+	unsigned long	enable  :  1;  /* RW */
+    } s;
+};
+
+/* ========================================================================= */
+/*                 UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR                  */
+/* ========================================================================= */
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR 0x16000e8UL
+
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_SHFT 24
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_BASE_MASK 0x00000000ff000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_SHFT 48
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_M_ALIAS_MASK 0x001f000000000000UL
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_SHFT 63
+#define UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR_ENABLE_MASK 0x8000000000000000UL
+
+union uvh_rh_gam_alias210_overlay_config_2_mmr_u {
+    unsigned long	v;
+    struct uvh_rh_gam_alias210_overlay_config_2_mmr_s {
+	unsigned long	rsvd_0_23: 24;  /*    */
+	unsigned long	base    :  8;  /* RW */
+	unsigned long	rsvd_32_47: 16;  /*    */
+	unsigned long	m_alias :  5;  /* RW */
+	unsigned long	rsvd_53_62: 10;  /*    */
+	unsigned long	enable  :  1;  /* RW */
+    } s;
+};
+
+/* ========================================================================= */
 /*                UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR                  */
 /* ========================================================================= */
 #define UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR 0x16000d0UL
@@ -857,6 +929,29 @@
 };
 
 /* ========================================================================= */
+/*                          UVH_RH_GAM_CONFIG_MMR                            */
+/* ========================================================================= */
+#define UVH_RH_GAM_CONFIG_MMR 0x1600000UL
+
+#define UVH_RH_GAM_CONFIG_MMR_M_SKT_SHFT 0
+#define UVH_RH_GAM_CONFIG_MMR_M_SKT_MASK 0x000000000000003fUL
+#define UVH_RH_GAM_CONFIG_MMR_N_SKT_SHFT 6
+#define UVH_RH_GAM_CONFIG_MMR_N_SKT_MASK 0x00000000000003c0UL
+#define UVH_RH_GAM_CONFIG_MMR_MMIOL_CFG_SHFT 12
+#define UVH_RH_GAM_CONFIG_MMR_MMIOL_CFG_MASK 0x0000000000001000UL
+
+union uvh_rh_gam_config_mmr_u {
+    unsigned long	v;
+    struct uvh_rh_gam_config_mmr_s {
+	unsigned long	m_skt     :  6;  /* RW */
+	unsigned long	n_skt     :  4;  /* RW */
+	unsigned long	rsvd_10_11:  2;  /*    */
+	unsigned long	mmiol_cfg :  1;  /* RW */
+	unsigned long	rsvd_13_63: 51;  /*    */
+    } s;
+};
+
+/* ========================================================================= */
 /*                    UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR                      */
 /* ========================================================================= */
 #define UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR 0x1600010UL
@@ -987,97 +1082,5 @@
     } s;
 };
 
-/* ========================================================================= */
-/*                          UVH_SI_ADDR_MAP_CONFIG                           */
-/* ========================================================================= */
-#define UVH_SI_ADDR_MAP_CONFIG 0xc80000UL
 
-#define UVH_SI_ADDR_MAP_CONFIG_M_SKT_SHFT 0
-#define UVH_SI_ADDR_MAP_CONFIG_M_SKT_MASK 0x000000000000003fUL
-#define UVH_SI_ADDR_MAP_CONFIG_N_SKT_SHFT 8
-#define UVH_SI_ADDR_MAP_CONFIG_N_SKT_MASK 0x0000000000000f00UL
-
-union uvh_si_addr_map_config_u {
-    unsigned long	v;
-    struct uvh_si_addr_map_config_s {
-	unsigned long	m_skt :  6;  /* RW */
-	unsigned long	rsvd_6_7:  2;  /*    */
-	unsigned long	n_skt :  4;  /* RW */
-	unsigned long	rsvd_12_63: 52;  /*    */
-    } s;
-};
-
-/* ========================================================================= */
-/*                       UVH_SI_ALIAS0_OVERLAY_CONFIG                        */
-/* ========================================================================= */
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG 0xc80008UL
-
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG_BASE_SHFT 24
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG_M_ALIAS_SHFT 48
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG_ENABLE_SHFT 63
-#define UVH_SI_ALIAS0_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
-
-union uvh_si_alias0_overlay_config_u {
-    unsigned long	v;
-    struct uvh_si_alias0_overlay_config_s {
-	unsigned long	rsvd_0_23: 24;  /*    */
-	unsigned long	base    :  8;  /* RW */
-	unsigned long	rsvd_32_47: 16;  /*    */
-	unsigned long	m_alias :  5;  /* RW */
-	unsigned long	rsvd_53_62: 10;  /*    */
-	unsigned long	enable  :  1;  /* RW */
-    } s;
-};
-
-/* ========================================================================= */
-/*                       UVH_SI_ALIAS1_OVERLAY_CONFIG                        */
-/* ========================================================================= */
-#define UVH_SI_ALIAS1_OVERLAY_CONFIG 0xc80010UL
-
-#define UVH_SI_ALIAS1_OVERLAY_CONFIG_BASE_SHFT 24
-#define UVH_SI_ALIAS1_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
-#define UVH_SI_ALIAS1_OVERLAY_CONFIG_M_ALIAS_SHFT 48
-#define UVH_SI_ALIAS1_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
-#define UVH_SI_ALIAS1_OVERLAY_CONFIG_ENABLE_SHFT 63
-#define UVH_SI_ALIAS1_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
-
-union uvh_si_alias1_overlay_config_u {
-    unsigned long	v;
-    struct uvh_si_alias1_overlay_config_s {
-	unsigned long	rsvd_0_23: 24;  /*    */
-	unsigned long	base    :  8;  /* RW */
-	unsigned long	rsvd_32_47: 16;  /*    */
-	unsigned long	m_alias :  5;  /* RW */
-	unsigned long	rsvd_53_62: 10;  /*    */
-	unsigned long	enable  :  1;  /* RW */
-    } s;
-};
-
-/* ========================================================================= */
-/*                       UVH_SI_ALIAS2_OVERLAY_CONFIG                        */
-/* ========================================================================= */
-#define UVH_SI_ALIAS2_OVERLAY_CONFIG 0xc80018UL
-
-#define UVH_SI_ALIAS2_OVERLAY_CONFIG_BASE_SHFT 24
-#define UVH_SI_ALIAS2_OVERLAY_CONFIG_BASE_MASK 0x00000000ff000000UL
-#define UVH_SI_ALIAS2_OVERLAY_CONFIG_M_ALIAS_SHFT 48
-#define UVH_SI_ALIAS2_OVERLAY_CONFIG_M_ALIAS_MASK 0x001f000000000000UL
-#define UVH_SI_ALIAS2_OVERLAY_CONFIG_ENABLE_SHFT 63
-#define UVH_SI_ALIAS2_OVERLAY_CONFIG_ENABLE_MASK 0x8000000000000000UL
-
-union uvh_si_alias2_overlay_config_u {
-    unsigned long	v;
-    struct uvh_si_alias2_overlay_config_s {
-	unsigned long	rsvd_0_23: 24;  /*    */
-	unsigned long	base    :  8;  /* RW */
-	unsigned long	rsvd_32_47: 16;  /*    */
-	unsigned long	m_alias :  5;  /* RW */
-	unsigned long	rsvd_53_62: 10;  /*    */
-	unsigned long	enable  :  1;  /* RW */
-    } s;
-};
-
-
-#endif /* _ASM_X86_UV_UV_MMRS_H */
+#endif /* __ASM_UV_MMRS_X86_H__ */
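
The MMR definitions added above follow a common pattern: each register is
paired with a union of the raw 64-bit value and a bitfield view, plus
SHFT/MASK constants for open-coded access. A hedged usage sketch, reusing
only names defined in this hunk:

	union uvh_rh_gam_config_mmr_u cfg;
	unsigned long m_skt;

	cfg.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR);

	/* field access through the bitfield view */
	m_skt = cfg.s.m_skt;

	/* equivalent access with the shift/mask constants */
	m_skt = (cfg.v & UVH_RH_GAM_CONFIG_MMR_M_SKT_MASK)
			>> UVH_RH_GAM_CONFIG_MMR_M_SKT_SHFT;
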
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 850657d..3f838d5 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -52,7 +52,6 @@
 #include <asm/mce.h>
 #include <asm/kvm_para.h>
 #include <asm/tsc.h>
-#include <asm/atomic.h>
 
 unsigned int num_processors;
 
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index ed4118d..194539a 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -379,14 +379,14 @@
 #define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT
 
 static __initdata struct redir_addr redir_addrs[] = {
-	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_SI_ALIAS0_OVERLAY_CONFIG},
-	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_SI_ALIAS1_OVERLAY_CONFIG},
-	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_SI_ALIAS2_OVERLAY_CONFIG},
+	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_0_MMR},
+	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_1_MMR},
+	{UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_RH_GAM_ALIAS210_OVERLAY_CONFIG_2_MMR},
 };
 
 static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size)
 {
-	union uvh_si_alias0_overlay_config_u alias;
+	union uvh_rh_gam_alias210_overlay_config_2_mmr_u alias;
 	union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect;
 	int i;
 
@@ -660,7 +660,7 @@
 
 void __init uv_system_init(void)
 {
-	union uvh_si_addr_map_config_u m_n_config;
+	union uvh_rh_gam_config_mmr_u m_n_config;
 	union uvh_node_id_u node_id;
 	unsigned long gnode_upper, lowmem_redir_base, lowmem_redir_size;
 	int bytes, nid, cpu, lcpu, pnode, blade, i, j, m_val, n_val;
@@ -670,7 +670,7 @@
 
 	map_low_mmrs();
 
-	m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG);
+	m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR);
 	m_val = m_n_config.s.m_skt;
 	n_val = m_n_config.s.n_skt;
 	mmr_base =
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index 46d5844..e421b8c 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -280,11 +280,11 @@
 	struct amd_nb *nb;
 	int i;
 
-	nb = kmalloc(sizeof(struct amd_nb), GFP_KERNEL);
+	nb = kmalloc_node(sizeof(struct amd_nb), GFP_KERNEL | __GFP_ZERO,
+			  cpu_to_node(cpu));
 	if (!nb)
 		return NULL;
 
-	memset(nb, 0, sizeof(*nb));
 	nb->nb_id = nb_id;
 
 	/*
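
The hunk above folds a kmalloc()+memset() pair into one NUMA-aware,
pre-zeroed allocation. A minimal sketch of the before/after forms,
assuming process context where GFP_KERNEL may sleep:

	struct amd_nb *nb;

	/* before: allocate on any node, then zero by hand */
	nb = kmalloc(sizeof(*nb), GFP_KERNEL);
	if (nb)
		memset(nb, 0, sizeof(*nb));

	/* after: zeroed allocation on the node that owns this CPU */
	nb = kmalloc_node(sizeof(*nb), GFP_KERNEL | __GFP_ZERO,
			  cpu_to_node(cpu));

kzalloc_node() would express the same thing even more directly.
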
diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
index e1af7c0..ce0cb47 100644
--- a/arch/x86/kernel/microcode_amd.c
+++ b/arch/x86/kernel/microcode_amd.c
@@ -212,7 +212,7 @@
 		return 0;
 	}
 
-	equiv_cpu_table = (struct equiv_cpu_entry *) vmalloc(size);
+	equiv_cpu_table = vmalloc(size);
 	if (!equiv_cpu_table) {
 		pr_err("failed to allocate equivalent CPU table\n");
 		return 0;
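
The cast removed here is redundant: vmalloc() returns void *, which
converts implicitly to any object pointer type in C, so the idiomatic
form is simply:

	struct equiv_cpu_entry *tbl = vmalloc(size);

	if (!tbl)
		return -ENOMEM;	/* the error value here is a stand-in */
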
diff --git a/arch/x86/kernel/mmconf-fam10h_64.c b/arch/x86/kernel/mmconf-fam10h_64.c
index 7182580..6da143c 100644
--- a/arch/x86/kernel/mmconf-fam10h_64.c
+++ b/arch/x86/kernel/mmconf-fam10h_64.c
@@ -217,13 +217,13 @@
 	wrmsrl(address, val);
 }
 
-static int __devinit set_check_enable_amd_mmconf(const struct dmi_system_id *d)
+static int __init set_check_enable_amd_mmconf(const struct dmi_system_id *d)
 {
         pci_probe |= PCI_CHECK_ENABLE_AMD_MMCONF;
         return 0;
 }
 
-static const struct dmi_system_id __cpuinitconst mmconf_dmi_table[] = {
+static const struct dmi_system_id __initconst mmconf_dmi_table[] = {
         {
                 .callback = set_check_enable_amd_mmconf,
                 .ident = "Sun Microsystems Machine",
@@ -234,7 +234,8 @@
 	{}
 };
 
-void __cpuinit check_enable_amd_mmconf_dmi(void)
+/* Called from a __cpuinit function, but only on the BSP. */
+void __ref check_enable_amd_mmconf_dmi(void)
 {
 	dmi_check_system(mmconf_dmi_table);
 }
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
index bab3b9e..008b91e 100644
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -41,44 +41,6 @@
 	valid_flags = flags;
 }
 
-/*
- * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
- * yielding a 64-bit result.
- */
-static inline u64 scale_delta(u64 delta, u32 mul_frac, int shift)
-{
-	u64 product;
-#ifdef __i386__
-	u32 tmp1, tmp2;
-#endif
-
-	if (shift < 0)
-		delta >>= -shift;
-	else
-		delta <<= shift;
-
-#ifdef __i386__
-	__asm__ (
-		"mul  %5       ; "
-		"mov  %4,%%eax ; "
-		"mov  %%edx,%4 ; "
-		"mul  %5       ; "
-		"xor  %5,%5    ; "
-		"add  %4,%%eax ; "
-		"adc  %5,%%edx ; "
-		: "=A" (product), "=r" (tmp1), "=r" (tmp2)
-		: "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
-#elif defined(__x86_64__)
-	__asm__ (
-		"mul %%rdx ; shrd $32,%%rdx,%%rax"
-		: "=a" (product) : "0" (delta), "d" ((u64)mul_frac) );
-#else
-#error implement me!
-#endif
-
-	return product;
-}
-
 static u64 pvclock_get_nsec_offset(struct pvclock_shadow_time *shadow)
 {
 	u64 delta = native_read_tsc() - shadow->tsc_timestamp;
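
The removed scale_delta() computes (delta << shift) * mul_frac / 2^32,
i.e. a 64-bit delta scaled by a 32.32 fixed-point fraction, using inline
assembly because the product needs a 96-bit intermediate. A portable
sketch of the same arithmetic, assuming a compiler with the 128-bit
integer extension (GCC/clang on 64-bit targets):

	static inline u64 scale_delta_sketch(u64 delta, u32 mul_frac, int shift)
	{
		if (shift < 0)
			delta >>= -shift;
		else
			delta <<= shift;
		/* 64x32 -> 96-bit product, keep bits 32..95 */
		return (u64)(((unsigned __int128)delta * mul_frac) >> 32);
	}
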
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 908ea54..fb8b376 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -720,7 +720,7 @@
 	}
 }
 
-static void set_spte_track_bits(u64 *sptep, u64 new_spte)
+static int set_spte_track_bits(u64 *sptep, u64 new_spte)
 {
 	pfn_t pfn;
 	u64 old_spte = *sptep;
@@ -731,19 +731,20 @@
 		old_spte = __xchg_spte(sptep, new_spte);
 
 	if (!is_rmap_spte(old_spte))
-		return;
+		return 0;
 
 	pfn = spte_to_pfn(old_spte);
 	if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
 		kvm_set_pfn_accessed(pfn);
 	if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask))
 		kvm_set_pfn_dirty(pfn);
+	return 1;
 }
 
 static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte)
 {
-	set_spte_track_bits(sptep, new_spte);
-	rmap_remove(kvm, sptep);
+	if (set_spte_track_bits(sptep, new_spte))
+		rmap_remove(kvm, sptep);
 }
 
 static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2288ad8..cdac9e5 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2560,6 +2560,7 @@
 		!kvm_exception_is_soft(vcpu->arch.exception.nr);
 	events->exception.nr = vcpu->arch.exception.nr;
 	events->exception.has_error_code = vcpu->arch.exception.has_error_code;
+	events->exception.pad = 0;
 	events->exception.error_code = vcpu->arch.exception.error_code;
 
 	events->interrupt.injected =
@@ -2573,12 +2574,14 @@
 	events->nmi.injected = vcpu->arch.nmi_injected;
 	events->nmi.pending = vcpu->arch.nmi_pending;
 	events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
+	events->nmi.pad = 0;
 
 	events->sipi_vector = vcpu->arch.sipi_vector;
 
 	events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
 			 | KVM_VCPUEVENT_VALID_SIPI_VECTOR
 			 | KVM_VCPUEVENT_VALID_SHADOW);
+	memset(&events->reserved, 0, sizeof(events->reserved));
 }
 
 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
@@ -2623,6 +2626,7 @@
 	dbgregs->dr6 = vcpu->arch.dr6;
 	dbgregs->dr7 = vcpu->arch.dr7;
 	dbgregs->flags = 0;
+	memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
 }
 
 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
@@ -3106,6 +3110,7 @@
 		sizeof(ps->channels));
 	ps->flags = kvm->arch.vpit->pit_state.flags;
 	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
+	memset(&ps->reserved, 0, sizeof(ps->reserved));
 	return r;
 }
 
@@ -3169,10 +3174,6 @@
 		struct kvm_memslots *slots, *old_slots;
 		unsigned long *dirty_bitmap;
 
-		spin_lock(&kvm->mmu_lock);
-		kvm_mmu_slot_remove_write_access(kvm, log->slot);
-		spin_unlock(&kvm->mmu_lock);
-
 		r = -ENOMEM;
 		dirty_bitmap = vmalloc(n);
 		if (!dirty_bitmap)
@@ -3194,6 +3195,10 @@
 		dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap;
 		kfree(old_slots);
 
+		spin_lock(&kvm->mmu_lock);
+		kvm_mmu_slot_remove_write_access(kvm, log->slot);
+		spin_unlock(&kvm->mmu_lock);
+
 		r = -EFAULT;
 		if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) {
 			vfree(dirty_bitmap);
@@ -3486,6 +3491,7 @@
 		user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
 		local_irq_enable();
 		user_ns.flags = 0;
+		memset(&user_ns.pad, 0, sizeof(user_ns.pad));
 
 		r = -EFAULT;
 		if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
@@ -3972,8 +3978,10 @@
 		return X86EMUL_CONTINUE;
 
 	if (kvm_x86_ops->has_wbinvd_exit()) {
+		preempt_disable();
 		smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
 				wbinvd_ipi, NULL, 1);
+		preempt_enable();
 		cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
 	}
 	wbinvd();
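
The memset()s of pad and reserved fields added in this file close a
kernel-to-userspace information leak: structures built on the stack and
copied out with copy_to_user() otherwise hand uninitialized kernel bytes
to userspace through their padding. The shape of the fix, with a
hypothetical reply structure:

	struct reply {			/* hypothetical ioctl reply */
		u32 value;
		u32 pad;		/* never computed, must still be zeroed */
		u64 reserved[4];
	} r;

	r.value = 42;
	r.pad = 0;
	memset(&r.reserved, 0, sizeof(r.reserved));
	if (copy_to_user(argp, &r, sizeof(r)))
		return -EFAULT;

A memset() of the whole structure before filling it in achieves the same.
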
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 4935848..12cdbb1 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -251,7 +251,7 @@
 	}
 }
 
-static int tlb_cpuhp_notify(struct notifier_block *n,
+static int __cpuinit tlb_cpuhp_notify(struct notifier_block *n,
 		unsigned long action, void *hcpu)
 {
 	switch (action & 0xf) {
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 117f5b8..d7b5109 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -147,8 +147,10 @@
 		irq = xen_allocate_pirq(v[i], 0, /* not sharable */
 			(type == PCI_CAP_ID_MSIX) ?
 			"pcifront-msi-x" : "pcifront-msi");
-		if (irq < 0)
-			return -1;
+		if (irq < 0) {
+			ret = -1;
+			goto free;
+		}
 
 		ret = set_irq_msi(irq, msidesc);
 		if (ret)
@@ -164,7 +166,7 @@
 	if (ret == -ENODEV)
 		dev_err(&dev->dev, "Xen PCI frontend has not registered" \
 			" MSI/MSI-X support!\n");
-
+free:
 	kfree(v);
 	return ret;
 }
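
The fix above converts an early "return -1" into "goto free" so the
vector allocated at the top of the function is released on every exit
path. The general pattern, with stand-in helpers:

	int ret;
	int *v = kcalloc(nvec, sizeof(*v), GFP_KERNEL);

	if (!v)
		return -ENOMEM;

	ret = do_setup(v);	/* do_setup() is hypothetical */
	if (ret < 0)
		goto free;	/* error: still release v below */
	ret = 0;
free:
	kfree(v);		/* single cleanup point */
	return ret;
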
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 20ea20a..a318194 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -1343,8 +1343,8 @@
 	 * each bau_desc is 64 bytes; there are 8 (UV_ITEMS_PER_DESCRIPTOR)
 	 * per cpu; and up to 32 (UV_ADP_SIZE) cpu's per uvhub
 	 */
-	bau_desc = (struct bau_desc *)kmalloc_node(sizeof(struct bau_desc)*
-		UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node);
+	bau_desc = kmalloc_node(sizeof(struct bau_desc) * UV_ADP_SIZE
+				* UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node);
 	BUG_ON(!bau_desc);
 
 	pa = uv_gpa(bau_desc); /* need the real nasid*/
@@ -1402,9 +1402,9 @@
 	struct bau_payload_queue_entry *pqp_malloc;
 	struct bau_control *bcp;
 
-	pqp = (struct bau_payload_queue_entry *) kmalloc_node(
-		(DEST_Q_SIZE + 1) * sizeof(struct bau_payload_queue_entry),
-		GFP_KERNEL, node);
+	pqp = kmalloc_node((DEST_Q_SIZE + 1)
+			   * sizeof(struct bau_payload_queue_entry),
+			   GFP_KERNEL, node);
 	BUG_ON(!pqp);
 	pqp_malloc = pqp;
 
@@ -1520,8 +1520,7 @@
 
 	timeout_us = calculate_destination_timeout();
 
-	uvhub_descs = (struct uvhub_desc *)
-		kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
+	uvhub_descs = kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
 	memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
 	uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);
 	for_each_present_cpu(cpu) {
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index c237b81..21ed8d7 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -2126,7 +2126,7 @@
 {
 	pmd_t *kernel_pmd;
 
-	level2_kernel_pgt = extend_brk(sizeof(pmd_t *) * PTRS_PER_PMD, PAGE_SIZE);
+	level2_kernel_pgt = extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
 
 	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
 				  xen_start_info->nr_pt_frames * PAGE_SIZE +
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index b1dbdaa..769c4b0 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -118,16 +118,18 @@
 						     const struct e820map *e820)
 {
 	phys_addr_t max_addr = PFN_PHYS(max_pfn);
-	phys_addr_t last_end = 0;
+	phys_addr_t last_end = ISA_END_ADDRESS;
 	unsigned long released = 0;
 	int i;
 
+	/* Free any unused memory above the low 1Mbyte. */
 	for (i = 0; i < e820->nr_map && last_end < max_addr; i++) {
 		phys_addr_t end = e820->map[i].addr;
 		end = min(max_addr, end);
 
-		released += xen_release_chunk(last_end, end);
-		last_end = e820->map[i].addr + e820->map[i].size;
+		if (last_end < end)
+			released += xen_release_chunk(last_end, end);
+		last_end = max(last_end, e820->map[i].addr + e820->map[i].size);
 	}
 
 	if (last_end < max_addr)
@@ -164,6 +166,7 @@
 		XENMEM_memory_map;
 	rc = HYPERVISOR_memory_op(op, &memmap);
 	if (rc == -ENOSYS) {
+		BUG_ON(xen_initial_domain());
 		memmap.nr_entries = 1;
 		map[0].addr = 0ULL;
 		map[0].size = mem_end;
@@ -201,12 +204,13 @@
 	}
 
 	/*
-	 * Even though this is normal, usable memory under Xen, reserve
-	 * ISA memory anyway because too many things think they can poke
+	 * In domU, the ISA region is normal, usable memory, but we
+	 * reserve ISA memory anyway because too many things poke
 	 * about in there.
 	 *
-	 * In a dom0 kernel, this region is identity mapped with the
-	 * hardware ISA area, so it really is out of bounds.
+	 * In dom0, the host E820 information can leave gaps in the
+	 * ISA range, which would cause us to release those pages.  To
+	 * avoid this, we unconditionally reserve them here.
 	 */
 	e820_add_region(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS,
 			E820_RESERVED);
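
The reworked loop releases only true gaps in the E820 map above the low
1MB: last_end starts at ISA_END_ADDRESS, a chunk is released only when
last_end < end, and max() keeps last_end monotonic even if entries
overlap. A sketch of the walk with stand-in types and a stand-in
release() helper, assuming the map is sorted by address:

	unsigned long long last_end = ISA_END_ADDRESS;	/* skip low 1MB */
	unsigned long long released = 0;
	int i;

	for (i = 0; i < nr_map && last_end < max_addr; i++) {
		unsigned long long end = min(max_addr, map[i].addr);

		if (last_end < end)		/* a genuine gap */
			released += release(last_end, end);
		/* never step backwards over an overlapping entry */
		last_end = max(last_end, map[i].addr + map[i].size);
	}
	if (last_end < max_addr)
		released += release(last_end, max_addr);
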
diff --git a/block/blk-core.c b/block/blk-core.c
index f0834e2..4ce953f 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1194,13 +1194,6 @@
 	int where = ELEVATOR_INSERT_SORT;
 	int rw_flags;
 
-	/* REQ_HARDBARRIER is no more */
-	if (WARN_ONCE(bio->bi_rw & REQ_HARDBARRIER,
-		"block: HARDBARRIER is deprecated, use FLUSH/FUA instead\n")) {
-		bio_endio(bio, -EOPNOTSUPP);
-		return 0;
-	}
-
 	/*
 	 * low level driver can indicate that it wants pages above a
 	 * certain limit bounced to low memory (ie for highmem, or even
@@ -1351,7 +1344,7 @@
 			bdevname(bio->bi_bdev, b),
 			bio->bi_rw,
 			(unsigned long long)bio->bi_sector + bio_sectors(bio),
-			(long long)(bio->bi_bdev->bd_inode->i_size >> 9));
+			(long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));
 
 	set_bit(BIO_EOF, &bio->bi_flags);
 }
@@ -1404,7 +1397,7 @@
 		return 0;
 
 	/* Test device or partition size, when known. */
-	maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
+	maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
 	if (maxsector) {
 		sector_t sector = bio->bi_sector;
 
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index d22c4c5..3c7a339 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -153,20 +153,6 @@
 }
 EXPORT_SYMBOL(get_io_context);
 
-void copy_io_context(struct io_context **pdst, struct io_context **psrc)
-{
-	struct io_context *src = *psrc;
-	struct io_context *dst = *pdst;
-
-	if (src) {
-		BUG_ON(atomic_long_read(&src->refcount) == 0);
-		atomic_long_inc(&src->refcount);
-		put_io_context(dst);
-		*pdst = src;
-	}
-}
-EXPORT_SYMBOL(copy_io_context);
-
 static int __init blk_ioc_init(void)
 {
 	iocontext_cachep = kmem_cache_create("blkdev_ioc",
diff --git a/block/blk-map.c b/block/blk-map.c
index d4a586d..5d5dbe4 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -205,6 +205,8 @@
 			unaligned = 1;
 			break;
 		}
+		if (!iov[i].iov_len)
+			return -EINVAL;
 	}
 
 	if (unaligned || (q->dma_pad_mask & len) || map_data)
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index 119f07b..58c6ee5 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -744,13 +744,13 @@
 		bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
 		return 0;
 	case BLKGETSIZE:
-		size = bdev->bd_inode->i_size;
+		size = i_size_read(bdev->bd_inode);
 		if ((size >> 9) > ~0UL)
 			return -EFBIG;
 		return compat_put_ulong(arg, size >> 9);
 
 	case BLKGETSIZE64_32:
-		return compat_put_u64(arg, bdev->bd_inode->i_size);
+		return compat_put_u64(arg, i_size_read(bdev->bd_inode));
 
 	case BLKTRACESETUP32:
 	case BLKTRACESTART: /* compatible */
diff --git a/block/elevator.c b/block/elevator.c
index 282e830..2569512 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -429,7 +429,7 @@
 	q->nr_sorted--;
 
 	boundary = q->end_sector;
-	stop_flags = REQ_SOFTBARRIER | REQ_HARDBARRIER | REQ_STARTED;
+	stop_flags = REQ_SOFTBARRIER | REQ_STARTED;
 	list_for_each_prev(entry, &q->queue_head) {
 		struct request *pos = list_entry_rq(entry);
 
@@ -691,7 +691,7 @@
 void __elv_add_request(struct request_queue *q, struct request *rq, int where,
 		       int plug)
 {
-	if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
+	if (rq->cmd_flags & REQ_SOFTBARRIER) {
 		/* barriers are scheduling boundary, update end_sector */
 		if (rq->cmd_type == REQ_TYPE_FS ||
 		    (rq->cmd_flags & REQ_DISCARD)) {
diff --git a/block/ioctl.c b/block/ioctl.c
index d724ceb..3d866d0 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -125,7 +125,7 @@
 	start >>= 9;
 	len >>= 9;
 
-	if (start + len > (bdev->bd_inode->i_size >> 9))
+	if (start + len > (i_size_read(bdev->bd_inode) >> 9))
 		return -EINVAL;
 	if (secure)
 		flags |= BLKDEV_DISCARD_SECURE;
@@ -242,6 +242,7 @@
 		 * We need to set the startsect first, the driver may
 		 * want to override it.
 		 */
+		memset(&geo, 0, sizeof(geo));
 		geo.start = get_start_sect(bdev);
 		ret = disk->fops->getgeo(bdev, &geo);
 		if (ret)
@@ -307,12 +308,12 @@
 		ret = blkdev_reread_part(bdev);
 		break;
 	case BLKGETSIZE:
-		size = bdev->bd_inode->i_size;
+		size = i_size_read(bdev->bd_inode);
 		if ((size >> 9) > ~0UL)
 			return -EFBIG;
 		return put_ulong(arg, size >> 9);
 	case BLKGETSIZE64:
-		return put_u64(arg, bdev->bd_inode->i_size);
+		return put_u64(arg, i_size_read(bdev->bd_inode));
 	case BLKTRACESTART:
 	case BLKTRACESTOP:
 	case BLKTRACESETUP:
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index a8b5a10..4f4230b 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -321,33 +321,47 @@
 	if (hdr->iovec_count) {
 		const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
 		size_t iov_data_len;
-		struct sg_iovec *iov;
+		struct sg_iovec *sg_iov;
+		struct iovec *iov;
+		int i;
 
-		iov = kmalloc(size, GFP_KERNEL);
-		if (!iov) {
+		sg_iov = kmalloc(size, GFP_KERNEL);
+		if (!sg_iov) {
 			ret = -ENOMEM;
 			goto out;
 		}
 
-		if (copy_from_user(iov, hdr->dxferp, size)) {
-			kfree(iov);
+		if (copy_from_user(sg_iov, hdr->dxferp, size)) {
+			kfree(sg_iov);
 			ret = -EFAULT;
 			goto out;
 		}
 
+		/*
+		 * Sum up the vecs, making sure they don't overflow
+		 */
+		iov = (struct iovec *) sg_iov;
+		iov_data_len = 0;
+		for (i = 0; i < hdr->iovec_count; i++) {
+			if (iov_data_len + iov[i].iov_len < iov_data_len) {
+				kfree(sg_iov);
+				ret = -EINVAL;
+				goto out;
+			}
+			iov_data_len += iov[i].iov_len;
+		}
+
 		/* SG_IO howto says that the shorter of the two wins */
-		iov_data_len = iov_length((struct iovec *)iov,
-					  hdr->iovec_count);
 		if (hdr->dxfer_len < iov_data_len) {
-			hdr->iovec_count = iov_shorten((struct iovec *)iov,
+			hdr->iovec_count = iov_shorten(iov,
 						       hdr->iovec_count,
 						       hdr->dxfer_len);
 			iov_data_len = hdr->dxfer_len;
 		}
 
-		ret = blk_rq_map_user_iov(q, rq, NULL, iov, hdr->iovec_count,
+		ret = blk_rq_map_user_iov(q, rq, NULL, sg_iov, hdr->iovec_count,
 					  iov_data_len, GFP_KERNEL);
-		kfree(iov);
+		kfree(sg_iov);
 	} else if (hdr->dxfer_len)
 		ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
 				      GFP_KERNEL);
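
The added loop validates user-supplied iovecs before trusting their sum:
for unsigned types, a + b < a is true exactly when the addition wrapped.
As a standalone helper, the check looks like this (a sketch, not the
patch's code):

	/* Returns 0 and stores the total, or -EINVAL on overflow. */
	static int sum_iov_lengths(const struct iovec *iov, int count,
				   size_t *total)
	{
		size_t sum = 0;
		int i;

		for (i = 0; i < count; i++) {
			if (sum + iov[i].iov_len < sum)	/* unsigned wrap */
				return -EINVAL;
			sum += iov[i].iov_len;
		}
		*total = sum;
		return 0;
	}
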
diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
index de30782..75586f1 100644
--- a/crypto/pcrypt.c
+++ b/crypto/pcrypt.c
@@ -504,7 +504,6 @@
 
 static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt)
 {
-	kobject_put(&pcrypt->pinst->kobj);
 	free_cpumask_var(pcrypt->cb_cpumask->mask);
 	kfree(pcrypt->cb_cpumask);
 
diff --git a/drivers/Makefile b/drivers/Makefile
index 14cf907..f3ebb30 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -26,6 +26,7 @@
 
 # char/ comes before serial/ etc so that the VT console is the boot-time
 # default.
+obj-y				+= tty/
 obj-y				+= char/
 
 # gpu/ comes after char for AGP vs DRM startup
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index d050e07..3f91c01 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -2552,8 +2552,11 @@
 		 *
 		 * If door lock fails, always clear sdev->locked to
 		 * avoid this infinite loop.
+		 *
+		 * This may happen before SCSI scan is complete.  Make
+		 * sure qc->dev->sdev isn't NULL before dereferencing.
 		 */
-		if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL)
+		if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL && qc->dev->sdev)
 			qc->dev->sdev->locked = 0;
 
 		qc->scsicmd->result = SAM_STAT_CHECK_CONDITION;
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index eaf1941..6bd9425 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -142,7 +142,7 @@
 static int pio_mask = ATA_PIO4;	/* PIO range for autospeed devices */
 static int iordy_mask = 0xFFFFFFFF;	/* Use iordy if available */
 
-#ifdef PATA_WINBOND_VLB_MODULE
+#ifdef CONFIG_PATA_WINBOND_VLB_MODULE
 static int winbond = 1;		/* Set to probe Winbond controllers,
 					give I/O port if non standard */
 #else
diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
index 74b8298..fa1b95a 100644
--- a/drivers/ata/pata_octeon_cf.c
+++ b/drivers/ata/pata_octeon_cf.c
@@ -653,8 +653,6 @@
 
 		ap = host->ports[i];
 		ocd = ap->dev->platform_data;
-
-		ocd = ap->dev->platform_data;
 		cf_port = ap->private_data;
 		dma_int.u64 =
 			cvmx_read_csr(CVMX_MIO_BOOT_DMA_INTX(ocd->dma_engine));
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index c8fc69c..c097619 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -92,7 +92,7 @@
 
 #define FORE200E_INDEX(virt_addr, type, index)     (&((type *)(virt_addr))[ index ])
 
-#define FORE200E_NEXT_ENTRY(index, modulo)         (index = ++(index) % (modulo))
+#define FORE200E_NEXT_ENTRY(index, modulo)         (index = ((index) + 1) % (modulo))
 
 #if 1
 #define ASSERT(expr)     if (!(expr)) { \
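
The old FORE200E_NEXT_ENTRY body, index = ++(index) % (modulo), modifies
index twice without an intervening sequence point, which is undefined
behaviour in C; the result depends on the compiler. The replacement reads
index once and writes it once:

	/* undefined: i is written by both ++ and the assignment */
	/*	i = ++(i) % 8;				*/
	/* well-defined, as in the fixed macro:		*/
	i = (i + 1) % 8;
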
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
index cbe15a8..930051d 100644
--- a/drivers/atm/lanai.c
+++ b/drivers/atm/lanai.c
@@ -2241,11 +2241,8 @@
 	memcpy(atmdev->esi, eeprom_mac(lanai), ESI_LEN);
 	lanai_timed_poll_start(lanai);
 	printk(KERN_NOTICE DEV_LABEL "(itf %d): rev.%d, base=0x%lx, irq=%u "
-	    "(%02X-%02X-%02X-%02X-%02X-%02X)\n", lanai->number,
-	    (int) lanai->pci->revision, (unsigned long) lanai->base,
-	    lanai->pci->irq,
-	    atmdev->esi[0], atmdev->esi[1], atmdev->esi[2],
-	    atmdev->esi[3], atmdev->esi[4], atmdev->esi[5]);
+		"(%pMF)\n", lanai->number, (int) lanai->pci->revision,
+		(unsigned long) lanai->base, lanai->pci->irq, atmdev->esi);
 	printk(KERN_NOTICE DEV_LABEL "(itf %d): LANAI%s, serialno=%u(0x%X), "
 	    "board_rev=%d\n", lanai->number,
 	    lanai->type==lanai2 ? "2" : "HB", (unsigned int) lanai->serialno,
diff --git a/drivers/atm/solos-attrlist.c b/drivers/atm/solos-attrlist.c
index 1a9332e..9a676ee 100644
--- a/drivers/atm/solos-attrlist.c
+++ b/drivers/atm/solos-attrlist.c
@@ -1,6 +1,7 @@
 SOLOS_ATTR_RO(DriverVersion)
 SOLOS_ATTR_RO(APIVersion)
 SOLOS_ATTR_RO(FirmwareVersion)
+SOLOS_ATTR_RO(Version)
 // SOLOS_ATTR_RO(DspVersion)
 // SOLOS_ATTR_RO(CommonHandshake)
 SOLOS_ATTR_RO(Connected)
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index f46138a..2e08c99 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -1161,6 +1161,14 @@
 	dev_info(&dev->dev, "Solos FPGA Version %d.%02d svn-%d\n",
 		 major_ver, minor_ver, fpga_ver);
 
+	if (fpga_ver < 37 && (fpga_upgrade || firmware_upgrade ||
+			      db_fpga_upgrade || db_firmware_upgrade)) {
+		dev_warn(&dev->dev,
+			 "FPGA too old; cannot upgrade flash. Use JTAG.\n");
+		fpga_upgrade = firmware_upgrade = 0;
+		db_fpga_upgrade = db_firmware_upgrade = 0;
+	}
+
 	if (card->fpga_version >= DMA_SUPPORTED){
 		card->using_dma = 1;
 	} else {
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 541e188..528f631 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -180,9 +180,6 @@
 		BUG();
 		bio_endio(bio, -ENXIO);
 		return 0;
-	} else if (bio->bi_rw & REQ_HARDBARRIER) {
-		bio_endio(bio, -EOPNOTSUPP);
-		return 0;
 	} else if (bio->bi_io_vec == NULL) {
 		printk(KERN_ERR "aoe: bi_io_vec is NULL\n");
 		BUG();
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 5674bd0..de0435e6 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -297,8 +297,8 @@
 	struct sk_buff *skb;
 	struct net_device *ifp;
 
-	read_lock(&dev_base_lock);
-	for_each_netdev(&init_net, ifp) {
+	rcu_read_lock();
+	for_each_netdev_rcu(&init_net, ifp) {
 		dev_hold(ifp);
 		if (!is_aoe_netif(ifp))
 			goto cont;
@@ -325,7 +325,7 @@
 cont:
 		dev_put(ifp);
 	}
-	read_unlock(&dev_base_lock);
+	rcu_read_unlock();
 }
 
 static void
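
The change above moves the netdev walk from dev_base_lock to RCU. The
rule of thumb: for_each_netdev_rcu() must run inside rcu_read_lock(),
and any device used beyond the read-side section needs its own
reference. The canonical shape, mirroring the code in this hunk:

	struct net_device *ifp;

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, ifp) {
		dev_hold(ifp);		/* pin while we work on it */
		/* ... examine or use ifp ... */
		dev_put(ifp);
	}
	rcu_read_unlock();
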
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 2cc4dda..a67d0a6 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -113,6 +113,8 @@
 	{0x409D0E11, "Smart Array 6400 EM", &SA5_access},
 	{0x40910E11, "Smart Array 6i", &SA5_access},
 	{0x3225103C, "Smart Array P600", &SA5_access},
+	{0x3223103C, "Smart Array P800", &SA5_access},
+	{0x3234103C, "Smart Array P400", &SA5_access},
 	{0x3235103C, "Smart Array P400i", &SA5_access},
 	{0x3211103C, "Smart Array E200i", &SA5_access},
 	{0x3212103C, "Smart Array E200", &SA5_access},
@@ -3753,7 +3755,7 @@
 	for (i = 0; i < MAX_CONFIG_WAIT; i++) {
 		if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
 			break;
-		msleep(10);
+		usleep_range(10000, 20000);
 	}
 }
 
@@ -3937,10 +3939,9 @@
 	*board_id = ((subsystem_device_id << 16) & 0xffff0000) |
 			subsystem_vendor_id;
 
-	for (i = 0; i < ARRAY_SIZE(products); i++) {
+	for (i = 0; i < ARRAY_SIZE(products); i++)
 		if (*board_id == products[i].board_id)
 			return i;
-	}
 	dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x, ignoring.\n",
 		*board_id);
 	return -ENODEV;
@@ -3971,18 +3972,31 @@
 	return -ENODEV;
 }
 
-static int __devinit cciss_wait_for_board_ready(ctlr_info_t *h)
+static int __devinit cciss_wait_for_board_state(struct pci_dev *pdev,
+	void __iomem *vaddr, int wait_for_ready)
+#define BOARD_READY 1
+#define BOARD_NOT_READY 0
 {
-	int i;
+	int i, iterations;
 	u32 scratchpad;
 
-	for (i = 0; i < CCISS_BOARD_READY_ITERATIONS; i++) {
-		scratchpad = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
-		if (scratchpad == CCISS_FIRMWARE_READY)
-			return 0;
+	if (wait_for_ready)
+		iterations = CCISS_BOARD_READY_ITERATIONS;
+	else
+		iterations = CCISS_BOARD_NOT_READY_ITERATIONS;
+
+	for (i = 0; i < iterations; i++) {
+		scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
+		if (wait_for_ready) {
+			if (scratchpad == CCISS_FIRMWARE_READY)
+				return 0;
+		} else {
+			if (scratchpad != CCISS_FIRMWARE_READY)
+				return 0;
+		}
 		msleep(CCISS_BOARD_READY_POLL_INTERVAL_MSECS);
 	}
-	dev_warn(&h->pdev->dev, "board not ready, timed out.\n");
+	dev_warn(&pdev->dev, "board not ready, timed out.\n");
 	return -ENODEV;
 }
 
@@ -4031,6 +4045,11 @@
 static void __devinit cciss_get_max_perf_mode_cmds(struct ctlr_info *h)
 {
 	h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
+
+	/* Limit commands in memory limited kdump scenario. */
+	if (reset_devices && h->max_commands > 32)
+		h->max_commands = 32;
+
 	if (h->max_commands < 16) {
 		dev_warn(&h->pdev->dev, "Controller reports "
 			"max supported commands of %d, an obvious lie. "
@@ -4148,7 +4167,7 @@
 		err = -ENOMEM;
 		goto err_out_free_res;
 	}
-	err = cciss_wait_for_board_ready(h);
+	err = cciss_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
 	if (err)
 		goto err_out_free_res;
 	err = cciss_find_cfgtables(h);
@@ -4313,36 +4332,6 @@
 #define cciss_soft_reset_controller(p) cciss_message(p, 1, 0)
 #define cciss_noop(p) cciss_message(p, 3, 0)
 
-static __devinit int cciss_reset_msi(struct pci_dev *pdev)
-{
-/* the #defines are stolen from drivers/pci/msi.h. */
-#define msi_control_reg(base)		(base + PCI_MSI_FLAGS)
-#define PCI_MSIX_FLAGS_ENABLE		(1 << 15)
-
-	int pos;
-	u16 control = 0;
-
-	pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
-	if (pos) {
-		pci_read_config_word(pdev, msi_control_reg(pos), &control);
-		if (control & PCI_MSI_FLAGS_ENABLE) {
-			dev_info(&pdev->dev, "resetting MSI\n");
-			pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSI_FLAGS_ENABLE);
-		}
-	}
-
-	pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
-	if (pos) {
-		pci_read_config_word(pdev, msi_control_reg(pos), &control);
-		if (control & PCI_MSIX_FLAGS_ENABLE) {
-			dev_info(&pdev->dev, "resetting MSI-X\n");
-			pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSIX_FLAGS_ENABLE);
-		}
-	}
-
-	return 0;
-}
-
 static int cciss_controller_hard_reset(struct pci_dev *pdev,
 	void * __iomem vaddr, bool use_doorbell)
 {
@@ -4397,17 +4386,17 @@
  * states or using the doorbell register. */
 static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
 {
-	u16 saved_config_space[32];
 	u64 cfg_offset;
 	u32 cfg_base_addr;
 	u64 cfg_base_addr_index;
 	void __iomem *vaddr;
 	unsigned long paddr;
 	u32 misc_fw_support, active_transport;
-	int rc, i;
+	int rc;
 	CfgTable_struct __iomem *cfgtable;
 	bool use_doorbell;
 	u32 board_id;
+	u16 command_register;
 
 	/* For controllers as old a the p600, this is very nearly
 	 * the same thing as
@@ -4417,14 +4406,6 @@
 	 * pci_set_power_state(pci_dev, PCI_D0);
 	 * pci_restore_state(pci_dev);
 	 *
-	 * but we can't use these nice canned kernel routines on
-	 * kexec, because they also check the MSI/MSI-X state in PCI
-	 * configuration space and do the wrong thing when it is
-	 * set/cleared.  Also, the pci_save/restore_state functions
-	 * violate the ordering requirements for restoring the
-	 * configuration space from the CCISS document (see the
-	 * comment below).  So we roll our own ....
-	 *
 	 * For controllers newer than the P600, the pci power state
 	 * method of resetting doesn't work so we have another way
 	 * using the doorbell register.
@@ -4443,8 +4424,13 @@
 		return -ENODEV;
 	}
 
-	for (i = 0; i < 32; i++)
-		pci_read_config_word(pdev, 2*i, &saved_config_space[i]);
+	/* Save the PCI command register */
+	pci_read_config_word(pdev, 4, &command_register);
+	/* Turn the board off.  This is so that later pci_restore_state()
+	 * won't turn the board on before the rest of config space is ready.
+	 */
+	pci_disable_device(pdev);
+	pci_save_state(pdev);
 
 	/* find the first memory BAR, so we can find the cfg table */
 	rc = cciss_pci_find_memory_BAR(pdev, &paddr);
@@ -4479,26 +4465,32 @@
 	rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell);
 	if (rc)
 		goto unmap_cfgtable;
-
-	/* Restore the PCI configuration space.  The Open CISS
-	 * Specification says, "Restore the PCI Configuration
-	 * Registers, offsets 00h through 60h. It is important to
-	 * restore the command register, 16-bits at offset 04h,
-	 * last. Do not restore the configuration status register,
-	 * 16-bits at offset 06h."  Note that the offset is 2*i.
-	 */
-	for (i = 0; i < 32; i++) {
-		if (i == 2 || i == 3)
-			continue;
-		pci_write_config_word(pdev, 2*i, saved_config_space[i]);
+	pci_restore_state(pdev);
+	rc = pci_enable_device(pdev);
+	if (rc) {
+		dev_warn(&pdev->dev, "failed to enable device.\n");
+		goto unmap_cfgtable;
 	}
-	wmb();
-	pci_write_config_word(pdev, 4, saved_config_space[2]);
+	pci_write_config_word(pdev, 4, command_register);
 
 	/* Some devices (notably the HP Smart Array 5i Controller)
 	   need a little pause here */
 	msleep(CCISS_POST_RESET_PAUSE_MSECS);
 
+	/* Wait for board to become not ready, then ready. */
+	dev_info(&pdev->dev, "Waiting for board to become ready.\n");
+	rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY);
+	if (rc) /* Don't bail, might be E500, etc. which can't be reset */
+		dev_warn(&pdev->dev,
+			"failed waiting for board to become not ready\n");
+	rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_READY);
+	if (rc) {
+		dev_warn(&pdev->dev,
+			"failed waiting for board to become ready\n");
+		goto unmap_cfgtable;
+	}
+	dev_info(&pdev->dev, "board ready.\n");
+
 	/* Controller should be in simple mode at this point.  If it's not,
 	 * It means we're on one of those controllers which doesn't support
 	 * the doorbell reset method and on which the PCI power management reset
@@ -4539,8 +4531,6 @@
 		return 0; /* just try to do the kdump anyhow. */
 	if (rc)
 		return -ENODEV;
-	if (cciss_reset_msi(pdev))
-		return -ENODEV;
 
 	/* Now try to get the controller to respond to a no-op */
 	for (i = 0; i < CCISS_POST_RESET_NOOP_RETRIES; i++) {
@@ -4936,7 +4926,8 @@
 		}
 	}
 	kthread_stop(cciss_scan_thread);
-	remove_proc_entry("driver/cciss", NULL);
+	if (proc_cciss)
+		remove_proc_entry("driver/cciss", NULL);
 	bus_unregister(&cciss_bus_type);
 }
 
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
index ae340ff..4b8933d 100644
--- a/drivers/block/cciss.h
+++ b/drivers/block/cciss.h
@@ -200,10 +200,14 @@
  * the above.
  */
 #define CCISS_BOARD_READY_WAIT_SECS (120)
+#define CCISS_BOARD_NOT_READY_WAIT_SECS (10)
 #define CCISS_BOARD_READY_POLL_INTERVAL_MSECS (100)
 #define CCISS_BOARD_READY_ITERATIONS \
 	((CCISS_BOARD_READY_WAIT_SECS * 1000) / \
 		CCISS_BOARD_READY_POLL_INTERVAL_MSECS)
+#define CCISS_BOARD_NOT_READY_ITERATIONS \
+	((CCISS_BOARD_NOT_READY_WAIT_SECS * 1000) / \
+		CCISS_BOARD_READY_POLL_INTERVAL_MSECS)
 #define CCISS_POST_RESET_PAUSE_MSECS (3000)
 #define CCISS_POST_RESET_NOOP_INTERVAL_MSECS (1000)
 #define CCISS_POST_RESET_NOOP_RETRIES (12)
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index ac04ef9..ba95cba 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -78,11 +78,10 @@
 	init_completion(&md_io.event);
 	md_io.error = 0;
 
-	if ((rw & WRITE) && !test_bit(MD_NO_BARRIER, &mdev->flags))
-		rw |= REQ_HARDBARRIER;
+	if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags))
+		rw |= REQ_FUA;
 	rw |= REQ_UNPLUG | REQ_SYNC;
 
- retry:
 	bio = bio_alloc(GFP_NOIO, 1);
 	bio->bi_bdev = bdev->md_bdev;
 	bio->bi_sector = sector;
@@ -100,17 +99,6 @@
 	wait_for_completion(&md_io.event);
 	ok = bio_flagged(bio, BIO_UPTODATE) && md_io.error == 0;
 
-	/* check for unsupported barrier op.
-	 * would rather check on EOPNOTSUPP, but that is not reliable.
-	 * don't try again for ANY return value != 0 */
-	if (unlikely((bio->bi_rw & REQ_HARDBARRIER) && !ok)) {
-		/* Try again with no barrier */
-		dev_warn(DEV, "Barriers not supported on meta data device - disabling\n");
-		set_bit(MD_NO_BARRIER, &mdev->flags);
-		rw &= ~REQ_HARDBARRIER;
-		bio_put(bio);
-		goto retry;
-	}
  out:
 	bio_put(bio);
 	return ok;
@@ -284,18 +272,32 @@
 	u32 xor_sum = 0;
 
 	if (!get_ldev(mdev)) {
-		dev_err(DEV, "get_ldev() failed in w_al_write_transaction\n");
+		dev_err(DEV,
+			"disk is %s, cannot start al transaction (-%d +%d)\n",
+			drbd_disk_str(mdev->state.disk), evicted, new_enr);
 		complete(&((struct update_al_work *)w)->event);
 		return 1;
 	}
 	/* do we have to do a bitmap write, first?
 	 * TODO reduce maximum latency:
 	 * submit both bios, then wait for both,
-	 * instead of doing two synchronous sector writes. */
+	 * instead of doing two synchronous sector writes.
+	 * For now, we must not write the transaction,
+	 * if we cannot write out the bitmap of the evicted extent. */
 	if (mdev->state.conn < C_CONNECTED && evicted != LC_FREE)
 		drbd_bm_write_sect(mdev, evicted/AL_EXT_PER_BM_SECT);
 
-	mutex_lock(&mdev->md_io_mutex); /* protects md_io_page, al_tr_cycle, ... */
+	/* The bitmap write may have failed, causing a state change. */
+	if (mdev->state.disk < D_INCONSISTENT) {
+		dev_err(DEV,
+			"disk is %s, cannot write al transaction (-%d +%d)\n",
+			drbd_disk_str(mdev->state.disk), evicted, new_enr);
+		complete(&((struct update_al_work *)w)->event);
+		put_ldev(mdev);
+		return 1;
+	}
+
+	mutex_lock(&mdev->md_io_mutex); /* protects md_io_buffer, al_tr_cycle, ... */
 	buffer = (struct al_transaction *)page_address(mdev->md_io_page);
 
 	buffer->magic = __constant_cpu_to_be32(DRBD_MAGIC);
@@ -739,7 +741,7 @@
 	unsigned int enr;
 	unsigned long add = 0;
 	char ppb[10];
-	int i;
+	int i, tmp;
 
 	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
 
@@ -747,7 +749,9 @@
 		enr = lc_element_by_index(mdev->act_log, i)->lc_number;
 		if (enr == LC_FREE)
 			continue;
-		add += drbd_bm_ALe_set_all(mdev, enr);
+		tmp = drbd_bm_ALe_set_all(mdev, enr);
+		dynamic_dev_dbg(DEV, "AL: set %d bits in extent %u\n", tmp, enr);
+		add += tmp;
 	}
 
 	lc_unlock(mdev->act_log);
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 9bdcf43..1ea1a34 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -114,11 +114,11 @@
 #define D_ASSERT(exp)	if (!(exp)) \
 	 dev_err(DEV, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__)
 
-#define ERR_IF(exp) if (({				\
-	int _b = (exp) != 0;				\
-	if (_b) dev_err(DEV, "%s: (%s) in %s:%d\n",	\
-		__func__, #exp, __FILE__, __LINE__);	\
-	 _b;						\
+#define ERR_IF(exp) if (({						\
+	int _b = (exp) != 0;						\
+	if (_b) dev_err(DEV, "ASSERT FAILED: %s: (%s) in %s:%d\n",	\
+			__func__, #exp, __FILE__, __LINE__);		\
+	_b;								\
 	}))
 
 /* Defines to control fault insertion */
@@ -749,17 +749,12 @@
 
 /* drbd_epoch flag bits */
 enum {
-	DE_BARRIER_IN_NEXT_EPOCH_ISSUED,
-	DE_BARRIER_IN_NEXT_EPOCH_DONE,
-	DE_CONTAINS_A_BARRIER,
 	DE_HAVE_BARRIER_NUMBER,
-	DE_IS_FINISHING,
 };
 
 enum epoch_event {
 	EV_PUT,
 	EV_GOT_BARRIER_NR,
-	EV_BARRIER_DONE,
 	EV_BECAME_LAST,
 	EV_CLEANUP = 32, /* used as flag */
 };
@@ -801,11 +796,6 @@
 	__EE_CALL_AL_COMPLETE_IO,
 	__EE_MAY_SET_IN_SYNC,
 
-	/* This epoch entry closes an epoch using a barrier.
-	 * On sucessful completion, the epoch is released,
-	 * and the P_BARRIER_ACK send. */
-	__EE_IS_BARRIER,
-
 	/* In case a barrier failed,
 	 * we need to resubmit without the barrier flag. */
 	__EE_RESUBMITTED,
@@ -820,7 +810,6 @@
 };
 #define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO)
 #define EE_MAY_SET_IN_SYNC     (1<<__EE_MAY_SET_IN_SYNC)
-#define EE_IS_BARRIER          (1<<__EE_IS_BARRIER)
 #define	EE_RESUBMITTED         (1<<__EE_RESUBMITTED)
 #define EE_WAS_ERROR           (1<<__EE_WAS_ERROR)
 #define EE_HAS_DIGEST          (1<<__EE_HAS_DIGEST)
@@ -843,16 +832,15 @@
 				 * Gets cleared when the state.conn
 				 * goes into C_CONNECTED state. */
 	WRITE_BM_AFTER_RESYNC,	/* A kmalloc() during resync failed */
-	NO_BARRIER_SUPP,	/* underlying block device doesn't implement barriers */
 	CONSIDER_RESYNC,
 
-	MD_NO_BARRIER,		/* meta data device does not support barriers,
-				   so don't even try */
+	MD_NO_FUA,		/* User wants us to not use FUA/FLUSH on meta data dev */
 	SUSPEND_IO,		/* suspend application io */
 	BITMAP_IO,		/* suspend application io;
 				   once no more io in flight, start bitmap io */
 	BITMAP_IO_QUEUED,       /* Started bitmap IO */
-	GO_DISKLESS,		/* Disk failed, local_cnt reached zero, we are going diskless */
+	GO_DISKLESS,		/* Disk is being detached, on io-error or admin request. */
+	WAS_IO_ERROR,		/* Local disk failed, returned IO error */
 	RESYNC_AFTER_NEG,       /* Resync after online grow after the attach&negotiate finished. */
 	NET_CONGESTED,		/* The data socket is congested */
 
@@ -947,7 +935,6 @@
 	WO_none,
 	WO_drain_io,
 	WO_bdev_flush,
-	WO_bio_barrier
 };
 
 struct fifo_buffer {
@@ -1281,6 +1268,7 @@
 extern int drbd_bmio_clear_n_write(struct drbd_conf *mdev);
 extern int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why);
 extern void drbd_go_diskless(struct drbd_conf *mdev);
+extern void drbd_ldev_destroy(struct drbd_conf *mdev);
 
 
 /* Meta data layout
@@ -1798,17 +1786,17 @@
 	case EP_PASS_ON:
 		if (!forcedetach) {
 			if (__ratelimit(&drbd_ratelimit_state))
-				dev_err(DEV, "Local IO failed in %s."
-					     "Passing error on...\n", where);
+				dev_err(DEV, "Local IO failed in %s.\n", where);
 			break;
 		}
 		/* NOTE fall through to detach case if forcedetach set */
 	case EP_DETACH:
 	case EP_CALL_HELPER:
+		set_bit(WAS_IO_ERROR, &mdev->flags);
 		if (mdev->state.disk > D_FAILED) {
 			_drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL);
-			dev_err(DEV, "Local IO failed in %s."
-				     "Detaching...\n", where);
+			dev_err(DEV,
+				"Local IO failed in %s. Detaching...\n", where);
 		}
 		break;
 	}
@@ -1874,7 +1862,7 @@
 static inline sector_t drbd_get_capacity(struct block_device *bdev)
 {
 	/* return bdev ? get_capacity(bdev->bd_disk) : 0; */
-	return bdev ? bdev->bd_inode->i_size >> 9 : 0;
+	return bdev ? i_size_read(bdev->bd_inode) >> 9 : 0;
 }
 
 /**
@@ -2127,7 +2115,11 @@
 	__release(local);
 	D_ASSERT(i >= 0);
 	if (i == 0) {
+		if (mdev->state.disk == D_DISKLESS)
+			/* even internal references gone, safe to destroy */
+			drbd_ldev_destroy(mdev);
 		if (mdev->state.disk == D_FAILED)
+			/* all application IO references gone. */
 			drbd_go_diskless(mdev);
 		wake_up(&mdev->misc_wait);
 	}
@@ -2138,6 +2130,10 @@
 {
 	int io_allowed;
 
+	/* never get a reference while D_DISKLESS */
+	if (mdev->state.disk == D_DISKLESS)
+		return 0;
+
 	atomic_inc(&mdev->local_cnt);
 	io_allowed = (mdev->state.disk >= mins);
 	if (!io_allowed)
@@ -2406,12 +2402,12 @@
 {
 	int r;
 
-	if (test_bit(MD_NO_BARRIER, &mdev->flags))
+	if (test_bit(MD_NO_FUA, &mdev->flags))
 		return;
 
 	r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_KERNEL, NULL);
 	if (r) {
-		set_bit(MD_NO_BARRIER, &mdev->flags);
+		set_bit(MD_NO_FUA, &mdev->flags);
 		dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r);
 	}
 }
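
The put_ldev() change keys teardown off the reference count: the last
put in D_DISKLESS destroys the backing-device state, and
get_ldev_if_state() refuses new references once D_DISKLESS, so
destruction cannot race with new users. The generic get/put pattern,
sketched with stand-in names:

	static void obj_put(struct obj *o)
	{
		if (atomic_dec_and_test(&o->refs) && o->dead)
			obj_destroy(o);		/* last reference gone */
	}

	static int obj_get(struct obj *o)
	{
		if (o->dead)			/* no new refs while dying */
			return 0;
		atomic_inc(&o->refs);
		if (o->dead) {			/* raced with teardown */
			obj_put(o);
			return 0;
		}
		return 1;
	}
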
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 25c7a73..6be5401 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -835,6 +835,15 @@
 	    ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_TEAR_DOWN)
 		ns.conn = os.conn;
 
+	/* we cannot fail (again) if we already detached */
+	if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
+		ns.disk = D_DISKLESS;
+
+	/* if we are only D_ATTACHING yet,
+	 * we can (and should) go directly to D_DISKLESS. */
+	if (ns.disk == D_FAILED && os.disk == D_ATTACHING)
+		ns.disk = D_DISKLESS;
+
 	/* After C_DISCONNECTING only C_STANDALONE may follow */
 	if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE)
 		ns.conn = os.conn;
@@ -1056,7 +1065,15 @@
 	    !test_and_set_bit(CONFIG_PENDING, &mdev->flags))
 		set_bit(DEVICE_DYING, &mdev->flags);
 
-	mdev->state.i = ns.i;
+	/* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
+	 * on the ldev here, to be sure the transition -> D_DISKLESS resp.
+	 * drbd_ldev_destroy() won't happen before our corresponding
+	 * after_state_ch works run, where we put_ldev again. */
+	if ((os.disk != D_FAILED && ns.disk == D_FAILED) ||
+	    (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
+		atomic_inc(&mdev->local_cnt);
+
+	mdev->state = ns;
 	wake_up(&mdev->misc_wait);
 	wake_up(&mdev->state_wait);
 
@@ -1268,7 +1285,6 @@
 			if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
 				drbd_uuid_new_current(mdev);
 				clear_bit(NEW_CUR_UUID, &mdev->flags);
-				drbd_md_sync(mdev);
 			}
 			spin_lock_irq(&mdev->req_lock);
 			_drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL);
@@ -1365,63 +1381,64 @@
 	    os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
 		drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL, "set_n_write from invalidate");
 
-	/* first half of local IO error */
-	if (os.disk > D_FAILED && ns.disk == D_FAILED) {
-		enum drbd_io_error_p eh = EP_PASS_ON;
+	/* first half of local IO error, failure to attach,
+	 * or administrative detach */
+	if (os.disk != D_FAILED && ns.disk == D_FAILED) {
+		enum drbd_io_error_p eh;
+		int was_io_error;
+		/* corresponding get_ldev was in __drbd_set_state, to serialize
+		 * our cleanup here with the transition to D_DISKLESS,
+		 * so it is safe to dereference ldev here. */
+		eh = mdev->ldev->dc.on_io_error;
+		was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
+
+		/* current state still has to be D_FAILED,
+		 * there is only one way out: to D_DISKLESS,
+		 * and that may only happen after our put_ldev below. */
+		if (mdev->state.disk != D_FAILED)
+			dev_err(DEV,
+				"ASSERT FAILED: disk is %s during detach\n",
+				drbd_disk_str(mdev->state.disk));
 
 		if (drbd_send_state(mdev))
-			dev_warn(DEV, "Notified peer that my disk is broken.\n");
+			dev_warn(DEV, "Notified peer that I am detaching my disk\n");
 		else
-			dev_err(DEV, "Sending state for drbd_io_error() failed\n");
+			dev_err(DEV, "Sending state for detaching disk failed\n");
 
 		drbd_rs_cancel_all(mdev);
 
-		if (get_ldev_if_state(mdev, D_FAILED)) {
-			eh = mdev->ldev->dc.on_io_error;
-			put_ldev(mdev);
-		}
-		if (eh == EP_CALL_HELPER)
+		/* In case we want to get something to stable storage still,
+		 * this may be the last chance.
+		 * Following put_ldev may transition to D_DISKLESS. */
+		drbd_md_sync(mdev);
+		put_ldev(mdev);
+
+		if (was_io_error && eh == EP_CALL_HELPER)
 			drbd_khelper(mdev, "local-io-error");
 	}
 
+	/* second half of local IO error, failure to attach,
+	 * or administrative detach,
+	 * after local_cnt references have reached zero again */
+	if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
+		/* We must still be diskless,
+		 * re-attach has to be serialized with this! */
+		if (mdev->state.disk != D_DISKLESS)
+			dev_err(DEV,
+				"ASSERT FAILED: disk is %s while going diskless\n",
+				drbd_disk_str(mdev->state.disk));
 
-	/* second half of local IO error handling,
-	 * after local_cnt references have reached zero: */
-	if (os.disk == D_FAILED && ns.disk == D_DISKLESS) {
-		mdev->rs_total = 0;
-		mdev->rs_failed = 0;
-		atomic_set(&mdev->rs_pending_cnt, 0);
-	}
+		mdev->rs_total = 0;
+		mdev->rs_failed = 0;
+		atomic_set(&mdev->rs_pending_cnt, 0);
 
-	if (os.disk > D_DISKLESS && ns.disk == D_DISKLESS) {
-		/* We must still be diskless,
-		 * re-attach has to be serialized with this! */
-		if (mdev->state.disk != D_DISKLESS)
-			dev_err(DEV,
-				"ASSERT FAILED: disk is %s while going diskless\n",
-				drbd_disk_str(mdev->state.disk));
-
-		/* we cannot assert local_cnt == 0 here, as get_ldev_if_state
-		 * will inc/dec it frequently. Since we became D_DISKLESS, no
-		 * one has touched the protected members anymore, though, so we
-		 * are safe to free them here. */
 		if (drbd_send_state(mdev))
-			dev_warn(DEV, "Notified peer that I detached my disk.\n");
+			dev_warn(DEV, "Notified peer that I'm now diskless.\n");
 		else
-			dev_err(DEV, "Sending state for detach failed\n");
-
-		lc_destroy(mdev->resync);
-		mdev->resync = NULL;
-		lc_destroy(mdev->act_log);
-		mdev->act_log = NULL;
-		__no_warn(local,
-			drbd_free_bc(mdev->ldev);
-			mdev->ldev = NULL;);
-
-		if (mdev->md_io_tmpp) {
-			__free_page(mdev->md_io_tmpp);
-			mdev->md_io_tmpp = NULL;
-		}
+			dev_err(DEV, "Sending state for being diskless failed\n");
+		/* corresponding get_ldev in __drbd_set_state
+		 * this may finally trigger drbd_ldev_destroy. */
+		put_ldev(mdev);
 	}
 
 	/* Disks got bigger while they were detached */
@@ -2772,11 +2789,6 @@
 
 	drbd_set_defaults(mdev);
 
-	/* for now, we do NOT yet support it,
-	 * even though we start some framework
-	 * to eventually support barriers */
-	set_bit(NO_BARRIER_SUPP, &mdev->flags);
-
 	atomic_set(&mdev->ap_bio_cnt, 0);
 	atomic_set(&mdev->ap_pending_cnt, 0);
 	atomic_set(&mdev->rs_pending_cnt, 0);
@@ -2842,7 +2854,7 @@
 	drbd_thread_init(mdev, &mdev->asender, drbd_asender);
 
 	mdev->agreed_pro_version = PRO_VERSION_MAX;
-	mdev->write_ordering = WO_bio_barrier;
+	mdev->write_ordering = WO_bdev_flush;
 	mdev->resync_wenr = LC_FREE;
 }
 
@@ -2899,7 +2911,6 @@
 	D_ASSERT(list_empty(&mdev->resync_work.list));
 	D_ASSERT(list_empty(&mdev->unplug_work.list));
 	D_ASSERT(list_empty(&mdev->go_diskless.list));
-
 }
 
 
@@ -3660,6 +3671,8 @@
 
 	get_random_bytes(&val, sizeof(u64));
 	_drbd_uuid_set(mdev, UI_CURRENT, val);
+	/* get it to stable storage _now_ */
+	drbd_md_sync(mdev);
 }
 
 void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
@@ -3756,19 +3769,31 @@
 	return 1;
 }
 
+void drbd_ldev_destroy(struct drbd_conf *mdev)
+{
+	lc_destroy(mdev->resync);
+	mdev->resync = NULL;
+	lc_destroy(mdev->act_log);
+	mdev->act_log = NULL;
+	__no_warn(local,
+		drbd_free_bc(mdev->ldev);
+		mdev->ldev = NULL;);
+
+	if (mdev->md_io_tmpp) {
+		__free_page(mdev->md_io_tmpp);
+		mdev->md_io_tmpp = NULL;
+	}
+	clear_bit(GO_DISKLESS, &mdev->flags);
+}
+
 static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused)
 {
 	D_ASSERT(mdev->state.disk == D_FAILED);
 	/* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
 	 * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
-	 * the protected members anymore, though, so in the after_state_ch work
-	 * it will be safe to free them. */
+	 * the protected members anymore, though, so once put_ldev drops
+	 * local_cnt to zero again, it will be safe to free them. */
 	drbd_force_state(mdev, NS(disk, D_DISKLESS));
-	/* We need to wait for return of references checked out while we still
-	 * have been D_FAILED, though (drbd_md_sync, bitmap io). */
-	wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
-
-	clear_bit(GO_DISKLESS, &mdev->flags);
 	return 1;
 }
 
@@ -3777,9 +3802,6 @@
 	D_ASSERT(mdev->state.disk == D_FAILED);
 	if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
 		drbd_queue_work(&mdev->data.work, &mdev->go_diskless);
-		/* don't drbd_queue_work_front,
-		 * we need to serialize with the after_state_ch work
-		 * of the -> D_FAILED transition. */
 }
 
 /**
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 87925e9..29e5c70 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -870,6 +870,11 @@
 		retcode = ERR_DISK_CONFIGURED;
 		goto fail;
 	}
+	/* It may just now have detached because of an IO error.  Make sure
+	 * drbd_ldev_destroy has already completed, as we may end up here very
+	 * fast, e.g. if someone calls attach from the on-io-error handler
+	 * to realize a "hot spare" feature (not that I'd recommend that) */
+	wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
 
 	/* allocation not in the IO path, cqueue thread context */
 	nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
@@ -1098,9 +1103,9 @@
 	/* Reset the "barriers don't work" bits here, then force meta data to
 	 * be written, to ensure we determine if barriers are supported. */
 	if (nbc->dc.no_md_flush)
-		set_bit(MD_NO_BARRIER, &mdev->flags);
+		set_bit(MD_NO_FUA, &mdev->flags);
 	else
-		clear_bit(MD_NO_BARRIER, &mdev->flags);
+		clear_bit(MD_NO_FUA, &mdev->flags);
 
 	/* Point of no return reached.
 	 * Devices and memory are no longer released by error cleanup below.
@@ -1112,8 +1117,8 @@
 	nbc = NULL;
 	resync_lru = NULL;
 
-	mdev->write_ordering = WO_bio_barrier;
-	drbd_bump_write_ordering(mdev, WO_bio_barrier);
+	mdev->write_ordering = WO_bdev_flush;
+	drbd_bump_write_ordering(mdev, WO_bdev_flush);
 
 	if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
 		set_bit(CRASHED_PRIMARY, &mdev->flags);
@@ -1262,7 +1267,7 @@
  force_diskless_dec:
 	put_ldev(mdev);
  force_diskless:
-	drbd_force_state(mdev, NS(disk, D_DISKLESS));
+	drbd_force_state(mdev, NS(disk, D_FAILED));
 	drbd_md_sync(mdev);
  release_bdev2_fail:
 	if (nbc)
@@ -1285,10 +1290,19 @@
 	return 0;
 }
 
+/* Detaching the disk is a multi-stage process.  First we need to lock out
+ * application IO, in-flight IO, and IO stuck in drbd_al_begin_io.
+ * Then we transition to D_DISKLESS and wait for put_ldev() to return all
+ * internal references as well.
+ * Only then have we finally detached. */
 static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
 			  struct drbd_nl_cfg_reply *reply)
 {
+	drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
 	reply->ret_code = drbd_request_state(mdev, NS(disk, D_DISKLESS));
+	if (mdev->state.disk == D_DISKLESS)
+		wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
+	drbd_resume_io(mdev);
 	return 0;
 }
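
A minimal userspace sketch of the reference-drain pattern implemented above,
with hypothetical names (struct dev_state, put_ref, detach) and pthreads
standing in for wait_event()/wake_up() on misc_wait: flip the state first so
no new references are handed out, then sleep until the last holder puts its
reference back.

#include <pthread.h>
#include <stdio.h>

struct dev_state {
	pthread_mutex_t lock;
	pthread_cond_t  all_refs_gone;
	int local_cnt;		/* plays the role of mdev->local_cnt */
	int diskless;		/* plays the role of disk == D_DISKLESS */
};

/* analogue of put_ldev(): drop one reference, wake the waiter on the last */
static void put_ref(struct dev_state *s)
{
	pthread_mutex_lock(&s->lock);
	if (--s->local_cnt == 0)
		pthread_cond_broadcast(&s->all_refs_gone);
	pthread_mutex_unlock(&s->lock);
}

/* analogue of the detach path: change state, then wait for local_cnt == 0 */
static void detach(struct dev_state *s)
{
	pthread_mutex_lock(&s->lock);
	s->diskless = 1;			/* new openers now fail */
	while (s->local_cnt > 0)		/* wait_event(misc_wait, ...) */
		pthread_cond_wait(&s->all_refs_gone, &s->lock);
	pthread_mutex_unlock(&s->lock);
	printf("all references returned; safe to destroy the backing device\n");
}

int main(void)
{
	struct dev_state s = {
		PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 1, 0
	};
	put_ref(&s);	/* the last holder returns its reference */
	detach(&s);	/* finds local_cnt == 0 and proceeds immediately */
	return 0;
}
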
 
@@ -1953,7 +1967,6 @@
 	if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
 		drbd_uuid_new_current(mdev);
 		clear_bit(NEW_CUR_UUID, &mdev->flags);
-		drbd_md_sync(mdev);
 	}
 	drbd_suspend_io(mdev);
 	reply->ret_code = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index ad325c5..7e6ac30 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -158,7 +158,6 @@
 		[WO_none] = 'n',
 		[WO_drain_io] = 'd',
 		[WO_bdev_flush] = 'f',
-		[WO_bio_barrier] = 'b',
 	};
 
 	seq_printf(seq, "version: " REL_VERSION " (api:%d/proto:%d-%d)\n%s\n",
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index efd6169..d299fe9 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -49,11 +49,6 @@
 
 #include "drbd_vli.h"
 
-struct flush_work {
-	struct drbd_work w;
-	struct drbd_epoch *epoch;
-};
-
 enum finish_epoch {
 	FE_STILL_LIVE,
 	FE_DESTROYED,
@@ -66,16 +61,6 @@
 static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
 static int e_end_block(struct drbd_conf *, struct drbd_work *, int);
 
-static struct drbd_epoch *previous_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch)
-{
-	struct drbd_epoch *prev;
-	spin_lock(&mdev->epoch_lock);
-	prev = list_entry(epoch->list.prev, struct drbd_epoch, list);
-	if (prev == epoch || prev == mdev->current_epoch)
-		prev = NULL;
-	spin_unlock(&mdev->epoch_lock);
-	return prev;
-}
 
 #define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)
 
@@ -981,7 +966,7 @@
 	return TRUE;
 }
 
-static enum finish_epoch drbd_flush_after_epoch(struct drbd_conf *mdev, struct drbd_epoch *epoch)
+static void drbd_flush(struct drbd_conf *mdev)
 {
 	int rv;
 
@@ -997,24 +982,6 @@
 		}
 		put_ldev(mdev);
 	}
-
-	return drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE);
-}
-
-static int w_flush(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
-{
-	struct flush_work *fw = (struct flush_work *)w;
-	struct drbd_epoch *epoch = fw->epoch;
-
-	kfree(w);
-
-	if (!test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags))
-		drbd_flush_after_epoch(mdev, epoch);
-
-	drbd_may_finish_epoch(mdev, epoch, EV_PUT |
-			      (mdev->state.conn < C_CONNECTED ? EV_CLEANUP : 0));
-
-	return 1;
 }
 
 /**
@@ -1027,15 +994,13 @@
 					       struct drbd_epoch *epoch,
 					       enum epoch_event ev)
 {
-	int finish, epoch_size;
+	int epoch_size;
 	struct drbd_epoch *next_epoch;
-	int schedule_flush = 0;
 	enum finish_epoch rv = FE_STILL_LIVE;
 
 	spin_lock(&mdev->epoch_lock);
 	do {
 		next_epoch = NULL;
-		finish = 0;
 
 		epoch_size = atomic_read(&epoch->epoch_size);
 
@@ -1045,16 +1010,6 @@
 			break;
 		case EV_GOT_BARRIER_NR:
 			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
-
-			/* Special case: If we just switched from WO_bio_barrier to
-			   WO_bdev_flush we should not finish the current epoch */
-			if (test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags) && epoch_size == 1 &&
-			    mdev->write_ordering != WO_bio_barrier &&
-			    epoch == mdev->current_epoch)
-				clear_bit(DE_CONTAINS_A_BARRIER, &epoch->flags);
-			break;
-		case EV_BARRIER_DONE:
-			set_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags);
 			break;
 		case EV_BECAME_LAST:
 			/* nothing to do*/
@@ -1063,23 +1018,7 @@
 
 		if (epoch_size != 0 &&
 		    atomic_read(&epoch->active) == 0 &&
-		    test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) &&
-		    epoch->list.prev == &mdev->current_epoch->list &&
-		    !test_bit(DE_IS_FINISHING, &epoch->flags)) {
-			/* Nearly all conditions are met to finish that epoch... */
-			if (test_bit(DE_BARRIER_IN_NEXT_EPOCH_DONE, &epoch->flags) ||
-			    mdev->write_ordering == WO_none ||
-			    (epoch_size == 1 && test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) ||
-			    ev & EV_CLEANUP) {
-				finish = 1;
-				set_bit(DE_IS_FINISHING, &epoch->flags);
-			} else if (!test_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags) &&
-				 mdev->write_ordering == WO_bio_barrier) {
-				atomic_inc(&epoch->active);
-				schedule_flush = 1;
-			}
-		}
-		if (finish) {
+		    test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags)) {
 			if (!(ev & EV_CLEANUP)) {
 				spin_unlock(&mdev->epoch_lock);
 				drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
@@ -1102,6 +1041,7 @@
 				/* atomic_set(&epoch->active, 0); is already zero */
 				if (rv == FE_STILL_LIVE)
 					rv = FE_RECYCLED;
+				wake_up(&mdev->ee_wait);
 			}
 		}
 
@@ -1113,22 +1053,6 @@
 
 	spin_unlock(&mdev->epoch_lock);
 
-	if (schedule_flush) {
-		struct flush_work *fw;
-		fw = kmalloc(sizeof(*fw), GFP_ATOMIC);
-		if (fw) {
-			fw->w.cb = w_flush;
-			fw->epoch = epoch;
-			drbd_queue_work(&mdev->data.work, &fw->w);
-		} else {
-			dev_warn(DEV, "Could not kmalloc a flush_work obj\n");
-			set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
-			/* That is not a recursion, only one level */
-			drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE);
-			drbd_may_finish_epoch(mdev, epoch, EV_PUT);
-		}
-	}
-
 	return rv;
 }
 
@@ -1144,19 +1068,16 @@
 		[WO_none] = "none",
 		[WO_drain_io] = "drain",
 		[WO_bdev_flush] = "flush",
-		[WO_bio_barrier] = "barrier",
 	};
 
 	pwo = mdev->write_ordering;
 	wo = min(pwo, wo);
-	if (wo == WO_bio_barrier && mdev->ldev->dc.no_disk_barrier)
-		wo = WO_bdev_flush;
 	if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
 		wo = WO_drain_io;
 	if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
 		wo = WO_none;
 	mdev->write_ordering = wo;
-	if (pwo != mdev->write_ordering || wo == WO_bio_barrier)
+	if (pwo != mdev->write_ordering || wo == WO_bdev_flush)
 		dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
 }
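
The downgrade chain above only ever weakens the write-ordering method; a
small self-contained sketch of that logic follows (illustrative enum and
struct names, not the driver's headers).

#include <stdio.h>

enum write_ordering_e { WO_none, WO_drain_io, WO_bdev_flush };

struct disk_caps { int no_disk_flush, no_disk_drain; };

static enum write_ordering_e
bump_write_ordering(enum write_ordering_e cur, enum write_ordering_e wo,
		    const struct disk_caps *dc)
{
	if (wo > cur)				/* min(pwo, wo): never upgrade */
		wo = cur;
	if (wo == WO_bdev_flush && dc->no_disk_flush)
		wo = WO_drain_io;		/* device cannot flush: drain */
	if (wo == WO_drain_io && dc->no_disk_drain)
		wo = WO_none;			/* cannot drain either: none */
	return wo;
}

int main(void)
{
	struct disk_caps dc = { .no_disk_flush = 1, .no_disk_drain = 0 };
	/* asks for flush, but the device forbids it: falls back to drain (1) */
	printf("%d\n", bump_write_ordering(WO_bdev_flush, WO_bdev_flush, &dc));
	return 0;
}
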
 
@@ -1192,7 +1113,7 @@
 	bio->bi_sector = sector;
 	bio->bi_bdev = mdev->ldev->backing_bdev;
 	/* we special case some flags in the multi-bio case, see below
-	 * (REQ_UNPLUG, REQ_HARDBARRIER) */
+	 * (REQ_UNPLUG) */
 	bio->bi_rw = rw;
 	bio->bi_private = e;
 	bio->bi_end_io = drbd_endio_sec;
@@ -1226,11 +1147,6 @@
 			bio->bi_rw &= ~REQ_UNPLUG;
 
 		drbd_generic_make_request(mdev, fault_type, bio);
-
-		/* strip off REQ_HARDBARRIER,
-		 * unless it is the first or last bio */
-		if (bios && bios->bi_next)
-			bios->bi_rw &= ~REQ_HARDBARRIER;
 	} while (bios);
 	maybe_kick_lo(mdev);
 	return 0;
@@ -1244,45 +1160,9 @@
 	return -ENOMEM;
 }
 
-/**
- * w_e_reissue() - Worker callback; Resubmit a bio, without REQ_HARDBARRIER set
- * @mdev:	DRBD device.
- * @w:		work object.
- * @cancel:	The connection will be closed anyways (unused in this callback)
- */
-int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __releases(local)
-{
-	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
-	/* We leave DE_CONTAINS_A_BARRIER and EE_IS_BARRIER in place,
-	   (and DE_BARRIER_IN_NEXT_EPOCH_ISSUED in the previous Epoch)
-	   so that we can finish that epoch in drbd_may_finish_epoch().
-	   That is necessary if we already have a long chain of Epochs, before
-	   we realize that REQ_HARDBARRIER is actually not supported */
-
-	/* As long as the -ENOTSUPP on the barrier is reported immediately
-	   that will never trigger. If it is reported late, we will just
-	   print that warning and continue correctly for all future requests
-	   with WO_bdev_flush */
-	if (previous_epoch(mdev, e->epoch))
-		dev_warn(DEV, "Write ordering was not enforced (one time event)\n");
-
-	/* we still have a local reference,
-	 * get_ldev was done in receive_Data. */
-
-	e->w.cb = e_end_block;
-	if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_DT_WR) != 0) {
-		/* drbd_submit_ee fails for one reason only:
-		 * if was not able to allocate sufficient bios.
-		 * requeue, try again later. */
-		e->w.cb = w_e_reissue;
-		drbd_queue_work(&mdev->data.work, &e->w);
-	}
-	return 1;
-}
-
 static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
 {
-	int rv, issue_flush;
+	int rv;
 	struct p_barrier *p = &mdev->data.rbuf.barrier;
 	struct drbd_epoch *epoch;
 
@@ -1300,44 +1180,40 @@
 	 * Therefore we must send the barrier_ack after the barrier request was
 	 * completed. */
 	switch (mdev->write_ordering) {
-	case WO_bio_barrier:
 	case WO_none:
 		if (rv == FE_RECYCLED)
 			return TRUE;
-		break;
+
+		/* receiver context, in the writeout path of the other node.
+		 * avoid potential distributed deadlock */
+		epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
+		if (epoch)
+			break;
+		else
+			dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
+			/* Fall through */
 
 	case WO_bdev_flush:
 	case WO_drain_io:
-		if (rv == FE_STILL_LIVE) {
-			set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
-			drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
-			rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
-		}
-		if (rv == FE_RECYCLED)
-			return TRUE;
-
-		/* The asender will send all the ACKs and barrier ACKs out, since
-		   all EEs moved from the active_ee to the done_ee. We need to
-		   provide a new epoch object for the EEs that come in soon */
-		break;
-	}
-
-	/* receiver context, in the writeout path of the other node.
-	 * avoid potential distributed deadlock */
-	epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
-	if (!epoch) {
-		dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");
-		issue_flush = !test_and_set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
 		drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
-		if (issue_flush) {
-			rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
-			if (rv == FE_RECYCLED)
-				return TRUE;
+		drbd_flush(mdev);
+
+		if (atomic_read(&mdev->current_epoch->epoch_size)) {
+			epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
+			if (epoch)
+				break;
 		}
 
-		drbd_wait_ee_list_empty(mdev, &mdev->done_ee);
+		epoch = mdev->current_epoch;
+		wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
+
+		D_ASSERT(atomic_read(&epoch->active) == 0);
+		D_ASSERT(epoch->flags == 0);
 
 		return TRUE;
+	default:
+		dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering);
+		return FALSE;
 	}
 
 	epoch->flags = 0;
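
The allocation strategy in receive_Barrier above prefers a fresh epoch object
but degrades gracefully under memory pressure. A simplified sketch of that
fallback follows; the real code also retries the allocation once after the
flush, and only recycles the current epoch if that too fails.

#include <stdlib.h>

struct epoch { int epoch_size; };

static struct epoch the_current;	/* stand-in for mdev->current_epoch */

/* stand-in for drbd_wait_ee_list_empty() + drbd_flush() + the ee_wait */
static void flush_and_drain(void)
{
	the_current.epoch_size = 0;
}

struct epoch *epoch_for_barrier(void)
{
	struct epoch *e = malloc(sizeof(*e));	/* kmalloc(..., GFP_NOIO) */
	if (e)
		return e;		/* fast path: hand out a fresh epoch */
	flush_and_drain();		/* slow path: settle outstanding IO */
	return &the_current;		/* and recycle the now-empty epoch */
}
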
@@ -1652,15 +1528,8 @@
 {
 	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
 	sector_t sector = e->sector;
-	struct drbd_epoch *epoch;
 	int ok = 1, pcmd;
 
-	if (e->flags & EE_IS_BARRIER) {
-		epoch = previous_epoch(mdev, e->epoch);
-		if (epoch)
-			drbd_may_finish_epoch(mdev, epoch, EV_BARRIER_DONE + (cancel ? EV_CLEANUP : 0));
-	}
-
 	if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
 		if (likely((e->flags & EE_WAS_ERROR) == 0)) {
 			pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
@@ -1817,27 +1686,6 @@
 	e->epoch = mdev->current_epoch;
 	atomic_inc(&e->epoch->epoch_size);
 	atomic_inc(&e->epoch->active);
-
-	if (mdev->write_ordering == WO_bio_barrier && atomic_read(&e->epoch->epoch_size) == 1) {
-		struct drbd_epoch *epoch;
-		/* Issue a barrier if we start a new epoch, and the previous epoch
-		   was not a epoch containing a single request which already was
-		   a Barrier. */
-		epoch = list_entry(e->epoch->list.prev, struct drbd_epoch, list);
-		if (epoch == e->epoch) {
-			set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
-			rw |= REQ_HARDBARRIER;
-			e->flags |= EE_IS_BARRIER;
-		} else {
-			if (atomic_read(&epoch->epoch_size) > 1 ||
-			    !test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) {
-				set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
-				set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
-				rw |= REQ_HARDBARRIER;
-				e->flags |= EE_IS_BARRIER;
-			}
-		}
-	}
 	spin_unlock(&mdev->epoch_lock);
 
 	dp_flags = be32_to_cpu(p->dp_flags);
@@ -1995,10 +1843,11 @@
 		break;
 	}
 
-	if (mdev->state.pdsk == D_DISKLESS) {
+	if (mdev->state.pdsk < D_INCONSISTENT) {
 		/* In case we have the only disk of the cluster, */
 		drbd_set_out_of_sync(mdev, e->sector, e->size);
 		e->flags |= EE_CALL_AL_COMPLETE_IO;
+		e->flags &= ~EE_MAY_SET_IN_SYNC;
 		drbd_al_begin_io(mdev, e->sector);
 	}
 
@@ -3362,7 +3211,7 @@
 		if (ns.conn == C_MASK) {
 			ns.conn = C_CONNECTED;
 			if (mdev->state.disk == D_NEGOTIATING) {
-				drbd_force_state(mdev, NS(disk, D_DISKLESS));
+				drbd_force_state(mdev, NS(disk, D_FAILED));
 			} else if (peer_state.disk == D_NEGOTIATING) {
 				dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
 				peer_state.disk = D_DISKLESS;
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 9e91a25..11a75d3 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -258,7 +258,7 @@
 		if (!hlist_unhashed(&req->colision))
 			hlist_del(&req->colision);
 		else
-			D_ASSERT((s & RQ_NET_MASK) == 0);
+			D_ASSERT((s & (RQ_NET_MASK & ~RQ_NET_DONE)) == 0);
 
 		/* for writes we need to do some extra housekeeping */
 		if (rw == WRITE)
@@ -813,7 +813,8 @@
 			     mdev->state.conn >= C_CONNECTED));
 
 	if (!(local || remote) && !is_susp(mdev->state)) {
-		dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
+		if (__ratelimit(&drbd_ratelimit_state))
+			dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
 		goto fail_free_complete;
 	}
 
@@ -942,12 +943,21 @@
 	if (local) {
 		req->private_bio->bi_bdev = mdev->ldev->backing_bdev;
 
-		if (FAULT_ACTIVE(mdev, rw == WRITE ? DRBD_FAULT_DT_WR
-				     : rw == READ  ? DRBD_FAULT_DT_RD
-				     :               DRBD_FAULT_DT_RA))
+		/* State may have changed since we grabbed our reference on the
+		 * mdev->ldev member. Double check, and short-circuit to endio.
+		 * In case the last activity log transaction failed to get on
+		 * stable storage, and this is a WRITE, we may not even submit
+		 * this bio. */
+		if (get_ldev(mdev)) {
+			if (FAULT_ACTIVE(mdev, rw == WRITE ? DRBD_FAULT_DT_WR
+					     : rw == READ  ? DRBD_FAULT_DT_RD
+					     :               DRBD_FAULT_DT_RA))
+				bio_endio(req->private_bio, -EIO);
+			else
+				generic_make_request(req->private_bio);
+			put_ldev(mdev);
+		} else
 			bio_endio(req->private_bio, -EIO);
-		else
-			generic_make_request(req->private_bio);
 	}
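
The hunk above wraps the submission in get_ldev()/put_ldev() because the disk
state can change between preparing req->private_bio and submitting it. A
hypothetical userspace shape of that double-check (names are illustrative):

#include <stdbool.h>
#include <stdio.h>

struct dev { int refs; bool has_disk; };

/* analogue of get_ldev(): only hand out a reference while the disk exists */
static bool get_ref(struct dev *d)
{
	if (!d->has_disk)
		return false;
	d->refs++;
	return true;
}

static void put_ref(struct dev *d) { d->refs--; }

static void submit(struct dev *d)
{
	if (get_ref(d)) {
		printf("submit bio to backing device\n");
		put_ref(d);
	} else {
		printf("complete bio with -EIO\n");	/* bio_endio(..., -EIO) */
	}
}

int main(void)
{
	struct dev d = { .refs = 0, .has_disk = false };
	submit(&d);	/* disk already gone: takes the -EIO path */
	return 0;
}
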
 
 	/* we need to plug ALWAYS since we possibly need to kick lo_dev.
@@ -1022,20 +1032,6 @@
 		return 0;
 	}
 
-	/* Reject barrier requests if we know the underlying device does
-	 * not support them.
-	 * XXX: Need to get this info from peer as well some how so we
-	 * XXX: reject if EITHER side/data/metadata area does not support them.
-	 *
-	 * because of those XXX, this is not yet enabled,
-	 * i.e. in drbd_init_set_defaults we set the NO_BARRIER_SUPP bit.
-	 */
-	if (unlikely(bio->bi_rw & REQ_HARDBARRIER) && test_bit(NO_BARRIER_SUPP, &mdev->flags)) {
-		/* dev_warn(DEV, "Rejecting barrier request as underlying device does not support\n"); */
-		bio_endio(bio, -EOPNOTSUPP);
-		return 0;
-	}
-
 	/*
 	 * what we "blindly" assume:
 	 */
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 108d580..b0551ba 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -102,12 +102,6 @@
 	put_ldev(mdev);
 }
 
-static int is_failed_barrier(int ee_flags)
-{
-	return (ee_flags & (EE_IS_BARRIER|EE_WAS_ERROR|EE_RESUBMITTED))
-			== (EE_IS_BARRIER|EE_WAS_ERROR);
-}
-
 /* writes on behalf of the partner, or resync writes,
  * "submitted" by the receiver, final stage.  */
 static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(local)
@@ -119,21 +113,6 @@
 	int is_syncer_req;
 	int do_al_complete_io;
 
-	/* if this is a failed barrier request, disable use of barriers,
-	 * and schedule for resubmission */
-	if (is_failed_barrier(e->flags)) {
-		drbd_bump_write_ordering(mdev, WO_bdev_flush);
-		spin_lock_irqsave(&mdev->req_lock, flags);
-		list_del(&e->w.list);
-		e->flags = (e->flags & ~EE_WAS_ERROR) | EE_RESUBMITTED;
-		e->w.cb = w_e_reissue;
-		/* put_ldev actually happens below, once we come here again. */
-		__release(local);
-		spin_unlock_irqrestore(&mdev->req_lock, flags);
-		drbd_queue_work(&mdev->data.work, &e->w);
-		return;
-	}
-
 	D_ASSERT(e->block_id != ID_VACANT);
 
 	/* after we moved e to done_ee,
@@ -925,7 +904,7 @@
 	drbd_md_sync(mdev);
 
 	if (test_and_clear_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags)) {
-		dev_warn(DEV, "Writing the whole bitmap, due to failed kmalloc\n");
+		dev_info(DEV, "Writing the whole bitmap\n");
 		drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL, "write from resync_finished");
 	}
 
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 767107c..3951020 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4363,9 +4363,9 @@
 out_put_disk:
 	while (dr--) {
 		del_timer(&motor_off_timer[dr]);
-		put_disk(disks[dr]);
 		if (disks[dr]->queue)
 			blk_cleanup_queue(disks[dr]->queue);
+		put_disk(disks[dr]);
 	}
 	return err;
 }
@@ -4573,8 +4573,8 @@
 			device_remove_file(&floppy_device[drive].dev, &dev_attr_cmos);
 			platform_device_unregister(&floppy_device[drive]);
 		}
-		put_disk(disks[drive]);
 		blk_cleanup_queue(disks[drive]->queue);
+		put_disk(disks[drive]);
 	}
 
 	del_timer_sync(&fd_timeout);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 1e5284e..7ea0bea 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -481,12 +481,6 @@
 	if (bio_rw(bio) == WRITE) {
 		struct file *file = lo->lo_backing_file;
 
-		/* REQ_HARDBARRIER is deprecated */
-		if (bio->bi_rw & REQ_HARDBARRIER) {
-			ret = -EOPNOTSUPP;
-			goto out;
-		}
-
 		if (bio->bi_rw & REQ_FLUSH) {
 			ret = vfs_fsync(file, 0);
 			if (unlikely(ret && ret != -EINVAL)) {
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 06e2812..255035c 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -289,8 +289,6 @@
 
 	ring_req->operation = rq_data_dir(req) ?
 		BLKIF_OP_WRITE : BLKIF_OP_READ;
-	if (req->cmd_flags & REQ_HARDBARRIER)
-		ring_req->operation = BLKIF_OP_WRITE_BARRIER;
 
 	ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
 	BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 3a9c014..ba53ec9 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -2,24 +2,10 @@
 # Makefile for the kernel character device drivers.
 #
 
-#
-# This file contains the font map for the default (hardware) font
-#
-FONTMAPFILE = cp437.uni
-
-obj-y	 += mem.o random.o tty_io.o n_tty.o tty_ioctl.o tty_ldisc.o tty_buffer.o tty_port.o
-
-obj-y				+= tty_mutex.o
-obj-$(CONFIG_LEGACY_PTYS)	+= pty.o
-obj-$(CONFIG_UNIX98_PTYS)	+= pty.o
+obj-y				+= mem.o random.o
 obj-$(CONFIG_TTY_PRINTK)	+= ttyprintk.o
 obj-y				+= misc.o
-obj-$(CONFIG_VT)		+= vt_ioctl.o vc_screen.o selection.o keyboard.o
 obj-$(CONFIG_BFIN_JTAG_COMM)	+= bfin_jtag_comm.o
-obj-$(CONFIG_CONSOLE_TRANSLATIONS) += consolemap.o consolemap_deftbl.o
-obj-$(CONFIG_HW_CONSOLE)	+= vt.o defkeymap.o
-obj-$(CONFIG_AUDIT)		+= tty_audit.o
-obj-$(CONFIG_MAGIC_SYSRQ)	+= sysrq.o
 obj-$(CONFIG_MVME147_SCC)	+= generic_serial.o vme_scc.o
 obj-$(CONFIG_MVME162_SCC)	+= generic_serial.o vme_scc.o
 obj-$(CONFIG_BVME6000_SCC)	+= generic_serial.o vme_scc.o
@@ -41,8 +27,6 @@
 obj-$(CONFIG_SYNCLINK)		+= synclink.o
 obj-$(CONFIG_SYNCLINKMP)	+= synclinkmp.o
 obj-$(CONFIG_SYNCLINK_GT)	+= synclink_gt.o
-obj-$(CONFIG_N_HDLC)		+= n_hdlc.o
-obj-$(CONFIG_N_GSM)		+= n_gsm.o
 obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o
 obj-$(CONFIG_SX)		+= sx.o generic_serial.o
 obj-$(CONFIG_RIO)		+= rio/ generic_serial.o
@@ -74,7 +58,6 @@
 obj-$(CONFIG_APM_EMULATION)	+= apm-emulation.o
 
 obj-$(CONFIG_DTLK)		+= dtlk.o
-obj-$(CONFIG_R3964)		+= n_r3964.o
 obj-$(CONFIG_APPLICOM)		+= applicom.o
 obj-$(CONFIG_SONYPI)		+= sonypi.o
 obj-$(CONFIG_RTC)		+= rtc.o
@@ -115,28 +98,3 @@
 
 obj-$(CONFIG_JS_RTC)		+= js-rtc.o
 js-rtc-y = rtc.o
-
-# Files generated that shall be removed upon make clean
-clean-files := consolemap_deftbl.c defkeymap.c
-
-quiet_cmd_conmk = CONMK   $@
-      cmd_conmk = scripts/conmakehash $< > $@
-
-$(obj)/consolemap_deftbl.c: $(src)/$(FONTMAPFILE)
-	$(call cmd,conmk)
-
-$(obj)/defkeymap.o:  $(obj)/defkeymap.c
-
-# Uncomment if you're changing the keymap and have an appropriate
-# loadkeys version for the map. By default, we'll use the shipped
-# versions.
-# GENERATE_KEYMAP := 1
-
-ifdef GENERATE_KEYMAP
-
-$(obj)/defkeymap.c: $(obj)/%.c: $(src)/%.map
-	loadkeys --mktable $< > $@.tmp
-	sed -e 's/^static *//' $@.tmp > $@
-	rm $@.tmp
-
-endif
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 6b6760e..9272c38 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -1210,14 +1210,14 @@
 	unsigned int gfdt = flags & AGP_USER_CACHED_MEMORY_GFDT;
 	u32 pte_flags;
 
-	if (type_mask == AGP_USER_UNCACHED_MEMORY)
+	if (type_mask == AGP_USER_MEMORY)
 		pte_flags = GEN6_PTE_UNCACHED | I810_PTE_VALID;
 	else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC) {
-		pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
+		pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
 		if (gfdt)
 			pte_flags |= GEN6_PTE_GFDT;
 	} else { /* set 'normal'/'cached' to LLC by default */
-		pte_flags = GEN6_PTE_LLC_MLC | I810_PTE_VALID;
+		pte_flags = GEN6_PTE_LLC | I810_PTE_VALID;
 		if (gfdt)
 			pte_flags |= GEN6_PTE_GFDT;
 	}
diff --git a/drivers/char/amiserial.c b/drivers/char/amiserial.c
index b0a7046..c0bd6f4 100644
--- a/drivers/char/amiserial.c
+++ b/drivers/char/amiserial.c
@@ -1299,7 +1299,6 @@
 {
 	struct async_struct * info = tty->driver_data;
 	struct async_icount cprev, cnow;	/* kernel counter temps */
-	struct serial_icounter_struct icount;
 	void __user *argp = (void __user *)arg;
 	unsigned long flags;
 
diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c
index dd3f9b1..294d03e 100644
--- a/drivers/char/nozomi.c
+++ b/drivers/char/nozomi.c
@@ -1828,7 +1828,6 @@
 		      unsigned int cmd, unsigned long arg)
 {
 	struct port *port = tty->driver_data;
-	void __user *argp = (void __user *)arg;
 	int rval = -ENOIOCTLCMD;
 
 	DBG1("******** IOCTL, cmd: %d", cmd);
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
index bfc10f8..eaa4199 100644
--- a/drivers/char/pcmcia/synclink_cs.c
+++ b/drivers/char/pcmcia/synclink_cs.c
@@ -2796,6 +2796,7 @@
 	.hangup = mgslpc_hangup,
 	.tiocmget = tiocmget,
 	.tiocmset = tiocmset,
+	.get_icount = mgslpc_get_icount,
 	.proc_fops = &mgslpc_proc_fops,
 };
 
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index a446116..d68d3aa 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -616,13 +616,9 @@
 	/* get hold of clock */
 	p->clk = clk_get(&p->pdev->dev, "cmt_fck");
 	if (IS_ERR(p->clk)) {
-		dev_warn(&p->pdev->dev, "using deprecated clock lookup\n");
-		p->clk = clk_get(&p->pdev->dev, cfg->clk);
-		if (IS_ERR(p->clk)) {
-			dev_err(&p->pdev->dev, "cannot get clock\n");
-			ret = PTR_ERR(p->clk);
-			goto err1;
-		}
+		dev_err(&p->pdev->dev, "cannot get clock\n");
+		ret = PTR_ERR(p->clk);
+		goto err1;
 	}
 
 	if (resource_size(res) == 6) {
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index ef7a5be..40630cb 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -287,13 +287,9 @@
 	/* get hold of clock */
 	p->clk = clk_get(&p->pdev->dev, "mtu2_fck");
 	if (IS_ERR(p->clk)) {
-		dev_warn(&p->pdev->dev, "using deprecated clock lookup\n");
-		p->clk = clk_get(&p->pdev->dev, cfg->clk);
-		if (IS_ERR(p->clk)) {
-			dev_err(&p->pdev->dev, "cannot get clock\n");
-			ret = PTR_ERR(p->clk);
-			goto err1;
-		}
+		dev_err(&p->pdev->dev, "cannot get clock\n");
+		ret = PTR_ERR(p->clk);
+		goto err1;
 	}
 
 	return sh_mtu2_register(p, (char *)dev_name(&p->pdev->dev),
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index de71590..36aba99 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -393,13 +393,9 @@
 	/* get hold of clock */
 	p->clk = clk_get(&p->pdev->dev, "tmu_fck");
 	if (IS_ERR(p->clk)) {
-		dev_warn(&p->pdev->dev, "using deprecated clock lookup\n");
-		p->clk = clk_get(&p->pdev->dev, cfg->clk);
-		if (IS_ERR(p->clk)) {
-			dev_err(&p->pdev->dev, "cannot get clock\n");
-			ret = PTR_ERR(p->clk);
-			goto err1;
-		}
+		dev_err(&p->pdev->dev, "cannot get clock\n");
+		ret = PTR_ERR(p->clk);
+		goto err1;
 	}
 
 	return sh_tmu_register(p, (char *)dev_name(&p->pdev->dev),
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 9dcb17d..84eb607 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -577,17 +577,11 @@
 	return ret;
 }
 
-static int ar_context_add_page(struct ar_context *ctx)
+static void ar_context_link_page(struct ar_context *ctx,
+				 struct ar_buffer *ab, dma_addr_t ab_bus)
 {
-	struct device *dev = ctx->ohci->card.device;
-	struct ar_buffer *ab;
-	dma_addr_t uninitialized_var(ab_bus);
 	size_t offset;
 
-	ab = dma_alloc_coherent(dev, PAGE_SIZE, &ab_bus, GFP_ATOMIC);
-	if (ab == NULL)
-		return -ENOMEM;
-
 	ab->next = NULL;
 	memset(&ab->descriptor, 0, sizeof(ab->descriptor));
 	ab->descriptor.control        = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
@@ -606,6 +600,19 @@
 
 	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
 	flush_writes(ctx->ohci);
+}
+
+static int ar_context_add_page(struct ar_context *ctx)
+{
+	struct device *dev = ctx->ohci->card.device;
+	struct ar_buffer *ab;
+	dma_addr_t uninitialized_var(ab_bus);
+
+	ab = dma_alloc_coherent(dev, PAGE_SIZE, &ab_bus, GFP_ATOMIC);
+	if (ab == NULL)
+		return -ENOMEM;
+
+	ar_context_link_page(ctx, ab, ab_bus);
 
 	return 0;
 }
@@ -730,16 +737,17 @@
 static void ar_context_tasklet(unsigned long data)
 {
 	struct ar_context *ctx = (struct ar_context *)data;
-	struct fw_ohci *ohci = ctx->ohci;
 	struct ar_buffer *ab;
 	struct descriptor *d;
 	void *buffer, *end;
+	__le16 res_count;
 
 	ab = ctx->current_buffer;
 	d = &ab->descriptor;
 
-	if (d->res_count == 0) {
-		size_t size, rest, offset;
+	res_count = ACCESS_ONCE(d->res_count);
+	if (res_count == 0) {
+		size_t size, size2, rest, pktsize, size3, offset;
 		dma_addr_t start_bus;
 		void *start;
 
@@ -750,29 +758,63 @@
 		 */
 
 		offset = offsetof(struct ar_buffer, data);
-		start = buffer = ab;
+		start = ab;
 		start_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
+		buffer = ab->data;
 
 		ab = ab->next;
 		d = &ab->descriptor;
-		size = buffer + PAGE_SIZE - ctx->pointer;
+		size = start + PAGE_SIZE - ctx->pointer;
+		/* valid buffer data in the next page */
 		rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count);
+		/* what actually fits in this page */
+		size2 = min(rest, (size_t)PAGE_SIZE - offset - size);
 		memmove(buffer, ctx->pointer, size);
-		memcpy(buffer + size, ab->data, rest);
-		ctx->current_buffer = ab;
-		ctx->pointer = (void *) ab->data + rest;
-		end = buffer + size + rest;
+		memcpy(buffer + size, ab->data, size2);
 
-		while (buffer < end)
-			buffer = handle_ar_packet(ctx, buffer);
+		while (size > 0) {
+			void *next = handle_ar_packet(ctx, buffer);
+			pktsize = next - buffer;
+			if (pktsize >= size) {
+				/*
+				 * We have handled all the data that was
+				 * originally in this page, so we can now
+				 * continue in the next page.
+				 */
+				buffer = next;
+				break;
+			}
+			/* move the next packet to the start of the buffer */
+			memmove(buffer, next, size + size2 - pktsize);
+			size -= pktsize;
+			/* fill up this page again */
+			size3 = min(rest - size2,
+				    (size_t)PAGE_SIZE - offset - size - size2);
+			memcpy(buffer + size + size2,
+			       (void *) ab->data + size2, size3);
+			size2 += size3;
+		}
 
-		dma_free_coherent(ohci->card.device, PAGE_SIZE,
-				  start, start_bus);
-		ar_context_add_page(ctx);
+		if (rest > 0) {
+			/* handle the packets that are fully in the next page */
+			buffer = (void *) ab->data +
+					(buffer - (start + offset + size));
+			end = (void *) ab->data + rest;
+
+			while (buffer < end)
+				buffer = handle_ar_packet(ctx, buffer);
+
+			ctx->current_buffer = ab;
+			ctx->pointer = end;
+
+			ar_context_link_page(ctx, start, start_bus);
+		} else {
+			ctx->pointer = start + PAGE_SIZE;
+		}
 	} else {
 		buffer = ctx->pointer;
 		ctx->pointer = end =
-			(void *) ab + PAGE_SIZE - le16_to_cpu(d->res_count);
+			(void *) ab + PAGE_SIZE - le16_to_cpu(res_count);
 
 		while (buffer < end)
 			buffer = handle_ar_packet(ctx, buffer);
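
The ACCESS_ONCE(d->res_count) introduced above matters because the controller
updates the descriptor via DMA while the tasklet runs: both the zero test and
the end-pointer arithmetic must use one snapshot. A tiny sketch of the idea,
with a volatile read standing in for ACCESS_ONCE:

#include <stdint.h>
#include <stddef.h>

struct descriptor {
	uint16_t req_count;
	volatile uint16_t res_count;	/* written by the controller via DMA */
};

size_t bytes_filled(struct descriptor *d, size_t buf_size)
{
	uint16_t res = d->res_count;	/* read exactly once */
	/* every later use sees the same snapshot, even if the hardware
	 * decrements res_count again while we are still parsing */
	return buf_size - res;
}
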
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index dcbeb98..f7af91c 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -276,7 +276,7 @@
 	struct drm_crtc *tmp;
 	int crtc_mask = 1;
 
-	WARN(!crtc, "checking null crtc?");
+	WARN(!crtc, "checking null crtc?\n");
 
 	dev = crtc->dev;
 
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index c1a2621..a245d17 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -240,7 +240,7 @@
 			.addr	= DDC_ADDR,
 			.flags	= I2C_M_RD,
 			.len	= len,
-			.buf	= buf + start,
+			.buf	= buf,
 		}
 	};
 
@@ -253,7 +253,7 @@
 static u8 *
 drm_do_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
 {
-	int i, j = 0;
+	int i, j = 0, valid_extensions = 0;
 	u8 *block, *new;
 
 	if ((block = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
@@ -280,14 +280,28 @@
 
 	for (j = 1; j <= block[0x7e]; j++) {
 		for (i = 0; i < 4; i++) {
-			if (drm_do_probe_ddc_edid(adapter, block, j,
-						  EDID_LENGTH))
+			if (drm_do_probe_ddc_edid(adapter,
+				  block + (valid_extensions + 1) * EDID_LENGTH,
+				  j, EDID_LENGTH))
 				goto out;
-			if (drm_edid_block_valid(block + j * EDID_LENGTH))
+			if (drm_edid_block_valid(block + (valid_extensions + 1) * EDID_LENGTH)) {
+				valid_extensions++;
 				break;
+			}
 		}
 		if (i == 4)
-			goto carp;
+			dev_warn(connector->dev->dev,
+			 "%s: Ignoring invalid EDID block %d.\n",
+			 drm_get_connector_name(connector), j);
+	}
+
+	if (valid_extensions != block[0x7e]) {
+		block[EDID_LENGTH-1] += block[0x7e] - valid_extensions;
+		block[0x7e] = valid_extensions;
+		new = krealloc(block, (valid_extensions + 1) * EDID_LENGTH, GFP_KERNEL);
+		if (!new)
+			goto out;
+		block = new;
 	}
 
 	return block;
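
The loop above keeps reading extension blocks even when some are corrupt,
compacting the valid ones and fixing up the EDID bookkeeping. A sketch of the
same compaction done after the fact (hypothetical helper, userspace): since
byte 0x7e lives in the base block, lowering the extension count must be
balanced in the base block's checksum byte so the sum of all 128 bytes stays
0 mod 256.

#include <stdint.h>
#include <string.h>

#define EDID_LENGTH 128

/* compact a base block plus extensions in place, keeping only the blocks
 * that 'valid' accepts; returns the number of extensions kept */
int compact_edid(uint8_t *edid, int (*valid)(const uint8_t *block))
{
	int total = edid[0x7e], kept = 0, j;

	for (j = 1; j <= total; j++) {
		const uint8_t *ext = edid + j * EDID_LENGTH;
		if (!valid(ext))
			continue;	/* skip the corrupt block */
		memmove(edid + (kept + 1) * EDID_LENGTH, ext, EDID_LENGTH);
		kept++;
	}
	/* dropping (total - kept) from byte 0x7e must be added back to the
	 * checksum byte, exactly as the hunk above does */
	edid[EDID_LENGTH - 1] += total - kept;
	edid[0x7e] = kept;
	return kept;
}
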
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 3467dd4..80745f8 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -44,7 +44,7 @@
 module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
 
 unsigned int i915_powersave = 1;
-module_param_named(powersave, i915_powersave, int, 0400);
+module_param_named(powersave, i915_powersave, int, 0600);
 
 unsigned int i915_lvds_downclock = 0;
 module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 2c2c19b..90414ae 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1321,6 +1321,7 @@
 
 #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
+#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
 
 #define PRIMARY_RINGBUFFER_SIZE         (128*1024)
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8eb8453..ef188e3 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2172,7 +2172,7 @@
 static int i915_ring_idle(struct drm_device *dev,
 			  struct intel_ring_buffer *ring)
 {
-	if (list_empty(&ring->gpu_write_list))
+	if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
 		return 0;
 
 	i915_gem_flush_ring(dev, NULL, ring,
@@ -2190,9 +2190,7 @@
 	int ret;
 
 	lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
-		       list_empty(&dev_priv->render_ring.active_list) &&
-		       list_empty(&dev_priv->bsd_ring.active_list) &&
-		       list_empty(&dev_priv->blt_ring.active_list));
+		       list_empty(&dev_priv->mm.active_list));
 	if (lists_empty)
 		return 0;
 
@@ -3108,7 +3106,8 @@
 	 * write domain
 	 */
 	if (obj->write_domain &&
-	    obj->write_domain != obj->pending_read_domains) {
+	    (obj->write_domain != obj->pending_read_domains ||
+	     obj_priv->ring != ring)) {
 		flush_domains |= obj->write_domain;
 		invalidate_domains |=
 			obj->pending_read_domains & ~obj->write_domain;
@@ -3497,6 +3496,52 @@
 	return 0;
 }
 
+static int
+i915_gem_execbuffer_move_to_gpu(struct drm_device *dev,
+				struct drm_file *file,
+				struct intel_ring_buffer *ring,
+				struct drm_gem_object **objects,
+				int count)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret, i;
+
+	/* Zero the global flush/invalidate flags. These
+	 * will be modified as new domains are computed
+	 * for each object
+	 */
+	dev->invalidate_domains = 0;
+	dev->flush_domains = 0;
+	dev_priv->mm.flush_rings = 0;
+	for (i = 0; i < count; i++)
+		i915_gem_object_set_to_gpu_domain(objects[i], ring);
+
+	if (dev->invalidate_domains | dev->flush_domains) {
+#if WATCH_EXEC
+		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
+			  __func__,
+			 dev->invalidate_domains,
+			 dev->flush_domains);
+#endif
+		i915_gem_flush(dev, file,
+			       dev->invalidate_domains,
+			       dev->flush_domains,
+			       dev_priv->mm.flush_rings);
+	}
+
+	for (i = 0; i < count; i++) {
+		struct drm_i915_gem_object *obj = to_intel_bo(objects[i]);
+		/* XXX replace with semaphores */
+		if (obj->ring && ring != obj->ring) {
+			ret = i915_gem_object_wait_rendering(&obj->base, true);
+			if (ret)
+				return ret;
+		}
+	}
+
+	return 0;
+}
+
 /* Throttle our rendering by waiting until the ring has completed our requests
  * emitted over 20 msec ago.
  *
@@ -3757,33 +3802,10 @@
 		goto err;
 	}
 
-	/* Zero the global flush/invalidate flags. These
-	 * will be modified as new domains are computed
-	 * for each object
-	 */
-	dev->invalidate_domains = 0;
-	dev->flush_domains = 0;
-	dev_priv->mm.flush_rings = 0;
-
-	for (i = 0; i < args->buffer_count; i++) {
-		struct drm_gem_object *obj = object_list[i];
-
-		/* Compute new gpu domains and update invalidate/flush */
-		i915_gem_object_set_to_gpu_domain(obj, ring);
-	}
-
-	if (dev->invalidate_domains | dev->flush_domains) {
-#if WATCH_EXEC
-		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
-			  __func__,
-			 dev->invalidate_domains,
-			 dev->flush_domains);
-#endif
-		i915_gem_flush(dev, file,
-			       dev->invalidate_domains,
-			       dev->flush_domains,
-			       dev_priv->mm.flush_rings);
-	}
+	ret = i915_gem_execbuffer_move_to_gpu(dev, file, ring,
+					      object_list, args->buffer_count);
+	if (ret)
+		goto err;
 
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
@@ -4043,8 +4065,7 @@
 			alignment = i915_gem_get_gtt_alignment(obj);
 		if (obj_priv->gtt_offset & (alignment - 1)) {
 			WARN(obj_priv->pin_count,
-			     "bo is already pinned with incorrect alignment:"
-			     " offset=%x, req.alignment=%x\n",
+			     "bo is already pinned with incorrect alignment: offset=%x, req.alignment=%x\n",
 			     obj_priv->gtt_offset, alignment);
 			ret = i915_gem_object_unbind(obj);
 			if (ret)
@@ -4856,17 +4877,24 @@
 		     struct drm_file *file_priv)
 {
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-	void *obj_addr;
-	int ret;
-	char __user *user_data;
+	void *vaddr = obj_priv->phys_obj->handle->vaddr + args->offset;
+	char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
 
-	user_data = (char __user *) (uintptr_t) args->data_ptr;
-	obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
+	DRM_DEBUG_DRIVER("vaddr %p, %lld\n", vaddr, args->size);
 
-	DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
-	ret = copy_from_user(obj_addr, user_data, args->size);
-	if (ret)
-		return -EFAULT;
+	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
+		unsigned long unwritten;
+
+		/* The physical object once assigned is fixed for the lifetime
+		 * of the obj, so we can safely drop the lock and continue
+		 * to access vaddr.
+		 */
+		mutex_unlock(&dev->struct_mutex);
+		unwritten = copy_from_user(vaddr, user_data, args->size);
+		mutex_lock(&dev->struct_mutex);
+		if (unwritten)
+			return -EFAULT;
+	}
 
 	drm_agp_chipset_flush(dev);
 	return 0;
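
The rewritten pwrite path above is the classic fast-path/slow-path copy: try
the non-faulting variant while holding the lock, and only if it cannot
finish, drop the lock and redo the copy with the variant that may fault and
sleep. A hypothetical userspace sketch, with a pthread mutex standing in for
struct_mutex and copy_inatomic simulating a copy that returns the bytes it
could not transfer:

#include <pthread.h>
#include <string.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;	/* struct_mutex */

/* simulated __copy_from_user_inatomic_nocache(): returns bytes NOT copied */
static size_t copy_inatomic(void *dst, const void *src, size_t n)
{
	(void)dst; (void)src;
	return n;			/* pretend the fast path always fails */
}

static int pwrite_phys(void *vaddr, const void *user, size_t n)
{
	if (copy_inatomic(vaddr, user, n)) {
		/* slow path may fault and sleep: drop the lock first; the
		 * phys object is fixed for the object's lifetime, so vaddr
		 * stays valid without it */
		pthread_mutex_unlock(&lock);
		memcpy(vaddr, user, n);		/* copy_from_user() stand-in */
		pthread_mutex_lock(&lock);
	}
	return 0;
}

int main(void)
{
	char dst[8], src[8] = "payload";
	pthread_mutex_lock(&lock);
	pwrite_phys(dst, src, sizeof(src));
	pthread_mutex_unlock(&lock);
	return 0;
}
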
@@ -4900,9 +4928,7 @@
 	int lists_empty;
 
 	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
-		      list_empty(&dev_priv->render_ring.active_list) &&
-		      list_empty(&dev_priv->bsd_ring.active_list) &&
-		      list_empty(&dev_priv->blt_ring.active_list);
+		      list_empty(&dev_priv->mm.active_list);
 
 	return !lists_empty;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 43a4013..d8ae7d1 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -165,9 +165,7 @@
 
 	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
 		       list_empty(&dev_priv->mm.flushing_list) &&
-		       list_empty(&dev_priv->render_ring.active_list) &&
-		       list_empty(&dev_priv->bsd_ring.active_list) &&
-		       list_empty(&dev_priv->blt_ring.active_list));
+		       list_empty(&dev_priv->mm.active_list));
 	if (lists_empty)
 		return -ENOSPC;
 
@@ -184,9 +182,7 @@
 
 	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
 		       list_empty(&dev_priv->mm.flushing_list) &&
-		       list_empty(&dev_priv->render_ring.active_list) &&
-		       list_empty(&dev_priv->bsd_ring.active_list) &&
-		       list_empty(&dev_priv->blt_ring.active_list));
+		       list_empty(&dev_priv->mm.active_list));
 	BUG_ON(!lists_empty);
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 989c19d..454c064 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -862,8 +862,10 @@
 	/* Clock gating state */
 	intel_init_clock_gating(dev);
 
-	if (HAS_PCH_SPLIT(dev))
+	if (HAS_PCH_SPLIT(dev)) {
 		ironlake_enable_drps(dev);
+		intel_init_emon(dev);
+	}
 
 	/* Cache mode state */
 	I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 990f065..48d8fd6 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1681,6 +1681,37 @@
 	udelay(500);
 }
 
+static void intel_fdi_normal_train(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	int pipe = intel_crtc->pipe;
+	u32 reg, temp;
+
+	/* enable normal train */
+	reg = FDI_TX_CTL(pipe);
+	temp = I915_READ(reg);
+	temp &= ~FDI_LINK_TRAIN_NONE;
+	temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
+	I915_WRITE(reg, temp);
+
+	reg = FDI_RX_CTL(pipe);
+	temp = I915_READ(reg);
+	if (HAS_PCH_CPT(dev)) {
+		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
+	} else {
+		temp &= ~FDI_LINK_TRAIN_NONE;
+		temp |= FDI_LINK_TRAIN_NONE;
+	}
+	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
+
+	/* wait one idle pattern time */
+	POSTING_READ(reg);
+	udelay(1000);
+}
+
 /* The FDI link training functions for ILK/Ibexpeak. */
 static void ironlake_fdi_link_train(struct drm_crtc *crtc)
 {
@@ -1767,27 +1798,6 @@
 
 	DRM_DEBUG_KMS("FDI train done\n");
 
-	/* enable normal train */
-	reg = FDI_TX_CTL(pipe);
-	temp = I915_READ(reg);
-	temp &= ~FDI_LINK_TRAIN_NONE;
-	temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
-	I915_WRITE(reg, temp);
-
-	reg = FDI_RX_CTL(pipe);
-	temp = I915_READ(reg);
-	if (HAS_PCH_CPT(dev)) {
-		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
-		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
-	} else {
-		temp &= ~FDI_LINK_TRAIN_NONE;
-		temp |= FDI_LINK_TRAIN_NONE;
-	}
-	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
-
-	/* wait one idle pattern time */
-	POSTING_READ(reg);
-	udelay(1000);
 }
 
 static const int const snb_b_fdi_train_param [] = {
@@ -2090,6 +2100,8 @@
 	I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
 	I915_WRITE(TRANS_VSYNC(pipe),  I915_READ(VSYNC(pipe)));
 
+	intel_fdi_normal_train(crtc);
+
 	/* For PCH DP, enable TRANS_DP_CTL */
 	if (HAS_PCH_CPT(dev) &&
 	    intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
@@ -2200,9 +2212,10 @@
 	udelay(100);
 
 	/* Ironlake workaround, disable clock pointer after downing FDI */
-	I915_WRITE(FDI_RX_CHICKEN(pipe),
-		   I915_READ(FDI_RX_CHICKEN(pipe) &
-			     ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
+	if (HAS_PCH_IBX(dev))
+		I915_WRITE(FDI_RX_CHICKEN(pipe),
+			   I915_READ(FDI_RX_CHICKEN(pipe) &
+				     ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
 
 	/* still set train pattern 1 */
 	reg = FDI_TX_CTL(pipe);
@@ -5581,20 +5594,19 @@
 	fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
 	fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
 		MEMMODE_FSTART_SHIFT;
-	fstart = fmax;
 
 	vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
 		PXVFREQ_PX_SHIFT;
 
-	dev_priv->fmax = fstart; /* IPS callback will increase this */
+	dev_priv->fmax = fmax; /* IPS callback will increase this */
 	dev_priv->fstart = fstart;
 
-	dev_priv->max_delay = fmax;
+	dev_priv->max_delay = fstart;
 	dev_priv->min_delay = fmin;
 	dev_priv->cur_delay = fstart;
 
-	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", fmax, fmin,
-			 fstart);
+	DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
+			 fmax, fmin, fstart);
 
 	I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
 
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 891f4f1..c8e0055 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1517,7 +1517,7 @@
 			status = connector_status_connected;
 	}
 
-	return bit;
+	return status;
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 9af9f86..21551fe 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -296,6 +296,7 @@
 extern void intel_init_clock_gating(struct drm_device *dev);
 extern void ironlake_enable_drps(struct drm_device *dev);
 extern void ironlake_disable_drps(struct drm_device *dev);
+extern void intel_init_emon(struct drm_device *dev);
 
 extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
 				      struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index f1a6499..4324a32 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -481,11 +481,8 @@
 	struct drm_device *dev = connector->dev;
 	struct drm_display_mode *mode;
 
-	if (intel_lvds->edid) {
-		drm_mode_connector_update_edid_property(connector,
-							intel_lvds->edid);
+	if (intel_lvds->edid)
 		return drm_add_edid_modes(connector, intel_lvds->edid);
-	}
 
 	mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode);
 	if (mode == 0)
@@ -939,7 +936,16 @@
 	 */
 	intel_lvds->edid = drm_get_edid(connector,
 					&dev_priv->gmbus[pin].adapter);
-
+	if (intel_lvds->edid) {
+		if (drm_add_edid_modes(connector,
+				       intel_lvds->edid)) {
+			drm_mode_connector_update_edid_property(connector,
+								intel_lvds->edid);
+		} else {
+			kfree(intel_lvds->edid);
+			intel_lvds->edid = NULL;
+		}
+	}
 	if (!intel_lvds->edid) {
 		/* Didn't get an EDID, so
 		 * Set wide sync ranges so we get all modes
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 917c7dc..9b0d9a8 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -512,6 +512,6 @@
 	return 0;
 
 err_out:
-	iounmap(opregion->header);
+	iounmap(base);
 	return err;
 }
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index afb96d2..02ff0a4 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -946,7 +946,9 @@
 {
 	int uv_hscale = uv_hsubsampling(rec->flags);
 	int uv_vscale = uv_vsubsampling(rec->flags);
-	u32 stride_mask, depth, tmp;
+	u32 stride_mask;
+	int depth;
+	u32 tmp;
 
 	/* check src dimensions */
 	if (IS_845G(dev) || IS_I830(dev)) {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 09f2dc3..b83306f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -177,7 +177,7 @@
 
 	I915_WRITE_CTL(ring,
 			((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
-			| RING_NO_REPORT | RING_VALID);
+			| RING_REPORT_64K | RING_VALID);
 
 	head = I915_READ_HEAD(ring) & HEAD_ADDR;
 	/* If the head is still not zero, the ring is dead */
@@ -654,6 +654,10 @@
 	i915_gem_object_unpin(ring->gem_object);
 	drm_gem_object_unreference(ring->gem_object);
 	ring->gem_object = NULL;
+
+	if (ring->cleanup)
+		ring->cleanup(ring);
+
 	cleanup_status_page(dev, ring);
 }
 
@@ -688,6 +692,17 @@
 {
 	unsigned long end;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 head;
+
+	head = intel_read_status_page(ring, 4);
+	if (head) {
+		ring->head = head & HEAD_ADDR;
+		ring->space = ring->head - (ring->tail + 8);
+		if (ring->space < 0)
+			ring->space += ring->size;
+		if (ring->space >= n)
+			return 0;
+	}
 
 	trace_i915_ring_wait_begin (dev);
 	end = jiffies + 3 * HZ;
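
The early-out added above reuses the head value the ring writes back to its
status page, avoiding a slow MMIO read before the wait loop even starts. The
space computation is ordinary circular-buffer arithmetic; a sketch (the fixed
8-byte slack keeps head == tail meaning empty, never full):

/* free bytes available for new commands between tail and head */
int ring_space(int head, int tail, int size)
{
	int space = head - (tail + 8);
	if (space < 0)
		space += size;
	return space;
}
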
@@ -854,19 +869,125 @@
 	/* do nothing */
 }
 
+
+/* Workaround for some steppings of SNB:
+ * each time the BLT engine ring tail is moved,
+ * the first command in the ring to be parsed
+ * should be MI_BATCH_BUFFER_START.
+ */
+#define NEED_BLT_WORKAROUND(dev) \
+	(IS_GEN6(dev) && (dev->pdev->revision < 8))
+
+static inline struct drm_i915_gem_object *
+to_blt_workaround(struct intel_ring_buffer *ring)
+{
+	return ring->private;
+}
+
+static int blt_ring_init(struct drm_device *dev,
+			 struct intel_ring_buffer *ring)
+{
+	if (NEED_BLT_WORKAROUND(dev)) {
+		struct drm_i915_gem_object *obj;
+		u32 __iomem *ptr;
+		int ret;
+
+		obj = to_intel_bo(i915_gem_alloc_object(dev, 4096));
+		if (obj == NULL)
+			return -ENOMEM;
+
+		ret = i915_gem_object_pin(&obj->base, 4096);
+		if (ret) {
+			drm_gem_object_unreference(&obj->base);
+			return ret;
+		}
+
+		ptr = kmap(obj->pages[0]);
+		iowrite32(MI_BATCH_BUFFER_END, ptr);
+		iowrite32(MI_NOOP, ptr+1);
+		kunmap(obj->pages[0]);
+
+		ret = i915_gem_object_set_to_gtt_domain(&obj->base, false);
+		if (ret) {
+			i915_gem_object_unpin(&obj->base);
+			drm_gem_object_unreference(&obj->base);
+			return ret;
+		}
+
+		ring->private = obj;
+	}
+
+	return init_ring_common(dev, ring);
+}
+
+static void blt_ring_begin(struct drm_device *dev,
+			   struct intel_ring_buffer *ring,
+			  int num_dwords)
+{
+	if (ring->private) {
+		intel_ring_begin(dev, ring, num_dwords+2);
+		intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START);
+		intel_ring_emit(dev, ring, to_blt_workaround(ring)->gtt_offset);
+	} else
+		intel_ring_begin(dev, ring, 4);
+}
+
+static void blt_ring_flush(struct drm_device *dev,
+			   struct intel_ring_buffer *ring,
+			   u32 invalidate_domains,
+			   u32 flush_domains)
+{
+	blt_ring_begin(dev, ring, 4);
+	intel_ring_emit(dev, ring, MI_FLUSH_DW);
+	intel_ring_emit(dev, ring, 0);
+	intel_ring_emit(dev, ring, 0);
+	intel_ring_emit(dev, ring, 0);
+	intel_ring_advance(dev, ring);
+}
+
+static u32
+blt_ring_add_request(struct drm_device *dev,
+		     struct intel_ring_buffer *ring,
+		     u32 flush_domains)
+{
+	u32 seqno = i915_gem_get_seqno(dev);
+
+	blt_ring_begin(dev, ring, 4);
+	intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
+	intel_ring_emit(dev, ring,
+			I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+	intel_ring_emit(dev, ring, seqno);
+	intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
+	intel_ring_advance(dev, ring);
+
+	DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
+	return seqno;
+}
+
+static void blt_ring_cleanup(struct intel_ring_buffer *ring)
+{
+	if (!ring->private)
+		return;
+
+	i915_gem_object_unpin(ring->private);
+	drm_gem_object_unreference(ring->private);
+	ring->private = NULL;
+}
+
 static const struct intel_ring_buffer gen6_blt_ring = {
        .name			= "blt ring",
        .id			= RING_BLT,
        .mmio_base		= BLT_RING_BASE,
        .size			= 32 * PAGE_SIZE,
-       .init			= init_ring_common,
+       .init			= blt_ring_init,
        .write_tail		= ring_write_tail,
-       .flush			= gen6_ring_flush,
-       .add_request		= ring_add_request,
+       .flush			= blt_ring_flush,
+       .add_request		= blt_ring_add_request,
        .get_seqno		= ring_status_page_get_seqno,
        .user_irq_get		= blt_ring_get_user_irq,
        .user_irq_put		= blt_ring_put_user_irq,
        .dispatch_gem_execbuffer	= gen6_ring_dispatch_gem_execbuffer,
+       .cleanup			= blt_ring_cleanup,
 };
 
 int intel_init_render_ring_buffer(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index a05aff0..3126c26 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -63,6 +63,7 @@
 			struct drm_i915_gem_execbuffer2 *exec,
 			struct drm_clip_rect *cliprects,
 			uint64_t exec_offset);
+	void		(*cleanup)(struct intel_ring_buffer *ring);
 
 	/**
 	 * List of objects currently involved in rendering from the
@@ -98,6 +99,8 @@
 
 	wait_queue_head_t irq_queue;
 	drm_local_map_t map;
+
+	void *private;
 };
 
 static inline u32
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index f12a5b3..488c36c 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2033,7 +2033,7 @@
 	u32 grbm_int_cntl = 0;
 
 	if (!rdev->irq.installed) {
-		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
 		return -EINVAL;
 	}
 	/* don't enable anything if the ih is disabled */
@@ -2295,6 +2295,7 @@
 			case 0: /* D1 vblank */
 				if (disp_int & LB_D1_VBLANK_INTERRUPT) {
 					drm_handle_vblank(rdev->ddev, 0);
+					rdev->pm.vblank_sync = true;
 					wake_up(&rdev->irq.vblank_queue);
 					disp_int &= ~LB_D1_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D1 vblank\n");
@@ -2316,6 +2317,7 @@
 			case 0: /* D2 vblank */
 				if (disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
 					drm_handle_vblank(rdev->ddev, 1);
+					rdev->pm.vblank_sync = true;
 					wake_up(&rdev->irq.vblank_queue);
 					disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D2 vblank\n");
@@ -2337,6 +2339,7 @@
 			case 0: /* D3 vblank */
 				if (disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
 					drm_handle_vblank(rdev->ddev, 2);
+					rdev->pm.vblank_sync = true;
 					wake_up(&rdev->irq.vblank_queue);
 					disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D3 vblank\n");
@@ -2358,6 +2361,7 @@
 			case 0: /* D4 vblank */
 				if (disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
 					drm_handle_vblank(rdev->ddev, 3);
+					rdev->pm.vblank_sync = true;
 					wake_up(&rdev->irq.vblank_queue);
 					disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D4 vblank\n");
@@ -2379,6 +2383,7 @@
 			case 0: /* D5 vblank */
 				if (disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
 					drm_handle_vblank(rdev->ddev, 4);
+					rdev->pm.vblank_sync = true;
 					wake_up(&rdev->irq.vblank_queue);
 					disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D5 vblank\n");
@@ -2400,6 +2405,7 @@
 			case 0: /* D6 vblank */
 				if (disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
 					drm_handle_vblank(rdev->ddev, 5);
+					rdev->pm.vblank_sync = true;
 					wake_up(&rdev->irq.vblank_queue);
 					disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
 					DRM_DEBUG("IH: D6 vblank\n");
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 0e8f28a..8e10aa9 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -442,7 +442,7 @@
 	int r;
 
 	if (rdev->gart.table.ram.ptr) {
-		WARN(1, "R100 PCI GART already initialized.\n");
+		WARN(1, "R100 PCI GART already initialized\n");
 		return 0;
 	}
 	/* Initialize common gart structure */
@@ -516,7 +516,7 @@
 	uint32_t tmp = 0;
 
 	if (!rdev->irq.installed) {
-		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
 		WREG32(R_000040_GEN_INT_CNTL, 0);
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 34527e6..cde1d34 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -91,7 +91,7 @@
 	int r;
 
 	if (rdev->gart.table.vram.robj) {
-		WARN(1, "RV370 PCIE GART already initialized.\n");
+		WARN(1, "RV370 PCIE GART already initialized\n");
 		return 0;
 	}
 	/* Initialize common gart structure */
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 33952a1..0f806cc 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -97,14 +97,8 @@
 {
 	u32 temp = (RREG32(CG_THERMAL_STATUS) & ASIC_T_MASK) >>
 		ASIC_T_SHIFT;
-	u32 actual_temp = 0;
 
-	if ((temp >> 7) & 1)
-		actual_temp = 0;
-	else
-		actual_temp = (temp >> 1) & 0xff;
-
-	return actual_temp * 1000;
+	return temp * 1000;
 }
 
 void r600_pm_get_dynpm_state(struct radeon_device *rdev)
@@ -919,7 +913,7 @@
 	int r;
 
 	if (rdev->gart.table.vram.robj) {
-		WARN(1, "R600 PCIE GART already initialized.\n");
+		WARN(1, "R600 PCIE GART already initialized\n");
 		return 0;
 	}
 	/* Initialize common gart structure */
@@ -2995,7 +2989,7 @@
 	u32 hdmi1, hdmi2;
 
 	if (!rdev->irq.installed) {
-		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
 		return -EINVAL;
 	}
 	/* don't enable anything if the ih is disabled */
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 04cac7e..87ead09 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -526,8 +526,6 @@
 	if (crev < 2)
 		return false;
 
-	router.valid = false;
-
 	obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset);
 	path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *)
 	    (ctx->bios + data_offset +
@@ -624,6 +622,8 @@
 			if (connector_type == DRM_MODE_CONNECTOR_Unknown)
 				continue;
 
+			router.ddc_valid = false;
+			router.cd_valid = false;
 			for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) {
 				uint8_t grph_obj_id, grph_obj_num, grph_obj_type;
 
@@ -647,9 +647,8 @@
 								 usDeviceTag));
 
 				} else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) {
-					router.valid = false;
 					for (k = 0; k < router_obj->ucNumberOfObjects; k++) {
-						u16 router_obj_id = le16_to_cpu(router_obj->asObjects[j].usObjectID);
+						u16 router_obj_id = le16_to_cpu(router_obj->asObjects[k].usObjectID);
 						if (le16_to_cpu(path->usGraphicObjIds[j]) == router_obj_id) {
 							ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *)
 								(ctx->bios + data_offset +
@@ -657,6 +656,7 @@
 							ATOM_I2C_RECORD *i2c_record;
 							ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
 							ATOM_ROUTER_DDC_PATH_SELECT_RECORD *ddc_path;
+							ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *cd_path;
 							ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *router_src_dst_table =
 								(ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *)
 								(ctx->bios + data_offset +
@@ -690,10 +690,18 @@
 								case ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE:
 									ddc_path = (ATOM_ROUTER_DDC_PATH_SELECT_RECORD *)
 										record;
-									router.valid = true;
-									router.mux_type = ddc_path->ucMuxType;
-									router.mux_control_pin = ddc_path->ucMuxControlPin;
-									router.mux_state = ddc_path->ucMuxState[enum_id];
+									router.ddc_valid = true;
+									router.ddc_mux_type = ddc_path->ucMuxType;
+									router.ddc_mux_control_pin = ddc_path->ucMuxControlPin;
+									router.ddc_mux_state = ddc_path->ucMuxState[enum_id];
+									break;
+								case ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE:
+									cd_path = (ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *)
+										record;
+									router.cd_valid = true;
+									router.cd_mux_type = cd_path->ucMuxType;
+									router.cd_mux_control_pin = cd_path->ucMuxControlPin;
+									router.cd_mux_state = cd_path->ucMuxState[enum_id];
 									break;
 								}
 								record = (ATOM_COMMON_RECORD_HEADER *)
@@ -860,7 +868,8 @@
 	size_t bc_size = sizeof(*bios_connectors) * ATOM_MAX_SUPPORTED_DEVICE;
 	struct radeon_router router;
 
-	router.valid = false;
+	router.ddc_valid = false;
+	router.cd_valid = false;
 
 	bios_connectors = kzalloc(bc_size, GFP_KERNEL);
 	if (!bios_connectors)
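
The two fixes in the hunks above share one shape: state that is meaningful per display path must be re-initialized at the top of the loop that owns it, and each table must be indexed by its own loop counter (the old code read asObjects[j] inside the k-loop). A minimal sketch of the corrected structure, with hypothetical types standing in for the AtomBIOS tables:

	struct router_state {
		bool ddc_valid;	/* DDC mux record seen for this path */
		bool cd_valid;	/* clock/data mux record seen */
	};

	static void parse_display_paths(u16 *path_ids, int npaths,
					u16 *router_ids, int nrouters)
	{
		struct router_state router;
		int j, k;

		for (j = 0; j < npaths; j++) {
			/* reset per path: records found for an earlier
			 * connector must not leak into this one */
			router.ddc_valid = false;
			router.cd_valid = false;

			for (k = 0; k < nrouters; k++) {
				/* inner table indexed by k, not j --
				 * the indexing bug fixed above */
				if (path_ids[j] == router_ids[k])
					router.ddc_valid = true;
			}
		}
	}
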
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 4dac4b0..fe6c747 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -183,13 +183,13 @@
 					continue;
 
 				if (priority == true) {
-					DRM_INFO("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict));
-					DRM_INFO("in favor of %s\n", drm_get_connector_name(connector));
+					DRM_DEBUG_KMS("1: conflicting encoders switching off %s\n", drm_get_connector_name(conflict));
+					DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(connector));
 					conflict->status = connector_status_disconnected;
 					radeon_connector_update_scratch_regs(conflict, connector_status_disconnected);
 				} else {
-					DRM_INFO("2: conflicting encoders switching off %s\n", drm_get_connector_name(connector));
-					DRM_INFO("in favor of %s\n", drm_get_connector_name(conflict));
+					DRM_DEBUG_KMS("2: conflicting encoders switching off %s\n", drm_get_connector_name(connector));
+					DRM_DEBUG_KMS("in favor of %s\n", drm_get_connector_name(conflict));
 					current_status = connector_status_disconnected;
 				}
 				break;
@@ -432,13 +432,13 @@
 			    mode->vdisplay == native_mode->vdisplay) {
 				*native_mode = *mode;
 				drm_mode_set_crtcinfo(native_mode, CRTC_INTERLACE_HALVE_V);
-				DRM_INFO("Determined LVDS native mode details from EDID\n");
+				DRM_DEBUG_KMS("Determined LVDS native mode details from EDID\n");
 				break;
 			}
 		}
 	}
 	if (!native_mode->clock) {
-		DRM_INFO("No LVDS native mode details, disabling RMX\n");
+		DRM_DEBUG_KMS("No LVDS native mode details, disabling RMX\n");
 		radeon_encoder->rmx_type = RMX_OFF;
 	}
 }
@@ -1116,7 +1116,7 @@
 				radeon_connector->shared_ddc = true;
 				shared_ddc = true;
 			}
-			if (radeon_connector->router_bus && router->valid &&
+			if (radeon_connector->router_bus && router->ddc_valid &&
 			    (radeon_connector->router.router_id == router->router_id)) {
 				radeon_connector->shared_ddc = false;
 				shared_ddc = false;
@@ -1136,7 +1136,7 @@
 	radeon_connector->connector_object_id = connector_object_id;
 	radeon_connector->hpd = *hpd;
 	radeon_connector->router = *router;
-	if (router->valid) {
+	if (router->ddc_valid || router->cd_valid) {
 		radeon_connector->router_bus = radeon_i2c_lookup(rdev, &router->i2c_info);
 		if (!radeon_connector->router_bus)
 			goto failed;
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 0383631..1df4dc6 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -315,10 +315,14 @@
 				 radeon_connector->ddc_bus->rec.en_data_reg,
 				 radeon_connector->ddc_bus->rec.y_clk_reg,
 				 radeon_connector->ddc_bus->rec.y_data_reg);
-			if (radeon_connector->router_bus)
+			if (radeon_connector->router.ddc_valid)
 				DRM_INFO("  DDC Router 0x%x/0x%x\n",
-					 radeon_connector->router.mux_control_pin,
-					 radeon_connector->router.mux_state);
+					 radeon_connector->router.ddc_mux_control_pin,
+					 radeon_connector->router.ddc_mux_state);
+			if (radeon_connector->router.cd_valid)
+				DRM_INFO("  Clock/Data Router 0x%x/0x%x\n",
+					 radeon_connector->router.cd_mux_control_pin,
+					 radeon_connector->router.cd_mux_state);
 		} else {
 			if (connector->connector_type == DRM_MODE_CONNECTOR_VGA ||
 			    connector->connector_type == DRM_MODE_CONNECTOR_DVII ||
@@ -398,8 +402,8 @@
 	int ret = 0;
 
 	/* on hw with routers, select right port */
-	if (radeon_connector->router.valid)
-		radeon_router_select_port(radeon_connector);
+	if (radeon_connector->router.ddc_valid)
+		radeon_router_select_ddc_port(radeon_connector);
 
 	if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
 	    (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
@@ -432,8 +436,8 @@
 	int ret = 0;
 
 	/* on hw with routers, select right port */
-	if (radeon_connector->router.valid)
-		radeon_router_select_port(radeon_connector);
+	if (radeon_connector->router.ddc_valid)
+		radeon_router_select_ddc_port(radeon_connector);
 
 	if (!radeon_connector->ddc_bus)
 		return -1;
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index ae58b68..f678257 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -1520,6 +1520,7 @@
 static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
 {
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
 
 	if (radeon_encoder->active_device &
 	    (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) {
@@ -1531,6 +1532,13 @@
 	radeon_atom_output_lock(encoder, true);
 	radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
 
+	/* select the clock/data port if the connector uses a router */
+	if (connector) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		if (radeon_connector->router.cd_valid)
+			radeon_router_select_cd_port(radeon_connector);
+	}
+
 	/* this is needed for the pll/ss setup to work correctly in some cases */
 	atombios_set_encoder_crtc_source(encoder);
 }
@@ -1547,6 +1555,23 @@
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 	struct radeon_encoder_atom_dig *dig;
+
+	/* check for pre-DCE3 cards with shared encoders;
+	 * can't really use the links individually, so don't disable
+	 * the encoder if it's in use by another connector
+	 */
+	if (!ASIC_IS_DCE3(rdev)) {
+		struct drm_encoder *other_encoder;
+		struct radeon_encoder *other_radeon_encoder;
+
+		list_for_each_entry(other_encoder, &dev->mode_config.encoder_list, head) {
+			other_radeon_encoder = to_radeon_encoder(other_encoder);
+			if ((radeon_encoder->encoder_id == other_radeon_encoder->encoder_id) &&
+			    drm_helper_encoder_in_use(other_encoder))
+				goto disable_done;
+		}
+	}
+
 	radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
 
 	switch (radeon_encoder->encoder_id) {
@@ -1586,6 +1611,7 @@
 		break;
 	}
 
+disable_done:
 	if (radeon_encoder_is_digital(encoder)) {
 		if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
 			r600_hdmi_disable(encoder);
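
The disable hook above now bails out early when another active connector still drives the same physical encoder block, which pre-DCE3 parts share. Stripped of the radeon specifics, the in-use test is a scan of the mode_config encoder list; drm_helper_encoder_in_use() is the real DRM helper, while the wrapper below is a sketch (it also skips the encoder being disabled itself):

	static bool encoder_shared_and_busy(struct drm_device *dev,
					    struct radeon_encoder *enc)
	{
		struct drm_encoder *other;

		list_for_each_entry(other, &dev->mode_config.encoder_list, head) {
			struct radeon_encoder *r = to_radeon_encoder(other);

			/* same hardware block, still wired to a live
			 * connector: leave it powered */
			if (r != enc &&
			    r->encoder_id == enc->encoder_id &&
			    drm_helper_encoder_in_use(other))
				return true;
		}
		return false;
	}
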
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 216392d..daacb28 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -240,7 +240,8 @@
 		 */
 		if (seq == rdev->fence_drv.last_seq && radeon_gpu_is_lockup(rdev)) {
 			/* good news we believe it's a lockup */
-			WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n", fence->seq, seq);
+			WARN(1, "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
+			     fence->seq, seq);
 			/* FIXME: what should we do ? marking everyone
 			 * as signaled for now
 			 */
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index 6a13ee3..0cfbba0 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -53,8 +53,8 @@
 	};
 
 	/* on hw with routers, select right port */
-	if (radeon_connector->router.valid)
-		radeon_router_select_port(radeon_connector);
+	if (radeon_connector->router.ddc_valid)
+		radeon_router_select_ddc_port(radeon_connector);
 
 	ret = i2c_transfer(&radeon_connector->ddc_bus->adapter, msgs, 2);
 	if (ret == 2)
@@ -1084,26 +1084,51 @@
 			  addr, val);
 }
 
-/* router switching */
-void radeon_router_select_port(struct radeon_connector *radeon_connector)
+/* ddc router switching */
+void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector)
 {
 	u8 val;
 
-	if (!radeon_connector->router.valid)
+	if (!radeon_connector->router.ddc_valid)
 		return;
 
 	radeon_i2c_get_byte(radeon_connector->router_bus,
 			    radeon_connector->router.i2c_addr,
 			    0x3, &val);
-	val &= radeon_connector->router.mux_control_pin;
+	val &= ~radeon_connector->router.ddc_mux_control_pin;
 	radeon_i2c_put_byte(radeon_connector->router_bus,
 			    radeon_connector->router.i2c_addr,
 			    0x3, val);
 	radeon_i2c_get_byte(radeon_connector->router_bus,
 			    radeon_connector->router.i2c_addr,
 			    0x1, &val);
-	val &= radeon_connector->router.mux_control_pin;
-	val |= radeon_connector->router.mux_state;
+	val &= ~radeon_connector->router.ddc_mux_control_pin;
+	val |= radeon_connector->router.ddc_mux_state;
+	radeon_i2c_put_byte(radeon_connector->router_bus,
+			    radeon_connector->router.i2c_addr,
+			    0x1, val);
+}
+
+/* clock/data router switching */
+void radeon_router_select_cd_port(struct radeon_connector *radeon_connector)
+{
+	u8 val;
+
+	if (!radeon_connector->router.cd_valid)
+		return;
+
+	radeon_i2c_get_byte(radeon_connector->router_bus,
+			    radeon_connector->router.i2c_addr,
+			    0x3, &val);
+	val &= ~radeon_connector->router.cd_mux_control_pin;
+	radeon_i2c_put_byte(radeon_connector->router_bus,
+			    radeon_connector->router.i2c_addr,
+			    0x3, val);
+	radeon_i2c_get_byte(radeon_connector->router_bus,
+			    radeon_connector->router.i2c_addr,
+			    0x1, &val);
+	val &= ~radeon_connector->router.cd_mux_control_pin;
+	val |= radeon_connector->router.cd_mux_state;
 	radeon_i2c_put_byte(radeon_connector->router_bus,
 			    radeon_connector->router.i2c_addr,
 			    0x1, val);
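
Besides splitting the DDC and clock/data paths, the hunk above fixes the mask handling: "val &= mux_control_pin" kept the old pin state, whereas the intent is clear-then-set. The pattern in isolation, using the driver's real byte accessors and a hypothetical wrapper:

	static void mux_program(struct radeon_i2c_chan *bus, u8 slave,
				u8 reg, u8 pin_mask, u8 state)
	{
		u8 val;

		radeon_i2c_get_byte(bus, slave, reg, &val);
		val &= ~pin_mask;	/* release the control pins first */
		val |= state;		/* then drive the new selection */
		radeon_i2c_put_byte(bus, slave, reg, val);
	}
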
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 9245716..680f576 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -401,13 +401,19 @@
 };
 
 struct radeon_router {
-	bool valid;
 	u32 router_id;
 	struct radeon_i2c_bus_rec i2c_info;
 	u8 i2c_addr;
-	u8 mux_type;
-	u8 mux_control_pin;
-	u8 mux_state;
+	/* i2c mux */
+	bool ddc_valid;
+	u8 ddc_mux_type;
+	u8 ddc_mux_control_pin;
+	u8 ddc_mux_state;
+	/* clock/data mux */
+	bool cd_valid;
+	u8 cd_mux_type;
+	u8 cd_mux_control_pin;
+	u8 cd_mux_state;
 };
 
 struct radeon_connector {
@@ -488,7 +494,8 @@
 				u8 slave_addr,
 				u8 addr,
 				u8 val);
-extern void radeon_router_select_port(struct radeon_connector *radeon_connector);
+extern void radeon_router_select_ddc_port(struct radeon_connector *radeon_connector);
+extern void radeon_router_select_cd_port(struct radeon_connector *radeon_connector);
 extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
 extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
 
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index d7ab914..8eb1834 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -102,6 +102,8 @@
 		type = ttm_bo_type_device;
 	}
 	*bo_ptr = NULL;
+
+retry:
 	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
 	if (bo == NULL)
 		return -ENOMEM;
@@ -109,8 +111,6 @@
 	bo->gobj = gobj;
 	bo->surface_reg = -1;
 	INIT_LIST_HEAD(&bo->list);
-
-retry:
 	radeon_ttm_placement_from_domain(bo, domain);
 	/* Kernel allocations are uninterruptible */
 	mutex_lock(&rdev->vram_mutex);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index fe95bb3..01c2c73 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -689,7 +689,8 @@
 	gtt = container_of(backend, struct radeon_ttm_backend, backend);
 	gtt->offset = bo_mem->start << PAGE_SHIFT;
 	if (!gtt->num_pages) {
-		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", gtt->num_pages, bo_mem, backend);
+		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
+		     gtt->num_pages, bo_mem, backend);
 	}
 	r = radeon_gart_bind(gtt->rdev, gtt->offset,
 			     gtt->num_pages, gtt->pages);
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index f683e51..5512e4e 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -78,7 +78,7 @@
 	int r;
 
 	if (rdev->gart.table.ram.ptr) {
-		WARN(1, "RS400 GART already initialized.\n");
+		WARN(1, "RS400 GART already initialized\n");
 		return 0;
 	}
 	/* Check gart size */
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index b091a1f..f1c6e02 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -375,7 +375,7 @@
 	int r;
 
 	if (rdev->gart.table.vram.robj) {
-		WARN(1, "RS600 GART already initialized.\n");
+		WARN(1, "RS600 GART already initialized\n");
 		return 0;
 	}
 	/* Initialize common gart structure */
@@ -505,7 +505,7 @@
 		~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
 
 	if (!rdev->irq.installed) {
-		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
 		WREG32(R_000040_GEN_INT_CNTL, 0);
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index a1cb783..3ca77dc 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -27,14 +27,6 @@
 /*
  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
  */
-/* Notes:
- *
- * We store bo pointer in drm_mm_node struct so we know which bo own a
- * specific node. There is no protection on the pointer, thus to make
- * sure things don't go berserk you have to access this pointer while
- * holding the global lru lock and make sure anytime you free a node you
- * reset the pointer to NULL.
- */
 
 #include "ttm/ttm_module.h"
 #include "ttm/ttm_bo_driver.h"
@@ -45,6 +37,7 @@
 #include <linux/mm.h>
 #include <linux/file.h>
 #include <linux/module.h>
+#include <asm/atomic.h>
 
 #define TTM_ASSERT_LOCKED(param)
 #define TTM_DEBUG(fmt, arg...)
@@ -452,6 +445,11 @@
 	ttm_bo_mem_put(bo, &bo->mem);
 
 	atomic_set(&bo->reserved, 0);
+
+	/*
+	 * Make processes trying to reserve really pick it up.
+	 */
+	smp_mb__after_atomic_dec();
 	wake_up_all(&bo->event_queue);
 }
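
The new barrier orders the store to bo->reserved before the wake-up. What it pairs with, reduced to a sketch (ttm's real waiters run their own reservation loop rather than a bare wait_event()):

	/* waker: clear the flag, then guarantee visibility before any
	 * waiter woken below re-checks it */
	atomic_set(&bo->reserved, 0);
	smp_mb__after_atomic_dec();
	wake_up_all(&bo->event_queue);

	/* waiter: without the barrier, a woken task could still read
	 * the stale reserved value and go back to sleep */
	wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
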
 
@@ -460,7 +458,7 @@
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_global *glob = bo->glob;
 	struct ttm_bo_driver *driver;
-	void *sync_obj;
+	void *sync_obj = NULL;
 	void *sync_obj_arg;
 	int put_count;
 	int ret;
@@ -495,17 +493,20 @@
 		spin_lock(&glob->lru_lock);
 	}
 queue:
-	sync_obj = bo->sync_obj;
-	sync_obj_arg = bo->sync_obj_arg;
 	driver = bdev->driver;
+	if (bo->sync_obj)
+		sync_obj = driver->sync_obj_ref(bo->sync_obj);
+	sync_obj_arg = bo->sync_obj_arg;
 
 	kref_get(&bo->list_kref);
 	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
 	spin_unlock(&glob->lru_lock);
 	spin_unlock(&bo->lock);
 
-	if (sync_obj)
+	if (sync_obj) {
 		driver->sync_obj_flush(sync_obj, sync_obj_arg);
+		driver->sync_obj_unref(&sync_obj);
+	}
 	schedule_delayed_work(&bdev->wq,
 			      ((HZ / 100) < 1) ? 1 : HZ / 100);
 }
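
The reshuffle above closes a race: the raw bo->sync_obj pointer read under the locks could be freed the moment they drop. The fix is the usual reference-under-lock idiom, shown here in isolation:

	void *sync_obj = NULL;

	spin_lock(&bo->lock);
	if (bo->sync_obj)
		/* take our own reference while the pointer is stable */
		sync_obj = driver->sync_obj_ref(bo->sync_obj);
	spin_unlock(&bo->lock);

	if (sync_obj) {
		/* safe to use after unlock: we pinned it above */
		driver->sync_obj_flush(sync_obj, sync_obj_arg);
		driver->sync_obj_unref(&sync_obj);
	}
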
@@ -822,7 +823,6 @@
 					bool no_wait_gpu)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_bo_global *glob = bdev->glob;
 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
 	int ret;
 
@@ -832,12 +832,6 @@
 			return ret;
 		if (mem->mm_node)
 			break;
-		spin_lock(&glob->lru_lock);
-		if (list_empty(&man->lru)) {
-			spin_unlock(&glob->lru_lock);
-			break;
-		}
-		spin_unlock(&glob->lru_lock);
 		ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
 						no_wait_reserve, no_wait_gpu);
 		if (unlikely(ret != 0))
@@ -1125,35 +1119,9 @@
 int ttm_bo_check_placement(struct ttm_buffer_object *bo,
 				struct ttm_placement *placement)
 {
-	int i;
+	BUG_ON((placement->fpfn || placement->lpfn) &&
+	       (bo->mem.num_pages > (placement->lpfn - placement->fpfn)));
 
-	if (placement->fpfn || placement->lpfn) {
-		if (bo->mem.num_pages > (placement->lpfn - placement->fpfn)) {
-			printk(KERN_ERR TTM_PFX "Page number range to small "
-				"Need %lu pages, range is [%u, %u]\n",
-				bo->mem.num_pages, placement->fpfn,
-				placement->lpfn);
-			return -EINVAL;
-		}
-	}
-	for (i = 0; i < placement->num_placement; i++) {
-		if (!capable(CAP_SYS_ADMIN)) {
-			if (placement->placement[i] & TTM_PL_FLAG_NO_EVICT) {
-				printk(KERN_ERR TTM_PFX "Need to be root to "
-					"modify NO_EVICT status.\n");
-				return -EINVAL;
-			}
-		}
-	}
-	for (i = 0; i < placement->num_busy_placement; i++) {
-		if (!capable(CAP_SYS_ADMIN)) {
-			if (placement->busy_placement[i] & TTM_PL_FLAG_NO_EVICT) {
-				printk(KERN_ERR TTM_PFX "Need to be root to "
-					"modify NO_EVICT status.\n");
-				return -EINVAL;
-			}
-		}
-	}
 	return 0;
 }
 
@@ -1176,6 +1144,10 @@
 	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	if (num_pages == 0) {
 		printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
+		if (destroy)
+			(*destroy)(bo);
+		else
+			kfree(bo);
 		return -EINVAL;
 	}
 	bo->destroy = destroy;
@@ -1369,18 +1341,9 @@
 	int ret = -EINVAL;
 	struct ttm_mem_type_manager *man;
 
-	if (type >= TTM_NUM_MEM_TYPES) {
-		printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type);
-		return ret;
-	}
-
+	BUG_ON(type >= TTM_NUM_MEM_TYPES);
 	man = &bdev->man[type];
-	if (man->has_type) {
-		printk(KERN_ERR TTM_PFX
-		       "Memory manager already initialized for type %d\n",
-		       type);
-		return ret;
-	}
+	BUG_ON(man->has_type);
 
 	ret = bdev->driver->init_mem_type(bdev, type, man);
 	if (ret)
@@ -1389,13 +1352,6 @@
 
 	ret = 0;
 	if (type != TTM_PL_SYSTEM) {
-		if (!p_size) {
-			printk(KERN_ERR TTM_PFX
-			       "Zero size memory manager type %d\n",
-			       type);
-			return ret;
-		}
-
 		ret = (*man->func->init)(man, p_size);
 		if (ret)
 			return ret;
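
Two conventions show up in the ttm_bo.c hunks above: impossible driver-side conditions become BUG_ON() instead of printk-and-return, and an init function that takes ownership of the object must release it on every failure path. A sketch of the second rule, with hypothetical names:

	struct my_obj {
		void (*destroy)(struct my_obj *obj);
	};

	static int obj_init(struct my_obj *obj, unsigned long num_pages,
			    void (*destroy)(struct my_obj *))
	{
		if (num_pages == 0) {
			/* we own obj here, so dispose of it ourselves;
			 * callers treat any error as "already freed" */
			if (destroy)
				destroy(obj);
			else
				kfree(obj);
			return -EINVAL;
		}
		obj->destroy = destroy;
		return 0;
	}
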
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index 7410c19..038e947 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -31,20 +31,29 @@
 #include "ttm/ttm_module.h"
 #include "ttm/ttm_bo_driver.h"
 #include "ttm/ttm_placement.h"
-#include <linux/jiffies.h>
+#include "drm_mm.h"
 #include <linux/slab.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/file.h>
+#include <linux/spinlock.h>
 #include <linux/module.h>
 
+/*
+ * Currently we use a spinlock for the lock, but a mutex *may* be
+ * more appropriate to reduce scheduling latency if the range manager
+ * ends up with very fragmented allocation patterns.
+ */
+
+struct ttm_range_manager {
+	struct drm_mm mm;
+	spinlock_t lock;
+};
+
 static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
 			       struct ttm_buffer_object *bo,
 			       struct ttm_placement *placement,
 			       struct ttm_mem_reg *mem)
 {
-	struct ttm_bo_global *glob = man->bdev->glob;
-	struct drm_mm *mm = man->priv;
+	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
+	struct drm_mm *mm = &rman->mm;
 	struct drm_mm_node *node = NULL;
 	unsigned long lpfn;
 	int ret;
@@ -57,19 +66,19 @@
 		if (unlikely(ret))
 			return ret;
 
-		spin_lock(&glob->lru_lock);
+		spin_lock(&rman->lock);
 		node = drm_mm_search_free_in_range(mm,
 					mem->num_pages, mem->page_alignment,
 					placement->fpfn, lpfn, 1);
 		if (unlikely(node == NULL)) {
-			spin_unlock(&glob->lru_lock);
+			spin_unlock(&rman->lock);
 			return 0;
 		}
 		node = drm_mm_get_block_atomic_range(node, mem->num_pages,
-							mem->page_alignment,
-							placement->fpfn,
-							lpfn);
-		spin_unlock(&glob->lru_lock);
+						     mem->page_alignment,
+						     placement->fpfn,
+						     lpfn);
+		spin_unlock(&rman->lock);
 	} while (node == NULL);
 
 	mem->mm_node = node;
@@ -80,12 +89,12 @@
 static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
 				struct ttm_mem_reg *mem)
 {
-	struct ttm_bo_global *glob = man->bdev->glob;
+	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
 
 	if (mem->mm_node) {
-		spin_lock(&glob->lru_lock);
+		spin_lock(&rman->lock);
 		drm_mm_put_block(mem->mm_node);
-		spin_unlock(&glob->lru_lock);
+		spin_unlock(&rman->lock);
 		mem->mm_node = NULL;
 	}
 }
@@ -93,49 +102,49 @@
 static int ttm_bo_man_init(struct ttm_mem_type_manager *man,
 			   unsigned long p_size)
 {
-	struct drm_mm *mm;
+	struct ttm_range_manager *rman;
 	int ret;
 
-	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
-	if (!mm)
+	rman = kzalloc(sizeof(*rman), GFP_KERNEL);
+	if (!rman)
 		return -ENOMEM;
 
-	ret = drm_mm_init(mm, 0, p_size);
+	ret = drm_mm_init(&rman->mm, 0, p_size);
 	if (ret) {
-		kfree(mm);
+		kfree(rman);
 		return ret;
 	}
 
-	man->priv = mm;
+	spin_lock_init(&rman->lock);
+	man->priv = rman;
 	return 0;
 }
 
 static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man)
 {
-	struct ttm_bo_global *glob = man->bdev->glob;
-	struct drm_mm *mm = man->priv;
-	int ret = 0;
+	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
+	struct drm_mm *mm = &rman->mm;
 
-	spin_lock(&glob->lru_lock);
+	spin_lock(&rman->lock);
 	if (drm_mm_clean(mm)) {
 		drm_mm_takedown(mm);
-		kfree(mm);
+		spin_unlock(&rman->lock);
+		kfree(rman);
 		man->priv = NULL;
-	} else
-		ret = -EBUSY;
-	spin_unlock(&glob->lru_lock);
-	return ret;
+		return 0;
+	}
+	spin_unlock(&rman->lock);
+	return -EBUSY;
 }
 
 static void ttm_bo_man_debug(struct ttm_mem_type_manager *man,
 			     const char *prefix)
 {
-	struct ttm_bo_global *glob = man->bdev->glob;
-	struct drm_mm *mm = man->priv;
+	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
 
-	spin_lock(&glob->lru_lock);
-	drm_mm_debug_table(mm, prefix);
-	spin_unlock(&glob->lru_lock);
+	spin_lock(&rman->lock);
+	drm_mm_debug_table(&rman->mm, prefix);
+	spin_unlock(&rman->lock);
 }
 
 const struct ttm_mem_type_manager_func ttm_bo_manager_func = {
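
The net effect of this file's conversion: the range manager stops borrowing the global LRU lock and keeps a private spinlock that guards only its drm_mm. The resulting locking unit is small enough to show whole:

	struct ttm_range_manager {
		struct drm_mm mm;	/* allocator state */
		spinlock_t lock;	/* protects mm, nothing else */
	};

	/* every drm_mm call is now bracketed by the local lock, so two
	 * managers (e.g. VRAM and GTT) no longer contend with each other
	 * or with LRU traversal */
	spin_lock(&rman->lock);
	node = drm_mm_search_free_in_range(&rman->mm, mem->num_pages,
					   mem->page_alignment,
					   placement->fpfn, lpfn, 1);
	spin_unlock(&rman->lock);
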
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index a7bab87..af789dc 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -440,10 +440,8 @@
 		return ret;
 
 	ret = be->func->bind(be, bo_mem);
-	if (ret) {
-		printk(KERN_ERR TTM_PFX "Couldn't bind backend.\n");
+	if (unlikely(ret != 0))
 		return ret;
-	}
 
 	ttm->state = tt_bound;
 
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index 9b5b4d9..3e038a3 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -235,9 +235,9 @@
 	vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) -
 		first_pfn + 1;
 
-	if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages)))
+	vsg->pages = vzalloc(sizeof(struct page *) * vsg->num_pages);
+	if (NULL == vsg->pages)
 		return -ENOMEM;
-	memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);
 	down_read(&current->mm->mmap_sem);
 	ret = get_user_pages(current, current->mm,
 			     (unsigned long)xfer->mem_addr,
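
vzalloc() exists exactly for this vmalloc()-then-memset() pair; it returns zeroed, virtually contiguous memory or NULL. Side by side:

	/* before: two steps, and the memset size must be kept in sync
	 * with the allocation by hand */
	vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages);
	if (vsg->pages)
		memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);

	/* after: one call, zeroed on success */
	vsg->pages = vzalloc(sizeof(struct page *) * vsg->num_pages);
	if (vsg->pages == NULL)
		return -ENOMEM;
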
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 51d9f9f..76954e3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -691,6 +691,7 @@
 
 	fence_rep.error = ret;
 	fence_rep.fence_seq = (uint64_t) sequence;
+	fence_rep.pad64 = 0;
 
 	user_fence_rep = (struct drm_vmw_fence_rep __user *)
 	    (unsigned long)arg->fence_rep;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 87c6e61..cceeb42 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -720,6 +720,8 @@
 			       &vmw_vram_ne_placement,
 			       false, &vmw_dmabuf_bo_free);
 	vmw_overlay_resume_all(dev_priv);
+	if (unlikely(ret != 0))
+		vfbs->buffer = NULL;
 
 	return ret;
 }
@@ -730,6 +732,9 @@
 	struct vmw_framebuffer_surface *vfbs =
 		vmw_framebuffer_to_vfbs(&vfb->base);
 
+	if (unlikely(vfbs->buffer == NULL))
+		return 0;
+
 	bo = &vfbs->buffer->base;
 	ttm_bo_unref(&bo);
 	vfbs->buffer = NULL;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index a01c47d..29113c9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -557,7 +557,7 @@
 		return -EINVAL;
 	}
 
-	dev_priv->ldu_priv = kmalloc(GFP_KERNEL, sizeof(*dev_priv->ldu_priv));
+	dev_priv->ldu_priv = kmalloc(sizeof(*dev_priv->ldu_priv), GFP_KERNEL);
 
 	if (!dev_priv->ldu_priv)
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index df2036e..f1a52f9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -585,7 +585,7 @@
 		return -ENOSYS;
 	}
 
-	overlay = kmalloc(GFP_KERNEL, sizeof(*overlay));
+	overlay = kmalloc(sizeof(*overlay), GFP_KERNEL);
 	if (!overlay)
 		return -ENOMEM;
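
This and the vmwgfx_ldu.c fix above are the same bug: kmalloc() takes (size, gfp_flags), and with the arguments swapped the code allocated GFP_KERNEL -- a small flag constant -- bytes. Both parameters are integral, so the compiler accepts the swap silently. The correct call:

	/* size first, allocation flags second */
	overlay = kmalloc(sizeof(*overlay), GFP_KERNEL);
	if (!overlay)
		return -ENOMEM;

kzalloc(sizeof(*overlay), GFP_KERNEL) would be the drop-in choice if the structure also needed zeroing.
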
 
diff --git a/drivers/gpu/stub/Kconfig b/drivers/gpu/stub/Kconfig
index 742c423..0e1edd7 100644
--- a/drivers/gpu/stub/Kconfig
+++ b/drivers/gpu/stub/Kconfig
@@ -3,6 +3,9 @@
 	depends on PCI
 	# Poulsbo stub depends on ACPI_VIDEO when ACPI is enabled
 	# but for select to work, need to select ACPI_VIDEO's dependencies, ick
+	select VIDEO_OUTPUT_CONTROL if ACPI
+	select BACKLIGHT_CLASS_DEVICE if ACPI
+	select INPUT if ACPI
 	select ACPI_VIDEO if ACPI
 	help
 	  Choose this option if you have a system that has Intel GMA500
diff --git a/drivers/hwmon/ad7414.c b/drivers/hwmon/ad7414.c
index 1e4c21f..86d822a 100644
--- a/drivers/hwmon/ad7414.c
+++ b/drivers/hwmon/ad7414.c
@@ -178,11 +178,13 @@
 {
 	struct ad7414_data *data;
 	int conf;
-	int err = 0;
+	int err;
 
 	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA |
-				     I2C_FUNC_SMBUS_READ_WORD_DATA))
+				     I2C_FUNC_SMBUS_READ_WORD_DATA)) {
+		err = -EOPNOTSUPP;
 		goto exit;
+	}
 
 	data = kzalloc(sizeof(struct ad7414_data), GFP_KERNEL);
 	if (!data) {
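
The ad7414 hunk fixes a silent success: err was initialized to 0, so the functionality check's bare goto returned 0 from the error path. The usual discipline is to leave err uninitialized and set it immediately before every jump, which also lets the compiler flag a path that forgets:

	int err;	/* no initializer: each failure path must set it */

	if (!i2c_check_functionality(client->adapter,
				     I2C_FUNC_SMBUS_BYTE_DATA |
				     I2C_FUNC_SMBUS_READ_WORD_DATA)) {
		err = -EOPNOTSUPP;	/* cause first, then bail */
		goto exit;
	}
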
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index 9e77571..87d92a5 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -1286,8 +1286,10 @@
 	init_completion(&data->auto_update_stop);
 	data->auto_update = kthread_run(adt7470_update_thread, client,
 					dev_name(data->hwmon_dev));
-	if (IS_ERR(data->auto_update))
+	if (IS_ERR(data->auto_update)) {
+		err = PTR_ERR(data->auto_update);
 		goto exit_unregister;
+	}
 
 	return 0;
 
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index aa701a1..f141a1d 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -376,10 +376,6 @@
 		}
 	}
 
-	err = sysfs_create_group(&pdev->dev.kobj, &gpio_fan_ctrl_group);
-	if (err)
-		goto err_free_gpio;
-
 	fan_data->num_ctrl = num_ctrl;
 	fan_data->ctrl = ctrl;
 	fan_data->num_speed = pdata->num_speed;
@@ -391,6 +387,10 @@
 		goto err_free_gpio;
 	}
 
+	err = sysfs_create_group(&pdev->dev.kobj, &gpio_fan_ctrl_group);
+	if (err)
+		goto err_free_gpio;
+
 	return 0;
 
 err_free_gpio:
diff --git a/drivers/hwmon/ltc4261.c b/drivers/hwmon/ltc4261.c
index 2676261..4b50601 100644
--- a/drivers/hwmon/ltc4261.c
+++ b/drivers/hwmon/ltc4261.c
@@ -82,7 +82,7 @@
 			val = i2c_smbus_read_byte_data(client, i);
 			if (unlikely(val < 0)) {
 				dev_dbg(dev,
-					"Failed to read ADC value: error %d",
+					"Failed to read ADC value: error %d\n",
 					val);
 				ret = ERR_PTR(val);
 				goto abort;
@@ -230,8 +230,7 @@
 		return -ENODEV;
 
 	if (i2c_smbus_read_byte_data(client, LTC4261_STATUS) < 0) {
-		dev_err(&client->dev, "Failed to read register %d:%02x:%02x\n",
-			adapter->id, client->addr, LTC4261_STATUS);
+		dev_err(&client->dev, "Failed to read status register\n");
 		return -ENODEV;
 	}
 
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index a5ea1bc..8aba0ba 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -130,8 +130,8 @@
 
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 	case AF_INET6:
-		read_lock(&dev_base_lock);
-		for_each_netdev(&init_net, dev) {
+		rcu_read_lock();
+		for_each_netdev_rcu(&init_net, dev) {
 			if (ipv6_chk_addr(&init_net,
 					  &((struct sockaddr_in6 *) addr)->sin6_addr,
 					  dev, 1)) {
@@ -139,7 +139,7 @@
 				break;
 			}
 		}
-		read_unlock(&dev_base_lock);
+		rcu_read_unlock();
 		break;
 #endif
 	}
@@ -200,7 +200,7 @@
 	src_in->sin_family = AF_INET;
 	src_in->sin_addr.s_addr = rt->rt_src;
 
-	if (rt->idev->dev->flags & IFF_LOOPBACK) {
+	if (rt->dst.dev->flags & IFF_LOOPBACK) {
 		ret = rdma_translate_ip((struct sockaddr *) dst_in, addr);
 		if (!ret)
 			memcpy(addr->dst_dev_addr, addr->src_dev_addr, MAX_ADDR_LEN);
@@ -208,12 +208,12 @@
 	}
 
 	/* If the device does ARP internally, return 'done' */
-	if (rt->idev->dev->flags & IFF_NOARP) {
-		rdma_copy_addr(addr, rt->idev->dev, NULL);
+	if (rt->dst.dev->flags & IFF_NOARP) {
+		rdma_copy_addr(addr, rt->dst.dev, NULL);
 		goto put;
 	}
 
-	neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->idev->dev);
+	neigh = neigh_lookup(&arp_tbl, &rt->rt_gateway, rt->dst.dev);
 	if (!neigh || !(neigh->nud_state & NUD_VALID)) {
 		neigh_event_send(rt->dst.neighbour, NULL);
 		ret = -ENODATA;
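
This hunk and the mlx4 one below make the same conversion: read-side traversal of the netdev list moves from dev_base_lock to RCU, so concurrent readers no longer serialize. The pattern, with a hypothetical match predicate:

	struct net_device *dev;

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		/* dev is only guaranteed alive inside this section;
		 * dev_hold(dev) before using it past rcu_read_unlock() */
		if (device_matches(dev))	/* hypothetical */
			break;
	}
	rcu_read_unlock();
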
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index bf3e20c..4e55a28 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -848,8 +848,8 @@
 		goto out;
 	}
 
-	read_lock(&dev_base_lock);
-	for_each_netdev(&init_net, tmp) {
+	rcu_read_lock();
+	for_each_netdev_rcu(&init_net, tmp) {
 		if (ndev && (tmp == ndev || rdma_vlan_dev_real_dev(tmp) == ndev)) {
 			gid.global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
 			vid = rdma_vlan_dev_vlan_id(tmp);
@@ -884,7 +884,7 @@
 			}
 		}
 	}
-	read_unlock(&dev_base_lock);
+	rcu_read_unlock();
 
 	for (i = 0; i < 128; ++i)
 		if (!hits[i]) {
diff --git a/drivers/input/input.c b/drivers/input/input.c
index d092ef9..7f26ca6 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -74,6 +74,7 @@
  * dev->event_lock held and interrupts disabled.
  */
 static void input_pass_event(struct input_dev *dev,
+			     struct input_handler *src_handler,
 			     unsigned int type, unsigned int code, int value)
 {
 	struct input_handler *handler;
@@ -92,6 +93,15 @@
 				continue;
 
 			handler = handle->handler;
+
+			/*
+			 * If this is the handler that injected this
+			 * particular event we want to skip it to avoid
+			 * filters firing again and again.
+			 */
+			if (handler == src_handler)
+				continue;
+
 			if (!handler->filter) {
 				if (filtered)
 					break;
@@ -121,7 +131,7 @@
 	if (test_bit(dev->repeat_key, dev->key) &&
 	    is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) {
 
-		input_pass_event(dev, EV_KEY, dev->repeat_key, 2);
+		input_pass_event(dev, NULL, EV_KEY, dev->repeat_key, 2);
 
 		if (dev->sync) {
 			/*
@@ -130,7 +140,7 @@
 			 * Otherwise assume that the driver will send
 			 * SYN_REPORT once it's done.
 			 */
-			input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
+			input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1);
 		}
 
 		if (dev->rep[REP_PERIOD])
@@ -163,6 +173,7 @@
 #define INPUT_PASS_TO_ALL	(INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE)
 
 static int input_handle_abs_event(struct input_dev *dev,
+				  struct input_handler *src_handler,
 				  unsigned int code, int *pval)
 {
 	bool is_mt_event;
@@ -206,13 +217,15 @@
 	/* Flush pending "slot" event */
 	if (is_mt_event && dev->slot != input_abs_get_val(dev, ABS_MT_SLOT)) {
 		input_abs_set_val(dev, ABS_MT_SLOT, dev->slot);
-		input_pass_event(dev, EV_ABS, ABS_MT_SLOT, dev->slot);
+		input_pass_event(dev, src_handler,
+				 EV_ABS, ABS_MT_SLOT, dev->slot);
 	}
 
 	return INPUT_PASS_TO_HANDLERS;
 }
 
 static void input_handle_event(struct input_dev *dev,
+			       struct input_handler *src_handler,
 			       unsigned int type, unsigned int code, int value)
 {
 	int disposition = INPUT_IGNORE_EVENT;
@@ -265,7 +278,8 @@
 
 	case EV_ABS:
 		if (is_event_supported(code, dev->absbit, ABS_MAX))
-			disposition = input_handle_abs_event(dev, code, &value);
+			disposition = input_handle_abs_event(dev, src_handler,
+							     code, &value);
 
 		break;
 
@@ -323,7 +337,7 @@
 		dev->event(dev, type, code, value);
 
 	if (disposition & INPUT_PASS_TO_HANDLERS)
-		input_pass_event(dev, type, code, value);
+		input_pass_event(dev, src_handler, type, code, value);
 }
 
 /**
@@ -352,7 +366,7 @@
 
 		spin_lock_irqsave(&dev->event_lock, flags);
 		add_input_randomness(type, code, value);
-		input_handle_event(dev, type, code, value);
+		input_handle_event(dev, NULL, type, code, value);
 		spin_unlock_irqrestore(&dev->event_lock, flags);
 	}
 }
@@ -382,7 +396,8 @@
 		rcu_read_lock();
 		grab = rcu_dereference(dev->grab);
 		if (!grab || grab == handle)
-			input_handle_event(dev, type, code, value);
+			input_handle_event(dev, handle->handler,
+					   type, code, value);
 		rcu_read_unlock();
 
 		spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -595,10 +610,10 @@
 		for (code = 0; code <= KEY_MAX; code++) {
 			if (is_event_supported(code, dev->keybit, KEY_MAX) &&
 			    __test_and_clear_bit(code, dev->key)) {
-				input_pass_event(dev, EV_KEY, code, 0);
+				input_pass_event(dev, NULL, EV_KEY, code, 0);
 			}
 		}
-		input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
+		input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1);
 	}
 }
 
@@ -873,9 +888,9 @@
 	    !is_event_supported(old_keycode, dev->keybit, KEY_MAX) &&
 	    __test_and_clear_bit(old_keycode, dev->key)) {
 
-		input_pass_event(dev, EV_KEY, old_keycode, 0);
+		input_pass_event(dev, NULL, EV_KEY, old_keycode, 0);
 		if (dev->sync)
-			input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
+			input_pass_event(dev, NULL, EV_SYN, SYN_REPORT, 1);
 	}
 
  out:
@@ -1565,8 +1580,7 @@
 		}							\
 	} while (0)
 
-#ifdef CONFIG_PM
-static void input_dev_reset(struct input_dev *dev, bool activate)
+static void input_dev_toggle(struct input_dev *dev, bool activate)
 {
 	if (!dev->event)
 		return;
@@ -1580,12 +1594,44 @@
 	}
 }
 
+/**
+ * input_reset_device() - reset/restore the state of an input device
+ * @dev: input device whose state needs to be reset
+ *
+ * This function tries to reset the state of an opened input device and
+ * bring the internal state and the state of the hardware in sync with
+ * each other. We mark all keys as released, restore LED state, repeat
+ * rate, etc.
+ */
+void input_reset_device(struct input_dev *dev)
+{
+	mutex_lock(&dev->mutex);
+
+	if (dev->users) {
+		input_dev_toggle(dev, true);
+
+		/*
+		 * Keys that have been pressed at suspend time are unlikely
+		 * to be still pressed when we resume.
+		 */
+		spin_lock_irq(&dev->event_lock);
+		input_dev_release_keys(dev);
+		spin_unlock_irq(&dev->event_lock);
+	}
+
+	mutex_unlock(&dev->mutex);
+}
+EXPORT_SYMBOL(input_reset_device);
+
+#ifdef CONFIG_PM
 static int input_dev_suspend(struct device *dev)
 {
 	struct input_dev *input_dev = to_input_dev(dev);
 
 	mutex_lock(&input_dev->mutex);
-	input_dev_reset(input_dev, false);
+
+	if (input_dev->users)
+		input_dev_toggle(input_dev, false);
+
 	mutex_unlock(&input_dev->mutex);
 
 	return 0;
@@ -1595,18 +1641,7 @@
 {
 	struct input_dev *input_dev = to_input_dev(dev);
 
-	mutex_lock(&input_dev->mutex);
-	input_dev_reset(input_dev, true);
-
-	/*
-	 * Keys that have been pressed at suspend time are unlikely
-	 * to be still pressed when we resume.
-	 */
-	spin_lock_irq(&input_dev->event_lock);
-	input_dev_release_keys(input_dev);
-	spin_unlock_irq(&input_dev->event_lock);
-
-	mutex_unlock(&input_dev->mutex);
+	input_reset_device(input_dev);
 
 	return 0;
 }
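
All of the input.c churn above is one idea: every event now carries the handler that injected it (NULL when it came from the device itself), and delivery skips that handler, so a filter that re-injects events -- a key remapper, say -- never loops on its own output. The core of the skip, as a sketch of the real rcu-protected walk:

	struct input_handle *handle;

	list_for_each_entry(handle, &dev->h_list, d_node) {
		if (handle->handler == src_handler)
			continue;	/* don't echo back to the injector */

		deliver_to_handler(handle, type, code, value);	/* hypothetical */
	}
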
diff --git a/drivers/input/keyboard/adp5588-keys.c b/drivers/input/keyboard/adp5588-keys.c
index b92d1cd..af45d27 100644
--- a/drivers/input/keyboard/adp5588-keys.c
+++ b/drivers/input/keyboard/adp5588-keys.c
@@ -4,7 +4,7 @@
  *		 I2C QWERTY Keypad and IO Expander
  * Bugs: Enter bugs at http://blackfin.uclinux.org/
  *
- * Copyright (C) 2008-2009 Analog Devices Inc.
+ * Copyright (C) 2008-2010 Analog Devices Inc.
  * Licensed under the GPL-2 or later.
  */
 
@@ -24,29 +24,6 @@
 
 #include <linux/i2c/adp5588.h>
 
- /* Configuration Register1 */
-#define AUTO_INC	(1 << 7)
-#define GPIEM_CFG	(1 << 6)
-#define OVR_FLOW_M	(1 << 5)
-#define INT_CFG		(1 << 4)
-#define OVR_FLOW_IEN	(1 << 3)
-#define K_LCK_IM	(1 << 2)
-#define GPI_IEN		(1 << 1)
-#define KE_IEN		(1 << 0)
-
-/* Interrupt Status Register */
-#define CMP2_INT	(1 << 5)
-#define CMP1_INT	(1 << 4)
-#define OVR_FLOW_INT	(1 << 3)
-#define K_LCK_INT	(1 << 2)
-#define GPI_INT		(1 << 1)
-#define KE_INT		(1 << 0)
-
-/* Key Lock and Event Counter Register */
-#define K_LCK_EN	(1 << 6)
-#define LCK21		0x30
-#define KEC		0xF
-
 /* Key Event Register xy */
 #define KEY_EV_PRESSED		(1 << 7)
 #define KEY_EV_MASK		(0x7F)
@@ -55,10 +32,6 @@
 
 #define KEYP_MAX_EVENT		10
 
-#define MAXGPIO			18
-#define ADP_BANK(offs)		((offs) >> 3)
-#define ADP_BIT(offs)		(1u << ((offs) & 0x7))
-
 /*
  * Early pre-4.0 silicon required the readout to be delayed by at least
  * 25ms, since the Event Counter Register is updated 25ms after the interrupt
@@ -75,7 +48,7 @@
 	const struct adp5588_gpi_map *gpimap;
 	unsigned short gpimapsize;
 #ifdef CONFIG_GPIOLIB
-	unsigned char gpiomap[MAXGPIO];
+	unsigned char gpiomap[ADP5588_MAXGPIO];
 	bool export_gpio;
 	struct gpio_chip gc;
 	struct mutex gpio_lock;	/* Protect cached dir, dat_out */
@@ -103,8 +76,8 @@
 static int adp5588_gpio_get_value(struct gpio_chip *chip, unsigned off)
 {
 	struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc);
-	unsigned int bank = ADP_BANK(kpad->gpiomap[off]);
-	unsigned int bit = ADP_BIT(kpad->gpiomap[off]);
+	unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
+	unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
 
 	return !!(adp5588_read(kpad->client, GPIO_DAT_STAT1 + bank) & bit);
 }
@@ -113,8 +86,8 @@
 				   unsigned off, int val)
 {
 	struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc);
-	unsigned int bank = ADP_BANK(kpad->gpiomap[off]);
-	unsigned int bit = ADP_BIT(kpad->gpiomap[off]);
+	unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
+	unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
 
 	mutex_lock(&kpad->gpio_lock);
 
@@ -132,8 +105,8 @@
 static int adp5588_gpio_direction_input(struct gpio_chip *chip, unsigned off)
 {
 	struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc);
-	unsigned int bank = ADP_BANK(kpad->gpiomap[off]);
-	unsigned int bit = ADP_BIT(kpad->gpiomap[off]);
+	unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
+	unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
 	int ret;
 
 	mutex_lock(&kpad->gpio_lock);
@@ -150,8 +123,8 @@
 					 unsigned off, int val)
 {
 	struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc);
-	unsigned int bank = ADP_BANK(kpad->gpiomap[off]);
-	unsigned int bit = ADP_BIT(kpad->gpiomap[off]);
+	unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
+	unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
 	int ret;
 
 	mutex_lock(&kpad->gpio_lock);
@@ -176,7 +149,7 @@
 static int __devinit adp5588_build_gpiomap(struct adp5588_kpad *kpad,
 				const struct adp5588_kpad_platform_data *pdata)
 {
-	bool pin_used[MAXGPIO];
+	bool pin_used[ADP5588_MAXGPIO];
 	int n_unused = 0;
 	int i;
 
@@ -191,7 +164,7 @@
 	for (i = 0; i < kpad->gpimapsize; i++)
 		pin_used[kpad->gpimap[i].pin - GPI_PIN_BASE] = true;
 
-	for (i = 0; i < MAXGPIO; i++)
+	for (i = 0; i < ADP5588_MAXGPIO; i++)
 		if (!pin_used[i])
 			kpad->gpiomap[n_unused++] = i;
 
@@ -234,7 +207,7 @@
 		return error;
 	}
 
-	for (i = 0; i <= ADP_BANK(MAXGPIO); i++) {
+	for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) {
 		kpad->dat_out[i] = adp5588_read(kpad->client,
 						GPIO_DAT_OUT1 + i);
 		kpad->dir[i] = adp5588_read(kpad->client, GPIO_DIR1 + i);
@@ -318,11 +291,11 @@
 
 	status = adp5588_read(client, INT_STAT);
 
-	if (status & OVR_FLOW_INT)	/* Unlikely and should never happen */
+	if (status & ADP5588_OVR_FLOW_INT)	/* Unlikely and should never happen */
 		dev_err(&client->dev, "Event Overflow Error\n");
 
-	if (status & KE_INT) {
-		ev_cnt = adp5588_read(client, KEY_LCK_EC_STAT) & KEC;
+	if (status & ADP5588_KE_INT) {
+		ev_cnt = adp5588_read(client, KEY_LCK_EC_STAT) & ADP5588_KEC;
 		if (ev_cnt) {
 			adp5588_report_events(kpad, ev_cnt);
 			input_sync(kpad->input);
@@ -360,7 +333,7 @@
 	if (pdata->en_keylock) {
 		ret |= adp5588_write(client, UNLOCK1, pdata->unlock_key1);
 		ret |= adp5588_write(client, UNLOCK2, pdata->unlock_key2);
-		ret |= adp5588_write(client, KEY_LCK_EC_STAT, K_LCK_EN);
+		ret |= adp5588_write(client, KEY_LCK_EC_STAT, ADP5588_K_LCK_EN);
 	}
 
 	for (i = 0; i < KEYP_MAX_EVENT; i++)
@@ -384,7 +357,7 @@
 	}
 
 	if (gpio_data) {
-		for (i = 0; i <= ADP_BANK(MAXGPIO); i++) {
+		for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) {
 			int pull_mask = gpio_data->pullup_dis_mask;
 
 			ret |= adp5588_write(client, GPIO_PULL1 + i,
@@ -392,11 +365,14 @@
 		}
 	}
 
-	ret |= adp5588_write(client, INT_STAT, CMP2_INT | CMP1_INT |
-					OVR_FLOW_INT | K_LCK_INT |
-					GPI_INT | KE_INT); /* Status is W1C */
+	ret |= adp5588_write(client, INT_STAT,
+				ADP5588_CMP2_INT | ADP5588_CMP1_INT |
+				ADP5588_OVR_FLOW_INT | ADP5588_K_LCK_INT |
+				ADP5588_GPI_INT | ADP5588_KE_INT); /* Status is W1C */
 
-	ret |= adp5588_write(client, CFG, INT_CFG | OVR_FLOW_IEN | KE_IEN);
+	ret |= adp5588_write(client, CFG, ADP5588_INT_CFG |
+					  ADP5588_OVR_FLOW_IEN |
+					  ADP5588_KE_IEN);
 
 	if (ret < 0) {
 		dev_err(&client->dev, "Write Error\n");
diff --git a/drivers/input/keyboard/atkbd.c b/drivers/input/keyboard/atkbd.c
index d358ef8..11478eb 100644
--- a/drivers/input/keyboard/atkbd.c
+++ b/drivers/input/keyboard/atkbd.c
@@ -63,6 +63,10 @@
 module_param_named(extra, atkbd_extra, bool, 0);
 MODULE_PARM_DESC(extra, "Enable extra LEDs and keys on IBM RapidAcces, EzKey and similar keyboards");
 
+static bool atkbd_terminal;
+module_param_named(terminal, atkbd_terminal, bool, 0);
+MODULE_PARM_DESC(terminal, "Enable break codes on an IBM Terminal keyboard connected via AT/PS2");
+
 /*
  * Scancode to keycode tables. These are just the default setting, and
  * are loadable via a userland utility.
@@ -136,7 +140,8 @@
 #define ATKBD_CMD_ENABLE	0x00f4
 #define ATKBD_CMD_RESET_DIS	0x00f5	/* Reset to defaults and disable */
 #define ATKBD_CMD_RESET_DEF	0x00f6	/* Reset to defaults */
-#define ATKBD_CMD_SETALL_MBR	0x00fa
+#define ATKBD_CMD_SETALL_MB	0x00f8	/* Set all keys to give break codes */
+#define ATKBD_CMD_SETALL_MBR	0x00fa  /* ... and repeat */
 #define ATKBD_CMD_RESET_BAT	0x02ff
 #define ATKBD_CMD_RESEND	0x00fe
 #define ATKBD_CMD_EX_ENABLE	0x10ea
@@ -764,6 +769,11 @@
 		}
 	}
 
+	if (atkbd_terminal) {
+		ps2_command(ps2dev, param, ATKBD_CMD_SETALL_MB);
+		return 3;
+	}
+
 	if (target_set != 3)
 		return 2;
 
diff --git a/drivers/input/misc/pcf8574_keypad.c b/drivers/input/misc/pcf8574_keypad.c
index 4b42ffc..d1583ae 100644
--- a/drivers/input/misc/pcf8574_keypad.c
+++ b/drivers/input/misc/pcf8574_keypad.c
@@ -127,14 +127,6 @@
 	idev->id.product = 0x0001;
 	idev->id.version = 0x0100;
 
-	input_set_drvdata(idev, lp);
-
-	ret = input_register_device(idev);
-	if (ret) {
-		dev_err(&client->dev, "input_register_device() failed\n");
-		goto fail_register;
-	}
-
 	lp->laststate = read_state(lp);
 
 	ret = request_threaded_irq(client->irq, NULL, pcf8574_kp_irq_handler,
@@ -142,16 +134,21 @@
 				   DRV_NAME, lp);
 	if (ret) {
 		dev_err(&client->dev, "IRQ %d is not free\n", client->irq);
-		goto fail_irq;
+		goto fail_free_device;
+	}
+
+	ret = input_register_device(idev);
+	if (ret) {
+		dev_err(&client->dev, "input_register_device() failed\n");
+		goto fail_free_irq;
 	}
 
 	i2c_set_clientdata(client, lp);
 	return 0;
 
- fail_irq:
-	input_unregister_device(idev);
- fail_register:
-	input_set_drvdata(idev, NULL);
+ fail_free_irq:
+	free_irq(client->irq, lp);
+ fail_free_device:
 	input_free_device(idev);
  fail_allocate:
 	kfree(lp);
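
The pcf8574 reorder follows the standard probe discipline: everything the interrupt handler and users need (state snapshot, IRQ) must exist before input_register_device() makes the device visible, and failures unwind in exact reverse order. Schematically (the IRQ flags are an assumption here; they sit in context not shown above):

	ret = request_threaded_irq(client->irq, NULL, pcf8574_kp_irq_handler,
				   IRQF_TRIGGER_LOW | IRQF_ONESHOT,
				   DRV_NAME, lp);
	if (ret)
		goto fail_free_device;

	ret = input_register_device(idev);	/* last step: device goes live */
	if (ret)
		goto fail_free_irq;

	return 0;

 fail_free_irq:
	free_irq(client->irq, lp);	/* undo in reverse order */
 fail_free_device:
	input_free_device(idev);
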
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index ed7ad74..a5475b5 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -351,6 +351,17 @@
 		},
 	},
 	{
+		/*
+		 * Most (all?) VAIOs do not have external PS/2 ports, nor
+		 * do they implement active multiplexing properly, and
+		 * MUX discovery usually messes up the keyboard/touchpad.
+		 */
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+			DMI_MATCH(DMI_BOARD_NAME, "VAIO"),
+		},
+	},
+	{
 		/* Amoi M636/A737 */
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "Amoi Electronics CO.,LTD."),
diff --git a/drivers/input/tablet/acecad.c b/drivers/input/tablet/acecad.c
index aea9a93..d94f7e9 100644
--- a/drivers/input/tablet/acecad.c
+++ b/drivers/input/tablet/acecad.c
@@ -229,12 +229,13 @@
 
 	err = input_register_device(acecad->input);
 	if (err)
-		goto fail2;
+		goto fail3;
 
 	usb_set_intfdata(intf, acecad);
 
 	return 0;
 
+ fail3:	usb_free_urb(acecad->irq);
  fail2:	usb_free_coherent(dev, 8, acecad->data, acecad->data_dma);
  fail1: input_free_device(input_dev);
 	kfree(acecad);
diff --git a/drivers/isdn/hardware/mISDN/mISDNinfineon.c b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
index e90db88..bc0529a 100644
--- a/drivers/isdn/hardware/mISDN/mISDNinfineon.c
+++ b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
@@ -420,7 +420,7 @@
 		break;
 	case INF_NICCY:
 		val = inl((u32)hw->cfg.start + NICCY_IRQ_CTRL_REG);
-		val |= NICCY_IRQ_ENABLE;;
+		val |= NICCY_IRQ_ENABLE;
 		outl(val, (u32)hw->cfg.start + NICCY_IRQ_CTRL_REG);
 		break;
 	case INF_SCT_1:
@@ -924,7 +924,7 @@
 		mISDNipac_init(&card->ipac, card);
 
 	if (card->ipac.isac.dch.dev.Bprotocols == 0)
-		goto error_setup;;
+		goto error_setup;
 
 	err = mISDN_register_device(&card->ipac.isac.dch.dev,
 		&card->pdev->dev, card->name);
diff --git a/drivers/isdn/hardware/mISDN/mISDNisar.c b/drivers/isdn/hardware/mISDN/mISDNisar.c
index 38eb314..d13fa5b 100644
--- a/drivers/isdn/hardware/mISDN/mISDNisar.c
+++ b/drivers/isdn/hardware/mISDN/mISDNisar.c
@@ -264,7 +264,7 @@
 			while (noc) {
 				val = le16_to_cpu(*sp++);
 				*mp++ = val >> 8;
-				*mp++ = val & 0xFF;;
+				*mp++ = val & 0xFF;
 				noc--;
 			}
 			spin_lock_irqsave(isar->hwlock, flags);
diff --git a/drivers/isdn/hisax/avm_pci.c b/drivers/isdn/hisax/avm_pci.c
index fcf4ed1..0e66af1 100644
--- a/drivers/isdn/hisax/avm_pci.c
+++ b/drivers/isdn/hisax/avm_pci.c
@@ -314,7 +314,7 @@
 			bcs->hw.hdlc.ctrl.sr.cmd |= HDLC_CMD_XME;
 	}
 	if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO))
-		debugl1(cs, "hdlc_fill_fifo %d/%ld", count, bcs->tx_skb->len);
+		debugl1(cs, "hdlc_fill_fifo %d/%u", count, bcs->tx_skb->len);
 	p = bcs->tx_skb->data;
 	ptr = (u_int *)p;
 	skb_pull(bcs->tx_skb, count);
diff --git a/drivers/isdn/hisax/callc.c b/drivers/isdn/hisax/callc.c
index f150330..37e685e 100644
--- a/drivers/isdn/hisax/callc.c
+++ b/drivers/isdn/hisax/callc.c
@@ -65,7 +65,7 @@
 	return (struct IsdnCardState *) 0;
 }
 
-static void
+static __attribute__((format(printf, 3, 4))) void
 link_debug(struct Channel *chanp, int direction, char *fmt, ...)
 {
 	va_list args;
@@ -1068,7 +1068,7 @@
 	return 0;
 }
 
-static void
+static __attribute__((format(printf, 2, 3))) void
 callc_debug(struct FsmInst *fi, char *fmt, ...)
 {
 	va_list args;
diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c
index b133378..c110f86 100644
--- a/drivers/isdn/hisax/config.c
+++ b/drivers/isdn/hisax/config.c
@@ -1917,7 +1917,7 @@
 #ifdef CONFIG_PCI
 #include <linux/pci.h>
 
-static struct pci_device_id hisax_pci_tbl[] __devinitdata = {
+static struct pci_device_id hisax_pci_tbl[] __devinitdata __used = {
 #ifdef CONFIG_HISAX_FRITZPCI
 	{PCI_VDEVICE(AVM,      PCI_DEVICE_ID_AVM_A1)			},
 #endif
diff --git a/drivers/isdn/hisax/hfc_2bds0.c b/drivers/isdn/hisax/hfc_2bds0.c
index 7250f56..a16459a 100644
--- a/drivers/isdn/hisax/hfc_2bds0.c
+++ b/drivers/isdn/hisax/hfc_2bds0.c
@@ -292,7 +292,7 @@
 	}
 	count = GetFreeFifoBytes_B(bcs);
 	if (cs->debug & L1_DEB_HSCX)
-		debugl1(cs, "hfc_fill_fifo %d count(%ld/%d),%lx",
+		debugl1(cs, "hfc_fill_fifo %d count(%u/%d),%lx",
 			bcs->channel, bcs->tx_skb->len,
 			count, current->state);
 	if (count < bcs->tx_skb->len) {
@@ -719,7 +719,7 @@
 	}
 	count = GetFreeFifoBytes_D(cs);
 	if (cs->debug & L1_DEB_ISAC)
-		debugl1(cs, "hfc_fill_Dfifo count(%ld/%d)",
+		debugl1(cs, "hfc_fill_Dfifo count(%u/%d)",
 			cs->tx_skb->len, count);
 	if (count < cs->tx_skb->len) {
 		if (cs->debug & L1_DEB_ISAC)
diff --git a/drivers/isdn/hisax/hfc_2bs0.c b/drivers/isdn/hisax/hfc_2bs0.c
index b1f6481..626f85d 100644
--- a/drivers/isdn/hisax/hfc_2bs0.c
+++ b/drivers/isdn/hisax/hfc_2bs0.c
@@ -282,7 +282,7 @@
 	    count += cs->hw.hfc.fifosize; 
 	} /* L1_MODE_TRANS */
 	if (cs->debug & L1_DEB_HSCX)
-		debugl1(cs, "hfc_fill_fifo %d count(%ld/%d)",
+		debugl1(cs, "hfc_fill_fifo %d count(%u/%d)",
 			bcs->channel, bcs->tx_skb->len,
 			count);
 	if (count < bcs->tx_skb->len) {
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index 917cc84..3147020 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -550,7 +550,7 @@
 		count += D_FIFO_SIZE;	/* count now contains available bytes */
 
 	if (cs->debug & L1_DEB_ISAC)
-		debugl1(cs, "hfcpci_fill_Dfifo count(%ld/%d)",
+		debugl1(cs, "hfcpci_fill_Dfifo count(%u/%d)",
 			cs->tx_skb->len, count);
 	if (count < cs->tx_skb->len) {
 		if (cs->debug & L1_DEB_ISAC)
@@ -681,7 +681,7 @@
 		count += B_FIFO_SIZE;	/* count now contains available bytes */
 
 	if (cs->debug & L1_DEB_HSCX)
-		debugl1(cs, "hfcpci_fill_fifo %d count(%ld/%d),%lx",
+		debugl1(cs, "hfcpci_fill_fifo %d count(%u/%d),%lx",
 			bcs->channel, bcs->tx_skb->len,
 			count, current->state);
 
diff --git a/drivers/isdn/hisax/hfc_sx.c b/drivers/isdn/hisax/hfc_sx.c
index 5aa138e..1235b71 100644
--- a/drivers/isdn/hisax/hfc_sx.c
+++ b/drivers/isdn/hisax/hfc_sx.c
@@ -179,7 +179,7 @@
 	  count += fifo_size;	/* count now contains available bytes */
 
 	if (cs->debug & L1_DEB_ISAC_FIFO)
-	  debugl1(cs, "hfcsx_write_fifo %d count(%ld/%d)",
+	  debugl1(cs, "hfcsx_write_fifo %d count(%u/%d)",
 		  fifo, skb->len, count);
 	if (count < skb->len) {
 	  if (cs->debug & L1_DEB_ISAC_FIFO)
@@ -265,7 +265,7 @@
 	  count++;
 
 	  if (cs->debug & L1_DEB_ISAC_FIFO)
-	    debugl1(cs, "hfcsx_read_fifo %d count %ld)",
+	    debugl1(cs, "hfcsx_read_fifo %d count %u)",
 		    fifo, count);
 
 	  if ((count > fifo_size) || (count < 4)) {
@@ -986,7 +986,7 @@
 				default:
 					spin_unlock_irqrestore(&cs->lock, flags);
 					if (cs->debug & L1_DEB_WARN)
-						debugl1(cs, "hfcsx_l1hw loop invalid %4lx", arg);
+						debugl1(cs, "hfcsx_l1hw loop invalid %4lx", (unsigned long)arg);
 					return;
 			}
 			cs->hw.hfcsx.trm |= 0x80;	/* enable IOM-loop */
diff --git a/drivers/isdn/hisax/hisax.h b/drivers/isdn/hisax/hisax.h
index 32ab392..de1c669 100644
--- a/drivers/isdn/hisax/hisax.h
+++ b/drivers/isdn/hisax/hisax.h
@@ -1286,7 +1286,9 @@
 
 int HiSax_command(isdn_ctrl * ic);
 int HiSax_writebuf_skb(int id, int chan, int ack, struct sk_buff *skb);
+__attribute__((format(printf, 3, 4)))
 void HiSax_putstatus(struct IsdnCardState *cs, char *head, char *fmt, ...);
+__attribute__((format(printf, 3, 0)))
 void VHiSax_putstatus(struct IsdnCardState *cs, char *head, char *fmt, va_list args);
 void HiSax_reportcard(int cardnr, int sel);
 int QuickHex(char *txt, u_char * p, int cnt);
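
These annotations are what allow gcc to catch the %ld-versus-%u and stray-argument mistakes corrected throughout the hisax files in this series: format(printf, 3, 4) marks argument 3 as the format string with checking starting at argument 4, and format(printf, 3, 0) covers the va_list variant, which has no checkable arguments. In miniature:

	__attribute__((format(printf, 2, 3)))
	void debugl1(struct IsdnCardState *cs, char *fmt, ...);

	/* skb->len is unsigned int, so gcc now rejects the old "%ld"
	 * here and accepts "%u" */
	debugl1(cs, "hfc_fill_fifo count(%u/%d)", skb->len, count);
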
diff --git a/drivers/isdn/hisax/ipacx.c b/drivers/isdn/hisax/ipacx.c
index 751b25f..3321041 100644
--- a/drivers/isdn/hisax/ipacx.c
+++ b/drivers/isdn/hisax/ipacx.c
@@ -717,7 +717,7 @@
 
         bc = bc ? 1 : 0;  // in case bc is greater than 1
 	if (cs->debug & L1_DEB_HSCX)
-		debugl1(cs, "mode_bch() switch B-% mode %d chan %d", hscx, mode, bc);
+		debugl1(cs, "mode_bch() switch B-%d mode %d chan %d", hscx, mode, bc);
 	bcs->mode = mode;
 	bcs->channel = bc;
   
diff --git a/drivers/isdn/hisax/isar.c b/drivers/isdn/hisax/isar.c
index 40b914b..d4cce33 100644
--- a/drivers/isdn/hisax/isar.c
+++ b/drivers/isdn/hisax/isar.c
@@ -189,7 +189,7 @@
 static int
 isar_load_firmware(struct IsdnCardState *cs, u_char __user *buf)
 {
-	int ret, size, cnt, debug;
+	int cfu_ret, ret, size, cnt, debug;
 	u_char len, nom, noc;
 	u_short sadr, left, *sp;
 	u_char __user *p = buf;
@@ -212,9 +212,10 @@
 	cs->debug &= ~(L1_DEB_HSCX | L1_DEB_HSCX_FIFO);
 #endif
 	
-	if ((ret = copy_from_user(&size, p, sizeof(int)))) {
-		printk(KERN_ERR"isar_load_firmware copy_from_user ret %d\n", ret);
-		return ret;
+	cfu_ret = copy_from_user(&size, p, sizeof(int));
+	if (cfu_ret) {
+		printk(KERN_ERR"isar_load_firmware copy_from_user ret %d\n", cfu_ret);
+		return -EFAULT;
 	}
 	p += sizeof(int);
 	printk(KERN_DEBUG"isar_load_firmware size: %d\n", size);
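
copy_from_user() returns the number of bytes left uncopied, not an errno, so the old "return ret" handed callers a positive byte count where a negative error was expected. The fixed idiom:

	if (copy_from_user(&size, p, sizeof(int)))
		return -EFAULT;	/* nonzero means bytes were left uncopied */
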
@@ -953,7 +954,7 @@
 			break;
 		case PSEV_GSTN_CLR:
 			if (cs->debug & L1_DEB_HSCX)
-				debugl1(cs, "pump stev GSTN CLEAR", devt);
+				debugl1(cs, "pump stev GSTN CLEAR");
 			break;
 		default:
 			if (cs->debug & L1_DEB_HSCX)
@@ -1268,7 +1269,7 @@
 static void
 ftimer_handler(struct BCState *bcs) {
 	if (bcs->cs->debug)
-		debugl1(bcs->cs, "ftimer flags %04x",
+		debugl1(bcs->cs, "ftimer flags %04lx",
 			bcs->Flag);
 	test_and_clear_bit(BC_FLG_FTI_RUN, &bcs->Flag);
 	if (test_and_clear_bit(BC_FLG_LL_CONN, &bcs->Flag)) {
@@ -1427,8 +1428,8 @@
 					&bcs->hw.isar.reg->Flags))
 					bcs->hw.isar.dpath = 1;
 				else {
-					printk(KERN_WARNING"isar modeisar analog funktions only with DP1\n");
-					debugl1(cs, "isar modeisar analog funktions only with DP1");
+					printk(KERN_WARNING"isar modeisar analog functions only with DP1\n");
+					debugl1(cs, "isar modeisar analog functions only with DP1");
 					return(1);
 				}
 				break;
@@ -1748,7 +1749,7 @@
 	struct BCState *bcs;
 
 	if (cs->debug & L1_DEB_HSCX)
-		debugl1(cs, "isar_auxcmd cmd/ch %x/%d", ic->command, ic->arg);
+		debugl1(cs, "isar_auxcmd cmd/ch %x/%ld", ic->command, ic->arg);
 	switch (ic->command) {
 		case (ISDN_CMD_FAXCMD):
 			bcs = cs->channel[ic->arg].bcs;
diff --git a/drivers/isdn/hisax/isdnl1.h b/drivers/isdn/hisax/isdnl1.h
index 172ad4c..425d861 100644
--- a/drivers/isdn/hisax/isdnl1.h
+++ b/drivers/isdn/hisax/isdnl1.h
@@ -21,6 +21,7 @@
 #define B_XMTBUFREADY	1
 #define B_ACKPENDING	2
 
+__attribute__((format(printf, 2, 3)))
 void debugl1(struct IsdnCardState *cs, char *fmt, ...);
 void DChannel_proc_xmt(struct IsdnCardState *cs);
 void DChannel_proc_rcv(struct IsdnCardState *cs);
diff --git a/drivers/isdn/hisax/isdnl3.c b/drivers/isdn/hisax/isdnl3.c
index fd0b643..ad291f2 100644
--- a/drivers/isdn/hisax/isdnl3.c
+++ b/drivers/isdn/hisax/isdnl3.c
@@ -66,7 +66,7 @@
 	"EV_TIMEOUT",
 };
 
-static void
+static __attribute__((format(printf, 2, 3))) void
 l3m_debug(struct FsmInst *fi, char *fmt, ...)
 {
 	va_list args;
diff --git a/drivers/isdn/hisax/netjet.c b/drivers/isdn/hisax/netjet.c
index 5d7f0f2..644891e 100644
--- a/drivers/isdn/hisax/netjet.c
+++ b/drivers/isdn/hisax/netjet.c
@@ -254,7 +254,7 @@
 		val >>= 1;
 	}
 	if (bcs->cs->debug & L1_DEB_HSCX)
-		debugl1(bcs->cs,"tiger make_raw: in %ld out %d.%d",
+		debugl1(bcs->cs,"tiger make_raw: in %u out %d.%d",
 			bcs->tx_skb->len, s_cnt, bitcnt);
 	if (bitcnt) {
 		while (8>bitcnt++) {
@@ -361,7 +361,7 @@
 		val >>= 1;
 	}
 	if (bcs->cs->debug & L1_DEB_HSCX)
-		debugl1(bcs->cs,"tiger make_raw_56k: in %ld out %d.%d",
+		debugl1(bcs->cs,"tiger make_raw_56k: in %u out %d.%d",
 			bcs->tx_skb->len, s_cnt, bitcnt);
 	if (bitcnt) {
 		while (8>bitcnt++) {
@@ -612,7 +612,7 @@
 	if (!bcs->tx_skb)
 		return;
 	if (bcs->cs->debug & L1_DEB_HSCX)
-		debugl1(bcs->cs,"tiger fill_dma1: c%d %4x", bcs->channel,
+		debugl1(bcs->cs,"tiger fill_dma1: c%d %4lx", bcs->channel,
 			bcs->Flag);
 	if (test_and_set_bit(BC_FLG_BUSY, &bcs->Flag))
 		return;
@@ -625,7 +625,7 @@
 			return;		
 	};
 	if (bcs->cs->debug & L1_DEB_HSCX)
-		debugl1(bcs->cs,"tiger fill_dma2: c%d %4x", bcs->channel,
+		debugl1(bcs->cs,"tiger fill_dma2: c%d %4lx", bcs->channel,
 			bcs->Flag);
 	if (test_and_clear_bit(BC_FLG_NOFRAME, &bcs->Flag)) {
 		write_raw(bcs, bcs->hw.tiger.sendp, bcs->hw.tiger.free);
@@ -667,7 +667,7 @@
 		write_raw(bcs, p, cnt);
 	}
 	if (bcs->cs->debug & L1_DEB_HSCX)
-		debugl1(bcs->cs,"tiger fill_dma3: c%d %4x", bcs->channel,
+		debugl1(bcs->cs,"tiger fill_dma3: c%d %4lx", bcs->channel,
 			bcs->Flag);
 }
 
diff --git a/drivers/isdn/hisax/st5481_d.c b/drivers/isdn/hisax/st5481_d.c
index b7876b1..4408263 100644
--- a/drivers/isdn/hisax/st5481_d.c
+++ b/drivers/isdn/hisax/st5481_d.c
@@ -167,7 +167,8 @@
 	{ST_L1_F8, EV_IND_RSY,           l1_ignore},
 };
 
-static void l1m_debug(struct FsmInst *fi, char *fmt, ...)
+static __attribute__((format(printf, 2, 3)))
+void l1m_debug(struct FsmInst *fi, char *fmt, ...)
 {
 	va_list args;
 	char buf[256];
@@ -269,7 +270,8 @@
 	"EV_DOUT_UNDERRUN",
 };
 
-static void dout_debug(struct FsmInst *fi, char *fmt, ...)
+static __attribute__((format(printf, 2, 3)))
+void dout_debug(struct FsmInst *fi, char *fmt, ...)
 {
 	va_list args;
 	char buf[256];
diff --git a/drivers/isdn/i4l/isdn_concap.c b/drivers/isdn/i4l/isdn_concap.c
index 46048e5..d568689 100644
--- a/drivers/isdn/i4l/isdn_concap.c
+++ b/drivers/isdn/i4l/isdn_concap.c
@@ -61,7 +61,7 @@
 static int isdn_concap_dl_connect_req(struct concap_proto *concap)
 {
 	struct net_device *ndev = concap -> net_dev;
-	isdn_net_local *lp = (isdn_net_local *) netdev_priv(ndev);
+	isdn_net_local *lp = netdev_priv(ndev);
 	int ret;
 	IX25DEBUG( "isdn_concap_dl_connect_req: %s \n", ndev -> name);
 
diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
index 26d44c3..afeede7 100644
--- a/drivers/isdn/i4l/isdn_net.c
+++ b/drivers/isdn/i4l/isdn_net.c
@@ -827,7 +827,7 @@
 void
 isdn_net_hangup(struct net_device *d)
 {
-	isdn_net_local *lp = (isdn_net_local *) netdev_priv(d);
+	isdn_net_local *lp = netdev_priv(d);
 	isdn_ctrl cmd;
 #ifdef CONFIG_ISDN_X25
 	struct concap_proto *cprot = lp->netdev->cprot;
@@ -1052,7 +1052,7 @@
 {
 	isdn_net_dev *nd;
 	isdn_net_local *slp;
-	isdn_net_local *lp = (isdn_net_local *) netdev_priv(ndev);
+	isdn_net_local *lp = netdev_priv(ndev);
 	int retv = NETDEV_TX_OK;
 
 	if (((isdn_net_local *) netdev_priv(ndev))->master) {
@@ -1116,7 +1116,7 @@
 static void
 isdn_net_adjust_hdr(struct sk_buff *skb, struct net_device *dev)
 {
-	isdn_net_local *lp = (isdn_net_local *) netdev_priv(dev);
+	isdn_net_local *lp = netdev_priv(dev);
 	if (!skb)
 		return;
 	if (lp->p_encap == ISDN_NET_ENCAP_ETHER) {
@@ -1131,7 +1131,7 @@
 
 static void isdn_net_tx_timeout(struct net_device * ndev)
 {
-	isdn_net_local *lp = (isdn_net_local *) netdev_priv(ndev);
+	isdn_net_local *lp = netdev_priv(ndev);
 
 	printk(KERN_WARNING "isdn_tx_timeout dev %s dialstate %d\n", ndev->name, lp->dialstate);
 	if (!lp->dialstate){
@@ -1165,7 +1165,7 @@
 static netdev_tx_t
 isdn_net_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
-	isdn_net_local *lp = (isdn_net_local *) netdev_priv(ndev);
+	isdn_net_local *lp = netdev_priv(ndev);
 #ifdef CONFIG_ISDN_X25
 	struct concap_proto * cprot = lp -> netdev -> cprot;
 /* At this point hard_start_xmit() passes control to the encapsulation
@@ -1347,7 +1347,7 @@
 static struct net_device_stats *
 isdn_net_get_stats(struct net_device *dev)
 {
-	isdn_net_local *lp = (isdn_net_local *) netdev_priv(dev);
+	isdn_net_local *lp = netdev_priv(dev);
 	return &lp->stats;
 }
 
@@ -1426,7 +1426,7 @@
 static int
 isdn_ciscohdlck_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
-	isdn_net_local *lp = (isdn_net_local *) netdev_priv(dev);
+	isdn_net_local *lp = netdev_priv(dev);
 	unsigned long len = 0;
 	unsigned long expires = 0;
 	int tmp = 0;
@@ -1493,7 +1493,7 @@
 static int isdn_net_ioctl(struct net_device *dev,
 			  struct ifreq *ifr, int cmd)
 {
-	isdn_net_local *lp = (isdn_net_local *) netdev_priv(dev);
+	isdn_net_local *lp = netdev_priv(dev);
 
 	switch (lp->p_encap) {
 #ifdef CONFIG_ISDN_PPP
@@ -1786,7 +1786,7 @@
 static void
 isdn_net_receive(struct net_device *ndev, struct sk_buff *skb)
 {
-	isdn_net_local *lp = (isdn_net_local *) netdev_priv(ndev);
+	isdn_net_local *lp = netdev_priv(ndev);
 	isdn_net_local *olp = lp;	/* original 'lp' */
 #ifdef CONFIG_ISDN_X25
 	struct concap_proto *cprot = lp -> netdev -> cprot;
@@ -1800,7 +1800,7 @@
 		 * handle master's statistics and hangup-timeout
 		 */
 		ndev = lp->master;
-		lp = (isdn_net_local *) netdev_priv(ndev);
+		lp = netdev_priv(ndev);
 		lp->stats.rx_packets++;
 		lp->stats.rx_bytes += skb->len;
 	}
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index fe824e0..9e8162c 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -1147,15 +1147,14 @@
 	}
 
 	if (is->pass_filter
-	    && sk_run_filter(skb, is->pass_filter, is->pass_len) == 0) {
+	    && sk_run_filter(skb, is->pass_filter) == 0) {
 		if (is->debug & 0x2)
 			printk(KERN_DEBUG "IPPP: inbound frame filtered.\n");
 		kfree_skb(skb);
 		return;
 	}
 	if (!(is->active_filter
-	      && sk_run_filter(skb, is->active_filter,
-	                       is->active_len) == 0)) {
+	      && sk_run_filter(skb, is->active_filter) == 0)) {
 		if (is->debug & 0x2)
 			printk(KERN_DEBUG "IPPP: link-active filter: reseting huptimer.\n");
 		lp->huptimer = 0;
@@ -1221,7 +1220,7 @@
 	struct ippp_struct *ipt,*ipts;
 	int slot, retval = NETDEV_TX_OK;
 
-	mlp = (isdn_net_local *) netdev_priv(netdev);
+	mlp = netdev_priv(netdev);
 	nd = mlp->netdev;       /* get master lp */
 
 	slot = mlp->ppp_slot;
@@ -1294,15 +1293,14 @@
 	}
 
 	if (ipt->pass_filter
-	    && sk_run_filter(skb, ipt->pass_filter, ipt->pass_len) == 0) {
+	    && sk_run_filter(skb, ipt->pass_filter) == 0) {
 		if (ipt->debug & 0x4)
 			printk(KERN_DEBUG "IPPP: outbound frame filtered.\n");
 		kfree_skb(skb);
 		goto unlock;
 	}
 	if (!(ipt->active_filter
-	      && sk_run_filter(skb, ipt->active_filter,
-		               ipt->active_len) == 0)) {
+	      && sk_run_filter(skb, ipt->active_filter) == 0)) {
 		if (ipt->debug & 0x4)
 			printk(KERN_DEBUG "IPPP: link-active filter: reseting huptimer.\n");
 		lp->huptimer = 0;
@@ -1492,9 +1490,9 @@
 	}
 	
 	drop |= is->pass_filter
-	        && sk_run_filter(skb, is->pass_filter, is->pass_len) == 0;
+	        && sk_run_filter(skb, is->pass_filter) == 0;
 	drop |= is->active_filter
-	        && sk_run_filter(skb, is->active_filter, is->active_len) == 0;
+	        && sk_run_filter(skb, is->active_filter) == 0;
 	
 	skb_push(skb, IPPP_MAX_HEADER - 4);
 	return drop;
@@ -1985,7 +1983,7 @@
 {
 	struct ppp_stats __user *res = ifr->ifr_data;
 	struct ppp_stats t;
-	isdn_net_local *lp = (isdn_net_local *) netdev_priv(dev);
+	isdn_net_local *lp = netdev_priv(dev);
 
 	if (!access_ok(VERIFY_WRITE, res, sizeof(struct ppp_stats)))
 		return -EFAULT;
@@ -2024,7 +2022,7 @@
 {
 	int error=0;
 	int len;
-	isdn_net_local *lp = (isdn_net_local *) netdev_priv(dev);
+	isdn_net_local *lp = netdev_priv(dev);
 
 
 	if (lp->p_encap != ISDN_NET_ENCAP_SYNCPPP)
@@ -2091,7 +2089,7 @@
 
 	sdev = lp->slave;
 	while (sdev) {
-		isdn_net_local *mlp = (isdn_net_local *) netdev_priv(sdev);
+		isdn_net_local *mlp = netdev_priv(sdev);
 		if (!(mlp->flags & ISDN_NET_CONNECTED))
 			break;
 		sdev = mlp->slave;
@@ -2099,7 +2097,7 @@
 	if (!sdev)
 		return 2;
 
-	isdn_net_dial_req((isdn_net_local *) netdev_priv(sdev));
+	isdn_net_dial_req(netdev_priv(sdev));
 	return 0;
 #else
 	return -1;
@@ -2122,7 +2120,7 @@
 
 	sdev = lp->slave;
 	while (sdev) {
-		isdn_net_local *mlp = (isdn_net_local *) netdev_priv(sdev);
+		isdn_net_local *mlp = netdev_priv(sdev);
 
 		if (mlp->slave) { /* find last connected link in chain */
 			isdn_net_local *nlp = ISDN_SLAVE_PRIV(mlp);
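
The sk_run_filter() hunks above track an upstream API change: the filter
length argument was dropped (a checked filter program always terminates with
a return instruction, so the run loop needs no explicit count). The new call
pattern, mirroring the pass/active filter checks above:

	/* drop the frame if a pass filter is installed and rejects it */
	if (is->pass_filter && sk_run_filter(skb, is->pass_filter) == 0) {
		kfree_skb(skb);
		return;
	}
	/* reset the hangup timer unless an active filter rejects the frame */
	if (!(is->active_filter && sk_run_filter(skb, is->active_filter) == 0))
		lp->huptimer = 0;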
diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
index 2e847a9..f2b5bab 100644
--- a/drivers/isdn/icn/icn.c
+++ b/drivers/isdn/icn/icn.c
@@ -1627,7 +1627,7 @@
 static int __init icn_init(void)
 {
 	char *p;
-	char rev[10];
+	char rev[20];
 
 	memset(&dev, 0, sizeof(icn_dev));
 	dev.memaddr = (membase & 0x0ffc000);
@@ -1637,9 +1637,10 @@
 	spin_lock_init(&dev.devlock);
 
 	if ((p = strchr(revision, ':'))) {
-		strcpy(rev, p + 1);
+		strlcpy(rev, p + 1, sizeof(rev));
 		p = strchr(rev, '$');
-		*p = 0;
+		if (p)
+			*p = 0;
 	} else
 		strcpy(rev, " ??? ");
 	printk(KERN_NOTICE "ICN-ISDN-driver Rev%smem=0x%08lx\n", rev,
diff --git a/drivers/isdn/mISDN/layer1.c b/drivers/isdn/mISDN/layer1.c
index ac4aa18..5cc7c00 100644
--- a/drivers/isdn/mISDN/layer1.c
+++ b/drivers/isdn/mISDN/layer1.c
@@ -99,12 +99,16 @@
 l1m_debug(struct FsmInst *fi, char *fmt, ...)
 {
 	struct layer1 *l1 = fi->userdata;
+	struct va_format vaf;
 	va_list va;
 
 	va_start(va, fmt);
-	printk(KERN_DEBUG "%s: ", dev_name(&l1->dch->dev.dev));
-	vprintk(fmt, va);
-	printk("\n");
+
+	vaf.fmt = fmt;
+	vaf.va = &va;
+
+	printk(KERN_DEBUG "%s: %pV\n", dev_name(&l1->dch->dev.dev), &vaf);
+
 	va_end(va);
 }
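
The conversion above (repeated for layer2.c and tei.c below) folds a
three-call printk sequence into one call using the %pV extension, which
formats a struct va_format in place; this keeps the prefix, the message and
the newline in a single atomic log line instead of three interleavable ones.
The pattern in isolation:

	static void example_debug(const char *prefix, const char *fmt, ...)
	{
		struct va_format vaf;
		va_list va;

		va_start(va, fmt);
		vaf.fmt = fmt;
		vaf.va = &va;
		/* %pV expands the (fmt, va) pair inside this one printk */
		printk(KERN_DEBUG "%s: %pV\n", prefix, &vaf);
		va_end(va);
	}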
 
diff --git a/drivers/isdn/mISDN/layer2.c b/drivers/isdn/mISDN/layer2.c
index c973717..4ae7505 100644
--- a/drivers/isdn/mISDN/layer2.c
+++ b/drivers/isdn/mISDN/layer2.c
@@ -95,14 +95,20 @@
 l2m_debug(struct FsmInst *fi, char *fmt, ...)
 {
 	struct layer2 *l2 = fi->userdata;
+	struct va_format vaf;
 	va_list va;
 
 	if (!(*debug & DEBUG_L2_FSM))
 		return;
+
 	va_start(va, fmt);
-	printk(KERN_DEBUG "l2 (sapi %d tei %d): ", l2->sapi, l2->tei);
-	vprintk(fmt, va);
-	printk("\n");
+
+	vaf.fmt = fmt;
+	vaf.va = &va;
+
+	printk(KERN_DEBUG "l2 (sapi %d tei %d): %pV\n",
+	       l2->sapi, l2->tei, &vaf);
+
 	va_end(va);
 }
 
diff --git a/drivers/isdn/mISDN/tei.c b/drivers/isdn/mISDN/tei.c
index 1b85d9d..687c9b6 100644
--- a/drivers/isdn/mISDN/tei.c
+++ b/drivers/isdn/mISDN/tei.c
@@ -79,14 +79,19 @@
 da_debug(struct FsmInst *fi, char *fmt, ...)
 {
 	struct manager	*mgr = fi->userdata;
+	struct va_format vaf;
 	va_list va;
 
 	if (!(*debug & DEBUG_L2_TEIFSM))
 		return;
+
 	va_start(va, fmt);
-	printk(KERN_DEBUG "mgr(%d): ", mgr->ch.st->dev->id);
-	vprintk(fmt, va);
-	printk("\n");
+
+	vaf.fmt = fmt;
+	vaf.va = &va;
+
+	printk(KERN_DEBUG "mgr(%d): %pV\n", mgr->ch.st->dev->id, &vaf);
+
 	va_end(va);
 }
 
@@ -223,14 +228,20 @@
 tei_debug(struct FsmInst *fi, char *fmt, ...)
 {
 	struct teimgr	*tm = fi->userdata;
+	struct va_format vaf;
 	va_list va;
 
 	if (!(*debug & DEBUG_L2_TEIFSM))
 		return;
+
 	va_start(va, fmt);
-	printk(KERN_DEBUG "sapi(%d) tei(%d): ", tm->l2->sapi, tm->l2->tei);
-	vprintk(fmt, va);
-	printk("\n");
+
+	vaf.fmt = fmt;
+	vaf.va = &va;
+
+	printk(KERN_DEBUG "sapi(%d) tei(%d): %pV\n",
+	       tm->l2->sapi, tm->l2->tei, &vaf);
+
 	va_end(va);
 }
 
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index cc2a88d..77b8fd2 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -10,7 +10,7 @@
 if NEW_LEDS
 
 config LEDS_CLASS
-	tristate "LED Class Support"
+	bool "LED Class Support"
 	help
 	  This option enables the led sysfs class in /sys/class/leds.  You'll
 	  need this to do anything useful with LEDs.  If unsure, say N.
@@ -176,6 +176,24 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called leds-lp3944.
 
+config LEDS_LP5521
+	tristate "LED Support for N.S. LP5521 LED driver chip"
+	depends on LEDS_CLASS && I2C
+	help
+	  If you say yes here you get support for the National Semiconductor
+	  LP5521 LED driver. It is a 3-channel chip with programmable engines.
+	  The driver provides direct control via the LED class and an
+	  interface for programming the engines.
+
+config LEDS_LP5523
+	tristate "LED Support for N.S. LP5523 LED driver chip"
+	depends on LEDS_CLASS && I2C
+	help
+	  If you say yes here you get support for the National Semiconductor
+	  LP5523 LED driver. It is a 9-channel chip with programmable engines.
+	  The driver provides direct control via the LED class and an
+	  interface for programming the engines.
+
 config LEDS_CLEVO_MAIL
 	tristate "Mail LED on Clevo notebook"
 	depends on X86 && SERIO_I8042 && DMI
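
Both chips are configured through platform data, as the drivers added below
show: a per-channel led_config array (chan_nr, led_current, max_current),
num_channels, a clock_mode, and optional setup_resources/enable hooks. A
hedged board-file sketch for a one-channel LP5521; the values are
illustrative and the exact struct layout lives in <linux/leds-lp5521.h>:

	static struct lp5521_led_config board_led_config[] = {
		{
			.chan_nr	= 0,	/* red channel */
			.led_current	= 50,
			.max_current	= 130,
		},
	};

	static struct lp5521_platform_data board_lp5521_pdata = {
		.led_config	= board_led_config,
		.num_channels	= ARRAY_SIZE(board_led_config),
		.clock_mode	= LP5521_CLOCK_EXT,
	};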
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 9c96db4..aae6989 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -23,6 +23,8 @@
 obj-$(CONFIG_LEDS_PCA9532)		+= leds-pca9532.o
 obj-$(CONFIG_LEDS_GPIO)			+= leds-gpio.o
 obj-$(CONFIG_LEDS_LP3944)		+= leds-lp3944.o
+obj-$(CONFIG_LEDS_LP5521)		+= leds-lp5521.o
+obj-$(CONFIG_LEDS_LP5523)		+= leds-lp5523.o
 obj-$(CONFIG_LEDS_CLEVO_MAIL)		+= leds-clevo-mail.o
 obj-$(CONFIG_LEDS_HP6XX)		+= leds-hp6xx.o
 obj-$(CONFIG_LEDS_FSG)			+= leds-fsg.o
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 2606600..211e21f 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -81,6 +81,79 @@
 	__ATTR_NULL,
 };
 
+static void led_timer_function(unsigned long data)
+{
+	struct led_classdev *led_cdev = (void *)data;
+	unsigned long brightness;
+	unsigned long delay;
+
+	if (!led_cdev->blink_delay_on || !led_cdev->blink_delay_off) {
+		led_set_brightness(led_cdev, LED_OFF);
+		return;
+	}
+
+	brightness = led_get_brightness(led_cdev);
+	if (!brightness) {
+		/* Time to switch the LED on. */
+		brightness = led_cdev->blink_brightness;
+		delay = led_cdev->blink_delay_on;
+	} else {
+		/* Store the current brightness value to be able
+		 * to restore it when the delay_off period is over.
+		 */
+		led_cdev->blink_brightness = brightness;
+		brightness = LED_OFF;
+		delay = led_cdev->blink_delay_off;
+	}
+
+	led_set_brightness(led_cdev, brightness);
+
+	mod_timer(&led_cdev->blink_timer, jiffies + msecs_to_jiffies(delay));
+}
+
+static void led_stop_software_blink(struct led_classdev *led_cdev)
+{
+	/* deactivate previous settings */
+	del_timer_sync(&led_cdev->blink_timer);
+	led_cdev->blink_delay_on = 0;
+	led_cdev->blink_delay_off = 0;
+}
+
+static void led_set_software_blink(struct led_classdev *led_cdev,
+				   unsigned long delay_on,
+				   unsigned long delay_off)
+{
+	int current_brightness;
+
+	current_brightness = led_get_brightness(led_cdev);
+	if (current_brightness)
+		led_cdev->blink_brightness = current_brightness;
+	if (!led_cdev->blink_brightness)
+		led_cdev->blink_brightness = led_cdev->max_brightness;
+
+	if (delay_on == led_cdev->blink_delay_on &&
+	    delay_off == led_cdev->blink_delay_off)
+		return;
+
+	led_stop_software_blink(led_cdev);
+
+	led_cdev->blink_delay_on = delay_on;
+	led_cdev->blink_delay_off = delay_off;
+
+	/* never on - don't blink */
+	if (!delay_on)
+		return;
+
+	/* never off - just set to brightness */
+	if (!delay_off) {
+		led_set_brightness(led_cdev, led_cdev->blink_brightness);
+		return;
+	}
+
+	mod_timer(&led_cdev->blink_timer, jiffies + 1);
+}
+
 /**
  * led_classdev_suspend - suspend an led_classdev.
  * @led_cdev: the led_classdev to suspend.
@@ -148,6 +221,10 @@
 
 	led_update_brightness(led_cdev);
 
+	init_timer(&led_cdev->blink_timer);
+	led_cdev->blink_timer.function = led_timer_function;
+	led_cdev->blink_timer.data = (unsigned long)led_cdev;
+
 #ifdef CONFIG_LEDS_TRIGGERS
 	led_trigger_set_default(led_cdev);
 #endif
@@ -157,7 +234,6 @@
 
 	return 0;
 }
-
 EXPORT_SYMBOL_GPL(led_classdev_register);
 
 /**
@@ -175,6 +251,9 @@
 	up_write(&led_cdev->trigger_lock);
 #endif
 
+	/* Stop blinking */
+	led_brightness_set(led_cdev, LED_OFF);
+
 	device_unregister(led_cdev->dev);
 
 	down_write(&leds_list_lock);
@@ -183,6 +262,30 @@
 }
 EXPORT_SYMBOL_GPL(led_classdev_unregister);
 
+void led_blink_set(struct led_classdev *led_cdev,
+		   unsigned long *delay_on,
+		   unsigned long *delay_off)
+{
+	if (led_cdev->blink_set &&
+	    led_cdev->blink_set(led_cdev, delay_on, delay_off))
+		return;
+
+	/* blink with 1 Hz as default if nothing specified */
+	if (!*delay_on && !*delay_off)
+		*delay_on = *delay_off = 500;
+
+	led_set_software_blink(led_cdev, *delay_on, *delay_off);
+}
+EXPORT_SYMBOL(led_blink_set);
+
+void led_brightness_set(struct led_classdev *led_cdev,
+			enum led_brightness brightness)
+{
+	led_stop_software_blink(led_cdev);
+	led_cdev->brightness_set(led_cdev, brightness);
+}
+EXPORT_SYMBOL(led_brightness_set);
+
 static int __init leds_init(void)
 {
 	leds_class = class_create(THIS_MODULE, "leds");
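
The new led_blink_set() helper first offers the requested blink to the
driver's hardware blink_set() hook and only falls back to the timer-based
software blink above; passing *delay_on == *delay_off == 0 asks for the
500ms/500ms (1 Hz) default. A sketch of a caller, e.g. from a trigger (the
function name is illustrative):

	static void example_trig_activate(struct led_classdev *led_cdev)
	{
		unsigned long delay_on = 0, delay_off = 0;

		/* 0/0 lets the core pick the 1 Hz default */
		led_blink_set(led_cdev, &delay_on, &delay_off);
	}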
diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
index f1c00db..c41eb61 100644
--- a/drivers/leds/led-triggers.c
+++ b/drivers/leds/led-triggers.c
@@ -113,7 +113,7 @@
 		if (led_cdev->trigger->deactivate)
 			led_cdev->trigger->deactivate(led_cdev);
 		led_cdev->trigger = NULL;
-		led_set_brightness(led_cdev, LED_OFF);
+		led_brightness_set(led_cdev, LED_OFF);
 	}
 	if (trigger) {
 		write_lock_irqsave(&trigger->leddev_list_lock, flags);
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index ea57e05..4d9fa38 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -316,7 +316,7 @@
 
 static int __init gpio_led_init(void)
 {
-	int ret;
+	int ret = 0;
 
 #ifdef CONFIG_LEDS_GPIO_PLATFORM	
 	ret = platform_driver_register(&gpio_led_driver);
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
new file mode 100644
index 0000000..3782f31f
--- /dev/null
+++ b/drivers/leds/leds-lp5521.c
@@ -0,0 +1,821 @@
+/*
+ * LP5521 LED chip driver.
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ *
+ * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/ctype.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/leds.h>
+#include <linux/leds-lp5521.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+
+#define LP5521_PROGRAM_LENGTH		32	/* in bytes */
+
+#define LP5521_MAX_LEDS			3	/* Maximum number of LEDs */
+#define LP5521_MAX_ENGINES		3	/* Maximum number of engines */
+
+#define LP5521_ENG_MASK_BASE		0x30	/* 00110000 */
+#define LP5521_ENG_STATUS_MASK		0x07	/* 00000111 */
+
+#define LP5521_CMD_LOAD			0x15	/* 00010101 */
+#define LP5521_CMD_RUN			0x2a	/* 00101010 */
+#define LP5521_CMD_DIRECT		0x3f	/* 00111111 */
+#define LP5521_CMD_DISABLED		0x00	/* 00000000 */
+
+/* Registers */
+#define LP5521_REG_ENABLE		0x00
+#define LP5521_REG_OP_MODE		0x01
+#define LP5521_REG_R_PWM		0x02
+#define LP5521_REG_G_PWM		0x03
+#define LP5521_REG_B_PWM		0x04
+#define LP5521_REG_R_CURRENT		0x05
+#define LP5521_REG_G_CURRENT		0x06
+#define LP5521_REG_B_CURRENT		0x07
+#define LP5521_REG_CONFIG		0x08
+#define LP5521_REG_R_CHANNEL_PC		0x09
+#define LP5521_REG_G_CHANNEL_PC		0x0A
+#define LP5521_REG_B_CHANNEL_PC		0x0B
+#define LP5521_REG_STATUS		0x0C
+#define LP5521_REG_RESET		0x0D
+#define LP5521_REG_GPO			0x0E
+#define LP5521_REG_R_PROG_MEM		0x10
+#define LP5521_REG_G_PROG_MEM		0x30
+#define LP5521_REG_B_PROG_MEM		0x50
+
+#define LP5521_PROG_MEM_BASE		LP5521_REG_R_PROG_MEM
+#define LP5521_PROG_MEM_SIZE		0x20
+
+/* Base register to set LED current */
+#define LP5521_REG_LED_CURRENT_BASE	LP5521_REG_R_CURRENT
+
+/* Base register to set the brightness */
+#define LP5521_REG_LED_PWM_BASE		LP5521_REG_R_PWM
+
+/* Bits in ENABLE register */
+#define LP5521_MASTER_ENABLE		0x40	/* Chip master enable */
+#define LP5521_LOGARITHMIC_PWM		0x80	/* Logarithmic PWM adjustment */
+#define LP5521_EXEC_RUN			0x2A
+
+/* Bits in CONFIG register */
+#define LP5521_PWM_HF			0x40	/* PWM: 0 = 256Hz, 1 = 558Hz */
+#define LP5521_PWRSAVE_EN		0x20	/* 1 = Power save mode */
+#define LP5521_CP_MODE_OFF		0	/* Charge pump (CP) off */
+#define LP5521_CP_MODE_BYPASS		8	/* CP forced to bypass mode */
+#define LP5521_CP_MODE_1X5		0x10	/* CP forced to 1.5x mode */
+#define LP5521_CP_MODE_AUTO		0x18	/* Automatic mode selection */
+#define LP5521_R_TO_BATT		4	/* R out: 0 = CP, 1 = Vbat */
+#define LP5521_CLK_SRC_EXT		0	/* Ext-clk source (CLK_32K) */
+#define LP5521_CLK_INT			1	/* Internal clock */
+#define LP5521_CLK_AUTO			2	/* Automatic clock selection */
+
+/* Status */
+#define LP5521_EXT_CLK_USED		0x08
+
+struct lp5521_engine {
+	const struct attribute_group *attributes;
+	int		id;
+	u8		mode;
+	u8		prog_page;
+	u8		engine_mask;
+};
+
+struct lp5521_led {
+	int			id;
+	u8			chan_nr;
+	u8			led_current;
+	u8			max_current;
+	struct led_classdev	cdev;
+	struct work_struct	brightness_work;
+	u8			brightness;
+};
+
+struct lp5521_chip {
+	struct lp5521_platform_data *pdata;
+	struct mutex		lock; /* Serialize control */
+	struct i2c_client	*client;
+	struct lp5521_engine	engines[LP5521_MAX_ENGINES];
+	struct lp5521_led	leds[LP5521_MAX_LEDS];
+	u8			num_channels;
+	u8			num_leds;
+};
+
+#define cdev_to_led(c)		container_of(c, struct lp5521_led, cdev)
+#define engine_to_lp5521(eng)	container_of((eng), struct lp5521_chip, \
+						engines[(eng)->id - 1])
+#define led_to_lp5521(led)	container_of((led), struct lp5521_chip, \
+						leds[(led)->id])
+
+static void lp5521_led_brightness_work(struct work_struct *work);
+
+static inline int lp5521_write(struct i2c_client *client, u8 reg, u8 value)
+{
+	return i2c_smbus_write_byte_data(client, reg, value);
+}
+
+static int lp5521_read(struct i2c_client *client, u8 reg, u8 *buf)
+{
+	s32 ret;
+
+	ret = i2c_smbus_read_byte_data(client, reg);
+	if (ret < 0)
+		return -EIO;
+
+	*buf = ret;
+	return 0;
+}
+
+static int lp5521_set_engine_mode(struct lp5521_engine *engine, u8 mode)
+{
+	struct lp5521_chip *chip = engine_to_lp5521(engine);
+	struct i2c_client *client = chip->client;
+	int ret;
+	u8 engine_state;
+
+	/* Only transitions between RUN and DIRECT mode are handled here */
+	if (mode == LP5521_CMD_LOAD)
+		return 0;
+
+	if (mode == LP5521_CMD_DISABLED)
+		mode = LP5521_CMD_DIRECT;
+
+	ret = lp5521_read(client, LP5521_REG_OP_MODE, &engine_state);
+
+	/* set mode only for this engine */
+	engine_state &= ~(engine->engine_mask);
+	mode &= engine->engine_mask;
+	engine_state |= mode;
+	ret |= lp5521_write(client, LP5521_REG_OP_MODE, engine_state);
+
+	return ret;
+}
+
+static int lp5521_load_program(struct lp5521_engine *eng, const u8 *pattern)
+{
+	struct lp5521_chip *chip = engine_to_lp5521(eng);
+	struct i2c_client *client = chip->client;
+	int ret;
+	int addr;
+	u8 mode;
+
+	/* move current engine to direct mode and remember the state */
+	ret = lp5521_set_engine_mode(eng, LP5521_CMD_DIRECT);
+	usleep_range(1000, 10000);
+	ret |= lp5521_read(client, LP5521_REG_OP_MODE, &mode);
+
+	/* For loading, switch all the engines to load mode */
+	lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_DIRECT);
+	usleep_range(1000, 10000);
+	lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_LOAD);
+	usleep_range(1000, 10000);
+
+	addr = LP5521_PROG_MEM_BASE + eng->prog_page * LP5521_PROG_MEM_SIZE;
+	i2c_smbus_write_i2c_block_data(client,
+				addr,
+				LP5521_PROG_MEM_SIZE,
+				pattern);
+
+	ret |= lp5521_write(client, LP5521_REG_OP_MODE, mode);
+	return ret;
+}
+
+static int lp5521_set_led_current(struct lp5521_chip *chip, int led, u8 curr)
+{
+	return lp5521_write(chip->client,
+		    LP5521_REG_LED_CURRENT_BASE + chip->leds[led].chan_nr,
+		    curr);
+}
+
+static void lp5521_init_engine(struct lp5521_chip *chip,
+			const struct attribute_group *attr_group)
+{
+	int i;
+	for (i = 0; i < ARRAY_SIZE(chip->engines); i++) {
+		chip->engines[i].id = i + 1;
+		chip->engines[i].engine_mask = LP5521_ENG_MASK_BASE >> (i * 2);
+		chip->engines[i].prog_page = i;
+		chip->engines[i].attributes = &attr_group[i];
+	}
+}
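+
+/*
+ * Illustrative mapping (not in the original source): LP5521_ENG_MASK_BASE
+ * is 0x30, so engine 1 owns OP_MODE bits 5:4 (mask 0x30), engine 2 bits
+ * 3:2 (0x30 >> 2 == 0x0c) and engine 3 bits 1:0 (0x30 >> 4 == 0x03).
+ * Each 2-bit field selects disabled (00), load (01), run (10) or direct
+ * (11) for that engine alone, matching the LP5521_CMD_* encodings above.
+ */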
+
+static int lp5521_configure(struct i2c_client *client,
+			const struct attribute_group *attr_group)
+{
+	struct lp5521_chip *chip = i2c_get_clientdata(client);
+	int ret;
+
+	lp5521_init_engine(chip, attr_group);
+
+	lp5521_write(client, LP5521_REG_RESET, 0xff);
+
+	usleep_range(10000, 20000);
+
+	/* Set all PWMs to direct control mode */
+	ret = lp5521_write(client, LP5521_REG_OP_MODE, 0x3F);
+
+	/* Enable auto-powersave, set charge pump to auto, red to battery */
+	ret |= lp5521_write(client, LP5521_REG_CONFIG,
+		LP5521_PWRSAVE_EN | LP5521_CP_MODE_AUTO | LP5521_R_TO_BATT);
+
+	/* Initialize all channels' PWM to zero -> LEDs off */
+	ret |= lp5521_write(client, LP5521_REG_R_PWM, 0);
+	ret |= lp5521_write(client, LP5521_REG_G_PWM, 0);
+	ret |= lp5521_write(client, LP5521_REG_B_PWM, 0);
+
+	/* Engines are set to run state when OP_MODE enables them */
+	ret |= lp5521_write(client, LP5521_REG_ENABLE,
+			LP5521_MASTER_ENABLE | LP5521_LOGARITHMIC_PWM |
+			LP5521_EXEC_RUN);
+	/* enable takes 500us */
+	usleep_range(500, 20000);
+
+	return ret;
+}
+
+static int lp5521_run_selftest(struct lp5521_chip *chip, char *buf)
+{
+	int ret;
+	u8 status;
+
+	ret = lp5521_read(chip->client, LP5521_REG_STATUS, &status);
+	if (ret < 0)
+		return ret;
+
+	/* Check that ext clock is really in use if requested */
+	if (chip->pdata && chip->pdata->clock_mode == LP5521_CLOCK_EXT)
+		if ((status & LP5521_EXT_CLK_USED) == 0)
+			return -EIO;
+	return 0;
+}
+
+static void lp5521_set_brightness(struct led_classdev *cdev,
+			     enum led_brightness brightness)
+{
+	struct lp5521_led *led = cdev_to_led(cdev);
+	led->brightness = (u8)brightness;
+	schedule_work(&led->brightness_work);
+}
+
+static void lp5521_led_brightness_work(struct work_struct *work)
+{
+	struct lp5521_led *led = container_of(work,
+					      struct lp5521_led,
+					      brightness_work);
+	struct lp5521_chip *chip = led_to_lp5521(led);
+	struct i2c_client *client = chip->client;
+
+	mutex_lock(&chip->lock);
+	lp5521_write(client, LP5521_REG_LED_PWM_BASE + led->chan_nr,
+		led->brightness);
+	mutex_unlock(&chip->lock);
+}
+
+/* Detect the chip by setting its ENABLE register and reading it back. */
+static int lp5521_detect(struct i2c_client *client)
+{
+	int ret;
+	u8 buf;
+
+	ret = lp5521_write(client, LP5521_REG_ENABLE,
+			LP5521_MASTER_ENABLE | LP5521_LOGARITHMIC_PWM);
+	if (ret)
+		return ret;
+	usleep_range(1000, 10000);
+	ret = lp5521_read(client, LP5521_REG_ENABLE, &buf);
+	if (ret)
+		return ret;
+	if (buf != (LP5521_MASTER_ENABLE | LP5521_LOGARITHMIC_PWM))
+		return -ENODEV;
+
+	return 0;
+}
+
+/* Set engine mode and create appropriate sysfs attributes, if required. */
+static int lp5521_set_mode(struct lp5521_engine *engine, u8 mode)
+{
+	struct lp5521_chip *chip = engine_to_lp5521(engine);
+	struct i2c_client *client = chip->client;
+	struct device *dev = &client->dev;
+	int ret = 0;
+
+	/* if already in that mode, do nothing (except for run) */
+	if (mode == engine->mode && mode != LP5521_CMD_RUN)
+		return 0;
+
+	if (mode == LP5521_CMD_RUN) {
+		ret = lp5521_set_engine_mode(engine, LP5521_CMD_RUN);
+	} else if (mode == LP5521_CMD_LOAD) {
+		lp5521_set_engine_mode(engine, LP5521_CMD_DISABLED);
+		lp5521_set_engine_mode(engine, LP5521_CMD_LOAD);
+
+		ret = sysfs_create_group(&dev->kobj, engine->attributes);
+		if (ret)
+			return ret;
+	} else if (mode == LP5521_CMD_DISABLED) {
+		lp5521_set_engine_mode(engine, LP5521_CMD_DISABLED);
+	}
+
+	/* remove load attribute from sysfs if not in load mode */
+	if (engine->mode == LP5521_CMD_LOAD && mode != LP5521_CMD_LOAD)
+		sysfs_remove_group(&dev->kobj, engine->attributes);
+
+	engine->mode = mode;
+
+	return ret;
+}
+
+static int lp5521_do_store_load(struct lp5521_engine *engine,
+				const char *buf, size_t len)
+{
+	struct lp5521_chip *chip = engine_to_lp5521(engine);
+	struct i2c_client *client = chip->client;
+	int  ret, nrchars, offset = 0, i = 0;
+	char c[3];
+	unsigned cmd;
+	u8 pattern[LP5521_PROGRAM_LENGTH] = {0};
+
+	while ((offset < len - 1) && (i < LP5521_PROGRAM_LENGTH)) {
+		/* separate sscanfs because the length limit works only for %s */
+		ret = sscanf(buf + offset, "%2s%n ", c, &nrchars);
+		ret = sscanf(c, "%2x", &cmd);
+		if (ret != 1)
+			goto fail;
+		pattern[i] = (u8)cmd;
+
+		offset += nrchars;
+		i++;
+	}
+
+	/* Each instruction is 16 bits long. Check that the length is even */
+	if (i % 2)
+		goto fail;
+
+	mutex_lock(&chip->lock);
+	ret = lp5521_load_program(engine, pattern);
+	mutex_unlock(&chip->lock);
+
+	if (ret) {
+		dev_err(&client->dev, "failed loading pattern\n");
+		return ret;
+	}
+
+	return len;
+fail:
+	dev_err(&client->dev, "wrong pattern format\n");
+	return -EINVAL;
+}
+
+static ssize_t store_engine_load(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t len, int nr)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lp5521_chip *chip = i2c_get_clientdata(client);
+	return lp5521_do_store_load(&chip->engines[nr - 1], buf, len);
+}
+
+#define store_load(nr)							\
+static ssize_t store_engine##nr##_load(struct device *dev,		\
+				     struct device_attribute *attr,	\
+				     const char *buf, size_t len)	\
+{									\
+	return store_engine_load(dev, attr, buf, len, nr);		\
+}
+store_load(1)
+store_load(2)
+store_load(3)
+
+static ssize_t show_engine_mode(struct device *dev,
+				struct device_attribute *attr,
+				char *buf, int nr)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lp5521_chip *chip = i2c_get_clientdata(client);
+	switch (chip->engines[nr - 1].mode) {
+	case LP5521_CMD_RUN:
+		return sprintf(buf, "run\n");
+	case LP5521_CMD_LOAD:
+		return sprintf(buf, "load\n");
+	case LP5521_CMD_DISABLED:
+	default:
+		return sprintf(buf, "disabled\n");
+	}
+}
+
+#define show_mode(nr)							\
+static ssize_t show_engine##nr##_mode(struct device *dev,		\
+				    struct device_attribute *attr,	\
+				    char *buf)				\
+{									\
+	return show_engine_mode(dev, attr, buf, nr);			\
+}
+show_mode(1)
+show_mode(2)
+show_mode(3)
+
+static ssize_t store_engine_mode(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf, size_t len, int nr)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lp5521_chip *chip = i2c_get_clientdata(client);
+	struct lp5521_engine *engine = &chip->engines[nr - 1];
+	mutex_lock(&chip->lock);
+
+	if (!strncmp(buf, "run", 3))
+		lp5521_set_mode(engine, LP5521_CMD_RUN);
+	else if (!strncmp(buf, "load", 4))
+		lp5521_set_mode(engine, LP5521_CMD_LOAD);
+	else if (!strncmp(buf, "disabled", 8))
+		lp5521_set_mode(engine, LP5521_CMD_DISABLED);
+
+	mutex_unlock(&chip->lock);
+	return len;
+}
+
+#define store_mode(nr)							\
+static ssize_t store_engine##nr##_mode(struct device *dev,		\
+				     struct device_attribute *attr,	\
+				     const char *buf, size_t len)	\
+{									\
+	return store_engine_mode(dev, attr, buf, len, nr);		\
+}
+store_mode(1)
+store_mode(2)
+store_mode(3)
+
+static ssize_t show_max_current(struct device *dev,
+			    struct device_attribute *attr,
+			    char *buf)
+{
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	struct lp5521_led *led = cdev_to_led(led_cdev);
+
+	return sprintf(buf, "%d\n", led->max_current);
+}
+
+static ssize_t show_current(struct device *dev,
+			    struct device_attribute *attr,
+			    char *buf)
+{
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	struct lp5521_led *led = cdev_to_led(led_cdev);
+
+	return sprintf(buf, "%d\n", led->led_current);
+}
+
+static ssize_t store_current(struct device *dev,
+			     struct device_attribute *attr,
+			     const char *buf, size_t len)
+{
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	struct lp5521_led *led = cdev_to_led(led_cdev);
+	struct lp5521_chip *chip = led_to_lp5521(led);
+	ssize_t ret;
+	unsigned long curr;
+
+	if (strict_strtoul(buf, 0, &curr))
+		return -EINVAL;
+
+	if (curr > led->max_current)
+		return -EINVAL;
+
+	mutex_lock(&chip->lock);
+	ret = lp5521_set_led_current(chip, led->id, curr);
+	mutex_unlock(&chip->lock);
+
+	if (ret < 0)
+		return ret;
+
+	led->led_current = (u8)curr;
+
+	return len;
+}
+
+static ssize_t lp5521_selftest(struct device *dev,
+			       struct device_attribute *attr,
+			       char *buf)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lp5521_chip *chip = i2c_get_clientdata(client);
+	int ret;
+
+	mutex_lock(&chip->lock);
+	ret = lp5521_run_selftest(chip, buf);
+	mutex_unlock(&chip->lock);
+	return sprintf(buf, "%s\n", ret ? "FAIL" : "OK");
+}
+
+/* led class device attributes */
+static DEVICE_ATTR(led_current, S_IRUGO | S_IWUGO, show_current, store_current);
+static DEVICE_ATTR(max_current, S_IRUGO, show_max_current, NULL);
+
+static struct attribute *lp5521_led_attributes[] = {
+	&dev_attr_led_current.attr,
+	&dev_attr_max_current.attr,
+	NULL,
+};
+
+static struct attribute_group lp5521_led_attribute_group = {
+	.attrs = lp5521_led_attributes
+};
+
+/* device attributes */
+static DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUGO,
+		   show_engine1_mode, store_engine1_mode);
+static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUGO,
+		   show_engine2_mode, store_engine2_mode);
+static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUGO,
+		   show_engine3_mode, store_engine3_mode);
+static DEVICE_ATTR(engine1_load, S_IWUGO, NULL, store_engine1_load);
+static DEVICE_ATTR(engine2_load, S_IWUGO, NULL, store_engine2_load);
+static DEVICE_ATTR(engine3_load, S_IWUGO, NULL, store_engine3_load);
+static DEVICE_ATTR(selftest, S_IRUGO, lp5521_selftest, NULL);
+
+static struct attribute *lp5521_attributes[] = {
+	&dev_attr_engine1_mode.attr,
+	&dev_attr_engine2_mode.attr,
+	&dev_attr_engine3_mode.attr,
+	&dev_attr_selftest.attr,
+	NULL
+};
+
+static struct attribute *lp5521_engine1_attributes[] = {
+	&dev_attr_engine1_load.attr,
+	NULL
+};
+
+static struct attribute *lp5521_engine2_attributes[] = {
+	&dev_attr_engine2_load.attr,
+	NULL
+};
+
+static struct attribute *lp5521_engine3_attributes[] = {
+	&dev_attr_engine3_load.attr,
+	NULL
+};
+
+static const struct attribute_group lp5521_group = {
+	.attrs = lp5521_attributes,
+};
+
+static const struct attribute_group lp5521_engine_group[] = {
+	{.attrs = lp5521_engine1_attributes },
+	{.attrs = lp5521_engine2_attributes },
+	{.attrs = lp5521_engine3_attributes },
+};
+
+static int lp5521_register_sysfs(struct i2c_client *client)
+{
+	struct device *dev = &client->dev;
+	return sysfs_create_group(&dev->kobj, &lp5521_group);
+}
+
+static void lp5521_unregister_sysfs(struct i2c_client *client)
+{
+	struct lp5521_chip *chip = i2c_get_clientdata(client);
+	struct device *dev = &client->dev;
+	int i;
+
+	sysfs_remove_group(&dev->kobj, &lp5521_group);
+
+	for (i = 0; i < ARRAY_SIZE(chip->engines); i++) {
+		if (chip->engines[i].mode == LP5521_CMD_LOAD)
+			sysfs_remove_group(&dev->kobj,
+					chip->engines[i].attributes);
+	}
+
+	for (i = 0; i < chip->num_leds; i++)
+		sysfs_remove_group(&chip->leds[i].cdev.dev->kobj,
+				&lp5521_led_attribute_group);
+}
+
+static int __init lp5521_init_led(struct lp5521_led *led,
+				struct i2c_client *client,
+				int chan, struct lp5521_platform_data *pdata)
+{
+	struct device *dev = &client->dev;
+	char name[32];
+	int res;
+
+	if (chan >= LP5521_MAX_LEDS)
+		return -EINVAL;
+
+	if (pdata->led_config[chan].led_current == 0)
+		return 0;
+
+	led->led_current = pdata->led_config[chan].led_current;
+	led->max_current = pdata->led_config[chan].max_current;
+	led->chan_nr = pdata->led_config[chan].chan_nr;
+
+	if (led->chan_nr >= LP5521_MAX_LEDS) {
+		dev_err(dev, "Use channel numbers between 0 and %d\n",
+			LP5521_MAX_LEDS - 1);
+		return -EINVAL;
+	}
+
+	snprintf(name, sizeof(name), "%s:channel%d", client->name, chan);
+	led->cdev.brightness_set = lp5521_set_brightness;
+	led->cdev.name = name;
+	res = led_classdev_register(dev, &led->cdev);
+	if (res < 0) {
+		dev_err(dev, "couldn't register led on channel %d\n", chan);
+		return res;
+	}
+
+	res = sysfs_create_group(&led->cdev.dev->kobj,
+			&lp5521_led_attribute_group);
+	if (res < 0) {
+		dev_err(dev, "couldn't register current attribute\n");
+		led_classdev_unregister(&led->cdev);
+		return res;
+	}
+	return 0;
+}
+
+static int lp5521_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	struct lp5521_chip		*chip;
+	struct lp5521_platform_data	*pdata;
+	int ret, i, led;
+
+	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	i2c_set_clientdata(client, chip);
+	chip->client = client;
+
+	pdata = client->dev.platform_data;
+
+	if (!pdata) {
+		dev_err(&client->dev, "no platform data\n");
+		ret = -EINVAL;
+		goto fail1;
+	}
+
+	mutex_init(&chip->lock);
+
+	chip->pdata   = pdata;
+
+	if (pdata->setup_resources) {
+		ret = pdata->setup_resources();
+		if (ret < 0)
+			goto fail1;
+	}
+
+	if (pdata->enable) {
+		pdata->enable(0);
+		usleep_range(1000, 10000);
+		pdata->enable(1);
+		usleep_range(1000, 10000); /* Spec says min 500us */
+	}
+
+	ret = lp5521_detect(client);
+
+	if (ret) {
+		dev_err(&client->dev, "Chip not found\n");
+		goto fail2;
+	}
+
+	dev_info(&client->dev, "%s programmable led chip found\n", id->name);
+
+	ret = lp5521_configure(client, lp5521_engine_group);
+	if (ret < 0) {
+		dev_err(&client->dev, "error configuring chip\n");
+		goto fail2;
+	}
+
+	/* Initialize leds */
+	chip->num_channels = pdata->num_channels;
+	chip->num_leds = 0;
+	led = 0;
+	for (i = 0; i < pdata->num_channels; i++) {
+		/* Do not initialize channels that are not connected */
+		if (pdata->led_config[i].led_current == 0)
+			continue;
+
+		ret = lp5521_init_led(&chip->leds[led], client, i, pdata);
+		if (ret) {
+			dev_err(&client->dev, "error initializing leds\n");
+			goto fail3;
+		}
+		chip->num_leds++;
+
+		chip->leds[led].id = led;
+		/* Set initial LED current */
+		lp5521_set_led_current(chip, led,
+				chip->leds[led].led_current);
+
+		INIT_WORK(&(chip->leds[led].brightness_work),
+			lp5521_led_brightness_work);
+
+		led++;
+	}
+
+	ret = lp5521_register_sysfs(client);
+	if (ret) {
+		dev_err(&client->dev, "registering sysfs failed\n");
+		goto fail3;
+	}
+	return ret;
+fail3:
+	for (i = 0; i < chip->num_leds; i++) {
+		led_classdev_unregister(&chip->leds[i].cdev);
+		cancel_work_sync(&chip->leds[i].brightness_work);
+	}
+fail2:
+	if (pdata->enable)
+		pdata->enable(0);
+	if (pdata->release_resources)
+		pdata->release_resources();
+fail1:
+	kfree(chip);
+	return ret;
+}
+
+static int lp5521_remove(struct i2c_client *client)
+{
+	struct lp5521_chip *chip = i2c_get_clientdata(client);
+	int i;
+
+	lp5521_unregister_sysfs(client);
+
+	for (i = 0; i < chip->num_leds; i++) {
+		led_classdev_unregister(&chip->leds[i].cdev);
+		cancel_work_sync(&chip->leds[i].brightness_work);
+	}
+
+	if (chip->pdata->enable)
+		chip->pdata->enable(0);
+	if (chip->pdata->release_resources)
+		chip->pdata->release_resources();
+	kfree(chip);
+	return 0;
+}
+
+static const struct i2c_device_id lp5521_id[] = {
+	{ "lp5521", 0 }, /* Three channel chip */
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, lp5521_id);
+
+static struct i2c_driver lp5521_driver = {
+	.driver = {
+		.name	= "lp5521",
+	},
+	.probe		= lp5521_probe,
+	.remove		= lp5521_remove,
+	.id_table	= lp5521_id,
+};
+
+static int __init lp5521_init(void)
+{
+	int ret;
+
+	ret = i2c_add_driver(&lp5521_driver);
+
+	if (ret < 0)
+		printk(KERN_ALERT "Adding lp5521 driver failed\n");
+
+	return ret;
+}
+
+static void __exit lp5521_exit(void)
+{
+	i2c_del_driver(&lp5521_driver);
+}
+
+module_init(lp5521_init);
+module_exit(lp5521_exit);
+
+MODULE_AUTHOR("Mathias Nyman, Yuri Zaporozhets, Samu Onkalo");
+MODULE_DESCRIPTION("LP5521 LED engine");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/leds/leds-lp5523.c b/drivers/leds/leds-lp5523.c
new file mode 100644
index 0000000..1e11fcc
--- /dev/null
+++ b/drivers/leds/leds-lp5523.c
@@ -0,0 +1,1065 @@
+/*
+ * lp5523.c - LP5523 LED Driver
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ *
+ * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/ctype.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/leds.h>
+#include <linux/leds-lp5523.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+
+#define LP5523_REG_ENABLE		0x00
+#define LP5523_REG_OP_MODE		0x01
+#define LP5523_REG_RATIOMETRIC_MSB	0x02
+#define LP5523_REG_RATIOMETRIC_LSB	0x03
+#define LP5523_REG_ENABLE_LEDS_MSB	0x04
+#define LP5523_REG_ENABLE_LEDS_LSB	0x05
+#define LP5523_REG_LED_CNTRL_BASE	0x06
+#define LP5523_REG_LED_PWM_BASE		0x16
+#define LP5523_REG_LED_CURRENT_BASE	0x26
+#define LP5523_REG_CONFIG		0x36
+#define LP5523_REG_CHANNEL1_PC		0x37
+#define LP5523_REG_CHANNEL2_PC		0x38
+#define LP5523_REG_CHANNEL3_PC		0x39
+#define LP5523_REG_STATUS		0x3a
+#define LP5523_REG_GPO			0x3b
+#define LP5523_REG_VARIABLE		0x3c
+#define LP5523_REG_RESET		0x3d
+#define LP5523_REG_TEMP_CTRL		0x3e
+#define LP5523_REG_TEMP_READ		0x3f
+#define LP5523_REG_TEMP_WRITE		0x40
+#define LP5523_REG_LED_TEST_CTRL	0x41
+#define LP5523_REG_LED_TEST_ADC		0x42
+#define LP5523_REG_ENG1_VARIABLE	0x45
+#define LP5523_REG_ENG2_VARIABLE	0x46
+#define LP5523_REG_ENG3_VARIABLE	0x47
+#define LP5523_REG_MASTER_FADER1	0x48
+#define LP5523_REG_MASTER_FADER2	0x49
+#define LP5523_REG_MASTER_FADER3	0x4a
+#define LP5523_REG_CH1_PROG_START	0x4c
+#define LP5523_REG_CH2_PROG_START	0x4d
+#define LP5523_REG_CH3_PROG_START	0x4e
+#define LP5523_REG_PROG_PAGE_SEL	0x4f
+#define LP5523_REG_PROG_MEM		0x50
+
+#define LP5523_CMD_LOAD			0x15 /* 00010101 */
+#define LP5523_CMD_RUN			0x2a /* 00101010 */
+#define LP5523_CMD_DISABLED		0x00 /* 00000000 */
+
+#define LP5523_ENABLE			0x40
+#define LP5523_AUTO_INC			0x40
+#define LP5523_PWR_SAVE			0x20
+#define LP5523_PWM_PWR_SAVE		0x04
+#define LP5523_CP_1			0x08
+#define LP5523_CP_1_5			0x10
+#define LP5523_CP_AUTO			0x18
+#define LP5523_INT_CLK			0x01
+#define LP5523_AUTO_CLK			0x02
+#define LP5523_EN_LEDTEST		0x80
+#define LP5523_LEDTEST_DONE		0x80
+
+#define LP5523_DEFAULT_CURRENT		50 /* microAmps */
+#define LP5523_PROGRAM_LENGTH		32 /* in bytes */
+#define LP5523_PROGRAM_PAGES		6
+#define LP5523_ADC_SHORTCIRC_LIM	80
+
+#define LP5523_LEDS			9
+#define LP5523_ENGINES			3
+
+#define LP5523_ENG_MASK_BASE		0x30 /* 00110000 */
+
+#define LP5523_ENG_STATUS_MASK          0x07 /* 00000111 */
+
+#define LP5523_IRQ_FLAGS                IRQF_TRIGGER_FALLING
+
+#define LP5523_EXT_CLK_USED		0x08
+
+#define LED_ACTIVE(mux, led)		(!!(mux & (0x0001 << led)))
+#define SHIFT_MASK(id)			(((id) - 1) * 2)
+
+struct lp5523_engine {
+	const struct attribute_group *attributes;
+	int		id;
+	u8		mode;
+	u8		prog_page;
+	u8		mux_page;
+	u16		led_mux;
+	u8		engine_mask;
+};
+
+struct lp5523_led {
+	int			id;
+	u8			chan_nr;
+	u8			led_current;
+	u8			max_current;
+	struct led_classdev     cdev;
+	struct work_struct	brightness_work;
+	u8			brightness;
+};
+
+struct lp5523_chip {
+	struct mutex		lock; /* Serialize control */
+	struct i2c_client	*client;
+	struct lp5523_engine	engines[LP5523_ENGINES];
+	struct lp5523_led	leds[LP5523_LEDS];
+	struct lp5523_platform_data *pdata;
+	u8			num_channels;
+	u8			num_leds;
+};
+
+#define cdev_to_led(c)          container_of(c, struct lp5523_led, cdev)
+
+static struct lp5523_chip *engine_to_lp5523(struct lp5523_engine *engine)
+{
+	return container_of(engine, struct lp5523_chip,
+			    engines[engine->id - 1]);
+}
+
+static struct lp5523_chip *led_to_lp5523(struct lp5523_led *led)
+{
+	return container_of(led, struct lp5523_chip,
+			    leds[led->id]);
+}
+
+static int lp5523_set_mode(struct lp5523_engine *engine, u8 mode);
+static int lp5523_set_engine_mode(struct lp5523_engine *engine, u8 mode);
+static int lp5523_load_program(struct lp5523_engine *engine, u8 *pattern);
+
+static void lp5523_led_brightness_work(struct work_struct *work);
+
+static int lp5523_write(struct i2c_client *client, u8 reg, u8 value)
+{
+	return i2c_smbus_write_byte_data(client, reg, value);
+}
+
+static int lp5523_read(struct i2c_client *client, u8 reg, u8 *buf)
+{
+	s32 ret = i2c_smbus_read_byte_data(client, reg);
+
+	if (ret < 0)
+		return -EIO;
+
+	*buf = ret;
+	return 0;
+}
+
+static int lp5523_detect(struct i2c_client *client)
+{
+	int ret;
+	u8 buf;
+
+	ret = lp5523_write(client, LP5523_REG_ENABLE, 0x40);
+	if (ret)
+		return ret;
+	ret = lp5523_read(client, LP5523_REG_ENABLE, &buf);
+	if (ret)
+		return ret;
+	if (buf == 0x40)
+		return 0;
+	else
+		return -ENODEV;
+}
+
+static int lp5523_configure(struct i2c_client *client)
+{
+	struct lp5523_chip *chip = i2c_get_clientdata(client);
+	int ret = 0;
+	u8 status;
+
+	/* one pattern per engine, setting the LED mux start and stop addresses */
+	u8 pattern[][LP5523_PROGRAM_LENGTH] =  {
+		{ 0x9c, 0x30, 0x9c, 0xb0, 0x9d, 0x80, 0xd8, 0x00, 0},
+		{ 0x9c, 0x40, 0x9c, 0xc0, 0x9d, 0x80, 0xd8, 0x00, 0},
+		{ 0x9c, 0x50, 0x9c, 0xd0, 0x9d, 0x80, 0xd8, 0x00, 0},
+	};
+
+	lp5523_write(client, LP5523_REG_RESET, 0xff);
+
+	usleep_range(10000, 100000);
+
+	ret |= lp5523_write(client, LP5523_REG_ENABLE, LP5523_ENABLE);
+	/* Chip startup time after reset is 500 us */
+	usleep_range(1000, 10000);
+
+	ret |= lp5523_write(client, LP5523_REG_CONFIG,
+			    LP5523_AUTO_INC | LP5523_PWR_SAVE |
+			    LP5523_CP_AUTO | LP5523_AUTO_CLK |
+			    LP5523_PWM_PWR_SAVE);
+
+	/* turn on all leds */
+	ret |= lp5523_write(client, LP5523_REG_ENABLE_LEDS_MSB, 0x01);
+	ret |= lp5523_write(client, LP5523_REG_ENABLE_LEDS_LSB, 0xff);
+
+	/* hardcode 32 bytes of memory for each engine from program memory */
+	ret |= lp5523_write(client, LP5523_REG_CH1_PROG_START, 0x00);
+	ret |= lp5523_write(client, LP5523_REG_CH2_PROG_START, 0x10);
+	ret |= lp5523_write(client, LP5523_REG_CH3_PROG_START, 0x20);
+
+	/* write led mux address space for each channel */
+	ret |= lp5523_load_program(&chip->engines[0], pattern[0]);
+	ret |= lp5523_load_program(&chip->engines[1], pattern[1]);
+	ret |= lp5523_load_program(&chip->engines[2], pattern[2]);
+
+	if (ret) {
+		dev_err(&client->dev, "could not load mux programs\n");
+		return -1;
+	}
+
+	/* set all engines exec state and mode to run 00101010 */
+	ret |= lp5523_write(client, LP5523_REG_ENABLE,
+			    (LP5523_CMD_RUN | LP5523_ENABLE));
+
+	ret |= lp5523_write(client, LP5523_REG_OP_MODE, LP5523_CMD_RUN);
+
+	if (ret) {
+		dev_err(&client->dev, "could not start mux programs\n");
+		return -1;
+	}
+
+	/* Wait 3ms and check the engine status */
+	usleep_range(3000, 20000);
+	lp5523_read(client, LP5523_REG_STATUS, &status);
+	status &= LP5523_ENG_STATUS_MASK;
+
+	if (status == LP5523_ENG_STATUS_MASK) {
+		dev_dbg(&client->dev, "all engines configured\n");
+	} else {
+		dev_info(&client->dev, "status == %x\n", status);
+		dev_err(&client->dev, "cound not configure LED engine\n");
+		return -1;
+	}
+
+	dev_info(&client->dev, "disabling engines\n");
+
+	ret |= lp5523_write(client, LP5523_REG_OP_MODE, LP5523_CMD_DISABLED);
+
+	return ret;
+}
+
+static int lp5523_set_engine_mode(struct lp5523_engine *engine, u8 mode)
+{
+	struct lp5523_chip *chip = engine_to_lp5523(engine);
+	struct i2c_client *client = chip->client;
+	int ret;
+	u8 engine_state;
+
+	ret = lp5523_read(client, LP5523_REG_OP_MODE, &engine_state);
+	if (ret)
+		goto fail;
+
+	engine_state &= ~(engine->engine_mask);
+
+	/* set mode only for this engine */
+	mode &= engine->engine_mask;
+
+	engine_state |= mode;
+
+	ret |= lp5523_write(client, LP5523_REG_OP_MODE, engine_state);
+fail:
+	return ret;
+}
+
+static int lp5523_load_mux(struct lp5523_engine *engine, u16 mux)
+{
+	struct lp5523_chip *chip = engine_to_lp5523(engine);
+	struct i2c_client *client = chip->client;
+	int ret = 0;
+
+	ret |= lp5523_set_engine_mode(engine, LP5523_CMD_LOAD);
+
+	ret |= lp5523_write(client, LP5523_REG_PROG_PAGE_SEL, engine->mux_page);
+	ret |= lp5523_write(client, LP5523_REG_PROG_MEM,
+			    (u8)(mux >> 8));
+	ret |= lp5523_write(client, LP5523_REG_PROG_MEM + 1, (u8)(mux));
+	engine->led_mux = mux;
+
+	return ret;
+}
+
+static int lp5523_load_program(struct lp5523_engine *engine, u8 *pattern)
+{
+	struct lp5523_chip *chip = engine_to_lp5523(engine);
+	struct i2c_client *client = chip->client;
+
+	int ret = 0;
+
+	ret |= lp5523_set_engine_mode(engine, LP5523_CMD_LOAD);
+
+	ret |= lp5523_write(client, LP5523_REG_PROG_PAGE_SEL,
+			    engine->prog_page);
+	ret |= i2c_smbus_write_i2c_block_data(client, LP5523_REG_PROG_MEM,
+					      LP5523_PROGRAM_LENGTH, pattern);
+
+	return ret;
+}
+
+static int lp5523_run_program(struct lp5523_engine *engine)
+{
+	struct lp5523_chip *chip = engine_to_lp5523(engine);
+	struct i2c_client *client = chip->client;
+	int ret;
+
+	ret = lp5523_write(client, LP5523_REG_ENABLE,
+					LP5523_CMD_RUN | LP5523_ENABLE);
+	if (ret)
+		goto fail;
+
+	ret = lp5523_set_engine_mode(engine, LP5523_CMD_RUN);
+fail:
+	return ret;
+}
+
+static int lp5523_mux_parse(const char *buf, u16 *mux, size_t len)
+{
+	int i;
+	u16 tmp_mux = 0;
+	len = len < LP5523_LEDS ? len : LP5523_LEDS;
+	for (i = 0; i < len; i++) {
+		switch (buf[i]) {
+		case '1':
+			tmp_mux |= (1 << i);
+			break;
+		case '0':
+			break;
+		case '\n':
+			i = len;
+			break;
+		default:
+			return -1;
+		}
+	}
+	*mux = tmp_mux;
+
+	return 0;
+}
+
+static void lp5523_mux_to_array(u16 led_mux, char *array)
+{
+	int i, pos = 0;
+	for (i = 0; i < LP5523_LEDS; i++)
+		pos += sprintf(array + pos, "%x", LED_ACTIVE(led_mux, i));
+
+	array[pos] = '\0';
+}
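+
+/*
+ * Illustrative example (not in the original source): with led_mux == 0x0003
+ * this yields "110000000" -- LEDs 0 and 1 attached to the engine, the other
+ * seven detached.  The same string format written to the engineN_leds
+ * attribute is parsed back by lp5523_mux_parse() above.
+ */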
+
+/*--------------------------------------------------------------*/
+/*			Sysfs interface				*/
+/*--------------------------------------------------------------*/
+
+static ssize_t show_engine_leds(struct device *dev,
+			    struct device_attribute *attr,
+			    char *buf, int nr)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lp5523_chip *chip = i2c_get_clientdata(client);
+	char mux[LP5523_LEDS + 1];
+
+	lp5523_mux_to_array(chip->engines[nr - 1].led_mux, mux);
+
+	return sprintf(buf, "%s\n", mux);
+}
+
+#define show_leds(nr)							\
+static ssize_t show_engine##nr##_leds(struct device *dev,		\
+			    struct device_attribute *attr,		\
+			    char *buf)					\
+{									\
+	return show_engine_leds(dev, attr, buf, nr);			\
+}
+show_leds(1)
+show_leds(2)
+show_leds(3)
+
+static ssize_t store_engine_leds(struct device *dev,
+			     struct device_attribute *attr,
+			     const char *buf, size_t len, int nr)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lp5523_chip *chip = i2c_get_clientdata(client);
+	u16 mux = 0;
+
+	if (lp5523_mux_parse(buf, &mux, len))
+		return -EINVAL;
+
+	if (lp5523_load_mux(&chip->engines[nr - 1], mux))
+		return -EINVAL;
+
+	return len;
+}
+
+#define store_leds(nr)						\
+static ssize_t store_engine##nr##_leds(struct device *dev,	\
+			     struct device_attribute *attr,	\
+			     const char *buf, size_t len)	\
+{								\
+	return store_engine_leds(dev, attr, buf, len, nr);	\
+}
+store_leds(1)
+store_leds(2)
+store_leds(3)
+
+static ssize_t lp5523_selftest(struct device *dev,
+			       struct device_attribute *attr,
+			       char *buf)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lp5523_chip *chip = i2c_get_clientdata(client);
+	int i, ret, pos = 0;
+	int led = 0;
+	u8 status, adc, vdd;
+
+	mutex_lock(&chip->lock);
+
+	ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status);
+	if (ret < 0)
+		goto fail;
+
+	/* Check that ext clock is really in use if requested */
+	if ((chip->pdata) && (chip->pdata->clock_mode == LP5523_CLOCK_EXT))
+		if ((status & LP5523_EXT_CLK_USED) == 0)
+			goto fail;
+
+	/* Measure VDD (i.e. VBAT) first (channel 16 corresponds to VDD) */
+	lp5523_write(chip->client, LP5523_REG_LED_TEST_CTRL,
+				    LP5523_EN_LEDTEST | 16);
+	usleep_range(3000, 10000);
+	ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status);
+	if (!(status & LP5523_LEDTEST_DONE))
+		usleep_range(3000, 10000);
+
+	ret |= lp5523_read(chip->client, LP5523_REG_LED_TEST_ADC, &vdd);
+	vdd--;	/* There may be some fluctuation in measurement */
+
+	for (i = 0; i < LP5523_LEDS; i++) {
+		/* Skip non-existing channels */
+		if (chip->pdata->led_config[i].led_current == 0)
+			continue;
+
+		/* Set default current */
+		lp5523_write(chip->client,
+			LP5523_REG_LED_CURRENT_BASE + i,
+			chip->pdata->led_config[i].led_current);
+
+		lp5523_write(chip->client, LP5523_REG_LED_PWM_BASE + i, 0xff);
+		/* let current stabilize 2ms before measurements start */
+		usleep_range(2000, 10000);
+		lp5523_write(chip->client,
+			     LP5523_REG_LED_TEST_CTRL,
+			     LP5523_EN_LEDTEST | i);
+		/* ledtest takes 2.7ms */
+		usleep_range(3000, 10000);
+		ret = lp5523_read(chip->client, LP5523_REG_STATUS, &status);
+		if (!(status & LP5523_LEDTEST_DONE))
+			usleep_range(3000, 10000);
+		ret |= lp5523_read(chip->client, LP5523_REG_LED_TEST_ADC, &adc);
+
+		if (adc >= vdd || adc < LP5523_ADC_SHORTCIRC_LIM)
+			pos += sprintf(buf + pos, "LED %d FAIL\n", i);
+
+		lp5523_write(chip->client, LP5523_REG_LED_PWM_BASE + i, 0x00);
+
+		/* Restore current */
+		lp5523_write(chip->client,
+			LP5523_REG_LED_CURRENT_BASE + i,
+			chip->leds[led].led_current);
+		led++;
+	}
+	if (pos == 0)
+		pos = sprintf(buf, "OK\n");
+	goto release_lock;
+fail:
+	pos = sprintf(buf, "FAIL\n");
+
+release_lock:
+	mutex_unlock(&chip->lock);
+
+	return pos;
+}
+
+static void lp5523_set_brightness(struct led_classdev *cdev,
+			     enum led_brightness brightness)
+{
+	struct lp5523_led *led = cdev_to_led(cdev);
+
+	led->brightness = (u8)brightness;
+
+	schedule_work(&led->brightness_work);
+}
+
+static void lp5523_led_brightness_work(struct work_struct *work)
+{
+	struct lp5523_led *led = container_of(work,
+					      struct lp5523_led,
+					      brightness_work);
+	struct lp5523_chip *chip = led_to_lp5523(led);
+	struct i2c_client *client = chip->client;
+
+	mutex_lock(&chip->lock);
+
+	lp5523_write(client, LP5523_REG_LED_PWM_BASE + led->chan_nr,
+		     led->brightness);
+
+	mutex_unlock(&chip->lock);
+}
+
+static int lp5523_do_store_load(struct lp5523_engine *engine,
+				const char *buf, size_t len)
+{
+	struct lp5523_chip *chip = engine_to_lp5523(engine);
+	struct i2c_client *client = chip->client;
+	int  ret, nrchars, offset = 0, i = 0;
+	char c[3];
+	unsigned cmd;
+	u8 pattern[LP5523_PROGRAM_LENGTH] = {0};
+
+	while ((offset < len - 1) && (i < LP5523_PROGRAM_LENGTH)) {
+		/* separate sscanfs because the length limit works only for %s */
+		ret = sscanf(buf + offset, "%2s%n ", c, &nrchars);
+		ret = sscanf(c, "%2x", &cmd);
+		if (ret != 1)
+			goto fail;
+		pattern[i] = (u8)cmd;
+
+		offset += nrchars;
+		i++;
+	}
+
+	/* Each instruction is 16 bits long. Check that the length is even */
+	if (i % 2)
+		goto fail;
+
+	mutex_lock(&chip->lock);
+
+	ret = lp5523_load_program(engine, pattern);
+	mutex_unlock(&chip->lock);
+
+	if (ret) {
+		dev_err(&client->dev, "failed loading pattern\n");
+		return ret;
+	}
+
+	return len;
+fail:
+	dev_err(&client->dev, "wrong pattern format\n");
+	return -EINVAL;
+}
+
+static ssize_t store_engine_load(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t len, int nr)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lp5523_chip *chip = i2c_get_clientdata(client);
+	return lp5523_do_store_load(&chip->engines[nr - 1], buf, len);
+}
+
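+/* generate the store_engine1..3_load() wrappers around store_engine_load() */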
+#define store_load(nr)							\
+static ssize_t store_engine##nr##_load(struct device *dev,		\
+				     struct device_attribute *attr,	\
+				     const char *buf, size_t len)	\
+{									\
+	return store_engine_load(dev, attr, buf, len, nr);		\
+}
+store_load(1)
+store_load(2)
+store_load(3)
+
+static ssize_t show_engine_mode(struct device *dev,
+				struct device_attribute *attr,
+				char *buf, int nr)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lp5523_chip *chip = i2c_get_clientdata(client);
+	switch (chip->engines[nr - 1].mode) {
+	case LP5523_CMD_RUN:
+		return sprintf(buf, "run\n");
+	case LP5523_CMD_LOAD:
+		return sprintf(buf, "load\n");
+	case LP5523_CMD_DISABLED:
+	default:
+		return sprintf(buf, "disabled\n");
+	}
+}
+
+#define show_mode(nr)							\
+static ssize_t show_engine##nr##_mode(struct device *dev,		\
+				    struct device_attribute *attr,	\
+				    char *buf)				\
+{									\
+	return show_engine_mode(dev, attr, buf, nr);			\
+}
+show_mode(1)
+show_mode(2)
+show_mode(3)
+
+static ssize_t store_engine_mode(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf, size_t len, int nr)
+{
+	struct i2c_client *client = to_i2c_client(dev);
+	struct lp5523_chip *chip = i2c_get_clientdata(client);
+	struct lp5523_engine *engine = &chip->engines[nr - 1];
+	mutex_lock(&chip->lock);
+
+	if (!strncmp(buf, "run", 3))
+		lp5523_set_mode(engine, LP5523_CMD_RUN);
+	else if (!strncmp(buf, "load", 4))
+		lp5523_set_mode(engine, LP5523_CMD_LOAD);
+	else if (!strncmp(buf, "disabled", 8))
+		lp5523_set_mode(engine, LP5523_CMD_DISABLED);
+
+	mutex_unlock(&chip->lock);
+	return len;
+}
+
+#define store_mode(nr)							\
+static ssize_t store_engine##nr##_mode(struct device *dev,		\
+				     struct device_attribute *attr,	\
+				     const char *buf, size_t len)	\
+{									\
+	return store_engine_mode(dev, attr, buf, len, nr);		\
+}
+store_mode(1)
+store_mode(2)
+store_mode(3)
+
+static ssize_t show_max_current(struct device *dev,
+			    struct device_attribute *attr,
+			    char *buf)
+{
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	struct lp5523_led *led = cdev_to_led(led_cdev);
+
+	return sprintf(buf, "%d\n", led->max_current);
+}
+
+static ssize_t show_current(struct device *dev,
+			    struct device_attribute *attr,
+			    char *buf)
+{
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	struct lp5523_led *led = cdev_to_led(led_cdev);
+
+	return sprintf(buf, "%d\n", led->led_current);
+}
+
+static ssize_t store_current(struct device *dev,
+			     struct device_attribute *attr,
+			     const char *buf, size_t len)
+{
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+	struct lp5523_led *led = cdev_to_led(led_cdev);
+	struct lp5523_chip *chip = led_to_lp5523(led);
+	ssize_t ret;
+	unsigned long curr;
+
+	if (strict_strtoul(buf, 0, &curr))
+		return -EINVAL;
+
+	if (curr > led->max_current)
+		return -EINVAL;
+
+	mutex_lock(&chip->lock);
+	ret = lp5523_write(chip->client,
+			LP5523_REG_LED_CURRENT_BASE + led->chan_nr,
+			(u8)curr);
+	mutex_unlock(&chip->lock);
+
+	if (ret < 0)
+		return ret;
+
+	led->led_current = (u8)curr;
+
+	return len;
+}
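+
+/*
+ * led_current and max_current are raw register values; per the LP5523 data
+ * sheet one unit corresponds to 100 uA, so e.g. writing 50 gives roughly
+ * 5 mA. Writes above max_current are rejected with -EINVAL.
+ */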
+
+/* led class device attributes */
+static DEVICE_ATTR(led_current, S_IRUGO | S_IWUSR, show_current, store_current);
+static DEVICE_ATTR(max_current, S_IRUGO, show_max_current, NULL);
+
+static struct attribute *lp5523_led_attributes[] = {
+	&dev_attr_led_current.attr,
+	&dev_attr_max_current.attr,
+	NULL,
+};
+
+static struct attribute_group lp5523_led_attribute_group = {
+	.attrs = lp5523_led_attributes
+};
+
+/* device attributes */
+static DEVICE_ATTR(engine1_mode, S_IRUGO | S_IWUSR,
+		   show_engine1_mode, store_engine1_mode);
+static DEVICE_ATTR(engine2_mode, S_IRUGO | S_IWUSR,
+		   show_engine2_mode, store_engine2_mode);
+static DEVICE_ATTR(engine3_mode, S_IRUGO | S_IWUSR,
+		   show_engine3_mode, store_engine3_mode);
+static DEVICE_ATTR(engine1_leds, S_IRUGO | S_IWUSR,
+		   show_engine1_leds, store_engine1_leds);
+static DEVICE_ATTR(engine2_leds, S_IRUGO | S_IWUSR,
+		   show_engine2_leds, store_engine2_leds);
+static DEVICE_ATTR(engine3_leds, S_IRUGO | S_IWUSR,
+		   show_engine3_leds, store_engine3_leds);
+static DEVICE_ATTR(engine1_load, S_IWUSR, NULL, store_engine1_load);
+static DEVICE_ATTR(engine2_load, S_IWUSR, NULL, store_engine2_load);
+static DEVICE_ATTR(engine3_load, S_IWUSR, NULL, store_engine3_load);
+static DEVICE_ATTR(selftest, S_IRUGO, lp5523_selftest, NULL);
+
+static struct attribute *lp5523_attributes[] = {
+	&dev_attr_engine1_mode.attr,
+	&dev_attr_engine2_mode.attr,
+	&dev_attr_engine3_mode.attr,
+	&dev_attr_selftest.attr,
+	NULL
+};
+
+static struct attribute *lp5523_engine1_attributes[] = {
+	&dev_attr_engine1_load.attr,
+	&dev_attr_engine1_leds.attr,
+	NULL
+};
+
+static struct attribute *lp5523_engine2_attributes[] = {
+	&dev_attr_engine2_load.attr,
+	&dev_attr_engine2_leds.attr,
+	NULL
+};
+
+static struct attribute *lp5523_engine3_attributes[] = {
+	&dev_attr_engine3_load.attr,
+	&dev_attr_engine3_leds.attr,
+	NULL
+};
+
+static const struct attribute_group lp5523_group = {
+	.attrs = lp5523_attributes,
+};
+
+static const struct attribute_group lp5523_engine_group[] = {
+	{.attrs = lp5523_engine1_attributes },
+	{.attrs = lp5523_engine2_attributes },
+	{.attrs = lp5523_engine3_attributes },
+};
+
+static int lp5523_register_sysfs(struct i2c_client *client)
+{
+	struct device *dev = &client->dev;
+	int ret;
+
+	ret = sysfs_create_group(&dev->kobj, &lp5523_group);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+static void lp5523_unregister_sysfs(struct i2c_client *client)
+{
+	struct lp5523_chip *chip = i2c_get_clientdata(client);
+	struct device *dev = &client->dev;
+	int i;
+
+	sysfs_remove_group(&dev->kobj, &lp5523_group);
+
+	for (i = 0; i < ARRAY_SIZE(chip->engines); i++)
+		if (chip->engines[i].mode == LP5523_CMD_LOAD)
+			sysfs_remove_group(&dev->kobj, &lp5523_engine_group[i]);
+
+	for (i = 0; i < chip->num_leds; i++)
+		sysfs_remove_group(&chip->leds[i].cdev.dev->kobj,
+				&lp5523_led_attribute_group);
+}
+
+/*--------------------------------------------------------------*/
+/*			Set chip operating mode			*/
+/*--------------------------------------------------------------*/
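+/*
+ * Switching an engine to LP5523_CMD_LOAD also creates its load/leds sysfs
+ * group; leaving LOAD mode removes the group again.
+ */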
+static int lp5523_set_mode(struct lp5523_engine *engine, u8 mode)
+{
+	/*  engine to chip */
+	struct lp5523_chip *chip = engine_to_lp5523(engine);
+	struct i2c_client *client = chip->client;
+	struct device *dev = &client->dev;
+	int ret = 0;
+
+	/* nothing to do if already in the requested mode, except for RUN */
+	if (mode == engine->mode && mode != LP5523_CMD_RUN)
+		return 0;
+
+	if (mode == LP5523_CMD_RUN) {
+		ret = lp5523_run_program(engine);
+	} else if (mode == LP5523_CMD_LOAD) {
+		lp5523_set_engine_mode(engine, LP5523_CMD_DISABLED);
+		lp5523_set_engine_mode(engine, LP5523_CMD_LOAD);
+
+		ret = sysfs_create_group(&dev->kobj, engine->attributes);
+		if (ret)
+			return ret;
+	} else if (mode == LP5523_CMD_DISABLED) {
+		lp5523_set_engine_mode(engine, LP5523_CMD_DISABLED);
+	}
+
+	/* remove load attribute from sysfs if not in load mode */
+	if (engine->mode == LP5523_CMD_LOAD && mode != LP5523_CMD_LOAD)
+		sysfs_remove_group(&dev->kobj, engine->attributes);
+
+	engine->mode = mode;
+
+	return ret;
+}
+
+/*--------------------------------------------------------------*/
+/*			Probe, Attach, Remove			*/
+/*--------------------------------------------------------------*/
+static int __init lp5523_init_engine(struct lp5523_engine *engine, int id)
+{
+	if (id < 1 || id > LP5523_ENGINES)
+		return -EINVAL;
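+	/* engine ids 1..3 map to program pages 0..2 and mux pages 3..5 */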
+	engine->id = id;
+	engine->engine_mask = LP5523_ENG_MASK_BASE >> SHIFT_MASK(id);
+	engine->prog_page = id - 1;
+	engine->mux_page = id + 2;
+	engine->attributes = &lp5523_engine_group[id - 1];
+
+	return 0;
+}
+
+static int __init lp5523_init_led(struct lp5523_led *led, struct device *dev,
+			   int chan, struct lp5523_platform_data *pdata)
+{
+	char name[32];
+	int res;
+
+	if (chan >= LP5523_LEDS)
+		return -EINVAL;
+
+	if (pdata->led_config[chan].led_current) {
+		led->led_current = pdata->led_config[chan].led_current;
+		led->max_current = pdata->led_config[chan].max_current;
+		led->chan_nr = pdata->led_config[chan].chan_nr;
+
+		if (led->chan_nr >= LP5523_LEDS) {
+			dev_err(dev, "Use channel numbers between 0 and %d\n",
+				LP5523_LEDS - 1);
+			return -EINVAL;
+		}
+
+		snprintf(name, 32, "lp5523:channel%d", chan);
+
+		led->cdev.name = name;
+		led->cdev.brightness_set = lp5523_set_brightness;
+		res = led_classdev_register(dev, &led->cdev);
+		if (res < 0) {
+			dev_err(dev, "couldn't register led on channel %d\n",
+				chan);
+			return res;
+		}
+		res = sysfs_create_group(&led->cdev.dev->kobj,
+				&lp5523_led_attribute_group);
+		if (res < 0) {
+			dev_err(dev, "couldn't register current attribute\n");
+			led_classdev_unregister(&led->cdev);
+			return res;
+		}
+	} else {
+		led->led_current = 0;
+	}
+	return 0;
+}
+
+static struct i2c_driver lp5523_driver;
+
+static int lp5523_probe(struct i2c_client *client,
+			const struct i2c_device_id *id)
+{
+	struct lp5523_chip		*chip;
+	struct lp5523_platform_data	*pdata;
+	int ret, i, led;
+
+	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+	if (!chip)
+		return -ENOMEM;
+
+	i2c_set_clientdata(client, chip);
+	chip->client = client;
+
+	pdata = client->dev.platform_data;
+
+	if (!pdata) {
+		dev_err(&client->dev, "no platform data\n");
+		ret = -EINVAL;
+		goto fail1;
+	}
+
+	mutex_init(&chip->lock);
+
+	chip->pdata   = pdata;
+
+	if (pdata->setup_resources) {
+		ret = pdata->setup_resources();
+		if (ret < 0)
+			goto fail1;
+	}
+
+	if (pdata->enable) {
+		pdata->enable(0);
+		usleep_range(1000, 10000);
+		pdata->enable(1);
+		usleep_range(1000, 10000); /* Spec says min 500us */
+	}
+
+	ret = lp5523_detect(client);
+	if (ret)
+		goto fail2;
+
+	dev_info(&client->dev, "LP5523 programmable LED chip found\n");
+
+	/* Initialize engines */
+	for (i = 0; i < ARRAY_SIZE(chip->engines); i++) {
+		ret = lp5523_init_engine(&chip->engines[i], i + 1);
+		if (ret) {
+			dev_err(&client->dev, "error initializing engine\n");
+			goto fail2;
+		}
+	}
+	ret = lp5523_configure(client);
+	if (ret < 0) {
+		dev_err(&client->dev, "error configuring chip\n");
+		goto fail2;
+	}
+
+	/* Initialize leds */
+	chip->num_channels = pdata->num_channels;
+	chip->num_leds = 0;
+	led = 0;
+	for (i = 0; i < pdata->num_channels; i++) {
+		/* Do not initialize channels that are not connected */
+		if (pdata->led_config[i].led_current == 0)
+			continue;
+
+		ret = lp5523_init_led(&chip->leds[led], &client->dev, i, pdata);
+		if (ret) {
+			dev_err(&client->dev, "error initializing leds\n");
+			goto fail3;
+		}
+		chip->num_leds++;
+
+		chip->leds[led].id = led;
+		/* Set LED current */
+		lp5523_write(client,
+			  LP5523_REG_LED_CURRENT_BASE + chip->leds[led].chan_nr,
+			  chip->leds[led].led_current);
+
+		INIT_WORK(&(chip->leds[led].brightness_work),
+			lp5523_led_brightness_work);
+
+		led++;
+	}
+
+	ret = lp5523_register_sysfs(client);
+	if (ret) {
+		dev_err(&client->dev, "registering sysfs failed\n");
+		goto fail3;
+	}
+	return ret;
+fail3:
+	for (i = 0; i < chip->num_leds; i++) {
+		led_classdev_unregister(&chip->leds[i].cdev);
+		cancel_work_sync(&chip->leds[i].brightness_work);
+	}
+fail2:
+	if (pdata->enable)
+		pdata->enable(0);
+	if (pdata->release_resources)
+		pdata->release_resources();
+fail1:
+	kfree(chip);
+	return ret;
+}
+
+static int lp5523_remove(struct i2c_client *client)
+{
+	struct lp5523_chip *chip = i2c_get_clientdata(client);
+	int i;
+
+	lp5523_unregister_sysfs(client);
+
+	for (i = 0; i < chip->num_leds; i++) {
+		led_classdev_unregister(&chip->leds[i].cdev);
+		cancel_work_sync(&chip->leds[i].brightness_work);
+	}
+
+	if (chip->pdata->enable)
+		chip->pdata->enable(0);
+	if (chip->pdata->release_resources)
+		chip->pdata->release_resources();
+	kfree(chip);
+	return 0;
+}
+
+static const struct i2c_device_id lp5523_id[] = {
+	{ "lp5523", 0 },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(i2c, lp5523_id);
+
+static struct i2c_driver lp5523_driver = {
+	.driver = {
+		.name	= "lp5523",
+	},
+	.probe		= lp5523_probe,
+	.remove		= lp5523_remove,
+	.id_table	= lp5523_id,
+};
+
+static int __init lp5523_init(void)
+{
+	int ret;
+
+	ret = i2c_add_driver(&lp5523_driver);
+
+	if (ret < 0)
+		printk(KERN_ERR "Adding lp5523 driver failed\n");
+
+	return ret;
+}
+
+static void __exit lp5523_exit(void)
+{
+	i2c_del_driver(&lp5523_driver);
+}
+
+module_init(lp5523_init);
+module_exit(lp5523_exit);
+
+MODULE_AUTHOR("Mathias Nyman <mathias.nyman@nokia.com>");
+MODULE_DESCRIPTION("LP5523 LED engine");
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds-net5501.c b/drivers/leds/leds-net5501.c
index 3063f59..1739557 100644
--- a/drivers/leds/leds-net5501.c
+++ b/drivers/leds/leds-net5501.c
@@ -92,3 +92,5 @@
 }
 
 arch_initcall(soekris_init);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/leds/ledtrig-timer.c b/drivers/leds/ledtrig-timer.c
index 82b77bd..b09bcbe 100644
--- a/drivers/leds/ledtrig-timer.c
+++ b/drivers/leds/ledtrig-timer.c
@@ -12,73 +12,25 @@
  */
 
 #include <linux/module.h>
-#include <linux/jiffies.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
 #include <linux/device.h>
-#include <linux/sysdev.h>
-#include <linux/timer.h>
 #include <linux/ctype.h>
 #include <linux/leds.h>
-#include <linux/slab.h>
 #include "leds.h"
 
-struct timer_trig_data {
-	int brightness_on;		/* LED brightness during "on" period.
-					 * (LED_OFF < brightness_on <= LED_FULL)
-					 */
-	unsigned long delay_on;		/* milliseconds on */
-	unsigned long delay_off;	/* milliseconds off */
-	struct timer_list timer;
-};
-
-static void led_timer_function(unsigned long data)
-{
-	struct led_classdev *led_cdev = (struct led_classdev *) data;
-	struct timer_trig_data *timer_data = led_cdev->trigger_data;
-	unsigned long brightness;
-	unsigned long delay;
-
-	if (!timer_data->delay_on || !timer_data->delay_off) {
-		led_set_brightness(led_cdev, LED_OFF);
-		return;
-	}
-
-	brightness = led_get_brightness(led_cdev);
-	if (!brightness) {
-		/* Time to switch the LED on. */
-		brightness = timer_data->brightness_on;
-		delay = timer_data->delay_on;
-	} else {
-		/* Store the current brightness value to be able
-		 * to restore it when the delay_off period is over.
-		 */
-		timer_data->brightness_on = brightness;
-		brightness = LED_OFF;
-		delay = timer_data->delay_off;
-	}
-
-	led_set_brightness(led_cdev, brightness);
-
-	mod_timer(&timer_data->timer, jiffies + msecs_to_jiffies(delay));
-}
-
 static ssize_t led_delay_on_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
 	struct led_classdev *led_cdev = dev_get_drvdata(dev);
-	struct timer_trig_data *timer_data = led_cdev->trigger_data;
 
-	return sprintf(buf, "%lu\n", timer_data->delay_on);
+	return sprintf(buf, "%lu\n", led_cdev->blink_delay_on);
 }
 
 static ssize_t led_delay_on_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t size)
 {
 	struct led_classdev *led_cdev = dev_get_drvdata(dev);
-	struct timer_trig_data *timer_data = led_cdev->trigger_data;
 	int ret = -EINVAL;
 	char *after;
 	unsigned long state = simple_strtoul(buf, &after, 10);
@@ -88,21 +40,7 @@
 		count++;
 
 	if (count == size) {
-		if (timer_data->delay_on != state) {
-			/* the new value differs from the previous */
-			timer_data->delay_on = state;
-
-			/* deactivate previous settings */
-			del_timer_sync(&timer_data->timer);
-
-			/* try to activate hardware acceleration, if any */
-			if (!led_cdev->blink_set ||
-			    led_cdev->blink_set(led_cdev,
-			      &timer_data->delay_on, &timer_data->delay_off)) {
-				/* no hardware acceleration, blink via timer */
-				mod_timer(&timer_data->timer, jiffies + 1);
-			}
-		}
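+		/* led_blink_set() uses hw blinking if available, else a timer */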
+		led_blink_set(led_cdev, &state, &led_cdev->blink_delay_off);
 		ret = count;
 	}
 
@@ -113,16 +51,14 @@
 		struct device_attribute *attr, char *buf)
 {
 	struct led_classdev *led_cdev = dev_get_drvdata(dev);
-	struct timer_trig_data *timer_data = led_cdev->trigger_data;
 
-	return sprintf(buf, "%lu\n", timer_data->delay_off);
+	return sprintf(buf, "%lu\n", led_cdev->blink_delay_off);
 }
 
 static ssize_t led_delay_off_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t size)
 {
 	struct led_classdev *led_cdev = dev_get_drvdata(dev);
-	struct timer_trig_data *timer_data = led_cdev->trigger_data;
 	int ret = -EINVAL;
 	char *after;
 	unsigned long state = simple_strtoul(buf, &after, 10);
@@ -132,21 +68,7 @@
 		count++;
 
 	if (count == size) {
-		if (timer_data->delay_off != state) {
-			/* the new value differs from the previous */
-			timer_data->delay_off = state;
-
-			/* deactivate previous settings */
-			del_timer_sync(&timer_data->timer);
-
-			/* try to activate hardware acceleration, if any */
-			if (!led_cdev->blink_set ||
-			    led_cdev->blink_set(led_cdev,
-			      &timer_data->delay_on, &timer_data->delay_off)) {
-				/* no hardware acceleration, blink via timer */
-				mod_timer(&timer_data->timer, jiffies + 1);
-			}
-		}
+		led_blink_set(led_cdev, &led_cdev->blink_delay_on, &state);
 		ret = count;
 	}
 
@@ -158,60 +80,34 @@
 
 static void timer_trig_activate(struct led_classdev *led_cdev)
 {
-	struct timer_trig_data *timer_data;
 	int rc;
 
-	timer_data = kzalloc(sizeof(struct timer_trig_data), GFP_KERNEL);
-	if (!timer_data)
-		return;
-
-	timer_data->brightness_on = led_get_brightness(led_cdev);
-	if (timer_data->brightness_on == LED_OFF)
-		timer_data->brightness_on = led_cdev->max_brightness;
-	led_cdev->trigger_data = timer_data;
-
-	init_timer(&timer_data->timer);
-	timer_data->timer.function = led_timer_function;
-	timer_data->timer.data = (unsigned long) led_cdev;
+	led_cdev->trigger_data = NULL;
 
 	rc = device_create_file(led_cdev->dev, &dev_attr_delay_on);
 	if (rc)
-		goto err_out;
+		return;
 	rc = device_create_file(led_cdev->dev, &dev_attr_delay_off);
 	if (rc)
 		goto err_out_delayon;
 
-	/* If there is hardware support for blinking, start one
-	 * user friendly blink rate chosen by the driver.
-	 */
-	if (led_cdev->blink_set)
-		led_cdev->blink_set(led_cdev,
-			&timer_data->delay_on, &timer_data->delay_off);
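+	/* non-NULL trigger_data records that both sysfs files were created */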
+	led_cdev->trigger_data = (void *)1;
 
 	return;
 
 err_out_delayon:
 	device_remove_file(led_cdev->dev, &dev_attr_delay_on);
-err_out:
-	led_cdev->trigger_data = NULL;
-	kfree(timer_data);
 }
 
 static void timer_trig_deactivate(struct led_classdev *led_cdev)
 {
-	struct timer_trig_data *timer_data = led_cdev->trigger_data;
-	unsigned long on = 0, off = 0;
-
-	if (timer_data) {
+	if (led_cdev->trigger_data) {
 		device_remove_file(led_cdev->dev, &dev_attr_delay_on);
 		device_remove_file(led_cdev->dev, &dev_attr_delay_off);
-		del_timer_sync(&timer_data->timer);
-		kfree(timer_data);
 	}
 
-	/* If there is hardware support for blinking, stop it */
-	if (led_cdev->blink_set)
-		led_cdev->blink_set(led_cdev, &on, &off);
+	/* Stop blinking */
+	led_brightness_set(led_cdev, LED_OFF);
 }
 
 static struct led_trigger timer_led_trigger = {
diff --git a/drivers/macintosh/adb-iop.c b/drivers/macintosh/adb-iop.c
index 4446966..f5f4da3 100644
--- a/drivers/macintosh/adb-iop.c
+++ b/drivers/macintosh/adb-iop.c
@@ -80,7 +80,7 @@
 static void adb_iop_complete(struct iop_msg *msg)
 {
 	struct adb_request *req;
-	uint flags;
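+	/* local_irq_save() expects an unsigned long flags argument */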
+	unsigned long flags;
 
 	local_irq_save(flags);
 
@@ -103,7 +103,7 @@
 {
 	struct adb_iopmsg *amsg = (struct adb_iopmsg *) msg->message;
 	struct adb_request *req;
-	uint flags;
+	unsigned long flags;
 #ifdef DEBUG_ADB_IOP
 	int i;
 #endif
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 4e957f3..324a366 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -706,7 +706,7 @@
 /* return the offset of the super block in 512byte sectors */
 static inline sector_t calc_dev_sboffset(struct block_device *bdev)
 {
-	sector_t num_sectors = bdev->bd_inode->i_size / 512;
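+	/* i_size_read() samples i_size consistently even on 32-bit SMP */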
+	sector_t num_sectors = i_size_read(bdev->bd_inode) / 512;
 	return MD_NEW_SIZE_SECTORS(num_sectors);
 }
 
@@ -1386,7 +1386,7 @@
 	 */
 	switch(minor_version) {
 	case 0:
-		sb_start = rdev->bdev->bd_inode->i_size >> 9;
+		sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
 		sb_start -= 8*2;
 		sb_start &= ~(sector_t)(4*2-1);
 		break;
@@ -1472,7 +1472,7 @@
 			ret = 0;
 	}
 	if (minor_version)
-		rdev->sectors = (rdev->bdev->bd_inode->i_size >> 9) -
+		rdev->sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
 			le64_to_cpu(sb->data_offset);
 	else
 		rdev->sectors = rdev->sb_start;
@@ -1680,7 +1680,7 @@
 		return 0; /* component must fit device */
 	if (rdev->sb_start < rdev->data_offset) {
 		/* minor versions 1 and 2; superblock before data */
-		max_sectors = rdev->bdev->bd_inode->i_size >> 9;
+		max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
 		max_sectors -= rdev->data_offset;
 		if (!num_sectors || num_sectors > max_sectors)
 			num_sectors = max_sectors;
@@ -1690,7 +1690,7 @@
 	} else {
 		/* minor version 0; superblock after data */
 		sector_t sb_start;
-		sb_start = (rdev->bdev->bd_inode->i_size >> 9) - 8*2;
+		sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2;
 		sb_start &= ~(sector_t)(4*2 - 1);
 		max_sectors = rdev->sectors + sb_start - rdev->sb_start;
 		if (!num_sectors || num_sectors > max_sectors)
@@ -2584,7 +2584,7 @@
 			if (!sectors)
 				return -EBUSY;
 		} else if (!sectors)
-			sectors = (rdev->bdev->bd_inode->i_size >> 9) -
+			sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
 				rdev->data_offset;
 	}
 	if (sectors < my_mddev->dev_sectors)
@@ -2797,7 +2797,7 @@
 
 	kobject_init(&rdev->kobj, &rdev_ktype);
 
-	size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
+	size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
 	if (!size) {
 		printk(KERN_WARNING 
 			"md: %s has zero or unknown size, marking faulty!\n",
@@ -5235,8 +5235,8 @@
 
 		if (!mddev->persistent) {
 			printk(KERN_INFO "md: nonpersistent superblock ...\n");
-			rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;
-		} else 
+			rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
+		} else
 			rdev->sb_start = calc_dev_sboffset(rdev->bdev);
 		rdev->sectors = rdev->sb_start;
 
@@ -5306,7 +5306,7 @@
 	if (mddev->persistent)
 		rdev->sb_start = calc_dev_sboffset(rdev->bdev);
 	else
-		rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;
+		rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
 
 	rdev->sectors = rdev->sb_start;
 
diff --git a/drivers/misc/apds9802als.c b/drivers/misc/apds9802als.c
index f9b91ba..0ed0935 100644
--- a/drivers/misc/apds9802als.c
+++ b/drivers/misc/apds9802als.c
@@ -123,7 +123,7 @@
 {
 	struct i2c_client *client = to_i2c_client(dev);
 	struct als_data *data = i2c_get_clientdata(client);
-	unsigned int ret_val;
+	int ret_val;
 	unsigned long val;
 
 	if (strict_strtoul(buf, 10, &val))
diff --git a/drivers/misc/bh1770glc.c b/drivers/misc/bh1770glc.c
index cee632e..d79a972 100644
--- a/drivers/misc/bh1770glc.c
+++ b/drivers/misc/bh1770glc.c
@@ -649,7 +649,7 @@
 {
 	struct bh1770_chip *chip =  dev_get_drvdata(dev);
 	unsigned long value;
-	size_t ret;
+	ssize_t ret;
 
 	if (strict_strtoul(buf, 0, &value))
 		return -EINVAL;
@@ -659,8 +659,12 @@
 		pm_runtime_get_sync(dev);
 
 		ret = bh1770_lux_rate(chip, chip->lux_rate_index);
-		ret |= bh1770_lux_interrupt_control(chip, BH1770_ENABLE);
+		if (ret < 0) {
+			pm_runtime_put(dev);
+			goto leave;
+		}
 
+		ret = bh1770_lux_interrupt_control(chip, BH1770_ENABLE);
 		if (ret < 0) {
 			pm_runtime_put(dev);
 			goto leave;
diff --git a/drivers/misc/isl29020.c b/drivers/misc/isl29020.c
index 34fe835..ca47e62 100644
--- a/drivers/misc/isl29020.c
+++ b/drivers/misc/isl29020.c
@@ -87,7 +87,7 @@
 		struct device_attribute *attr, const  char *buf, size_t count)
 {
 	struct i2c_client *client = to_i2c_client(dev);
-	unsigned int ret_val;
+	int ret_val;
 	unsigned long val;
 
 	if (strict_strtoul(buf, 10, &val))
@@ -106,6 +106,8 @@
 		val = 4;
 
 	ret_val = i2c_smbus_read_byte_data(client, 0x00);
+	if (ret_val < 0)
+		return ret_val;
 
 	ret_val &= 0xFC; /*reset the bit before setting them */
 	ret_val |= val - 1;
diff --git a/drivers/net/3c507.c b/drivers/net/3c507.c
index ea9b7a0..475a66d 100644
--- a/drivers/net/3c507.c
+++ b/drivers/net/3c507.c
@@ -201,7 +201,7 @@
 #define RX_BUF_SIZE 	(1518+14+18)	/* packet+header+RBD */
 #define RX_BUF_END		(dev->mem_end - dev->mem_start)
 
-#define TX_TIMEOUT 5
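+/* Tx watchdog timeout is in jiffies; HZ/20 = 50 ms */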
+#define TX_TIMEOUT (HZ/20)
 
 /*
   That's it: only 86 bytes to set up the beast, including every extra
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c
index cdf7226..d2bb4b2 100644
--- a/drivers/net/3c515.c
+++ b/drivers/net/3c515.c
@@ -98,7 +98,7 @@
 #define WAIT_TX_AVAIL 200
 
 /* Operational parameter that usually are not changed. */
-#define TX_TIMEOUT  40		/* Time in jiffies before concluding Tx hung */
+#define TX_TIMEOUT  ((4*HZ)/10)	/* Time in jiffies before concluding Tx hung */
 
 /* The size here is somewhat misleading: the Corkscrew also uses the ISA
    aliased registers at <base>+0x400.
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index e1da258..0a92436f 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -699,7 +699,8 @@
 #define DEVICE_PCI(dev) NULL
 #endif
 
-#define VORTEX_PCI(vp) (((vp)->gendev) ? DEVICE_PCI((vp)->gendev) : NULL)
+#define VORTEX_PCI(vp)							\
+	((struct pci_dev *) (((vp)->gendev) ? DEVICE_PCI((vp)->gendev) : NULL))
 
 #ifdef CONFIG_EISA
 #define DEVICE_EISA(dev) (((dev)->bus == &eisa_bus_type) ? to_eisa_device((dev)) : NULL)
@@ -707,7 +708,8 @@
 #define DEVICE_EISA(dev) NULL
 #endif
 
-#define VORTEX_EISA(vp) (((vp)->gendev) ? DEVICE_EISA((vp)->gendev) : NULL)
+#define VORTEX_EISA(vp)							\
+	((struct eisa_device *) (((vp)->gendev) ? DEVICE_EISA((vp)->gendev) : NULL))
 
 /* The action to take with a media selection timer tick.
    Note that we deviate from the 3Com order by checking 10base2 before AUI.
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index ac422cd..dd16e83 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -490,13 +490,11 @@
 {
 	unsigned int protocol = (status >> 16) & 0x3;
 
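+	/* hardware checksum is trusted only for TCP and UDP frames */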
-	if (likely((protocol == RxProtoTCP) && (!(status & TCPFail))))
+	if (((protocol == RxProtoTCP) && !(status & TCPFail)) ||
+	    ((protocol == RxProtoUDP) && !(status & UDPFail)))
 		return 1;
-	else if ((protocol == RxProtoUDP) && (!(status & UDPFail)))
-		return 1;
-	else if ((protocol == RxProtoIP) && (!(status & IPFail)))
-		return 1;
-	return 0;
+	else
+		return 0;
 }
 
 static int cp_rx_poll(struct napi_struct *napi, int budget)
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index f5166dc..98517a3 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -1092,10 +1092,11 @@
 static void __devexit rtl8139_remove_one (struct pci_dev *pdev)
 {
 	struct net_device *dev = pci_get_drvdata (pdev);
+	struct rtl8139_private *tp = netdev_priv(dev);
 
 	assert (dev != NULL);
 
-	flush_scheduled_work();
+	cancel_delayed_work_sync(&tp->thread);
 
 	unregister_netdev (dev);
 
diff --git a/drivers/net/82596.c b/drivers/net/82596.c
index e2c9c5b..be1f197 100644
--- a/drivers/net/82596.c
+++ b/drivers/net/82596.c
@@ -191,7 +191,7 @@
 #define	 RX_SUSPEND	0x0030
 #define	 RX_ABORT	0x0040
 
-#define TX_TIMEOUT	5
+#define TX_TIMEOUT	(HZ/20)
 
 
 struct i596_reg {
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index f6668cd..a20693f 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1533,7 +1533,7 @@
 
 	  <http://support.intel.com/support/network/adapter/pro100/21397.htm>
 
-          to identify the adapter.
+	  to identify the adapter.
 
 	  For the latest Intel PRO/100 network driver for Linux, see:
 
@@ -1786,17 +1786,17 @@
 	tristate "Micrel KSZ8841/42 with generic bus interface"
 	depends on HAS_IOMEM && DMA_ENGINE
 	help
-	 This platform driver is for KSZ8841(1-port) / KS8842(2-port)
-	 ethernet switch chip (managed, VLAN, QoS) from Micrel or
-	 Timberdale(FPGA).
+	  This platform driver is for the KSZ8841 (1-port) / KS8842 (2-port)
+	  Ethernet switch chips (managed, VLAN, QoS) from Micrel or
+	  Timberdale (FPGA).
 
 config KS8851
-       tristate "Micrel KS8851 SPI"
-       depends on SPI
-       select MII
+	tristate "Micrel KS8851 SPI"
+	depends on SPI
+	select MII
 	select CRC32
-       help
-         SPI driver for Micrel KS8851 SPI attached network chip.
+	help
+	  SPI driver for Micrel KS8851 SPI attached network chip.
 
 config KS8851_MLL
 	tristate "Micrel KS8851 MLL"
@@ -2133,25 +2133,25 @@
 	  will be called ipg.  This is recommended.
 
 config IGB
-       tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support"
-       depends on PCI
-       ---help---
-         This driver supports Intel(R) 82575/82576 gigabit ethernet family of
-         adapters.  For more information on how to identify your adapter, go
-         to the Adapter & Driver ID Guide at:
+	tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support"
+	depends on PCI
+	---help---
+	  This driver supports Intel(R) 82575/82576 gigabit ethernet family of
+	  adapters.  For more information on how to identify your adapter, go
+	  to the Adapter & Driver ID Guide at:
 
-         <http://support.intel.com/support/network/adapter/pro100/21397.htm>
+	  <http://support.intel.com/support/network/adapter/pro100/21397.htm>
 
-         For general information and support, go to the Intel support
-         website at:
+	  For general information and support, go to the Intel support
+	  website at:
 
-         <http://support.intel.com>
+	  <http://support.intel.com>
 
-         More specific information on configuring the driver is in
-         <file:Documentation/networking/e1000.txt>.
+	  More specific information on configuring the driver is in
+	  <file:Documentation/networking/e1000.txt>.
 
-         To compile this driver as a module, choose M here. The module
-         will be called igb.
+	  To compile this driver as a module, choose M here. The module
+	  will be called igb.
 
 config IGB_DCA
 	bool "Direct Cache Access (DCA) Support"
@@ -2163,25 +2163,25 @@
 	  is used, with the intent of lessening the impact of cache misses.
 
 config IGBVF
-       tristate "Intel(R) 82576 Virtual Function Ethernet support"
-       depends on PCI
-       ---help---
-         This driver supports Intel(R) 82576 virtual functions.  For more
-         information on how to identify your adapter, go to the Adapter &
-         Driver ID Guide at:
+	tristate "Intel(R) 82576 Virtual Function Ethernet support"
+	depends on PCI
+	---help---
+	  This driver supports Intel(R) 82576 virtual functions.  For more
+	  information on how to identify your adapter, go to the Adapter &
+	  Driver ID Guide at:
 
-         <http://support.intel.com/support/network/adapter/pro100/21397.htm>
+	  <http://support.intel.com/support/network/adapter/pro100/21397.htm>
 
-         For general information and support, go to the Intel support
-         website at:
+	  For general information and support, go to the Intel support
+	  website at:
 
-         <http://support.intel.com>
+	  <http://support.intel.com>
 
-         More specific information on configuring the driver is in
-         <file:Documentation/networking/e1000.txt>.
+	  More specific information on configuring the driver is in
+	  <file:Documentation/networking/e1000.txt>.
 
-         To compile this driver as a module, choose M here. The module
-         will be called igbvf.
+	  To compile this driver as a module, choose M here. The module
+	  will be called igbvf.
 
 source "drivers/net/ixp2000/Kconfig"
 
@@ -2300,14 +2300,14 @@
 	  will be called skge.  This is recommended.
 
 config SKGE_DEBUG
-       bool "Debugging interface"
-       depends on SKGE && DEBUG_FS
-       help
-	 This option adds the ability to dump driver state for debugging.
-	 The file /sys/kernel/debug/skge/ethX displays the state of the internal
-	 transmit and receive rings.
+	bool "Debugging interface"
+	depends on SKGE && DEBUG_FS
+	help
+	  This option adds the ability to dump driver state for debugging.
+	  The file /sys/kernel/debug/skge/ethX displays the state of the internal
+	  transmit and receive rings.
 
-	 If unsure, say N.
+	  If unsure, say N.
 
 config SKY2
 	tristate "SysKonnect Yukon2 support"
@@ -2326,14 +2326,14 @@
 	  will be called sky2.  This is recommended.
 
 config SKY2_DEBUG
-       bool "Debugging interface"
-       depends on SKY2 && DEBUG_FS
-       help
-	 This option adds the ability to dump driver state for debugging.
-	 The file /sys/kernel/debug/sky2/ethX displays the state of the internal
-	 transmit and receive rings.
+	bool "Debugging interface"
+	depends on SKY2 && DEBUG_FS
+	help
+	  This option adds the ability to dump driver state for debugging.
+	  The file /sys/kernel/debug/sky2/ethX displays the state of the internal
+	  transmit and receive rings.
 
-	 If unsure, say N.
+	  If unsure, say N.
 
 config VIA_VELOCITY
 	tristate "VIA Velocity support"
@@ -2389,12 +2389,12 @@
 	  Cell Processor-Based Blades from IBM.
 
 config TSI108_ETH
-	   tristate "Tundra TSI108 gigabit Ethernet support"
-	   depends on TSI108_BRIDGE
-	   help
-	     This driver supports Tundra TSI108 gigabit Ethernet ports.
-	     To compile this driver as a module, choose M here: the module
-	     will be called tsi108_eth.
+	tristate "Tundra TSI108 gigabit Ethernet support"
+	depends on TSI108_BRIDGE
+	help
+	  This driver supports Tundra TSI108 gigabit Ethernet ports.
+	  To compile this driver as a module, choose M here: the module
+	  will be called tsi108_eth.
 
 config GELIC_NET
 	tristate "PS3 Gigabit Ethernet driver"
@@ -2543,10 +2543,10 @@
 	depends on PCI
 	select MII
 	---help---
-	  This is a gigabit ethernet driver for Topcliff PCH.
-	  Topcliff PCH is the platform controller hub that is used in Intel's
+	  This is a gigabit ethernet driver for EG20T PCH.
+	  EG20T PCH is the platform controller hub that is used in Intel's
 	  general embedded platform.
-	  Topcliff PCH has Gigabit Ethernet interface.
+	  The EG20T PCH has a Gigabit Ethernet interface.
 	  Using this interface, it is able to access system devices connected
 	  to Gigabit Ethernet.
 	  This driver enables Gigabit Ethernet function.
@@ -2573,32 +2573,32 @@
 	tristate
 
 config CHELSIO_T1
-        tristate "Chelsio 10Gb Ethernet support"
-        depends on PCI
+	tristate "Chelsio 10Gb Ethernet support"
+	depends on PCI
 	select CRC32
 	select MDIO
-        help
-          This driver supports Chelsio gigabit and 10-gigabit
-          Ethernet cards. More information about adapter features and
+	help
+	  This driver supports Chelsio gigabit and 10-gigabit
+	  Ethernet cards. More information about adapter features and
 	  performance tuning is in <file:Documentation/networking/cxgb.txt>.
 
-          For general information about Chelsio and our products, visit
-          our website at <http://www.chelsio.com>.
+	  For general information about Chelsio and our products, visit
+	  our website at <http://www.chelsio.com>.
 
-          For customer support, please visit our customer support page at
-          <http://www.chelsio.com/support.html>.
+	  For customer support, please visit our customer support page at
+	  <http://www.chelsio.com/support.html>.
 
-          Please send feedback to <linux-bugs@chelsio.com>.
+	  Please send feedback to <linux-bugs@chelsio.com>.
 
-          To compile this driver as a module, choose M here: the module
-          will be called cxgb.
+	  To compile this driver as a module, choose M here: the module
+	  will be called cxgb.
 
 config CHELSIO_T1_1G
-        bool "Chelsio gigabit Ethernet support"
-        depends on CHELSIO_T1
-        help
-          Enables support for Chelsio's gigabit Ethernet PCI cards.  If you
-          are using only 10G cards say 'N' here.
+	bool "Chelsio gigabit Ethernet support"
+	depends on CHELSIO_T1
+	help
+	  Enables support for Chelsio's gigabit Ethernet PCI cards.  If you
+	  are using only 10G cards say 'N' here.
 
 config CHELSIO_T3_DEPENDS
 	tristate
@@ -2728,26 +2728,26 @@
 	  If unsure, say N.
 
 config IXGBEVF
-       tristate "Intel(R) 82599 Virtual Function Ethernet support"
-       depends on PCI_MSI
-       ---help---
-         This driver supports Intel(R) 82599 virtual functions.  For more
-         information on how to identify your adapter, go to the Adapter &
-         Driver ID Guide at:
+	tristate "Intel(R) 82599 Virtual Function Ethernet support"
+	depends on PCI_MSI
+	---help---
+	  This driver supports Intel(R) 82599 virtual functions.  For more
+	  information on how to identify your adapter, go to the Adapter &
+	  Driver ID Guide at:
 
-         <http://support.intel.com/support/network/sb/CS-008441.htm>
+	  <http://support.intel.com/support/network/sb/CS-008441.htm>
 
-         For general information and support, go to the Intel support
-         website at:
+	  For general information and support, go to the Intel support
+	  website at:
 
-         <http://support.intel.com>
+	  <http://support.intel.com>
 
-         More specific information on configuring the driver is in
-         <file:Documentation/networking/ixgbevf.txt>.
+	  More specific information on configuring the driver is in
+	  <file:Documentation/networking/ixgbevf.txt>.
 
-         To compile this driver as a module, choose M here. The module
-         will be called ixgbevf.  MSI-X interrupt support is required
-         for this driver to work correctly.
+	  To compile this driver as a module, choose M here. The module
+	  will be called ixgbevf.  MSI-X interrupt support is required
+	  for this driver to work correctly.
 
 config IXGB
 	tristate "Intel(R) PRO/10GbE support"
@@ -2772,29 +2772,38 @@
 	  will be called ixgb.
 
 config S2IO
-	tristate "S2IO 10Gbe XFrame NIC"
+	tristate "Exar Xframe 10Gb Ethernet Adapter"
 	depends on PCI
 	---help---
-	  This driver supports the 10Gbe XFrame NIC of S2IO. 
+	  This driver supports Exar Corp's Xframe Series 10Gb Ethernet Adapters.
+
 	  More specific information on configuring the driver is in 
 	  <file:Documentation/networking/s2io.txt>.
 
+	  To compile this driver as a module, choose M here. The module
+	  will be called s2io.
+
 config VXGE
-	tristate "Neterion X3100 Series 10GbE PCIe Server Adapter"
+	tristate "Exar X3100 Series 10GbE PCIe Server Adapter"
 	depends on PCI && INET
 	---help---
-	  This driver supports Neterion Inc's X3100 Series 10 GbE PCIe
+	  This driver supports Exar Corp's X3100 Series 10 GbE PCIe
 	  I/O Virtualized Server Adapter.
+
 	  More specific information on configuring the driver is in
 	  <file:Documentation/networking/vxge.txt>.
 
+	  To compile this driver as a module, choose M here. The module
+	  will be called vxge.
+
 config VXGE_DEBUG_TRACE_ALL
 	bool "Enabling All Debug trace statments in driver"
 	default n
 	depends on VXGE
 	---help---
 	  Say Y here if you want to enable all the debug trace statements in
-	  driver. By  default only few debug trace statements are enabled.
+	  the vxge driver. By default, only a few debug trace statements are
+	  enabled.
 
 config MYRI10GE
 	tristate "Myricom Myri-10G Ethernet support"
@@ -2906,18 +2915,18 @@
 	  will be called qlge.
 
 config BNA
-        tristate "Brocade 1010/1020 10Gb Ethernet Driver support"
-        depends on PCI
-        ---help---
-          This driver supports Brocade 1010/1020 10Gb CEE capable Ethernet
-          cards.
-          To compile this driver as a module, choose M here: the module
-          will be called bna.
+	tristate "Brocade 1010/1020 10Gb Ethernet Driver support"
+	depends on PCI
+	---help---
+	  This driver supports Brocade 1010/1020 10Gb CEE capable Ethernet
+	  cards.
+	  To compile this driver as a module, choose M here: the module
+	  will be called bna.
 
-          For general information and support, go to the Brocade support
-          website at:
+	  For general information and support, go to the Brocade support
+	  website at:
 
-          <http://support.brocade.com>
+	  <http://support.brocade.com>
 
 source "drivers/net/sfc/Kconfig"
 
@@ -3227,18 +3236,18 @@
 	  modules once you have said "make modules". If unsure, say N.
 
 config PPP_MPPE
-       tristate "PPP MPPE compression (encryption) (EXPERIMENTAL)"
-       depends on PPP && EXPERIMENTAL
-       select CRYPTO
-       select CRYPTO_SHA1
-       select CRYPTO_ARC4
-       select CRYPTO_ECB
-       ---help---
-         Support for the MPPE Encryption protocol, as employed by the
-	 Microsoft Point-to-Point Tunneling Protocol.
+	tristate "PPP MPPE compression (encryption) (EXPERIMENTAL)"
+	depends on PPP && EXPERIMENTAL
+	select CRYPTO
+	select CRYPTO_SHA1
+	select CRYPTO_ARC4
+	select CRYPTO_ECB
+	---help---
+	  Support for the MPPE Encryption protocol, as employed by the
+	  Microsoft Point-to-Point Tunneling Protocol.
 
-	 See http://pptpclient.sourceforge.net/ for information on
-	 configuring PPTP clients and servers to utilize this method.
+	  See http://pptpclient.sourceforge.net/ for information on
+	  configuring PPTP clients and servers to utilize this method.
 
 config PPPOE
 	tristate "PPP over Ethernet (EXPERIMENTAL)"
@@ -3397,14 +3406,14 @@
 	depends on EXPERIMENTAL && VIRTIO
 	---help---
 	  This is the virtual network driver for virtio.  It can be used with
-          lguest or QEMU based VMMs (like KVM or Xen).  Say Y or M.
+	  lguest or QEMU based VMMs (like KVM or Xen).  Say Y or M.
 
 config VMXNET3
-       tristate "VMware VMXNET3 ethernet driver"
-       depends on PCI && INET
-       help
-         This driver supports VMware's vmxnet3 virtual ethernet NIC.
-         To compile this driver as a module, choose M here: the
-         module will be called vmxnet3.
+	tristate "VMware VMXNET3 ethernet driver"
+	depends on PCI && INET
+	help
+	  This driver supports VMware's vmxnet3 virtual ethernet NIC.
+	  To compile this driver as a module, choose M here: the
+	  module will be called vmxnet3.
 
 endif # NETDEVICES
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index 62f2110..0c9217f 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -340,14 +340,6 @@
 	return 0;
 }
 
-/*
- * Get the current statistics.
- */
-static struct net_device_stats *am79c961_getstats (struct net_device *dev)
-{
-	return &dev->stats;
-}
-
 static void am79c961_mc_hash(char *addr, unsigned short *hash)
 {
 	if (addr[0] & 0x01) {
@@ -665,7 +657,6 @@
 	.ndo_open		= am79c961_open,
 	.ndo_stop		= am79c961_close,
 	.ndo_start_xmit		= am79c961_sendpacket,
-	.ndo_get_stats		= am79c961_getstats,
 	.ndo_set_multicast_list	= am79c961_setmulticastlist,
 	.ndo_tx_timeout		= am79c961_timeout,
 	.ndo_validate_addr	= eth_validate_addr,
diff --git a/drivers/net/arm/w90p910_ether.c b/drivers/net/arm/w90p910_ether.c
index 4545d5a..bfea499 100644
--- a/drivers/net/arm/w90p910_ether.c
+++ b/drivers/net/arm/w90p910_ether.c
@@ -117,7 +117,7 @@
 #define TX_DESC_SIZE		10
 #define MAX_RBUFF_SZ		0x600
 #define MAX_TBUFF_SZ		0x600
-#define TX_TIMEOUT		50
+#define TX_TIMEOUT		(HZ/2)
 #define DELAY			1000
 #define CAM0			0x0
 
diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c
index 8987689..871b163 100644
--- a/drivers/net/at1700.c
+++ b/drivers/net/at1700.c
@@ -150,7 +150,7 @@
 #define PORT_OFFSET(o) (o)
 
 
-#define TX_TIMEOUT		10
+#define TX_TIMEOUT		(HZ/10)
 
 
 /* Index to functions, as function prototypes. */
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c
index 8cb27cb..ce0091e 100644
--- a/drivers/net/atarilance.c
+++ b/drivers/net/atarilance.c
@@ -116,7 +116,7 @@
 #define RX_RING_LEN_BITS		(RX_LOG_RING_SIZE << 5)
 #define	RX_RING_MOD_MASK		(RX_RING_SIZE - 1)
 
-#define TX_TIMEOUT	20
+#define TX_TIMEOUT	(HZ/5)
 
 /* The LANCE Rx and Tx ring descriptors. */
 struct lance_rx_head {
diff --git a/drivers/net/atl1c/atl1c_hw.c b/drivers/net/atl1c/atl1c_hw.c
index 919080b..1bf6720 100644
--- a/drivers/net/atl1c/atl1c_hw.c
+++ b/drivers/net/atl1c/atl1c_hw.c
@@ -82,7 +82,7 @@
 	addr[0] = addr[1] = 0;
 	AT_READ_REG(hw, REG_OTP_CTRL, &otp_ctrl_data);
 	if (atl1c_check_eeprom_exist(hw)) {
-		if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c_b) {
+		if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c) {
 			/* Enable OTP CLK */
 			if (!(otp_ctrl_data & OTP_CTRL_CLK_EN)) {
 				otp_ctrl_data |= OTP_CTRL_CLK_EN;
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 43579b3..5336310 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -3043,7 +3043,6 @@
 	atl1_pcie_patch(adapter);
 	/* assume we have no link for now */
 	netif_carrier_off(netdev);
-	netif_stop_queue(netdev);
 
 	setup_timer(&adapter->phy_config_timer, atl1_phy_config,
 		    (unsigned long)adapter);
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
index 35b14be..4e6f4e9 100644
--- a/drivers/net/atlx/atl2.c
+++ b/drivers/net/atlx/atl2.c
@@ -1504,8 +1504,8 @@
 
 	del_timer_sync(&adapter->watchdog_timer);
 	del_timer_sync(&adapter->phy_config_timer);
-
-	flush_scheduled_work();
+	cancel_work_sync(&adapter->reset_task);
+	cancel_work_sync(&adapter->link_chg_task);
 
 	unregister_netdev(netdev);
 
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 43489f8..b9debcf 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -106,8 +106,6 @@
  * complete immediately.
  */
 
-struct au1000_private *au_macs[NUM_ETH_INTERFACES];
-
 /*
  * board-specific configurations
  *
@@ -155,10 +153,10 @@
 	spin_lock_irqsave(&aup->lock, flags);
 
 	if (force_reset || (!aup->mac_enabled)) {
-		writel(MAC_EN_CLOCK_ENABLE, &aup->enable);
+		writel(MAC_EN_CLOCK_ENABLE, aup->enable);
 		au_sync_delay(2);
 		writel((MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2
-				| MAC_EN_CLOCK_ENABLE), &aup->enable);
+				| MAC_EN_CLOCK_ENABLE), aup->enable);
 		au_sync_delay(2);
 
 		aup->mac_enabled = 1;
@@ -503,9 +501,9 @@
 
 	au1000_hard_stop(dev);
 
-	writel(MAC_EN_CLOCK_ENABLE, &aup->enable);
+	writel(MAC_EN_CLOCK_ENABLE, aup->enable);
 	au_sync_delay(2);
-	writel(0, &aup->enable);
+	writel(0, aup->enable);
 	au_sync_delay(2);
 
 	aup->tx_full = 0;
@@ -1119,7 +1117,7 @@
 	/* set a random MAC now in case platform_data doesn't provide one */
 	random_ether_addr(dev->dev_addr);
 
-	writel(0, &aup->enable);
+	writel(0, aup->enable);
 	aup->mac_enabled = 0;
 
 	pd = pdev->dev.platform_data;
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c
index b6da4cf..4bebff3 100644
--- a/drivers/net/ax88796.c
+++ b/drivers/net/ax88796.c
@@ -325,7 +325,7 @@
 static void
 ax_mii_ei_outbits(struct net_device *dev, unsigned int bits, int len)
 {
-	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+	struct ei_device *ei_local = netdev_priv(dev);
 	void __iomem *memr_addr = (void __iomem *)dev->base_addr + AX_MEMR;
 	unsigned int memr;
 
@@ -364,7 +364,7 @@
 static unsigned int
 ax_phy_ei_inbits(struct net_device *dev, int no)
 {
-	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+	struct ei_device *ei_local = netdev_priv(dev);
 	void __iomem *memr_addr = (void __iomem *)dev->base_addr + AX_MEMR;
 	unsigned int memr;
 	unsigned int result = 0;
@@ -412,7 +412,7 @@
 static int
 ax_phy_read(struct net_device *dev, int phy_addr, int reg)
 {
-	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+	struct ei_device *ei_local = netdev_priv(dev);
 	unsigned long flags;
  	unsigned int result;
 
@@ -435,7 +435,7 @@
 static void
 ax_phy_write(struct net_device *dev, int phy_addr, int reg, int value)
 {
-	struct ei_device *ei = (struct ei_device *) netdev_priv(dev);
+	struct ei_device *ei = netdev_priv(dev);
 	struct ax_device  *ax = to_ax_dev(dev);
 	unsigned long flags;
 
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index c6e8631..2e2b762 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -381,11 +381,11 @@
 	__b44_set_flow_ctrl(bp, pause_enab);
 }
 
-#ifdef SSB_DRIVER_MIPS
-extern char *nvram_get(char *name);
+#ifdef CONFIG_BCM47XX
+#include <asm/mach-bcm47xx/nvram.h>
 static void b44_wap54g10_workaround(struct b44 *bp)
 {
-	const char *str;
+	char buf[20];
 	u32 val;
 	int err;
 
@@ -394,10 +394,9 @@
 	 * see https://dev.openwrt.org/ticket/146
 	 * check and reset bit "isolate"
 	 */
-	str = nvram_get("boardnum");
-	if (!str)
+	if (nvram_getenv("boardnum", buf, sizeof(buf)) < 0)
 		return;
-	if (simple_strtoul(str, NULL, 0) == 2) {
+	if (simple_strtoul(buf, NULL, 0) == 2) {
 		err = __b44_readphy(bp, 0, MII_BMCR, &val);
 		if (err)
 			goto error;
diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c
index ecfef24..e94a966a 100644
--- a/drivers/net/bcm63xx_enet.c
+++ b/drivers/net/bcm63xx_enet.c
@@ -1097,7 +1097,7 @@
 	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
 
 	/* make sure no mib update is scheduled */
-	flush_scheduled_work();
+	cancel_work_sync(&priv->mib_update_task);
 
 	/* disable dma & mac */
 	bcm_enet_disable_dma(priv, priv->tx_chan);
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 4594a28..9cab323 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -38,14 +38,17 @@
 #define BE_NAME			"ServerEngines BladeEngine2 10Gbps NIC"
 #define BE3_NAME		"ServerEngines BladeEngine3 10Gbps NIC"
 #define OC_NAME			"Emulex OneConnect 10Gbps NIC"
-#define OC_NAME1		"Emulex OneConnect 10Gbps NIC (be3)"
+#define OC_NAME_BE		OC_NAME	"(be3)"
+#define OC_NAME_LANCER		OC_NAME "(Lancer)"
 #define DRV_DESC		"ServerEngines BladeEngine 10Gbps NIC Driver"
 
 #define BE_VENDOR_ID 		0x19a2
+#define EMULEX_VENDOR_ID	0x10df
 #define BE_DEVICE_ID1		0x211
 #define BE_DEVICE_ID2		0x221
-#define OC_DEVICE_ID1		0x700
-#define OC_DEVICE_ID2		0x710
+#define OC_DEVICE_ID1		0x700	/* Device Id for BE2 cards */
+#define OC_DEVICE_ID2		0x710	/* Device Id for BE3 cards */
+#define OC_DEVICE_ID3		0xe220	/* Device id for Lancer cards */
 
 static inline char *nic_name(struct pci_dev *pdev)
 {
@@ -53,7 +56,9 @@
 	case OC_DEVICE_ID1:
 		return OC_NAME;
 	case OC_DEVICE_ID2:
-		return OC_NAME1;
+		return OC_NAME_BE;
+	case OC_DEVICE_ID3:
+		return OC_NAME_LANCER;
 	case BE_DEVICE_ID2:
 		return BE3_NAME;
 	default:
@@ -149,6 +154,7 @@
 	u16 min_eqd;		/* in usecs */
 	u16 max_eqd;		/* in usecs */
 	u16 cur_eqd;		/* in usecs */
+	u8  msix_vec_idx;
 
 	struct napi_struct napi;
 };
@@ -214,7 +220,9 @@
 	struct be_rx_stats stats;
 	u8 rss_id;
 	bool rx_post_starved;	/* Zero rx frags have been posted to BE */
-	u32 cache_line_barrier[16];
+	u16 last_frag_index;
+	u16 rsvd;
+	u32 cache_line_barrier[15];
 };
 
 struct be_vf_cfg {
@@ -260,6 +268,8 @@
 	u32 num_rx_qs;
 	u32 big_page_size;	/* Compounded page size shared by rx wrbs */
 
+	u8 msix_vec_next_idx;
+
 	struct vlan_group *vlan_grp;
 	u16 vlans_added;
 	u16 max_vlans;	/* Number of vlans supported */
@@ -299,8 +309,8 @@
 
 	bool sriov_enabled;
 	struct be_vf_cfg vf_cfg[BE_MAX_VF];
-	u8 base_eq_id;
 	u8 is_virtfn;
+	u32 sli_family;
 };
 
 #define be_physfn(adapter) (!adapter->is_virtfn)
@@ -309,6 +319,8 @@
 #define BE_GEN2 2
 #define BE_GEN3 3
 
+#define lancer_chip(adapter)		(adapter->pdev->device == OC_DEVICE_ID3)
+
 extern const struct ethtool_ops be_ethtool_ops;
 
 #define tx_stats(adapter)		(&adapter->tx_stats)
@@ -416,10 +428,17 @@
 static inline void be_check_sriov_fn_type(struct be_adapter *adapter)
 {
 	u8 data;
+	u32 sli_intf;
 
-	pci_write_config_byte(adapter->pdev, 0xFE, 0xAA);
-	pci_read_config_byte(adapter->pdev, 0xFE, &data);
-	adapter->is_virtfn = (data != 0xAA);
+	if (lancer_chip(adapter)) {
+		pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET,
+								&sli_intf);
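+		/* the FT bit indicates an SR-IOV virtual function */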
+		adapter->is_virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
+	} else {
+		pci_write_config_byte(adapter->pdev, 0xFE, 0xAA);
+		pci_read_config_byte(adapter->pdev, 0xFE, &data);
+		adapter->is_virtfn = (data != 0xAA);
+	}
 }
 
 static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 36eca1c..171a08c 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -323,7 +323,12 @@
 
 static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
 {
-	u32 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
+	u32 sem;
+
+	if (lancer_chip(adapter))
+		sem  = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET);
+	else
+		sem  = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
 
 	*stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
 	if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
@@ -680,16 +685,36 @@
 		OPCODE_COMMON_CQ_CREATE, sizeof(*req));
 
 	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
+	if (lancer_chip(adapter)) {
+		req->hdr.version = 1;
+		req->page_size = 1; /* 1 for 4K */
+		AMAP_SET_BITS(struct amap_cq_context_lancer, coalescwm, ctxt,
+								coalesce_wm);
+		AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
+								no_delay);
+		AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
+						__ilog2_u32(cq->len/256));
+		AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
+		AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
+								ctxt, 1);
+		AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
+								ctxt, eq->id);
+		AMAP_SET_BITS(struct amap_cq_context_lancer, armed, ctxt, 1);
+	} else {
+		AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
+								coalesce_wm);
+		AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
+								ctxt, no_delay);
+		AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
+						__ilog2_u32(cq->len/256));
+		AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
+		AMAP_SET_BITS(struct amap_cq_context_be, solevent,
+								ctxt, sol_evts);
+		AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
+		AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
+		AMAP_SET_BITS(struct amap_cq_context_be, armed, ctxt, 1);
+	}
 
-	AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
-	AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
-	AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
-			__ilog2_u32(cq->len/256));
-	AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
-	AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
-	AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
-	AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
-	AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
 
 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
@@ -737,13 +762,27 @@
 			OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));
 
 	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
+	if (lancer_chip(adapter)) {
+		req->hdr.version = 1;
+		req->cq_id = cpu_to_le16(cq->id);
 
-	AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
-	AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
-		be_encoded_q_len(mccq->len));
-	AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);
+		AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
+						be_encoded_q_len(mccq->len));
+		AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
+		AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
+								ctxt, cq->id);
+		AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
+								 ctxt, 1);
+
+	} else {
+		AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
+		AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
+						be_encoded_q_len(mccq->len));
+		AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
+	}
+
 	/* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
-	req->async_event_bitmap[0] |= 0x00000022;
+	req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
 	be_dws_cpu_to_le(ctxt, sizeof(req->context));
 
 	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
@@ -1235,7 +1274,7 @@
 
 		i = 0;
 		netdev_for_each_mc_addr(ha, netdev)
-			memcpy(req->mac[i].byte, ha->addr, ETH_ALEN);
+			memcpy(req->mac[i++].byte, ha->addr, ETH_ALEN);
 	} else {
 		req->promiscuous = 1;
 	}
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index 8469ff0..83d15c8 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -309,7 +309,7 @@
 /******************** Create CQ ***************************/
 /* Pseudo amap definition in which each bit of the actual structure is defined
  * as a byte: used to calculate offset/shift/mask of each field */
-struct amap_cq_context {
+struct amap_cq_context_be {
 	u8 cidx[11];		/* dword 0*/
 	u8 rsvd0;		/* dword 0*/
 	u8 coalescwm[2];	/* dword 0*/
@@ -332,14 +332,32 @@
 	u8 rsvd5[32];		/* dword 3*/
 } __packed;
 
+struct amap_cq_context_lancer {
+	u8 rsvd0[12];		/* dword 0*/
+	u8 coalescwm[2];	/* dword 0*/
+	u8 nodelay;		/* dword 0*/
+	u8 rsvd1[12];		/* dword 0*/
+	u8 count[2];		/* dword 0*/
+	u8 valid;		/* dword 0*/
+	u8 rsvd2;		/* dword 0*/
+	u8 eventable;		/* dword 0*/
+	u8 eqid[16];		/* dword 1*/
+	u8 rsvd3[15];		/* dword 1*/
+	u8 armed;		/* dword 1*/
+	u8 rsvd4[32];		/* dword 2*/
+	u8 rsvd5[32];		/* dword 3*/
+} __packed;
+
 struct be_cmd_req_cq_create {
 	struct be_cmd_req_hdr hdr;
 	u16 num_pages;
-	u16 rsvd0;
-	u8 context[sizeof(struct amap_cq_context) / 8];
+	u8 page_size;
+	u8 rsvd0;
+	u8 context[sizeof(struct amap_cq_context_be) / 8];
 	struct phys_addr pages[8];
 } __packed;
 
+
 struct be_cmd_resp_cq_create {
 	struct be_cmd_resp_hdr hdr;
 	u16 cq_id;
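Both the _be and _lancer variants follow the pseudo-amap convention described above: each bit of the hardware context is declared as one byte, so a field's bit offset and bit width fall out of ordinary offsetof()/sizeof() arithmetic. A sketch of how such helpers can be derived (illustrative macro names, not the driver's real AMAP_* implementation from be_hw.h):

#include <stddef.h>

/* Bit offset of a field == its byte offset in the pseudo struct */
#define AMAP_BIT_OFFSET(_struct, field) \
		(offsetof(struct _struct, field))

/* Bit width of a field == its size in bytes in the pseudo struct */
#define AMAP_BIT_SIZE(_struct, field) \
		(sizeof(((struct _struct *)0)->field))

/* For amap_cq_context_lancer above:
 *	AMAP_BIT_OFFSET(amap_cq_context_lancer, coalescwm) == 12
 *	AMAP_BIT_SIZE(amap_cq_context_lancer, coalescwm)   ==  2
 * i.e. a 2-bit field starting at bit 12 of dword 0, which a setter
 * can convert into a dword index, shift and mask.
 */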
@@ -349,7 +367,7 @@
 /******************** Create MCCQ ***************************/
 /* Pseudo amap definition in which each bit of the actual structure is defined
  * as a byte: used to calculate offset/shift/mask of each field */
-struct amap_mcc_context {
+struct amap_mcc_context_be {
 	u8 con_index[14];
 	u8 rsvd0[2];
 	u8 ring_size[4];
@@ -364,12 +382,23 @@
 	u8 rsvd2[32];
 } __packed;
 
+struct amap_mcc_context_lancer {
+	u8 async_cq_id[16];
+	u8 ring_size[4];
+	u8 rsvd0[12];
+	u8 rsvd1[31];
+	u8 valid;
+	u8 async_cq_valid[1];
+	u8 rsvd2[31];
+	u8 rsvd3[32];
+} __packed;
+
 struct be_cmd_req_mcc_create {
 	struct be_cmd_req_hdr hdr;
 	u16 num_pages;
-	u16 rsvd0;
+	u16 cq_id;
 	u32 async_event_bitmap[1];
-	u8 context[sizeof(struct amap_mcc_context) / 8];
+	u8 context[sizeof(struct amap_mcc_context_be) / 8];
 	struct phys_addr pages[8];
 } __packed;
 
@@ -605,6 +634,7 @@
 	struct be_rxf_stats rxf;
 	u32 rsvd[48];
 	struct be_erx_stats erx;
+	u32 rsvd1[6];
 };
 
 struct be_cmd_req_get_stats {
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index a2ec5df..4096d97 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -32,10 +32,12 @@
 #define MPU_EP_CONTROL 		0
 
 /********** MPU semphore ******************/
-#define MPU_EP_SEMAPHORE_OFFSET 	0xac
-#define EP_SEMAPHORE_POST_STAGE_MASK	0x0000FFFF
-#define EP_SEMAPHORE_POST_ERR_MASK	0x1
-#define EP_SEMAPHORE_POST_ERR_SHIFT	31
+#define MPU_EP_SEMAPHORE_OFFSET		0xac
+#define MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET	0x400
+#define EP_SEMAPHORE_POST_STAGE_MASK		0x0000FFFF
+#define EP_SEMAPHORE_POST_ERR_MASK		0x1
+#define EP_SEMAPHORE_POST_ERR_SHIFT		31
+
 /* MPU semphore POST stage values */
 #define POST_STAGE_AWAITING_HOST_RDY 	0x1 /* FW awaiting goahead from host */
 #define POST_STAGE_HOST_RDY 		0x2 /* Host has given go-ahed to FW */
@@ -66,6 +68,28 @@
 #define PCICFG_UE_STATUS_LOW_MASK		0xA8
 #define PCICFG_UE_STATUS_HI_MASK		0xAC
 
+/******** SLI_INTF ***********************/
+#define SLI_INTF_REG_OFFSET			0x58
+#define SLI_INTF_VALID_MASK			0xE0000000
+#define SLI_INTF_VALID				0xC0000000
+#define SLI_INTF_HINT2_MASK			0x1F000000
+#define SLI_INTF_HINT2_SHIFT			24
+#define SLI_INTF_HINT1_MASK			0x00FF0000
+#define SLI_INTF_HINT1_SHIFT			16
+#define SLI_INTF_FAMILY_MASK			0x00000F00
+#define SLI_INTF_FAMILY_SHIFT			8
+#define SLI_INTF_IF_TYPE_MASK			0x0000F000
+#define SLI_INTF_IF_TYPE_SHIFT			12
+#define SLI_INTF_REV_MASK			0x000000F0
+#define SLI_INTF_REV_SHIFT			4
+#define SLI_INTF_FT_MASK			0x00000001
+
+
+/* SLI family */
+#define BE_SLI_FAMILY		0x0
+#define LANCER_A0_SLI_FAMILY	0xA
+
+
 /********* ISR0 Register offset **********/
 #define CEV_ISR0_OFFSET 			0xC18
 #define CEV_ISR_SIZE				4
@@ -73,6 +97,9 @@
 /********* Event Q door bell *************/
 #define DB_EQ_OFFSET			DB_CQ_OFFSET
 #define DB_EQ_RING_ID_MASK		0x1FF	/* bits 0 - 8 */
+#define DB_EQ_RING_ID_EXT_MASK		0x3e00  /* bits 9-13 */
+#define DB_EQ_RING_ID_EXT_MASK_SHIFT	(2) /* qid bits 9-13 placing at 11-15 */
+
 /* Clear the interrupt for this eq */
 #define DB_EQ_CLR_SHIFT			(9)	/* bit 9 */
 /* Must be 1 */
@@ -85,6 +112,10 @@
 /********* Compl Q door bell *************/
 #define DB_CQ_OFFSET 			0x120
 #define DB_CQ_RING_ID_MASK		0x3FF	/* bits 0 - 9 */
+#define DB_CQ_RING_ID_EXT_MASK		0x7C00	/* bits 10-14 */
+#define DB_CQ_RING_ID_EXT_MASK_SHIFT	(1)	/* qid bits 10-14
+						 placing at 11-15 */
+
 /* Number of event entries processed */
 #define DB_CQ_NUM_POPPED_SHIFT		(16) 	/* bits 16 - 28 */
 /* Rearm bit */
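The new *_EXT_MASK/*_EXT_MASK_SHIFT pairs widen the usable queue-id range: id bits above the original mask are relocated upward when the doorbell value is composed (see the matching be_main.c hunks below). A worked example for the CQ doorbell, assuming qid = 0x1234:

u32 qid = 0x1234, val = 0;	/* bits 0-9 = 0x234, bit 12 set */

val |= qid & DB_CQ_RING_ID_MASK;		/* 0x0234          */
val |= (qid & DB_CQ_RING_ID_EXT_MASK) <<	/* 0x1000 << 1     */
		DB_CQ_RING_ID_EXT_MASK_SHIFT;	/* -> 0x2000       */
/* val == 0x2234: qid bits 10-14 now sit at doorbell bits 11-15 */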
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index c36cd2f..0b35e4a 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -41,6 +41,7 @@
 	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
 	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
 	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
+	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
 	{ 0 }
 };
 MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -188,6 +189,8 @@
 {
 	u32 val = 0;
 	val |= qid & DB_EQ_RING_ID_MASK;
+	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
+			DB_EQ_RING_ID_EXT_MASK_SHIFT);
 
 	if (adapter->eeh_err)
 		return;
@@ -205,6 +208,8 @@
 {
 	u32 val = 0;
 	val |= qid & DB_CQ_RING_ID_MASK;
+	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
+			DB_CQ_RING_ID_EXT_MASK_SHIFT);
 
 	if (adapter->eeh_err)
 		return;
@@ -404,7 +409,8 @@
 }
 
 /* Determine number of WRB entries needed to xmit data in an skb */
-static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
+static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
+								bool *dummy)
 {
 	int cnt = (skb->len > skb->data_len);
 
@@ -412,12 +418,13 @@
 
 	/* to account for hdr wrb */
 	cnt++;
-	if (cnt & 1) {
+	if (lancer_chip(adapter) || !(cnt & 1)) {
+		*dummy = false;
+	} else {
 		/* add a dummy to make it an even num */
 		cnt++;
 		*dummy = true;
-	} else
-		*dummy = false;
+	}
 	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
 	return cnt;
 }
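The restructured branch keeps the BE constraint that the number of WRBs posted per packet be even, padding with a dummy WRB when needed, while Lancer always posts the true count. The arithmetic, sketched:

/* linear data (1) + 2 frags (2) + hdr WRB (1) = 4, even:
 *	BE and Lancer both post 4, no dummy needed
 * linear data (1) + 1 frag (1) + hdr WRB (1) = 3, odd:
 *	BE appends a dummy WRB and posts 4; Lancer posts 3
 */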
@@ -443,8 +450,18 @@
 		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
 		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
 			hdr, skb_shinfo(skb)->gso_size);
-		if (skb_is_gso_v6(skb))
+		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
 			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
+		if (lancer_chip(adapter) && adapter->sli_family ==
+							LANCER_A0_SLI_FAMILY) {
+			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
+			if (is_tcp_pkt(skb))
+				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
+								tcpcs, hdr, 1);
+			else if (is_udp_pkt(skb))
+				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
+								udpcs, hdr, 1);
+		}
 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		if (is_tcp_pkt(skb))
 			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
@@ -566,7 +583,7 @@
 	u32 start = txq->head;
 	bool dummy_wrb, stopped = false;
 
-	wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb);
+	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
 
 	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
 	if (copied) {
@@ -894,11 +911,17 @@
 	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
 	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
 
-	for (i = 0; i < num_rcvd; i++) {
-		page_info = get_rx_page_info(adapter, rxo, rxq_idx);
-		put_page(page_info->page);
-		memset(page_info, 0, sizeof(*page_info));
-		index_inc(&rxq_idx, rxq->len);
+	/* Skip out-of-buffer compl(lancer) or flush compl(BE) */
+	if (likely(rxq_idx != rxo->last_frag_index && num_rcvd != 0)) {
+
+		rxo->last_frag_index = rxq_idx;
+
+		for (i = 0; i < num_rcvd; i++) {
+			page_info = get_rx_page_info(adapter, rxo, rxq_idx);
+			put_page(page_info->page);
+			memset(page_info, 0, sizeof(*page_info));
+			index_inc(&rxq_idx, rxq->len);
+		}
 	}
 }
 
@@ -999,9 +1022,6 @@
 	u8 vtm;
 
 	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
-	/* Is it a flush compl that has no data */
-	if (unlikely(num_rcvd == 0))
-		return;
 
 	skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
 	if (unlikely(!skb)) {
@@ -1035,7 +1055,8 @@
 			return;
 		}
 		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
-		vid = swab16(vid);
+		if (!lancer_chip(adapter))
+			vid = swab16(vid);
 		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
 	} else {
 		netif_receive_skb(skb);
@@ -1057,10 +1078,6 @@
 	u8 pkt_type;
 
 	num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
-	/* Is it a flush compl that has no data */
-	if (unlikely(num_rcvd == 0))
-		return;
-
 	pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
 	vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
 	rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
@@ -1113,7 +1130,8 @@
 		napi_gro_frags(&eq_obj->napi);
 	} else {
 		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
-		vid = swab16(vid);
+		if (!lancer_chip(adapter))
+			vid = swab16(vid);
 
 		if (!adapter->vlan_grp || adapter->vlans_added == 0)
 			return;
@@ -1330,7 +1348,7 @@
 	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
 		be_rx_compl_discard(adapter, rxo, rxcp);
 		be_rx_compl_reset(rxcp);
-		be_cq_notify(adapter, rx_cq->id, true, 1);
+		be_cq_notify(adapter, rx_cq->id, false, 1);
 	}
 
 	/* Then free posted rx buffer that were not used */
@@ -1381,7 +1399,8 @@
 		sent_skb = sent_skbs[txq->tail];
 		end_idx = txq->tail;
 		index_adv(&end_idx,
-			wrb_cnt_for_skb(sent_skb, &dummy_wrb) - 1, txq->len);
+			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
+			txq->len);
 		be_tx_compl_process(adapter, end_idx);
 	}
 }
@@ -1476,7 +1495,9 @@
 	/* Ask BE to create Tx Event queue */
 	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
 		goto tx_eq_free;
-	adapter->base_eq_id = adapter->tx_eq.q.id;
+
+	adapter->tx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
+
 
 	/* Alloc TX eth compl queue */
 	cq = &adapter->tx_obj.cq;
@@ -1554,6 +1575,9 @@
 	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
 	for_all_rx_queues(adapter, rxo, i) {
 		rxo->adapter = adapter;
+		/* Init last_frag_index so that the frag index in the first
+		 * completion will never match */
+		rxo->last_frag_index = 0xffff;
 		rxo->rx_eq.max_eqd = BE_MAX_EQD;
 		rxo->rx_eq.enable_aic = true;
 
@@ -1568,6 +1592,8 @@
 		if (rc)
 			goto err;
 
+		rxo->rx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
+
 		/* CQ */
 		cq = &rxo->cq;
 		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
@@ -1578,7 +1604,6 @@
 		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
 		if (rc)
 			goto err;
-
 		/* Rx Q */
 		q = &rxo->q;
 		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
@@ -1611,29 +1636,45 @@
 	return -1;
 }
 
-/* There are 8 evt ids per func. Retruns the evt id's bit number */
-static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
+static bool event_peek(struct be_eq_obj *eq_obj)
 {
-	return eq_id - adapter->base_eq_id;
+	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
+	if (!eqe->evt)
+		return false;
+	else
+		return true;
 }
 
 static irqreturn_t be_intx(int irq, void *dev)
 {
 	struct be_adapter *adapter = dev;
 	struct be_rx_obj *rxo;
-	int isr, i;
+	int isr, i, tx = 0, rx = 0;
 
-	isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
-		(adapter->tx_eq.q.id/ 8) * CEV_ISR_SIZE);
-	if (!isr)
-		return IRQ_NONE;
+	if (lancer_chip(adapter)) {
+		if (event_peek(&adapter->tx_eq))
+			tx = event_handle(adapter, &adapter->tx_eq);
+		for_all_rx_queues(adapter, rxo, i) {
+			if (event_peek(&rxo->rx_eq))
+				rx |= event_handle(adapter, &rxo->rx_eq);
+		}
 
-	if ((1 << be_evt_bit_get(adapter, adapter->tx_eq.q.id) & isr))
-		event_handle(adapter, &adapter->tx_eq);
+		if (!(tx || rx))
+			return IRQ_NONE;
 
-	for_all_rx_queues(adapter, rxo, i) {
-		if ((1 << be_evt_bit_get(adapter, rxo->rx_eq.q.id) & isr))
-			event_handle(adapter, &rxo->rx_eq);
+	} else {
+		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
+			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
+		if (!isr)
+			return IRQ_NONE;
+
+		if ((1 << adapter->tx_eq.msix_vec_idx & isr))
+			event_handle(adapter, &adapter->tx_eq);
+
+		for_all_rx_queues(adapter, rxo, i) {
+			if ((1 << rxo->rx_eq.msix_vec_idx & isr))
+				event_handle(adapter, &rxo->rx_eq);
+		}
 	}
 
 	return IRQ_HANDLED;
@@ -1658,10 +1699,9 @@
 	return IRQ_HANDLED;
 }
 
-static inline bool do_gro(struct be_adapter *adapter, struct be_rx_obj *rxo,
-			struct be_eth_rx_compl *rxcp)
+static inline bool do_gro(struct be_rx_obj *rxo,
+			struct be_eth_rx_compl *rxcp, u8 err)
 {
-	int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
 	int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
 
 	if (err)
@@ -1678,6 +1718,8 @@
 	struct be_queue_info *rx_cq = &rxo->cq;
 	struct be_eth_rx_compl *rxcp;
 	u32 work_done;
+	u16 frag_index, num_rcvd;
+	u8 err;
 
 	rxo->stats.rx_polls++;
 	for (work_done = 0; work_done < budget; work_done++) {
@@ -1685,10 +1727,22 @@
 		if (!rxcp)
 			break;
 
-		if (do_gro(adapter, rxo, rxcp))
-			be_rx_compl_process_gro(adapter, rxo, rxcp);
-		else
-			be_rx_compl_process(adapter, rxo, rxcp);
+		err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
+		frag_index = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx,
+								rxcp);
+		num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags,
+								rxcp);
+
+		/* Skip out-of-buffer compl(lancer) or flush compl(BE) */
+		if (likely(frag_index != rxo->last_frag_index &&
+				num_rcvd != 0)) {
+			rxo->last_frag_index = frag_index;
+
+			if (do_gro(rxo, rxcp, err))
+				be_rx_compl_process_gro(adapter, rxo, rxcp);
+			else
+				be_rx_compl_process(adapter, rxo, rxcp);
+		}
 
 		be_rx_compl_reset(rxcp);
 	}
@@ -1830,8 +1884,7 @@
 			be_post_rx_frags(rxo);
 		}
 	}
-
-	if (!adapter->ue_detected)
+	if (!adapter->ue_detected && !lancer_chip(adapter))
 		be_detect_dump_ue(adapter);
 
 reschedule:
@@ -1910,10 +1963,10 @@
 #endif
 }
 
-static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
+static inline int be_msix_vec_get(struct be_adapter *adapter,
+					struct be_eq_obj *eq_obj)
 {
-	return adapter->msix_entries[
-			be_evt_bit_get(adapter, eq_id)].vector;
+	return adapter->msix_entries[eq_obj->msix_vec_idx].vector;
 }
 
 static int be_request_irq(struct be_adapter *adapter,
@@ -1924,14 +1977,14 @@
 	int vec;
 
 	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
-	vec = be_msix_vec_get(adapter, eq_obj->q.id);
+	vec = be_msix_vec_get(adapter, eq_obj);
 	return request_irq(vec, handler, 0, eq_obj->desc, context);
 }
 
 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
 			void *context)
 {
-	int vec = be_msix_vec_get(adapter, eq_obj->q.id);
+	int vec = be_msix_vec_get(adapter, eq_obj);
 	free_irq(vec, context);
 }
 
@@ -2036,14 +2089,15 @@
 	netif_carrier_off(netdev);
 	adapter->link_up = false;
 
-	be_intr_set(adapter, false);
+	if (!lancer_chip(adapter))
+		be_intr_set(adapter, false);
 
 	if (adapter->msix_enabled) {
-		vec = be_msix_vec_get(adapter, tx_eq->q.id);
+		vec = be_msix_vec_get(adapter, tx_eq);
 		synchronize_irq(vec);
 
 		for_all_rx_queues(adapter, rxo, i) {
-			vec = be_msix_vec_get(adapter, rxo->rx_eq.q.id);
+			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
 			synchronize_irq(vec);
 		}
 	} else {
@@ -2082,7 +2136,8 @@
 
 	be_irq_register(adapter);
 
-	be_intr_set(adapter, true);
+	if (!lancer_chip(adapter))
+		be_intr_set(adapter, true);
 
 	/* The evt queues are created in unarmed state; arm them */
 	for_all_rx_queues(adapter, rxo, i) {
@@ -2458,6 +2513,12 @@
 	int status, i = 0, num_imgs = 0;
 	const u8 *p;
 
+	if (!netif_running(adapter->netdev)) {
+		dev_err(&adapter->pdev->dev,
+			"Firmware load not allowed (interface is down)\n");
+		return -EPERM;
+	}
+
 	strcpy(fw_file, func);
 
 	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
@@ -2537,10 +2598,15 @@
 	int i;
 
 	netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
-		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM |
+		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
+		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 		NETIF_F_GRO | NETIF_F_TSO6;
 
-	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM;
+	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
+		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+
+	if (lancer_chip(adapter))
+		netdev->vlan_features |= NETIF_F_TSO6;
 
 	netdev->flags |= IFF_MULTICAST;
 
@@ -2581,6 +2647,15 @@
 	u8 __iomem *addr;
 	int pcicfg_reg, db_reg;
 
+	if (lancer_chip(adapter)) {
+		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
+			pci_resource_len(adapter->pdev, 0));
+		if (addr == NULL)
+			return -ENOMEM;
+		adapter->db = addr;
+		return 0;
+	}
+
 	if (be_physfn(adapter)) {
 		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
 				pci_resource_len(adapter->pdev, 2));
@@ -2777,6 +2852,44 @@
 	return 0;
 }
 
+static int be_dev_family_check(struct be_adapter *adapter)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	u32 sli_intf = 0, if_type;
+
+	switch (pdev->device) {
+	case BE_DEVICE_ID1:
+	case OC_DEVICE_ID1:
+		adapter->generation = BE_GEN2;
+		break;
+	case BE_DEVICE_ID2:
+	case OC_DEVICE_ID2:
+		adapter->generation = BE_GEN3;
+		break;
+	case OC_DEVICE_ID3:
+		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
+		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
+						SLI_INTF_IF_TYPE_SHIFT;
+
+		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
+			if_type != 0x02) {
+			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
+			return -EINVAL;
+		}
+		if (num_vfs > 0) {
+			dev_err(&pdev->dev, "VFs not supported\n");
+			return -EINVAL;
+		}
+		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
+					 SLI_INTF_FAMILY_SHIFT);
+		adapter->generation = BE_GEN3;
+		break;
+	default:
+		adapter->generation = 0;
+	}
+	return 0;
+}
+
 static int __devinit be_probe(struct pci_dev *pdev,
 			const struct pci_device_id *pdev_id)
 {
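be_dev_family_check() identifies Lancer parts by decoding the SLI_INTF register read from PCI config space with the masks added in be_hw.h. A worked decode for a hypothetical register value:

/* sli_intf = 0xC0002A10 (hypothetical):
 *	sli_intf & SLI_INTF_VALID_MASK == 0xC0000000 == SLI_INTF_VALID
 *	if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >> 12 == 0x2 (ok)
 *	family  = (sli_intf & SLI_INTF_FAMILY_MASK) >> 8   == 0xA
 *		  == LANCER_A0_SLI_FAMILY
 *	rev     = (sli_intf & SLI_INTF_REV_MASK) >> 4      == 0x1
 */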
@@ -2799,22 +2912,13 @@
 		goto rel_reg;
 	}
 	adapter = netdev_priv(netdev);
-
-	switch (pdev->device) {
-	case BE_DEVICE_ID1:
-	case OC_DEVICE_ID1:
-		adapter->generation = BE_GEN2;
-		break;
-	case BE_DEVICE_ID2:
-	case OC_DEVICE_ID2:
-		adapter->generation = BE_GEN3;
-		break;
-	default:
-		adapter->generation = 0;
-	}
-
 	adapter->pdev = pdev;
 	pci_set_drvdata(pdev, adapter);
+
+	status = be_dev_family_check(adapter);
+	if (status)
+		goto free_netdev;
+
 	adapter->netdev = netdev;
 	SET_NETDEV_DEV(netdev, &pdev->dev);
 
@@ -2889,7 +2993,7 @@
 	be_ctrl_cleanup(adapter);
 free_netdev:
 	be_sriov_disable(adapter);
-	free_netdev(adapter->netdev);
+	free_netdev(netdev);
 	pci_set_drvdata(pdev, NULL);
 rel_reg:
 	pci_release_regions(pdev);
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 062600b..5c811f3 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -56,8 +56,8 @@
 #include "bnx2_fw.h"
 
 #define DRV_MODULE_NAME		"bnx2"
-#define DRV_MODULE_VERSION	"2.0.18"
-#define DRV_MODULE_RELDATE	"Oct 7, 2010"
+#define DRV_MODULE_VERSION	"2.0.20"
+#define DRV_MODULE_RELDATE	"Nov 24, 2010"
 #define FW_MIPS_FILE_06		"bnx2/bnx2-mips-06-6.0.15.fw"
 #define FW_RV2P_FILE_06		"bnx2/bnx2-rv2p-06-6.0.15.fw"
 #define FW_MIPS_FILE_09		"bnx2/bnx2-mips-09-6.0.17.fw"
@@ -766,13 +766,10 @@
 		int j;
 
 		rxr->rx_buf_ring =
-			vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
+			vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
 		if (rxr->rx_buf_ring == NULL)
 			return -ENOMEM;
 
-		memset(rxr->rx_buf_ring, 0,
-		       SW_RXBD_RING_SIZE * bp->rx_max_ring);
-
 		for (j = 0; j < bp->rx_max_ring; j++) {
 			rxr->rx_desc_ring[j] =
 				dma_alloc_coherent(&bp->pdev->dev,
@@ -785,13 +782,11 @@
 		}
 
 		if (bp->rx_pg_ring_size) {
-			rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
+			rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
 						  bp->rx_max_pg_ring);
 			if (rxr->rx_pg_ring == NULL)
 				return -ENOMEM;
 
-			memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
-			       bp->rx_max_pg_ring);
 		}
 
 		for (j = 0; j < bp->rx_max_pg_ring; j++) {
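Both hunks in this function replace a vmalloc()/memset() pair with vzalloc(), which returns already-zeroed memory; behaviour is unchanged. The equivalence, as a sketch:

/* before */
ring = vmalloc(size);
if (ring == NULL)
	return -ENOMEM;
memset(ring, 0, size);

/* after: one call, same zeroed result */
ring = vzalloc(size);
if (ring == NULL)
	return -ENOMEM;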
@@ -4645,13 +4640,28 @@
 
 	/* Wait for the current PCI transaction to complete before
 	 * issuing a reset. */
-	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
-	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
-	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
-	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
-	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
-	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
-	udelay(5);
+	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
+	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
+		REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
+		       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
+		       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
+		       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
+		       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
+		val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
+		udelay(5);
+	} else {  /* 5709 */
+		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
+		val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
+		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
+		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
+
+		for (i = 0; i < 100; i++) {
+			msleep(1);
+			val = REG_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
+			if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
+				break;
+		}
+	}
 
 	/* Wait for the firmware to tell us it is ok to issue a reset. */
 	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
@@ -4673,7 +4683,7 @@
 		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
 		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
 
-		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
+		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
 
 	} else {
 		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
@@ -7914,15 +7924,15 @@
 		goto err_out_release;
 	}
 
+	bnx2_set_power_state(bp, PCI_D0);
+
 	/* Configure byte swap and enable write to the reg_window registers.
 	 * Rely on CPU to do target byte swapping on big endian systems
 	 * The chip's target access swapping will not swap all accesses
 	 */
-	pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
-			       BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
-			       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
-
-	bnx2_set_power_state(bp, PCI_D0);
+	REG_WR(bp, BNX2_PCICFG_MISC_CONFIG,
+		   BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
+		   BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
 
 	bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
 
@@ -8383,7 +8393,7 @@
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct bnx2 *bp = netdev_priv(dev);
 
-	flush_scheduled_work();
+	cancel_work_sync(&bp->reset_task);
 
 	unregister_netdev(dev);
 
@@ -8421,7 +8431,7 @@
 	if (!netif_running(dev))
 		return 0;
 
-	flush_scheduled_work();
+	cancel_work_sync(&bp->reset_task);
 	bnx2_netif_stop(bp, true);
 	netif_device_detach(dev);
 	del_timer_sync(&bp->timer);
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index bf4c342..5488a2e 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -461,6 +461,8 @@
 #define BNX2_PCICFG_MAILBOX_QUEUE_ADDR			0x00000090
 #define BNX2_PCICFG_MAILBOX_QUEUE_DATA			0x00000094
 
+#define BNX2_PCICFG_DEVICE_CONTROL			0x000000b4
+#define BNX2_PCICFG_DEVICE_STATUS_NO_PEND		 ((1L<<5)<<16)
 
 /*
  *  pci_reg definition
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 9eea225..7e4d682 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -20,8 +20,8 @@
  * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */
 
-#define DRV_MODULE_VERSION      "1.60.00-3"
-#define DRV_MODULE_RELDATE      "2010/10/19"
+#define DRV_MODULE_VERSION      "1.60.00-7"
+#define DRV_MODULE_RELDATE      "2010/12/08"
 #define BNX2X_BC_VER            0x040200
 
 #define BNX2X_MULTI_QUEUE
@@ -671,6 +671,10 @@
 	CAM_ISCSI_ETH_LINE,
 	CAM_MAX_PF_LINE = CAM_ISCSI_ETH_LINE
 };
+/* number of MACs per function in NIG memory - used for SI mode */
+#define NIG_LLH_FUNC_MEM_SIZE		16
+/* number of entries in NIG_REG_LLHX_FUNC_MEM */
+#define NIG_LLH_FUNC_MEM_MAX_OFFSET	8
 
 #define BNX2X_VF_ID_INVALID	0xFF
 
@@ -967,6 +971,8 @@
 	u16			mf_ov;
 	u8			mf_mode;
 #define IS_MF(bp)		(bp->mf_mode != 0)
+#define IS_MF_SI(bp)		(bp->mf_mode == MULTI_FUNCTION_SI)
+#define IS_MF_SD(bp)		(bp->mf_mode == MULTI_FUNCTION_SD)
 
 	u8			wol;
 
@@ -1010,6 +1016,7 @@
 #define BNX2X_ACCEPT_ALL_UNICAST	0x0004
 #define BNX2X_ACCEPT_ALL_MULTICAST	0x0008
 #define BNX2X_ACCEPT_BROADCAST		0x0010
+#define BNX2X_ACCEPT_UNMATCHED_UCAST	0x0020
 #define BNX2X_PROMISCUOUS_MODE		0x10000
 
 	u32			rx_mode;
@@ -1329,7 +1336,7 @@
 
 #define BNX2X_ILT_ZALLOC(x, y, size) \
 	do { \
-		x = pci_alloc_consistent(bp->pdev, size, y); \
+		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
 		if (x) \
 			memset(x, 0, size); \
 	} while (0)
@@ -1337,7 +1344,7 @@
 #define BNX2X_ILT_FREE(x, y, size) \
 	do { \
 		if (x) { \
-			pci_free_consistent(bp->pdev, size, x, y); \
+			dma_free_coherent(&bp->pdev->dev, size, x, y); \
 			x = NULL; \
 			y = 0; \
 		} \
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 459614d..236c00c 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -698,6 +698,29 @@
 	mutex_unlock(&bp->port.phy_mutex);
 }
 
+/* calculates MF speed according to current linespeed and MF configuration */
+u16 bnx2x_get_mf_speed(struct bnx2x *bp)
+{
+	u16 line_speed = bp->link_vars.line_speed;
+	if (IS_MF(bp)) {
+		u16 maxCfg = (bp->mf_config[BP_VN(bp)] &
+						FUNC_MF_CFG_MAX_BW_MASK) >>
+						FUNC_MF_CFG_MAX_BW_SHIFT;
+		/* Calculate the current MAX line speed limit for the DCC
+		 * capable devices
+		 */
+		if (IS_MF_SD(bp)) {
+			u16 vn_max_rate = maxCfg * 100;
+
+			if (vn_max_rate < line_speed)
+				line_speed = vn_max_rate;
+		} else /* IS_MF_SI(bp)) */
+			line_speed = (line_speed * maxCfg) / 100;
+	}
+
+	return line_speed;
+}
+
 void bnx2x_link_report(struct bnx2x *bp)
 {
 	if (bp->flags & MF_FUNC_DIS) {
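In bnx2x_get_mf_speed() above, maxCfg is the MAX_BW field of the function's MF configuration. In SD mode it scales by 100 into an absolute Mbps ceiling; in SI mode it acts as a percentage of the current line speed. Worked numbers, as a sketch:

/* line_speed = 10000 Mbps, maxCfg = 25:
 *	SD: vn_max_rate = 25 * 100 = 2500 < 10000 -> report 2500
 *	SI: 10000 * 25 / 100       = 2500         -> report 2500
 * line_speed = 1000 Mbps, maxCfg = 25:
 *	SD: vn_max_rate = 2500 > 1000 -> line speed stays 1000
 *	SI: 1000 * 25 / 100 = 250     -> report 250
 */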
@@ -713,17 +736,8 @@
 			netif_carrier_on(bp->dev);
 		netdev_info(bp->dev, "NIC Link is Up, ");
 
-		line_speed = bp->link_vars.line_speed;
-		if (IS_MF(bp)) {
-			u16 vn_max_rate;
+		line_speed = bnx2x_get_mf_speed(bp);
 
-			vn_max_rate =
-				((bp->mf_config[BP_VN(bp)] &
-				  FUNC_MF_CFG_MAX_BW_MASK) >>
-						FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
-			if (vn_max_rate < line_speed)
-				line_speed = vn_max_rate;
-		}
 		pr_cont("%d Mbps ", line_speed);
 
 		if (bp->link_vars.duplex == DUPLEX_FULL)
@@ -1680,7 +1694,7 @@
 		rc = XMIT_PLAIN;
 
 	else {
-		if (skb->protocol == htons(ETH_P_IPV6)) {
+		if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
 			rc = XMIT_CSUM_V6;
 			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
 				rc |= XMIT_CSUM_TCP;
@@ -1692,11 +1706,10 @@
 		}
 	}
 
-	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
-		rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
-
-	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
-		rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
+	if (skb_is_gso_v6(skb))
+		rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
+	else if (skb_is_gso(skb))
+		rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
 
 	return rc;
 }
@@ -1782,15 +1795,15 @@
 }
 #endif
 
-static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb,
-				     struct eth_tx_parse_bd_e2 *pbd,
-				     u32 xmit_type)
+static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
+					u32 xmit_type)
 {
-	pbd->parsing_data |= cpu_to_le16(skb_shinfo(skb)->gso_size) <<
-		ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT;
+	*parsing_data |= (skb_shinfo(skb)->gso_size <<
+			      ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
+			      ETH_TX_PARSE_BD_E2_LSO_MSS;
 	if ((xmit_type & XMIT_GSO_V6) &&
 	    (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
-		pbd->parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
+		*parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
 }
 
 /**
@@ -1835,15 +1848,15 @@
  * @return header len
  */
 static inline  u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
-	struct eth_tx_parse_bd_e2 *pbd,
-	u32 xmit_type)
+	u32 *parsing_data, u32 xmit_type)
 {
-	pbd->parsing_data |= cpu_to_le16(tcp_hdrlen(skb)/4) <<
-		ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT;
+	*parsing_data |= ((tcp_hdrlen(skb)/4) <<
+		ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
+		ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
 
-	pbd->parsing_data |= cpu_to_le16(((unsigned char *)tcp_hdr(skb) -
-					  skb->data) / 2) <<
-		ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT;
+	*parsing_data |= ((((u8 *)tcp_hdr(skb) - skb->data) / 2) <<
+		ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
+		ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
 
 	return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
 }
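Both helpers now accumulate into a plain host-order u32, which the caller converts once with cpu_to_le32() (see the hunk below); previously each contribution was swapped with cpu_to_le16() and then shifted, which scatters the bytes into the wrong bits on big-endian hosts. The hazard, sketched with hypothetical field names:

/* WRONG on big-endian: the byte swap happens before the shift,
 * so the MSS lands in the wrong bits of the descriptor:
 *	pbd->parsing_data |= cpu_to_le16(mss) << MSS_SHIFT;
 *
 * RIGHT: accumulate in CPU byte order, swap the whole word once:
 *	parsing_data |= (mss << MSS_SHIFT) & MSS_MASK;
 *	...
 *	pbd->parsing_data = cpu_to_le32(parsing_data);
 */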
@@ -1912,6 +1925,7 @@
 	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
 	struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
 	struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
+	u32 pbd_e2_parsing_data = 0;
 	u16 pkt_prod, bd_prod;
 	int nbd, fp_index;
 	dma_addr_t mapping;
@@ -2033,8 +2047,9 @@
 		memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
 		/* Set PBD in checksum offload case */
 		if (xmit_type & XMIT_CSUM)
-			hlen = bnx2x_set_pbd_csum_e2(bp,
-						     skb, pbd_e2, xmit_type);
+			hlen = bnx2x_set_pbd_csum_e2(bp, skb,
+						     &pbd_e2_parsing_data,
+						     xmit_type);
 	} else {
 		pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
 		memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
@@ -2076,10 +2091,18 @@
 			bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
 						 hlen, bd_prod, ++nbd);
 		if (CHIP_IS_E2(bp))
-			bnx2x_set_pbd_gso_e2(skb, pbd_e2, xmit_type);
+			bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
+					     xmit_type);
 		else
 			bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
 	}
+
+	/* Set the PBD's parsing_data field if not zero
+	 * (for chips newer than 57711).
+	 */
+	if (pbd_e2_parsing_data)
+		pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
+
 	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
 
 	/* Handle fragmented skb */
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index 6b28739..cb8f2a0 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -73,6 +73,16 @@
 void bnx2x_link_report(struct bnx2x *bp);
 
 /**
+ * calculates MF speed according to current linespeed and MF
+ * configuration
+ *
+ * @param bp
+ *
+ * @return u16
+ */
+u16 bnx2x_get_mf_speed(struct bnx2x *bp);
+
+/**
  * MSI-X slowpath interrupt handler
  *
  * @param irq
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index d02ffbd..bd94827 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -45,14 +45,9 @@
 		cmd->speed = bp->link_params.req_line_speed[cfg_idx];
 		cmd->duplex = bp->link_params.req_duplex[cfg_idx];
 	}
-	if (IS_MF(bp)) {
-		u16 vn_max_rate = ((bp->mf_config[BP_VN(bp)] &
-			FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT) *
-			100;
 
-		if (vn_max_rate < cmd->speed)
-			cmd->speed = vn_max_rate;
-	}
+	if (IS_MF(bp))
+		cmd->speed = bnx2x_get_mf_speed(bp);
 
 	if (bp->port.supported[cfg_idx] & SUPPORTED_TP)
 		cmd->port = PORT_TP;
@@ -87,18 +82,57 @@
 {
 	struct bnx2x *bp = netdev_priv(dev);
 	u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config;
+	u32 speed;
 
-	if (IS_MF(bp))
+	if (IS_MF_SD(bp))
 		return 0;
 
 	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
-	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
-	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
-	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
+	   "  supported 0x%x  advertising 0x%x  speed %d speed_hi %d\n"
+	   "  duplex %d  port %d  phy_address %d  transceiver %d\n"
+	   "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
 	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
+	   cmd->speed_hi,
 	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
 	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
 
+	speed = cmd->speed;
+	speed |= (cmd->speed_hi << 16);
+
+	if (IS_MF_SI(bp)) {
+		u32 param = 0;
+		u32 line_speed = bp->link_vars.line_speed;
+
+		/* use 10G if no link detected */
+		if (!line_speed)
+			line_speed = 10000;
+
+		if (bp->common.bc_ver < REQ_BC_VER_4_SET_MF_BW) {
+			BNX2X_DEV_INFO("To set speed BC %X or higher "
+				       "is required, please upgrade BC\n",
+				       REQ_BC_VER_4_SET_MF_BW);
+			return -EINVAL;
+		}
+		if (line_speed < speed) {
+			BNX2X_DEV_INFO("New speed should be less or equal "
+				       "to actual line speed\n");
+			return -EINVAL;
+		}
+		/* load old values */
+		param = bp->mf_config[BP_VN(bp)];
+
+		/* leave only MIN value */
+		param &= FUNC_MF_CFG_MIN_BW_MASK;
+
+		/* set new MAX value */
+		param |= (((speed * 100) / line_speed)
+				 << FUNC_MF_CFG_MAX_BW_SHIFT)
+				  & FUNC_MF_CFG_MAX_BW_MASK;
+
+		bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, param);
+		return 0;
+	}
+
 	cfg_idx = bnx2x_get_link_cfg_idx(bp);
 	old_multi_phy_config = bp->link_params.multi_phy_config;
 	switch (cmd->port) {
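The new SI branch encodes the requested speed as a percentage of the current line speed into the MAX_BW field while preserving the MIN_BW bits. Worked numbers, as a sketch:

/* speed = 2500, line_speed = 10000:
 *	(2500 * 100) / 10000 = 25 -> MAX_BW field = 25
 * param = (old MIN_BW bits) | (25 << FUNC_MF_CFG_MAX_BW_SHIFT)
 * bnx2x_get_mf_speed() later decodes this back as
 *	10000 * 25 / 100 = 2500 Mbps
 */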
@@ -168,8 +202,6 @@
 
 	} else { /* forced speed */
 		/* advertise the requested speed and duplex if supported */
-		u32 speed = cmd->speed;
-		speed |= (cmd->speed_hi << 16);
 		switch (speed) {
 		case SPEED_10:
 			if (cmd->duplex == DUPLEX_FULL) {
@@ -1499,8 +1531,15 @@
 	 * updates that have been performed while interrupts were
 	 * disabled.
 	 */
-	if (bp->common.int_block == INT_BLOCK_IGU)
+	if (bp->common.int_block == INT_BLOCK_IGU) {
+		/* Disable local BHs to prevent a deadlock between
+		 * sch_direct_xmit() and bnx2x_run_loopback() (calling
+		 * bnx2x_tx_int()), as both are taking netif_tx_lock().
+		 */
+		local_bh_disable();
 		bnx2x_tx_int(fp_tx);
+		local_bh_enable();
+	}
 
 	rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
 	if (rx_idx != rx_start_idx + num_pkts)
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h
index 18c8e23..6555c47 100644
--- a/drivers/net/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/bnx2x/bnx2x_hsi.h
@@ -244,7 +244,14 @@
 
 	u16 xgxs_config_tx[4];				    /* 0x1A0 */
 
-	u32 Reserved1[57];				    /* 0x1A8 */
+	u32 Reserved1[56];				    /* 0x1A8 */
+	u32 default_cfg;				    /* 0x288 */
+	/*  Enable BAM on KR */
+#define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK		      0x00100000
+#define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT		      20
+#define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED		      0x00000000
+#define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED		      0x00100000
+
 	u32 speed_capability_mask2;			    /* 0x28C */
 #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK		      0x0000FFFF
 #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT		      0
@@ -427,7 +434,12 @@
 #define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_DISABLED     0x00000000
 #define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED      0x00000002
 
-#define SHARED_FEATURE_MF_MODE_DISABLED 	    0x00000100
+#define SHARED_FEAT_CFG_FORCE_SF_MODE_MASK		      0x00000700
+#define SHARED_FEAT_CFG_FORCE_SF_MODE_SHIFT		      8
+#define SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED	      0x00000000
+#define SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF		      0x00000100
+#define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4		      0x00000200
+#define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT	      0x00000300
 
 };
 
@@ -808,6 +820,9 @@
 #define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL	    0xa1000000
 #define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL	    0x00050234
 
+#define DRV_MSG_CODE_SET_MF_BW				0xe0000000
+#define REQ_BC_VER_4_SET_MF_BW				0x00060202
+#define DRV_MSG_CODE_SET_MF_BW_ACK			0xe1000000
 #define BIOS_MSG_CODE_LIC_CHALLENGE			0xff010000
 #define BIOS_MSG_CODE_LIC_RESPONSE			0xff020000
 #define BIOS_MSG_CODE_VIRT_MAC_PRIM			0xff030000
@@ -881,6 +896,7 @@
 
 	u32 drv_status;
 #define DRV_STATUS_PMF					0x00000001
+#define DRV_STATUS_SET_MF_BW				0x00000004
 
 #define DRV_STATUS_DCC_EVENT_MASK			0x0000ff00
 #define DRV_STATUS_DCC_DISABLE_ENABLE_PF		0x00000100
@@ -981,12 +997,43 @@
 
 };
 
+/* This structure is not applicable and should not be accessed on 57711 */
+struct func_ext_cfg {
+	u32 func_cfg;
+#define MACP_FUNC_CFG_FLAGS_MASK			      0x000000FF
+#define MACP_FUNC_CFG_FLAGS_SHIFT			      0
+#define MACP_FUNC_CFG_FLAGS_ENABLED			      0x00000001
+#define MACP_FUNC_CFG_FLAGS_ETHERNET			      0x00000002
+#define MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD		      0x00000004
+#define MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD		      0x00000008
+
+	u32 iscsi_mac_addr_upper;
+	u32 iscsi_mac_addr_lower;
+
+	u32 fcoe_mac_addr_upper;
+	u32 fcoe_mac_addr_lower;
+
+	u32 fcoe_wwn_port_name_upper;
+	u32 fcoe_wwn_port_name_lower;
+
+	u32 fcoe_wwn_node_name_upper;
+	u32 fcoe_wwn_node_name_lower;
+
+	u32 preserve_data;
+#define MF_FUNC_CFG_PRESERVE_L2_MAC			     (1<<0)
+#define MF_FUNC_CFG_PRESERVE_ISCSI_MAC			     (1<<1)
+#define MF_FUNC_CFG_PRESERVE_FCOE_MAC			     (1<<2)
+#define MF_FUNC_CFG_PRESERVE_FCOE_WWN_P			     (1<<3)
+#define MF_FUNC_CFG_PRESERVE_FCOE_WWN_N			     (1<<4)
+};
+
 struct mf_cfg {
 
 	struct shared_mf_cfg	shared_mf_config;
 	struct port_mf_cfg	port_mf_config[PORT_MAX];
 	struct func_mf_cfg	func_mf_config[E1H_FUNC_MAX];
 
+	struct func_ext_cfg func_ext_config[E1H_FUNC_MAX];
 };
 
 
diff --git a/drivers/net/bnx2x/bnx2x_init_ops.h b/drivers/net/bnx2x/bnx2x_init_ops.h
index a306b0e..66df29f 100644
--- a/drivers/net/bnx2x/bnx2x_init_ops.h
+++ b/drivers/net/bnx2x/bnx2x_init_ops.h
@@ -838,7 +838,7 @@
 /****************************************************************************
 * SRC initializations
 ****************************************************************************/
-
+#ifdef BCM_CNIC
 /* called during init func stage */
 static void bnx2x_src_init_t2(struct bnx2x *bp, struct src_ent *t2,
 			      dma_addr_t t2_mapping, int src_cid_count)
@@ -862,5 +862,5 @@
 		    U64_HI((u64)t2_mapping +
 			   (src_cid_count-1) * sizeof(struct src_ent)));
 }
-
+#endif
 #endif /* BNX2X_INIT_OPS_H */
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index 2326774..38aeffe 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -610,7 +610,7 @@
 	/* reset and unreset the BigMac */
 	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
 		     (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
-	udelay(10);
+	msleep(1);
 
 	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
 		     (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
@@ -3525,13 +3525,19 @@
 	DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1);
 
 	/* Enable CL37 BAM */
-	bnx2x_cl45_read(bp, phy,
-			MDIO_AN_DEVAD,
-			MDIO_AN_REG_8073_BAM, &val);
-	bnx2x_cl45_write(bp, phy,
-			 MDIO_AN_DEVAD,
-			 MDIO_AN_REG_8073_BAM, val | 1);
+	if (REG_RD(bp, params->shmem_base +
+			 offsetof(struct shmem_region, dev_info.
+				  port_hw_config[params->port].default_cfg)) &
+	    PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) {
 
+		bnx2x_cl45_read(bp, phy,
+				MDIO_AN_DEVAD,
+				MDIO_AN_REG_8073_BAM, &val);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD,
+				 MDIO_AN_REG_8073_BAM, val | 1);
+		DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n");
+	}
 	if (params->loopback_mode == LOOPBACK_EXT) {
 		bnx2x_807x_force_10G(bp, phy);
 		DP(NETIF_MSG_LINK, "Forced speed 10G on 807X\n");
@@ -3898,7 +3904,7 @@
 			      MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
 		if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
 		    MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
-			return 0;;
+			return 0;
 		msleep(1);
 	}
 	return -EINVAL;
@@ -3982,7 +3988,7 @@
 			      MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
 		if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
 		    MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
-			return 0;;
+			return 0;
 		msleep(1);
 	}
 
@@ -5302,7 +5308,7 @@
 {
 	struct bnx2x *bp = params->bp;
 	u16 autoneg_val, an_1000_val, an_10_100_val;
-	bnx2x_wait_reset_complete(bp, phy);
+
 	bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
 		      1 << NIG_LATCH_BC_ENABLE_MI_INT);
 
@@ -5431,6 +5437,7 @@
 
 	/* HW reset */
 	bnx2x_ext_phy_hw_reset(bp, params->port);
+	bnx2x_wait_reset_complete(bp, phy);
 
 	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
 	return bnx2x_848xx_cmn_config_init(phy, params, vars);
@@ -5441,7 +5448,7 @@
 				  struct link_vars *vars)
 {
 	struct bnx2x *bp = params->bp;
-	u8 port = params->port, initialize = 1;
+	u8 port, initialize = 1;
 	u16 val;
 	u16 temp;
 	u32 actual_phy_selection;
@@ -5450,11 +5457,16 @@
 	/* This is just for MDIO_CTL_REG_84823_MEDIA register. */
 
 	msleep(1);
+	if (CHIP_IS_E2(bp))
+		port = BP_PATH(bp);
+	else
+		port = params->port;
 	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
 		       MISC_REGISTERS_GPIO_OUTPUT_HIGH,
 		       port);
-	msleep(200); /* 100 is not enough */
-
+	bnx2x_wait_reset_complete(bp, phy);
+	/* Wait for GPHY to come out of reset */
+	msleep(50);
 	/* BCM84823 requires that XGXS links up first @ 10G for normal
 	behavior */
 	temp = vars->line_speed;
@@ -5625,7 +5637,11 @@
 				   struct link_params *params)
 {
 	struct bnx2x *bp = params->bp;
-	u8 port = params->port;
+	u8 port;
+	if (CHIP_IS_E2(bp))
+		port = BP_PATH(bp);
+	else
+		port = params->port;
 	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
 			    MISC_REGISTERS_GPIO_OUTPUT_LOW,
 			    port);
@@ -6928,7 +6944,7 @@
 		  u8 reset_ext_phy)
 {
 	struct bnx2x *bp = params->bp;
-	u8 phy_index, port = params->port;
+	u8 phy_index, port = params->port, clear_latch_ind = 0;
 	DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port);
 	/* disable attentions */
 	vars->link_status = 0;
@@ -6966,9 +6982,18 @@
 				params->phy[phy_index].link_reset(
 					&params->phy[phy_index],
 					params);
+			if (params->phy[phy_index].flags &
+			    FLAGS_REARM_LATCH_SIGNAL)
+				clear_latch_ind = 1;
 		}
 	}
 
+	if (clear_latch_ind) {
+		/* Clear latching indication */
+		bnx2x_rearm_latch_signal(bp, port, 0);
+		bnx2x_bits_dis(bp, NIG_REG_LATCH_BC_0 + port*4,
+			       1 << NIG_LATCH_BC_ENABLE_MI_INT);
+	}
 	if (params->phy[INT_PHY].link_reset)
 		params->phy[INT_PHY].link_reset(
 			&params->phy[INT_PHY], params);
@@ -6999,6 +7024,7 @@
 	s8 port;
 	s8 port_of_path = 0;
 
+	bnx2x_ext_phy_hw_reset(bp, 0);
 	/* PART1 - Reset both phys */
 	for (port = PORT_MAX - 1; port >= PORT_0; port--) {
 		u32 shmem_base, shmem2_base;
@@ -7021,7 +7047,8 @@
 			return -EINVAL;
 		}
 		/* disable attentions */
-		bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
+		bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
+			       port_of_path*4,
 			     (NIG_MASK_XGXS0_LINK_STATUS |
 			      NIG_MASK_XGXS0_LINK10G |
 			      NIG_MASK_SERDES0_LINK_STATUS |
@@ -7132,7 +7159,7 @@
 		(1<<(MISC_REGISTERS_GPIO_3 + MISC_REGISTERS_GPIO_PORT_SHIFT)));
 	REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);
 
-	bnx2x_ext_phy_hw_reset(bp, 1);
+	bnx2x_ext_phy_hw_reset(bp, 0);
 	msleep(5);
 	for (port = 0; port < PORT_MAX; port++) {
 		u32 shmem_base, shmem2_base;
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index e9ad16f..0068a1d 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -2026,13 +2026,28 @@
 
 static void bnx2x_read_mf_cfg(struct bnx2x *bp)
 {
-	int vn;
+	int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
 
 	if (BP_NOMCP(bp))
 		return; /* what should be the default bvalue in this case */
 
+	/* For 2 port configuration the absolute function number formula
+	 * is:
+	 *      abs_func = 2 * vn + BP_PORT + BP_PATH
+	 *
+	 *      and there are 4 functions per port
+	 *
+	 * For 4 port configuration it is
+	 *      abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
+	 *
+	 *      and there are 2 functions per port
+	 */
 	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
-		int /*abs*/func = 2*vn + BP_PORT(bp);
+		int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
+
+		if (func >= E1H_FUNC_MAX)
+			break;
+
 		bp->mf_config[vn] =
 			MF_CFG_RD(bp, func_mf_config[func].config);
 	}
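The single expression with n covers both port layouts described in the comment. Checking it against sample values, as a sketch:

/* vn = 1, BP_PORT = 1, BP_PATH = 0:
 *	2-port (n = 1): 1 * (2*1 + 1) + 0 = 3  (== 2*vn + port + path)
 *	4-port (n = 2): 2 * (2*1 + 1) + 0 = 6  (== 4*vn + 2*port + path)
 */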
@@ -2248,10 +2263,21 @@
 	u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
 	u8 unmatched_unicast = 0;
 
+	if (filters & BNX2X_ACCEPT_UNMATCHED_UCAST)
+		unmatched_unicast = 1;
+
 	if (filters & BNX2X_PROMISCUOUS_MODE) {
 		/* promiscious - accept all, drop none */
 		drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
 		accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
+		if (IS_MF_SI(bp)) {
+			/*
+			 * In SI mode, promiscuous mode accepts
+			 * only unmatched unicast packets
+			 */
+			unmatched_unicast = 1;
+			accp_all_ucast = 0;
+		}
 	}
 	if (filters & BNX2X_ACCEPT_UNICAST) {
 		/* accept matched ucast */
@@ -2260,6 +2286,11 @@
 	if (filters & BNX2X_ACCEPT_MULTICAST) {
 		/* accept matched mcast */
 		drop_all_mcast = 0;
+		if (IS_MF_SI(bp))
+			/* since mcast addresses won't arrive with ovlan,
+			 * fw needs to accept all of them in
+			 * switch-independent mode */
+			accp_all_mcast = 1;
 	}
 	if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
 		/* accept all mcast */
@@ -2372,7 +2403,7 @@
 	/* calculate queue flags */
 	flags |= QUEUE_FLG_CACHE_ALIGN;
 	flags |= QUEUE_FLG_HC;
-	flags |= IS_MF(bp) ? QUEUE_FLG_OV : 0;
+	flags |= IS_MF_SD(bp) ? QUEUE_FLG_OV : 0;
 
 	flags |= QUEUE_FLG_VLAN;
 	DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
@@ -2573,6 +2604,26 @@
 	 */
 }
 
+/* called due to MCP event (on pmf):
+ *	reread new bandwidth configuration
+ *	configure FW
+ *	notify other functions about the change
+ */
+static inline void bnx2x_config_mf_bw(struct bnx2x *bp)
+{
+	if (bp->link_vars.link_up) {
+		bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
+		bnx2x_link_sync_notify(bp);
+	}
+	storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
+}
+
+static inline void bnx2x_set_mf_bw(struct bnx2x *bp)
+{
+	bnx2x_config_mf_bw(bp);
+	bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
+}
+
 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
 {
 	DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
@@ -2598,10 +2649,7 @@
 		dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
 	}
 	if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
-
-		bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
-		bnx2x_link_sync_notify(bp);
-		storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
+		bnx2x_config_mf_bw(bp);
 		dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
 	}
 
@@ -3022,6 +3070,10 @@
 			if (val & DRV_STATUS_DCC_EVENT_MASK)
 				bnx2x_dcc_event(bp,
 					    (val & DRV_STATUS_DCC_EVENT_MASK));
+
+			if (val & DRV_STATUS_SET_MF_BW)
+				bnx2x_set_mf_bw(bp);
+
 			bnx2x__link_status_update(bp);
 			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
 				bnx2x_pmf_update(bp);
@@ -4232,6 +4284,15 @@
 			bp->mf_mode);
 	}
 
+	if (IS_MF_SI(bp))
+		/*
+		 * In switch independent mode, the TSTORM needs to accept
+		 * packets that failed classification, since approximate match
+		 * mac addresses aren't written to NIG LLH
+		 */
+		REG_WR8(bp, BAR_TSTRORM_INTMEM +
+			    TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
+
 	/* Zero this manually as its initialization is
 	   currently missing in the initTool */
 	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
@@ -5048,12 +5109,12 @@
 	REG_WR(bp, PRS_REG_NIC_MODE, 1);
 #endif
 	if (!CHIP_IS_E1(bp))
-		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF(bp));
+		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF_SD(bp));
 
 	if (CHIP_IS_E2(bp)) {
 		/* Bit-map indicating which L2 hdrs may appear after the
 		   basic Ethernet header */
-		int has_ovlan = IS_MF(bp);
+		int has_ovlan = IS_MF_SD(bp);
 		REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
 		REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
 	}
@@ -5087,7 +5148,7 @@
 	bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
 
 	if (CHIP_IS_E2(bp)) {
-		int has_ovlan = IS_MF(bp);
+		int has_ovlan = IS_MF_SD(bp);
 		REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
 		REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
 	}
@@ -5164,12 +5225,12 @@
 	bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
 	if (!CHIP_IS_E1(bp)) {
 		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
-		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF(bp));
+		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
 	}
 	if (CHIP_IS_E2(bp)) {
 		/* Bit-map indicating which L2 hdrs may appear after the
 		   basic Ethernet header */
-		REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF(bp) ? 7 : 6));
+		REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF_SD(bp) ? 7 : 6));
 	}
 
 	if (CHIP_REV_IS_SLOW(bp))
@@ -5386,7 +5447,7 @@
 	if (!CHIP_IS_E1(bp)) {
 		/* 0x2 disable mf_ov, 0x1 enable */
 		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
-		       (IS_MF(bp) ? 0x1 : 0x2));
+		       (IS_MF_SD(bp) ? 0x1 : 0x2));
 
 		if (CHIP_IS_E2(bp)) {
 			val = 0;
@@ -6170,6 +6231,70 @@
 		return BP_VN(bp) * 32  + rel_offset;
 }
 
+/**
+ *  LLH CAM line allocations: currently only iSCSI and ETH MACs are
+ *  relevant. In addition, the current implementation is tuned for a
+ *  single ETH MAC.
+ *
+ *  When PF configuration of multiple unicast ETH MACs in
+ *  switch-independent mode is required (NetQ, multiple netdev MACs,
+ *  etc.), consider better utilisation of the 16 per-function MAC
+ *  entries in the LLH memory.
+ */
+enum {
+	LLH_CAM_ISCSI_ETH_LINE = 0,
+	LLH_CAM_ETH_LINE,
+	LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE
+};
+
+static void bnx2x_set_mac_in_nig(struct bnx2x *bp,
+			  int set,
+			  unsigned char *dev_addr,
+			  int index)
+{
+	u32 wb_data[2];
+	u32 mem_offset, ena_offset, mem_index;
+	/**
+	 * indexes mapping:
+	 * 0..7 - goes to MEM
+	 * 8..15 - goes to MEM2
+	 */
+
+	if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
+		return;
+
+	/* calculate memory start offset according to the mapping
+	 * and index in the memory */
+	if (index < NIG_LLH_FUNC_MEM_MAX_OFFSET) {
+		mem_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
+					   NIG_REG_LLH0_FUNC_MEM;
+		ena_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
+					   NIG_REG_LLH0_FUNC_MEM_ENABLE;
+		mem_index = index;
+	} else {
+		mem_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2 :
+					   NIG_REG_P0_LLH_FUNC_MEM2;
+		ena_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2_ENABLE :
+					   NIG_REG_P0_LLH_FUNC_MEM2_ENABLE;
+		mem_index = index - NIG_LLH_FUNC_MEM_MAX_OFFSET;
+	}
+
+	if (set) {
+		/* LLH_FUNC_MEM is a u64 WB register */
+		mem_offset += 8*mem_index;
+
+		wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
+			      (dev_addr[4] <<  8) |  dev_addr[5]);
+		wb_data[1] = ((dev_addr[0] <<  8) |  dev_addr[1]);
+
+		REG_WR_DMAE(bp, mem_offset, wb_data, 2);
+	}
+
+	/* enable/disable the entry */
+	REG_WR(bp, ena_offset + 4*mem_index, set);
+
+}
+
 void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
 {
 	u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
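In bnx2x_set_mac_in_nig() above, LLH_FUNC_MEM is a 64-bit wide-bus register, so the MAC address is split across two 32-bit words with the low four octets in wb_data[0]. A worked packing for a sample (hypothetical) address:

/* dev_addr = 00:11:22:33:44:55:
 *	wb_data[0] = (0x22 << 24) | (0x33 << 16) | (0x44 << 8) | 0x55
 *		   = 0x22334455
 *	wb_data[1] = (0x00 << 8) | 0x11 = 0x0011
 */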
@@ -6179,6 +6304,8 @@
 	bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
 			       (1 << bp->fp->cl_id), cam_offset , 0);
 
+	bnx2x_set_mac_in_nig(bp, set, bp->dev->dev_addr, LLH_CAM_ETH_LINE);
+
 	if (CHIP_IS_E1(bp)) {
 		/* broadcast MAC */
 		u8 bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
@@ -6289,6 +6416,8 @@
 	/* Send a SET_MAC ramrod */
 	bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec,
 			       cam_offset, 0);
+
+	bnx2x_set_mac_in_nig(bp, set, bp->iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);
 	return 0;
 }
 #endif
@@ -8076,9 +8205,8 @@
 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
 {
 	int port = BP_PORT(bp);
-	u32 val, val2;
 	u32 config;
-	u32 ext_phy_type, ext_phy_config;;
+	u32 ext_phy_type, ext_phy_config;
 
 	bp->link_params.bp = bp;
 	bp->link_params.port = port;
@@ -8135,25 +8263,62 @@
 		 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
 		bp->mdio.prtad =
 			XGXS_EXT_PHY_ADDR(ext_phy_config);
+}
 
-	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
-	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
-	bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
+static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
+{
+	u32 val, val2;
+	int func = BP_ABS_FUNC(bp);
+	int port = BP_PORT(bp);
+
+	if (BP_NOMCP(bp)) {
+		BNX2X_ERROR("warning: random MAC workaround active\n");
+		random_ether_addr(bp->dev->dev_addr);
+	} else if (IS_MF(bp)) {
+		val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
+		val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
+		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
+		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
+			bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
+
+#ifdef BCM_CNIC
+		/* iSCSI NPAR MAC */
+		if (IS_MF_SI(bp)) {
+			u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
+			if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
+				val2 = MF_CFG_RD(bp, func_ext_config[func].
+						     iscsi_mac_addr_upper);
+				val = MF_CFG_RD(bp, func_ext_config[func].
+						    iscsi_mac_addr_lower);
+				bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
+			}
+		}
+#endif
+	} else {
+		/* in SF read MACs from port configuration */
+		val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
+		val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
+		bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
+
+#ifdef BCM_CNIC
+		val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
+				    iscsi_mac_upper);
+		val = SHMEM_RD(bp, dev_info.port_hw_config[port].
+				   iscsi_mac_lower);
+		bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
+#endif
+	}
+
 	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
 	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
 
-#ifdef BCM_CNIC
-	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
-	val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
-	bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
-#endif
 }
 
 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
 {
-	int func = BP_ABS_FUNC(bp);
-	int vn;
-	u32 val, val2;
+	int /*abs*/func = BP_ABS_FUNC(bp);
+	int vn, port;
+	u32 val = 0;
 	int rc = 0;
 
 	bnx2x_get_common_hwinfo(bp);
@@ -8186,44 +8351,99 @@
 	bp->mf_ov = 0;
 	bp->mf_mode = 0;
 	vn = BP_E1HVN(bp);
+	port = BP_PORT(bp);
+
 	if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
+		DP(NETIF_MSG_PROBE,
+			    "shmem2base 0x%x, size %d, mfcfg offset %d\n",
+			    bp->common.shmem2_base, SHMEM2_RD(bp, size),
+			    (u32)offsetof(struct shmem2_region, mf_cfg_addr));
 		if (SHMEM2_HAS(bp, mf_cfg_addr))
 			bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
 		else
 			bp->common.mf_cfg_base = bp->common.shmem_base +
 				offsetof(struct shmem_region, func_mb) +
 				E1H_FUNC_MAX * sizeof(struct drv_func_mb);
-		bp->mf_config[vn] =
-			MF_CFG_RD(bp, func_mf_config[func].config);
+		/*
+		 * get mf configuration:
+		 * 1. existence of MF configuration
+		 * 2. MAC address must be legal (check only upper bytes)
+		 *    for Switch-Independent mode;
+		 *    OVLAN must be legal for Switch-Dependent mode
+		 * 3. SF_MODE configures specific MF mode
+		 */
+		if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
+			/* get mf configuration */
+			val = SHMEM_RD(bp,
+				       dev_info.shared_feature_config.config);
+			val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
 
-		val = (MF_CFG_RD(bp, func_mf_config[FUNC_0].e1hov_tag) &
-		       FUNC_MF_CFG_E1HOV_TAG_MASK);
-		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
-			bp->mf_mode = 1;
+			switch (val) {
+			case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
+				val = MF_CFG_RD(bp, func_mf_config[func].
+						mac_upper);
+				/* check for legal mac (upper bytes)*/
+				if (val != 0xffff) {
+					bp->mf_mode = MULTI_FUNCTION_SI;
+					bp->mf_config[vn] = MF_CFG_RD(bp,
+						   func_mf_config[func].config);
+				} else
+					DP(NETIF_MSG_PROBE, "illegal MAC "
+							    "address for SI\n");
+				break;
+			case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
+				/* get OV configuration */
+				val = MF_CFG_RD(bp,
+					func_mf_config[FUNC_0].e1hov_tag);
+				val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
+
+				if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
+					bp->mf_mode = MULTI_FUNCTION_SD;
+					bp->mf_config[vn] = MF_CFG_RD(bp,
+						func_mf_config[func].config);
+				} else
+					DP(NETIF_MSG_PROBE, "illegal OV for "
+							    "SD\n");
+				break;
+			default:
+				/* Unknown configuration: reset mf_config */
+				bp->mf_config[vn] = 0;
+				DP(NETIF_MSG_PROBE, "Unkown MF mode 0x%x\n",
+				   val);
+			}
+		}
+
 		BNX2X_DEV_INFO("%s function mode\n",
 			       IS_MF(bp) ? "multi" : "single");
 
-		if (IS_MF(bp)) {
-			val = (MF_CFG_RD(bp, func_mf_config[func].
-								e1hov_tag) &
-			       FUNC_MF_CFG_E1HOV_TAG_MASK);
+		switch (bp->mf_mode) {
+		case MULTI_FUNCTION_SD:
+			val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
+			      FUNC_MF_CFG_E1HOV_TAG_MASK;
 			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
 				bp->mf_ov = val;
-				BNX2X_DEV_INFO("MF OV for func %d is %d "
-					       "(0x%04x)\n",
-					       func, bp->mf_ov, bp->mf_ov);
+				BNX2X_DEV_INFO("MF OV for func %d is %d"
+					       " (0x%04x)\n", func,
+					       bp->mf_ov, bp->mf_ov);
 			} else {
-				BNX2X_ERROR("No valid MF OV for func %d,"
-					    "  aborting\n", func);
+				BNX2X_ERR("No valid MF OV for func %d,"
+					  "  aborting\n", func);
 				rc = -EPERM;
 			}
-		} else {
-			if (BP_VN(bp)) {
-				BNX2X_ERROR("VN %d in single function mode,"
-					    "  aborting\n", BP_E1HVN(bp));
+			break;
+		case MULTI_FUNCTION_SI:
+			BNX2X_DEV_INFO("func %d is in MF "
+				       "switch-independent mode\n", func);
+			break;
+		default:
+			if (vn) {
+				BNX2X_ERR("VN %d in single function mode,"
+					  "  aborting\n", vn);
 				rc = -EPERM;
 			}
+			break;
 		}
+
 	}
 
 	/* adjust igu_sb_cnt to MF for E1x */
@@ -8248,32 +8468,8 @@
 		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
 	}
 
-	if (IS_MF(bp)) {
-		val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
-		val = MF_CFG_RD(bp,  func_mf_config[func].mac_lower);
-		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
-		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
-			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
-			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
-			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
-			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
-			bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
-			bp->dev->dev_addr[5] = (u8)(val & 0xff);
-			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
-			       ETH_ALEN);
-			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
-			       ETH_ALEN);
-		}
-
-		return rc;
-	}
-
-	if (BP_NOMCP(bp)) {
-		/* only supposed to happen on emulation/FPGA */
-		BNX2X_ERROR("warning: random MAC workaround active\n");
-		random_ether_addr(bp->dev->dev_addr);
-		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
-	}
+	/* Get MAC addresses */
+	bnx2x_get_mac_hwinfo(bp);
 
 	return rc;
 }
@@ -8761,7 +8957,7 @@
 	dev->netdev_ops = &bnx2x_netdev_ops;
 	bnx2x_set_ethtool_ops(dev);
 	dev->features |= NETIF_F_SG;
-	dev->features |= NETIF_F_HW_CSUM;
+	dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
 	if (bp->flags & USING_DAC_FLAG)
 		dev->features |= NETIF_F_HIGHDMA;
 	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
@@ -8769,7 +8965,7 @@
 	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
 
 	dev->vlan_features |= NETIF_F_SG;
-	dev->vlan_features |= NETIF_F_HW_CSUM;
+	dev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
 	if (bp->flags & USING_DAC_FLAG)
 		dev->vlan_features |= NETIF_F_HIGHDMA;
 	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
@@ -9064,7 +9260,7 @@
 	default:
 		pr_err("Unknown board_type (%ld), aborting\n",
 			   ent->driver_data);
-		return ENODEV;
+		return -ENODEV;
 	}
 
 	cid_count += CNIC_CONTEXT_USE;
@@ -9096,12 +9292,6 @@
 	/* calc qm_cid_count */
 	bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);
 
-	rc = register_netdev(dev);
-	if (rc) {
-		dev_err(&pdev->dev, "Cannot register net device\n");
-		goto init_one_exit;
-	}
-
 	/* Configure interrupt mode: try to enable MSI-X/MSI if
 	 * needed, set bp->num_queues appropriately.
 	 */
@@ -9110,6 +9300,12 @@
 	/* Add all NAPI objects */
 	bnx2x_add_all_napi(bp);
 
+	rc = register_netdev(dev);
+	if (rc) {
+		dev_err(&pdev->dev, "Cannot register net device\n");
+		goto init_one_exit;
+	}
+
 	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
 
 	netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h
index 1cefe48..64bdda1 100644
--- a/drivers/net/bnx2x/bnx2x_reg.h
+++ b/drivers/net/bnx2x/bnx2x_reg.h
@@ -1774,6 +1774,8 @@
 /* [RW 8] event id for llh0 */
 #define NIG_REG_LLH0_EVENT_ID					 0x10084
 #define NIG_REG_LLH0_FUNC_EN					 0x160fc
+#define NIG_REG_LLH0_FUNC_MEM					 0x16180
+#define NIG_REG_LLH0_FUNC_MEM_ENABLE				 0x16140
 #define NIG_REG_LLH0_FUNC_VLAN_ID				 0x16100
 /* [RW 1] Determine the IP version to look for in
    ~nig_registers_llh0_dest_ip_0.llh0_dest_ip_0. 0 - IPv6; 1-IPv4 */
@@ -1797,6 +1799,9 @@
 #define NIG_REG_LLH1_ERROR_MASK 				 0x10090
 /* [RW 8] event id for llh1 */
 #define NIG_REG_LLH1_EVENT_ID					 0x10088
+#define NIG_REG_LLH1_FUNC_MEM					 0x161c0
+#define NIG_REG_LLH1_FUNC_MEM_ENABLE				 0x16160
+#define NIG_REG_LLH1_FUNC_MEM_SIZE				 16
 /* [RW 8] init credit counter for port1 in LLH */
 #define NIG_REG_LLH1_XCM_INIT_CREDIT				 0x10564
 #define NIG_REG_LLH1_XCM_MASK					 0x10134
diff --git a/drivers/net/bonding/Makefile b/drivers/net/bonding/Makefile
index 6f9c6fa..0e2737e 100644
--- a/drivers/net/bonding/Makefile
+++ b/drivers/net/bonding/Makefile
@@ -4,7 +4,7 @@
 
 obj-$(CONFIG_BONDING) += bonding.o
 
-bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o
+bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o bond_debugfs.o
 
 ipv6-$(subst m,y,$(CONFIG_IPV6)) += bond_ipv6.o
 bonding-objs += $(ipv6-y)
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 881914b..48cf24f 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2474,8 +2474,7 @@
 		goto out;
 
 	read_lock(&bond->lock);
-	slave = bond_get_slave_by_dev((struct bonding *)netdev_priv(dev),
-					orig_dev);
+	slave = bond_get_slave_by_dev(netdev_priv(dev), orig_dev);
 	if (!slave)
 		goto out_unlock;
 
diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
new file mode 100644
index 0000000..ae1eb2f
--- /dev/null
+++ b/drivers/net/bonding/bond_debugfs.c
@@ -0,0 +1,96 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+
+#include "bonding.h"
+
+#ifdef CONFIG_DEBUG_FS
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+static struct dentry *bonding_debug_root;
+
+void bond_debug_register(struct bonding *bond)
+{
+	if (!bonding_debug_root)
+		return;
+
+	bond->debug_dir =
+		debugfs_create_dir(bond->dev->name, bonding_debug_root);
+
+	if (!bond->debug_dir) {
+		pr_warning("%s: Warning: failed to register to debugfs\n",
+			bond->dev->name);
+		return;
+	}
+}
+
+void bond_debug_unregister(struct bonding *bond)
+{
+	if (!bonding_debug_root)
+		return;
+
+	debugfs_remove_recursive(bond->debug_dir);
+}
+
+void bond_debug_reregister(struct bonding *bond)
+{
+	struct dentry *d;
+
+	if (!bonding_debug_root)
+		return;
+
+	d = debugfs_rename(bonding_debug_root, bond->debug_dir,
+			   bonding_debug_root, bond->dev->name);
+	if (d) {
+		bond->debug_dir = d;
+	} else {
+		pr_warning("%s: Warning: failed to reregister, "
+				"so just unregister old one\n",
+				bond->dev->name);
+		bond_debug_unregister(bond);
+	}
+}
+
+void bond_create_debugfs(void)
+{
+	bonding_debug_root = debugfs_create_dir("bonding", NULL);
+
+	if (!bonding_debug_root) {
+		pr_warning("Warning: Cannot create bonding directory"
+				" in debugfs\n");
+	}
+}
+
+void bond_destroy_debugfs(void)
+{
+	debugfs_remove_recursive(bonding_debug_root);
+	bonding_debug_root = NULL;
+}
+
+
+#else /* !CONFIG_DEBUG_FS */
+
+void bond_debug_register(struct bonding *bond)
+{
+}
+
+void bond_debug_unregister(struct bonding *bond)
+{
+}
+
+void bond_debug_reregister(struct bonding *bond)
+{
+}
+
+void bond_create_debugfs(void)
+{
+}
+
+void bond_destroy_debugfs(void)
+{
+}
+
+#endif /* CONFIG_DEBUG_FS */
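
For orientation, the patch above only creates the per-bond debugfs directory; it adds no files to it yet. A hedged sketch of how a read-only file could later be hung off bond->debug_dir follows; all names here (bond_state_show, bond_state_open, bond_state_fops) are illustrative, not part of the patch, and the fragment assumes the same includes as bond_debugfs.c above.

    /* Hypothetical extension: a "state" file under the per-bond dir. */
    static int bond_state_show(struct seq_file *m, void *v)
    {
    	struct bonding *bond = m->private;

    	seq_printf(m, "slaves: %d\n", bond->slave_cnt);
    	return 0;
    }

    static int bond_state_open(struct inode *inode, struct file *file)
    {
    	/* debugfs stored the bond pointer in i_private for us */
    	return single_open(file, bond_state_show, inode->i_private);
    }

    static const struct file_operations bond_state_fops = {
    	.owner	 = THIS_MODULE,
    	.open	 = bond_state_open,
    	.read	 = seq_read,
    	.llseek	 = seq_lseek,
    	.release = single_release,
    };

    /* in bond_debug_register(), after the directory is created:
     *	debugfs_create_file("state", 0444, bond->debug_dir,
     *			    bond, &bond_state_fops);
     */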
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index bdb68a6..07011e4 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -873,15 +873,11 @@
 static void __bond_resend_igmp_join_requests(struct net_device *dev)
 {
 	struct in_device *in_dev;
-	struct ip_mc_list *im;
 
 	rcu_read_lock();
 	in_dev = __in_dev_get_rcu(dev);
-	if (in_dev) {
-		for (im = in_dev->mc_list; im; im = im->next)
-			ip_mc_rejoin_group(im);
-	}
-
+	if (in_dev)
+		ip_mc_rejoin_groups(in_dev);
 	rcu_read_unlock();
 }
 
@@ -1574,7 +1570,7 @@
 
 	/* If this is the first slave, then we need to set the master's hardware
 	 * address to be the same as the slave's. */
-	if (bond->slave_cnt == 0)
+	if (is_zero_ether_addr(bond->dev->dev_addr))
 		memcpy(bond->dev->dev_addr, slave_dev->dev_addr,
 		       slave_dev->addr_len);
 
@@ -3209,7 +3205,7 @@
 #ifdef CONFIG_PROC_FS
 
 static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
-	__acquires(&dev_base_lock)
+	__acquires(RCU)
 	__acquires(&bond->lock)
 {
 	struct bonding *bond = seq->private;
@@ -3218,7 +3214,7 @@
 	int i;
 
 	/* make sure the bond won't be taken away */
-	read_lock(&dev_base_lock);
+	rcu_read_lock();
 	read_lock(&bond->lock);
 
 	if (*pos == 0)
@@ -3248,12 +3244,12 @@
 
 static void bond_info_seq_stop(struct seq_file *seq, void *v)
 	__releases(&bond->lock)
-	__releases(&dev_base_lock)
+	__releases(RCU)
 {
 	struct bonding *bond = seq->private;
 
 	read_unlock(&bond->lock);
-	read_unlock(&dev_base_lock);
+	rcu_read_unlock();
 }
 
 static void bond_info_show_master(struct seq_file *seq)
@@ -3507,6 +3503,8 @@
 	bond_remove_proc_entry(bond);
 	bond_create_proc_entry(bond);
 
+	bond_debug_reregister(bond);
+
 	return NOTIFY_DONE;
 }
 
@@ -4789,6 +4787,8 @@
 
 	bond_remove_proc_entry(bond);
 
+	bond_debug_unregister(bond);
+
 	__hw_addr_flush(&bond->mc_list);
 
 	list_for_each_entry_safe(vlan, tmp, &bond->vlan_list, vlan_list) {
@@ -5191,6 +5191,8 @@
 
 	bond_prepare_sysfs_group(bond);
 
+	bond_debug_register(bond);
+
 	__hw_addr_init(&bond->mc_list);
 	return 0;
 }
@@ -5312,6 +5314,8 @@
 	if (res)
 		goto err_link;
 
+	bond_create_debugfs();
+
 	for (i = 0; i < max_bonds; i++) {
 		res = bond_create(&init_net, NULL);
 		if (res)
@@ -5322,7 +5326,6 @@
 	if (res)
 		goto err;
 
-
 	register_netdevice_notifier(&bond_netdev_notifier);
 	register_inetaddr_notifier(&bond_inetaddr_notifier);
 	bond_register_ipv6_notifier();
@@ -5346,6 +5349,7 @@
 	bond_unregister_ipv6_notifier();
 
 	bond_destroy_sysfs();
+	bond_destroy_debugfs();
 
 	rtnl_link_unregister(&bond_link_ops);
 	unregister_pernet_subsys(&bond_net_ops);
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 4eedb12..03710f8 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -259,6 +259,10 @@
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 	struct   in6_addr master_ipv6;
 #endif
+#ifdef CONFIG_DEBUG_FS
+	/* debugging support via debugfs */
+	struct	 dentry *debug_dir;
+#endif /* CONFIG_DEBUG_FS */
 };
 
 /**
@@ -286,7 +290,7 @@
 		return NULL;
 	}
 
-	return (struct bonding *)netdev_priv(slave->dev->master);
+	return netdev_priv(slave->dev->master);
 }
 
 static inline bool bond_is_lb(const struct bonding *bond)
@@ -380,6 +384,11 @@
 void bond_change_active_slave(struct bonding *bond, struct slave *new_active);
 void bond_register_arp(struct bonding *);
 void bond_unregister_arp(struct bonding *);
+void bond_create_debugfs(void);
+void bond_destroy_debugfs(void);
+void bond_debug_register(struct bonding *bond);
+void bond_debug_unregister(struct bonding *bond);
+void bond_debug_reregister(struct bonding *bond);
 
 struct bond_net {
 	struct net *		net;	/* Associated network namespace */
diff --git a/drivers/net/caif/caif_shm_u5500.c b/drivers/net/caif/caif_shm_u5500.c
index 1cd90da..32b1c6f 100644
--- a/drivers/net/caif/caif_shm_u5500.c
+++ b/drivers/net/caif/caif_shm_u5500.c
@@ -5,7 +5,7 @@
  * License terms: GNU General Public License (GPL) version 2
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ":" __func__ "():" fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt
 
 #include <linux/version.h>
 #include <linux/init.h>
diff --git a/drivers/net/caif/caif_shmcore.c b/drivers/net/caif/caif_shmcore.c
index 19f9c06..8051116 100644
--- a/drivers/net/caif/caif_shmcore.c
+++ b/drivers/net/caif/caif_shmcore.c
@@ -6,7 +6,7 @@
  * License terms: GNU General Public License (GPL) version 2
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ":" __func__ "():" fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ":" fmt
 
 #include <linux/spinlock.h>
 #include <linux/sched.h>
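
The pr_fmt() change in both caif files above is forced by the C language: __func__ is a predefined identifier (effectively static const char __func__[]), not a string literal, so it cannot take part in compile-time string concatenation, and any pr_*() call expanding the old pr_fmt() would fail to build. A minimal standalone illustration (not from the patch):

    #include <stdio.h>

    void ok(void)
    {
    	/* fine: __func__ is formatted at run time */
    	printf("%s(): %s\n", __func__, "hello");
    }

    #if 0
    void broken(void)
    {
    	/* does not compile: an identifier cannot be glued to literals */
    	puts("module:" __func__ "(): hello");
    }
    #endif

    int main(void)
    {
    	ok();
    	return 0;
    }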
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
index 8427533..20da199 100644
--- a/drivers/net/caif/caif_spi.c
+++ b/drivers/net/caif/caif_spi.c
@@ -33,6 +33,9 @@
 MODULE_AUTHOR("Daniel Martensson<daniel.martensson@stericsson.com>");
 MODULE_DESCRIPTION("CAIF SPI driver");
 
+/* Returns the number of padding bytes for alignment. */
+#define PAD_POW2(x, pow) ((((x)&((pow)-1))==0) ? 0 : (((pow)-((x)&((pow)-1)))))
+
 static int spi_loop;
 module_param(spi_loop, bool, S_IRUGO);
 MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode.");
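
To make the macro concrete, here is a small userspace sketch (not part of the patch) contrasting the old masking arithmetic with PAD_POW2(). The old code's `len & align` yields the remainder modulo the alignment, not the padding needed to reach it, which is what the conversion below fixes:

    #include <stdio.h>

    /* same definition as above: pad x up to the next multiple of pow,
     * where pow must be a power of two */
    #define PAD_POW2(x, pow) ((((x) & ((pow) - 1)) == 0) ? 0 : \
    			  ((pow) - ((x) & ((pow) - 1))))

    int main(void)
    {
    	int len = 5;	/* e.g. info->hdr_len + 1 */

    	/* old arithmetic, value used as mask 3 (4-byte boundary):
    	 * gives the remainder, so the result stays unaligned */
    	printf("old: pad %d -> total %d\n", len & 3, len + (len & 3));

    	/* new arithmetic, value is the alignment 4 itself:
    	 * gives the real padding, 5 + 3 = 8 is aligned */
    	printf("new: pad %d -> total %d\n", PAD_POW2(len, 4),
    	       len + PAD_POW2(len, 4));
    	return 0;
    }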
@@ -41,7 +44,10 @@
 module_param(spi_frm_align, int, S_IRUGO);
 MODULE_PARM_DESC(spi_frm_align, "SPI frame alignment.");
 
-/* SPI padding options. */
+/*
+ * SPI padding options.
+ * Warning: must be a power of 2 (the & operation relies on it) and cannot be zero!
+ */
 module_param(spi_up_head_align, int, S_IRUGO);
 MODULE_PARM_DESC(spi_up_head_align, "SPI uplink head alignment.");
 
@@ -240,15 +246,13 @@
 static const struct file_operations dbgfs_state_fops = {
 	.open = dbgfs_open,
 	.read = dbgfs_state,
-	.owner = THIS_MODULE,
-	.llseek = default_llseek,
+	.owner = THIS_MODULE
 };
 
 static const struct file_operations dbgfs_frame_fops = {
 	.open = dbgfs_open,
 	.read = dbgfs_frame,
-	.owner = THIS_MODULE,
-	.llseek = default_llseek,
+	.owner = THIS_MODULE
 };
 
 static inline void dev_debugfs_add(struct cfspi *cfspi)
@@ -337,6 +341,9 @@
 	u8 *dst = buf;
 	caif_assert(buf);
 
+	if (cfspi->slave && !cfspi->slave_talked)
+		cfspi->slave_talked = true;
+
 	do {
 		struct sk_buff *skb;
 		struct caif_payload_info *info;
@@ -357,8 +364,8 @@
 		 * Compute head offset i.e. number of bytes to add to
 		 * get the start of the payload aligned.
 		 */
-		if (spi_up_head_align) {
-			spad = 1 + ((info->hdr_len + 1) & spi_up_head_align);
+		if (spi_up_head_align > 1) {
+			spad = 1 + PAD_POW2((info->hdr_len + 1), spi_up_head_align);
 			*dst = (u8)(spad - 1);
 			dst += spad;
 		}
@@ -373,7 +380,7 @@
 		 * Compute tail offset i.e. number of bytes to add to
 		 * get the complete CAIF frame aligned.
 		 */
-		epad = (skb->len + spad) & spi_up_tail_align;
+		epad = PAD_POW2((skb->len + spad), spi_up_tail_align);
 		dst += epad;
 
 		dev_kfree_skb(skb);
@@ -417,14 +424,14 @@
 		 * Compute head offset i.e. number of bytes to add to
 		 * get the start of the payload aligned.
 		 */
-		if (spi_up_head_align)
-			spad = 1 + ((info->hdr_len + 1) & spi_up_head_align);
+		if (spi_up_head_align > 1)
+			spad = 1 + PAD_POW2((info->hdr_len + 1), spi_up_head_align);
 
 		/*
 		 * Compute tail offset i.e. number of bytes to add to
 		 * get the complete CAIF frame aligned.
 		 */
-		epad = (skb->len + spad) & spi_up_tail_align;
+		epad = PAD_POW2((skb->len + spad), spi_up_tail_align);
 
 		if ((skb->len + spad + epad + frm_len) <= CAIF_MAX_SPI_FRAME) {
 			skb_queue_tail(&cfspi->chead, skb);
@@ -433,6 +440,7 @@
 		} else {
 			/* Put back packet. */
 			skb_queue_head(&cfspi->qhead, skb);
+			break;
 		}
 	} while (pkts <= CAIF_MAX_SPI_PKTS);
 
@@ -453,6 +461,15 @@
 {
 	struct cfspi *cfspi = (struct cfspi *)ifc->priv;
 
+	/*
+	 * The slave device is the master on the link. Interrupts before the
+	 * slave has transmitted are considered spurious.
+	 */
+	if (cfspi->slave && !cfspi->slave_talked) {
+		printk(KERN_WARNING "CFSPI: Spurious SS interrupt.\n");
+		return;
+	}
+
 	if (!in_interrupt())
 		spin_lock(&cfspi->lock);
 	if (assert) {
@@ -465,7 +482,8 @@
 		spin_unlock(&cfspi->lock);
 
 	/* Wake up the xfer thread. */
-	wake_up_interruptible(&cfspi->wait);
+	if (assert)
+		wake_up_interruptible(&cfspi->wait);
 }
 
 static void cfspi_xfer_done_cb(struct cfspi_ifc *ifc)
@@ -523,7 +541,7 @@
 		 * Compute head offset i.e. number of bytes added to
 		 * get the start of the payload aligned.
 		 */
-		if (spi_down_head_align) {
+		if (spi_down_head_align > 1) {
 			spad = 1 + *src;
 			src += spad;
 		}
@@ -564,7 +582,7 @@
 		 * Compute tail offset i.e. number of bytes added to
 		 * get the complete CAIF frame aligned.
 		 */
-		epad = (pkt_len + spad) & spi_down_tail_align;
+		epad = PAD_POW2((pkt_len + spad), spi_down_tail_align);
 		src += epad;
 	} while ((src - buf) < len);
 
@@ -617,19 +635,28 @@
 
 	ndev = alloc_netdev(sizeof(struct cfspi),
 			"cfspi%d", cfspi_setup);
-	if (!dev)
-		return -ENODEV;
+	if (!ndev)
+		return -ENOMEM;
 
 	cfspi = netdev_priv(ndev);
 	netif_stop_queue(ndev);
 	cfspi->ndev = ndev;
 	cfspi->pdev = pdev;
 
-	/* Set flow info */
+	/* Set flow info. */
 	cfspi->flow_off_sent = 0;
 	cfspi->qd_low_mark = LOW_WATER_MARK;
 	cfspi->qd_high_mark = HIGH_WATER_MARK;
 
+	/* Set slave info. */
+	if (!strncmp(cfspi_spi_driver.driver.name, "cfspi_sspi", 10)) {
+		cfspi->slave = true;
+		cfspi->slave_talked = false;
+	} else {
+		cfspi->slave = false;
+		cfspi->slave_talked = false;
+	}
+
 	/* Assign the SPI device. */
 	cfspi->dev = dev;
 	/* Assign the device ifc to this SPI interface. */
diff --git a/drivers/net/caif/caif_spi_slave.c b/drivers/net/caif/caif_spi_slave.c
index 2111dbf..1b9943a 100644
--- a/drivers/net/caif/caif_spi_slave.c
+++ b/drivers/net/caif/caif_spi_slave.c
@@ -36,10 +36,15 @@
 #endif
 
 int spi_frm_align = 2;
-int spi_up_head_align = 1;
-int spi_up_tail_align;
-int spi_down_head_align = 3;
-int spi_down_tail_align = 1;
+
+/*
+ * SPI padding options.
+ * Warning: must be a power of 2 (the & operation relies on it) and cannot be zero!
+ */
+int spi_up_head_align   = 1 << 1;
+int spi_up_tail_align   = 1 << 0;
+int spi_down_head_align = 1 << 2;
+int spi_down_tail_align = 1 << 1;
 
 #ifdef CONFIG_DEBUG_FS
 static inline void debugfs_store_prev(struct cfspi *cfspi)
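
Note the change of convention in these defaults: the old values (1, 0, 3, 1) read as bit masks (alignment minus one), while the new shift expressions are the alignment itself, as PAD_POW2() expects, so old mask 3 becomes 1 << 2 and old mask 0 becomes 1 << 0. A throwaway check (userspace sketch, not from the patch) showing that an alignment of 1 degenerates to no padding, matching the old zero-mask behaviour in effect:

    #include <stdio.h>

    #define PAD_POW2(x, pow) ((((x) & ((pow) - 1)) == 0) ? 0 : \
    			  ((pow) - ((x) & ((pow) - 1))))

    int main(void)
    {
    	int len;

    	for (len = 0; len < 5; len++)
    		printf("len %d: align 1 -> %d, align 2 -> %d, align 4 -> %d\n",
    		       len, PAD_POW2(len, 1), PAD_POW2(len, 2),
    		       PAD_POW2(len, 4));
    	return 0;
    }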
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 080574b..d5a9db6 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -12,6 +12,27 @@
 	  This driver can also be built as a module.  If so, the module
 	  will be called vcan.
 
+config CAN_SLCAN
+	tristate "Serial / USB serial CAN Adaptors (slcan)"
+	depends on CAN
+	default n
+	---help---
+	  CAN driver for several 'low cost' CAN interfaces that are attached
+	  via serial lines or via USB-to-serial adapters using the LAWICEL
+	  ASCII protocol. The driver implements the tty line discipline N_SLCAN.
+
+	  As only the sending and receiving of CAN frames is implemented, this
+	  driver should work with the (serial/USB) CAN hardware from:
+	  www.canusb.com / www.can232.com / www.mictronic.com / www.canhack.de
+
+	  Userspace tools to attach the SLCAN line discipline (slcan_attach,
+	  slcand) can be found in the can-utils package at the SocketCAN SVN, see
+	  http://developer.berlios.de/projects/socketcan for details.
+
+	  The slcan driver supports up to 10 CAN netdevices by default, which
+	  can be changed by the 'maxdev=xx' module option. This driver can
+	  also be built as a module. If so, the module will be called slcan.
+
 config CAN_DEV
 	tristate "Platform CAN drivers with Netlink support"
 	depends on CAN
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 90af15a..07ca159 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -3,6 +3,7 @@
 #
 
 obj-$(CONFIG_CAN_VCAN)		+= vcan.o
+obj-$(CONFIG_CAN_SLCAN)		+= slcan.o
 
 obj-$(CONFIG_CAN_DEV)		+= can-dev.o
 can-dev-y			:= dev.o
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 6e533dc..b9a6d7a 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1114,11 +1114,6 @@
 /*
  * Receive one CAN frame from the hardware
  *
- * This works like the core of a NAPI function, but is intended to be called
- * from workqueue context instead. This driver already needs a workqueue to
- * process control messages, so we use the workqueue instead of using NAPI.
- * This was done to simplify locking.
- *
  * CONTEXT: must be called from user context
  */
 static int ican3_recv_skb(struct ican3_dev *mod)
@@ -1251,7 +1246,6 @@
  * Reset an ICAN module to its power-on state
  *
  * CONTEXT: no network device registered
- * LOCKING: work function disabled
  */
 static int ican3_reset_module(struct ican3_dev *mod)
 {
@@ -1262,9 +1256,6 @@
 	/* disable interrupts so no more work is scheduled */
 	iowrite8(1 << mod->num, &mod->ctrl->int_disable);
 
-	/* flush any pending work */
-	flush_scheduled_work();
-
 	/* the first unallocated page in the DPM is #9 */
 	mod->free_page = DPM_FREE_START;
 
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index 64c378c..74cd880 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -182,7 +182,7 @@
 
 		priv->can.state = CAN_STATE_ERROR_ACTIVE;
 		WARN(!(in_8(&regs->canmisc) & MSCAN_BOHOLD),
-		     "bus-off state expected");
+		     "bus-off state expected\n");
 		out_8(&regs->canmisc, MSCAN_BOHOLD);
 		/* Re-enable receive interrupts. */
 		out_8(&regs->canrier, MSCAN_RX_INTS_ENABLE);
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index 6727182..a9b6a65 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -32,99 +32,91 @@
 #include <linux/can/dev.h>
 #include <linux/can/error.h>
 
-#define MAX_MSG_OBJ		32
-#define MSG_OBJ_RX		0 /* The receive message object flag. */
-#define MSG_OBJ_TX		1 /* The transmit message object flag. */
+#define PCH_ENABLE		1 /* The enable flag */
+#define PCH_DISABLE		0 /* The disable flag */
+#define PCH_CTRL_INIT		BIT(0) /* The INIT bit of CANCONT register. */
+#define PCH_CTRL_IE		BIT(1) /* The IE bit of CAN control register */
+#define PCH_CTRL_IE_SIE_EIE	(BIT(3) | BIT(2) | BIT(1))
+#define PCH_CTRL_CCE		BIT(6)
+#define PCH_CTRL_OPT		BIT(7) /* The OPT bit of CANCONT register. */
+#define PCH_OPT_SILENT		BIT(3) /* The Silent bit of CANOPT reg. */
+#define PCH_OPT_LBACK		BIT(4) /* The LoopBack bit of CANOPT reg. */
 
-#define ENABLE			1 /* The enable flag */
-#define DISABLE			0 /* The disable flag */
-#define CAN_CTRL_INIT		0x0001 /* The INIT bit of CANCONT register. */
-#define CAN_CTRL_IE		0x0002 /* The IE bit of CAN control register */
-#define CAN_CTRL_IE_SIE_EIE	0x000e
-#define CAN_CTRL_CCE		0x0040
-#define CAN_CTRL_OPT		0x0080 /* The OPT bit of CANCONT register. */
-#define CAN_OPT_SILENT		0x0008 /* The Silent bit of CANOPT reg. */
-#define CAN_OPT_LBACK		0x0010 /* The LoopBack bit of CANOPT reg. */
-#define CAN_CMASK_RX_TX_SET	0x00f3
-#define CAN_CMASK_RX_TX_GET	0x0073
-#define CAN_CMASK_ALL		0xff
-#define CAN_CMASK_RDWR		0x80
-#define CAN_CMASK_ARB		0x20
-#define CAN_CMASK_CTRL		0x10
-#define CAN_CMASK_MASK		0x40
-#define CAN_CMASK_NEWDAT	0x04
-#define CAN_CMASK_CLRINTPND	0x08
+#define PCH_CMASK_RX_TX_SET	0x00f3
+#define PCH_CMASK_RX_TX_GET	0x0073
+#define PCH_CMASK_ALL		0xff
+#define PCH_CMASK_NEWDAT	BIT(2)
+#define PCH_CMASK_CLRINTPND	BIT(3)
+#define PCH_CMASK_CTRL		BIT(4)
+#define PCH_CMASK_ARB		BIT(5)
+#define PCH_CMASK_MASK		BIT(6)
+#define PCH_CMASK_RDWR		BIT(7)
+#define PCH_IF_MCONT_NEWDAT	BIT(15)
+#define PCH_IF_MCONT_MSGLOST	BIT(14)
+#define PCH_IF_MCONT_INTPND	BIT(13)
+#define PCH_IF_MCONT_UMASK	BIT(12)
+#define PCH_IF_MCONT_TXIE	BIT(11)
+#define PCH_IF_MCONT_RXIE	BIT(10)
+#define PCH_IF_MCONT_RMTEN	BIT(9)
+#define PCH_IF_MCONT_TXRQXT	BIT(8)
+#define PCH_IF_MCONT_EOB	BIT(7)
+#define PCH_IF_MCONT_DLC	(BIT(0) | BIT(1) | BIT(2) | BIT(3))
+#define PCH_MASK2_MDIR_MXTD	(BIT(14) | BIT(15))
+#define PCH_ID2_DIR		BIT(13)
+#define PCH_ID2_XTD		BIT(14)
+#define PCH_ID_MSGVAL		BIT(15)
+#define PCH_IF_CREQ_BUSY	BIT(15)
 
-#define CAN_IF_MCONT_NEWDAT	0x8000
-#define CAN_IF_MCONT_INTPND	0x2000
-#define CAN_IF_MCONT_UMASK	0x1000
-#define CAN_IF_MCONT_TXIE	0x0800
-#define CAN_IF_MCONT_RXIE	0x0400
-#define CAN_IF_MCONT_RMTEN	0x0200
-#define CAN_IF_MCONT_TXRQXT	0x0100
-#define CAN_IF_MCONT_EOB	0x0080
-#define CAN_IF_MCONT_DLC	0x000f
-#define CAN_IF_MCONT_MSGLOST	0x4000
-#define CAN_MASK2_MDIR_MXTD	0xc000
-#define CAN_ID2_DIR		0x2000
-#define CAN_ID_MSGVAL		0x8000
+#define PCH_STATUS_INT		0x8000
+#define PCH_REC			0x00007f00
+#define PCH_TEC			0x000000ff
 
-#define CAN_STATUS_INT		0x8000
-#define CAN_IF_CREQ_BUSY	0x8000
-#define CAN_ID2_XTD		0x4000
-
-#define CAN_REC			0x00007f00
-#define CAN_TEC			0x000000ff
-
-#define PCH_RX_OK		0x00000010
-#define PCH_TX_OK		0x00000008
-#define PCH_BUS_OFF		0x00000080
-#define PCH_EWARN		0x00000040
-#define PCH_EPASSIV		0x00000020
-#define PCH_LEC0		0x00000001
-#define PCH_LEC1		0x00000002
-#define PCH_LEC2		0x00000004
-#define PCH_LEC_ALL		(PCH_LEC0 | PCH_LEC1 | PCH_LEC2)
-#define PCH_STUF_ERR		PCH_LEC0
-#define PCH_FORM_ERR		PCH_LEC1
-#define PCH_ACK_ERR		(PCH_LEC0 | PCH_LEC1)
-#define PCH_BIT1_ERR		PCH_LEC2
-#define PCH_BIT0_ERR		(PCH_LEC0 | PCH_LEC2)
-#define PCH_CRC_ERR		(PCH_LEC1 | PCH_LEC2)
+#define PCH_TX_OK		BIT(3)
+#define PCH_RX_OK		BIT(4)
+#define PCH_EPASSIV		BIT(5)
+#define PCH_EWARN		BIT(6)
+#define PCH_BUS_OFF		BIT(7)
 
 /* bit position of certain controller bits. */
-#define BIT_BITT_BRP		0
-#define BIT_BITT_SJW		6
-#define BIT_BITT_TSEG1		8
-#define BIT_BITT_TSEG2		12
-#define BIT_IF1_MCONT_RXIE	10
-#define BIT_IF2_MCONT_TXIE	11
-#define BIT_BRPE_BRPE		6
-#define BIT_ES_TXERRCNT		0
-#define BIT_ES_RXERRCNT		8
-#define MSK_BITT_BRP		0x3f
-#define MSK_BITT_SJW		0xc0
-#define MSK_BITT_TSEG1		0xf00
-#define MSK_BITT_TSEG2		0x7000
-#define MSK_BRPE_BRPE		0x3c0
-#define MSK_BRPE_GET		0x0f
-#define MSK_CTRL_IE_SIE_EIE	0x07
-#define MSK_MCONT_TXIE		0x08
-#define MSK_MCONT_RXIE		0x10
-#define PCH_CAN_NO_TX_BUFF	1
-#define COUNTER_LIMIT		10
+#define PCH_BIT_BRP		0
+#define PCH_BIT_SJW		6
+#define PCH_BIT_TSEG1		8
+#define PCH_BIT_TSEG2		12
+#define PCH_BIT_BRPE_BRPE	6
+#define PCH_MSK_BITT_BRP	0x3f
+#define PCH_MSK_BRPE_BRPE	0x3c0
+#define PCH_MSK_CTRL_IE_SIE_EIE	0x07
+#define PCH_COUNTER_LIMIT	10
 
 #define PCH_CAN_CLK		50000000	/* 50MHz */
 
 /* Define the number of message object.
  * PCH CAN communications are done via Message RAM.
  * The Message RAM consists of 32 message objects. */
-#define PCH_RX_OBJ_NUM		26  /* 1~ PCH_RX_OBJ_NUM is Rx*/
-#define PCH_TX_OBJ_NUM		6  /* PCH_RX_OBJ_NUM is RX ~ Tx*/
-#define PCH_OBJ_NUM		(PCH_TX_OBJ_NUM + PCH_RX_OBJ_NUM)
+#define PCH_RX_OBJ_NUM		26
+#define PCH_TX_OBJ_NUM		6
+#define PCH_RX_OBJ_START	1
+#define PCH_RX_OBJ_END		PCH_RX_OBJ_NUM
+#define PCH_TX_OBJ_START	(PCH_RX_OBJ_END + 1)
+#define PCH_TX_OBJ_END		(PCH_RX_OBJ_NUM + PCH_TX_OBJ_NUM)
 
 #define PCH_FIFO_THRESH		16
 
+enum pch_ifreg {
+	PCH_RX_IFREG,
+	PCH_TX_IFREG,
+};
+
+enum pch_can_err {
+	PCH_STUF_ERR = 1,
+	PCH_FORM_ERR,
+	PCH_ACK_ERR,
+	PCH_BIT1_ERR,
+	PCH_BIT0_ERR,
+	PCH_CRC_ERR,
+	PCH_LEC_ALL,
+};
+
 enum pch_can_mode {
 	PCH_CAN_ENABLE,
 	PCH_CAN_DISABLE,
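
The rename from CAN_* to PCH_* in the hunk above also rewrites the magic hex constants as BIT() expressions; the numeric values are unchanged, which a throwaway check (userspace sketch, not from the patch, with BIT() redefined after the kernel's <linux/bitops.h>) confirms:

    #include <stdio.h>

    #define BIT(n) (1U << (n))

    int main(void)
    {
    	/* old CAN_IF_MCONT_NEWDAT 0x8000 == new PCH_IF_MCONT_NEWDAT */
    	printf("%d\n", BIT(15) == 0x8000);
    	/* old CAN_MASK2_MDIR_MXTD 0xc000 == new PCH_MASK2_MDIR_MXTD */
    	printf("%d\n", (BIT(14) | BIT(15)) == 0xc000);
    	/* old CAN_IF_MCONT_DLC 0x000f == new PCH_IF_MCONT_DLC */
    	printf("%d\n", (BIT(0) | BIT(1) | BIT(2) | BIT(3)) == 0x000f);
    	return 0;
    }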
@@ -134,6 +126,21 @@
 	PCH_CAN_RUN
 };
 
+struct pch_can_if_regs {
+	u32 creq;
+	u32 cmask;
+	u32 mask1;
+	u32 mask2;
+	u32 id1;
+	u32 id2;
+	u32 mcont;
+	u32 dataa1;
+	u32 dataa2;
+	u32 datab1;
+	u32 datab2;
+	u32 rsv[13];
+};
+
 struct pch_can_regs {
 	u32 cont;
 	u32 stat;
@@ -142,38 +149,21 @@
 	u32 intr;
 	u32 opt;
 	u32 brpe;
-	u32 reserve1;
-	u32 if1_creq;
-	u32 if1_cmask;
-	u32 if1_mask1;
-	u32 if1_mask2;
-	u32 if1_id1;
-	u32 if1_id2;
-	u32 if1_mcont;
-	u32 if1_dataa1;
-	u32 if1_dataa2;
-	u32 if1_datab1;
-	u32 if1_datab2;
-	u32 reserve2;
-	u32 reserve3[12];
-	u32 if2_creq;
-	u32 if2_cmask;
-	u32 if2_mask1;
-	u32 if2_mask2;
-	u32 if2_id1;
-	u32 if2_id2;
-	u32 if2_mcont;
-	u32 if2_dataa1;
-	u32 if2_dataa2;
-	u32 if2_datab1;
-	u32 if2_datab2;
-	u32 reserve4;
-	u32 reserve5[20];
+	u32 reserve;
+	struct pch_can_if_regs ifregs[2]; /* [0]=if1  [1]=if2 */
+	u32 reserve1[8];
 	u32 treq1;
 	u32 treq2;
-	u32 reserve6[2];
-	u32 reserve7[56];
-	u32 reserve8[3];
+	u32 reserve2[6];
+	u32 data1;
+	u32 data2;
+	u32 reserve3[6];
+	u32 canipend1;
+	u32 canipend2;
+	u32 reserve4[6];
+	u32 canmval1;
+	u32 canmval2;
+	u32 reserve5[37];
 	u32 srst;
 };
 
@@ -181,14 +171,13 @@
 	struct can_priv can;
 	unsigned int can_num;
 	struct pci_dev *dev;
-	unsigned int tx_enable[MAX_MSG_OBJ];
-	unsigned int rx_enable[MAX_MSG_OBJ];
-	unsigned int rx_link[MAX_MSG_OBJ];
+	int tx_enable[PCH_TX_OBJ_END];
+	int rx_enable[PCH_TX_OBJ_END];
+	int rx_link[PCH_TX_OBJ_END];
 	unsigned int int_enables;
 	unsigned int int_stat;
 	struct net_device *ndev;
-	spinlock_t msgif_reg_lock; /* Message Interface Registers Access Lock*/
-	unsigned int msg_obj[MAX_MSG_OBJ];
+	unsigned int msg_obj[PCH_TX_OBJ_END];
 	struct pch_can_regs __iomem *regs;
 	struct napi_struct napi;
 	unsigned int tx_obj;	/* Point next Tx Obj index */
@@ -228,11 +217,11 @@
 {
 	switch (mode) {
 	case PCH_CAN_RUN:
-		pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_INIT);
+		pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_INIT);
 		break;
 
 	case PCH_CAN_STOP:
-		pch_can_bit_set(&priv->regs->cont, CAN_CTRL_INIT);
+		pch_can_bit_set(&priv->regs->cont, PCH_CTRL_INIT);
 		break;
 
 	default:
@@ -246,30 +235,30 @@
 	u32 reg_val = ioread32(&priv->regs->opt);
 
 	if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
-		reg_val |= CAN_OPT_SILENT;
+		reg_val |= PCH_OPT_SILENT;
 
 	if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
-		reg_val |= CAN_OPT_LBACK;
+		reg_val |= PCH_OPT_LBACK;
 
-	pch_can_bit_set(&priv->regs->cont, CAN_CTRL_OPT);
+	pch_can_bit_set(&priv->regs->cont, PCH_CTRL_OPT);
 	iowrite32(reg_val, &priv->regs->opt);
 }
 
 static void pch_can_set_int_custom(struct pch_can_priv *priv)
 {
 	/* Clearing the IE, SIE and EIE bits of Can control register. */
-	pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_IE_SIE_EIE);
+	pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE);
 
 	/* Appropriately setting them. */
 	pch_can_bit_set(&priv->regs->cont,
-			((priv->int_enables & MSK_CTRL_IE_SIE_EIE) << 1));
+			((priv->int_enables & PCH_MSK_CTRL_IE_SIE_EIE) << 1));
 }
 
 /* This function retrieves interrupt enabled for the CAN device. */
 static void pch_can_get_int_enables(struct pch_can_priv *priv, u32 *enables)
 {
 	/* Obtaining the status of IE, SIE and EIE interrupt bits. */
-	*enables = ((ioread32(&priv->regs->cont) & CAN_CTRL_IE_SIE_EIE) >> 1);
+	*enables = ((ioread32(&priv->regs->cont) & PCH_CTRL_IE_SIE_EIE) >> 1);
 }
 
 static void pch_can_set_int_enables(struct pch_can_priv *priv,
@@ -277,19 +266,19 @@
 {
 	switch (interrupt_no) {
 	case PCH_CAN_ENABLE:
-		pch_can_bit_set(&priv->regs->cont, CAN_CTRL_IE);
+		pch_can_bit_set(&priv->regs->cont, PCH_CTRL_IE);
 		break;
 
 	case PCH_CAN_DISABLE:
-		pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_IE);
+		pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE);
 		break;
 
 	case PCH_CAN_ALL:
-		pch_can_bit_set(&priv->regs->cont, CAN_CTRL_IE_SIE_EIE);
+		pch_can_bit_set(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE);
 		break;
 
 	case PCH_CAN_NONE:
-		pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_IE_SIE_EIE);
+		pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE);
 		break;
 
 	default:
@@ -300,12 +289,12 @@
 
 static void pch_can_check_if_busy(u32 __iomem *creq_addr, u32 num)
 {
-	u32 counter = COUNTER_LIMIT;
+	u32 counter = PCH_COUNTER_LIMIT;
 	u32 ifx_creq;
 
 	iowrite32(num, creq_addr);
 	while (counter) {
-		ifx_creq = ioread32(creq_addr) & CAN_IF_CREQ_BUSY;
+		ifx_creq = ioread32(creq_addr) & PCH_IF_CREQ_BUSY;
 		if (!ifx_creq)
 			break;
 		counter--;
@@ -315,143 +304,76 @@
 		pr_err("%s:IF1 BUSY Flag is set forever.\n", __func__);
 }
 
-static void pch_can_set_rx_enable(struct pch_can_priv *priv, u32 buff_num,
-				  u32 set)
+static void pch_can_set_rxtx(struct pch_can_priv *priv, u32 buff_num,
+			     u32 set, enum pch_ifreg dir)
 {
-	unsigned long flags;
+	u32 ie;
 
-	spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+	if (dir)
+		ie = PCH_IF_MCONT_TXIE;
+	else
+		ie = PCH_IF_MCONT_RXIE;
+
 	/* Reading the receive buffer data from RAM to Interface1 registers */
-	iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
-	pch_can_check_if_busy(&priv->regs->if1_creq, buff_num);
+	iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[dir].cmask);
+	pch_can_check_if_busy(&priv->regs->ifregs[dir].creq, buff_num);
 
 	/* Setting the IF1MASK1 register to access MsgVal and RxIE bits */
-	iowrite32(CAN_CMASK_RDWR | CAN_CMASK_ARB | CAN_CMASK_CTRL,
-		  &priv->regs->if1_cmask);
+	iowrite32(PCH_CMASK_RDWR | PCH_CMASK_ARB | PCH_CMASK_CTRL,
+		  &priv->regs->ifregs[dir].cmask);
 
-	if (set == ENABLE) {
+	if (set == PCH_ENABLE) {
 		/* Setting the MsgVal and RxIE bits */
-		pch_can_bit_set(&priv->regs->if1_mcont, CAN_IF_MCONT_RXIE);
-		pch_can_bit_set(&priv->regs->if1_id2, CAN_ID_MSGVAL);
+		pch_can_bit_set(&priv->regs->ifregs[dir].mcont, ie);
+		pch_can_bit_set(&priv->regs->ifregs[dir].id2, PCH_ID_MSGVAL);
 
-	} else if (set == DISABLE) {
+	} else if (set == PCH_DISABLE) {
 		/* Resetting the MsgVal and RxIE bits */
-		pch_can_bit_clear(&priv->regs->if1_mcont, CAN_IF_MCONT_RXIE);
-		pch_can_bit_clear(&priv->regs->if1_id2, CAN_ID_MSGVAL);
+		pch_can_bit_clear(&priv->regs->ifregs[dir].mcont, ie);
+		pch_can_bit_clear(&priv->regs->ifregs[dir].id2, PCH_ID_MSGVAL);
 	}
 
-	pch_can_check_if_busy(&priv->regs->if1_creq, buff_num);
-	spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+	pch_can_check_if_busy(&priv->regs->ifregs[dir].creq, buff_num);
 }
 
-static void pch_can_rx_enable_all(struct pch_can_priv *priv)
+static void pch_can_set_rx_all(struct pch_can_priv *priv, u32 set)
 {
 	int i;
 
 	/* Traversing to obtain the object configured as receivers. */
-	for (i = 0; i < PCH_OBJ_NUM; i++) {
-		if (priv->msg_obj[i] == MSG_OBJ_RX)
-			pch_can_set_rx_enable(priv, i + 1, ENABLE);
-	}
+	for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++)
+		pch_can_set_rxtx(priv, i, set, PCH_RX_IFREG);
 }
 
-static void pch_can_rx_disable_all(struct pch_can_priv *priv)
-{
-	int i;
-
-	/* Traversing to obtain the object configured as receivers. */
-	for (i = 0; i < PCH_OBJ_NUM; i++) {
-		if (priv->msg_obj[i] == MSG_OBJ_RX)
-			pch_can_set_rx_enable(priv, i + 1, DISABLE);
-	}
-}
-
-static void pch_can_set_tx_enable(struct pch_can_priv *priv, u32 buff_num,
-				 u32 set)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&priv->msgif_reg_lock, flags);
-	/* Reading the Msg buffer from Message RAM to Interface2 registers. */
-	iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
-	pch_can_check_if_busy(&priv->regs->if2_creq, buff_num);
-
-	/* Setting the IF2CMASK register for accessing the
-		MsgVal and TxIE bits */
-	iowrite32(CAN_CMASK_RDWR | CAN_CMASK_ARB | CAN_CMASK_CTRL,
-		 &priv->regs->if2_cmask);
-
-	if (set == ENABLE) {
-		/* Setting the MsgVal and TxIE bits */
-		pch_can_bit_set(&priv->regs->if2_mcont, CAN_IF_MCONT_TXIE);
-		pch_can_bit_set(&priv->regs->if2_id2, CAN_ID_MSGVAL);
-	} else if (set == DISABLE) {
-		/* Resetting the MsgVal and TxIE bits. */
-		pch_can_bit_clear(&priv->regs->if2_mcont, CAN_IF_MCONT_TXIE);
-		pch_can_bit_clear(&priv->regs->if2_id2, CAN_ID_MSGVAL);
-	}
-
-	pch_can_check_if_busy(&priv->regs->if2_creq, buff_num);
-	spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
-}
-
-static void pch_can_tx_enable_all(struct pch_can_priv *priv)
+static void pch_can_set_tx_all(struct pch_can_priv *priv, u32 set)
 {
 	int i;
 
 	/* Traversing to obtain the object configured as transmit object. */
-	for (i = 0; i < PCH_OBJ_NUM; i++) {
-		if (priv->msg_obj[i] == MSG_OBJ_TX)
-			pch_can_set_tx_enable(priv, i + 1, ENABLE);
-	}
+	for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++)
+		pch_can_set_rxtx(priv, i, set, PCH_TX_IFREG);
 }
 
-static void pch_can_tx_disable_all(struct pch_can_priv *priv)
+static u32 pch_can_get_rxtx_ir(struct pch_can_priv *priv, u32 buff_num,
+			       enum pch_ifreg dir)
 {
-	int i;
+	u32 ie, enable;
 
-	/* Traversing to obtain the object configured as transmit object. */
-	for (i = 0; i < PCH_OBJ_NUM; i++) {
-		if (priv->msg_obj[i] == MSG_OBJ_TX)
-			pch_can_set_tx_enable(priv, i + 1, DISABLE);
-	}
-}
-
-static void pch_can_get_rx_enable(struct pch_can_priv *priv, u32 buff_num,
-				 u32 *enable)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(&priv->msgif_reg_lock, flags);
-	iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
-	pch_can_check_if_busy(&priv->regs->if1_creq, buff_num);
-
-	if (((ioread32(&priv->regs->if1_id2)) & CAN_ID_MSGVAL) &&
-			((ioread32(&priv->regs->if1_mcont)) &
-			CAN_IF_MCONT_RXIE))
-		*enable = ENABLE;
+	if (dir)
+		ie = PCH_IF_MCONT_RXIE;
 	else
-		*enable = DISABLE;
-	spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
-}
+		ie = PCH_IF_MCONT_TXIE;
 
-static void pch_can_get_tx_enable(struct pch_can_priv *priv, u32 buff_num,
-				 u32 *enable)
-{
-	unsigned long flags;
+	iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[dir].cmask);
+	pch_can_check_if_busy(&priv->regs->ifregs[dir].creq, buff_num);
 
-	spin_lock_irqsave(&priv->msgif_reg_lock, flags);
-	iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
-	pch_can_check_if_busy(&priv->regs->if2_creq, buff_num);
-
-	if (((ioread32(&priv->regs->if2_id2)) & CAN_ID_MSGVAL) &&
-			((ioread32(&priv->regs->if2_mcont)) &
-			CAN_IF_MCONT_TXIE)) {
-		*enable = ENABLE;
+	if (((ioread32(&priv->regs->ifregs[dir].id2)) & PCH_ID_MSGVAL) &&
+			((ioread32(&priv->regs->ifregs[dir].mcont)) & ie)) {
+		enable = 1;
 	} else {
-		*enable = DISABLE;
+		enable = 0;
 	}
-	spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+	return enable;
 }
 
 static int pch_can_int_pending(struct pch_can_priv *priv)
@@ -462,141 +384,131 @@
 static void pch_can_set_rx_buffer_link(struct pch_can_priv *priv,
 				       u32 buffer_num, u32 set)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&priv->msgif_reg_lock, flags);
-	iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
-	pch_can_check_if_busy(&priv->regs->if1_creq, buffer_num);
-	iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL, &priv->regs->if1_cmask);
-	if (set == ENABLE)
-		pch_can_bit_clear(&priv->regs->if1_mcont, CAN_IF_MCONT_EOB);
+	iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
+	pch_can_check_if_busy(&priv->regs->ifregs[0].creq, buffer_num);
+	iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL,
+		  &priv->regs->ifregs[0].cmask);
+	if (set == PCH_ENABLE)
+		pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
+				  PCH_IF_MCONT_EOB);
 	else
-		pch_can_bit_set(&priv->regs->if1_mcont, CAN_IF_MCONT_EOB);
+		pch_can_bit_set(&priv->regs->ifregs[0].mcont, PCH_IF_MCONT_EOB);
 
-	pch_can_check_if_busy(&priv->regs->if1_creq, buffer_num);
-	spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+	pch_can_check_if_busy(&priv->regs->ifregs[0].creq, buffer_num);
 }
 
 static void pch_can_get_rx_buffer_link(struct pch_can_priv *priv,
 				       u32 buffer_num, u32 *link)
 {
-	unsigned long flags;
+	iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
+	pch_can_check_if_busy(&priv->regs->ifregs[0].creq, buffer_num);
 
-	spin_lock_irqsave(&priv->msgif_reg_lock, flags);
-	iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
-	pch_can_check_if_busy(&priv->regs->if1_creq, buffer_num);
-
-	if (ioread32(&priv->regs->if1_mcont) & CAN_IF_MCONT_EOB)
-		*link = DISABLE;
+	if (ioread32(&priv->regs->ifregs[0].mcont) & PCH_IF_MCONT_EOB)
+		*link = PCH_DISABLE;
 	else
-		*link = ENABLE;
-	spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+		*link = PCH_ENABLE;
 }
 
 static void pch_can_clear_buffers(struct pch_can_priv *priv)
 {
 	int i;
 
-	for (i = 0; i < PCH_RX_OBJ_NUM; i++) {
-		iowrite32(CAN_CMASK_RX_TX_SET, &priv->regs->if1_cmask);
-		iowrite32(0xffff, &priv->regs->if1_mask1);
-		iowrite32(0xffff, &priv->regs->if1_mask2);
-		iowrite32(0x0, &priv->regs->if1_id1);
-		iowrite32(0x0, &priv->regs->if1_id2);
-		iowrite32(0x0, &priv->regs->if1_mcont);
-		iowrite32(0x0, &priv->regs->if1_dataa1);
-		iowrite32(0x0, &priv->regs->if1_dataa2);
-		iowrite32(0x0, &priv->regs->if1_datab1);
-		iowrite32(0x0, &priv->regs->if1_datab2);
-		iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
-			  CAN_CMASK_ARB | CAN_CMASK_CTRL,
-			  &priv->regs->if1_cmask);
-		pch_can_check_if_busy(&priv->regs->if1_creq, i+1);
+	for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) {
+		iowrite32(PCH_CMASK_RX_TX_SET, &priv->regs->ifregs[0].cmask);
+		iowrite32(0xffff, &priv->regs->ifregs[0].mask1);
+		iowrite32(0xffff, &priv->regs->ifregs[0].mask2);
+		iowrite32(0x0, &priv->regs->ifregs[0].id1);
+		iowrite32(0x0, &priv->regs->ifregs[0].id2);
+		iowrite32(0x0, &priv->regs->ifregs[0].mcont);
+		iowrite32(0x0, &priv->regs->ifregs[0].dataa1);
+		iowrite32(0x0, &priv->regs->ifregs[0].dataa2);
+		iowrite32(0x0, &priv->regs->ifregs[0].datab1);
+		iowrite32(0x0, &priv->regs->ifregs[0].datab2);
+		iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK |
+			  PCH_CMASK_ARB | PCH_CMASK_CTRL,
+			  &priv->regs->ifregs[0].cmask);
+		pch_can_check_if_busy(&priv->regs->ifregs[0].creq, i);
 	}
 
-	for (i = i;  i < PCH_OBJ_NUM; i++) {
-		iowrite32(CAN_CMASK_RX_TX_SET, &priv->regs->if2_cmask);
-		iowrite32(0xffff, &priv->regs->if2_mask1);
-		iowrite32(0xffff, &priv->regs->if2_mask2);
-		iowrite32(0x0, &priv->regs->if2_id1);
-		iowrite32(0x0, &priv->regs->if2_id2);
-		iowrite32(0x0, &priv->regs->if2_mcont);
-		iowrite32(0x0, &priv->regs->if2_dataa1);
-		iowrite32(0x0, &priv->regs->if2_dataa2);
-		iowrite32(0x0, &priv->regs->if2_datab1);
-		iowrite32(0x0, &priv->regs->if2_datab2);
-		iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
-			  CAN_CMASK_ARB | CAN_CMASK_CTRL,
-			  &priv->regs->if2_cmask);
-		pch_can_check_if_busy(&priv->regs->if2_creq, i+1);
+	for (i = PCH_TX_OBJ_START;  i <= PCH_TX_OBJ_END; i++) {
+		iowrite32(PCH_CMASK_RX_TX_SET, &priv->regs->ifregs[1].cmask);
+		iowrite32(0xffff, &priv->regs->ifregs[1].mask1);
+		iowrite32(0xffff, &priv->regs->ifregs[1].mask2);
+		iowrite32(0x0, &priv->regs->ifregs[1].id1);
+		iowrite32(0x0, &priv->regs->ifregs[1].id2);
+		iowrite32(0x0, &priv->regs->ifregs[1].mcont);
+		iowrite32(0x0, &priv->regs->ifregs[1].dataa1);
+		iowrite32(0x0, &priv->regs->ifregs[1].dataa2);
+		iowrite32(0x0, &priv->regs->ifregs[1].datab1);
+		iowrite32(0x0, &priv->regs->ifregs[1].datab2);
+		iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK |
+			  PCH_CMASK_ARB | PCH_CMASK_CTRL,
+			  &priv->regs->ifregs[1].cmask);
+		pch_can_check_if_busy(&priv->regs->ifregs[1].creq, i);
 	}
 }
 
 static void pch_can_config_rx_tx_buffers(struct pch_can_priv *priv)
 {
 	int i;
-	unsigned long flags;
 
-	spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+	for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) {
+		iowrite32(PCH_CMASK_RX_TX_GET,
+			&priv->regs->ifregs[0].cmask);
+		pch_can_check_if_busy(&priv->regs->ifregs[0].creq, i);
 
-	for (i = 0; i < PCH_OBJ_NUM; i++) {
-		if (priv->msg_obj[i] == MSG_OBJ_RX) {
-			iowrite32(CAN_CMASK_RX_TX_GET,
-				&priv->regs->if1_cmask);
-			pch_can_check_if_busy(&priv->regs->if1_creq, i+1);
+		iowrite32(0x0, &priv->regs->ifregs[0].id1);
+		iowrite32(0x0, &priv->regs->ifregs[0].id2);
 
-			iowrite32(0x0, &priv->regs->if1_id1);
-			iowrite32(0x0, &priv->regs->if1_id2);
+		pch_can_bit_set(&priv->regs->ifregs[0].mcont,
+				PCH_IF_MCONT_UMASK);
 
-			pch_can_bit_set(&priv->regs->if1_mcont,
-					CAN_IF_MCONT_UMASK);
+		/* FIFO mode: EOB is 0 for all but the last Rx Obj */
+		pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
+				  PCH_IF_MCONT_EOB);
+		/* in FIFO mode the EOB of the last Rx Obj must be 1 */
+		if (i == PCH_RX_OBJ_END)
+			pch_can_bit_set(&priv->regs->ifregs[0].mcont,
+					  PCH_IF_MCONT_EOB);
 
-			/* Set FIFO mode set to 0 except last Rx Obj*/
-			pch_can_bit_clear(&priv->regs->if1_mcont,
-					  CAN_IF_MCONT_EOB);
-			/* In case FIFO mode, Last EoB of Rx Obj must be 1 */
-			if (i == (PCH_RX_OBJ_NUM - 1))
-				pch_can_bit_set(&priv->regs->if1_mcont,
-						  CAN_IF_MCONT_EOB);
+		iowrite32(0, &priv->regs->ifregs[0].mask1);
+		pch_can_bit_clear(&priv->regs->ifregs[0].mask2,
+				  0x1fff | PCH_MASK2_MDIR_MXTD);
 
-			iowrite32(0, &priv->regs->if1_mask1);
-			pch_can_bit_clear(&priv->regs->if1_mask2,
-					  0x1fff | CAN_MASK2_MDIR_MXTD);
+		/* Setting CMASK for writing */
+		iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK |
+			  PCH_CMASK_ARB | PCH_CMASK_CTRL,
+			  &priv->regs->ifregs[0].cmask);
 
-			/* Setting CMASK for writing */
-			iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
-				  CAN_CMASK_ARB | CAN_CMASK_CTRL,
-				  &priv->regs->if1_cmask);
-
-			pch_can_check_if_busy(&priv->regs->if1_creq, i+1);
-		} else if (priv->msg_obj[i] == MSG_OBJ_TX) {
-			iowrite32(CAN_CMASK_RX_TX_GET,
-				&priv->regs->if2_cmask);
-			pch_can_check_if_busy(&priv->regs->if2_creq, i+1);
-
-			/* Resetting DIR bit for reception */
-			iowrite32(0x0, &priv->regs->if2_id1);
-			iowrite32(0x0, &priv->regs->if2_id2);
-			pch_can_bit_set(&priv->regs->if2_id2, CAN_ID2_DIR);
-
-			/* Setting EOB bit for transmitter */
-			iowrite32(CAN_IF_MCONT_EOB, &priv->regs->if2_mcont);
-
-			pch_can_bit_set(&priv->regs->if2_mcont,
-					CAN_IF_MCONT_UMASK);
-
-			iowrite32(0, &priv->regs->if2_mask1);
-			pch_can_bit_clear(&priv->regs->if2_mask2, 0x1fff);
-
-			/* Setting CMASK for writing */
-			iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
-				  CAN_CMASK_ARB | CAN_CMASK_CTRL,
-				  &priv->regs->if2_cmask);
-
-			pch_can_check_if_busy(&priv->regs->if2_creq, i+1);
-		}
+		pch_can_check_if_busy(&priv->regs->ifregs[0].creq, i);
 	}
-	spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+
+	for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++) {
+		iowrite32(PCH_CMASK_RX_TX_GET,
+			&priv->regs->ifregs[1].cmask);
+		pch_can_check_if_busy(&priv->regs->ifregs[1].creq, i);
+
+		/* Resetting DIR bit for reception */
+		iowrite32(0x0, &priv->regs->ifregs[1].id1);
+		iowrite32(0x0, &priv->regs->ifregs[1].id2);
+		pch_can_bit_set(&priv->regs->ifregs[1].id2, PCH_ID2_DIR);
+
+		/* Setting EOB bit for transmitter */
+		iowrite32(PCH_IF_MCONT_EOB, &priv->regs->ifregs[1].mcont);
+
+		pch_can_bit_set(&priv->regs->ifregs[1].mcont,
+				PCH_IF_MCONT_UMASK);
+
+		iowrite32(0, &priv->regs->ifregs[1].mask1);
+		pch_can_bit_clear(&priv->regs->ifregs[1].mask2, 0x1fff);
+
+		/* Setting CMASK for writing */
+		iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK |
+			  PCH_CMASK_ARB | PCH_CMASK_CTRL,
+			  &priv->regs->ifregs[1].cmask);
+
+		pch_can_check_if_busy(&priv->regs->ifregs[1].creq, i);
+	}
 }
 
 static void pch_can_init(struct pch_can_priv *priv)
@@ -623,50 +535,50 @@
 	pch_can_set_int_enables(priv, PCH_CAN_NONE);
 
 	/* Disabling all the receive object. */
-	pch_can_rx_disable_all(priv);
+	pch_can_set_rx_all(priv, 0);
 
 	/* Disabling all the transmit object. */
-	pch_can_tx_disable_all(priv);
+	pch_can_set_tx_all(priv, 0);
 }
 
 /* This function clears interrupt(s) from the CAN device. */
 static void pch_can_int_clr(struct pch_can_priv *priv, u32 mask)
 {
-	if (mask == CAN_STATUS_INT) {
+	if (mask == PCH_STATUS_INT) {
 		ioread32(&priv->regs->stat);
 		return;
 	}
 
 	/* Clear interrupt for transmit object */
-	if (priv->msg_obj[mask - 1] == MSG_OBJ_TX) {
-		/* Setting CMASK for clearing interrupts for
-					 frame transmission. */
-		iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL | CAN_CMASK_ARB,
-			  &priv->regs->if2_cmask);
-
-		/* Resetting the ID registers. */
-		pch_can_bit_set(&priv->regs->if2_id2,
-			       CAN_ID2_DIR | (0x7ff << 2));
-		iowrite32(0x0, &priv->regs->if2_id1);
-
-		/* Claring NewDat, TxRqst & IntPnd */
-		pch_can_bit_clear(&priv->regs->if2_mcont,
-				  CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_INTPND |
-				  CAN_IF_MCONT_TXRQXT);
-		pch_can_check_if_busy(&priv->regs->if2_creq, mask);
-	} else if (priv->msg_obj[mask - 1] == MSG_OBJ_RX) {
+	if ((mask >= PCH_RX_OBJ_START) && (mask <= PCH_RX_OBJ_END)) {
 		/* Setting CMASK for clearing the reception interrupts. */
-		iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL | CAN_CMASK_ARB,
-			  &priv->regs->if1_cmask);
+		iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL | PCH_CMASK_ARB,
+			  &priv->regs->ifregs[0].cmask);
 
 		/* Clearing the Dir bit. */
-		pch_can_bit_clear(&priv->regs->if1_id2, CAN_ID2_DIR);
+		pch_can_bit_clear(&priv->regs->ifregs[0].id2, PCH_ID2_DIR);
 
 		/* Clearing NewDat & IntPnd */
-		pch_can_bit_clear(&priv->regs->if1_mcont,
-				  CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_INTPND);
+		pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
+				  PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_INTPND);
 
-		pch_can_check_if_busy(&priv->regs->if1_creq, mask);
+		pch_can_check_if_busy(&priv->regs->ifregs[0].creq, mask);
+	} else if ((mask >= PCH_TX_OBJ_START) && (mask <= PCH_TX_OBJ_END)) {
+		/* Setting CMASK for clearing interrupts for
+		 * frame transmission. */
+		iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL | PCH_CMASK_ARB,
+			  &priv->regs->ifregs[1].cmask);
+
+		/* Resetting the ID registers. */
+		pch_can_bit_set(&priv->regs->ifregs[1].id2,
+			       PCH_ID2_DIR | (0x7ff << 2));
+		iowrite32(0x0, &priv->regs->ifregs[1].id1);
+
+		/* Clearing NewDat, TxRqst & IntPnd */
+		pch_can_bit_clear(&priv->regs->ifregs[1].mcont,
+				  PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_INTPND |
+				  PCH_IF_MCONT_TXRQXT);
+		pch_can_check_if_busy(&priv->regs->ifregs[1].creq, mask);
 	}
 }
 
@@ -688,7 +600,7 @@
 	struct sk_buff *skb;
 	struct pch_can_priv *priv = netdev_priv(ndev);
 	struct can_frame *cf;
-	u32 errc;
+	u32 errc, lec;
 	struct net_device_stats *stats = &(priv->ndev->stats);
 	enum can_state state = priv->can.state;
 
@@ -697,8 +609,8 @@
 		return;
 
 	if (status & PCH_BUS_OFF) {
-		pch_can_tx_disable_all(priv);
-		pch_can_rx_disable_all(priv);
+		pch_can_set_tx_all(priv, 0);
+		pch_can_set_rx_all(priv, 0);
 		state = CAN_STATE_BUS_OFF;
 		cf->can_id |= CAN_ERR_BUSOFF;
 		can_bus_off(ndev);
@@ -712,9 +624,9 @@
 		priv->can.can_stats.error_warning++;
 		cf->can_id |= CAN_ERR_CRTL;
 		errc = ioread32(&priv->regs->errc);
-		if (((errc & CAN_REC) >> 8) > 96)
+		if (((errc & PCH_REC) >> 8) > 96)
 			cf->data[1] |= CAN_ERR_CRTL_RX_WARNING;
-		if ((errc & CAN_TEC) > 96)
+		if ((errc & PCH_TEC) > 96)
 			cf->data[1] |= CAN_ERR_CRTL_TX_WARNING;
 		dev_warn(&ndev->dev,
 			"%s -> Error Counter is more than 96.\n", __func__);
@@ -725,41 +637,45 @@
 		state = CAN_STATE_ERROR_PASSIVE;
 		cf->can_id |= CAN_ERR_CRTL;
 		errc = ioread32(&priv->regs->errc);
-		if (((errc & CAN_REC) >> 8) > 127)
+		if (((errc & PCH_REC) >> 8) > 127)
 			cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
-		if ((errc & CAN_TEC) > 127)
+		if ((errc & PCH_TEC) > 127)
 			cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
 		dev_err(&ndev->dev,
 			"%s -> CAN controller is ERROR PASSIVE .\n", __func__);
 	}
 
-	if (status & PCH_LEC_ALL) {
+	lec = status & PCH_LEC_ALL;
+	switch (lec) {
+	case PCH_STUF_ERR:
+		cf->data[2] |= CAN_ERR_PROT_STUFF;
 		priv->can.can_stats.bus_error++;
 		stats->rx_errors++;
-		switch (status & PCH_LEC_ALL) {
-		case PCH_STUF_ERR:
-			cf->data[2] |= CAN_ERR_PROT_STUFF;
-			break;
-		case PCH_FORM_ERR:
-			cf->data[2] |= CAN_ERR_PROT_FORM;
-			break;
-		case PCH_ACK_ERR:
-			cf->data[2] |= CAN_ERR_PROT_LOC_ACK |
-				       CAN_ERR_PROT_LOC_ACK_DEL;
-			break;
-		case PCH_BIT1_ERR:
-		case PCH_BIT0_ERR:
-			cf->data[2] |= CAN_ERR_PROT_BIT;
-			break;
-		case PCH_CRC_ERR:
-			cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ |
-				       CAN_ERR_PROT_LOC_CRC_DEL;
-			break;
-		default:
-			iowrite32(status | PCH_LEC_ALL, &priv->regs->stat);
-			break;
-		}
-
+		break;
+	case PCH_FORM_ERR:
+		cf->data[2] |= CAN_ERR_PROT_FORM;
+		priv->can.can_stats.bus_error++;
+		stats->rx_errors++;
+		break;
+	case PCH_ACK_ERR:
+		cf->can_id |= CAN_ERR_ACK;
+		priv->can.can_stats.bus_error++;
+		stats->rx_errors++;
+		break;
+	case PCH_BIT1_ERR:
+	case PCH_BIT0_ERR:
+		cf->data[2] |= CAN_ERR_PROT_BIT;
+		priv->can.can_stats.bus_error++;
+		stats->rx_errors++;
+		break;
+	case PCH_CRC_ERR:
+		cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ |
+			       CAN_ERR_PROT_LOC_CRC_DEL;
+		priv->can.can_stats.bus_error++;
+		stats->rx_errors++;
+		break;
+	case PCH_LEC_ALL: /* Written by CPU. No error status */
+		break;
 	}
 
 	priv->can.state = state;
@@ -795,22 +711,22 @@
 	struct net_device_stats *stats = &(priv->ndev->stats);
 
 	/* Reading the message object from the Message RAM */
-	iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
-	pch_can_check_if_busy(&priv->regs->if1_creq, int_stat);
+	iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
+	pch_can_check_if_busy(&priv->regs->ifregs[0].creq, int_stat);
 
 	/* Reading the MCONT register. */
-	reg = ioread32(&priv->regs->if1_mcont);
+	reg = ioread32(&priv->regs->ifregs[0].mcont);
 	reg &= 0xffff;
 
-	for (k = int_stat; !(reg & CAN_IF_MCONT_EOB); k++) {
+	for (k = int_stat; !(reg & PCH_IF_MCONT_EOB); k++) {
 		/* If MsgLost bit set. */
-		if (reg & CAN_IF_MCONT_MSGLOST) {
+		if (reg & PCH_IF_MCONT_MSGLOST) {
 			dev_err(&priv->ndev->dev, "Msg Obj is overwritten.\n");
-			pch_can_bit_clear(&priv->regs->if1_mcont,
-					  CAN_IF_MCONT_MSGLOST);
-			iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL,
-				  &priv->regs->if1_cmask);
-			pch_can_check_if_busy(&priv->regs->if1_creq, k);
+			pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
+					  PCH_IF_MCONT_MSGLOST);
+			iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL,
+				  &priv->regs->ifregs[0].cmask);
+			pch_can_check_if_busy(&priv->regs->ifregs[0].creq, k);
 
 			skb = alloc_can_err_skb(ndev, &cf);
 			if (!skb)
@@ -828,7 +744,7 @@
 			rcv_pkts++;
 			goto RX_NEXT;
 		}
-		if (!(reg & CAN_IF_MCONT_NEWDAT))
+		if (!(reg & PCH_IF_MCONT_NEWDAT))
 			goto RX_NEXT;
 
 		skb = alloc_can_skb(priv->ndev, &cf);
@@ -836,29 +752,30 @@
 			return -ENOMEM;
 
 		/* Get Received data */
-		ide = ((ioread32(&priv->regs->if1_id2)) & CAN_ID2_XTD) >> 14;
+		ide = ((ioread32(&priv->regs->ifregs[0].id2)) & PCH_ID2_XTD) >>
+									     14;
 		if (ide) {
-			id = (ioread32(&priv->regs->if1_id1) & 0xffff);
-			id |= (((ioread32(&priv->regs->if1_id2)) &
+			id = (ioread32(&priv->regs->ifregs[0].id1) & 0xffff);
+			id |= (((ioread32(&priv->regs->ifregs[0].id2)) &
 					    0x1fff) << 16);
 			cf->can_id = (id & CAN_EFF_MASK) | CAN_EFF_FLAG;
 		} else {
-			id = (((ioread32(&priv->regs->if1_id2)) &
-					  (CAN_SFF_MASK << 2)) >> 2);
+			id = (((ioread32(&priv->regs->ifregs[0].id2)) &
+						     (CAN_SFF_MASK << 2)) >> 2);
 			cf->can_id = (id & CAN_SFF_MASK);
 		}
 
-		rtr = (ioread32(&priv->regs->if1_id2) &  CAN_ID2_DIR);
+		rtr = (ioread32(&priv->regs->ifregs[0].id2) &  PCH_ID2_DIR);
 		if (rtr) {
 			cf->can_dlc = 0;
 			cf->can_id |= CAN_RTR_FLAG;
 		} else {
-			cf->can_dlc = ((ioread32(&priv->regs->if1_mcont)) &
-						   0x0f);
+			cf->can_dlc =
+			      ((ioread32(&priv->regs->ifregs[0].mcont)) & 0x0f);
 		}
 
 		for (i = 0, j = 0; i < cf->can_dlc; j++) {
-			reg = ioread32(&priv->regs->if1_dataa1 + j*4);
+			reg = ioread32(&priv->regs->ifregs[0].dataa1 + j*4);
 			cf->data[i++] = cpu_to_le32(reg & 0xff);
 			if (i == cf->can_dlc)
 				break;
@@ -871,16 +788,17 @@
 		stats->rx_bytes += cf->can_dlc;
 
 		if (k < PCH_FIFO_THRESH) {
-			iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL |
-				  CAN_CMASK_ARB, &priv->regs->if1_cmask);
+			iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL |
+				  PCH_CMASK_ARB, &priv->regs->ifregs[0].cmask);
 
 			/* Clearing the Dir bit. */
-			pch_can_bit_clear(&priv->regs->if1_id2, CAN_ID2_DIR);
+			pch_can_bit_clear(&priv->regs->ifregs[0].id2,
+					  PCH_ID2_DIR);
 
 			/* Clearing NewDat & IntPnd */
-			pch_can_bit_clear(&priv->regs->if1_mcont,
-					  CAN_IF_MCONT_INTPND);
-			pch_can_check_if_busy(&priv->regs->if1_creq, k);
+			pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
+					  PCH_IF_MCONT_INTPND);
+			pch_can_check_if_busy(&priv->regs->ifregs[0].creq, k);
 		} else if (k > PCH_FIFO_THRESH) {
 			pch_can_int_clr(priv, k);
 		} else if (k == PCH_FIFO_THRESH) {
@@ -890,9 +808,9 @@
 		}
 RX_NEXT:
 		/* Reading the message object from the Message RAM */
-		iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
-		pch_can_check_if_busy(&priv->regs->if1_creq, k + 1);
-		reg = ioread32(&priv->regs->if1_mcont);
+		iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
+		pch_can_check_if_busy(&priv->regs->ifregs[0].creq, k);
+		reg = ioread32(&priv->regs->ifregs[0].mcont);
 	}
 
 	return rcv_pkts;
@@ -906,14 +824,13 @@
 	u32 int_stat;
 	int rcv_pkts = 0;
 	u32 reg_stat;
-	unsigned long flags;
 
 	int_stat = pch_can_int_pending(priv);
 	if (!int_stat)
 		return 0;
 
 INT_STAT:
-	if (int_stat == CAN_STATUS_INT) {
+	if (int_stat == PCH_STATUS_INT) {
 		reg_stat = ioread32(&priv->regs->stat);
 		if (reg_stat & (PCH_BUS_OFF | PCH_LEC_ALL)) {
 			if ((reg_stat & PCH_LEC_ALL) != PCH_LEC_ALL)
@@ -921,11 +838,10 @@
 		}
 
 		if (reg_stat & PCH_TX_OK) {
-			spin_lock_irqsave(&priv->msgif_reg_lock, flags);
-			iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
-			pch_can_check_if_busy(&priv->regs->if2_creq,
+			iowrite32(PCH_CMASK_RX_TX_GET,
+				  &priv->regs->ifregs[1].cmask);
+			pch_can_check_if_busy(&priv->regs->ifregs[1].creq,
 					       ioread32(&priv->regs->intr));
-			spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
 			pch_can_bit_clear(&priv->regs->stat, PCH_TX_OK);
 		}
 
@@ -933,37 +849,32 @@
 			pch_can_bit_clear(&priv->regs->stat, PCH_RX_OK);
 
 		int_stat = pch_can_int_pending(priv);
-		if (int_stat == CAN_STATUS_INT)
+		if (int_stat == PCH_STATUS_INT)
 			goto INT_STAT;
 	}
 
 MSG_OBJ:
-	if ((int_stat >= 1) && (int_stat <= PCH_RX_OBJ_NUM)) {
-		spin_lock_irqsave(&priv->msgif_reg_lock, flags);
+	if ((int_stat >= PCH_RX_OBJ_START) && (int_stat <= PCH_RX_OBJ_END)) {
 		rcv_pkts = pch_can_rx_normal(ndev, int_stat);
-		spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
 		if (rcv_pkts < 0)
 			return 0;
-	} else if ((int_stat > PCH_RX_OBJ_NUM) && (int_stat <= PCH_OBJ_NUM)) {
-		if (priv->msg_obj[int_stat - 1] == MSG_OBJ_TX) {
-			/* Handle transmission interrupt */
-			can_get_echo_skb(ndev, int_stat - PCH_RX_OBJ_NUM - 1);
-			spin_lock_irqsave(&priv->msgif_reg_lock, flags);
-			iowrite32(CAN_CMASK_RX_TX_GET | CAN_CMASK_CLRINTPND,
-				  &priv->regs->if2_cmask);
-			dlc = ioread32(&priv->regs->if2_mcont) &
-				       CAN_IF_MCONT_DLC;
-			pch_can_check_if_busy(&priv->regs->if2_creq, int_stat);
-			spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
-			if (dlc > 8)
-				dlc = 8;
-			stats->tx_bytes += dlc;
-			stats->tx_packets++;
-		}
+	} else if ((int_stat >= PCH_TX_OBJ_START) &&
+		   (int_stat <= PCH_TX_OBJ_END)) {
+		/* Handle transmission interrupt */
+		can_get_echo_skb(ndev, int_stat - PCH_RX_OBJ_END - 1);
+		iowrite32(PCH_CMASK_RX_TX_GET | PCH_CMASK_CLRINTPND,
+			  &priv->regs->ifregs[1].cmask);
+		dlc = ioread32(&priv->regs->ifregs[1].mcont) &
+			       PCH_IF_MCONT_DLC;
+		pch_can_check_if_busy(&priv->regs->ifregs[1].creq, int_stat);
+		if (dlc > 8)
+			dlc = 8;
+		stats->tx_bytes += dlc;
+		stats->tx_packets++;
 	}
 
 	int_stat = pch_can_int_pending(priv);
-	if (int_stat == CAN_STATUS_INT)
+	if (int_stat == PCH_STATUS_INT)
 		goto INT_STAT;
 	else if (int_stat >= 1 && int_stat <= 32)
 		goto MSG_OBJ;
@@ -983,17 +894,17 @@
 	u32 brp;
 
 	/* Setting the CCE bit for accessing the Can Timing register. */
-	pch_can_bit_set(&priv->regs->cont, CAN_CTRL_CCE);
+	pch_can_bit_set(&priv->regs->cont, PCH_CTRL_CCE);
 
 	brp = (bt->tq) / (1000000000/PCH_CAN_CLK) - 1;
-	canbit = brp & MSK_BITT_BRP;
-	canbit |= (bt->sjw - 1) << BIT_BITT_SJW;
-	canbit |= (bt->phase_seg1 + bt->prop_seg - 1) << BIT_BITT_TSEG1;
-	canbit |= (bt->phase_seg2 - 1) << BIT_BITT_TSEG2;
-	bepe = (brp & MSK_BRPE_BRPE) >> BIT_BRPE_BRPE;
+	canbit = brp & PCH_MSK_BITT_BRP;
+	canbit |= (bt->sjw - 1) << PCH_BIT_SJW;
+	canbit |= (bt->phase_seg1 + bt->prop_seg - 1) << PCH_BIT_TSEG1;
+	canbit |= (bt->phase_seg2 - 1) << PCH_BIT_TSEG2;
+	bepe = (brp & PCH_MSK_BRPE_BRPE) >> PCH_BIT_BRPE_BRPE;
 	iowrite32(canbit, &priv->regs->bitt);
 	iowrite32(bepe, &priv->regs->brpe);
-	pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_CCE);
+	pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_CCE);
 
 	return 0;
 }
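
The BRP value above is just the requested time quantum expressed in input-clock ticks, minus one. A standalone sketch of that arithmetic, assuming PCH_CAN_CLK is 50 MHz for illustration (the real value lives in the driver header):

	#include <stdint.h>

	#define PCH_CAN_CLK 50000000	/* assumed input clock, Hz */

	static uint32_t pch_brp(uint32_t tq_ns)
	{
		/* one input clock tick is 1000000000 / PCH_CAN_CLK ns
		 * (20 ns here), so a 100 ns time quantum gives
		 * brp = 100 / 20 - 1 = 4 */
		return tq_ns / (1000000000 / PCH_CAN_CLK) - 1;
	}
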
@@ -1008,8 +919,8 @@
 	pch_set_bittiming(ndev);
 	pch_can_set_optmode(priv);
 
-	pch_can_tx_enable_all(priv);
-	pch_can_rx_enable_all(priv);
+	pch_can_set_tx_all(priv, 1);
+	pch_can_set_rx_all(priv, 1);
 
 	/* Setting the CAN to run mode. */
 	pch_can_set_run_mode(priv, PCH_CAN_RUN);
@@ -1113,7 +1024,6 @@
 static netdev_tx_t pch_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
 	int i, j;
-	unsigned long flags;
 	struct pch_can_priv *priv = netdev_priv(ndev);
 	struct can_frame *cf = (struct can_frame *)skb->data;
 	int tx_buffer_avail = 0;
@@ -1121,72 +1031,68 @@
 	if (can_dropped_invalid_skb(ndev, skb))
 		return NETDEV_TX_OK;
 
-	if (priv->tx_obj == (PCH_OBJ_NUM + 1)) { /* Point tail Obj */
+	if (priv->tx_obj == PCH_TX_OBJ_END) { /* Point tail Obj */
 		while (pch_get_msg_obj_sts(ndev, (((1 << PCH_TX_OBJ_NUM)-1) <<
 					   PCH_RX_OBJ_NUM)))
 			udelay(500);
 
-		priv->tx_obj = PCH_RX_OBJ_NUM + 1; /* Point head of Tx Obj ID */
+		priv->tx_obj = PCH_TX_OBJ_START; /* Point head of Tx Obj ID */
 		tx_buffer_avail = priv->tx_obj; /* Point Tail of Tx Obj */
 	} else {
 		tx_buffer_avail = priv->tx_obj;
 	}
 	priv->tx_obj++;
 
-	/* Attaining the lock. */
-	spin_lock_irqsave(&priv->msgif_reg_lock, flags);
-
 	/* Reading the Msg Obj from the Msg RAM to the Interface register. */
-	iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
-	pch_can_check_if_busy(&priv->regs->if2_creq, tx_buffer_avail);
+	iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[1].cmask);
+	pch_can_check_if_busy(&priv->regs->ifregs[1].creq, tx_buffer_avail);
 
 	/* Setting the CMASK register. */
-	pch_can_bit_set(&priv->regs->if2_cmask, CAN_CMASK_ALL);
+	pch_can_bit_set(&priv->regs->ifregs[1].cmask, PCH_CMASK_ALL);
 
 	/* If ID extended is set. */
-	pch_can_bit_clear(&priv->regs->if2_id1, 0xffff);
-	pch_can_bit_clear(&priv->regs->if2_id2, 0x1fff | CAN_ID2_XTD);
+	pch_can_bit_clear(&priv->regs->ifregs[1].id1, 0xffff);
+	pch_can_bit_clear(&priv->regs->ifregs[1].id2, 0x1fff | PCH_ID2_XTD);
 	if (cf->can_id & CAN_EFF_FLAG) {
-		pch_can_bit_set(&priv->regs->if2_id1, cf->can_id & 0xffff);
-		pch_can_bit_set(&priv->regs->if2_id2,
-				((cf->can_id >> 16) & 0x1fff) | CAN_ID2_XTD);
+		pch_can_bit_set(&priv->regs->ifregs[1].id1,
+				cf->can_id & 0xffff);
+		pch_can_bit_set(&priv->regs->ifregs[1].id2,
+				((cf->can_id >> 16) & 0x1fff) | PCH_ID2_XTD);
 	} else {
-		pch_can_bit_set(&priv->regs->if2_id1, 0);
-		pch_can_bit_set(&priv->regs->if2_id2,
+		pch_can_bit_set(&priv->regs->ifregs[1].id1, 0);
+		pch_can_bit_set(&priv->regs->ifregs[1].id2,
 				(cf->can_id & CAN_SFF_MASK) << 2);
 	}
 
 	/* If remote frame has to be transmitted.. */
 	if (cf->can_id & CAN_RTR_FLAG)
-		pch_can_bit_clear(&priv->regs->if2_id2, CAN_ID2_DIR);
+		pch_can_bit_clear(&priv->regs->ifregs[1].id2, PCH_ID2_DIR);
 
 	for (i = 0, j = 0; i < cf->can_dlc; j++) {
 		iowrite32(le32_to_cpu(cf->data[i++]),
-			 (&priv->regs->if2_dataa1) + j*4);
+			 (&priv->regs->ifregs[1].dataa1) + j*4);
 		if (i == cf->can_dlc)
 			break;
 		iowrite32(le32_to_cpu(cf->data[i++] << 8),
-			 (&priv->regs->if2_dataa1) + j*4);
+			 (&priv->regs->ifregs[1].dataa1) + j*4);
 	}
 
-	can_put_echo_skb(skb, ndev, tx_buffer_avail - PCH_RX_OBJ_NUM - 1);
+	can_put_echo_skb(skb, ndev, tx_buffer_avail - PCH_RX_OBJ_END - 1);
 
 	/* Updating the size of the data. */
-	pch_can_bit_clear(&priv->regs->if2_mcont, 0x0f);
-	pch_can_bit_set(&priv->regs->if2_mcont, cf->can_dlc);
+	pch_can_bit_clear(&priv->regs->ifregs[1].mcont, 0x0f);
+	pch_can_bit_set(&priv->regs->ifregs[1].mcont, cf->can_dlc);
 
 	/* Clearing IntPend, NewDat & TxRqst */
-	pch_can_bit_clear(&priv->regs->if2_mcont,
-			  CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_INTPND |
-			  CAN_IF_MCONT_TXRQXT);
+	pch_can_bit_clear(&priv->regs->ifregs[1].mcont,
+			  PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_INTPND |
+			  PCH_IF_MCONT_TXRQXT);
 
 	/* Setting NewDat, TxRqst bits */
-	pch_can_bit_set(&priv->regs->if2_mcont,
-			CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_TXRQXT);
+	pch_can_bit_set(&priv->regs->ifregs[1].mcont,
+			PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_TXRQXT);
 
-	pch_can_check_if_busy(&priv->regs->if2_creq, tx_buffer_avail);
-
-	spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
+	pch_can_check_if_busy(&priv->regs->ifregs[1].creq, tx_buffer_avail);
 
 	return NETDEV_TX_OK;
 }
@@ -1244,27 +1150,20 @@
 	pch_can_set_int_enables(priv, PCH_CAN_DISABLE);
 
 	/* Save Tx buffer enable state */
-	for (i = 0; i < PCH_OBJ_NUM; i++) {
-		if (priv->msg_obj[i] == MSG_OBJ_TX)
-			pch_can_get_tx_enable(priv, i + 1,
-					      &(priv->tx_enable[i]));
-	}
+	for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++)
+		priv->tx_enable[i] = pch_can_get_rxtx_ir(priv, i, PCH_TX_IFREG);
 
 	/* Disable all Transmit buffers */
-	pch_can_tx_disable_all(priv);
+	pch_can_set_tx_all(priv, 0);
 
 	/* Save Rx buffer enable state */
-	for (i = 0; i < PCH_OBJ_NUM; i++) {
-		if (priv->msg_obj[i] == MSG_OBJ_RX) {
-			pch_can_get_rx_enable(priv, i + 1,
-						&(priv->rx_enable[i]));
-			pch_can_get_rx_buffer_link(priv, i + 1,
-						&(priv->rx_link[i]));
-		}
+	for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) {
+		priv->rx_enable[i] = pch_can_get_rxtx_ir(priv, i, PCH_RX_IFREG);
+		pch_can_get_rx_buffer_link(priv, i, &priv->rx_link[i]);
 	}
 
 	/* Disable all Receive buffers */
-	pch_can_rx_disable_all(priv);
+	pch_can_set_rx_all(priv, 0);
 	retval = pci_save_state(pdev);
 	if (retval) {
 		dev_err(&pdev->dev, "pci_save_state failed.\n");
@@ -1312,23 +1211,16 @@
 	pch_can_set_optmode(priv);
 
 	/* Enabling the transmit buffer. */
-	for (i = 0; i < PCH_OBJ_NUM; i++) {
-		if (priv->msg_obj[i] == MSG_OBJ_TX) {
-			pch_can_set_tx_enable(priv, i + 1,
-					      priv->tx_enable[i]);
-		}
-	}
+	for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++)
+		pch_can_set_rxtx(priv, i, priv->tx_enable[i], PCH_TX_IFREG);
 
 	/* Configuring the receive buffer and enabling them. */
-	for (i = 0; i < PCH_OBJ_NUM; i++) {
-		if (priv->msg_obj[i] == MSG_OBJ_RX) {
-			/* Restore buffer link */
-			pch_can_set_rx_buffer_link(priv, i + 1,
-						   priv->rx_link[i]);
+	for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) {
+		/* Restore buffer link */
+		pch_can_set_rx_buffer_link(priv, i, priv->rx_link[i]);
 
-			/* Restore buffer enables */
-			pch_can_set_rx_enable(priv, i + 1, priv->rx_enable[i]);
-		}
+		/* Restore buffer enables */
+		pch_can_set_rxtx(priv, i, priv->rx_enable[i], PCH_RX_IFREG);
 	}
 
 	/* Enable CAN Interrupts */
@@ -1349,8 +1241,8 @@
 {
 	struct pch_can_priv *priv = netdev_priv(dev);
 
-	bec->txerr = ioread32(&priv->regs->errc) & CAN_TEC;
-	bec->rxerr = (ioread32(&priv->regs->errc) & CAN_REC) >> 8;
+	bec->txerr = ioread32(&priv->regs->errc) & PCH_TEC;
+	bec->rxerr = (ioread32(&priv->regs->errc) & PCH_REC) >> 8;
 
 	return 0;
 }
@@ -1361,7 +1253,6 @@
 	struct net_device *ndev;
 	struct pch_can_priv *priv;
 	int rc;
-	int index;
 	void __iomem *addr;
 
 	rc = pci_enable_device(pdev);
@@ -1383,7 +1274,7 @@
 		goto probe_exit_ipmap;
 	}
 
-	ndev = alloc_candev(sizeof(struct pch_can_priv), PCH_TX_OBJ_NUM);
+	ndev = alloc_candev(sizeof(struct pch_can_priv), PCH_TX_OBJ_END);
 	if (!ndev) {
 		rc = -ENOMEM;
 		dev_err(&pdev->dev, "Failed alloc_candev\n");
@@ -1399,7 +1290,7 @@
 	priv->can.do_get_berr_counter = pch_can_get_berr_counter;
 	priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
 				       CAN_CTRLMODE_LOOPBACK;
-	priv->tx_obj = PCH_RX_OBJ_NUM + 1; /* Point head of Tx Obj */
+	priv->tx_obj = PCH_TX_OBJ_START; /* Point head of Tx Obj */
 
 	ndev->irq = pdev->irq;
 	ndev->flags |= IFF_ECHO;
@@ -1407,15 +1298,9 @@
 	pci_set_drvdata(pdev, ndev);
 	SET_NETDEV_DEV(ndev, &pdev->dev);
 	ndev->netdev_ops = &pch_can_netdev_ops;
-
 	priv->can.clock.freq = PCH_CAN_CLK; /* Hz */
-	for (index = 0; index < PCH_RX_OBJ_NUM;)
-		priv->msg_obj[index++] = MSG_OBJ_RX;
 
-	for (index = index;  index < PCH_OBJ_NUM;)
-		priv->msg_obj[index++] = MSG_OBJ_TX;
-
-	netif_napi_add(ndev, &priv->napi, pch_can_rx_poll, PCH_RX_OBJ_NUM);
+	netif_napi_add(ndev, &priv->napi, pch_can_rx_poll, PCH_RX_OBJ_END);
 
 	rc = register_candev(ndev);
 	if (rc) {
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
index 5bfccfd..09c3e9d 100644
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -107,17 +107,13 @@
 	res_size = resource_size(&res);
 
 	if (!request_mem_region(res.start, res_size, DRV_NAME)) {
-		dev_err(&ofdev->dev, "couldn't request %#llx..%#llx\n",
-			(unsigned long long)res.start,
-			(unsigned long long)res.end);
+		dev_err(&ofdev->dev, "couldn't request %pR\n", &res);
 		return -EBUSY;
 	}
 
 	base = ioremap_nocache(res.start, res_size);
 	if (!base) {
-		dev_err(&ofdev->dev, "couldn't ioremap %#llx..%#llx\n",
-			(unsigned long long)res.start,
-			(unsigned long long)res.end);
+		dev_err(&ofdev->dev, "couldn't ioremap %pR\n", &res);
 		err = -ENOMEM;
 		goto exit_release_mem;
 	}
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
new file mode 100644
index 0000000..b423965
--- /dev/null
+++ b/drivers/net/can/slcan.c
@@ -0,0 +1,756 @@
+/*
+ * slcan.c - serial line CAN interface driver (using tty line discipline)
+ *
+ * This file is derived from linux/drivers/net/slip.c
+ *
+ * slip.c Authors  : Laurence Culhane <loz@holmes.demon.co.uk>
+ *                   Fred N. van Kempen <waltje@uwalt.nl.mugnet.org>
+ * slcan.c Author  : Oliver Hartkopp <socketcan@hartkopp.net>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place, Suite 330, Boston, MA 02111-1307. You can also get it
+ * at http://www.gnu.org/licenses/gpl.html
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ *
+ * Send feedback to <socketcan-users@lists.berlios.de>
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#include <asm/system.h>
+#include <linux/uaccess.h>
+#include <linux/bitops.h>
+#include <linux/string.h>
+#include <linux/tty.h>
+#include <linux/errno.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/can.h>
+
+static __initdata const char banner[] =
+	KERN_INFO "slcan: serial line CAN interface driver\n";
+
+MODULE_ALIAS_LDISC(N_SLCAN);
+MODULE_DESCRIPTION("serial line CAN interface");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Oliver Hartkopp <socketcan@hartkopp.net>");
+
+#define SLCAN_MAGIC 0x53CA
+
+static int maxdev = 10;		/* MAX number of SLCAN channels;
+				   This can be overridden with
+				   insmod slcan.ko maxdev=nnn	*/
+module_param(maxdev, int, 0);
+MODULE_PARM_DESC(maxdev, "Maximum number of slcan interfaces");
+
+/* maximum rx buffer len: extended CAN frame with timestamp */
+#define SLC_MTU (sizeof("T1111222281122334455667788EA5F\r")+1)
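
That template string is the worst case: a 29-bit data frame with a timestamp. As a quick sanity check of the sizing (the breakdown below is inferred from the format comment that follows, not taken from the patch):

	#include <assert.h>

	#define SLC_MTU (sizeof("T1111222281122334455667788EA5F\r")+1)

	int main(void)
	{
		/* 'T' (1) + id (8) + dlc (1) + 8 data bytes as hex (16) +
		 * timestamp (4) + '\r' (1) = 31 chars; sizeof() adds the
		 * NUL and the +1 one spare byte, giving 33. */
		assert(SLC_MTU == 33);
		return 0;
	}
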
+
+struct slcan {
+	int			magic;
+
+	/* Various fields. */
+	struct tty_struct	*tty;		/* ptr to TTY structure	     */
+	struct net_device	*dev;		/* easy for intr handling    */
+	spinlock_t		lock;
+
+	/* These are pointers to the malloc()ed frame buffers. */
+	unsigned char		rbuff[SLC_MTU];	/* receiver buffer	     */
+	int			rcount;         /* received chars counter    */
+	unsigned char		xbuff[SLC_MTU];	/* transmitter buffer	     */
+	unsigned char		*xhead;         /* pointer to next XMIT byte */
+	int			xleft;          /* bytes left in XMIT queue  */
+
+	unsigned long		flags;		/* Flag values/ mode etc     */
+#define SLF_INUSE		0		/* Channel in use            */
+#define SLF_ERROR		1               /* Parity, etc. error        */
+
+	unsigned char		leased;
+	dev_t			line;
+	pid_t			pid;
+};
+
+static struct net_device **slcan_devs;
+
+ /************************************************************************
+  *			SLCAN ENCAPSULATION FORMAT			 *
+  ************************************************************************/
+
+/*
+ * A CAN frame has a can_id (11 bit standard frame format OR 29 bit extended
+ * frame format), a data length code (can_dlc) which can be from 0 to 8
+ * and up to <can_dlc> data bytes as payload.
+ * Additionally a CAN frame may become a remote transmission frame if the
+ * RTR-bit is set. This causes another ECU to send a CAN frame with the
+ * given can_id.
+ *
+ * The SLCAN ASCII representation of these different frame types is:
+ * <type> <id> <dlc> <data>*
+ *
+ * Extended frames (29 bit) are defined by capital characters in the type.
+ * RTR frames are defined as 'r' types - normal frames have 't' type:
+ * t => 11 bit data frame
+ * r => 11 bit RTR frame
+ * T => 29 bit data frame
+ * R => 29 bit RTR frame
+ *
+ * The <id> is 3 (standard) or 8 (extended) bytes in ASCII Hex (base 16).
+ * The <dlc> is a one byte ASCII number ('0' - '8').
+ * The <data> section has as many ASCII Hex bytes as defined by the <dlc>.
+ *
+ * Examples:
+ *
+ * t1230 : can_id 0x123, can_dlc 0, no data
+ * t4563112233 : can_id 0x456, can_dlc 3, data 0x11 0x22 0x33
+ * T12ABCDEF2AA55 : extended can_id 0x12ABCDEF, can_dlc 2, data 0xAA 0x55
+ * r1230 : can_id 0x123, can_dlc 0, no data, remote transmission request
+ *
+ */
+
+ /************************************************************************
+  *			STANDARD SLCAN DECAPSULATION			 *
+  ************************************************************************/
+
+static int asc2nibble(char c)
+{
+
+	if ((c >= '0') && (c <= '9'))
+		return c - '0';
+
+	if ((c >= 'A') && (c <= 'F'))
+		return c - 'A' + 10;
+
+	if ((c >= 'a') && (c <= 'f'))
+		return c - 'a' + 10;
+
+	return 16; /* error */
+}
+
+/* Send one completely decapsulated can_frame to the network layer */
+static void slc_bump(struct slcan *sl)
+{
+	struct sk_buff *skb;
+	struct can_frame cf;
+	int i, dlc_pos, tmp;
+	unsigned long ultmp;
+	char cmd = sl->rbuff[0];
+
+	if ((cmd != 't') && (cmd != 'T') && (cmd != 'r') && (cmd != 'R'))
+		return;
+
+	if (cmd & 0x20) /* tiny chars 'r' 't' => standard frame format */
+		dlc_pos = 4; /* dlc position tiiid */
+	else
+		dlc_pos = 9; /* dlc position Tiiiiiiiid */
+
+	if (!((sl->rbuff[dlc_pos] >= '0') && (sl->rbuff[dlc_pos] < '9')))
+		return;
+
+	cf.can_dlc = sl->rbuff[dlc_pos] - '0'; /* get can_dlc from ASCII val */
+
+	sl->rbuff[dlc_pos] = 0; /* terminate can_id string */
+
+	if (strict_strtoul(sl->rbuff+1, 16, &ultmp))
+		return;
+
+	cf.can_id = ultmp;
+
+	if (!(cmd & 0x20)) /* NO tiny chars => extended frame format */
+		cf.can_id |= CAN_EFF_FLAG;
+
+	if ((cmd | 0x20) == 'r') /* RTR frame */
+		cf.can_id |= CAN_RTR_FLAG;
+
+	*(u64 *) (&cf.data) = 0; /* clear payload */
+
+	for (i = 0, dlc_pos++; i < cf.can_dlc; i++) {
+
+		tmp = asc2nibble(sl->rbuff[dlc_pos++]);
+		if (tmp > 0x0F)
+			return;
+		cf.data[i] = (tmp << 4);
+		tmp = asc2nibble(sl->rbuff[dlc_pos++]);
+		if (tmp > 0x0F)
+			return;
+		cf.data[i] |= tmp;
+	}
+
+
+	skb = dev_alloc_skb(sizeof(struct can_frame));
+	if (!skb)
+		return;
+
+	skb->dev = sl->dev;
+	skb->protocol = htons(ETH_P_CAN);
+	skb->pkt_type = PACKET_BROADCAST;
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
+	memcpy(skb_put(skb, sizeof(struct can_frame)),
+	       &cf, sizeof(struct can_frame));
+	netif_rx(skb);
+
+	sl->dev->stats.rx_packets++;
+	sl->dev->stats.rx_bytes += cf.can_dlc;
+}
+
+/* parse tty input stream */
+static void slcan_unesc(struct slcan *sl, unsigned char s)
+{
+
+	if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */
+		if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
+		    (sl->rcount > 4))  {
+			slc_bump(sl);
+		}
+		sl->rcount = 0;
+	} else {
+		if (!test_bit(SLF_ERROR, &sl->flags))  {
+			if (sl->rcount < SLC_MTU)  {
+				sl->rbuff[sl->rcount++] = s;
+				return;
+			} else {
+				sl->dev->stats.rx_over_errors++;
+				set_bit(SLF_ERROR, &sl->flags);
+			}
+		}
+	}
+}
+
+ /************************************************************************
+  *			STANDARD SLCAN ENCAPSULATION			 *
+  ************************************************************************/
+
+/* Encapsulate one can_frame and stuff into a TTY queue. */
+static void slc_encaps(struct slcan *sl, struct can_frame *cf)
+{
+	int actual, idx, i;
+	char cmd;
+
+	if (cf->can_id & CAN_RTR_FLAG)
+		cmd = 'R'; /* becomes 'r' in standard frame format */
+	else
+		cmd = 'T'; /* becomes 't' in standard frame format */
+
+	if (cf->can_id & CAN_EFF_FLAG)
+		sprintf(sl->xbuff, "%c%08X%d", cmd,
+			cf->can_id & CAN_EFF_MASK, cf->can_dlc);
+	else
+		sprintf(sl->xbuff, "%c%03X%d", cmd | 0x20,
+			cf->can_id & CAN_SFF_MASK, cf->can_dlc);
+
+	idx = strlen(sl->xbuff);
+
+	for (i = 0; i < cf->can_dlc; i++)
+		sprintf(&sl->xbuff[idx + 2*i], "%02X", cf->data[i]);
+
+	strcat(sl->xbuff, "\r"); /* add terminating character */
+
+	/* Order of next two lines is *very* important.
+	 * When we are sending a small amount of data,
+	 * the transfer may be completed inside the ops->write()
+	 * routine, because it runs with interrupts enabled.
+	 * In that case we would *never* get a WRITE_WAKEUP event
+	 * if we did not request it before the write operation.
+	 *       14 Oct 1994  Dmitry Gorodchanin.
+	 */
+	set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
+	actual = sl->tty->ops->write(sl->tty, sl->xbuff, strlen(sl->xbuff));
+	sl->xleft = strlen(sl->xbuff) - actual;
+	sl->xhead = sl->xbuff + actual;
+	sl->dev->stats.tx_bytes += cf->can_dlc;
+}
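
The encoding can be exercised in plain userspace with the same format strings; a minimal sketch (the values are purely illustrative, not taken from the driver):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		unsigned int canid = 0x123;
		int dlc = 2, i, n;
		unsigned char data[] = { 0xAA, 0x55 };
		char buf[32];

		n = sprintf(buf, "t%03X%d", canid, dlc);	/* "t1232" */
		for (i = 0; i < dlc; i++)
			n += sprintf(buf + n, "%02X", data[i]);
		strcat(buf, "\r");	/* terminating character, as above */
		printf("%s\n", buf);	/* wire format: t1232AA55<CR> */
		return 0;
	}
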
+
+/*
+ * Called by the driver when there's room for more data.  If we have
+ * more packets to send, we send them here.
+ */
+static void slcan_write_wakeup(struct tty_struct *tty)
+{
+	int actual;
+	struct slcan *sl = (struct slcan *) tty->disc_data;
+
+	/* First make sure we're connected. */
+	if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
+		return;
+
+	if (sl->xleft <= 0)  {
+		/* Now the serial buffer is almost free and we can start
+		 * transmission of another packet */
+		sl->dev->stats.tx_packets++;
+		clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+		netif_wake_queue(sl->dev);
+		return;
+	}
+
+	actual = tty->ops->write(tty, sl->xhead, sl->xleft);
+	sl->xleft -= actual;
+	sl->xhead += actual;
+}
+
+/* Send a can_frame to a TTY queue. */
+static netdev_tx_t slc_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct slcan *sl = netdev_priv(dev);
+
+	if (skb->len != sizeof(struct can_frame))
+		goto out;
+
+	spin_lock(&sl->lock);
+	if (!netif_running(dev))  {
+		spin_unlock(&sl->lock);
+		printk(KERN_WARNING "%s: xmit: iface is down\n", dev->name);
+		goto out;
+	}
+	if (sl->tty == NULL) {
+		spin_unlock(&sl->lock);
+		goto out;
+	}
+
+	netif_stop_queue(sl->dev);
+	slc_encaps(sl, (struct can_frame *) skb->data); /* encaps & send */
+	spin_unlock(&sl->lock);
+
+out:
+	kfree_skb(skb);
+	return NETDEV_TX_OK;
+}
+
+
+/******************************************
+ *   Routines looking at netdevice side.
+ ******************************************/
+
+/* Netdevice UP -> DOWN routine */
+static int slc_close(struct net_device *dev)
+{
+	struct slcan *sl = netdev_priv(dev);
+
+	spin_lock_bh(&sl->lock);
+	if (sl->tty) {
+		/* TTY discipline is running. */
+		clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
+	}
+	netif_stop_queue(dev);
+	sl->rcount   = 0;
+	sl->xleft    = 0;
+	spin_unlock_bh(&sl->lock);
+
+	return 0;
+}
+
+/* Netdevice DOWN -> UP routine */
+static int slc_open(struct net_device *dev)
+{
+	struct slcan *sl = netdev_priv(dev);
+
+	if (sl->tty == NULL)
+		return -ENODEV;
+
+	sl->flags &= (1 << SLF_INUSE);
+	netif_start_queue(dev);
+	return 0;
+}
+
+/* Hook the destructor so we can free slcan devs at the right point in time */
+static void slc_free_netdev(struct net_device *dev)
+{
+	int i = dev->base_addr;
+	free_netdev(dev);
+	slcan_devs[i] = NULL;
+}
+
+static const struct net_device_ops slc_netdev_ops = {
+	.ndo_open               = slc_open,
+	.ndo_stop               = slc_close,
+	.ndo_start_xmit         = slc_xmit,
+};
+
+static void slc_setup(struct net_device *dev)
+{
+	dev->netdev_ops		= &slc_netdev_ops;
+	dev->destructor		= slc_free_netdev;
+
+	dev->hard_header_len	= 0;
+	dev->addr_len		= 0;
+	dev->tx_queue_len	= 10;
+
+	dev->mtu		= sizeof(struct can_frame);
+	dev->type		= ARPHRD_CAN;
+
+	/* New-style flags. */
+	dev->flags		= IFF_NOARP;
+	dev->features           = NETIF_F_NO_CSUM;
+}
+
+/******************************************
+  Routines looking at TTY side.
+ ******************************************/
+
+/*
+ * Handle the 'receiver data ready' interrupt.
+ * This function is called by the 'tty_io' module in the kernel when
+ * a block of SLCAN data has been received, which can now be decapsulated
+ * and sent on to some IP layer for further processing. This will not
+ * be re-entered while running but other ldisc functions may be called
+ * in parallel
+ */
+
+static void slcan_receive_buf(struct tty_struct *tty,
+			      const unsigned char *cp, char *fp, int count)
+{
+	struct slcan *sl = (struct slcan *) tty->disc_data;
+
+	if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
+		return;
+
+	/* Read the characters out of the buffer */
+	while (count--) {
+		if (fp && *fp++) {
+			if (!test_and_set_bit(SLF_ERROR, &sl->flags))
+				sl->dev->stats.rx_errors++;
+			cp++;
+			continue;
+		}
+		slcan_unesc(sl, *cp++);
+	}
+}
+
+/************************************
+ *  slcan_open helper routines.
+ ************************************/
+
+/* Collect hanged up channels */
+static void slc_sync(void)
+{
+	int i;
+	struct net_device *dev;
+	struct slcan	  *sl;
+
+	for (i = 0; i < maxdev; i++) {
+		dev = slcan_devs[i];
+		if (dev == NULL)
+			break;
+
+		sl = netdev_priv(dev);
+		if (sl->tty || sl->leased)
+			continue;
+		if (dev->flags & IFF_UP)
+			dev_close(dev);
+	}
+}
+
+/* Find a free SLCAN channel, and link in this `tty' line. */
+static struct slcan *slc_alloc(dev_t line)
+{
+	int i;
+	struct net_device *dev = NULL;
+	struct slcan       *sl;
+
+	if (slcan_devs == NULL)
+		return NULL;	/* Master array missing ! */
+
+	for (i = 0; i < maxdev; i++) {
+		dev = slcan_devs[i];
+		if (dev == NULL)
+			break;
+
+	}
+
+	/* Sorry, too many, all slots in use */
+	if (i >= maxdev)
+		return NULL;
+
+	if (dev) {
+		sl = netdev_priv(dev);
+		if (test_bit(SLF_INUSE, &sl->flags)) {
+			unregister_netdevice(dev);
+			dev = NULL;
+			slcan_devs[i] = NULL;
+		}
+	}
+
+	if (!dev) {
+		char name[IFNAMSIZ];
+		sprintf(name, "slcan%d", i);
+
+		dev = alloc_netdev(sizeof(*sl), name, slc_setup);
+		if (!dev)
+			return NULL;
+		dev->base_addr  = i;
+	}
+
+	sl = netdev_priv(dev);
+
+	/* Initialize channel control data */
+	sl->magic = SLCAN_MAGIC;
+	sl->dev	= dev;
+	spin_lock_init(&sl->lock);
+	slcan_devs[i] = dev;
+
+	return sl;
+}
+
+/*
+ * Open the high-level part of the SLCAN channel.
+ * This function is called by the TTY module when the
+ * SLCAN line discipline is called for.  Because we are
+ * sure the tty line exists, we only have to link it to
+ * a free SLCAN channel...
+ *
+ * Called in process context serialized from other ldisc calls.
+ */
+
+static int slcan_open(struct tty_struct *tty)
+{
+	struct slcan *sl;
+	int err;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (tty->ops->write == NULL)
+		return -EOPNOTSUPP;
+
+	/* RTnetlink lock is misused here to serialize concurrent
+	   opens of slcan channels. There are better ways, but it is
+	   the simplest one.
+	 */
+	rtnl_lock();
+
+	/* Collect hanged up channels. */
+	slc_sync();
+
+	sl = tty->disc_data;
+
+	err = -EEXIST;
+	/* First make sure we're not already connected. */
+	if (sl && sl->magic == SLCAN_MAGIC)
+		goto err_exit;
+
+	/* OK.  Find a free SLCAN channel to use. */
+	err = -ENFILE;
+	sl = slc_alloc(tty_devnum(tty));
+	if (sl == NULL)
+		goto err_exit;
+
+	sl->tty = tty;
+	tty->disc_data = sl;
+	sl->line = tty_devnum(tty);
+	sl->pid = current->pid;
+
+	if (!test_bit(SLF_INUSE, &sl->flags)) {
+		/* Perform the low-level SLCAN initialization. */
+		sl->rcount   = 0;
+		sl->xleft    = 0;
+
+		set_bit(SLF_INUSE, &sl->flags);
+
+		err = register_netdevice(sl->dev);
+		if (err)
+			goto err_free_chan;
+	}
+
+	/* Done.  We have linked the TTY line to a channel. */
+	rtnl_unlock();
+	tty->receive_room = 65536;	/* We don't flow control */
+	return sl->dev->base_addr;
+
+err_free_chan:
+	sl->tty = NULL;
+	tty->disc_data = NULL;
+	clear_bit(SLF_INUSE, &sl->flags);
+
+err_exit:
+	rtnl_unlock();
+
+	/* Count references from TTY module */
+	return err;
+}
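
For context, this open path runs when userspace switches the tty's line discipline. A minimal sketch of doing that with TIOCSETD (the device path is hypothetical, and N_SLCAN's value is assumed from <linux/tty.h>):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	#define N_SLCAN 17	/* from <linux/tty.h>; value assumed here */

	int main(void)
	{
		int ldisc = N_SLCAN;
		int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY); /* hypothetical port */

		if (fd < 0 || ioctl(fd, TIOCSETD, &ldisc) < 0) {
			perror("slcan attach");
			return 1;
		}
		/* slcan_open() has now created a slcanN interface; keep the
		 * tty open so the channel stays up. */
		pause();
		return 0;
	}
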
+
+/*
+ * Close down a SLCAN channel.
+ * This means flushing out any pending queues, and then returning. This
+ * call is serialized against other ldisc functions.
+ *
+ * We also use this method for a hangup event.
+ */
+
+static void slcan_close(struct tty_struct *tty)
+{
+	struct slcan *sl = (struct slcan *) tty->disc_data;
+
+	/* First make sure we're connected. */
+	if (!sl || sl->magic != SLCAN_MAGIC || sl->tty != tty)
+		return;
+
+	tty->disc_data = NULL;
+	sl->tty = NULL;
+	if (!sl->leased)
+		sl->line = 0;
+
+	/* Flush network side */
+	unregister_netdev(sl->dev);
+	/* This will complete via sl_free_netdev */
+}
+
+static int slcan_hangup(struct tty_struct *tty)
+{
+	slcan_close(tty);
+	return 0;
+}
+
+/* Perform I/O control on an active SLCAN channel. */
+static int slcan_ioctl(struct tty_struct *tty, struct file *file,
+		       unsigned int cmd, unsigned long arg)
+{
+	struct slcan *sl = (struct slcan *) tty->disc_data;
+	unsigned int tmp;
+
+	/* First make sure we're connected. */
+	if (!sl || sl->magic != SLCAN_MAGIC)
+		return -EINVAL;
+
+	switch (cmd) {
+	case SIOCGIFNAME:
+		tmp = strlen(sl->dev->name) + 1;
+		if (copy_to_user((void __user *)arg, sl->dev->name, tmp))
+			return -EFAULT;
+		return 0;
+
+	case SIOCSIFHWADDR:
+		return -EINVAL;
+
+	default:
+		return tty_mode_ioctl(tty, file, cmd, arg);
+	}
+}
+
+static struct tty_ldisc_ops slc_ldisc = {
+	.owner		= THIS_MODULE,
+	.magic		= TTY_LDISC_MAGIC,
+	.name		= "slcan",
+	.open		= slcan_open,
+	.close		= slcan_close,
+	.hangup		= slcan_hangup,
+	.ioctl		= slcan_ioctl,
+	.receive_buf	= slcan_receive_buf,
+	.write_wakeup	= slcan_write_wakeup,
+};
+
+static int __init slcan_init(void)
+{
+	int status;
+
+	if (maxdev < 4)
+		maxdev = 4; /* Sanity */
+
+	printk(banner);
+	printk(KERN_INFO "slcan: %d dynamic interface channels.\n", maxdev);
+
+	slcan_devs = kzalloc(sizeof(struct net_device *)*maxdev, GFP_KERNEL);
+	if (!slcan_devs) {
+		printk(KERN_ERR "slcan: can't allocate slcan device array!\n");
+		return -ENOMEM;
+	}
+
+	/* Fill in our line protocol discipline, and register it */
+	status = tty_register_ldisc(N_SLCAN, &slc_ldisc);
+	if (status)  {
+		printk(KERN_ERR "slcan: can't register line discipline\n");
+		kfree(slcan_devs);
+	}
+	return status;
+}
+
+static void __exit slcan_exit(void)
+{
+	int i;
+	struct net_device *dev;
+	struct slcan *sl;
+	unsigned long timeout = jiffies + HZ;
+	int busy = 0;
+
+	if (slcan_devs == NULL)
+		return;
+
+	/* First of all: check for active disciplines and hangup them.
+	 */
+	do {
+		if (busy)
+			msleep_interruptible(100);
+
+		busy = 0;
+		for (i = 0; i < maxdev; i++) {
+			dev = slcan_devs[i];
+			if (!dev)
+				continue;
+			sl = netdev_priv(dev);
+			spin_lock_bh(&sl->lock);
+			if (sl->tty) {
+				busy++;
+				tty_hangup(sl->tty);
+			}
+			spin_unlock_bh(&sl->lock);
+		}
+	} while (busy && time_before(jiffies, timeout));
+
+	/* FIXME: hangup is async so we should wait when doing this second
+	   phase */
+
+	for (i = 0; i < maxdev; i++) {
+		dev = slcan_devs[i];
+		if (!dev)
+			continue;
+		slcan_devs[i] = NULL;
+
+		sl = netdev_priv(dev);
+		if (sl->tty) {
+			printk(KERN_ERR "%s: tty discipline still running\n",
+			       dev->name);
+			/* Intentionally leak the control block. */
+			dev->destructor = NULL;
+		}
+
+		unregister_netdev(dev);
+	}
+
+	kfree(slcan_devs);
+	slcan_devs = NULL;
+
+	i = tty_unregister_ldisc(N_SLCAN);
+	if (i)
+		printk(KERN_ERR "slcan: can't unregister ldisc (err %d)\n", i);
+}
+
+module_init(slcan_init);
+module_exit(slcan_exit);
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index d6b6d6a..a8a32bc 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -3880,7 +3880,7 @@
 	schedule_work(&cp->reset_task);
 #endif
 
-	flush_scheduled_work();
+	flush_work_sync(&cp->reset_task);
 	return 0;
 }
 
@@ -5177,7 +5177,7 @@
 		vfree(cp->fw_data);
 
 	mutex_lock(&cp->pm_mutex);
-	flush_scheduled_work();
+	cancel_work_sync(&cp->reset_task);
 	if (cp->hw_running)
 		cas_shutdown(cp);
 	mutex_unlock(&cp->pm_mutex);
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 92bac19..594ca9c 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -1695,7 +1695,7 @@
 		*work = num;
 		return -EINVAL;
 	}
-	*work = 2 + req2->num_additional_wqes;;
+	*work = 2 + req2->num_additional_wqes;
 
 	l5_cid = req1->iscsi_conn_id;
 	if (l5_cid >= MAX_ISCSI_TBL_SZ)
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index 81475cc..80c2fee 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -59,7 +59,6 @@
 
 /* Information that need to be kept for each board. */
 struct net_local {
-	struct net_device_stats stats;
 	struct mii_if_info mii_if;
 
 	/* Tx control lock.  This protects the transmit buffer ring
@@ -1059,7 +1058,7 @@
 
 	/* remember we got an error */
 
-	np->stats.tx_errors++;
+	dev->stats.tx_errors++;
 
 	/* reset the TX DMA in case it has hung on something */
 
@@ -1157,7 +1156,7 @@
 			 * allocate a new buffer to put a packet in.
 			 */
 			e100_rx(dev);
-			np->stats.rx_packets++;
+			dev->stats.rx_packets++;
 			/* restart/continue on the channel, for safety */
 			*R_DMA_CH1_CMD = IO_STATE(R_DMA_CH1_CMD, cmd, restart);
 			/* clear dma channel 1 eop/descr irq bits */
@@ -1173,8 +1172,8 @@
 	/* Report any packets that have been sent */
 	while (virt_to_phys(myFirstTxDesc) != *R_DMA_CH0_FIRST &&
 	       (netif_queue_stopped(dev) || myFirstTxDesc != myNextTxDesc)) {
-		np->stats.tx_bytes += myFirstTxDesc->skb->len;
-		np->stats.tx_packets++;
+		dev->stats.tx_bytes += myFirstTxDesc->skb->len;
+		dev->stats.tx_packets++;
 
 		/* dma is ready with the transmission of the data in tx_skb, so now
 		   we can release the skb memory */
@@ -1197,7 +1196,6 @@
 e100nw_interrupt(int irq, void *dev_id)
 {
 	struct net_device *dev = (struct net_device *)dev_id;
-	struct net_local *np = netdev_priv(dev);
 	unsigned long irqbits = *R_IRQ_MASK0_RD;
 
 	/* check for underrun irq */
@@ -1205,13 +1203,13 @@
 		SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr);
 		*R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
 		SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop);
-		np->stats.tx_errors++;
+		dev->stats.tx_errors++;
 		D(printk("ethernet receiver underrun!\n"));
 	}
 
 	/* check for overrun irq */
 	if (irqbits & IO_STATE(R_IRQ_MASK0_RD, overrun, active)) {
-		update_rx_stats(&np->stats); /* this will ack the irq */
+		update_rx_stats(&dev->stats); /* this will ack the irq */
 		D(printk("ethernet receiver overrun!\n"));
 	}
 	/* check for excessive collision irq */
@@ -1219,7 +1217,7 @@
 		SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr);
 		*R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
 		SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop);
-		np->stats.tx_errors++;
+		dev->stats.tx_errors++;
 		D(printk("ethernet excessive collisions!\n"));
 	}
 	return IRQ_HANDLED;
@@ -1250,7 +1248,7 @@
 	spin_unlock(&np->led_lock);
 
 	length = myNextRxDesc->descr.hw_len - 4;
-	np->stats.rx_bytes += length;
+	dev->stats.rx_bytes += length;
 
 #ifdef ETHDEBUG
 	printk("Got a packet of length %d:\n", length);
@@ -1268,7 +1266,7 @@
 		/* Small packet, copy data */
 		skb = dev_alloc_skb(length - ETHER_HEAD_LEN);
 		if (!skb) {
-			np->stats.rx_errors++;
+			dev->stats.rx_errors++;
 			printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
 			goto update_nextrxdesc;
 		}
@@ -1294,7 +1292,7 @@
 		int align;
 		struct sk_buff *new_skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES);
 		if (!new_skb) {
-			np->stats.rx_errors++;
+			dev->stats.rx_errors++;
 			printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
 			goto update_nextrxdesc;
 		}
@@ -1333,8 +1331,6 @@
 static int
 e100_close(struct net_device *dev)
 {
-	struct net_local *np = netdev_priv(dev);
-
 	printk(KERN_INFO "Closing %s.\n", dev->name);
 
 	netif_stop_queue(dev);
@@ -1366,8 +1362,8 @@
 
 	/* Update the statistics here. */
 
-	update_rx_stats(&np->stats);
-	update_tx_stats(&np->stats);
+	update_rx_stats(&dev->stats);
+	update_tx_stats(&dev->stats);
 
 	/* Stop speed/duplex timers */
 	del_timer(&speed_timer);
@@ -1545,11 +1541,11 @@
 
 	spin_lock_irqsave(&lp->lock, flags);
 
-	update_rx_stats(&lp->stats);
-	update_tx_stats(&lp->stats);
+	update_rx_stats(&dev->stats);
+	update_tx_stats(&dev->stats);
 
 	spin_unlock_irqrestore(&lp->lock, flags);
-	return &lp->stats;
+	return &dev->stats;
 }
 
 /*
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 407d4e2..4d538a4 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -1359,6 +1359,7 @@
 static int offload_close(struct t3cdev *tdev)
 {
 	struct adapter *adapter = tdev2adap(tdev);
+	struct t3c_data *td = T3C_DATA(tdev);
 
 	if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
 		return 0;
@@ -1369,7 +1370,7 @@
 	sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
 
 	/* Flush work scheduled while releasing TIDs */
-	flush_scheduled_work();
+	flush_work_sync(&td->tid_release_task);
 
 	tdev->lldev = NULL;
 	cxgb3_set_dummy_ops(tdev);
@@ -3006,12 +3007,11 @@
 					     pci_channel_state_t state)
 {
 	struct adapter *adapter = pci_get_drvdata(pdev);
-	int ret;
 
 	if (state == pci_channel_io_perm_failure)
 		return PCI_ERS_RESULT_DISCONNECT;
 
-	ret = t3_adapter_error(adapter, 0, 0);
+	t3_adapter_error(adapter, 0, 0);
 
 	/* Request a slot reset. */
 	return PCI_ERS_RESULT_NEED_RESET;
@@ -3341,7 +3341,6 @@
 				adapter->name = adapter->port[i]->name;
 
 			__set_bit(i, &adapter->registered_device_map);
-			netif_tx_stop_all_queues(adapter->port[i]);
 		}
 	}
 	if (!adapter->registered_device_map) {
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index bcf0753..ef02aa6 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -1164,12 +1164,10 @@
  */
 void *cxgb_alloc_mem(unsigned long size)
 {
-	void *p = kmalloc(size, GFP_KERNEL);
+	void *p = kzalloc(size, GFP_KERNEL);
 
 	if (!p)
-		p = vmalloc(size);
-	if (p)
-		memset(p, 0, size);
+		p = vzalloc(size);
 	return p;
 }
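
Because this helper may now return either kmalloc'ed or vmalloc'ed memory, the matching free side has to check which allocator produced the pointer; the driver's counterpart is essentially the following (sketch, assuming is_vmalloc_addr() as in mainline):

	#include <linux/mm.h>
	#include <linux/slab.h>
	#include <linux/vmalloc.h>

	void cxgb_free_mem(void *addr)
	{
		if (is_vmalloc_addr(addr))
			vfree(addr);
		else
			kfree(addr);
	}
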
 
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index f17703f..848f89d 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -868,12 +868,10 @@
  */
 void *t4_alloc_mem(size_t size)
 {
-	void *p = kmalloc(size, GFP_KERNEL);
+	void *p = kzalloc(size, GFP_KERNEL);
 
 	if (!p)
-		p = vmalloc(size);
-	if (p)
-		memset(p, 0, size);
+		p = vzalloc(size);
 	return p;
 }
 
@@ -3736,7 +3734,6 @@
 
 			__set_bit(i, &adapter->registered_device_map);
 			adapter->chan_map[adap2pinfo(adapter, i)->tx_chan] = i;
-			netif_tx_stop_all_queues(adapter->port[i]);
 		}
 	}
 	if (!adapter->registered_device_map) {
diff --git a/drivers/net/cxgb4/t4_hw.c b/drivers/net/cxgb4/t4_hw.c
index bb813d9..e97521c 100644
--- a/drivers/net/cxgb4/t4_hw.c
+++ b/drivers/net/cxgb4/t4_hw.c
@@ -2408,7 +2408,7 @@
 		if (index < NEXACT_MAC)
 			ret++;
 		else if (hash)
-			*hash |= (1 << hash_mac_addr(addr[i]));
+			*hash |= (1ULL << hash_mac_addr(addr[i]));
 	}
 	return ret;
 }
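
The widening matters because the hash index can exceed 31 while a bare 1 is a 32-bit int, so the shift would be undefined and the upper bits of the 64-bit hash unreachable. A standalone illustration (the index value is hypothetical):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t hash = 0;
		int idx = 40;	/* hypothetical hash_mac_addr() result >= 32 */

		/* hash |= (1 << idx);  -- undefined: shift of a 32-bit int */
		hash |= (UINT64_C(1) << idx);	/* well-defined 64-bit shift */
		printf("%#llx\n", (unsigned long long)hash);
		return 0;
	}
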
diff --git a/drivers/net/cxgb4vf/adapter.h b/drivers/net/cxgb4vf/adapter.h
index 8ea0196..4766b41 100644
--- a/drivers/net/cxgb4vf/adapter.h
+++ b/drivers/net/cxgb4vf/adapter.h
@@ -60,7 +60,7 @@
 	 * MSI-X interrupt index usage.
 	 */
 	MSIX_FW		= 0,		/* MSI-X index for firmware Q */
-	MSIX_NIQFLINT	= 1,		/* MSI-X index base for Ingress Qs */
+	MSIX_IQFLINT	= 1,		/* MSI-X index base for Ingress Qs */
 	MSIX_EXTRAS	= 1,
 	MSIX_ENTRIES	= MAX_ETH_QSETS + MSIX_EXTRAS,
 
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c
index 555ecc5..f54af48 100644
--- a/drivers/net/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/cxgb4vf/cxgb4vf_main.c
@@ -280,9 +280,7 @@
 		const struct port_info *pi = netdev_priv(dev);
 		int qs, msi;
 
-		for (qs = 0, msi = MSIX_NIQFLINT;
-		     qs < pi->nqsets;
-		     qs++, msi++) {
+		for (qs = 0, msi = MSIX_IQFLINT; qs < pi->nqsets; qs++, msi++) {
 			snprintf(adapter->msix_info[msi].desc, namelen,
 				 "%s-%d", dev->name, qs);
 			adapter->msix_info[msi].desc[namelen] = 0;
@@ -309,7 +307,7 @@
 	/*
 	 * Ethernet queues.
 	 */
-	msi = MSIX_NIQFLINT;
+	msi = MSIX_IQFLINT;
 	for_each_ethrxq(s, rxq) {
 		err = request_irq(adapter->msix_info[msi].vec,
 				  t4vf_sge_intr_msix, 0,
@@ -337,7 +335,7 @@
 	int rxq, msi;
 
 	free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
-	msi = MSIX_NIQFLINT;
+	msi = MSIX_IQFLINT;
 	for_each_ethrxq(s, rxq)
 		free_irq(adapter->msix_info[msi++].vec,
 			 &s->ethrxq[rxq].rspq);
@@ -527,7 +525,7 @@
 	 * brought up at which point lots of things get nailed down
 	 * permanently ...
 	 */
-	msix = MSIX_NIQFLINT;
+	msix = MSIX_IQFLINT;
 	for_each_port(adapter, pidx) {
 		struct net_device *dev = adapter->port[pidx];
 		struct port_info *pi = netdev_priv(dev);
@@ -753,7 +751,9 @@
 	if (err)
 		return err;
 	set_bit(pi->port_id, &adapter->open_device_map);
-	link_start(dev);
+	err = link_start(dev);
+	if (err)
+		return err;
 	netif_tx_start_all_queues(dev);
 	return 0;
 }
@@ -814,40 +814,48 @@
 }
 
 /*
- * Collect up to maxaddrs worth of a netdevice's unicast addresses into an
- * array of addrss pointers and return the number collected.
+ * Collect up to maxaddrs worth of a netdevice's unicast addresses, starting
+ * at a specified offset within the list, into an array of address pointers and
+ * return the number collected.
  */
-static inline int collect_netdev_uc_list_addrs(const struct net_device *dev,
-					       const u8 **addr,
-					       unsigned int maxaddrs)
+static inline unsigned int collect_netdev_uc_list_addrs(const struct net_device *dev,
+							const u8 **addr,
+							unsigned int offset,
+							unsigned int maxaddrs)
 {
+	unsigned int index = 0;
 	unsigned int naddr = 0;
 	const struct netdev_hw_addr *ha;
 
-	for_each_dev_addr(dev, ha) {
-		addr[naddr++] = ha->addr;
-		if (naddr >= maxaddrs)
-			break;
-	}
+	for_each_dev_addr(dev, ha)
+		if (index++ >= offset) {
+			addr[naddr++] = ha->addr;
+			if (naddr >= maxaddrs)
+				break;
+		}
 	return naddr;
 }
 
 /*
- * Collect up to maxaddrs worth of a netdevice's multicast addresses into an
- * array of addrss pointers and return the number collected.
+ * Collect up to maxaddrs worth of a netdevice's multicast addresses, starting
+ * at a specified offset within the list, into an array of address pointers and
+ * return the number collected.
  */
-static inline int collect_netdev_mc_list_addrs(const struct net_device *dev,
-					       const u8 **addr,
-					       unsigned int maxaddrs)
+static inline unsigned int collect_netdev_mc_list_addrs(const struct net_device *dev,
+							const u8 **addr,
+							unsigned int offset,
+							unsigned int maxaddrs)
 {
+	unsigned int index = 0;
 	unsigned int naddr = 0;
 	const struct netdev_hw_addr *ha;
 
-	netdev_for_each_mc_addr(ha, dev) {
-		addr[naddr++] = ha->addr;
-		if (naddr >= maxaddrs)
-			break;
-	}
+	netdev_for_each_mc_addr(ha, dev)
+		if (index++ >= offset) {
+			addr[naddr++] = ha->addr;
+			if (naddr >= maxaddrs)
+				break;
+		}
 	return naddr;
 }
 
@@ -860,16 +868,20 @@
 	u64 mhash = 0;
 	u64 uhash = 0;
 	bool free = true;
-	u16 filt_idx[7];
+	unsigned int offset, naddr;
 	const u8 *addr[7];
-	int ret, naddr = 0;
+	int ret;
 	const struct port_info *pi = netdev_priv(dev);
 
 	/* first do the secondary unicast addresses */
-	naddr = collect_netdev_uc_list_addrs(dev, addr, ARRAY_SIZE(addr));
-	if (naddr > 0) {
+	for (offset = 0; ; offset += naddr) {
+		naddr = collect_netdev_uc_list_addrs(dev, addr, offset,
+						     ARRAY_SIZE(addr));
+		if (naddr == 0)
+			break;
+
 		ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
-					  naddr, addr, filt_idx, &uhash, sleep);
+					  naddr, addr, NULL, &uhash, sleep);
 		if (ret < 0)
 			return ret;
 
@@ -877,12 +889,17 @@
 	}
 
 	/* next set up the multicast addresses */
-	naddr = collect_netdev_mc_list_addrs(dev, addr, ARRAY_SIZE(addr));
-	if (naddr > 0) {
+	for (offset = 0; ; offset += naddr) {
+		naddr = collect_netdev_mc_list_addrs(dev, addr, offset,
+						     ARRAY_SIZE(addr));
+		if (naddr == 0)
+			break;
+
 		ret = t4vf_alloc_mac_filt(pi->adapter, pi->viid, free,
-					  naddr, addr, filt_idx, &mhash, sleep);
+					  naddr, addr, NULL, &mhash, sleep);
 		if (ret < 0)
 			return ret;
+		free = false;
 	}
 
 	return t4vf_set_addr_hash(pi->adapter, pi->viid, uhash != 0,
@@ -1103,18 +1120,6 @@
 	return 0;
 }
 
-/*
- * Return a TX Queue on which to send the specified skb.
- */
-static u16 cxgb4vf_select_queue(struct net_device *dev, struct sk_buff *skb)
-{
-	/*
-	 * XXX For now just use the default hash but we probably want to
-	 * XXX look at other possibilities ...
-	 */
-	return skb_tx_hash(dev, skb);
-}
-
 #ifdef CONFIG_NET_POLL_CONTROLLER
 /*
  * Poll all of our receive queues.  This is called outside of normal interrupt
@@ -1358,6 +1363,8 @@
 	u64 rx_csum;
 	u64 vlan_ex;
 	u64 vlan_ins;
+	u64 lro_pkts;
+	u64 lro_merged;
 };
 
 /*
@@ -1395,6 +1402,8 @@
 	"RxCsumGood        ",
 	"VLANextractions   ",
 	"VLANinsertions    ",
+	"GROPackets        ",
+	"GROMerged         ",
 };
 
 /*
@@ -1444,6 +1453,8 @@
 		stats->rx_csum += rxq->stats.rx_cso;
 		stats->vlan_ex += rxq->stats.vlan_ex;
 		stats->vlan_ins += txq->vlan_ins;
+		stats->lro_pkts += rxq->stats.lro_pkts;
+		stats->lro_merged += rxq->stats.lro_merged;
 	}
 }
 
@@ -1540,14 +1551,19 @@
 }
 
 /*
+ * TCP Segmentation Offload flags which we support.
+ */
+#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
+
+/*
  * Set TCP Segmentation Offloading feature capabilities.
  */
 static int cxgb4vf_set_tso(struct net_device *dev, u32 tso)
 {
 	if (tso)
-		dev->features |= NETIF_F_TSO | NETIF_F_TSO6;
+		dev->features |= TSO_FLAGS;
 	else
-		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+		dev->features &= ~TSO_FLAGS;
 	return 0;
 }
 
@@ -2038,7 +2054,7 @@
  * Tear down the /sys/kernel/debug/cxgb4vf sub-nodes created above.  We leave
  * it to our caller to tear down the directory (debugfs_root).
  */
-static void __devexit cleanup_debugfs(struct adapter *adapter)
+static void cleanup_debugfs(struct adapter *adapter)
 {
 	BUG_ON(adapter->debugfs_root == NULL);
 
@@ -2056,7 +2072,7 @@
  * adapter parameters we're going to be using and initialize basic adapter
  * hardware support.
  */
-static int adap_init0(struct adapter *adapter)
+static int __devinit adap_init0(struct adapter *adapter)
 {
 	struct vf_resources *vfres = &adapter->params.vfres;
 	struct sge_params *sge_params = &adapter->params.sge;
@@ -2075,6 +2091,22 @@
 	}
 
 	/*
+	 * Some environments do not properly handle PCIE FLRs -- e.g. in Linux
+	 * 2.6.31 and later we can't call pci_reset_function() in order to
+	 * issue an FLR because of a self-deadlock on the device semaphore.
+	 * Meanwhile, the OS infrastructure doesn't issue FLRs in all the
+	 * cases where they're needed -- for instance, some versions of KVM
+	 * fail to reset "Assigned Devices" when the VM reboots.  Therefore we
+	 * use the firmware based reset in order to reset any per function
+	 * state.
+	 */
+	err = t4vf_fw_reset(adapter);
+	if (err < 0) {
+		dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err);
+		return err;
+	}
+
+	/*
 	 * Grab basic operational parameters.  These will predominantly have
 	 * been set up by the Physical Function Driver or will be hard coded
 	 * into the adapter.  We just have to live with them ...  Note that
@@ -2417,7 +2449,6 @@
 	.ndo_get_stats		= cxgb4vf_get_stats,
 	.ndo_set_rx_mode	= cxgb4vf_set_rxmode,
 	.ndo_set_mac_address	= cxgb4vf_set_mac_addr,
-	.ndo_select_queue	= cxgb4vf_select_queue,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_do_ioctl		= cxgb4vf_do_ioctl,
 	.ndo_change_mtu		= cxgb4vf_change_mtu,
@@ -2465,7 +2496,6 @@
 		version_printed = 1;
 	}
 
-
 	/*
 	 * Initialize generic PCI device state.
 	 */
@@ -2600,10 +2630,9 @@
 		pi->xact_addr_filt = -1;
 		pi->rx_offload = RX_CSO;
 		netif_carrier_off(netdev);
-		netif_tx_stop_all_queues(netdev);
 		netdev->irq = pdev->irq;
 
-		netdev->features = (NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
+		netdev->features = (NETIF_F_SG | TSO_FLAGS |
 				    NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 				    NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
 				    NETIF_F_GRO);
@@ -2625,7 +2654,6 @@
 		netdev->do_ioctl = cxgb4vf_do_ioctl;
 		netdev->change_mtu = cxgb4vf_change_mtu;
 		netdev->set_mac_address = cxgb4vf_set_mac_addr;
-		netdev->select_queue = cxgb4vf_select_queue;
 #ifdef CONFIG_NET_POLL_CONTROLLER
 		netdev->poll_controller = cxgb4vf_poll_controller;
 #endif
@@ -2844,6 +2872,14 @@
 	CH_DEVICE(0x4800, 0),	/* T440-dbg */
 	CH_DEVICE(0x4801, 0),	/* T420-cr */
 	CH_DEVICE(0x4802, 0),	/* T422-cr */
+	CH_DEVICE(0x4803, 0),	/* T440-cr */
+	CH_DEVICE(0x4804, 0),	/* T420-bch */
+	CH_DEVICE(0x4805, 0),   /* T440-bch */
+	CH_DEVICE(0x4806, 0),	/* T460-ch */
+	CH_DEVICE(0x4807, 0),	/* T420-so */
+	CH_DEVICE(0x4808, 0),	/* T420-cx */
+	CH_DEVICE(0x4809, 0),	/* T420-bt */
+	CH_DEVICE(0x480a, 0),   /* T404-bt */
 	{ 0, }
 };
 
diff --git a/drivers/net/cxgb4vf/sge.c b/drivers/net/cxgb4vf/sge.c
index f10864d..e0b3d1b 100644
--- a/drivers/net/cxgb4vf/sge.c
+++ b/drivers/net/cxgb4vf/sge.c
@@ -154,13 +154,14 @@
 	 */
 	RX_COPY_THRES = 256,
 	RX_PULL_LEN = 128,
-};
 
-/*
- * Can't define this in the above enum because PKTSHIFT isn't a constant in
- * the VF Driver ...
- */
-#define RX_PKT_PULL_LEN (RX_PULL_LEN + PKTSHIFT)
+	/*
+	 * Main body length for sk_buffs used for RX Ethernet packets with
+	 * fragments.  Should be >= RX_PULL_LEN but possibly bigger to give
+	 * pskb_may_pull() some room.
+	 */
+	RX_SKB_LEN = 512,
+};
 
 /*
  * Software state per TX descriptor.
@@ -1355,6 +1356,67 @@
 }
 
 /**
+ *	t4vf_pktgl_to_skb - build an sk_buff from a packet gather list
+ *	@gl: the gather list
+ *	@skb_len: size of sk_buff main body if it carries fragments
+ *	@pull_len: amount of data to move to the sk_buff's main body
+ *
+ *	Builds an sk_buff from the given packet gather list.  Returns the
+ *	sk_buff or %NULL if sk_buff allocation failed.
+ */
+struct sk_buff *t4vf_pktgl_to_skb(const struct pkt_gl *gl,
+				  unsigned int skb_len, unsigned int pull_len)
+{
+	struct sk_buff *skb;
+	struct skb_shared_info *ssi;
+
+	/*
+	 * If the ingress packet is small enough, allocate an skb large enough
+	 * for all of the data and copy it inline.  Otherwise, allocate an skb
+	 * with enough room to pull in the header and reference the rest of
+	 * the data via the skb fragment list.
+	 *
+	 * Below we rely on RX_COPY_THRES being less than the smallest Rx
+	 * buff!  size, which is expected since buffers are at least
+	 * PAGE_SIZEd.  In this case packets up to RX_COPY_THRES have only one
+	 * fragment.
+	 */
+	if (gl->tot_len <= RX_COPY_THRES) {
+		/* small packets have only one fragment */
+		skb = alloc_skb(gl->tot_len, GFP_ATOMIC);
+		if (unlikely(!skb))
+			goto out;
+		__skb_put(skb, gl->tot_len);
+		skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
+	} else {
+		skb = alloc_skb(skb_len, GFP_ATOMIC);
+		if (unlikely(!skb))
+			goto out;
+		__skb_put(skb, pull_len);
+		skb_copy_to_linear_data(skb, gl->va, pull_len);
+
+		ssi = skb_shinfo(skb);
+		ssi->frags[0].page = gl->frags[0].page;
+		ssi->frags[0].page_offset = gl->frags[0].page_offset + pull_len;
+		ssi->frags[0].size = gl->frags[0].size - pull_len;
+		if (gl->nfrags > 1)
+			memcpy(&ssi->frags[1], &gl->frags[1],
+			       (gl->nfrags-1) * sizeof(skb_frag_t));
+		ssi->nr_frags = gl->nfrags;
+
+		skb->len = gl->tot_len;
+		skb->data_len = skb->len - pull_len;
+		skb->truesize += skb->data_len;
+
+		/* Get a reference for the last page, we don't own it */
+		get_page(gl->frags[gl->nfrags - 1].page);
+	}
+
+out:
+	return skb;
+}
+
+/**
  *	t4vf_pktgl_free - free a packet gather list
  *	@gl: the gather list
  *
@@ -1463,10 +1525,8 @@
 {
 	struct sk_buff *skb;
 	struct port_info *pi;
-	struct skb_shared_info *ssi;
 	const struct cpl_rx_pkt *pkt = (void *)&rsp[1];
 	bool csum_ok = pkt->csum_calc && !pkt->err_vec;
-	unsigned int len = be16_to_cpu(pkt->len);
 	struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
 
 	/*
@@ -1481,42 +1541,14 @@
 	}
 
 	/*
-	 * If the ingress packet is small enough, allocate an skb large enough
-	 * for all of the data and copy it inline.  Otherwise, allocate an skb
-	 * with enough room to pull in the header and reference the rest of
-	 * the data via the skb fragment list.
+	 * Convert the Packet Gather List into an skb.
 	 */
-	if (len <= RX_COPY_THRES) {
-		/* small packets have only one fragment */
-		skb = alloc_skb(gl->frags[0].size, GFP_ATOMIC);
-		if (!skb)
-			goto nomem;
-		__skb_put(skb, gl->frags[0].size);
-		skb_copy_to_linear_data(skb, gl->va, gl->frags[0].size);
-	} else {
-		skb = alloc_skb(RX_PKT_PULL_LEN, GFP_ATOMIC);
-		if (!skb)
-			goto nomem;
-		__skb_put(skb, RX_PKT_PULL_LEN);
-		skb_copy_to_linear_data(skb, gl->va, RX_PKT_PULL_LEN);
-
-		ssi = skb_shinfo(skb);
-		ssi->frags[0].page = gl->frags[0].page;
-		ssi->frags[0].page_offset = (gl->frags[0].page_offset +
-					     RX_PKT_PULL_LEN);
-		ssi->frags[0].size = gl->frags[0].size - RX_PKT_PULL_LEN;
-		if (gl->nfrags > 1)
-			memcpy(&ssi->frags[1], &gl->frags[1],
-			       (gl->nfrags-1) * sizeof(skb_frag_t));
-		ssi->nr_frags = gl->nfrags;
-		skb->len = len + PKTSHIFT;
-		skb->data_len = skb->len - RX_PKT_PULL_LEN;
-		skb->truesize += skb->data_len;
-
-		/* Get a reference for the last page, we don't own it */
-		get_page(gl->frags[gl->nfrags - 1].page);
+	skb = t4vf_pktgl_to_skb(gl, RX_SKB_LEN, RX_PULL_LEN);
+	if (unlikely(!skb)) {
+		t4vf_pktgl_free(gl);
+		rxq->stats.rx_drops++;
+		return 0;
 	}
-
 	__skb_pull(skb, PKTSHIFT);
 	skb->protocol = eth_type_trans(skb, rspq->netdev);
 	skb_record_rx_queue(skb, rspq->idx);
@@ -1536,6 +1568,9 @@
 	} else
 		skb_checksum_none_assert(skb);
 
+	/*
+	 * Deliver the packet to the stack.
+	 */
 	if (unlikely(pkt->vlan_ex)) {
 		struct vlan_group *grp = pi->vlan_grp;
 
@@ -1549,11 +1584,6 @@
 		netif_receive_skb(skb);
 
 	return 0;
-
-nomem:
-	t4vf_pktgl_free(gl);
-	rxq->stats.rx_drops++;
-	return 0;
 }
 
 /**
@@ -1679,6 +1709,7 @@
 				}
 				len = RSPD_LEN(len);
 			}
+			gl.tot_len = len;
 
 			/*
 			 * Gather packet fragments.
@@ -2115,7 +2146,7 @@
 
 		/*
 		 * Calculate the size of the hardware free list ring plus
-		 * status page (which the SGE will place at the end of the
+		 * Status Page (which the SGE will place after the end of the
 		 * free list ring) in Egress Queue Units.
 		 */
 		flsz = (fl->size / FL_PER_EQ_UNIT +
@@ -2212,8 +2243,8 @@
 	struct port_info *pi = netdev_priv(dev);
 
 	/*
-	 * Calculate the size of the hardware TX Queue (including the
-	 * status age on the end) in units of TX Descriptors.
+	 * Calculate the size of the hardware TX Queue (including the Status
+	 * Page on the end of the TX Queue) in units of TX Descriptors.
 	 */
 	nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
 
diff --git a/drivers/net/cxgb4vf/t4vf_common.h b/drivers/net/cxgb4vf/t4vf_common.h
index 873cb7d..a65c80a 100644
--- a/drivers/net/cxgb4vf/t4vf_common.h
+++ b/drivers/net/cxgb4vf/t4vf_common.h
@@ -235,6 +235,7 @@
 int __devinit t4vf_wait_dev_ready(struct adapter *);
 int __devinit t4vf_port_init(struct adapter *, int);
 
+int t4vf_fw_reset(struct adapter *);
 int t4vf_query_params(struct adapter *, unsigned int, const u32 *, u32 *);
 int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *);
 
diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c
index ea1c123..35fc803 100644
--- a/drivers/net/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/cxgb4vf/t4vf_hw.c
@@ -326,6 +326,25 @@
 }
 
 /**
+ *	t4vf_fw_reset - issue a reset to FW
+ *	@adapter: the adapter
+ *
+ *	Issues a reset command to the firmware.  For a Physical Function this
+ *	would result in the firmware resetting all of its state.  For a
+ *	Virtual Function this just resets the state associated with the VF.
+ */
+int t4vf_fw_reset(struct adapter *adapter)
+{
+	struct fw_reset_cmd cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_write = cpu_to_be32(FW_CMD_OP(FW_RESET_CMD) |
+				      FW_CMD_WRITE);
+	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
+	return t4vf_wr_mbox(adapter, &cmd, sizeof(cmd), NULL);
+}
+
+/**
  *	t4vf_query_params - query FW or device parameters
  *	@adapter: the adapter
  *	@nparams: the number of parameters
@@ -995,48 +1014,72 @@
 			unsigned int naddr, const u8 **addr, u16 *idx,
 			u64 *hash, bool sleep_ok)
 {
-	int i, ret;
+	int offset, ret = 0;
+	unsigned nfilters = 0;
+	unsigned int rem = naddr;
 	struct fw_vi_mac_cmd cmd, rpl;
-	struct fw_vi_mac_exact *p;
-	size_t len16;
 
-	if (naddr > ARRAY_SIZE(cmd.u.exact))
+	if (naddr > FW_CLS_TCAM_NUM_ENTRIES)
 		return -EINVAL;
-	len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
-				      u.exact[naddr]), 16);
 
-	memset(&cmd, 0, sizeof(cmd));
-	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) |
-				     FW_CMD_REQUEST |
-				     FW_CMD_WRITE |
-				     (free ? FW_CMD_EXEC : 0) |
-				     FW_VI_MAC_CMD_VIID(viid));
-	cmd.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS(free) |
-					    FW_CMD_LEN16(len16));
+	for (offset = 0; offset < naddr; /**/) {
+		unsigned int fw_naddr = (rem < ARRAY_SIZE(cmd.u.exact)
+					 ? rem
+					 : ARRAY_SIZE(cmd.u.exact));
+		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
+						     u.exact[fw_naddr]), 16);
+		struct fw_vi_mac_exact *p;
+		int i;
 
-	for (i = 0, p = cmd.u.exact; i < naddr; i++, p++) {
-		p->valid_to_idx =
-			cpu_to_be16(FW_VI_MAC_CMD_VALID |
-				    FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
-		memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
+		memset(&cmd, 0, sizeof(cmd));
+		cmd.op_to_viid = cpu_to_be32(FW_CMD_OP(FW_VI_MAC_CMD) |
+					     FW_CMD_REQUEST |
+					     FW_CMD_WRITE |
+					     (free ? FW_CMD_EXEC : 0) |
+					     FW_VI_MAC_CMD_VIID(viid));
+		cmd.freemacs_to_len16 =
+			cpu_to_be32(FW_VI_MAC_CMD_FREEMACS(free) |
+				    FW_CMD_LEN16(len16));
+
+		for (i = 0, p = cmd.u.exact; i < fw_naddr; i++, p++) {
+			p->valid_to_idx = cpu_to_be16(
+				FW_VI_MAC_CMD_VALID |
+				FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
+			memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
+		}
+
+		ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl,
+					sleep_ok);
+		if (ret && ret != -ENOMEM)
+			break;
+
+		for (i = 0, p = rpl.u.exact; i < fw_naddr; i++, p++) {
+			u16 index = FW_VI_MAC_CMD_IDX_GET(
+				be16_to_cpu(p->valid_to_idx));
+
+			if (idx)
+				idx[offset+i] =
+					(index >= FW_CLS_TCAM_NUM_ENTRIES
+					 ? 0xffff
+					 : index);
+			if (index < FW_CLS_TCAM_NUM_ENTRIES)
+				nfilters++;
+			else if (hash)
+				*hash |= (1ULL << hash_mac_addr(addr[offset+i]));
+		}
+
+		free = false;
+		offset += fw_naddr;
+		rem -= fw_naddr;
 	}
 
-	ret = t4vf_wr_mbox_core(adapter, &cmd, sizeof(cmd), &rpl, sleep_ok);
-	if (ret)
-		return ret;
-
-	for (i = 0, p = rpl.u.exact; i < naddr; i++, p++) {
-		u16 index = FW_VI_MAC_CMD_IDX_GET(be16_to_cpu(p->valid_to_idx));
-
-		if (idx)
-			idx[i] = (index >= FW_CLS_TCAM_NUM_ENTRIES
-				  ? 0xffff
-				  : index);
-		if (index < FW_CLS_TCAM_NUM_ENTRIES)
-			ret++;
-		else if (hash)
-			*hash |= (1 << hash_mac_addr(addr[i]));
-	}
+	/*
+	 * If there were no errors or we merely ran out of room in our MAC
+	 * address arena, return the number of filters actually written.
+	 */
+	if (ret == 0 || ret == -ENOMEM)
+		ret = nfilters;
 	return ret;
 }
 
@@ -1257,7 +1300,7 @@
  */
 int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
 {
-	struct fw_cmd_hdr *cmd_hdr = (struct fw_cmd_hdr *)rpl;
+	const struct fw_cmd_hdr *cmd_hdr = (const struct fw_cmd_hdr *)rpl;
 	u8 opcode = FW_CMD_OP_GET(be32_to_cpu(cmd_hdr->hi));
 
 	switch (opcode) {
@@ -1265,7 +1308,8 @@
 		/*
 		 * Link/module state change message.
 		 */
-		const struct fw_port_cmd *port_cmd = (void *)rpl;
+		const struct fw_port_cmd *port_cmd =
+			(const struct fw_port_cmd *)rpl;
 		u32 word;
 		int action, port_id, link_ok, speed, fc, pidx;
 
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 9f6aeef..2d4c4fc 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -1675,7 +1675,7 @@
 	platform_set_drvdata(pdev, NULL);
 
 	unregister_netdev(ndev);
-	dm9000_release_board(pdev, (board_info_t *) netdev_priv(ndev));
+	dm9000_release_board(pdev, netdev_priv(ndev));
 	free_netdev(ndev);		/* free device structure */
 
 	dev_dbg(&pdev->dev, "released and freed device\n");
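The dm9000 hunk drops a cast that was never needed: netdev_priv() returns void *, and in C a void pointer converts implicitly to any object pointer type. A minimal model of why the assignment is legal without the cast:

#include <stdlib.h>

struct board_info { int irq; };

/* stand-in for netdev_priv(): hands back the private area as void * */
static void *netdev_priv_model(void *priv) { return priv; }

int main(void)
{
	struct board_info *bi = calloc(1, sizeof(*bi));
	struct board_info *db;

	if (!bi)
		return 1;
	db = netdev_priv_model(bi);  /* no cast required */
	db->irq = 42;
	free(bi);
	return 0;
}
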
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index c7e242b6..77d08e6 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -4892,11 +4892,11 @@
 	} else if (hw->phy_type == e1000_phy_igp) {	/* For IGP PHY */
 		u16 cur_agc_value;
 		u16 min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE;
-		u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
-		    { IGP01E1000_PHY_AGC_A,
-			IGP01E1000_PHY_AGC_B,
-			IGP01E1000_PHY_AGC_C,
-			IGP01E1000_PHY_AGC_D
+		static const u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = {
+		       IGP01E1000_PHY_AGC_A,
+		       IGP01E1000_PHY_AGC_B,
+		       IGP01E1000_PHY_AGC_C,
+		       IGP01E1000_PHY_AGC_D
 		};
 		/* Read the AGC registers for all channels */
 		for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
@@ -5071,11 +5071,11 @@
 {
 	s32 ret_val;
 	u16 phy_data, phy_saved_data, speed, duplex, i;
-	u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
-	    { IGP01E1000_PHY_AGC_PARAM_A,
-		IGP01E1000_PHY_AGC_PARAM_B,
-		IGP01E1000_PHY_AGC_PARAM_C,
-		IGP01E1000_PHY_AGC_PARAM_D
+	static const u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = {
+	       IGP01E1000_PHY_AGC_PARAM_A,
+	       IGP01E1000_PHY_AGC_PARAM_B,
+	       IGP01E1000_PHY_AGC_PARAM_C,
+	       IGP01E1000_PHY_AGC_PARAM_D
 	};
 	u16 min_length, max_length;
 
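Both e1000_hw.c hunks make the same change: a lookup table written as a plain local array is rebuilt on the stack on every call, while a static const table is emitted once into read-only data. A sketch of the pattern with placeholder register values (not the real IGP01E1000 addresses):

#include <stdio.h>

/* placeholder register addresses */
#define PHY_AGC_A 0x12
#define PHY_AGC_B 0x13
#define PHY_AGC_C 0x14
#define PHY_AGC_D 0x15

/* lives once in .rodata; no per-call stack initialization */
static const unsigned short agc_reg_array[] = {
	PHY_AGC_A, PHY_AGC_B, PHY_AGC_C, PHY_AGC_D
};

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(agc_reg_array) / sizeof(agc_reg_array[0]); i++)
		printf("read AGC register 0x%02x\n",
		       (unsigned int)agc_reg_array[i]);
	return 0;
}
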
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 4686c39..491bf2a 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -31,7 +31,7 @@
 
 char e1000_driver_name[] = "e1000";
 static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
-#define DRV_VERSION "7.3.21-k6-NAPI"
+#define DRV_VERSION "7.3.21-k8-NAPI"
 const char e1000_driver_version[] = DRV_VERSION;
 static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
 
@@ -485,9 +485,6 @@
 	struct net_device *netdev = adapter->netdev;
 	u32 rctl, tctl;
 
-	/* signal that we're down so the interrupt handler does not
-	 * reschedule our watchdog timer */
-	set_bit(__E1000_DOWN, &adapter->flags);
 
 	/* disable receives in the hardware */
 	rctl = er32(RCTL);
@@ -508,6 +505,13 @@
 
 	e1000_irq_disable(adapter);
 
+	/*
+	 * Setting DOWN must be after irq_disable to prevent
+	 * a screaming interrupt.  Setting DOWN also prevents
+	 * timers and tasks from rescheduling.
+	 */
+	set_bit(__E1000_DOWN, &adapter->flags);
+
 	del_timer_sync(&adapter->tx_fifo_stall_timer);
 	del_timer_sync(&adapter->watchdog_timer);
 	del_timer_sync(&adapter->phy_info_timer);
@@ -967,11 +971,13 @@
 		 */
 		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
 		pci_using_dac = 1;
-	} else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
-		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 	} else {
-		pr_err("No usable DMA config, aborting\n");
-		goto err_dma;
+		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+		if (err) {
+			pr_err("No usable DMA config, aborting\n");
+			goto err_dma;
+		}
+		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
 	}
 
 	netdev->netdev_ops = &e1000_netdev_ops;
@@ -1425,13 +1431,12 @@
 	int size;
 
 	size = sizeof(struct e1000_buffer) * txdr->count;
-	txdr->buffer_info = vmalloc(size);
+	txdr->buffer_info = vzalloc(size);
 	if (!txdr->buffer_info) {
 		e_err(probe, "Unable to allocate memory for the Tx descriptor "
 		      "ring\n");
 		return -ENOMEM;
 	}
-	memset(txdr->buffer_info, 0, size);
 
 	/* round up to nearest 4K */
 
@@ -1621,13 +1626,12 @@
 	int size, desc_len;
 
 	size = sizeof(struct e1000_buffer) * rxdr->count;
-	rxdr->buffer_info = vmalloc(size);
+	rxdr->buffer_info = vzalloc(size);
 	if (!rxdr->buffer_info) {
 		e_err(probe, "Unable to allocate memory for the Rx descriptor "
 		      "ring\n");
 		return -ENOMEM;
 	}
-	memset(rxdr->buffer_info, 0, size);
 
 	desc_len = sizeof(struct e1000_rx_desc);
 
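The vmalloc()-then-memset() pairs above collapse into vzalloc(), which hands back already-zeroed memory in one call. A userspace analogue, using calloc() as the zeroing allocator:

#include <stdlib.h>
#include <string.h>

struct buffer_info_model { void *skb; unsigned long dma; };

int main(void)
{
	size_t count = 256;

	/* before: allocate, then clear by hand */
	struct buffer_info_model *a = malloc(count * sizeof(*a));
	if (a)
		memset(a, 0, count * sizeof(*a));

	/* after: one call, memory arrives zeroed */
	struct buffer_info_model *b = calloc(count, sizeof(*b));

	free(a);
	free(b);
	return 0;
}
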
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c
index 10d8d98..1301eba 100644
--- a/drivers/net/e1000/e1000_param.c
+++ b/drivers/net/e1000/e1000_param.c
@@ -352,12 +352,13 @@
 	}
 	{ /* Flow Control */
 
-		struct e1000_opt_list fc_list[] =
-			{{ E1000_FC_NONE,    "Flow Control Disabled" },
-			 { E1000_FC_RX_PAUSE,"Flow Control Receive Only" },
-			 { E1000_FC_TX_PAUSE,"Flow Control Transmit Only" },
-			 { E1000_FC_FULL,    "Flow Control Enabled" },
-			 { E1000_FC_DEFAULT, "Flow Control Hardware Default" }};
+		static const struct e1000_opt_list fc_list[] = {
+		       { E1000_FC_NONE, "Flow Control Disabled" },
+		       { E1000_FC_RX_PAUSE, "Flow Control Receive Only" },
+		       { E1000_FC_TX_PAUSE, "Flow Control Transmit Only" },
+		       { E1000_FC_FULL, "Flow Control Enabled" },
+		       { E1000_FC_DEFAULT, "Flow Control Hardware Default" }
+		};
 
 		opt = (struct e1000_option) {
 			.type = list_option,
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index 7236f1a..e57e409 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -52,6 +52,7 @@
 			      (ID_LED_DEF1_DEF2))
 
 #define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
+#define AN_RETRY_COUNT          5 /* Autoneg Retry Count value */
 #define E1000_BASE1000T_STATUS          10
 #define E1000_IDLE_ERROR_COUNT_MASK     0xFF
 #define E1000_RECEIVE_ERROR_COUNTER     21
@@ -74,6 +75,9 @@
 static s32 e1000_led_on_82574(struct e1000_hw *hw);
 static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw);
 static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw);
+static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw);
+static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw);
+static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw);
 
 /**
  *  e1000_init_phy_params_82571 - Init PHY func ptrs.
@@ -107,6 +111,8 @@
 	case e1000_82574:
 	case e1000_82583:
 		phy->type		 = e1000_phy_bm;
+		phy->ops.acquire = e1000_get_hw_semaphore_82574;
+		phy->ops.release = e1000_put_hw_semaphore_82574;
 		break;
 	default:
 		return -E1000_ERR_PHY;
@@ -200,6 +206,17 @@
 		break;
 	}
 
+	/* Function Pointers */
+	switch (hw->mac.type) {
+	case e1000_82574:
+	case e1000_82583:
+		nvm->ops.acquire = e1000_get_hw_semaphore_82574;
+		nvm->ops.release = e1000_put_hw_semaphore_82574;
+		break;
+	default:
+		break;
+	}
+
 	return 0;
 }
 
@@ -542,6 +559,94 @@
 	swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
 	ew32(SWSM, swsm);
 }
+
+/**
+ *  e1000_get_hw_semaphore_82573 - Acquire hardware semaphore
+ *  @hw: pointer to the HW structure
+ *
+ *  Acquire the HW semaphore during reset.
+ *
+ **/
+static s32 e1000_get_hw_semaphore_82573(struct e1000_hw *hw)
+{
+	u32 extcnf_ctrl;
+	s32 ret_val = 0;
+	s32 i = 0;
+
+	extcnf_ctrl = er32(EXTCNF_CTRL);
+	extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+	do {
+		ew32(EXTCNF_CTRL, extcnf_ctrl);
+		extcnf_ctrl = er32(EXTCNF_CTRL);
+
+		if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
+			break;
+
+		extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+
+		msleep(2);
+		i++;
+	} while (i < MDIO_OWNERSHIP_TIMEOUT);
+
+	if (i == MDIO_OWNERSHIP_TIMEOUT) {
+		/* Release semaphores */
+		e1000_put_hw_semaphore_82573(hw);
+		e_dbg("Driver can't access the PHY\n");
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_put_hw_semaphore_82573 - Release hardware semaphore
+ *  @hw: pointer to the HW structure
+ *
+ *  Release hardware semaphore used during reset.
+ *
+ **/
+static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw)
+{
+	u32 extcnf_ctrl;
+
+	extcnf_ctrl = er32(EXTCNF_CTRL);
+	extcnf_ctrl &= ~E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+	ew32(EXTCNF_CTRL, extcnf_ctrl);
+}
+
+static DEFINE_MUTEX(swflag_mutex);
+
+/**
+ *  e1000_get_hw_semaphore_82574 - Acquire hardware semaphore
+ *  @hw: pointer to the HW structure
+ *
+ *  Acquire the HW semaphore to access the PHY or NVM.
+ *
+ **/
+static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw)
+{
+	s32 ret_val;
+
+	mutex_lock(&swflag_mutex);
+	ret_val = e1000_get_hw_semaphore_82573(hw);
+	if (ret_val)
+		mutex_unlock(&swflag_mutex);
+	return ret_val;
+}
+
+/**
+ *  e1000_put_hw_semaphore_82574 - Release hardware semaphore
+ *  @hw: pointer to the HW structure
+ *
+ *  Release hardware semaphore used to access the PHY or NVM
+ *
+ **/
+static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw)
+{
+	e1000_put_hw_semaphore_82573(hw);
+	mutex_unlock(&swflag_mutex);
+}
 
 /**
  *  e1000_acquire_nvm_82571 - Request for access to the EEPROM
@@ -562,8 +667,6 @@
 
 	switch (hw->mac.type) {
 	case e1000_82573:
-	case e1000_82574:
-	case e1000_82583:
 		break;
 	default:
 		ret_val = e1000e_acquire_nvm(hw);
@@ -853,9 +956,8 @@
  **/
 static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
 {
-	u32 ctrl, extcnf_ctrl, ctrl_ext, icr;
+	u32 ctrl, ctrl_ext, icr;
 	s32 ret_val;
-	u16 i = 0;
 
 	/*
 	 * Prevent the PCI-E bus from sticking if there is no TLP connection
@@ -880,33 +982,33 @@
 	 */
 	switch (hw->mac.type) {
 	case e1000_82573:
+		ret_val = e1000_get_hw_semaphore_82573(hw);
+		break;
 	case e1000_82574:
 	case e1000_82583:
-		extcnf_ctrl = er32(EXTCNF_CTRL);
-		extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
-
-		do {
-			ew32(EXTCNF_CTRL, extcnf_ctrl);
-			extcnf_ctrl = er32(EXTCNF_CTRL);
-
-			if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
-				break;
-
-			extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
-
-			msleep(2);
-			i++;
-		} while (i < MDIO_OWNERSHIP_TIMEOUT);
+		ret_val = e1000_get_hw_semaphore_82574(hw);
 		break;
 	default:
 		break;
 	}
+	if (ret_val)
+		e_dbg("Cannot acquire MDIO ownership\n");
 
 	ctrl = er32(CTRL);
 
 	e_dbg("Issuing a global reset to MAC\n");
 	ew32(CTRL, ctrl | E1000_CTRL_RST);
 
+	/* Must release MDIO ownership and mutex after MAC reset. */
+	switch (hw->mac.type) {
+	case e1000_82574:
+	case e1000_82583:
+		e1000_put_hw_semaphore_82574(hw);
+		break;
+	default:
+		break;
+	}
+
 	if (hw->nvm.type == e1000_nvm_flash_hw) {
 		udelay(10);
 		ctrl_ext = er32(CTRL_EXT);
@@ -1402,6 +1504,8 @@
 	u32 rxcw;
 	u32 ctrl;
 	u32 status;
+	u32 txcw;
+	u32 i;
 	s32 ret_val = 0;
 
 	ctrl = er32(CTRL);
@@ -1422,8 +1526,10 @@
 				    e1000_serdes_link_autoneg_progress;
 				mac->serdes_has_link = false;
 				e_dbg("AN_UP     -> AN_PROG\n");
+			} else {
+				mac->serdes_has_link = true;
 			}
-		break;
+			break;
 
 		case e1000_serdes_link_forced_up:
 			/*
@@ -1431,8 +1537,10 @@
 			 * auto-negotiation in the TXCW register and disable
 			 * forced link in the Device Control register in an
 			 * attempt to auto-negotiate with our link partner.
+			 * If the partner code word is null, stop forcing
+			 * and restart auto-negotiation.
 			 */
-			if (rxcw & E1000_RXCW_C) {
+			if ((rxcw & E1000_RXCW_C) || !(rxcw & E1000_RXCW_CW))  {
 				/* Enable autoneg, and unforce link up */
 				ew32(TXCW, mac->txcw);
 				ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
@@ -1440,6 +1548,8 @@
 				    e1000_serdes_link_autoneg_progress;
 				mac->serdes_has_link = false;
 				e_dbg("FORCED_UP -> AN_PROG\n");
+			} else {
+				mac->serdes_has_link = true;
 			}
 			break;
 
@@ -1495,6 +1605,7 @@
 			ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
 			mac->serdes_link_state =
 			    e1000_serdes_link_autoneg_progress;
+			mac->serdes_has_link = false;
 			e_dbg("DOWN      -> AN_PROG\n");
 			break;
 		}
@@ -1505,16 +1616,32 @@
 			e_dbg("ANYSTATE  -> DOWN\n");
 		} else {
 			/*
-			 * We have sync, and can tolerate one invalid (IV)
-			 * codeword before declaring link down, so reread
-			 * to look again.
+			 * Check several times; if Sync and Config are
+			 * both consistently 1, simply ignore the
+			 * Invalid bit and restart autoneg.
 			 */
-			udelay(10);
-			rxcw = er32(RXCW);
-			if (rxcw & E1000_RXCW_IV) {
-				mac->serdes_link_state = e1000_serdes_link_down;
+			for (i = 0; i < AN_RETRY_COUNT; i++) {
+				udelay(10);
+				rxcw = er32(RXCW);
+				if ((rxcw & E1000_RXCW_IV) &&
+				    !((rxcw & E1000_RXCW_SYNCH) &&
+				      (rxcw & E1000_RXCW_C))) {
+					mac->serdes_has_link = false;
+					mac->serdes_link_state =
+					    e1000_serdes_link_down;
+					e_dbg("ANYSTATE  -> DOWN\n");
+					break;
+				}
+			}
+
+			if (i == AN_RETRY_COUNT) {
+				txcw = er32(TXCW);
+				txcw |= E1000_TXCW_ANE;
+				ew32(TXCW, txcw);
+				mac->serdes_link_state =
+				    e1000_serdes_link_autoneg_progress;
 				mac->serdes_has_link = false;
-				e_dbg("ANYSTATE  -> DOWN\n");
+				e_dbg("ANYSTATE  -> AN_PROG\n");
 			}
 		}
 	}
@@ -1897,7 +2024,7 @@
 				  | FLAG_HAS_AMT
 				  | FLAG_HAS_CTRLEXT_ON_LOAD,
 	.flags2			  = FLAG2_CHECK_PHY_HANG,
-	.pba			= 36,
+	.pba			= 32,
 	.max_hw_frame_size	= DEFAULT_JUMBO,
 	.get_variants		= e1000_get_variants_82571,
 	.mac_ops		= &e82571_mac_ops,
@@ -1914,7 +2041,7 @@
 				  | FLAG_HAS_SMART_POWER_DOWN
 				  | FLAG_HAS_AMT
 				  | FLAG_HAS_CTRLEXT_ON_LOAD,
-	.pba			= 36,
+	.pba			= 32,
 	.max_hw_frame_size	= ETH_FRAME_LEN + ETH_FCS_LEN,
 	.get_variants		= e1000_get_variants_82571,
 	.mac_ops		= &e82571_mac_ops,
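The 82574/82583 acquire/release pair added above layers a host-side mutex over the hardware semaphore: callers serialize on the mutex first, the hardware semaphore is only held while the mutex is held, and a failed hardware acquire drops the mutex again so other callers can retry. A pthread sketch of that pairing (hw_sem_acquire()/hw_sem_release() are stand-ins, not driver functions):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t swflag_mutex = PTHREAD_MUTEX_INITIALIZER;

static int hw_sem_acquire(void) { return 0; }  /* pretend it succeeds */
static void hw_sem_release(void) { }

static int get_hw_semaphore(void)
{
	int ret;

	pthread_mutex_lock(&swflag_mutex);
	ret = hw_sem_acquire();
	if (ret)  /* never hold the mutex without the HW semaphore */
		pthread_mutex_unlock(&swflag_mutex);
	return ret;
}

static void put_hw_semaphore(void)
{
	hw_sem_release();
	pthread_mutex_unlock(&swflag_mutex);
}

int main(void)
{
	if (get_hw_semaphore() == 0) {
		puts("exclusive PHY/NVM access here");
		put_hw_semaphore();
	}
	return 0;
}
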
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index d3f7a9c..7245dc2 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -488,6 +488,9 @@
 #define E1000_BLK_PHY_RESET   12
 #define E1000_ERR_SWFW_SYNC 13
 #define E1000_NOT_IMPLEMENTED 14
+#define E1000_ERR_INVALID_ARGUMENT  16
+#define E1000_ERR_NO_SPACE          17
+#define E1000_ERR_NVM_PBA_SECTION   18
 
 /* Loop limit on how long we wait for auto-negotiation to complete */
 #define FIBER_LINK_UP_LIMIT               50
@@ -516,6 +519,7 @@
 #define E1000_TXCW_ANE        0x80000000        /* Auto-neg enable */
 
 /* Receive Configuration Word */
+#define E1000_RXCW_CW         0x0000ffff        /* RxConfigWord mask */
 #define E1000_RXCW_IV         0x08000000        /* Receive config invalid */
 #define E1000_RXCW_C          0x20000000        /* Receive config */
 #define E1000_RXCW_SYNCH      0x40000000        /* Receive config synch */
@@ -649,13 +653,16 @@
 /* Mask bits for fields in Word 0x03 of the EEPROM */
 #define NVM_COMPAT_LOM    0x0800
 
+/* length of string needed to store PBA number */
+#define E1000_PBANUM_LENGTH             11
+
 /* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
 #define NVM_SUM                    0xBABA
 
 /* PBA (printed board assembly) number words */
 #define NVM_PBA_OFFSET_0           8
 #define NVM_PBA_OFFSET_1           9
-
+#define NVM_PBA_PTR_GUARD          0xFAFA
 #define NVM_WORD_SIZE_BASE_SHIFT   6
 
 /* NVM Commands - SPI */
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index fdc67fe..2c913b8 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -482,6 +482,7 @@
 
 extern void e1000e_check_options(struct e1000_adapter *adapter);
 extern void e1000e_set_ethtool_ops(struct net_device *netdev);
+extern void e1000e_led_blink_task(struct work_struct *work);
 
 extern int e1000e_up(struct e1000_adapter *adapter);
 extern void e1000e_down(struct e1000_adapter *adapter);
@@ -513,7 +514,8 @@
 extern struct e1000_info e1000_pch2_info;
 extern struct e1000_info e1000_es2_info;
 
-extern s32 e1000e_read_pba_num(struct e1000_hw *hw, u32 *pba_num);
+extern s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
+					 u32 pba_num_size);
 
 extern s32  e1000e_commit_phy(struct e1000_hw *hw);
 
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index 8984d16..39349d6 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -194,20 +194,6 @@
 	return 0;
 }
 
-static u32 e1000_get_link(struct net_device *netdev)
-{
-	struct e1000_adapter *adapter = netdev_priv(netdev);
-	struct e1000_hw *hw = &adapter->hw;
-
-	/*
-	 * Avoid touching hardware registers when possible, otherwise
-	 * link negotiation can get messed up when user-level scripts
-	 * are rapidly polling the driver to see if link is up.
-	 */
-	return netif_running(netdev) ? netif_carrier_ok(netdev) :
-	    !!(er32(STATUS) & E1000_STATUS_LU);
-}
-
 static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
 {
 	struct e1000_mac_info *mac = &adapter->hw.mac;
@@ -1263,6 +1249,7 @@
 	u32 ctrl_reg = 0;
 	u32 stat_reg = 0;
 	u16 phy_reg = 0;
+	s32 ret_val = 0;
 
 	hw->mac.autoneg = 0;
 
@@ -1322,7 +1309,13 @@
 	case e1000_phy_82577:
 	case e1000_phy_82578:
 		/* Workaround: K1 must be disabled for stable 1Gbps operation */
+		ret_val = hw->phy.ops.acquire(hw);
+		if (ret_val) {
+			e_err("Cannot setup 1Gbps loopback.\n");
+			return ret_val;
+		}
 		e1000_configure_k1_ich8lan(hw, false);
+		hw->phy.ops.release(hw);
 		break;
 	case e1000_phy_82579:
 		/* Disable PHY energy detect power down */
@@ -1860,7 +1853,7 @@
 /* bit defines for adapter->led_status */
 #define E1000_LED_ON		0
 
-static void e1000e_led_blink_task(struct work_struct *work)
+void e1000e_led_blink_task(struct work_struct *work)
 {
 	struct e1000_adapter *adapter = container_of(work,
 	                                struct e1000_adapter, led_blink_task);
@@ -1892,7 +1885,6 @@
 	    (hw->mac.type == e1000_pch2lan) ||
 	    (hw->mac.type == e1000_82583) ||
 	    (hw->mac.type == e1000_82574)) {
-		INIT_WORK(&adapter->led_blink_task, e1000e_led_blink_task);
 		if (!adapter->blink_timer.function) {
 			init_timer(&adapter->blink_timer);
 			adapter->blink_timer.function =
@@ -1986,6 +1978,9 @@
 			p = (char *) adapter +
 					e1000_gstrings_stats[i].stat_offset;
 			break;
+		default:
+			data[i] = 0;
+			continue;
 		}
 
 		data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
@@ -2024,7 +2019,7 @@
 	.get_msglevel		= e1000_get_msglevel,
 	.set_msglevel		= e1000_set_msglevel,
 	.nway_reset		= e1000_nway_reset,
-	.get_link		= e1000_get_link,
+	.get_link		= ethtool_op_get_link,
 	.get_eeprom_len		= e1000_get_eeprom_len,
 	.get_eeprom		= e1000_get_eeprom,
 	.set_eeprom		= e1000_set_eeprom,
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index e3374d9..5080372 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -338,12 +338,17 @@
 	}
 
 	phy->id = e1000_phy_unknown;
-	ret_val = e1000e_get_phy_id(hw);
-	if (ret_val)
-		goto out;
-	if ((phy->id == 0) || (phy->id == PHY_REVISION_MASK)) {
+	switch (hw->mac.type) {
+	default:
+		ret_val = e1000e_get_phy_id(hw);
+		if (ret_val)
+			goto out;
+		if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
+			break;
+		/* fall-through */
+	case e1000_pch2lan:
 		/*
-		 * In case the PHY needs to be in mdio slow mode (eg. 82577),
+		 * In case the PHY needs to be in mdio slow mode,
 		 * set slow mode and try to get the PHY id again.
 		 */
 		ret_val = e1000_set_mdio_slow_mode_hv(hw);
@@ -352,6 +357,7 @@
 		ret_val = e1000e_get_phy_id(hw);
 		if (ret_val)
 			goto out;
+		break;
 	}
 	phy->type = e1000e_get_phy_type_from_id(phy->id);
 
@@ -3591,7 +3597,7 @@
 	ew32(PHY_CTRL, phy_ctrl);
 
 	if (hw->mac.type >= e1000_pchlan) {
-		e1000_oem_bits_config_ich8lan(hw, true);
+		e1000_oem_bits_config_ich8lan(hw, false);
 		ret_val = hw->phy.ops.acquire(hw);
 		if (ret_val)
 			return;
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index 0fd4eb5..8377523 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -2139,6 +2139,119 @@
 }
 
 /**
+ *  e1000_read_pba_string_generic - Read device part number
+ *  @hw: pointer to the HW structure
+ *  @pba_num: pointer to device part number
+ *  @pba_num_size: size of part number buffer
+ *
+ *  Reads the product board assembly (PBA) number from the EEPROM and stores
+ *  the value in pba_num.
+ **/
+s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
+				  u32 pba_num_size)
+{
+	s32 ret_val;
+	u16 nvm_data;
+	u16 pba_ptr;
+	u16 offset;
+	u16 length;
+
+	if (pba_num == NULL) {
+		e_dbg("PBA string buffer was null\n");
+		ret_val = E1000_ERR_INVALID_ARGUMENT;
+		goto out;
+	}
+
+	ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
+	if (ret_val) {
+		e_dbg("NVM Read Error\n");
+		goto out;
+	}
+
+	ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr);
+	if (ret_val) {
+		e_dbg("NVM Read Error\n");
+		goto out;
+	}
+
+	/*
+	 * If nvm_data is not the pointer guard, the PBA must be in legacy
+	 * format, which means pba_ptr is actually our second data word for
+	 * the PBA number and we can decode it into an ASCII string.
+	 */
+	if (nvm_data != NVM_PBA_PTR_GUARD) {
+		e_dbg("NVM PBA number is not stored as string\n");
+
+		/* we will need 11 characters to store the PBA */
+		if (pba_num_size < 11) {
+			e_dbg("PBA string buffer too small\n");
+			return E1000_ERR_NO_SPACE;
+		}
+
+		/* extract hex string from data and pba_ptr */
+		pba_num[0] = (nvm_data >> 12) & 0xF;
+		pba_num[1] = (nvm_data >> 8) & 0xF;
+		pba_num[2] = (nvm_data >> 4) & 0xF;
+		pba_num[3] = nvm_data & 0xF;
+		pba_num[4] = (pba_ptr >> 12) & 0xF;
+		pba_num[5] = (pba_ptr >> 8) & 0xF;
+		pba_num[6] = '-';
+		pba_num[7] = 0;
+		pba_num[8] = (pba_ptr >> 4) & 0xF;
+		pba_num[9] = pba_ptr & 0xF;
+
+		/* put a null character on the end of our string */
+		pba_num[10] = '\0';
+
+		/* switch all the data but the '-' to hex char */
+		for (offset = 0; offset < 10; offset++) {
+			if (pba_num[offset] < 0xA)
+				pba_num[offset] += '0';
+			else if (pba_num[offset] < 0x10)
+				pba_num[offset] += 'A' - 0xA;
+		}
+
+		goto out;
+	}
+
+	ret_val = e1000_read_nvm(hw, pba_ptr, 1, &length);
+	if (ret_val) {
+		e_dbg("NVM Read Error\n");
+		goto out;
+	}
+
+	if (length == 0xFFFF || length == 0) {
+		e_dbg("NVM PBA number section invalid length\n");
+		ret_val = E1000_ERR_NVM_PBA_SECTION;
+		goto out;
+	}
+	/* check if pba_num buffer is big enough */
+	if (pba_num_size < (((u32)length * 2) - 1)) {
+		e_dbg("PBA string buffer too small\n");
+		ret_val = E1000_ERR_NO_SPACE;
+		goto out;
+	}
+
+	/* trim pba length from start of string */
+	pba_ptr++;
+	length--;
+
+	for (offset = 0; offset < length; offset++) {
+		ret_val = e1000_read_nvm(hw, pba_ptr + offset, 1, &nvm_data);
+		if (ret_val) {
+			e_dbg("NVM Read Error\n");
+			goto out;
+		}
+		pba_num[offset * 2] = (u8)(nvm_data >> 8);
+		pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF);
+	}
+	pba_num[offset * 2] = '\0';
+
+out:
+	return ret_val;
+}
+
+/**
  *  e1000_read_mac_addr_generic - Read device MAC address
  *  @hw: pointer to the HW structure
  *
@@ -2579,25 +2692,3 @@
 out:
 	return ret_val;
 }
-
-s32 e1000e_read_pba_num(struct e1000_hw *hw, u32 *pba_num)
-{
-	s32 ret_val;
-	u16 nvm_data;
-
-	ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
-	if (ret_val) {
-		e_dbg("NVM Read Error\n");
-		return ret_val;
-	}
-	*pba_num = (u32)(nvm_data << 16);
-
-	ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data);
-	if (ret_val) {
-		e_dbg("NVM Read Error\n");
-		return ret_val;
-	}
-	*pba_num |= nvm_data;
-
-	return 0;
-}
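The legacy branch of e1000_read_pba_string_generic() packs two 16-bit NVM words into a ten-character string: six hex nibbles, a '-', then three more nibbles (index 7 holds the raw value 0, which the conversion loop turns into '0'; the '-' is left untouched because its ASCII code is >= 0x10). A standalone decode with made-up NVM words:

#include <stdio.h>

int main(void)
{
	unsigned short nvm_data = 0xC468;  /* assumed word 0x08 */
	unsigned short pba_ptr = 0x2703;   /* assumed word 0x09 */
	char pba[11];
	int i;

	pba[0] = (nvm_data >> 12) & 0xF;
	pba[1] = (nvm_data >> 8) & 0xF;
	pba[2] = (nvm_data >> 4) & 0xF;
	pba[3] = nvm_data & 0xF;
	pba[4] = (pba_ptr >> 12) & 0xF;
	pba[5] = (pba_ptr >> 8) & 0xF;
	pba[6] = '-';
	pba[7] = 0;                 /* nibble value 0 -> '0' below */
	pba[8] = (pba_ptr >> 4) & 0xF;
	pba[9] = pba_ptr & 0xF;
	pba[10] = '\0';

	for (i = 0; i < 10; i++) {  /* nibble value -> ASCII hex digit */
		if (pba[i] < 0xA)
			pba[i] += '0';
		else if (pba[i] < 0x10)
			pba[i] += 'A' - 0xA;
	}
	printf("PBA: %s\n", pba);   /* prints "PBA: C46827-003" */
	return 0;
}
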
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index c4ca162..02d093d 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -54,7 +54,7 @@
 
 #define DRV_EXTRAVERSION "-k2"
 
-#define DRV_VERSION "1.2.7" DRV_EXTRAVERSION
+#define DRV_VERSION "1.2.20" DRV_EXTRAVERSION
 char e1000e_driver_name[] = "e1000e";
 const char e1000e_driver_version[] = DRV_VERSION;
 
@@ -2059,10 +2059,9 @@
 	int err = -ENOMEM, size;
 
 	size = sizeof(struct e1000_buffer) * tx_ring->count;
-	tx_ring->buffer_info = vmalloc(size);
+	tx_ring->buffer_info = vzalloc(size);
 	if (!tx_ring->buffer_info)
 		goto err;
-	memset(tx_ring->buffer_info, 0, size);
 
 	/* round up to nearest 4K */
 	tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
@@ -2095,10 +2094,9 @@
 	int i, size, desc_len, err = -ENOMEM;
 
 	size = sizeof(struct e1000_buffer) * rx_ring->count;
-	rx_ring->buffer_info = vmalloc(size);
+	rx_ring->buffer_info = vzalloc(size);
 	if (!rx_ring->buffer_info)
 		goto err;
-	memset(rx_ring->buffer_info, 0, size);
 
 	for (i = 0; i < rx_ring->count; i++) {
 		buffer_info = &rx_ring->buffer_info[i];
@@ -2132,7 +2130,7 @@
 	}
 err:
 	vfree(rx_ring->buffer_info);
-	e_err("Unable to allocate memory for the transmit descriptor ring\n");
+	e_err("Unable to allocate memory for the receive descriptor ring\n");
 	return err;
 }
 
@@ -4595,7 +4593,7 @@
 			i += tx_ring->count;
 		i--;
 		buffer_info = &tx_ring->buffer_info[i];
-		e1000_put_txbuf(adapter, buffer_info);;
+		e1000_put_txbuf(adapter, buffer_info);
 	}
 
 	return 0;
@@ -4631,7 +4629,7 @@
 
 	i = tx_ring->next_to_use;
 
-	while (count--) {
+	do {
 		buffer_info = &tx_ring->buffer_info[i];
 		tx_desc = E1000_TX_DESC(*tx_ring, i);
 		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
@@ -4642,7 +4640,7 @@
 		i++;
 		if (i == tx_ring->count)
 			i = 0;
-	}
+	} while (--count > 0);
 
 	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
 
@@ -5465,6 +5463,36 @@
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
+
+static irqreturn_t e1000_intr_msix(int irq, void *data)
+{
+	struct net_device *netdev = data;
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	int vector, msix_irq;
+
+	if (adapter->msix_entries) {
+		vector = 0;
+		msix_irq = adapter->msix_entries[vector].vector;
+		disable_irq(msix_irq);
+		e1000_intr_msix_rx(msix_irq, netdev);
+		enable_irq(msix_irq);
+
+		vector++;
+		msix_irq = adapter->msix_entries[vector].vector;
+		disable_irq(msix_irq);
+		e1000_intr_msix_tx(msix_irq, netdev);
+		enable_irq(msix_irq);
+
+		vector++;
+		msix_irq = adapter->msix_entries[vector].vector;
+		disable_irq(msix_irq);
+		e1000_msix_other(msix_irq, netdev);
+		enable_irq(msix_irq);
+	}
+
+	return IRQ_HANDLED;
+}
+
 /*
  * Polling 'interrupt' - used by things like netconsole to send skbs
  * without having to re-enable interrupts. It's not called while
@@ -5474,10 +5502,21 @@
 {
 	struct e1000_adapter *adapter = netdev_priv(netdev);
 
-	disable_irq(adapter->pdev->irq);
-	e1000_intr(adapter->pdev->irq, netdev);
-
-	enable_irq(adapter->pdev->irq);
+	switch (adapter->int_mode) {
+	case E1000E_INT_MODE_MSIX:
+		e1000_intr_msix(adapter->pdev->irq, netdev);
+		break;
+	case E1000E_INT_MODE_MSI:
+		disable_irq(adapter->pdev->irq);
+		e1000_intr_msi(adapter->pdev->irq, netdev);
+		enable_irq(adapter->pdev->irq);
+		break;
+	default: /* E1000E_INT_MODE_LEGACY */
+		disable_irq(adapter->pdev->irq);
+		e1000_intr(adapter->pdev->irq, netdev);
+		enable_irq(adapter->pdev->irq);
+		break;
+	}
 }
 #endif
 
@@ -5587,7 +5626,8 @@
 {
 	struct e1000_hw *hw = &adapter->hw;
 	struct net_device *netdev = adapter->netdev;
-	u32 pba_num;
+	u32 ret_val;
+	u8 pba_str[E1000_PBANUM_LENGTH];
 
 	/* print bus type/speed/width info */
 	e_info("(PCI Express:2.5GB/s:%s) %pM\n",
@@ -5598,9 +5638,12 @@
 	       netdev->dev_addr);
 	e_info("Intel(R) PRO/%s Network Connection\n",
 	       (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
-	e1000e_read_pba_num(hw, &pba_num);
-	e_info("MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
-	       hw->mac.type, hw->phy.type, (pba_num >> 8), (pba_num & 0xff));
+	ret_val = e1000_read_pba_string_generic(hw, pba_str,
+						E1000_PBANUM_LENGTH);
+	if (ret_val)
+		strcpy(pba_str, "Unknown");
+	e_info("MAC: %d, PHY: %d, PBA No: %s\n",
+	       hw->mac.type, hw->phy.type, pba_str);
 }
 
 static void e1000_eeprom_checks(struct e1000_adapter *adapter)
@@ -5864,6 +5907,7 @@
 	INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
 	INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
 	INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);
+	INIT_WORK(&adapter->led_blink_task, e1000e_led_blink_task);
 
 	/* Initialize link parameters. User can change them with ethtool */
 	adapter->hw.mac.autoneg = 1;
@@ -5984,8 +6028,8 @@
 	bool down = test_bit(__E1000_DOWN, &adapter->state);
 
 	/*
-	 * flush_scheduled work may reschedule our watchdog task, so
-	 * explicitly disable watchdog tasks from being rescheduled
+	 * The timers and work items may reschedule themselves, so
+	 * explicitly prevent any further rescheduling before cancelling.
 	 */
 	if (!down)
 		set_bit(__E1000_DOWN, &adapter->state);
@@ -5996,8 +6040,8 @@
 	cancel_work_sync(&adapter->watchdog_task);
 	cancel_work_sync(&adapter->downshift_task);
 	cancel_work_sync(&adapter->update_phy_task);
+	cancel_work_sync(&adapter->led_blink_task);
 	cancel_work_sync(&adapter->print_hang_task);
-	flush_scheduled_work();
 
 	if (!(netdev->flags & IFF_UP))
 		e1000_power_down_phy(adapter);
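The netpoll hook above stops assuming a single legacy IRQ and dispatches on the negotiated interrupt mode; in MSI-X mode each vector (Rx, Tx, other) is disabled, run, and re-enabled in turn. A compilable model of the dispatch shape, with stub handlers:

#include <stdio.h>

enum int_mode { INT_MODE_LEGACY, INT_MODE_MSI, INT_MODE_MSIX };

static void intr_legacy(void) { puts("legacy handler"); }
static void intr_msi(void)    { puts("MSI handler"); }
static void intr_msix(void)   { puts("per-vector Rx/Tx/other handlers"); }

static void netpoll_model(enum int_mode mode)
{
	switch (mode) {
	case INT_MODE_MSIX:
		intr_msix();  /* walks every vector, one at a time */
		break;
	case INT_MODE_MSI:
		intr_msi();
		break;
	default:              /* INT_MODE_LEGACY */
		intr_legacy();
		break;
	}
}

int main(void)
{
	netpoll_model(INT_MODE_MSIX);
	return 0;
}
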
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c
index 3d36911..a9612b0 100644
--- a/drivers/net/e1000e/param.c
+++ b/drivers/net/e1000e/param.c
@@ -421,7 +421,7 @@
 		static const struct e1000_option opt = {
 			.type = enable_option,
 			.name = "CRC Stripping",
-			.err  = "defaulting to enabled",
+			.err  = "defaulting to Enabled",
 			.def  = OPTION_ENABLED
 		};
 
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index 3d3dc0c..95da386 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -226,6 +226,13 @@
 	}
 	*data = (u16) mdic;
 
+	/*
+	 * Allow some time after each MDIC transaction to avoid
+	 * reading duplicate data in the next MDIC transaction.
+	 */
+	if (hw->mac.type == e1000_pch2lan)
+		udelay(100);
+
 	return 0;
 }
 
@@ -279,6 +286,13 @@
 		return -E1000_ERR_PHY;
 	}
 
+	/*
+	 * Allow some time after each MDIC transaction to avoid
+	 * reading duplicate data in the next MDIC transaction.
+	 */
+	if (hw->mac.type == e1000_pch2lan)
+		udelay(100);
+
 	return 0;
 }
 
@@ -1840,11 +1854,12 @@
 	u16 phy_data, i, agc_value = 0;
 	u16 cur_agc_index, max_agc_index = 0;
 	u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
-	u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] =
-							 {IGP02E1000_PHY_AGC_A,
-							  IGP02E1000_PHY_AGC_B,
-							  IGP02E1000_PHY_AGC_C,
-							  IGP02E1000_PHY_AGC_D};
+	static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = {
+	       IGP02E1000_PHY_AGC_A,
+	       IGP02E1000_PHY_AGC_B,
+	       IGP02E1000_PHY_AGC_C,
+	       IGP02E1000_PHY_AGC_D
+	};
 
 	/* Read the AGC registers for all channels */
 	for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index 7c82631..9e19fbc 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -302,7 +302,7 @@
 #define ee_id_eepro10p0 0x10   /* ID for eepro/10+ */
 #define ee_id_eepro10p1 0x31
 
-#define TX_TIMEOUT 40
+#define TX_TIMEOUT ((4*HZ)/10)
 
 /* Index to functions, as function prototypes. */
 
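The eepro change expresses the watchdog timeout in jiffies rather than as a bare tick count, so it means 0.4 seconds on every kernel instead of only on HZ=100 builds (where the old literal 40 happened to be correct). A quick check of the arithmetic under two assumed HZ values:

#include <stdio.h>

#define TIMEOUT_TICKS(hz) ((4 * (hz)) / 10)  /* 0.4 s in ticks */

int main(void)
{
	printf("HZ=100:  TX_TIMEOUT = %d ticks\n", TIMEOUT_TICKS(100));
	printf("HZ=1000: TX_TIMEOUT = %d ticks\n", TIMEOUT_TICKS(1000));
	return 0;
}
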
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 8e745e7..a724a2d 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -130,19 +130,6 @@
 
 /* utility functions */
 
-#define ehea_info(fmt, args...) \
-	printk(KERN_INFO DRV_NAME ": " fmt "\n", ## args)
-
-#define ehea_error(fmt, args...) \
-	printk(KERN_ERR DRV_NAME ": Error in %s: " fmt "\n", __func__, ## args)
-
-#ifdef DEBUG
-#define ehea_debug(fmt, args...) \
-	printk(KERN_DEBUG DRV_NAME ": " fmt, ## args)
-#else
-#define ehea_debug(fmt, args...) do {} while (0)
-#endif
-
 void ehea_dump(void *adr, int len, char *msg);
 
 #define EHEA_BMASK(pos, length) (((pos) << 16) + (length))
@@ -515,6 +502,4 @@
 int ehea_sense_port_attr(struct ehea_port *port);
 int ehea_set_portspeed(struct ehea_port *port, u32 port_speed);
 
-extern struct work_struct ehea_rereg_mr_task;
-
 #endif	/* __EHEA_H__ */
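The private ehea_info/ehea_error/ehea_debug macros removed here are replaced throughout the .c files below by the generic netdev_*()/pr_*() helpers, with a pr_fmt() define supplying the module prefix once per file. A userspace model of the pr_fmt() idea (uses the GNU ##__VA_ARGS__ extension, as the kernel does):

#include <stdio.h>

#define pr_fmt(fmt) "ehea: " fmt  /* prefix pasted at macro expansion */
#define pr_err(fmt, ...)  fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)
#define pr_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	pr_info("Port speed successfully set: %dMbps\n", 1000);
	pr_err("no mem for cb0\n");
	return 0;
}
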
diff --git a/drivers/net/ehea/ehea_ethtool.c b/drivers/net/ehea/ehea_ethtool.c
index 75b099c..afebf20 100644
--- a/drivers/net/ehea/ehea_ethtool.c
+++ b/drivers/net/ehea/ehea_ethtool.c
@@ -26,6 +26,8 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include "ehea.h"
 #include "ehea_phyp.h"
 
@@ -118,10 +120,10 @@
 	ret = ehea_set_portspeed(port, sp);
 
 	if (!ret)
-		ehea_info("%s: Port speed successfully set: %dMbps "
-			  "%s Duplex",
-			  port->netdev->name, port->port_speed,
-			  port->full_duplex == 1 ? "Full" : "Half");
+		netdev_info(dev,
+			    "Port speed successfully set: %dMbps %s Duplex\n",
+			    port->port_speed,
+			    port->full_duplex == 1 ? "Full" : "Half");
 out:
 	return ret;
 }
@@ -134,10 +136,10 @@
 	ret = ehea_set_portspeed(port, EHEA_SPEED_AUTONEG);
 
 	if (!ret)
-		ehea_info("%s: Port speed successfully set: %dMbps "
-			  "%s Duplex",
-			  port->netdev->name, port->port_speed,
-			  port->full_duplex == 1 ? "Full" : "Half");
+		netdev_info(port->netdev,
+			    "Port speed successfully set: %dMbps %s Duplex\n",
+			    port->port_speed,
+			    port->full_duplex == 1 ? "Full" : "Half");
 	return ret;
 }
 
@@ -261,6 +263,13 @@
 
 }
 
+static int ehea_set_flags(struct net_device *dev, u32 data)
+{
+	return ethtool_op_set_flags(dev, data, ETH_FLAG_LRO
+					| ETH_FLAG_TXVLAN
+					| ETH_FLAG_RXVLAN);
+}
+
 const struct ethtool_ops ehea_ethtool_ops = {
 	.get_settings = ehea_get_settings,
 	.get_drvinfo = ehea_get_drvinfo,
@@ -273,6 +282,8 @@
 	.get_ethtool_stats = ehea_get_ethtool_stats,
 	.get_rx_csum = ehea_get_rx_csum,
 	.set_settings = ehea_set_settings,
+	.get_flags = ethtool_op_get_flags,
+	.set_flags = ehea_set_flags,
 	.nway_reset = ehea_nway_reset,		/* Restart autonegotiation */
 };
 
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 182b2a7..0dfef6d 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -26,6 +26,8 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/in.h>
 #include <linux/ip.h>
 #include <linux/tcp.h>
@@ -101,7 +103,6 @@
 static int port_name_cnt;
 static LIST_HEAD(adapter_list);
 static unsigned long ehea_driver_flags;
-struct work_struct ehea_rereg_mr_task;
 static DEFINE_MUTEX(dlpar_mem_lock);
 struct ehea_fw_handle_array ehea_fw_handles;
 struct ehea_bcmc_reg_array ehea_bcmc_regs;
@@ -136,8 +137,8 @@
 	int x;
 	unsigned char *deb = adr;
 	for (x = 0; x < len; x += 16) {
-		printk(DRV_NAME " %s adr=%p ofs=%04x %016llx %016llx\n", msg,
-			  deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
+		pr_info("%s adr=%p ofs=%04x %016llx %016llx\n",
+			msg, deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
 		deb += 16;
 	}
 }
@@ -337,7 +338,7 @@
 
 	cb2 = (void *)get_zeroed_page(GFP_KERNEL);
 	if (!cb2) {
-		ehea_error("no mem for cb2");
+		netdev_err(dev, "no mem for cb2\n");
 		goto out;
 	}
 
@@ -345,7 +346,7 @@
 				      port->logical_port_id,
 				      H_PORT_CB2, H_PORT_CB2_ALL, cb2);
 	if (hret != H_SUCCESS) {
-		ehea_error("query_ehea_port failed");
+		netdev_err(dev, "query_ehea_port failed\n");
 		goto out_herr;
 	}
 
@@ -400,6 +401,7 @@
 			skb_arr_rq1[index] = netdev_alloc_skb(dev,
 							      EHEA_L_PKT_SIZE);
 			if (!skb_arr_rq1[index]) {
+				netdev_info(dev, "Unable to allocate enough skbs for the array\n");
 				pr->rq1_skba.os_skbs = fill_wqes - i;
 				break;
 			}
@@ -422,13 +424,20 @@
 	struct net_device *dev = pr->port->netdev;
 	int i;
 
-	for (i = 0; i < pr->rq1_skba.len; i++) {
+	if (nr_rq1a > pr->rq1_skba.len) {
+		netdev_err(dev, "NR_RQ1A bigger than skb array len\n");
+		return;
+	}
+
+	for (i = 0; i < nr_rq1a; i++) {
 		skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
-		if (!skb_arr_rq1[i])
+		if (!skb_arr_rq1[i]) {
+			netdev_info(dev, "Not enough memory to allocate skb array\n");
 			break;
+		}
 	}
 	/* Ring doorbell */
-	ehea_update_rq1a(pr->qp, nr_rq1a);
+	ehea_update_rq1a(pr->qp, i);
 }
 
 static int ehea_refill_rq_def(struct ehea_port_res *pr,
@@ -461,8 +470,9 @@
 		if (!skb) {
 			q_skba->os_skbs = fill_wqes - i;
 			if (q_skba->os_skbs == q_skba->len - 2) {
-				ehea_info("%s: rq%i ran dry - no mem for skb",
-					  pr->port->netdev->name, rq_nr);
+				netdev_info(pr->port->netdev,
+					    "rq%i ran dry - no mem for skb\n",
+					    rq_nr);
 				ret = -ENOMEM;
 			}
 			break;
@@ -627,8 +637,8 @@
 
 	if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
 		if (netif_msg_rx_err(pr->port)) {
-			ehea_error("Critical receive error for QP %d. "
-				   "Resetting port.", pr->qp->init_attr.qp_nr);
+			pr_err("Critical receive error for QP %d. Resetting port.\n",
+			       pr->qp->init_attr.qp_nr);
 			ehea_dump(cqe, sizeof(*cqe), "CQE");
 		}
 		ehea_schedule_port_reset(pr->port);
@@ -675,7 +685,7 @@
 	int vlan_extracted = ((cqe->status & EHEA_CQE_VLAN_TAG_XTRACT) &&
 			      pr->port->vgrp);
 
-	if (use_lro) {
+	if (skb->dev->features & NETIF_F_LRO) {
 		if (vlan_extracted)
 			lro_vlan_hwaccel_receive_skb(&pr->lro_mgr, skb,
 						     pr->port->vgrp,
@@ -730,13 +740,15 @@
 							  skb_arr_rq1_len,
 							  wqe_index);
 				if (unlikely(!skb)) {
-					if (netif_msg_rx_err(port))
-						ehea_error("LL rq1: skb=NULL");
+					netif_err(port, rx_err, dev,
+						  "LL rq1: skb=NULL\n");
 
 					skb = netdev_alloc_skb(dev,
 							       EHEA_L_PKT_SIZE);
-					if (!skb)
+					if (!skb) {
+						netdev_info(dev, "Not enough memory to allocate skb\n");
 						break;
+					}
 				}
 				skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
 						 cqe->num_bytes_transfered - 4);
@@ -746,8 +758,8 @@
 				skb = get_skb_by_index(skb_arr_rq2,
 						       skb_arr_rq2_len, cqe);
 				if (unlikely(!skb)) {
-					if (netif_msg_rx_err(port))
-						ehea_error("rq2: skb=NULL");
+					netif_err(port, rx_err, dev,
+						  "rq2: skb=NULL\n");
 					break;
 				}
 				ehea_fill_skb(dev, skb, cqe);
@@ -757,8 +769,8 @@
 				skb = get_skb_by_index(skb_arr_rq3,
 						       skb_arr_rq3_len, cqe);
 				if (unlikely(!skb)) {
-					if (netif_msg_rx_err(port))
-						ehea_error("rq3: skb=NULL");
+					netif_err(port, rx_err, dev,
+						  "rq3: skb=NULL\n");
 					break;
 				}
 				ehea_fill_skb(dev, skb, cqe);
@@ -777,7 +789,7 @@
 		}
 		cqe = ehea_poll_rq1(qp, &wqe_index);
 	}
-	if (use_lro)
+	if (dev->features & NETIF_F_LRO)
 		lro_flush_all(&pr->lro_mgr);
 
 	pr->rx_packets += processed;
@@ -830,7 +842,7 @@
 					 msecs_to_jiffies(100));
 
 		if (!ret) {
-			ehea_error("HW/SW queues out of sync");
+			pr_err("HW/SW queues out of sync\n");
 			ehea_schedule_port_reset(pr->port);
 			return;
 		}
@@ -863,14 +875,14 @@
 		}
 
 		if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
-			ehea_error("Bad send completion status=0x%04X",
-				   cqe->status);
+			pr_err("Bad send completion status=0x%04X\n",
+			       cqe->status);
 
 			if (netif_msg_tx_err(pr->port))
 				ehea_dump(cqe, sizeof(*cqe), "Send CQE");
 
 			if (cqe->status & EHEA_CQE_STAT_RESET_MASK) {
-				ehea_error("Resetting port");
+				pr_err("Resetting port\n");
 				ehea_schedule_port_reset(pr->port);
 				break;
 			}
@@ -988,8 +1000,8 @@
 
 	while (eqe) {
 		qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
-		ehea_error("QP aff_err: entry=0x%llx, token=0x%x",
-			   eqe->entry, qp_token);
+		pr_err("QP aff_err: entry=0x%llx, token=0x%x\n",
+		       eqe->entry, qp_token);
 
 		qp = port->port_res[qp_token].qp;
 
@@ -1007,7 +1019,7 @@
 	}
 
 	if (reset_port) {
-		ehea_error("Resetting port");
+		pr_err("Resetting port\n");
 		ehea_schedule_port_reset(port);
 	}
 
@@ -1035,7 +1047,7 @@
 	/* may be called via ehea_neq_tasklet() */
 	cb0 = (void *)get_zeroed_page(GFP_ATOMIC);
 	if (!cb0) {
-		ehea_error("no mem for cb0");
+		pr_err("no mem for cb0\n");
 		ret = -ENOMEM;
 		goto out;
 	}
@@ -1127,7 +1139,7 @@
 
 	cb4 = (void *)get_zeroed_page(GFP_KERNEL);
 	if (!cb4) {
-		ehea_error("no mem for cb4");
+		pr_err("no mem for cb4\n");
 		ret = -ENOMEM;
 		goto out;
 	}
@@ -1178,16 +1190,16 @@
 				break;
 			}
 		} else {
-			ehea_error("Failed sensing port speed");
+			pr_err("Failed sensing port speed\n");
 			ret = -EIO;
 		}
 	} else {
 		if (hret == H_AUTHORITY) {
-			ehea_info("Hypervisor denied setting port speed");
+			pr_info("Hypervisor denied setting port speed\n");
 			ret = -EPERM;
 		} else {
 			ret = -EIO;
-			ehea_error("Failed setting port speed");
+			pr_err("Failed setting port speed\n");
 		}
 	}
 	if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
@@ -1204,80 +1216,78 @@
 	u8 ec;
 	u8 portnum;
 	struct ehea_port *port;
+	struct net_device *dev;
 
 	ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
 	portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
 	port = ehea_get_port(adapter, portnum);
+	dev = port ? port->netdev : NULL;
 
 	switch (ec) {
 	case EHEA_EC_PORTSTATE_CHG:	/* port state change */
 
 		if (!port) {
-			ehea_error("unknown portnum %x", portnum);
+			pr_err("unknown portnum %x\n", portnum);
 			break;
 		}
 
 		if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
-			if (!netif_carrier_ok(port->netdev)) {
+			if (!netif_carrier_ok(dev)) {
 				ret = ehea_sense_port_attr(port);
 				if (ret) {
-					ehea_error("failed resensing port "
-						   "attributes");
+					netdev_err(dev, "failed resensing port attributes\n");
 					break;
 				}
 
-				if (netif_msg_link(port))
-					ehea_info("%s: Logical port up: %dMbps "
-						  "%s Duplex",
-						  port->netdev->name,
-						  port->port_speed,
-						  port->full_duplex ==
-						  1 ? "Full" : "Half");
+				netif_info(port, link, dev,
+					   "Logical port up: %dMbps %s Duplex\n",
+					   port->port_speed,
+					   port->full_duplex == 1 ?
+					   "Full" : "Half");
 
-				netif_carrier_on(port->netdev);
-				netif_wake_queue(port->netdev);
+				netif_carrier_on(dev);
+				netif_wake_queue(dev);
 			}
 		} else
-			if (netif_carrier_ok(port->netdev)) {
-				if (netif_msg_link(port))
-					ehea_info("%s: Logical port down",
-						  port->netdev->name);
-				netif_carrier_off(port->netdev);
-				netif_stop_queue(port->netdev);
+			if (netif_carrier_ok(dev)) {
+				netif_info(port, link, dev,
+					   "Logical port down\n");
+				netif_carrier_off(dev);
+				netif_stop_queue(dev);
 			}
 
 		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
 			port->phy_link = EHEA_PHY_LINK_UP;
-			if (netif_msg_link(port))
-				ehea_info("%s: Physical port up",
-					  port->netdev->name);
+			netif_info(port, link, dev,
+				   "Physical port up\n");
 			if (prop_carrier_state)
-				netif_carrier_on(port->netdev);
+				netif_carrier_on(dev);
 		} else {
 			port->phy_link = EHEA_PHY_LINK_DOWN;
-			if (netif_msg_link(port))
-				ehea_info("%s: Physical port down",
-					  port->netdev->name);
+			netif_info(port, link, dev,
+				   "Physical port down\n");
 			if (prop_carrier_state)
-				netif_carrier_off(port->netdev);
+				netif_carrier_off(dev);
 		}
 
 		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
-			ehea_info("External switch port is primary port");
+			netdev_info(dev,
+				    "External switch port is primary port\n");
 		else
-			ehea_info("External switch port is backup port");
+			netdev_info(dev,
+				    "External switch port is backup port\n");
 
 		break;
 	case EHEA_EC_ADAPTER_MALFUNC:
-		ehea_error("Adapter malfunction");
+		netdev_err(dev, "Adapter malfunction\n");
 		break;
 	case EHEA_EC_PORT_MALFUNC:
-		ehea_info("Port malfunction: Device: %s", port->netdev->name);
-		netif_carrier_off(port->netdev);
-		netif_stop_queue(port->netdev);
+		netdev_info(dev, "Port malfunction\n");
+		netif_carrier_off(dev);
+		netif_stop_queue(dev);
 		break;
 	default:
-		ehea_error("unknown event code %x, eqe=0x%llX", ec, eqe);
+		netdev_err(dev, "unknown event code %x, eqe=0x%llX\n", ec, eqe);
 		break;
 	}
 }
@@ -1289,13 +1299,13 @@
 	u64 event_mask;
 
 	eqe = ehea_poll_eq(adapter->neq);
-	ehea_debug("eqe=%p", eqe);
+	pr_debug("eqe=%p\n", eqe);
 
 	while (eqe) {
-		ehea_debug("*eqe=%lx", eqe->entry);
+		pr_debug("*eqe=%lx\n", (unsigned long) eqe->entry);
 		ehea_parse_eqe(adapter, eqe->entry);
 		eqe = ehea_poll_eq(adapter->neq);
-		ehea_debug("next eqe=%p", eqe);
+		pr_debug("next eqe=%p\n", eqe);
 	}
 
 	event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
@@ -1344,14 +1354,14 @@
 				  ehea_qp_aff_irq_handler,
 				  IRQF_DISABLED, port->int_aff_name, port);
 	if (ret) {
-		ehea_error("failed registering irq for qp_aff_irq_handler:"
-			   "ist=%X", port->qp_eq->attr.ist1);
+		netdev_err(dev, "failed registering irq for qp_aff_irq_handler:ist=%X\n",
+			   port->qp_eq->attr.ist1);
 		goto out_free_qpeq;
 	}
 
-	if (netif_msg_ifup(port))
-		ehea_info("irq_handle 0x%X for function qp_aff_irq_handler "
-			  "registered", port->qp_eq->attr.ist1);
+	netif_info(port, ifup, dev,
+		   "irq_handle 0x%X for function qp_aff_irq_handler registered\n",
+		   port->qp_eq->attr.ist1);
 
 
 	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
@@ -1363,14 +1373,13 @@
 					  IRQF_DISABLED, pr->int_send_name,
 					  pr);
 		if (ret) {
-			ehea_error("failed registering irq for ehea_queue "
-				   "port_res_nr:%d, ist=%X", i,
-				   pr->eq->attr.ist1);
+			netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n",
+				   i, pr->eq->attr.ist1);
 			goto out_free_req;
 		}
-		if (netif_msg_ifup(port))
-			ehea_info("irq_handle 0x%X for function ehea_queue_int "
-				  "%d registered", pr->eq->attr.ist1, i);
+		netif_info(port, ifup, dev,
+			   "irq_handle 0x%X for function ehea_queue_int %d registered\n",
+			   pr->eq->attr.ist1, i);
 	}
 out:
 	return ret;
@@ -1401,16 +1410,16 @@
 	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
 		pr = &port->port_res[i];
 		ibmebus_free_irq(pr->eq->attr.ist1, pr);
-		if (netif_msg_intr(port))
-			ehea_info("free send irq for res %d with handle 0x%X",
-				  i, pr->eq->attr.ist1);
+		netif_info(port, intr, dev,
+			   "free send irq for res %d with handle 0x%X\n",
+			   i, pr->eq->attr.ist1);
 	}
 
 	/* associated events */
 	ibmebus_free_irq(port->qp_eq->attr.ist1, port);
-	if (netif_msg_intr(port))
-		ehea_info("associated event interrupt for handle 0x%X freed",
-			  port->qp_eq->attr.ist1);
+	netif_info(port, intr, dev,
+		   "associated event interrupt for handle 0x%X freed\n",
+		   port->qp_eq->attr.ist1);
 }
 
 static int ehea_configure_port(struct ehea_port *port)
@@ -1479,7 +1488,7 @@
 out_free:
 	ehea_rem_mr(&pr->send_mr);
 out:
-	ehea_error("Generating SMRS failed\n");
+	pr_err("Generating SMRS failed\n");
 	return -EIO;
 }
 
@@ -1496,12 +1505,10 @@
 {
 	int arr_size = sizeof(void *) * max_q_entries;
 
-	q_skba->arr = vmalloc(arr_size);
+	q_skba->arr = vzalloc(arr_size);
 	if (!q_skba->arr)
 		return -ENOMEM;
 
-	memset(q_skba->arr, 0, arr_size);
-
 	q_skba->len = max_q_entries;
 	q_skba->index = 0;
 	q_skba->os_skbs = 0;
@@ -1536,7 +1543,7 @@
 
 	pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
 	if (!pr->eq) {
-		ehea_error("create_eq failed (eq)");
+		pr_err("create_eq failed (eq)\n");
 		goto out_free;
 	}
 
@@ -1544,7 +1551,7 @@
 				     pr->eq->fw_handle,
 				     port->logical_port_id);
 	if (!pr->recv_cq) {
-		ehea_error("create_cq failed (cq_recv)");
+		pr_err("create_cq failed (cq_recv)\n");
 		goto out_free;
 	}
 
@@ -1552,19 +1559,19 @@
 				     pr->eq->fw_handle,
 				     port->logical_port_id);
 	if (!pr->send_cq) {
-		ehea_error("create_cq failed (cq_send)");
+		pr_err("create_cq failed (cq_send)\n");
 		goto out_free;
 	}
 
 	if (netif_msg_ifup(port))
-		ehea_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d",
-			  pr->send_cq->attr.act_nr_of_cqes,
-			  pr->recv_cq->attr.act_nr_of_cqes);
+		pr_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d\n",
+			pr->send_cq->attr.act_nr_of_cqes,
+			pr->recv_cq->attr.act_nr_of_cqes);
 
 	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
 	if (!init_attr) {
 		ret = -ENOMEM;
-		ehea_error("no mem for ehea_qp_init_attr");
+		pr_err("no mem for ehea_qp_init_attr\n");
 		goto out_free;
 	}
 
@@ -1589,18 +1596,18 @@
 
 	pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
 	if (!pr->qp) {
-		ehea_error("create_qp failed");
+		pr_err("create_qp failed\n");
 		ret = -EIO;
 		goto out_free;
 	}
 
 	if (netif_msg_ifup(port))
-		ehea_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n "
-			  "nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d", init_attr->qp_nr,
-			  init_attr->act_nr_send_wqes,
-			  init_attr->act_nr_rwqes_rq1,
-			  init_attr->act_nr_rwqes_rq2,
-			  init_attr->act_nr_rwqes_rq3);
+		pr_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d\n",
+			init_attr->qp_nr,
+			init_attr->act_nr_send_wqes,
+			init_attr->act_nr_rwqes_rq1,
+			init_attr->act_nr_rwqes_rq2,
+			init_attr->act_nr_rwqes_rq3);
 
 	pr->sq_skba_size = init_attr->act_nr_send_wqes + 1;
 
@@ -1751,7 +1758,7 @@
 			swqe->descriptors++;
 		}
 	} else
-		ehea_error("cannot handle fragmented headers");
+		pr_err("cannot handle fragmented headers\n");
 }
 
 static void write_swqe2_nonTSO(struct sk_buff *skb,
@@ -1847,8 +1854,8 @@
 				     port->logical_port_id,
 				     reg_type, port->mac_addr, 0, hcallid);
 	if (hret != H_SUCCESS) {
-		ehea_error("%sregistering bc address failed (tagged)",
-			   hcallid == H_REG_BCMC ? "" : "de");
+		pr_err("%sregistering bc address failed (tagged)\n",
+		       hcallid == H_REG_BCMC ? "" : "de");
 		ret = -EIO;
 		goto out_herr;
 	}
@@ -1859,8 +1866,8 @@
 				     port->logical_port_id,
 				     reg_type, port->mac_addr, 0, hcallid);
 	if (hret != H_SUCCESS) {
-		ehea_error("%sregistering bc address failed (vlan)",
-			   hcallid == H_REG_BCMC ? "" : "de");
+		pr_err("%sregistering bc address failed (vlan)\n",
+		       hcallid == H_REG_BCMC ? "" : "de");
 		ret = -EIO;
 	}
 out_herr:
@@ -1882,7 +1889,7 @@
 
 	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
 	if (!cb0) {
-		ehea_error("no mem for cb0");
+		pr_err("no mem for cb0\n");
 		ret = -ENOMEM;
 		goto out;
 	}
@@ -1930,11 +1937,11 @@
 static void ehea_promiscuous_error(u64 hret, int enable)
 {
 	if (hret == H_AUTHORITY)
-		ehea_info("Hypervisor denied %sabling promiscuous mode",
-			  enable == 1 ? "en" : "dis");
+		pr_info("Hypervisor denied %sabling promiscuous mode\n",
+			enable == 1 ? "en" : "dis");
 	else
-		ehea_error("failed %sabling promiscuous mode",
-			   enable == 1 ? "en" : "dis");
+		pr_err("failed %sabling promiscuous mode\n",
+		       enable == 1 ? "en" : "dis");
 }
 
 static void ehea_promiscuous(struct net_device *dev, int enable)
@@ -1948,7 +1955,7 @@
 
 	cb7 = (void *)get_zeroed_page(GFP_ATOMIC);
 	if (!cb7) {
-		ehea_error("no mem for cb7");
+		pr_err("no mem for cb7\n");
 		goto out;
 	}
 
@@ -2008,7 +2015,7 @@
 		hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
 						 H_DEREG_BCMC);
 		if (hret) {
-			ehea_error("failed deregistering mcast MAC");
+			pr_err("failed deregistering mcast MAC\n");
 			ret = -EIO;
 		}
 
@@ -2031,7 +2038,8 @@
 			if (!hret)
 				port->allmulti = 1;
 			else
-				ehea_error("failed enabling IFF_ALLMULTI");
+				netdev_err(dev,
+					   "failed enabling IFF_ALLMULTI\n");
 		}
 	} else
 		if (!enable) {
@@ -2040,7 +2048,8 @@
 			if (!hret)
 				port->allmulti = 0;
 			else
-				ehea_error("failed disabling IFF_ALLMULTI");
+				netdev_err(dev,
+					   "failed disabling IFF_ALLMULTI\n");
 		}
 }
 
@@ -2051,7 +2060,7 @@
 
 	ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
 	if (!ehea_mcl_entry) {
-		ehea_error("no mem for mcl_entry");
+		pr_err("no mem for mcl_entry\n");
 		return;
 	}
 
@@ -2064,7 +2073,7 @@
 	if (!hret)
 		list_add(&ehea_mcl_entry->list, &port->mc_list->list);
 	else {
-		ehea_error("failed registering mcast MAC");
+		pr_err("failed registering mcast MAC\n");
 		kfree(ehea_mcl_entry);
 	}
 }
@@ -2097,9 +2106,8 @@
 		}
 
 		if (netdev_mc_count(dev) > port->adapter->max_mc_mac) {
-			ehea_info("Mcast registration limit reached (0x%llx). "
-				  "Use ALLMULTI!",
-				  port->adapter->max_mc_mac);
+			pr_info("Mcast registration limit reached (0x%llx). Use ALLMULTI!\n",
+				port->adapter->max_mc_mac);
 			goto out;
 		}
 
@@ -2305,10 +2313,10 @@
 	}
 	pr->swqe_id_counter += 1;
 
-	if (netif_msg_tx_queued(port)) {
-		ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr);
+	netif_info(port, tx_queued, dev,
+		   "post swqe on QP %d\n", pr->qp->init_attr.qp_nr);
+	if (netif_msg_tx_queued(port))
 		ehea_dump(swqe, 512, "swqe");
-	}
 
 	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
 		netif_stop_queue(dev);
@@ -2344,14 +2352,14 @@
 
 	cb1 = (void *)get_zeroed_page(GFP_KERNEL);
 	if (!cb1) {
-		ehea_error("no mem for cb1");
+		pr_err("no mem for cb1\n");
 		goto out;
 	}
 
 	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
 				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
 	if (hret != H_SUCCESS)
-		ehea_error("modify_ehea_port failed");
+		pr_err("modify_ehea_port failed\n");
 
 	free_page((unsigned long)cb1);
 out:
@@ -2368,14 +2376,14 @@
 
 	cb1 = (void *)get_zeroed_page(GFP_KERNEL);
 	if (!cb1) {
-		ehea_error("no mem for cb1");
+		pr_err("no mem for cb1\n");
 		goto out;
 	}
 
 	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
 				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
 	if (hret != H_SUCCESS) {
-		ehea_error("query_ehea_port failed");
+		pr_err("query_ehea_port failed\n");
 		goto out;
 	}
 
@@ -2385,7 +2393,7 @@
 	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
 				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
 	if (hret != H_SUCCESS)
-		ehea_error("modify_ehea_port failed");
+		pr_err("modify_ehea_port failed\n");
 out:
 	free_page((unsigned long)cb1);
 	return;
@@ -2403,14 +2411,14 @@
 
 	cb1 = (void *)get_zeroed_page(GFP_KERNEL);
 	if (!cb1) {
-		ehea_error("no mem for cb1");
+		pr_err("no mem for cb1\n");
 		goto out;
 	}
 
 	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
 				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
 	if (hret != H_SUCCESS) {
-		ehea_error("query_ehea_port failed");
+		pr_err("query_ehea_port failed\n");
 		goto out;
 	}
 
@@ -2420,7 +2428,7 @@
 	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
 				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
 	if (hret != H_SUCCESS)
-		ehea_error("modify_ehea_port failed");
+		pr_err("modify_ehea_port failed\n");
 out:
 	free_page((unsigned long)cb1);
 }
@@ -2442,7 +2450,7 @@
 	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
 				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
 	if (hret != H_SUCCESS) {
-		ehea_error("query_ehea_qp failed (1)");
+		pr_err("query_ehea_qp failed (1)\n");
 		goto out;
 	}
 
@@ -2451,14 +2459,14 @@
 				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
 				     &dummy64, &dummy64, &dummy16, &dummy16);
 	if (hret != H_SUCCESS) {
-		ehea_error("modify_ehea_qp failed (1)");
+		pr_err("modify_ehea_qp failed (1)\n");
 		goto out;
 	}
 
 	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
 				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
 	if (hret != H_SUCCESS) {
-		ehea_error("query_ehea_qp failed (2)");
+		pr_err("query_ehea_qp failed (2)\n");
 		goto out;
 	}
 
@@ -2467,14 +2475,14 @@
 				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
 				     &dummy64, &dummy64, &dummy16, &dummy16);
 	if (hret != H_SUCCESS) {
-		ehea_error("modify_ehea_qp failed (2)");
+		pr_err("modify_ehea_qp failed (2)\n");
 		goto out;
 	}
 
 	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
 				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
 	if (hret != H_SUCCESS) {
-		ehea_error("query_ehea_qp failed (3)");
+		pr_err("query_ehea_qp failed (3)\n");
 		goto out;
 	}
 
@@ -2483,14 +2491,14 @@
 				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
 				     &dummy64, &dummy64, &dummy16, &dummy16);
 	if (hret != H_SUCCESS) {
-		ehea_error("modify_ehea_qp failed (3)");
+		pr_err("modify_ehea_qp failed (3)\n");
 		goto out;
 	}
 
 	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
 				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
 	if (hret != H_SUCCESS) {
-		ehea_error("query_ehea_qp failed (4)");
+		pr_err("query_ehea_qp failed (4)\n");
 		goto out;
 	}
 
@@ -2511,7 +2519,7 @@
 				   EHEA_MAX_ENTRIES_EQ, 1);
 	if (!port->qp_eq) {
 		ret = -EINVAL;
-		ehea_error("ehea_create_eq failed (qp_eq)");
+		pr_err("ehea_create_eq failed (qp_eq)\n");
 		goto out_kill_eq;
 	}
 
@@ -2592,27 +2600,27 @@
 	ret = ehea_port_res_setup(port, port->num_def_qps,
 				  port->num_add_tx_qps);
 	if (ret) {
-		ehea_error("port_res_failed");
+		netdev_err(dev, "ehea_port_res_setup failed\n");
 		goto out;
 	}
 
 	/* Set default QP for this port */
 	ret = ehea_configure_port(port);
 	if (ret) {
-		ehea_error("ehea_configure_port failed. ret:%d", ret);
+		netdev_err(dev, "ehea_configure_port failed. ret:%d\n", ret);
 		goto out_clean_pr;
 	}
 
 	ret = ehea_reg_interrupts(dev);
 	if (ret) {
-		ehea_error("reg_interrupts failed. ret:%d", ret);
+		netdev_err(dev, "reg_interrupts failed. ret:%d\n", ret);
 		goto out_clean_pr;
 	}
 
 	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
 		ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
 		if (ret) {
-			ehea_error("activate_qp failed");
+			netdev_err(dev, "activate_qp failed\n");
 			goto out_free_irqs;
 		}
 	}
@@ -2620,7 +2628,7 @@
 	for (i = 0; i < port->num_def_qps; i++) {
 		ret = ehea_fill_port_res(&port->port_res[i]);
 		if (ret) {
-			ehea_error("out_free_irqs");
+			netdev_err(dev, "ehea_fill_port_res failed\n");
 			goto out_free_irqs;
 		}
 	}
@@ -2643,7 +2651,7 @@
 	ehea_clean_all_portres(port);
 out:
 	if (ret)
-		ehea_info("Failed starting %s. ret=%i", dev->name, ret);
+		netdev_info(dev, "Failed starting. ret=%i\n", ret);
 
 	ehea_update_bcmc_registrations();
 	ehea_update_firmware_handles();
@@ -2674,8 +2682,7 @@
 
 	mutex_lock(&port->port_lock);
 
-	if (netif_msg_ifup(port))
-		ehea_info("enabling port %s", dev->name);
+	netif_info(port, ifup, dev, "enabling port\n");
 
 	ret = ehea_up(dev);
 	if (!ret) {
@@ -2710,8 +2717,7 @@
 
 	ret = ehea_clean_all_portres(port);
 	if (ret)
-		ehea_info("Failed freeing resources for %s. ret=%i",
-			  dev->name, ret);
+		netdev_info(dev, "Failed freeing resources. ret=%i\n", ret);
 
 	ehea_update_firmware_handles();
 
@@ -2723,8 +2729,7 @@
 	int ret;
 	struct ehea_port *port = netdev_priv(dev);
 
-	if (netif_msg_ifdown(port))
-		ehea_info("disabling port %s", dev->name);
+	netif_info(port, ifdown, dev, "disabling port\n");
 
 	set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
 	cancel_work_sync(&port->reset_task);
@@ -2765,7 +2770,7 @@
 			 msecs_to_jiffies(100));
 
 		if (!ret) {
-			ehea_error("WARNING: sq not flushed completely");
+			pr_err("WARNING: sq not flushed completely\n");
 			break;
 		}
 	}
@@ -2801,7 +2806,7 @@
 					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
 					    cb0);
 		if (hret != H_SUCCESS) {
-			ehea_error("query_ehea_qp failed (1)");
+			pr_err("query_ehea_qp failed (1)\n");
 			goto out;
 		}
 
@@ -2813,7 +2818,7 @@
 							    1), cb0, &dummy64,
 					     &dummy64, &dummy16, &dummy16);
 		if (hret != H_SUCCESS) {
-			ehea_error("modify_ehea_qp failed (1)");
+			pr_err("modify_ehea_qp failed (1)\n");
 			goto out;
 		}
 
@@ -2821,14 +2826,14 @@
 					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
 					    cb0);
 		if (hret != H_SUCCESS) {
-			ehea_error("query_ehea_qp failed (2)");
+			pr_err("query_ehea_qp failed (2)\n");
 			goto out;
 		}
 
 		/* deregister shared memory regions */
 		dret = ehea_rem_smrs(pr);
 		if (dret) {
-			ehea_error("unreg shared memory region failed");
+			pr_err("unreg shared memory region failed\n");
 			goto out;
 		}
 	}
@@ -2897,7 +2902,7 @@
 
 		ret = ehea_gen_smrs(pr);
 		if (ret) {
-			ehea_error("creation of shared memory regions failed");
+			netdev_err(dev, "creation of shared memory regions failed\n");
 			goto out;
 		}
 
@@ -2908,7 +2913,7 @@
 					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
 					    cb0);
 		if (hret != H_SUCCESS) {
-			ehea_error("query_ehea_qp failed (1)");
+			netdev_err(dev, "query_ehea_qp failed (1)\n");
 			goto out;
 		}
 
@@ -2920,7 +2925,7 @@
 							    1), cb0, &dummy64,
 					     &dummy64, &dummy16, &dummy16);
 		if (hret != H_SUCCESS) {
-			ehea_error("modify_ehea_qp failed (1)");
+			netdev_err(dev, "modify_ehea_qp failed (1)\n");
 			goto out;
 		}
 
@@ -2928,7 +2933,7 @@
 					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
 					    cb0);
 		if (hret != H_SUCCESS) {
-			ehea_error("query_ehea_qp failed (2)");
+			netdev_err(dev, "query_ehea_qp failed (2)\n");
 			goto out;
 		}
 
@@ -2965,8 +2970,7 @@
 
 	ehea_set_multicast_list(dev);
 
-	if (netif_msg_timer(port))
-		ehea_info("Device %s resetted successfully", dev->name);
+	netif_info(port, timer, dev, "reset successful\n");
 
 	port_napi_enable(port);
 
@@ -2976,12 +2980,12 @@
 	mutex_unlock(&dlpar_mem_lock);
 }
 
-static void ehea_rereg_mrs(struct work_struct *work)
+static void ehea_rereg_mrs(void)
 {
 	int ret, i;
 	struct ehea_adapter *adapter;
 
-	ehea_info("LPAR memory changed - re-initializing driver");
+	pr_info("LPAR memory changed - re-initializing driver\n");
 
 	list_for_each_entry(adapter, &adapter_list, list)
 		if (adapter->active_ports) {
@@ -3013,8 +3017,7 @@
 			/* Unregister old memory region */
 			ret = ehea_rem_mr(&adapter->mr);
 			if (ret) {
-				ehea_error("unregister MR failed - driver"
-					   " inoperable!");
+				pr_err("unregister MR failed - driver inoperable!\n");
 				goto out;
 			}
 		}
@@ -3026,8 +3029,7 @@
 			/* Register new memory region */
 			ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
 			if (ret) {
-				ehea_error("register MR failed - driver"
-					   " inoperable!");
+				pr_err("register MR failed - driver inoperable!\n");
 				goto out;
 			}
 
@@ -3050,7 +3052,7 @@
 				}
 			}
 		}
-	ehea_info("re-initializing driver complete");
+	pr_info("re-initializing driver complete\n");
 out:
 	return;
 }
@@ -3103,7 +3105,7 @@
 	/* (Try to) enable *jumbo frames */
 	cb4 = (void *)get_zeroed_page(GFP_KERNEL);
 	if (!cb4) {
-		ehea_error("no mem for cb4");
+		pr_err("no mem for cb4\n");
 		ret = -ENOMEM;
 		goto out;
 	} else {
@@ -3165,13 +3167,13 @@
 
 	ret = of_device_register(&port->ofdev);
 	if (ret) {
-		ehea_error("failed to register device. ret=%d", ret);
+		pr_err("failed to register device. ret=%d\n", ret);
 		goto out;
 	}
 
 	ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
 	if (ret) {
-		ehea_error("failed to register attributes, ret=%d", ret);
+		pr_err("failed to register attributes, ret=%d\n", ret);
 		goto out_unreg_of_dev;
 	}
 
@@ -3221,7 +3223,7 @@
 	dev = alloc_etherdev(sizeof(struct ehea_port));
 
 	if (!dev) {
-		ehea_error("no mem for net_device");
+		pr_err("no mem for net_device\n");
 		ret = -ENOMEM;
 		goto out_err;
 	}
@@ -3268,11 +3270,14 @@
 		      | NETIF_F_LLTX;
 	dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;
 
+	if (use_lro)
+		dev->features |= NETIF_F_LRO;
+
 	INIT_WORK(&port->reset_task, ehea_reset_port);
 
 	ret = register_netdev(dev);
 	if (ret) {
-		ehea_error("register_netdev failed. ret=%d", ret);
+		pr_err("register_netdev failed. ret=%d\n", ret);
 		goto out_unreg_port;
 	}
 
@@ -3280,11 +3285,10 @@
 
 	ret = ehea_get_jumboframe_status(port, &jumbo);
 	if (ret)
-		ehea_error("failed determining jumbo frame status for %s",
-			   port->netdev->name);
+		netdev_err(dev, "failed determining jumbo frame status\n");
 
-	ehea_info("%s: Jumbo frames are %sabled", dev->name,
-		  jumbo == 1 ? "en" : "dis");
+	netdev_info(dev, "Jumbo frames are %sabled\n",
+		    jumbo == 1 ? "en" : "dis");
 
 	adapter->active_ports++;
 
@@ -3300,14 +3304,16 @@
 	free_netdev(dev);
 
 out_err:
-	ehea_error("setting up logical port with id=%d failed, ret=%d",
-		   logical_port_id, ret);
+	pr_err("setting up logical port with id=%d failed, ret=%d\n",
+	       logical_port_id, ret);
 	return NULL;
 }
 
 static void ehea_shutdown_single_port(struct ehea_port *port)
 {
 	struct ehea_adapter *adapter = port->adapter;
+
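+	/* Make sure a pending port reset cannot run while the netdev
+	 * is being torn down.
+	 */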
+	cancel_work_sync(&port->reset_task);
 	unregister_netdev(port->netdev);
 	ehea_unregister_port(port);
 	kfree(port->mc_list);
@@ -3329,13 +3335,13 @@
 		dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
 						 NULL);
 		if (!dn_log_port_id) {
-			ehea_error("bad device node: eth_dn name=%s",
-				   eth_dn->full_name);
+			pr_err("bad device node: eth_dn name=%s\n",
+			       eth_dn->full_name);
 			continue;
 		}
 
 		if (ehea_add_adapter_mr(adapter)) {
-			ehea_error("creating MR failed");
+			pr_err("creating MR failed\n");
 			of_node_put(eth_dn);
 			return -EIO;
 		}
@@ -3344,9 +3350,8 @@
 							  *dn_log_port_id,
 							  eth_dn);
 		if (adapter->port[i])
-			ehea_info("%s -> logical port id #%d",
-				  adapter->port[i]->netdev->name,
-				  *dn_log_port_id);
+			netdev_info(adapter->port[i]->netdev,
+				    "logical port id #%d\n", *dn_log_port_id);
 		else
 			ehea_remove_adapter_mr(adapter);
 
@@ -3391,21 +3396,20 @@
 	port = ehea_get_port(adapter, logical_port_id);
 
 	if (port) {
-		ehea_info("adding port with logical port id=%d failed. port "
-			  "already configured as %s.", logical_port_id,
-			  port->netdev->name);
+		netdev_info(port->netdev, "adding port with logical port id=%d failed: port already configured\n",
+			    logical_port_id);
 		return -EINVAL;
 	}
 
 	eth_dn = ehea_get_eth_dn(adapter, logical_port_id);
 
 	if (!eth_dn) {
-		ehea_info("no logical port with id %d found", logical_port_id);
+		pr_info("no logical port with id %d found\n", logical_port_id);
 		return -EINVAL;
 	}
 
 	if (ehea_add_adapter_mr(adapter)) {
-		ehea_error("creating MR failed");
+		pr_err("creating MR failed\n");
 		return -EIO;
 	}
 
@@ -3420,8 +3424,8 @@
 				break;
 			}
 
-		ehea_info("added %s (logical port id=%d)", port->netdev->name,
-			  logical_port_id);
+		netdev_info(port->netdev, "added: (logical port id=%d)\n",
+			    logical_port_id);
 	} else {
 		ehea_remove_adapter_mr(adapter);
 		return -EIO;
@@ -3444,8 +3448,8 @@
 	port = ehea_get_port(adapter, logical_port_id);
 
 	if (port) {
-		ehea_info("removed %s (logical port id=%d)", port->netdev->name,
-			  logical_port_id);
+		netdev_info(port->netdev, "removed: (logical port id=%d)\n",
+			    logical_port_id);
 
 		ehea_shutdown_single_port(port);
 
@@ -3455,8 +3459,8 @@
 				break;
 			}
 	} else {
-		ehea_error("removing port with logical port id=%d failed. port "
-			   "not configured.", logical_port_id);
+		pr_err("removing port with logical port id=%d failed. port not configured.\n",
+		       logical_port_id);
 		return -EINVAL;
 	}
 
@@ -3493,7 +3497,7 @@
 	int ret;
 
 	if (!dev || !dev->dev.of_node) {
-		ehea_error("Invalid ibmebus device probed");
+		pr_err("Invalid ibmebus device probed\n");
 		return -EINVAL;
 	}
 
@@ -3597,8 +3601,6 @@
 
 	ehea_remove_device_sysfs(dev);
 
-	flush_scheduled_work();
-
 	ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
 	tasklet_kill(&adapter->neq_tasklet);
 
@@ -3641,21 +3643,21 @@
 
 	switch (action) {
 	case MEM_CANCEL_OFFLINE:
-		ehea_info("memory offlining canceled");
+		pr_info("memory offlining canceled");
 		/* Readd canceled memory block */
 	case MEM_ONLINE:
-		ehea_info("memory is going online");
+		pr_info("memory is going online");
 		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
 		if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
 			goto out_unlock;
-		ehea_rereg_mrs(NULL);
+		ehea_rereg_mrs();
 		break;
 	case MEM_GOING_OFFLINE:
-		ehea_info("memory is going offline");
+		pr_info("memory is going offline");
 		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
 		if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
 			goto out_unlock;
-		ehea_rereg_mrs(NULL);
+		ehea_rereg_mrs();
 		break;
 	default:
 		break;
@@ -3677,7 +3679,7 @@
 				unsigned long action, void *unused)
 {
 	if (action == SYS_RESTART) {
-		ehea_info("Reboot: freeing all eHEA resources");
+		pr_info("Reboot: freeing all eHEA resources\n");
 		ibmebus_unregister_driver(&ehea_driver);
 	}
 	return NOTIFY_DONE;
@@ -3693,22 +3695,22 @@
 
 	if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
 	    (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
-		ehea_info("Bad parameter: rq1_entries");
+		pr_info("Bad parameter: rq1_entries\n");
 		ret = -EINVAL;
 	}
 	if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
 	    (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
-		ehea_info("Bad parameter: rq2_entries");
+		pr_info("Bad parameter: rq2_entries\n");
 		ret = -EINVAL;
 	}
 	if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
 	    (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
-		ehea_info("Bad parameter: rq3_entries");
+		pr_info("Bad parameter: rq3_entries\n");
 		ret = -EINVAL;
 	}
 	if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
 	    (sq_entries > EHEA_MAX_ENTRIES_SQ)) {
-		ehea_info("Bad parameter: sq_entries");
+		pr_info("Bad parameter: sq_entries\n");
 		ret = -EINVAL;
 	}
 
@@ -3728,11 +3730,8 @@
 {
 	int ret;
 
-	printk(KERN_INFO "IBM eHEA ethernet device driver (Release %s)\n",
-	       DRV_VERSION);
+	pr_info("IBM eHEA ethernet device driver (Release %s)\n", DRV_VERSION);
 
-
-	INIT_WORK(&ehea_rereg_mr_task, ehea_rereg_mrs);
 	memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles));
 	memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs));
 
@@ -3749,27 +3748,27 @@
 
 	ret = register_reboot_notifier(&ehea_reboot_nb);
 	if (ret)
-		ehea_info("failed registering reboot notifier");
+		pr_info("failed registering reboot notifier\n");
 
 	ret = register_memory_notifier(&ehea_mem_nb);
 	if (ret)
-		ehea_info("failed registering memory remove notifier");
+		pr_info("failed registering memory remove notifier\n");
 
 	ret = crash_shutdown_register(ehea_crash_handler);
 	if (ret)
-		ehea_info("failed registering crash handler");
+		pr_info("failed registering crash handler\n");
 
 	ret = ibmebus_register_driver(&ehea_driver);
 	if (ret) {
-		ehea_error("failed registering eHEA device driver on ebus");
+		pr_err("failed registering eHEA device driver on ebus\n");
 		goto out2;
 	}
 
 	ret = driver_create_file(&ehea_driver.driver,
 				 &driver_attr_capabilities);
 	if (ret) {
-		ehea_error("failed to register capabilities attribute, ret=%d",
-			   ret);
+		pr_err("failed to register capabilities attribute, ret=%d\n",
+		       ret);
 		goto out3;
 	}
 
@@ -3789,13 +3788,12 @@
 {
 	int ret;
 
-	flush_scheduled_work();
 	driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
 	ibmebus_unregister_driver(&ehea_driver);
 	unregister_reboot_notifier(&ehea_reboot_nb);
 	ret = crash_shutdown_unregister(ehea_crash_handler);
 	if (ret)
-		ehea_info("failed unregistering crash handler");
+		pr_info("failed unregistering crash handler\n");
 	unregister_memory_notifier(&ehea_mem_nb);
 	kfree(ehea_fw_handles.arr);
 	kfree(ehea_bcmc_regs.arr);
diff --git a/drivers/net/ehea/ehea_phyp.c b/drivers/net/ehea/ehea_phyp.c
index 8fe9dca..0506967 100644
--- a/drivers/net/ehea/ehea_phyp.c
+++ b/drivers/net/ehea/ehea_phyp.c
@@ -26,6 +26,8 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include "ehea_phyp.h"
 
 
@@ -67,12 +69,11 @@
 		}
 
 		if (ret < H_SUCCESS)
-			ehea_error("opcode=%lx ret=%lx"
-				   " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
-				   " arg5=%lx arg6=%lx arg7=%lx ",
-				   opcode, ret,
-				   arg1, arg2, arg3, arg4, arg5,
-				   arg6, arg7);
+			pr_err("opcode=%lx ret=%lx"
+			       " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
+			       " arg5=%lx arg6=%lx arg7=%lx\n",
+			       opcode, ret,
+			       arg1, arg2, arg3, arg4, arg5, arg6, arg7);
 
 		return ret;
 	}
@@ -114,19 +115,18 @@
 		    && (((cb_cat == H_PORT_CB4) && ((arg3 == H_PORT_CB4_JUMBO)
 		    || (arg3 == H_PORT_CB4_SPEED))) || ((cb_cat == H_PORT_CB7)
 		    && (arg3 == H_PORT_CB7_DUCQPN)))))
-			ehea_error("opcode=%lx ret=%lx"
-				   " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
-				   " arg5=%lx arg6=%lx arg7=%lx arg8=%lx"
-				   " arg9=%lx"
-				   " out1=%lx out2=%lx out3=%lx out4=%lx"
-				   " out5=%lx out6=%lx out7=%lx out8=%lx"
-				   " out9=%lx",
-				   opcode, ret,
-				   arg1, arg2, arg3, arg4, arg5,
-				   arg6, arg7, arg8, arg9,
-				   outs[0], outs[1], outs[2], outs[3],
-				   outs[4], outs[5], outs[6], outs[7],
-				   outs[8]);
+			pr_err("opcode=%lx ret=%lx"
+			       " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
+			       " arg5=%lx arg6=%lx arg7=%lx arg8=%lx"
+			       " arg9=%lx"
+			       " out1=%lx out2=%lx out3=%lx out4=%lx"
+			       " out5=%lx out6=%lx out7=%lx out8=%lx"
+			       " out9=%lx\n",
+			       opcode, ret,
+			       arg1, arg2, arg3, arg4, arg5,
+			       arg6, arg7, arg8, arg9,
+			       outs[0], outs[1], outs[2], outs[3], outs[4],
+			       outs[5], outs[6], outs[7], outs[8]);
 		return ret;
 	}
 
@@ -515,7 +515,7 @@
 			     const u64 log_pageaddr, const u64 count)
 {
 	if ((count > 1) && (log_pageaddr & ~PAGE_MASK)) {
-		ehea_error("not on pageboundary");
+		pr_err("not on pageboundary\n");
 		return H_PARAMETER;
 	}
 
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index 89128b6..cd44bb8 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -26,6 +26,8 @@
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/mm.h>
 #include <linux/slab.h>
 #include "ehea.h"
@@ -45,7 +47,7 @@
 		queue->current_q_offset -= queue->pagesize;
 		retvalue = NULL;
 	} else if (((u64) retvalue) & (EHEA_PAGESIZE-1)) {
-		ehea_error("not on pageboundary");
+		pr_err("not on pageboundary\n");
 		retvalue = NULL;
 	}
 	return retvalue;
@@ -58,15 +60,15 @@
 	int i, k;
 
 	if ((pagesize > PAGE_SIZE) || (!pages_per_kpage)) {
-		ehea_error("pagesize conflict! kernel pagesize=%d, "
-			   "ehea pagesize=%d", (int)PAGE_SIZE, (int)pagesize);
+		pr_err("pagesize conflict! kernel pagesize=%d, ehea pagesize=%d\n",
+		       (int)PAGE_SIZE, (int)pagesize);
 		return -EINVAL;
 	}
 
 	queue->queue_length = nr_of_pages * pagesize;
 	queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
 	if (!queue->queue_pages) {
-		ehea_error("no mem for queue_pages");
+		pr_err("no mem for queue_pages\n");
 		return -ENOMEM;
 	}
 
@@ -130,7 +132,7 @@
 
 	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
 	if (!cq) {
-		ehea_error("no mem for cq");
+		pr_err("no mem for cq\n");
 		goto out_nomem;
 	}
 
@@ -147,7 +149,7 @@
 	hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr,
 					&cq->fw_handle, &cq->epas);
 	if (hret != H_SUCCESS) {
-		ehea_error("alloc_resource_cq failed");
+		pr_err("alloc_resource_cq failed\n");
 		goto out_freemem;
 	}
 
@@ -159,7 +161,7 @@
 	for (counter = 0; counter < cq->attr.nr_pages; counter++) {
 		vpage = hw_qpageit_get_inc(&cq->hw_queue);
 		if (!vpage) {
-			ehea_error("hw_qpageit_get_inc failed");
+			pr_err("hw_qpageit_get_inc failed\n");
 			goto out_kill_hwq;
 		}
 
@@ -168,9 +170,8 @@
 					     0, EHEA_CQ_REGISTER_ORIG,
 					     cq->fw_handle, rpage, 1);
 		if (hret < H_SUCCESS) {
-			ehea_error("register_rpage_cq failed ehea_cq=%p "
-				   "hret=%llx counter=%i act_pages=%i",
-				   cq, hret, counter, cq->attr.nr_pages);
+			pr_err("register_rpage_cq failed ehea_cq=%p hret=%llx counter=%i act_pages=%i\n",
+			       cq, hret, counter, cq->attr.nr_pages);
 			goto out_kill_hwq;
 		}
 
@@ -178,14 +179,14 @@
 			vpage = hw_qpageit_get_inc(&cq->hw_queue);
 
 			if ((hret != H_SUCCESS) || (vpage)) {
-				ehea_error("registration of pages not "
-					   "complete hret=%llx\n", hret);
+				pr_err("registration of pages not complete hret=%llx\n",
+				       hret);
 				goto out_kill_hwq;
 			}
 		} else {
 			if (hret != H_PAGE_REGISTERED) {
-				ehea_error("CQ: registration of page failed "
-					   "hret=%llx\n", hret);
+				pr_err("CQ: registration of page failed hret=%llx\n",
+				       hret);
 				goto out_kill_hwq;
 			}
 		}
@@ -241,7 +242,7 @@
 	}
 
 	if (hret != H_SUCCESS) {
-		ehea_error("destroy CQ failed");
+		pr_err("destroy CQ failed\n");
 		return -EIO;
 	}
 
@@ -259,7 +260,7 @@
 
 	eq = kzalloc(sizeof(*eq), GFP_KERNEL);
 	if (!eq) {
-		ehea_error("no mem for eq");
+		pr_err("no mem for eq\n");
 		return NULL;
 	}
 
@@ -272,21 +273,21 @@
 	hret = ehea_h_alloc_resource_eq(adapter->handle,
 					&eq->attr, &eq->fw_handle);
 	if (hret != H_SUCCESS) {
-		ehea_error("alloc_resource_eq failed");
+		pr_err("alloc_resource_eq failed\n");
 		goto out_freemem;
 	}
 
 	ret = hw_queue_ctor(&eq->hw_queue, eq->attr.nr_pages,
 			    EHEA_PAGESIZE, sizeof(struct ehea_eqe));
 	if (ret) {
-		ehea_error("can't allocate eq pages");
+		pr_err("can't allocate eq pages\n");
 		goto out_freeres;
 	}
 
 	for (i = 0; i < eq->attr.nr_pages; i++) {
 		vpage = hw_qpageit_get_inc(&eq->hw_queue);
 		if (!vpage) {
-			ehea_error("hw_qpageit_get_inc failed");
+			pr_err("hw_qpageit_get_inc failed\n");
 			hret = H_RESOURCE;
 			goto out_kill_hwq;
 		}
@@ -370,7 +371,7 @@
 	}
 
 	if (hret != H_SUCCESS) {
-		ehea_error("destroy EQ failed");
+		pr_err("destroy EQ failed\n");
 		return -EIO;
 	}
 
@@ -395,7 +396,7 @@
 	for (cnt = 0; cnt < nr_pages; cnt++) {
 		vpage = hw_qpageit_get_inc(hw_queue);
 		if (!vpage) {
-			ehea_error("hw_qpageit_get_inc failed");
+			pr_err("hw_qpageit_get_inc failed\n");
 			goto out_kill_hwq;
 		}
 		rpage = virt_to_abs(vpage);
@@ -403,7 +404,7 @@
 					     0, h_call_q_selector,
 					     qp->fw_handle, rpage, 1);
 		if (hret < H_SUCCESS) {
-			ehea_error("register_rpage_qp failed");
+			pr_err("register_rpage_qp failed\n");
 			goto out_kill_hwq;
 		}
 	}
@@ -432,7 +433,7 @@
 
 	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
 	if (!qp) {
-		ehea_error("no mem for qp");
+		pr_err("no mem for qp\n");
 		return NULL;
 	}
 
@@ -441,7 +442,7 @@
 	hret = ehea_h_alloc_resource_qp(adapter->handle, init_attr, pd,
 					&qp->fw_handle, &qp->epas);
 	if (hret != H_SUCCESS) {
-		ehea_error("ehea_h_alloc_resource_qp failed");
+		pr_err("ehea_h_alloc_resource_qp failed\n");
 		goto out_freemem;
 	}
 
@@ -455,7 +456,7 @@
 				     init_attr->act_wqe_size_enc_sq, adapter,
 				     0);
 	if (ret) {
-		ehea_error("can't register for sq ret=%x", ret);
+		pr_err("can't register for sq ret=%x\n", ret);
 		goto out_freeres;
 	}
 
@@ -465,7 +466,7 @@
 				     init_attr->act_wqe_size_enc_rq1,
 				     adapter, 1);
 	if (ret) {
-		ehea_error("can't register for rq1 ret=%x", ret);
+		pr_err("can't register for rq1 ret=%x\n", ret);
 		goto out_kill_hwsq;
 	}
 
@@ -476,7 +477,7 @@
 					     init_attr->act_wqe_size_enc_rq2,
 					     adapter, 2);
 		if (ret) {
-			ehea_error("can't register for rq2 ret=%x", ret);
+			pr_err("can't register for rq2 ret=%x\n", ret);
 			goto out_kill_hwr1q;
 		}
 	}
@@ -488,7 +489,7 @@
 					     init_attr->act_wqe_size_enc_rq3,
 					     adapter, 3);
 		if (ret) {
-			ehea_error("can't register for rq3 ret=%x", ret);
+			pr_err("can't register for rq3 ret=%x\n", ret);
 			goto out_kill_hwr2q;
 		}
 	}
@@ -553,7 +554,7 @@
 	}
 
 	if (hret != H_SUCCESS) {
-		ehea_error("destroy QP failed");
+		pr_err("destroy QP failed\n");
 		return -EIO;
 	}
 
@@ -842,7 +843,7 @@
 		    (hret != H_PAGE_REGISTERED)) {
 			ehea_h_free_resource(adapter->handle, mr->handle,
 					     FORCE_FREE);
-			ehea_error("register_rpage_mr failed");
+			pr_err("register_rpage_mr failed\n");
 			return hret;
 		}
 	}
@@ -896,7 +897,7 @@
 
 	pt = (void *)get_zeroed_page(GFP_KERNEL);
 	if (!pt) {
-		ehea_error("no mem");
+		pr_err("no mem\n");
 		ret = -ENOMEM;
 		goto out;
 	}
@@ -906,14 +907,14 @@
 					&mr->handle, &mr->lkey);
 
 	if (hret != H_SUCCESS) {
-		ehea_error("alloc_resource_mr failed");
+		pr_err("alloc_resource_mr failed\n");
 		ret = -EIO;
 		goto out;
 	}
 
 	if (!ehea_bmap) {
 		ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
-		ehea_error("no busmap available");
+		pr_err("no busmap available\n");
 		ret = -EIO;
 		goto out;
 	}
@@ -929,7 +930,7 @@
 
 	if (hret != H_SUCCESS) {
 		ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
-		ehea_error("registering mr failed");
+		pr_err("registering mr failed\n");
 		ret = -EIO;
 		goto out;
 	}
@@ -952,7 +953,7 @@
 	hret = ehea_h_free_resource(mr->adapter->handle, mr->handle,
 				    FORCE_FREE);
 	if (hret != H_SUCCESS) {
-		ehea_error("destroy MR failed");
+		pr_err("destroy MR failed\n");
 		return -EIO;
 	}
 
@@ -987,14 +988,14 @@
 		length = EHEA_PAGESIZE;
 
 	if (type == EHEA_AER_RESTYPE_QP)
-		ehea_error("QP (resource=%llX) state: AER=0x%llX, AERR=0x%llX, "
-			   "port=%llX", resource, data[6], data[12], data[22]);
+		pr_err("QP (resource=%llX) state: AER=0x%llX, AERR=0x%llX, port=%llX\n",
+		       resource, data[6], data[12], data[22]);
 	else if (type == EHEA_AER_RESTYPE_CQ)
-		ehea_error("CQ (resource=%llX) state: AER=0x%llX", resource,
-			   data[6]);
+		pr_err("CQ (resource=%llX) state: AER=0x%llX\n",
+		       resource, data[6]);
 	else if (type == EHEA_AER_RESTYPE_EQ)
-		ehea_error("EQ (resource=%llX) state: AER=0x%llX", resource,
-			   data[6]);
+		pr_err("EQ (resource=%llX) state: AER=0x%llX\n",
+		       resource, data[6]);
 
 	ehea_dump(data, length, "error data");
 }
@@ -1008,7 +1009,7 @@
 
 	rblock = (void *)get_zeroed_page(GFP_KERNEL);
 	if (!rblock) {
-		ehea_error("Cannot allocate rblock memory.");
+		pr_err("Cannot allocate rblock memory\n");
 		goto out;
 	}
 
@@ -1020,9 +1021,9 @@
 		*aerr = rblock[12];
 		print_error_data(rblock);
 	} else if (ret == H_R_STATE) {
-		ehea_error("No error data available: %llX.", res_handle);
+		pr_err("No error data available: %llX\n", res_handle);
 	} else
-		ehea_error("Error data could not be fetched: %llX", res_handle);
+		pr_err("Error data could not be fetched: %llX\n", res_handle);
 
 	free_page((unsigned long)rblock);
 out:
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index c91d364..a937f49 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -32,7 +32,7 @@
 
 #define DRV_NAME		"enic"
 #define DRV_DESCRIPTION		"Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION		"1.4.1.6"
+#define DRV_VERSION		"1.4.1.10"
 #define DRV_COPYRIGHT		"Copyright 2008-2010 Cisco Systems, Inc"
 
 #define ENIC_BARS_MAX		6
@@ -61,6 +61,8 @@
 	char name[PORT_PROFILE_MAX];
 	u8 instance_uuid[PORT_UUID_MAX];
 	u8 host_uuid[PORT_UUID_MAX];
+	u8 vf_mac[ETH_ALEN];
+	u8 mac_addr[ETH_ALEN];
 };
 
 /* Per-instance private data structure */
@@ -78,8 +80,10 @@
 	spinlock_t devcmd_lock;
 	u8 mac_addr[ETH_ALEN];
 	u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
+	u8 uc_addr[ENIC_UNICAST_PERFECT_FILTERS][ETH_ALEN];
 	unsigned int flags;
 	unsigned int mc_count;
+	unsigned int uc_count;
 	int csum_rx_enabled;
 	u32 port_mtu;
 	u32 rx_coalesce_usecs;
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index a466ef9..77d9138 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -1002,7 +1002,7 @@
 	return err;
 }
 
-static int enic_dev_add_multicast_addr(struct enic *enic, u8 *addr)
+static int enic_dev_add_addr(struct enic *enic, u8 *addr)
 {
 	int err;
 
@@ -1013,7 +1013,7 @@
 	return err;
 }
 
-static int enic_dev_del_multicast_addr(struct enic *enic, u8 *addr)
+static int enic_dev_del_addr(struct enic *enic, u8 *addr)
 {
 	int err;
 
@@ -1024,29 +1024,19 @@
 	return err;
 }
 
-/* netif_tx_lock held, BHs disabled */
-static void enic_set_multicast_list(struct net_device *netdev)
+static void enic_add_multicast_addr_list(struct enic *enic)
 {
-	struct enic *enic = netdev_priv(netdev);
+	struct net_device *netdev = enic->netdev;
 	struct netdev_hw_addr *ha;
-	int directed = 1;
-	int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
-	int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
-	int promisc = (netdev->flags & IFF_PROMISC) ? 1 : 0;
 	unsigned int mc_count = netdev_mc_count(netdev);
-	int allmulti = (netdev->flags & IFF_ALLMULTI) ||
-		mc_count > ENIC_MULTICAST_PERFECT_FILTERS;
-	unsigned int flags = netdev->flags | (allmulti ? IFF_ALLMULTI : 0);
 	u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
 	unsigned int i, j;
 
-	if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS)
+	if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS) {
+		netdev_warn(netdev, "Registering only %d out of %d "
+			"multicast addresses\n",
+			ENIC_MULTICAST_PERFECT_FILTERS, mc_count);
 		mc_count = ENIC_MULTICAST_PERFECT_FILTERS;
-
-	if (enic->flags != flags) {
-		enic->flags = flags;
-		enic_dev_packet_filter(enic, directed,
-			multicast, broadcast, promisc, allmulti);
 	}
 
 	/* Is there an easier way?  Trying to minimize to
@@ -1068,7 +1058,7 @@
 				mc_addr[j]) == 0)
 				break;
 		if (j == mc_count)
-			enic_dev_del_multicast_addr(enic, enic->mc_addr[i]);
+			enic_dev_del_addr(enic, enic->mc_addr[i]);
 	}
 
 	for (i = 0; i < mc_count; i++) {
@@ -1077,7 +1067,7 @@
 				enic->mc_addr[j]) == 0)
 				break;
 		if (j == enic->mc_count)
-			enic_dev_add_multicast_addr(enic, mc_addr[i]);
+			enic_dev_add_addr(enic, mc_addr[i]);
 	}
 
 	/* Save the list to compare against next time
@@ -1089,6 +1079,89 @@
 	enic->mc_count = mc_count;
 }
 
+static void enic_add_unicast_addr_list(struct enic *enic)
+{
+	struct net_device *netdev = enic->netdev;
+	struct netdev_hw_addr *ha;
+	unsigned int uc_count = netdev_uc_count(netdev);
+	u8 uc_addr[ENIC_UNICAST_PERFECT_FILTERS][ETH_ALEN];
+	unsigned int i, j;
+
+	if (uc_count > ENIC_UNICAST_PERFECT_FILTERS) {
+		netdev_warn(netdev, "Registering only %d out of %d "
+			"unicast addresses\n",
+			ENIC_UNICAST_PERFECT_FILTERS, uc_count);
+		uc_count = ENIC_UNICAST_PERFECT_FILTERS;
+	}
+
+	/* Is there an easier way?  Trying to minimize to
+	 * calls to add/del unicast addrs.  We keep the
+	 * addrs from the last call in enic->uc_addr and
+	 * look for changes to add/del.
+	 */
+
+	i = 0;
+	netdev_for_each_uc_addr(ha, netdev) {
+		if (i == uc_count)
+			break;
+		memcpy(uc_addr[i++], ha->addr, ETH_ALEN);
+	}
+
+	for (i = 0; i < enic->uc_count; i++) {
+		for (j = 0; j < uc_count; j++)
+			if (compare_ether_addr(enic->uc_addr[i],
+				uc_addr[j]) == 0)
+				break;
+		if (j == uc_count)
+			enic_dev_del_addr(enic, enic->uc_addr[i]);
+	}
+
+	for (i = 0; i < uc_count; i++) {
+		for (j = 0; j < enic->uc_count; j++)
+			if (compare_ether_addr(uc_addr[i],
+				enic->uc_addr[j]) == 0)
+				break;
+		if (j == enic->uc_count)
+			enic_dev_add_addr(enic, uc_addr[i]);
+	}
+
+	/* Save the list to compare against next time
+	 */
+
+	for (i = 0; i < uc_count; i++)
+		memcpy(enic->uc_addr[i], uc_addr[i], ETH_ALEN);
+
+	enic->uc_count = uc_count;
+}
+
+/* netif_tx_lock held, BHs disabled */
+static void enic_set_rx_mode(struct net_device *netdev)
+{
+	struct enic *enic = netdev_priv(netdev);
+	int directed = 1;
+	int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
+	int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
+	int promisc = (netdev->flags & IFF_PROMISC) ||
+		netdev_uc_count(netdev) > ENIC_UNICAST_PERFECT_FILTERS;
+	int allmulti = (netdev->flags & IFF_ALLMULTI) ||
+		netdev_mc_count(netdev) > ENIC_MULTICAST_PERFECT_FILTERS;
+	unsigned int flags = netdev->flags |
+		(allmulti ? IFF_ALLMULTI : 0) |
+		(promisc ? IFF_PROMISC : 0);
+
+	if (enic->flags != flags) {
+		enic->flags = flags;
+		enic_dev_packet_filter(enic, directed,
+			multicast, broadcast, promisc, allmulti);
+	}
+
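+	/* In promiscuous mode the hardware receives all frames anyway,
+	 * so programming perfect filters is unnecessary; likewise,
+	 * allmulti makes the multicast list redundant.
+	 */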
+	if (!promisc) {
+		enic_add_unicast_addr_list(enic);
+		if (!allmulti)
+			enic_add_multicast_addr_list(enic);
+	}
+}
+
 /* rtnl lock is held */
 static void enic_vlan_rx_register(struct net_device *netdev,
 	struct vlan_group *vlan_group)
@@ -1158,11 +1231,31 @@
 	return err;
 }
 
+static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
+{
+	struct enic *enic = netdev_priv(netdev);
+
+	if (vf != PORT_SELF_VF)
+		return -EOPNOTSUPP;
+
+	/* Ignore the vf argument for now. We can assume the request
+	 * is coming on a vf.
+	 */
+	if (is_valid_ether_addr(mac)) {
+		memcpy(enic->pp.vf_mac, mac, ETH_ALEN);
+		return 0;
+	} else {
+		return -EINVAL;
+	}
+}
+
 static int enic_set_port_profile(struct enic *enic, u8 *mac)
 {
 	struct vic_provinfo *vp;
 	u8 oui[3] = VIC_PROVINFO_CISCO_OUI;
+	u16 os_type = VIC_GENERIC_PROV_OS_TYPE_LINUX;
 	char uuid_str[38];
+	char client_mac_str[18];
+	u8 *client_mac;
 	int err;
 
 	err = enic_vnic_dev_deinit(enic);
@@ -1180,46 +1273,63 @@
 			return -EADDRNOTAVAIL;
 
 		vp = vic_provinfo_alloc(GFP_KERNEL, oui,
-			VIC_PROVINFO_LINUX_TYPE);
+			VIC_PROVINFO_GENERIC_TYPE);
 		if (!vp)
 			return -ENOMEM;
 
 		vic_provinfo_add_tlv(vp,
-			VIC_LINUX_PROV_TLV_PORT_PROFILE_NAME_STR,
+			VIC_GENERIC_PROV_TLV_PORT_PROFILE_NAME_STR,
 			strlen(enic->pp.name) + 1, enic->pp.name);
 
+		if (!is_zero_ether_addr(enic->pp.mac_addr))
+			client_mac = enic->pp.mac_addr;
+		else
+			client_mac = mac;
+
 		vic_provinfo_add_tlv(vp,
-			VIC_LINUX_PROV_TLV_CLIENT_MAC_ADDR,
-			ETH_ALEN, mac);
+			VIC_GENERIC_PROV_TLV_CLIENT_MAC_ADDR,
+			ETH_ALEN, client_mac);
+
+		sprintf(client_mac_str, "%pM", client_mac);
+		vic_provinfo_add_tlv(vp,
+			VIC_GENERIC_PROV_TLV_CLUSTER_PORT_UUID_STR,
+			sizeof(client_mac_str), client_mac_str);
 
 		if (enic->pp.set & ENIC_SET_INSTANCE) {
 			sprintf(uuid_str, "%pUB", enic->pp.instance_uuid);
 			vic_provinfo_add_tlv(vp,
-				VIC_LINUX_PROV_TLV_CLIENT_UUID_STR,
+				VIC_GENERIC_PROV_TLV_CLIENT_UUID_STR,
 				sizeof(uuid_str), uuid_str);
 		}
 
 		if (enic->pp.set & ENIC_SET_HOST) {
 			sprintf(uuid_str, "%pUB", enic->pp.host_uuid);
 			vic_provinfo_add_tlv(vp,
-				VIC_LINUX_PROV_TLV_HOST_UUID_STR,
+				VIC_GENERIC_PROV_TLV_HOST_UUID_STR,
 				sizeof(uuid_str), uuid_str);
 		}
 
+		os_type = htons(os_type);
+		vic_provinfo_add_tlv(vp,
+			VIC_GENERIC_PROV_TLV_OS_TYPE,
+			sizeof(os_type), &os_type);
+
 		err = enic_dev_init_prov(enic, vp);
 		vic_provinfo_free(vp);
 		if (err)
 			return err;
+
+		enic->pp.set |= ENIC_SET_APPLIED;
 		break;
 
 	case PORT_REQUEST_DISASSOCIATE:
+		enic->pp.set &= ~ENIC_SET_APPLIED;
 		break;
 
 	default:
 		return -EINVAL;
 	}
 
-	enic->pp.set |= ENIC_SET_APPLIED;
 	return 0;
 }
 
@@ -1227,29 +1337,31 @@
 	struct nlattr *port[])
 {
 	struct enic *enic = netdev_priv(netdev);
+	struct enic_port_profile new_pp;
+	int err = 0;
 
-	memset(&enic->pp, 0, sizeof(enic->pp));
+	memset(&new_pp, 0, sizeof(new_pp));
 
 	if (port[IFLA_PORT_REQUEST]) {
-		enic->pp.set |= ENIC_SET_REQUEST;
-		enic->pp.request = nla_get_u8(port[IFLA_PORT_REQUEST]);
+		new_pp.set |= ENIC_SET_REQUEST;
+		new_pp.request = nla_get_u8(port[IFLA_PORT_REQUEST]);
 	}
 
 	if (port[IFLA_PORT_PROFILE]) {
-		enic->pp.set |= ENIC_SET_NAME;
-		memcpy(enic->pp.name, nla_data(port[IFLA_PORT_PROFILE]),
+		new_pp.set |= ENIC_SET_NAME;
+		memcpy(new_pp.name, nla_data(port[IFLA_PORT_PROFILE]),
 			PORT_PROFILE_MAX);
 	}
 
 	if (port[IFLA_PORT_INSTANCE_UUID]) {
-		enic->pp.set |= ENIC_SET_INSTANCE;
-		memcpy(enic->pp.instance_uuid,
+		new_pp.set |= ENIC_SET_INSTANCE;
+		memcpy(new_pp.instance_uuid,
 			nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
 	}
 
 	if (port[IFLA_PORT_HOST_UUID]) {
-		enic->pp.set |= ENIC_SET_HOST;
-		memcpy(enic->pp.host_uuid,
+		new_pp.set |= ENIC_SET_HOST;
+		memcpy(new_pp.host_uuid,
 			nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
 	}
 
@@ -1257,21 +1369,39 @@
 	if (vf != PORT_SELF_VF)
 		return -EOPNOTSUPP;
 
-	if (!(enic->pp.set & ENIC_SET_REQUEST))
+	if (!(new_pp.set & ENIC_SET_REQUEST))
 		return -EOPNOTSUPP;
 
-	if (enic->pp.request == PORT_REQUEST_ASSOCIATE) {
-
-		/* If the interface mac addr hasn't been assigned,
-		 * assign a random mac addr before setting port-
-		 * profile.
-		 */
+	if (new_pp.request == PORT_REQUEST_ASSOCIATE) {
+		/* Special case handling */
+		if (!is_zero_ether_addr(enic->pp.vf_mac))
+			memcpy(new_pp.mac_addr, enic->pp.vf_mac, ETH_ALEN);
 
 		if (is_zero_ether_addr(netdev->dev_addr))
 			random_ether_addr(netdev->dev_addr);
+	} else if (new_pp.request == PORT_REQUEST_DISASSOCIATE) {
+		if (!is_zero_ether_addr(enic->pp.mac_addr))
+			enic_dev_del_addr(enic, enic->pp.mac_addr);
 	}
 
-	return enic_set_port_profile(enic, netdev->dev_addr);
+	memcpy(&enic->pp, &new_pp, sizeof(struct enic_port_profile));
+
+	err = enic_set_port_profile(enic, netdev->dev_addr);
+	if (err)
+		goto set_port_profile_cleanup;
+
+	if (!is_zero_ether_addr(enic->pp.mac_addr))
+		enic_dev_add_addr(enic, enic->pp.mac_addr);
+
+set_port_profile_cleanup:
+	memset(enic->pp.vf_mac, 0, ETH_ALEN);
+
+	if (err || enic->pp.request == PORT_REQUEST_DISASSOCIATE) {
+		memset(netdev->dev_addr, 0, ETH_ALEN);
+		memset(enic->pp.mac_addr, 0, ETH_ALEN);
+	}
+
+	return err;
 }
 
 static int enic_get_vf_port(struct net_device *netdev, int vf,
@@ -1851,8 +1981,11 @@
 	for (i = 0; i < enic->rq_count; i++)
 		vnic_rq_enable(&enic->rq[i]);
 
-	enic_dev_add_station_addr(enic);
-	enic_set_multicast_list(netdev);
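+	/* Dynamic (port-profile) interfaces register the profile MAC
+	 * instead of the station address.
+	 */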
+	if (enic_is_dynamic(enic) && !is_zero_ether_addr(enic->pp.mac_addr))
+		enic_dev_add_addr(enic, enic->pp.mac_addr);
+	else
+		enic_dev_add_station_addr(enic);
+	enic_set_rx_mode(netdev);
 
 	netif_wake_queue(netdev);
 
@@ -1899,7 +2032,10 @@
 
 	netif_carrier_off(netdev);
 	netif_tx_disable(netdev);
-	enic_dev_del_station_addr(enic);
+	if (enic_is_dynamic(enic) && !is_zero_ether_addr(enic->pp.mac_addr))
+		enic_dev_del_addr(enic, enic->pp.mac_addr);
+	else
+		enic_dev_del_station_addr(enic);
 
 	for (i = 0; i < enic->wq_count; i++) {
 		err = vnic_wq_disable(&enic->wq[i]);
@@ -2042,7 +2178,7 @@
 
 static int enic_set_rsskey(struct enic *enic)
 {
-	u64 rss_key_buf_pa;
+	dma_addr_t rss_key_buf_pa;
 	union vnic_rss_key *rss_key_buf_va = NULL;
 	union vnic_rss_key rss_key = {
 		.key[0].b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101},
@@ -2073,7 +2209,7 @@
 
 static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
 {
-	u64 rss_cpu_buf_pa;
+	dma_addr_t rss_cpu_buf_pa;
 	union vnic_rss_cpu *rss_cpu_buf_va = NULL;
 	unsigned int i;
 	int err;
@@ -2328,7 +2464,8 @@
 	.ndo_start_xmit		= enic_hard_start_xmit,
 	.ndo_get_stats		= enic_get_stats,
 	.ndo_validate_addr	= eth_validate_addr,
-	.ndo_set_multicast_list	= enic_set_multicast_list,
+	.ndo_set_rx_mode	= enic_set_rx_mode,
+	.ndo_set_multicast_list	= enic_set_rx_mode,
 	.ndo_set_mac_address	= enic_set_mac_address_dynamic,
 	.ndo_change_mtu		= enic_change_mtu,
 	.ndo_vlan_rx_register	= enic_vlan_rx_register,
@@ -2337,6 +2474,9 @@
 	.ndo_tx_timeout		= enic_tx_timeout,
 	.ndo_set_vf_port	= enic_set_vf_port,
 	.ndo_get_vf_port	= enic_get_vf_port,
+#ifdef IFLA_VF_MAX
+	.ndo_set_vf_mac		= enic_set_vf_mac,
+#endif
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= enic_poll_controller,
 #endif
@@ -2349,7 +2489,8 @@
 	.ndo_get_stats		= enic_get_stats,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_set_mac_address	= enic_set_mac_address,
-	.ndo_set_multicast_list	= enic_set_multicast_list,
+	.ndo_set_rx_mode	= enic_set_rx_mode,
+	.ndo_set_multicast_list	= enic_set_rx_mode,
 	.ndo_change_mtu		= enic_change_mtu,
 	.ndo_vlan_rx_register	= enic_vlan_rx_register,
 	.ndo_vlan_rx_add_vid	= enic_vlan_rx_add_vid,
@@ -2693,7 +2834,7 @@
 	if (netdev) {
 		struct enic *enic = netdev_priv(netdev);
 
-		flush_scheduled_work();
+		cancel_work_sync(&enic->reset);
 		unregister_netdev(netdev);
 		enic_dev_deinit(enic);
 		vnic_dev_close(enic->vdev);
diff --git a/drivers/net/enic/enic_res.h b/drivers/net/enic/enic_res.h
index 9a103d9..25be273 100644
--- a/drivers/net/enic/enic_res.h
+++ b/drivers/net/enic/enic_res.h
@@ -34,6 +34,7 @@
 #define ENIC_MAX_MTU			9000
 
 #define ENIC_MULTICAST_PERFECT_FILTERS	32
+#define ENIC_UNICAST_PERFECT_FILTERS	32
 
 #define ENIC_NON_TSO_MAX_DESC		16
 
diff --git a/drivers/net/enic/vnic_vic.h b/drivers/net/enic/vnic_vic.h
index 7e46e5e..f700f5d 100644
--- a/drivers/net/enic/vnic_vic.h
+++ b/drivers/net/enic/vnic_vic.h
@@ -24,14 +24,29 @@
 /* Note: String field lengths include null char */
 
 #define VIC_PROVINFO_CISCO_OUI		{ 0x00, 0x00, 0x0c }
-#define VIC_PROVINFO_LINUX_TYPE		0x2
+#define VIC_PROVINFO_GENERIC_TYPE		0x4
 
-enum vic_linux_prov_tlv_type {
-	VIC_LINUX_PROV_TLV_PORT_PROFILE_NAME_STR = 0,
-	VIC_LINUX_PROV_TLV_CLIENT_MAC_ADDR = 1,			/* u8[6] */
-	VIC_LINUX_PROV_TLV_CLIENT_NAME_STR = 2,
-	VIC_LINUX_PROV_TLV_HOST_UUID_STR = 8,
-	VIC_LINUX_PROV_TLV_CLIENT_UUID_STR = 9,
+enum vic_generic_prov_tlv_type {
+	VIC_GENERIC_PROV_TLV_PORT_PROFILE_NAME_STR = 0,
+	VIC_GENERIC_PROV_TLV_CLIENT_MAC_ADDR = 1,
+	VIC_GENERIC_PROV_TLV_CLIENT_NAME_STR = 2,
+	VIC_GENERIC_PROV_TLV_CLUSTER_PORT_NAME_STR = 3,
+	VIC_GENERIC_PROV_TLV_CLUSTER_PORT_UUID_STR = 4,
+	VIC_GENERIC_PROV_TLV_CLUSTER_UUID_STR = 5,
+	VIC_GENERIC_PROV_TLV_CLUSTER_NAME_STR = 7,
+	VIC_GENERIC_PROV_TLV_HOST_UUID_STR = 8,
+	VIC_GENERIC_PROV_TLV_CLIENT_UUID_STR = 9,
+	VIC_GENERIC_PROV_TLV_INCARNATION_NUMBER = 10,
+	VIC_GENERIC_PROV_TLV_OS_TYPE = 11,
+	VIC_GENERIC_PROV_TLV_OS_VENDOR = 12,
+	VIC_GENERIC_PROV_TLV_CLIENT_TYPE = 15,
+};
+
+enum vic_generic_prov_os_type {
+	VIC_GENERIC_PROV_OS_TYPE_UNKNOWN = 0,
+	VIC_GENERIC_PROV_OS_TYPE_ESX = 1,
+	VIC_GENERIC_PROV_OS_TYPE_LINUX = 2,
+	VIC_GENERIC_PROV_OS_TYPE_WINDOWS = 3,
 };
 
 struct vic_provinfo {
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index c5a2fe0..b79d7e1 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -19,6 +19,7 @@
 #include <linux/platform_device.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/of.h>
 #include <net/ethoc.h>
 
 static int buffer_size = 0x8000; /* 32 KBytes */
@@ -184,7 +185,6 @@
  * @netdev:	pointer to network device structure
  * @napi:	NAPI structure
  * @msg_enable:	device state flags
- * @rx_lock:	receive lock
  * @lock:	device lock
  * @phy:	attached PHY
  * @mdio:	MDIO bus for PHY access
@@ -209,7 +209,6 @@
 	struct napi_struct napi;
 	u32 msg_enable;
 
-	spinlock_t rx_lock;
 	spinlock_t lock;
 
 	struct phy_device *phy;
@@ -413,10 +412,21 @@
 		unsigned int entry;
 		struct ethoc_bd bd;
 
-		entry = priv->num_tx + (priv->cur_rx % priv->num_rx);
+		entry = priv->num_tx + priv->cur_rx;
 		ethoc_read_bd(priv, entry, &bd);
-		if (bd.stat & RX_BD_EMPTY)
-			break;
+		if (bd.stat & RX_BD_EMPTY) {
+			ethoc_ack_irq(priv, INT_MASK_RX);
+			/* If packet (interrupt) came in between checking
+			 * BD_EMPTY and clearing the interrupt source, then we
+			 * risk missing the packet as the RX interrupt won't
+			 * trigger right away when we reenable it; hence, check
+			 * BD_EMPTY here again to make sure there isn't such a
+			 * packet waiting for us...
+			 */
+			ethoc_read_bd(priv, entry, &bd);
+			if (bd.stat & RX_BD_EMPTY)
+				break;
+		}
 
 		if (ethoc_update_rx_stats(priv, &bd) == 0) {
 			int size = bd.stat >> 16;
@@ -446,13 +456,14 @@
 		bd.stat &= ~RX_BD_STATS;
 		bd.stat |=  RX_BD_EMPTY;
 		ethoc_write_bd(priv, entry, &bd);
-		priv->cur_rx++;
+		if (++priv->cur_rx == priv->num_rx)
+			priv->cur_rx = 0;
 	}
 
 	return count;
 }
 
-static int ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd)
+static void ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd)
 {
 	struct net_device *netdev = dev->netdev;
 
@@ -482,32 +493,44 @@
 	netdev->stats.collisions += (bd->stat >> 4) & 0xf;
 	netdev->stats.tx_bytes += bd->stat >> 16;
 	netdev->stats.tx_packets++;
-	return 0;
 }
 
-static void ethoc_tx(struct net_device *dev)
+static int ethoc_tx(struct net_device *dev, int limit)
 {
 	struct ethoc *priv = netdev_priv(dev);
+	int count;
+	struct ethoc_bd bd;
 
-	spin_lock(&priv->lock);
+	for (count = 0; count < limit; ++count) {
+		unsigned int entry;
 
-	while (priv->dty_tx != priv->cur_tx) {
-		unsigned int entry = priv->dty_tx % priv->num_tx;
-		struct ethoc_bd bd;
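+		/* num_tx is a power of two (enforced at probe time),
+		 * so masking yields cheap ring-index wraparound.
+		 */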
+		entry = priv->dty_tx & (priv->num_tx - 1);
 
 		ethoc_read_bd(priv, entry, &bd);
-		if (bd.stat & TX_BD_READY)
-			break;
 
-		entry = (++priv->dty_tx) % priv->num_tx;
-		(void)ethoc_update_tx_stats(priv, &bd);
+		if (bd.stat & TX_BD_READY || (priv->dty_tx == priv->cur_tx)) {
+			ethoc_ack_irq(priv, INT_MASK_TX);
+			/* If interrupt came in between reading in the BD
+			 * and clearing the interrupt source, then we risk
+			 * missing the event as the TX interrupt won't trigger
+			 * right away when we reenable it; hence, check
+			 * TX_BD_READY here again to make sure there isn't such an
+			 * event pending...
+			 */
+			ethoc_read_bd(priv, entry, &bd);
+			if (bd.stat & TX_BD_READY ||
+			    (priv->dty_tx == priv->cur_tx))
+				break;
+		}
+
+		ethoc_update_tx_stats(priv, &bd);
+		priv->dty_tx++;
 	}
 
 	if ((priv->cur_tx - priv->dty_tx) <= (priv->num_tx / 2))
 		netif_wake_queue(dev);
 
-	ethoc_ack_irq(priv, INT_MASK_TX);
-	spin_unlock(&priv->lock);
+	return count;
 }
 
 static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
@@ -515,32 +538,38 @@
 	struct net_device *dev = dev_id;
 	struct ethoc *priv = netdev_priv(dev);
 	u32 pending;
+	u32 mask;
 
-	ethoc_disable_irq(priv, INT_MASK_ALL);
+	/* Figure out what triggered the interrupt...
+	 * The tricky bit here is that the interrupt source bits get
+	 * set in INT_SOURCE for an event regardless of whether that
+	 * event is masked or not.  Thus, in order to figure out what
+	 * triggered the interrupt, we need to remove the sources
+	 * for all events that are currently masked.  This behaviour
+	 * is not particularly well documented but reasonable...
+	 */
+	mask = ethoc_read(priv, INT_MASK);
 	pending = ethoc_read(priv, INT_SOURCE);
+	pending &= mask;
+
 	if (unlikely(pending == 0)) {
-		ethoc_enable_irq(priv, INT_MASK_ALL);
 		return IRQ_NONE;
 	}
 
 	ethoc_ack_irq(priv, pending);
 
+	/* We always handle the dropped packet interrupt */
 	if (pending & INT_MASK_BUSY) {
 		dev_err(&dev->dev, "packet dropped\n");
 		dev->stats.rx_dropped++;
 	}
 
-	if (pending & INT_MASK_RX) {
-		if (napi_schedule_prep(&priv->napi))
-			__napi_schedule(&priv->napi);
-	} else {
-		ethoc_enable_irq(priv, INT_MASK_RX);
+	/* Handle receive/transmit event by switching to polling */
+	if (pending & (INT_MASK_TX | INT_MASK_RX)) {
+		ethoc_disable_irq(priv, INT_MASK_TX | INT_MASK_RX);
+		napi_schedule(&priv->napi);
 	}
 
-	if (pending & INT_MASK_TX)
-		ethoc_tx(dev);
-
-	ethoc_enable_irq(priv, INT_MASK_ALL & ~INT_MASK_RX);
 	return IRQ_HANDLED;
 }
 
@@ -566,26 +595,29 @@
 static int ethoc_poll(struct napi_struct *napi, int budget)
 {
 	struct ethoc *priv = container_of(napi, struct ethoc, napi);
-	int work_done = 0;
+	int rx_work_done = 0;
+	int tx_work_done = 0;
 
-	work_done = ethoc_rx(priv->netdev, budget);
-	if (work_done < budget) {
-		ethoc_enable_irq(priv, INT_MASK_RX);
+	rx_work_done = ethoc_rx(priv->netdev, budget);
+	tx_work_done = ethoc_tx(priv->netdev, budget);
+
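+	/* Re-arm RX/TX interrupts only once both directions came in
+	 * under budget, i.e. both rings have been fully drained.
+	 */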
+	if (rx_work_done < budget && tx_work_done < budget) {
 		napi_complete(napi);
+		ethoc_enable_irq(priv, INT_MASK_TX | INT_MASK_RX);
 	}
 
-	return work_done;
+	return rx_work_done;
 }
 
 static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg)
 {
-	unsigned long timeout = jiffies + ETHOC_MII_TIMEOUT;
 	struct ethoc *priv = bus->priv;
+	int i;
 
 	ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
 	ethoc_write(priv, MIICOMMAND, MIICOMMAND_READ);
 
-	while (time_before(jiffies, timeout)) {
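+	/* Poll a bounded number of times, sleeping between attempts,
+	 * instead of scheduling against a jiffies deadline.
+	 */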
+	for (i = 0; i < 5; i++) {
 		u32 status = ethoc_read(priv, MIISTATUS);
 		if (!(status & MIISTATUS_BUSY)) {
 			u32 data = ethoc_read(priv, MIIRX_DATA);
@@ -593,8 +625,7 @@
 			ethoc_write(priv, MIICOMMAND, 0);
 			return data;
 		}
-
-		schedule();
+		usleep_range(100, 200);
 	}
 
 	return -EBUSY;
@@ -602,22 +633,21 @@
 
 static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
 {
-	unsigned long timeout = jiffies + ETHOC_MII_TIMEOUT;
 	struct ethoc *priv = bus->priv;
+	int i;
 
 	ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
 	ethoc_write(priv, MIITX_DATA, val);
 	ethoc_write(priv, MIICOMMAND, MIICOMMAND_WRITE);
 
-	while (time_before(jiffies, timeout)) {
+	for (i = 0; i < 5; i++) {
 		u32 stat = ethoc_read(priv, MIISTATUS);
 		if (!(stat & MIISTATUS_BUSY)) {
 			/* reset MII command register */
 			ethoc_write(priv, MIICOMMAND, 0);
 			return 0;
 		}
-
-		schedule();
+		usleep_range(100, 200);
 	}
 
 	return -EBUSY;
@@ -971,9 +1001,17 @@
 	/* calculate the number of TX/RX buffers, maximum 128 supported */
 	num_bd = min_t(unsigned int,
 		128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ);
-	priv->num_tx = max(2, num_bd / 4);
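+	/* need at least two TX and two RX descriptors after the split below */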
+	if (num_bd < 4) {
+		ret = -ENODEV;
+		goto error;
+	}
+	/* num_tx must be a power of two */
+	priv->num_tx = rounddown_pow_of_two(num_bd >> 1);
 	priv->num_rx = num_bd - priv->num_tx;
 
+	dev_dbg(&pdev->dev, "ethoc: num_tx: %d num_rx: %d\n",
+		priv->num_tx, priv->num_rx);
+
 	priv->vma = devm_kzalloc(&pdev->dev, num_bd*sizeof(void*), GFP_KERNEL);
 	if (!priv->vma) {
 		ret = -ENOMEM;
@@ -982,10 +1020,23 @@
 
 	/* Allow the platform setup code to pass in a MAC address. */
 	if (pdev->dev.platform_data) {
-		struct ethoc_platform_data *pdata =
-			(struct ethoc_platform_data *)pdev->dev.platform_data;
+		struct ethoc_platform_data *pdata = pdev->dev.platform_data;
 		memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN);
 		priv->phy_id = pdata->phy_id;
+	} else {
+		priv->phy_id = -1;
+
+#ifdef CONFIG_OF
+		{
+		const uint8_t *mac;
+
+		mac = of_get_property(pdev->dev.of_node,
+				      "local-mac-address",
+				      NULL);
+		if (mac)
+			memcpy(netdev->dev_addr, mac, IFHWADDRLEN);
+		}
+#endif
 	}
 
 	/* Check that the given MAC address is valid. If it isn't, read the
@@ -1046,7 +1097,6 @@
 	/* setup NAPI */
 	netif_napi_add(netdev, &priv->napi, ethoc_poll, 64);
 
-	spin_lock_init(&priv->rx_lock);
 	spin_lock_init(&priv->lock);
 
 	ret = register_netdev(netdev);
@@ -1113,6 +1163,16 @@
 # define ethoc_resume  NULL
 #endif
 
+#ifdef CONFIG_OF
+static struct of_device_id ethoc_match[] = {
+	{
+		.compatible = "opencores,ethoc",
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, ethoc_match);
+#endif
+
 static struct platform_driver ethoc_driver = {
 	.probe   = ethoc_probe,
 	.remove  = __devexit_p(ethoc_remove),
@@ -1120,6 +1180,10 @@
 	.resume  = ethoc_resume,
 	.driver  = {
 		.name = "ethoc",
+		.owner = THIS_MODULE,
+#ifdef CONFIG_OF
+		.of_match_table = ethoc_match,
+#endif
 	},
 };
 
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index e9f5d03..50c1213 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -366,9 +366,8 @@
 {
 	struct net_device *dev = dev_id;
 	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
-	unsigned long flags;
 
-	spin_lock_irqsave(&priv->lock, flags);
+	spin_lock(&priv->lock);
 	while (bcom_buffer_done(priv->tx_dmatsk)) {
 		struct sk_buff *skb;
 		struct bcom_fec_bd *bd;
@@ -379,7 +378,7 @@
 
 		dev_kfree_skb_irq(skb);
 	}
-	spin_unlock_irqrestore(&priv->lock, flags);
+	spin_unlock(&priv->lock);
 
 	netif_wake_queue(dev);
 
@@ -395,9 +394,8 @@
 	struct bcom_fec_bd *bd;
 	u32 status, physaddr;
 	int length;
-	unsigned long flags;
 
-	spin_lock_irqsave(&priv->lock, flags);
+	spin_lock(&priv->lock);
 
 	while (bcom_buffer_done(priv->rx_dmatsk)) {
 
@@ -429,7 +427,7 @@
 
 		/* Process the received skb - Drop the spin lock while
 		 * calling into the network stack */
-		spin_unlock_irqrestore(&priv->lock, flags);
+		spin_unlock(&priv->lock);
 
 		dma_unmap_single(dev->dev.parent, physaddr, rskb->len,
 				 DMA_FROM_DEVICE);
@@ -438,10 +436,10 @@
 		rskb->protocol = eth_type_trans(rskb, dev);
 		netif_rx(rskb);
 
-		spin_lock_irqsave(&priv->lock, flags);
+		spin_lock(&priv->lock);
 	}
 
-	spin_unlock_irqrestore(&priv->lock, flags);
+	spin_unlock(&priv->lock);
 
 	return IRQ_HANDLED;
 }
@@ -452,7 +450,6 @@
 	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
 	struct mpc52xx_fec __iomem *fec = priv->fec;
 	u32 ievent;
-	unsigned long flags;
 
 	ievent = in_be32(&fec->ievent);
 
@@ -470,9 +467,9 @@
 		if (net_ratelimit() && (ievent & FEC_IEVENT_XFIFO_ERROR))
 			dev_warn(&dev->dev, "FEC_IEVENT_XFIFO_ERROR\n");
 
-		spin_lock_irqsave(&priv->lock, flags);
+		spin_lock(&priv->lock);
 		mpc52xx_fec_reset(dev);
-		spin_unlock_irqrestore(&priv->lock, flags);
+		spin_unlock(&priv->lock);
 
 		return IRQ_HANDLED;
 	}
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 0fa1776..cd2d72d 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -39,6 +39,9 @@
  * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
  * superfluous timer interrupts from the nic.
  */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #define FORCEDETH_VERSION		"0.64"
 #define DRV_NAME			"forcedeth"
 
@@ -60,18 +63,12 @@
 #include <linux/if_vlan.h>
 #include <linux/dma-mapping.h>
 #include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
 
 #include <asm/irq.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
 #include <asm/system.h>
 
-#if 0
-#define dprintk			printk
-#else
-#define dprintk(x...)		do { } while (0)
-#endif
-
 #define TX_WORK_PER_LOOP  64
 #define RX_WORK_PER_LOOP  64
 
@@ -186,9 +183,9 @@
 	NvRegSlotTime = 0x9c,
 #define NVREG_SLOTTIME_LEGBF_ENABLED	0x80000000
 #define NVREG_SLOTTIME_10_100_FULL	0x00007f00
-#define NVREG_SLOTTIME_1000_FULL 	0x0003ff00
+#define NVREG_SLOTTIME_1000_FULL	0x0003ff00
 #define NVREG_SLOTTIME_HALF		0x0000ff00
-#define NVREG_SLOTTIME_DEFAULT	 	0x00007f00
+#define NVREG_SLOTTIME_DEFAULT		0x00007f00
 #define NVREG_SLOTTIME_MASK		0x000000ff
 
 	NvRegTxDeferral = 0xA0,
@@ -297,7 +294,7 @@
 #define NVREG_WAKEUPFLAGS_ENABLE	0x1111
 
 	NvRegMgmtUnitGetVersion = 0x204,
-#define NVREG_MGMTUNITGETVERSION     	0x01
+#define NVREG_MGMTUNITGETVERSION	0x01
 	NvRegMgmtUnitVersion = 0x208,
 #define NVREG_MGMTUNITVERSION		0x08
 	NvRegPowerCap = 0x268,
@@ -368,8 +365,8 @@
 };
 
 union ring_type {
-	struct ring_desc* orig;
-	struct ring_desc_ex* ex;
+	struct ring_desc *orig;
+	struct ring_desc_ex *ex;
 };
 
 #define FLAG_MASK_V1 0xffff0000
@@ -444,10 +441,10 @@
 #define NV_RX3_VLAN_TAG_MASK	(0x0000FFFF)
 
 /* Miscelaneous hardware related defines: */
-#define NV_PCI_REGSZ_VER1      	0x270
-#define NV_PCI_REGSZ_VER2      	0x2d4
-#define NV_PCI_REGSZ_VER3      	0x604
-#define NV_PCI_REGSZ_MAX       	0x604
+#define NV_PCI_REGSZ_VER1	0x270
+#define NV_PCI_REGSZ_VER2	0x2d4
+#define NV_PCI_REGSZ_VER3	0x604
+#define NV_PCI_REGSZ_MAX	0x604
 
 /* various timeout delays: all in usec */
 #define NV_TXRX_RESET_DELAY	4
@@ -717,7 +714,7 @@
 	{ NvRegMulticastAddrA, 0xffffffff },
 	{ NvRegTxWatermark, 0x0ff },
 	{ NvRegWakeUpFlags, 0x07777 },
-	{ 0,0 }
+	{ 0, 0 }
 };
 
 struct nv_skb_map {
@@ -911,7 +908,7 @@
  * Power down phy when interface is down (persists through reboot;
  * older Linux and other OSes may not power it up again)
  */
-static int phy_power_down = 0;
+static int phy_power_down;
 
 static inline struct fe_priv *get_nvpriv(struct net_device *dev)
 {
@@ -948,7 +945,7 @@
 }
 
 static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
-				int delay, int delaymax, const char *msg)
+		     int delay, int delaymax)
 {
 	u8 __iomem *base = get_hwbase(dev);
 
@@ -956,11 +953,8 @@
 	do {
 		udelay(delay);
 		delaymax -= delay;
-		if (delaymax < 0) {
-			if (msg)
-				printk("%s", msg);
+		if (delaymax < 0)
 			return 1;
-		}
 	} while ((readl(base + offset) & mask) != target);
 	return 0;
 }
@@ -984,12 +978,10 @@
 	u8 __iomem *base = get_hwbase(dev);
 
 	if (!nv_optimized(np)) {
-		if (rxtx_flags & NV_SETUP_RX_RING) {
+		if (rxtx_flags & NV_SETUP_RX_RING)
 			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
-		}
-		if (rxtx_flags & NV_SETUP_TX_RING) {
+		if (rxtx_flags & NV_SETUP_TX_RING)
 			writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
-		}
 	} else {
 		if (rxtx_flags & NV_SETUP_RX_RING) {
 			writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
@@ -1015,10 +1007,8 @@
 			pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
 					    np->rx_ring.ex, np->ring_addr);
 	}
-	if (np->rx_skb)
-		kfree(np->rx_skb);
-	if (np->tx_skb)
-		kfree(np->tx_skb);
+	kfree(np->rx_skb);
+	kfree(np->tx_skb);
 }
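kfree(NULL) is defined to be a no-op, so the dropped NULL guards were redundant. The resulting teardown in isolation (sketch):

	static void free_skb_maps(struct fe_priv *np)
	{
		kfree(np->rx_skb);	/* safe even if the allocation never happened */
		kfree(np->tx_skb);
	}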
 
 static int using_multi_irqs(struct net_device *dev)
@@ -1145,23 +1135,15 @@
 	writel(reg, base + NvRegMIIControl);
 
 	if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
-			NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) {
-		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
-				dev->name, miireg, addr);
+			NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX)) {
 		retval = -1;
 	} else if (value != MII_READ) {
 		/* it was a write operation - fewer failures are detectable */
-		dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
-				dev->name, value, miireg, addr);
 		retval = 0;
 	} else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
-		dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
-				dev->name, miireg, addr);
 		retval = -1;
 	} else {
 		retval = readl(base + NvRegMIIData);
-		dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
-				dev->name, miireg, addr, retval);
 	}
 
 	return retval;
@@ -1174,16 +1156,15 @@
 	unsigned int tries = 0;
 
 	miicontrol = BMCR_RESET | bmcr_setup;
-	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) {
+	if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol))
 		return -1;
-	}
 
 	/* wait for 500ms */
 	msleep(500);
 
 	/* must wait till reset is deasserted */
 	while (miicontrol & BMCR_RESET) {
-		msleep(10);
+		usleep_range(10000, 20000);
 		miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
 		/* FIXME: 100 tries seem excessive */
 		if (tries++ > 100)
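The msleep() to usleep_range() switch follows Documentation/timers/timers-howto.txt: msleep() is jiffy-based and can overshoot badly for requests this small, while usleep_range() is hrtimer-based with an explicit upper bound. Side by side (illustrative only):

	static void ten_ms_delay_example(void)
	{
		msleep(10);			/* jiffy granularity: may become 20ms or more */
		usleep_range(10000, 20000);	/* bounded 10-20ms window */
	}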
@@ -1192,106 +1173,239 @@
 	return 0;
 }
 
+static int init_realtek_8211b(struct net_device *dev, struct fe_priv *np)
+{
+	static const struct {
+		int reg;
+		int init;
+	} ri[] = {
+		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
+		{ PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2 },
+		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3 },
+		{ PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4 },
+		{ PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5 },
+		{ PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6 },
+		{ PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
+	};
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(ri); i++) {
+		if (mii_rw(dev, np->phyaddr, ri[i].reg, ri[i].init))
+			return PHY_ERROR;
+	}
+
+	return 0;
+}
+
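The 8211B bring-up above is now table-driven: the register/value pairs sit in a const array walked with ARRAY_SIZE(), so reordering or extending the sequence only touches data. The same shape as a generic helper (struct and function names are illustrative):

	struct phy_init_step {
		int reg;
		int val;
	};

	static int write_phy_table(struct net_device *dev, struct fe_priv *np,
				   const struct phy_init_step *tbl, size_t n)
	{
		size_t i;

		for (i = 0; i < n; i++)
			if (mii_rw(dev, np->phyaddr, tbl[i].reg, tbl[i].val))
				return PHY_ERROR;
		return 0;
	}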
+static int init_realtek_8211c(struct net_device *dev, struct fe_priv *np)
+{
+	u32 reg;
+	u8 __iomem *base = get_hwbase(dev);
+	u32 powerstate = readl(base + NvRegPowerState2);
+
+	/* need to perform hw phy reset */
+	powerstate |= NVREG_POWERSTATE2_PHY_RESET;
+	writel(powerstate, base + NvRegPowerState2);
+	msleep(25);
+
+	powerstate &= ~NVREG_POWERSTATE2_PHY_RESET;
+	writel(powerstate, base + NvRegPowerState2);
+	msleep(25);
+
+	reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
+	reg |= PHY_REALTEK_INIT9;
+	if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg))
+		return PHY_ERROR;
+	if (mii_rw(dev, np->phyaddr,
+		   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10))
+		return PHY_ERROR;
+	reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ);
+	if (!(reg & PHY_REALTEK_INIT11)) {
+		reg |= PHY_REALTEK_INIT11;
+		if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg))
+			return PHY_ERROR;
+	}
+	if (mii_rw(dev, np->phyaddr,
+		   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1))
+		return PHY_ERROR;
+
+	return 0;
+}
+
+static int init_realtek_8201(struct net_device *dev, struct fe_priv *np)
+{
+	u32 phy_reserved;
+
+	if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
+		phy_reserved = mii_rw(dev, np->phyaddr,
+				      PHY_REALTEK_INIT_REG6, MII_READ);
+		phy_reserved |= PHY_REALTEK_INIT7;
+		if (mii_rw(dev, np->phyaddr,
+			   PHY_REALTEK_INIT_REG6, phy_reserved))
+			return PHY_ERROR;
+	}
+
+	return 0;
+}
+
+static int init_realtek_8201_cross(struct net_device *dev, struct fe_priv *np)
+{
+	u32 phy_reserved;
+
+	if (phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
+		if (mii_rw(dev, np->phyaddr,
+			   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3))
+			return PHY_ERROR;
+		phy_reserved = mii_rw(dev, np->phyaddr,
+				      PHY_REALTEK_INIT_REG2, MII_READ);
+		phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
+		phy_reserved |= PHY_REALTEK_INIT3;
+		if (mii_rw(dev, np->phyaddr,
+			   PHY_REALTEK_INIT_REG2, phy_reserved))
+			return PHY_ERROR;
+		if (mii_rw(dev, np->phyaddr,
+			   PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1))
+			return PHY_ERROR;
+	}
+
+	return 0;
+}
+
+static int init_cicada(struct net_device *dev, struct fe_priv *np,
+		       u32 phyinterface)
+{
+	u32 phy_reserved;
+
+	if (phyinterface & PHY_RGMII) {
+		phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
+		phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
+		phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
+		if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved))
+			return PHY_ERROR;
+		phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
+		phy_reserved |= PHY_CICADA_INIT5;
+		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved))
+			return PHY_ERROR;
+	}
+	phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
+	phy_reserved |= PHY_CICADA_INIT6;
+	if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved))
+		return PHY_ERROR;
+
+	return 0;
+}
+
+static int init_vitesse(struct net_device *dev, struct fe_priv *np)
+{
+	u32 phy_reserved;
+
+	if (mii_rw(dev, np->phyaddr,
+		   PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1))
+		return PHY_ERROR;
+	if (mii_rw(dev, np->phyaddr,
+		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2))
+		return PHY_ERROR;
+	phy_reserved = mii_rw(dev, np->phyaddr,
+			      PHY_VITESSE_INIT_REG4, MII_READ);
+	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
+		return PHY_ERROR;
+	phy_reserved = mii_rw(dev, np->phyaddr,
+			      PHY_VITESSE_INIT_REG3, MII_READ);
+	phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
+	phy_reserved |= PHY_VITESSE_INIT3;
+	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
+		return PHY_ERROR;
+	if (mii_rw(dev, np->phyaddr,
+		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4))
+		return PHY_ERROR;
+	if (mii_rw(dev, np->phyaddr,
+		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5))
+		return PHY_ERROR;
+	phy_reserved = mii_rw(dev, np->phyaddr,
+			      PHY_VITESSE_INIT_REG4, MII_READ);
+	phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
+	phy_reserved |= PHY_VITESSE_INIT3;
+	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
+		return PHY_ERROR;
+	phy_reserved = mii_rw(dev, np->phyaddr,
+			      PHY_VITESSE_INIT_REG3, MII_READ);
+	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
+		return PHY_ERROR;
+	if (mii_rw(dev, np->phyaddr,
+		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6))
+		return PHY_ERROR;
+	if (mii_rw(dev, np->phyaddr,
+		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7))
+		return PHY_ERROR;
+	phy_reserved = mii_rw(dev, np->phyaddr,
+			      PHY_VITESSE_INIT_REG4, MII_READ);
+	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
+		return PHY_ERROR;
+	phy_reserved = mii_rw(dev, np->phyaddr,
+			      PHY_VITESSE_INIT_REG3, MII_READ);
+	phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
+	phy_reserved |= PHY_VITESSE_INIT8;
+	if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
+		return PHY_ERROR;
+	if (mii_rw(dev, np->phyaddr,
+		   PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9))
+		return PHY_ERROR;
+	if (mii_rw(dev, np->phyaddr,
+		   PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10))
+		return PHY_ERROR;
+
+	return 0;
+}
+
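init_cicada(), init_vitesse() and the two 8201 helpers all repeat one idiom: read a vendor PHY register, clear/set some bits, write it back, and fail on any MII error. Isolated as a hypothetical helper (not part of this patch):

	static int phy_rmw(struct net_device *dev, struct fe_priv *np,
			   int reg, u32 clear, u32 set)
	{
		u32 val = mii_rw(dev, np->phyaddr, reg, MII_READ);

		val &= ~clear;
		val |= set;
		return mii_rw(dev, np->phyaddr, reg, val) ? PHY_ERROR : 0;
	}

For example, phy_rmw(dev, np, PHY_VITESSE_INIT_REG3, PHY_VITESSE_INIT_MSK1, PHY_VITESSE_INIT3) would stand in for one read-modify-write block of init_vitesse().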
 static int phy_init(struct net_device *dev)
 {
 	struct fe_priv *np = get_nvpriv(dev);
 	u8 __iomem *base = get_hwbase(dev);
-	u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000,reg;
+	u32 phyinterface;
+	u32 mii_status, mii_control, mii_control_1000, reg;
 
 	/* phy errata for E3016 phy */
 	if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
 		reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
 		reg &= ~PHY_MARVELL_E3016_INITMASK;
 		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
-			printk(KERN_INFO "%s: phy write to errata reg failed.\n", pci_name(np->pci_dev));
+			netdev_info(dev, "%s: phy write to errata reg failed\n",
+				    pci_name(np->pci_dev));
 			return PHY_ERROR;
 		}
 	}
 	if (np->phy_oui == PHY_OUI_REALTEK) {
 		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
 		    np->phy_rev == PHY_REV_REALTEK_8211B) {
-			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
-				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			if (init_realtek_8211b(dev, np)) {
+				netdev_info(dev, "%s: phy init failed\n",
+					    pci_name(np->pci_dev));
 				return PHY_ERROR;
 			}
-			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
-				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+		} else if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
+			   np->phy_rev == PHY_REV_REALTEK_8211C) {
+			if (init_realtek_8211c(dev, np)) {
+				netdev_info(dev, "%s: phy init failed\n",
+					    pci_name(np->pci_dev));
 				return PHY_ERROR;
 			}
-			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
-				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+		} else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
+			if (init_realtek_8201(dev, np)) {
+				netdev_info(dev, "%s: phy init failed\n",
+					    pci_name(np->pci_dev));
 				return PHY_ERROR;
 			}
-			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
-				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
-				return PHY_ERROR;
-			}
-			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5)) {
-				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
-				return PHY_ERROR;
-			}
-			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6)) {
-				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
-				return PHY_ERROR;
-			}
-			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
-				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
-				return PHY_ERROR;
-			}
-		}
-		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
-		    np->phy_rev == PHY_REV_REALTEK_8211C) {
-			u32 powerstate = readl(base + NvRegPowerState2);
-
-			/* need to perform hw phy reset */
-			powerstate |= NVREG_POWERSTATE2_PHY_RESET;
-			writel(powerstate, base + NvRegPowerState2);
-			msleep(25);
-
-			powerstate &= ~NVREG_POWERSTATE2_PHY_RESET;
-			writel(powerstate, base + NvRegPowerState2);
-			msleep(25);
-
-			reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
-			reg |= PHY_REALTEK_INIT9;
-			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg)) {
-				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
-				return PHY_ERROR;
-			}
-			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10)) {
-				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
-				return PHY_ERROR;
-			}
-			reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ);
-			if (!(reg & PHY_REALTEK_INIT11)) {
-				reg |= PHY_REALTEK_INIT11;
-				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg)) {
-					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
-					return PHY_ERROR;
-				}
-			}
-			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
-				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
-				return PHY_ERROR;
-			}
-		}
-		if (np->phy_model == PHY_MODEL_REALTEK_8201) {
-			if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
-				phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
-				phy_reserved |= PHY_REALTEK_INIT7;
-				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) {
-					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
-					return PHY_ERROR;
-				}
-			}
 		}
 	}
 
 	/* set advertise register */
 	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
-	reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP);
+	reg |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
+		ADVERTISE_100HALF | ADVERTISE_100FULL |
+		ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
 	if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
-		printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
+		netdev_info(dev, "%s: phy write to advertise failed\n",
+			    pci_name(np->pci_dev));
 		return PHY_ERROR;
 	}
 
@@ -1302,7 +1416,8 @@
 	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
 	if (mii_status & PHY_GIGABIT) {
 		np->gigabit = PHY_GIGABIT;
-		mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
+		mii_control_1000 = mii_rw(dev, np->phyaddr,
+					  MII_CTRL1000, MII_READ);
 		mii_control_1000 &= ~ADVERTISE_1000HALF;
 		if (phyinterface & PHY_RGMII)
 			mii_control_1000 |= ADVERTISE_1000FULL;
@@ -1310,11 +1425,11 @@
 			mii_control_1000 &= ~ADVERTISE_1000FULL;
 
 		if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
-			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			netdev_info(dev, "%s: phy init failed\n",
+				    pci_name(np->pci_dev));
 			return PHY_ERROR;
 		}
-	}
-	else
+	} else
 		np->gigabit = 0;
 
 	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
@@ -1326,7 +1441,8 @@
 		/* start autoneg since we already performed hw reset above */
 		mii_control |= BMCR_ANRESTART;
 		if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
-			printk(KERN_INFO "%s: phy init failed\n", pci_name(np->pci_dev));
+			netdev_info(dev, "%s: phy init failed\n",
+				    pci_name(np->pci_dev));
 			return PHY_ERROR;
 		}
 	} else {
@@ -1334,164 +1450,41 @@
 		 * (certain phys need bmcr to be setup with reset)
 		 */
 		if (phy_reset(dev, mii_control)) {
-			printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev));
+			netdev_info(dev, "%s: phy reset failed\n",
+				    pci_name(np->pci_dev));
 			return PHY_ERROR;
 		}
 	}
 
 	/* phy vendor specific configuration */
-	if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII) ) {
-		phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
-		phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
-		phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
-		if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
-			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+	if (np->phy_oui == PHY_OUI_CICADA) {
+		if (init_cicada(dev, np, phyinterface)) {
+			netdev_info(dev, "%s: phy init failed\n",
+				    pci_name(np->pci_dev));
 			return PHY_ERROR;
 		}
-		phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
-		phy_reserved |= PHY_CICADA_INIT5;
-		if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) {
-			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+	} else if (np->phy_oui == PHY_OUI_VITESSE) {
+		if (init_vitesse(dev, np)) {
+			netdev_info(dev, "%s: phy init failed\n",
+				    pci_name(np->pci_dev));
 			return PHY_ERROR;
 		}
-	}
-	if (np->phy_oui == PHY_OUI_CICADA) {
-		phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
-		phy_reserved |= PHY_CICADA_INIT6;
-		if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
-			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
-			return PHY_ERROR;
-		}
-	}
-	if (np->phy_oui == PHY_OUI_VITESSE) {
-		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1)) {
-			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
-			return PHY_ERROR;
-		}
-		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2)) {
-			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
-			return PHY_ERROR;
-		}
-		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
-		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
-			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
-			return PHY_ERROR;
-		}
-		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
-		phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
-		phy_reserved |= PHY_VITESSE_INIT3;
-		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
-			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
-			return PHY_ERROR;
-		}
-		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4)) {
-			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
-			return PHY_ERROR;
-		}
-		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5)) {
-			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
-			return PHY_ERROR;
-		}
-		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
-		phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
-		phy_reserved |= PHY_VITESSE_INIT3;
-		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
-			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
-			return PHY_ERROR;
-		}
-		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
-		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
-			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
-			return PHY_ERROR;
-		}
-		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6)) {
-			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
-			return PHY_ERROR;
-		}
-		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7)) {
-			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
-			return PHY_ERROR;
-		}
-		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
-		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
-			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
-			return PHY_ERROR;
-		}
-		phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
-		phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
-		phy_reserved |= PHY_VITESSE_INIT8;
-		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
-			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
-			return PHY_ERROR;
-		}
-		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9)) {
-			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
-			return PHY_ERROR;
-		}
-		if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10)) {
-			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
-			return PHY_ERROR;
-		}
-	}
-	if (np->phy_oui == PHY_OUI_REALTEK) {
+	} else if (np->phy_oui == PHY_OUI_REALTEK) {
 		if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
 		    np->phy_rev == PHY_REV_REALTEK_8211B) {
 			/* reset could have cleared these out, set them back */
-			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
-				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+			if (init_realtek_8211b(dev, np)) {
+				netdev_info(dev, "%s: phy init failed\n",
+					    pci_name(np->pci_dev));
 				return PHY_ERROR;
 			}
-			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
-				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
+		} else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
+			if (init_realtek_8201(dev, np) ||
+			    init_realtek_8201_cross(dev, np)) {
+				netdev_info(dev, "%s: phy init failed\n",
+					    pci_name(np->pci_dev));
 				return PHY_ERROR;
 			}
-			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
-				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
-				return PHY_ERROR;
-			}
-			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
-				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
-				return PHY_ERROR;
-			}
-			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5)) {
-				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
-				return PHY_ERROR;
-			}
-			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6)) {
-				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
-				return PHY_ERROR;
-			}
-			if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
-				printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
-				return PHY_ERROR;
-			}
-		}
-		if (np->phy_model == PHY_MODEL_REALTEK_8201) {
-			if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
-				phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
-				phy_reserved |= PHY_REALTEK_INIT7;
-				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) {
-					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
-					return PHY_ERROR;
-				}
-			}
-			if (phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
-				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
-					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
-					return PHY_ERROR;
-				}
-				phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
-				phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
-				phy_reserved |= PHY_REALTEK_INIT3;
-				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved)) {
-					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
-					return PHY_ERROR;
-				}
-				if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
-					printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
-					return PHY_ERROR;
-				}
-			}
 		}
 	}
 
@@ -1501,12 +1494,10 @@
 	/* restart auto negotiation, power down phy */
 	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
 	mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
-	if (phy_power_down) {
+	if (phy_power_down)
 		mii_control |= BMCR_PDOWN;
-	}
-	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
+	if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control))
 		return PHY_ERROR;
-	}
 
 	return 0;
 }
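The printk(KERN_INFO ...) to netdev_info() conversion across phy_init() swaps a hand-built prefix for the standard netdev one, and drops the trailing period per current message conventions. Before/after shape in isolation (illustrative):

	static void log_style_example(struct net_device *dev, struct fe_priv *np)
	{
		/* old: caller identifies the device itself */
		printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
		/* new: netdev_info() prefixes driver and interface name */
		netdev_info(dev, "%s: phy init failed\n", pci_name(np->pci_dev));
	}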
@@ -1517,7 +1508,6 @@
 	u8 __iomem *base = get_hwbase(dev);
 	u32 rx_ctrl = readl(base + NvRegReceiverControl);
 
-	dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
 	/* Already running? Stop it. */
 	if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
 		rx_ctrl &= ~NVREG_RCVCTL_START;
@@ -1526,12 +1516,10 @@
 	}
 	writel(np->linkspeed, base + NvRegLinkSpeed);
 	pci_push(base);
-        rx_ctrl |= NVREG_RCVCTL_START;
-        if (np->mac_in_use)
+	rx_ctrl |= NVREG_RCVCTL_START;
+	if (np->mac_in_use)
 		rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
 	writel(rx_ctrl, base + NvRegReceiverControl);
-	dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
-				dev->name, np->duplex, np->linkspeed);
 	pci_push(base);
 }
 
@@ -1541,15 +1529,15 @@
 	u8 __iomem *base = get_hwbase(dev);
 	u32 rx_ctrl = readl(base + NvRegReceiverControl);
 
-	dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
 	if (!np->mac_in_use)
 		rx_ctrl &= ~NVREG_RCVCTL_START;
 	else
 		rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
 	writel(rx_ctrl, base + NvRegReceiverControl);
-	reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
-			NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX,
-			KERN_INFO "nv_stop_rx: ReceiverStatus remained busy");
+	if (reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
+		      NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX))
+		netdev_info(dev, "%s: ReceiverStatus remained busy\n",
+			    __func__);
 
 	udelay(NV_RXSTOP_DELAY2);
 	if (!np->mac_in_use)
@@ -1562,7 +1550,6 @@
 	u8 __iomem *base = get_hwbase(dev);
 	u32 tx_ctrl = readl(base + NvRegTransmitterControl);
 
-	dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
 	tx_ctrl |= NVREG_XMITCTL_START;
 	if (np->mac_in_use)
 		tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
@@ -1576,15 +1563,15 @@
 	u8 __iomem *base = get_hwbase(dev);
 	u32 tx_ctrl = readl(base + NvRegTransmitterControl);
 
-	dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
 	if (!np->mac_in_use)
 		tx_ctrl &= ~NVREG_XMITCTL_START;
 	else
 		tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
 	writel(tx_ctrl, base + NvRegTransmitterControl);
-	reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
-			NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX,
-			KERN_INFO "nv_stop_tx: TransmitterStatus remained busy");
+	if (reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
+		      NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX))
+		netdev_info(dev, "%s: TransmitterStatus remained busy\n",
+			    __func__);
 
 	udelay(NV_TXSTOP_DELAY2);
 	if (!np->mac_in_use)
@@ -1609,7 +1596,6 @@
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
 
-	dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
 	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
 	pci_push(base);
 	udelay(NV_TXRX_RESET_DELAY);
@@ -1623,8 +1609,6 @@
 	u8 __iomem *base = get_hwbase(dev);
 	u32 temp1, temp2, temp3;
 
-	dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);
-
 	writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
 	pci_push(base);
 
@@ -1745,7 +1729,7 @@
 static int nv_alloc_rx(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
-	struct ring_desc* less_rx;
+	struct ring_desc *less_rx;
 
 	less_rx = np->get_rx.orig;
 	if (less_rx-- == np->first_rx.orig)
@@ -1767,9 +1751,8 @@
 				np->put_rx.orig = np->first_rx.orig;
 			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
 				np->put_rx_ctx = np->first_rx_ctx;
-		} else {
+		} else
 			return 1;
-		}
 	}
 	return 0;
 }
@@ -1777,7 +1760,7 @@
 static int nv_alloc_rx_optimized(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
-	struct ring_desc_ex* less_rx;
+	struct ring_desc_ex *less_rx;
 
 	less_rx = np->get_rx.ex;
 	if (less_rx-- == np->first_rx.ex)
@@ -1800,9 +1783,8 @@
 				np->put_rx.ex = np->first_rx.ex;
 			if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
 				np->put_rx_ctx = np->first_rx_ctx;
-		} else {
+		} else
 			return 1;
-		}
 	}
 	return 0;
 }
@@ -2018,24 +2000,24 @@
 
 /* Known Good seed sets */
 static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
-    {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
-    {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974},
-    {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
-    {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974},
-    {266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984},
-    {266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984},
-    {366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800,  84},
-    {466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184}};
+	{145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
+	{245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974},
+	{145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
+	{245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974},
+	{266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984},
+	{266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984},
+	{366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800,  84},
+	{466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184} };
 
 static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
-    {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
-    {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
-    {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397},
-    {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
-    {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
-    {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
-    {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
-    {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}};
+	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
+	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
+	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397},
+	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
+	{251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375,  30, 295},
+	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
+	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
+	{351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395} };
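nv_gear_backoff_reseed() below picks one row of these tables at random; with BACKOFF_SEEDSET_ROWS a power of two (8 here), the modulo is exactly uniform. The selection in isolation (sketch):

	static u32 pick_seedset_row(void)
	{
		u32 seedset;

		get_random_bytes(&seedset, sizeof(seedset));
		return seedset % BACKOFF_SEEDSET_ROWS;	/* one index for both tables */
	}

The same row index is applied to main_seedset[] and gear_seedset[], keeping the per-LFSR seeds a matched pair.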
 
 static void nv_gear_backoff_reseed(struct net_device *dev)
 {
@@ -2083,13 +2065,12 @@
 	temp = NVREG_BKOFFCTRL_DEFAULT | (0 << NVREG_BKOFFCTRL_SELECT);
 	temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK;
 	temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR;
-	writel(temp,base + NvRegBackOffControl);
+	writel(temp, base + NvRegBackOffControl);
 
-    	/* Setup seeds for all gear LFSRs. */
+	/* Set up seeds for all gear LFSRs. */
 	get_random_bytes(&seedset, sizeof(seedset));
 	seedset = seedset % BACKOFF_SEEDSET_ROWS;
-	for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++)
-	{
+	for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++) {
 		temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT);
 		temp |= main_seedset[seedset][i-1] & 0x3ff;
 		temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR);
@@ -2113,10 +2094,10 @@
 	u32 size = skb_headlen(skb);
 	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
 	u32 empty_slots;
-	struct ring_desc* put_tx;
-	struct ring_desc* start_tx;
-	struct ring_desc* prev_tx;
-	struct nv_skb_map* prev_tx_ctx;
+	struct ring_desc *put_tx;
+	struct ring_desc *start_tx;
+	struct ring_desc *prev_tx;
+	struct nv_skb_map *prev_tx_ctx;
 	unsigned long flags;
 
 	/* add fragments to entries count */
@@ -2204,18 +2185,6 @@
 
 	spin_unlock_irqrestore(&np->lock, flags);
 
-	dprintk(KERN_DEBUG "%s: nv_start_xmit: entries %d queued for transmission. tx_flags_extra: %x\n",
-		dev->name, entries, tx_flags_extra);
-	{
-		int j;
-		for (j=0; j<64; j++) {
-			if ((j%16) == 0)
-				dprintk("\n%03x:", j);
-			dprintk(" %02x", ((unsigned char*)skb->data)[j]);
-		}
-		dprintk("\n");
-	}
-
 	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
 	return NETDEV_TX_OK;
 }
@@ -2233,11 +2202,11 @@
 	u32 size = skb_headlen(skb);
 	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
 	u32 empty_slots;
-	struct ring_desc_ex* put_tx;
-	struct ring_desc_ex* start_tx;
-	struct ring_desc_ex* prev_tx;
-	struct nv_skb_map* prev_tx_ctx;
-	struct nv_skb_map* start_tx_ctx;
+	struct ring_desc_ex *put_tx;
+	struct ring_desc_ex *start_tx;
+	struct ring_desc_ex *prev_tx;
+	struct nv_skb_map *prev_tx_ctx;
+	struct nv_skb_map *start_tx_ctx;
 	unsigned long flags;
 
 	/* add fragments to entries count */
@@ -2355,18 +2324,6 @@
 
 	spin_unlock_irqrestore(&np->lock, flags);
 
-	dprintk(KERN_DEBUG "%s: nv_start_xmit_optimized: entries %d queued for transmission. tx_flags_extra: %x\n",
-		dev->name, entries, tx_flags_extra);
-	{
-		int j;
-		for (j=0; j<64; j++) {
-			if ((j%16) == 0)
-				dprintk("\n%03x:", j);
-			dprintk(" %02x", ((unsigned char*)skb->data)[j]);
-		}
-		dprintk("\n");
-	}
-
 	writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
 	return NETDEV_TX_OK;
 }
@@ -2399,15 +2356,12 @@
 	struct fe_priv *np = netdev_priv(dev);
 	u32 flags;
 	int tx_work = 0;
-	struct ring_desc* orig_get_tx = np->get_tx.orig;
+	struct ring_desc *orig_get_tx = np->get_tx.orig;
 
 	while ((np->get_tx.orig != np->put_tx.orig) &&
 	       !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) &&
 	       (tx_work < limit)) {
 
-		dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n",
-					dev->name, flags);
-
 		nv_unmap_txskb(np, np->get_tx_ctx);
 
 		if (np->desc_ver == DESC_VER_1) {
@@ -2464,15 +2418,12 @@
 	struct fe_priv *np = netdev_priv(dev);
 	u32 flags;
 	int tx_work = 0;
-	struct ring_desc_ex* orig_get_tx = np->get_tx.ex;
+	struct ring_desc_ex *orig_get_tx = np->get_tx.ex;
 
 	while ((np->get_tx.ex != np->put_tx.ex) &&
 	       !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) &&
 	       (tx_work < limit)) {
 
-		dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n",
-					dev->name, flags);
-
 		nv_unmap_txskb(np, np->get_tx_ctx);
 
 		if (flags & NV_TX2_LASTPACKET) {
@@ -2491,9 +2442,8 @@
 			np->get_tx_ctx->skb = NULL;
 			tx_work++;
 
-			if (np->tx_limit) {
+			if (np->tx_limit)
 				nv_tx_flip_ownership(dev);
-			}
 		}
 		if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
 			np->get_tx.ex = np->first_tx.ex;
@@ -2518,57 +2468,56 @@
 	u32 status;
 	union ring_type put_tx;
 	int saved_tx_limit;
+	int i;
 
 	if (np->msi_flags & NV_MSI_X_ENABLED)
 		status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
 	else
 		status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
 
-	printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status);
+	netdev_info(dev, "Got tx_timeout. irq: %08x\n", status);
 
-	{
-		int i;
-
-		printk(KERN_INFO "%s: Ring at %lx\n",
-		       dev->name, (unsigned long)np->ring_addr);
-		printk(KERN_INFO "%s: Dumping tx registers\n", dev->name);
-		for (i=0;i<=np->register_size;i+= 32) {
-			printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
-					i,
-					readl(base + i + 0), readl(base + i + 4),
-					readl(base + i + 8), readl(base + i + 12),
-					readl(base + i + 16), readl(base + i + 20),
-					readl(base + i + 24), readl(base + i + 28));
-		}
-		printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
-		for (i=0;i<np->tx_ring_size;i+= 4) {
-			if (!nv_optimized(np)) {
-				printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
-				       i,
-				       le32_to_cpu(np->tx_ring.orig[i].buf),
-				       le32_to_cpu(np->tx_ring.orig[i].flaglen),
-				       le32_to_cpu(np->tx_ring.orig[i+1].buf),
-				       le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
-				       le32_to_cpu(np->tx_ring.orig[i+2].buf),
-				       le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
-				       le32_to_cpu(np->tx_ring.orig[i+3].buf),
-				       le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
-			} else {
-				printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
-				       i,
-				       le32_to_cpu(np->tx_ring.ex[i].bufhigh),
-				       le32_to_cpu(np->tx_ring.ex[i].buflow),
-				       le32_to_cpu(np->tx_ring.ex[i].flaglen),
-				       le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
-				       le32_to_cpu(np->tx_ring.ex[i+1].buflow),
-				       le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
-				       le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
-				       le32_to_cpu(np->tx_ring.ex[i+2].buflow),
-				       le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
-				       le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
-				       le32_to_cpu(np->tx_ring.ex[i+3].buflow),
-				       le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
-			}
+	netdev_info(dev, "Ring at %lx\n", (unsigned long)np->ring_addr);
+	netdev_info(dev, "Dumping tx registers\n");
+	for (i = 0; i <= np->register_size; i += 32) {
+		netdev_info(dev,
+			    "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
+			    i,
+			    readl(base + i + 0), readl(base + i + 4),
+			    readl(base + i + 8), readl(base + i + 12),
+			    readl(base + i + 16), readl(base + i + 20),
+			    readl(base + i + 24), readl(base + i + 28));
+	}
+	netdev_info(dev, "Dumping tx ring\n");
+	for (i = 0; i < np->tx_ring_size; i += 4) {
+		if (!nv_optimized(np)) {
+			netdev_info(dev,
+				    "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
+				    i,
+				    le32_to_cpu(np->tx_ring.orig[i].buf),
+				    le32_to_cpu(np->tx_ring.orig[i].flaglen),
+				    le32_to_cpu(np->tx_ring.orig[i+1].buf),
+				    le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
+				    le32_to_cpu(np->tx_ring.orig[i+2].buf),
+				    le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
+				    le32_to_cpu(np->tx_ring.orig[i+3].buf),
+				    le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
+		} else {
+			netdev_info(dev,
+				    "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
+				    i,
+				    le32_to_cpu(np->tx_ring.ex[i].bufhigh),
+				    le32_to_cpu(np->tx_ring.ex[i].buflow),
+				    le32_to_cpu(np->tx_ring.ex[i].flaglen),
+				    le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
+				    le32_to_cpu(np->tx_ring.ex[i+1].buflow),
+				    le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
+				    le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
+				    le32_to_cpu(np->tx_ring.ex[i+2].buflow),
+				    le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
+				    le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
+				    le32_to_cpu(np->tx_ring.ex[i+3].buflow),
+				    le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
 		}
 	}
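Each netdev_info() call above emits one fully prefixed line, preserving the old dump format. A hypothetical alternative, not what this patch does, would snapshot each 32-byte window and hand it to the kernel's print_hex_dump() helper:

	static void dump_regs_alt(u8 __iomem *base, int register_size)
	{
		u32 snap[8];
		int i, j;

		for (i = 0; i <= register_size; i += 32) {
			for (j = 0; j < 8; j++)
				snap[j] = readl(base + i + 4*j);
			/* offsets restart at 0 for each window; the real
			 * offset would have to go into the prefix string */
			print_hex_dump(KERN_INFO, "forcedeth regs: ",
				       DUMP_PREFIX_OFFSET, 32, 4,
				       snap, sizeof(snap), false);
		}
	}

Keeping the open-coded loops avoids changing output that people may already parse.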
 
@@ -2616,15 +2565,13 @@
 	int protolen;	/* length as stored in the proto field */
 
 	/* 1) calculate len according to header */
-	if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
-		protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto );
+	if (((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
+		protolen = ntohs(((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto);
 		hdrlen = VLAN_HLEN;
 	} else {
-		protolen = ntohs( ((struct ethhdr *)packet)->h_proto);
+		protolen = ntohs(((struct ethhdr *)packet)->h_proto);
 		hdrlen = ETH_HLEN;
 	}
-	dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n",
-				dev->name, datalen, protolen, hdrlen);
 	if (protolen > ETH_DATA_LEN)
 		return datalen; /* Value in proto field not a len, no checks possible */
 
@@ -2635,26 +2582,18 @@
 			/* more data on wire than in 802 header, trim off
 			 * additional data.
 			 */
-			dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
-					dev->name, protolen);
 			return protolen;
 		} else {
 			/* less data on wire than mentioned in header.
 			 * Discard the packet.
 			 */
-			dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n",
-					dev->name);
 			return -1;
 		}
 	} else {
 		/* short packet. Accept only if 802 values are also short */
 		if (protolen > ETH_ZLEN) {
-			dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n",
-					dev->name);
 			return -1;
 		}
-		dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
-				dev->name, datalen);
 		return datalen;
 	}
 }
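nv_getlen() keys off the dual meaning of the Ethernet type/length field: values up to ETH_DATA_LEN (1500) are 802.3 payload lengths, larger values are EtherType IDs. A simplified, hypothetical version of that decision, without the driver's short-frame and discard cases:

	static int frame_payload_len(const unsigned char *packet, int datalen)
	{
		int protolen = ntohs(((const struct ethhdr *)packet)->h_proto);

		if (protolen > ETH_DATA_LEN)
			return datalen;	/* EtherType: field names a protocol */
		/* 802.3: trust the shorter of wire length and header length */
		return min(datalen, protolen + ETH_HLEN);
	}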
@@ -2667,13 +2606,10 @@
 	struct sk_buff *skb;
 	int len;
 
-	while((np->get_rx.orig != np->put_rx.orig) &&
+	while ((np->get_rx.orig != np->put_rx.orig) &&
 	      !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) &&
 		(rx_work < limit)) {
 
-		dprintk(KERN_DEBUG "%s: nv_rx_process: flags 0x%x.\n",
-					dev->name, flags);
-
 		/*
 		 * the packet is for us - immediately tear down the pci mapping.
 		 * TODO: check if a prefetch of the first cacheline improves
@@ -2685,16 +2621,6 @@
 		skb = np->get_rx_ctx->skb;
 		np->get_rx_ctx->skb = NULL;
 
-		{
-			int j;
-			dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
-			for (j=0; j<64; j++) {
-				if ((j%16) == 0)
-					dprintk("\n%03x:", j);
-				dprintk(" %02x", ((unsigned char*)skb->data)[j]);
-			}
-			dprintk("\n");
-		}
 		/* look at what we actually got: */
 		if (np->desc_ver == DESC_VER_1) {
 			if (likely(flags & NV_RX_DESCRIPTORVALID)) {
@@ -2710,9 +2636,8 @@
 					}
 					/* framing errors are soft errors */
 					else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) {
-						if (flags & NV_RX_SUBSTRACT1) {
+						if (flags & NV_RX_SUBSTRACT1)
 							len--;
-						}
 					}
 					/* the rest are hard errors */
 					else {
@@ -2745,9 +2670,8 @@
 					}
 					/* framing errors are soft errors */
 					else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
-						if (flags & NV_RX2_SUBSTRACT1) {
+						if (flags & NV_RX2_SUBSTRACT1)
 							len--;
-						}
 					}
 					/* the rest are hard errors */
 					else {
@@ -2771,8 +2695,6 @@
 		/* got a valid packet - forward it to the network core */
 		skb_put(skb, len);
 		skb->protocol = eth_type_trans(skb, dev);
-		dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n",
-					dev->name, len, skb->protocol);
 		napi_gro_receive(&np->napi, skb);
 		dev->stats.rx_packets++;
 		dev->stats.rx_bytes += len;
@@ -2797,13 +2719,10 @@
 	struct sk_buff *skb;
 	int len;
 
-	while((np->get_rx.ex != np->put_rx.ex) &&
+	while ((np->get_rx.ex != np->put_rx.ex) &&
 	      !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
 	      (rx_work < limit)) {
 
-		dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: flags 0x%x.\n",
-					dev->name, flags);
-
 		/*
 		 * the packet is for us - immediately tear down the pci mapping.
 		 * TODO: check if a prefetch of the first cacheline improves
@@ -2815,16 +2734,6 @@
 		skb = np->get_rx_ctx->skb;
 		np->get_rx_ctx->skb = NULL;
 
-		{
-			int j;
-			dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
-			for (j=0; j<64; j++) {
-				if ((j%16) == 0)
-					dprintk("\n%03x:", j);
-				dprintk(" %02x", ((unsigned char*)skb->data)[j]);
-			}
-			dprintk("\n");
-		}
 		/* look at what we actually got: */
 		if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
 			len = flags & LEN_MASK_V2;
@@ -2838,9 +2747,8 @@
 				}
 				/* framing errors are soft errors */
 				else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
-					if (flags & NV_RX2_SUBSTRACT1) {
+					if (flags & NV_RX2_SUBSTRACT1)
 						len--;
-					}
 				}
 				/* the rest are hard errors */
 				else {
@@ -2858,9 +2766,6 @@
 			skb->protocol = eth_type_trans(skb, dev);
 			prefetch(skb->data);
 
-			dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: %d bytes, proto %d accepted.\n",
-				dev->name, len, skb->protocol);
-
 			if (likely(!np->vlangrp)) {
 				napi_gro_receive(&np->napi, skb);
 			} else {
@@ -2949,7 +2854,7 @@
 		/* reinit nic view of the rx queue */
 		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
 		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
-		writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+		writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
 			base + NvRegRingSizes);
 		pci_push(base);
 		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
@@ -2986,7 +2891,7 @@
 static int nv_set_mac_address(struct net_device *dev, void *addr)
 {
 	struct fe_priv *np = netdev_priv(dev);
-	struct sockaddr *macaddr = (struct sockaddr*)addr;
+	struct sockaddr *macaddr = (struct sockaddr *)addr;
 
 	if (!is_valid_ether_addr(macaddr->sa_data))
 		return -EADDRNOTAVAIL;
@@ -3076,8 +2981,6 @@
 	writel(mask[0], base + NvRegMulticastMaskA);
 	writel(mask[1], base + NvRegMulticastMaskB);
 	writel(pff, base + NvRegPacketFilterFlags);
-	dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n",
-		dev->name);
 	nv_start_rx(dev);
 	spin_unlock_irq(&np->lock);
 }
@@ -3152,8 +3055,6 @@
 	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
 
 	if (!(mii_status & BMSR_LSTATUS)) {
-		dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n",
-				dev->name);
 		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
 		newdup = 0;
 		retval = 0;
@@ -3161,8 +3062,6 @@
 	}
 
 	if (np->autoneg == 0) {
-		dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n",
-				dev->name, np->fixed_mode);
 		if (np->fixed_mode & LPA_100FULL) {
 			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
 			newdup = 1;
@@ -3185,14 +3084,11 @@
 		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
 		newdup = 0;
 		retval = 0;
-		dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name);
 		goto set_speed;
 	}
 
 	adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
 	lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
-	dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
-				dev->name, adv, lpa);
 
 	retval = 1;
 	if (np->gigabit == PHY_GIGABIT) {
@@ -3201,8 +3097,6 @@
 
 		if ((control_1000 & ADVERTISE_1000FULL) &&
 			(status_1000 & LPA_1000FULL)) {
-			dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n",
-				dev->name);
 			newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
 			newdup = 1;
 			goto set_speed;
@@ -3224,7 +3118,6 @@
 		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
 		newdup = 0;
 	} else {
-		dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa);
 		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
 		newdup = 0;
 	}
@@ -3233,9 +3126,6 @@
 	if (np->duplex == newdup && np->linkspeed == newls)
 		return retval;
 
-	dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n",
-			dev->name, np->linkspeed, np->duplex, newls, newdup);
-
 	np->duplex = newdup;
 	np->linkspeed = newls;
 
@@ -3302,7 +3192,7 @@
 	}
 	writel(txreg, base + NvRegTxWatermark);
 
-	writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD),
+	writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
 		base + NvRegMisc1);
 	pci_push(base);
 	writel(np->linkspeed, base + NvRegLinkSpeed);
@@ -3312,8 +3202,8 @@
 	/* setup pause frame */
 	if (np->duplex != 0) {
 		if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
-			adv_pause = adv & (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM);
-			lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM);
+			adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
+			lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
 
 			switch (adv_pause) {
 			case ADVERTISE_PAUSE_CAP:
@@ -3324,22 +3214,17 @@
 				}
 				break;
 			case ADVERTISE_PAUSE_ASYM:
-				if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM))
-				{
+				if (lpa_pause == (LPA_PAUSE_CAP | LPA_PAUSE_ASYM))
 					pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
-				}
 				break;
-			case ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM:
-				if (lpa_pause & LPA_PAUSE_CAP)
-				{
+			case ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM:
+				if (lpa_pause & LPA_PAUSE_CAP) {
 					pause_flags |=  NV_PAUSEFRAME_RX_ENABLE;
 					if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
 						pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
 				}
 				if (lpa_pause == LPA_PAUSE_ASYM)
-				{
 					pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
-				}
 				break;
 			}
 		} else {
@@ -3361,14 +3246,14 @@
 	if (nv_update_linkspeed(dev)) {
 		if (!netif_carrier_ok(dev)) {
 			netif_carrier_on(dev);
-			printk(KERN_INFO "%s: link up.\n", dev->name);
+			netdev_info(dev, "link up\n");
 			nv_txrx_gate(dev, false);
 			nv_start_rx(dev);
 		}
 	} else {
 		if (netif_carrier_ok(dev)) {
 			netif_carrier_off(dev);
-			printk(KERN_INFO "%s: link down.\n", dev->name);
+			netdev_info(dev, "link down\n");
 			nv_txrx_gate(dev, true);
 			nv_stop_rx(dev);
 		}
@@ -3382,11 +3267,9 @@
 
 	miistat = readl(base + NvRegMIIStatus);
 	writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus);
-	dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat);
 
 	if (miistat & (NVREG_MIISTAT_LINKCHANGE))
 		nv_linkchange(dev);
-	dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name);
 }
 
 static void nv_msi_workaround(struct fe_priv *np)
@@ -3437,8 +3320,6 @@
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
 
-	dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);
-
 	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
 		np->events = readl(base + NvRegIrqStatus);
 		writel(np->events, base + NvRegIrqStatus);
@@ -3446,7 +3327,6 @@
 		np->events = readl(base + NvRegMSIXIrqStatus);
 		writel(np->events, base + NvRegMSIXIrqStatus);
 	}
-	dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, np->events);
 	if (!(np->events & np->irqmask))
 		return IRQ_NONE;
 
@@ -3460,8 +3340,6 @@
 		__napi_schedule(&np->napi);
 	}
 
-	dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);
-
 	return IRQ_HANDLED;
 }
 
@@ -3476,8 +3354,6 @@
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
 
-	dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name);
-
 	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
 		np->events = readl(base + NvRegIrqStatus);
 		writel(np->events, base + NvRegIrqStatus);
@@ -3485,7 +3361,6 @@
 		np->events = readl(base + NvRegMSIXIrqStatus);
 		writel(np->events, base + NvRegMSIXIrqStatus);
 	}
-	dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, np->events);
 	if (!(np->events & np->irqmask))
 		return IRQ_NONE;
 
@@ -3498,7 +3373,6 @@
 		writel(0, base + NvRegIrqMask);
 		__napi_schedule(&np->napi);
 	}
-	dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name);
 
 	return IRQ_HANDLED;
 }
@@ -3512,12 +3386,9 @@
 	int i;
 	unsigned long flags;
 
-	dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name);
-
-	for (i=0; ; i++) {
+	for (i = 0;; i++) {
 		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
 		writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
-		dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events);
 		if (!(events & np->irqmask))
 			break;
 
@@ -3536,12 +3407,12 @@
 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
 			}
 			spin_unlock_irqrestore(&np->lock, flags);
-			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
+			netdev_dbg(dev, "%s: too many iterations (%d)\n",
+				   __func__, i);
 			break;
 		}
 
 	}
-	dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name);
 
 	return IRQ_RETVAL(i);
 }
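netdev_dbg() differs from the old printk(KERN_DEBUG ...) in two ways: it vanishes entirely unless DEBUG is defined (or is runtime-selectable under CONFIG_DYNAMIC_DEBUG), and it supplies the driver/interface prefix the old strings built by hand. The shared message, in isolation (sketch):

	static void warn_irq_iterations(struct net_device *dev, int i)
	{
		/* __func__ expands to the enclosing function's name, so the
		 * same format string serves the tx, rx and other handlers */
		netdev_dbg(dev, "%s: too many iterations (%d)\n", __func__, i);
	}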
@@ -3553,7 +3424,7 @@
 	u8 __iomem *base = get_hwbase(dev);
 	unsigned long flags;
 	int retcode;
-	int rx_count, tx_work=0, rx_work=0;
+	int rx_count, tx_work = 0, rx_work = 0;
 
 	do {
 		if (!nv_optimized(np)) {
@@ -3626,12 +3497,9 @@
 	int i;
 	unsigned long flags;
 
-	dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name);
-
-	for (i=0; ; i++) {
+	for (i = 0;; i++) {
 		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
 		writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
-		dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
 		if (!(events & np->irqmask))
 			break;
 
@@ -3655,11 +3523,11 @@
 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
 			}
 			spin_unlock_irqrestore(&np->lock, flags);
-			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
+			netdev_dbg(dev, "%s: too many iterations (%d)\n",
+				   __func__, i);
 			break;
 		}
 	}
-	dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name);
 
 	return IRQ_RETVAL(i);
 }
@@ -3673,12 +3541,9 @@
 	int i;
 	unsigned long flags;
 
-	dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name);
-
-	for (i=0; ; i++) {
+	for (i = 0;; i++) {
 		events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
 		writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
-		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
 		if (!(events & np->irqmask))
 			break;
 
@@ -3723,12 +3588,12 @@
 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
 			}
 			spin_unlock_irqrestore(&np->lock, flags);
-			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
+			netdev_dbg(dev, "%s: too many iterations (%d)\n",
+				   __func__, i);
 			break;
 		}
 
 	}
-	dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name);
 
 	return IRQ_RETVAL(i);
 }
@@ -3740,8 +3605,6 @@
 	u8 __iomem *base = get_hwbase(dev);
 	u32 events;
 
-	dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name);
-
 	if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
 		events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
 		writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus);
@@ -3750,7 +3613,6 @@
 		writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
 	}
 	pci_push(base);
-	dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
 	if (!(events & NVREG_IRQ_TIMER))
 		return IRQ_RETVAL(0);
 
@@ -3760,8 +3622,6 @@
 	np->intr_test = 1;
 	spin_unlock(&np->lock);
 
-	dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name);
-
 	return IRQ_RETVAL(1);
 }
 
@@ -3776,17 +3636,15 @@
 	 * the remaining 8 interrupts.
 	 */
 	for (i = 0; i < 8; i++) {
-		if ((irqmask >> i) & 0x1) {
+		if ((irqmask >> i) & 0x1)
 			msixmap |= vector << (i << 2);
-		}
 	}
 	writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
 
 	msixmap = 0;
 	for (i = 0; i < 8; i++) {
-		if ((irqmask >> (i + 8)) & 0x1) {
+		if ((irqmask >> (i + 8)) & 0x1)
 			msixmap |= vector << (i << 2);
-		}
 	}
 	writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
 }
@@ -3809,17 +3667,19 @@
 	}
 
 	if (np->msi_flags & NV_MSI_X_CAPABLE) {
-		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
+		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
 			np->msi_x_entry[i].entry = i;
-		}
-		if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) {
+		ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK));
+		if (ret == 0) {
 			np->msi_flags |= NV_MSI_X_ENABLED;
 			if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
 				/* Request irq for rx handling */
 				sprintf(np->name_rx, "%s-rx", dev->name);
 				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector,
 						nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev) != 0) {
-					printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret);
+					netdev_info(dev,
+						    "request_irq failed for rx %d\n",
+						    ret);
 					pci_disable_msix(np->pci_dev);
 					np->msi_flags &= ~NV_MSI_X_ENABLED;
 					goto out_err;
@@ -3828,7 +3688,9 @@
 				sprintf(np->name_tx, "%s-tx", dev->name);
 				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector,
 						nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev) != 0) {
-					printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret);
+					netdev_info(dev,
+						    "request_irq failed for tx %d\n",
+						    ret);
 					pci_disable_msix(np->pci_dev);
 					np->msi_flags &= ~NV_MSI_X_ENABLED;
 					goto out_free_rx;
@@ -3837,7 +3699,9 @@
 				sprintf(np->name_other, "%s-other", dev->name);
 				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector,
 						nv_nic_irq_other, IRQF_SHARED, np->name_other, dev) != 0) {
-					printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret);
+					netdev_info(dev,
+						    "request_irq failed for link %d\n",
+						    ret);
 					pci_disable_msix(np->pci_dev);
 					np->msi_flags &= ~NV_MSI_X_ENABLED;
 					goto out_free_tx;
@@ -3851,7 +3715,9 @@
 			} else {
 				/* Request irq for all interrupts */
 				if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) {
-					printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
+					netdev_info(dev,
+						    "request_irq failed %d\n",
+						    ret);
 					pci_disable_msix(np->pci_dev);
 					np->msi_flags &= ~NV_MSI_X_ENABLED;
 					goto out_err;
@@ -3864,11 +3730,13 @@
 		}
 	}
 	if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
-		if ((ret = pci_enable_msi(np->pci_dev)) == 0) {
+		ret = pci_enable_msi(np->pci_dev);
+		if (ret == 0) {
 			np->msi_flags |= NV_MSI_ENABLED;
 			dev->irq = np->pci_dev->irq;
 			if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
-				printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret);
+				netdev_info(dev, "request_irq failed %d\n",
+					    ret);
 				pci_disable_msi(np->pci_dev);
 				np->msi_flags &= ~NV_MSI_ENABLED;
 				dev->irq = np->pci_dev->irq;
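Both pci_enable_msix() and pci_enable_msi() call sites lose the assignment-inside-if() form that checkpatch flags. The transformation in isolation (sketch):

	static int msi_enable_example(struct pci_dev *pdev)
	{
		int ret;

		/* old form: if ((ret = pci_enable_msi(pdev)) == 0) { ... } */
		ret = pci_enable_msi(pdev);
		return ret;	/* caller branches on the split-out status */
	}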
@@ -3903,9 +3771,8 @@
 	int i;
 
 	if (np->msi_flags & NV_MSI_X_ENABLED) {
-		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) {
+		for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
 			free_irq(np->msi_x_entry[i].vector, dev);
-		}
 		pci_disable_msix(np->pci_dev);
 		np->msi_flags &= ~NV_MSI_X_ENABLED;
 	} else {
@@ -3954,7 +3821,7 @@
 
 	if (np->recover_error) {
 		np->recover_error = 0;
-		printk(KERN_INFO "%s: MAC in recoverable error state\n", dev->name);
+		netdev_info(dev, "MAC in recoverable error state\n");
 		if (netif_running(dev)) {
 			netif_tx_lock_bh(dev);
 			netif_addr_lock(dev);
@@ -3975,7 +3842,7 @@
 			/* reinit nic view of the rx queue */
 			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
 			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
-			writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+			writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
 				base + NvRegRingSizes);
 			pci_push(base);
 			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
@@ -4105,7 +3972,7 @@
 	}
 
 	if (netif_carrier_ok(dev)) {
-		switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) {
+		switch (np->linkspeed & (NVREG_LINKSPEED_MASK)) {
 		case NVREG_LINKSPEED_10:
 			ecmd->speed = SPEED_10;
 			break;
@@ -4250,14 +4117,14 @@
 		}
 
 		if (netif_running(dev))
-			printk(KERN_INFO "%s: link down.\n", dev->name);
+			netdev_info(dev, "link down\n");
 		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
 		if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
 			bmcr |= BMCR_ANENABLE;
 			/* reset the phy in order for settings to stick,
 			 * and cause autoneg to start */
 			if (phy_reset(dev, bmcr)) {
-				printk(KERN_INFO "%s: phy reset failed\n", dev->name);
+				netdev_info(dev, "phy reset failed\n");
 				return -EINVAL;
 			}
 		} else {
@@ -4306,7 +4173,7 @@
 		if (np->phy_oui == PHY_OUI_MARVELL) {
 			/* reset the phy in order for forced mode settings to stick */
 			if (phy_reset(dev, bmcr)) {
-				printk(KERN_INFO "%s: phy reset failed\n", dev->name);
+				netdev_info(dev, "phy reset failed\n");
 				return -EINVAL;
 			}
 		} else {
@@ -4344,7 +4211,7 @@
 
 	regs->version = FORCEDETH_REGS_VER;
 	spin_lock_irq(&np->lock);
-	for (i = 0;i <= np->register_size/sizeof(u32); i++)
+	for (i = 0; i <= np->register_size/sizeof(u32); i++)
 		rbuf[i] = readl(base + i*sizeof(u32));
 	spin_unlock_irq(&np->lock);
 }
@@ -4368,7 +4235,7 @@
 			spin_unlock(&np->lock);
 			netif_addr_unlock(dev);
 			netif_tx_unlock_bh(dev);
-			printk(KERN_INFO "%s: link down.\n", dev->name);
+			netdev_info(dev, "link down\n");
 		}
 
 		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
@@ -4376,7 +4243,7 @@
 			bmcr |= BMCR_ANENABLE;
 			/* reset the phy in order for settings to stick*/
 			if (phy_reset(dev, bmcr)) {
-				printk(KERN_INFO "%s: phy reset failed\n", dev->name);
+				netdev_info(dev, "phy reset failed\n");
 				return -EINVAL;
 			}
 		} else {
@@ -4464,10 +4331,9 @@
 				pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
 						    rxtx_ring, ring_addr);
 		}
-		if (rx_skbuff)
-			kfree(rx_skbuff);
-		if (tx_skbuff)
-			kfree(tx_skbuff);
+
+		kfree(rx_skbuff);
+		kfree(tx_skbuff);
 		goto exit;
 	}
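
The error path above also drops its NULL guards: kfree(NULL) is defined as a no-op, so freeing possibly-unallocated buffers needs no test. A sketch:

#include <linux/slab.h>

struct ring_bufs {
	void *rx_skbuff;
	void *tx_skbuff;
};

/* Safe even when only some allocations succeeded before the error
 * path ran - kfree() silently ignores NULL pointers. */
static void free_partial(struct ring_bufs *r)
{
	kfree(r->rx_skbuff);
	kfree(r->tx_skbuff);
}
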
 
@@ -4491,14 +4357,14 @@
 	np->tx_ring_size = ring->tx_pending;
 
 	if (!nv_optimized(np)) {
-		np->rx_ring.orig = (struct ring_desc*)rxtx_ring;
+		np->rx_ring.orig = (struct ring_desc *)rxtx_ring;
 		np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
 	} else {
-		np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring;
+		np->rx_ring.ex = (struct ring_desc_ex *)rxtx_ring;
 		np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
 	}
-	np->rx_skb = (struct nv_skb_map*)rx_skbuff;
-	np->tx_skb = (struct nv_skb_map*)tx_skbuff;
+	np->rx_skb = (struct nv_skb_map *)rx_skbuff;
+	np->tx_skb = (struct nv_skb_map *)tx_skbuff;
 	np->ring_addr = ring_addr;
 
 	memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
@@ -4515,7 +4381,7 @@
 		/* reinit nic view of the queues */
 		writel(np->rx_buf_sz, base + NvRegOffloadConfig);
 		setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
-		writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+		writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
 			base + NvRegRingSizes);
 		pci_push(base);
 		writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
@@ -4550,12 +4416,11 @@
 
 	if ((!np->autoneg && np->duplex == 0) ||
 	    (np->autoneg && !pause->autoneg && np->duplex == 0)) {
-		printk(KERN_INFO "%s: can not set pause settings when forced link is in half duplex.\n",
-		       dev->name);
+		netdev_info(dev, "can not set pause settings when forced link is in half duplex\n");
 		return -EINVAL;
 	}
 	if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
-		printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name);
+		netdev_info(dev, "hardware does not support tx pause frames\n");
 		return -EINVAL;
 	}
 
@@ -4590,7 +4455,7 @@
 		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
 
 		if (netif_running(dev))
-			printk(KERN_INFO "%s: link down.\n", dev->name);
+			netdev_info(dev, "link down\n");
 		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
 		bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
 		mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
@@ -4841,7 +4706,7 @@
 	/* reinit nic view of the rx queue */
 	writel(np->rx_buf_sz, base + NvRegOffloadConfig);
 	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
-	writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+	writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
 		base + NvRegRingSizes);
 	pci_push(base);
 
@@ -4852,8 +4717,7 @@
 	pkt_len = ETH_DATA_LEN;
 	tx_skb = dev_alloc_skb(pkt_len);
 	if (!tx_skb) {
-		printk(KERN_ERR "dev_alloc_skb() failed during loopback test"
-			 " of %s\n", dev->name);
+		netdev_err(dev, "dev_alloc_skb() failed during loopback test\n");
 		ret = 0;
 		goto out;
 	}
@@ -4893,29 +4757,22 @@
 		if (flags & NV_RX_ERROR)
 			ret = 0;
 	} else {
-		if (flags & NV_RX2_ERROR) {
+		if (flags & NV_RX2_ERROR)
 			ret = 0;
-		}
 	}
 
 	if (ret) {
 		if (len != pkt_len) {
 			ret = 0;
-			dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n",
-				dev->name, len, pkt_len);
 		} else {
 			rx_skb = np->rx_skb[0].skb;
 			for (i = 0; i < pkt_len; i++) {
 				if (rx_skb->data[i] != (u8)(i & 0xff)) {
 					ret = 0;
-					dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n",
-						dev->name, i);
 					break;
 				}
 			}
 		}
-	} else {
-		dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name);
 	}
 
 	pci_unmap_single(np->pci_dev, test_dma_addr,
@@ -4958,11 +4815,10 @@
 			netif_addr_lock(dev);
 			spin_lock_irq(&np->lock);
 			nv_disable_hw_interrupts(dev, np->irqmask);
-			if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
+			if (!(np->msi_flags & NV_MSI_X_ENABLED))
 				writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
-			} else {
+			else
 				writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
-			}
 			/* stop engines */
 			nv_stop_rxtx(dev);
 			nv_txrx_reset(dev);
@@ -5003,7 +4859,7 @@
 			/* reinit nic view of the rx queue */
 			writel(np->rx_buf_sz, base + NvRegOffloadConfig);
 			setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
-			writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+			writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
 				base + NvRegRingSizes);
 			pci_push(base);
 			writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
@@ -5106,8 +4962,7 @@
 		    ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) {
 			np->mgmt_sema = 1;
 			return 1;
-		}
-		else
+		} else
 			udelay(50);
 	}
 
@@ -5167,8 +5022,6 @@
 	int oom, i;
 	u32 low;
 
-	dprintk(KERN_DEBUG "nv_open: begin\n");
-
 	/* power up phy */
 	mii_rw(dev, np->phyaddr, MII_BMCR,
 	       mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ) & ~BMCR_PDOWN);
@@ -5204,7 +5057,7 @@
 
 	/* give hw rings */
 	setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
-	writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
+	writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
 		base + NvRegRingSizes);
 
 	writel(np->linkspeed, base + NvRegLinkSpeed);
@@ -5216,9 +5069,11 @@
 	writel(np->vlanctl_bits, base + NvRegVlanControl);
 	pci_push(base);
 	writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
-	reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
-			NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX,
-			KERN_INFO "open: SetupReg5, Bit 31 remained off\n");
+	if (reg_delay(dev, NvRegUnknownSetupReg5,
+		      NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
+		      NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX))
+		netdev_info(dev,
+			    "%s: SetupReg5, Bit 31 remained off\n", __func__);
 
 	writel(0, base + NvRegMIIMask);
 	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
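
reg_delay() used to take a KERN_INFO string and print it internally on timeout; the hunk above has it report the timeout through its return value so the caller owns the logging (tagged here with __func__). A sketch of that poll-register-with-timeout shape, under assumed names:

#include <linux/delay.h>
#include <linux/io.h>
#include <linux/types.h>

/*
 * Hypothetical poll helper: wait until (reg & mask) == target, checking
 * every 'delay' us for at most 'max' us.  Returns 0 on success and 1 on
 * timeout; the caller decides whether and how to log the failure.
 */
static int my_reg_delay(void __iomem *base, int offset, u32 mask,
			u32 target, int delay, int max)
{
	int waited = 0;

	do {
		udelay(delay);
		waited += delay;
		if (waited > max)
			return 1;
	} while ((readl(base + offset) & mask) != target);

	return 0;
}
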
@@ -5251,8 +5106,7 @@
 			writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
 		else
 			writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
-	}
-	else
+	} else
 		writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
 	writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
 	writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
@@ -5263,7 +5117,7 @@
 		writel(NVREG_WAKEUPFLAGS_ENABLE , base + NvRegWakeUpFlags);
 
 	i = readl(base + NvRegPowerState);
-	if ( (i & NVREG_POWERSTATE_POWEREDUP) == 0)
+	if ((i & NVREG_POWERSTATE_POWEREDUP) == 0)
 		writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);
 
 	pci_push(base);
@@ -5276,9 +5130,8 @@
 	writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
 	pci_push(base);
 
-	if (nv_request_irq(dev, 0)) {
+	if (nv_request_irq(dev, 0))
 		goto out_drain;
-	}
 
 	/* ask for interrupts */
 	nv_enable_hw_interrupts(dev, np->irqmask);
@@ -5296,7 +5149,6 @@
 		u32 miistat;
 		miistat = readl(base + NvRegMIIStatus);
 		writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
-		dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat);
 	}
 	/* set linkspeed to invalid value, thus force nv_update_linkspeed
 	 * to init hw */
@@ -5309,7 +5161,7 @@
 	if (ret) {
 		netif_carrier_on(dev);
 	} else {
-		printk(KERN_INFO "%s: no link during initialization.\n", dev->name);
+		netdev_info(dev, "no link during initialization\n");
 		netif_carrier_off(dev);
 	}
 	if (oom)
@@ -5352,7 +5204,6 @@
 	base = get_hwbase(dev);
 	nv_disable_hw_interrupts(dev, np->irqmask);
 	pci_push(base);
-	dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);
 
 	spin_unlock_irq(&np->lock);
 
@@ -5421,8 +5272,8 @@
 	static int printed_version;
 
 	if (!printed_version++)
-		printk(KERN_INFO "%s: Reverse Engineered nForce ethernet"
-		       " driver. Version %s.\n", DRV_NAME, FORCEDETH_VERSION);
+		pr_info("Reverse Engineered nForce ethernet driver. Version %s.\n",
+			FORCEDETH_VERSION);
 
 	dev = alloc_etherdev(sizeof(struct fe_priv));
 	err = -ENOMEM;
@@ -5465,10 +5316,6 @@
 	err = -EINVAL;
 	addr = 0;
 	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
-		dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n",
-				pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i),
-				pci_resource_len(pci_dev, i),
-				pci_resource_flags(pci_dev, i));
 		if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
 				pci_resource_len(pci_dev, i) >= np->register_size) {
 			addr = pci_resource_start(pci_dev, i);
@@ -5476,8 +5323,7 @@
 		}
 	}
 	if (i == DEVICE_COUNT_RESOURCE) {
-		dev_printk(KERN_INFO, &pci_dev->dev,
-			   "Couldn't find register window\n");
+		dev_info(&pci_dev->dev, "Couldn't find register window\n");
 		goto out_relreg;
 	}
 
@@ -5493,13 +5339,13 @@
 		np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
 		if (dma_64bit) {
 			if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(39)))
-				dev_printk(KERN_INFO, &pci_dev->dev,
-					"64-bit DMA failed, using 32-bit addressing\n");
+				dev_info(&pci_dev->dev,
+					 "64-bit DMA failed, using 32-bit addressing\n");
 			else
 				dev->features |= NETIF_F_HIGHDMA;
 			if (pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(39))) {
-				dev_printk(KERN_INFO, &pci_dev->dev,
-					"64-bit DMA (consistent) failed, using 32-bit ring buffers\n");
+				dev_info(&pci_dev->dev,
+					 "64-bit DMA (consistent) failed, using 32-bit ring buffers\n");
 			}
 		}
 	} else if (id->driver_data & DEV_HAS_LARGEDESC) {
@@ -5620,7 +5466,9 @@
 		dev->dev_addr[4] = (np->orig_mac[0] >>  8) & 0xff;
 		dev->dev_addr[5] = (np->orig_mac[0] >>  0) & 0xff;
 		writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
-		printk(KERN_DEBUG "nv_probe: set workaround bit for reversed mac addr\n");
+		dev_dbg(&pci_dev->dev,
+			"%s: set workaround bit for reversed mac addr\n",
+			__func__);
 	}
 	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
 
@@ -5629,17 +5477,14 @@
 		 * Bad mac address. At least one bios sets the mac address
 		 * to 01:23:45:67:89:ab
 		 */
-		dev_printk(KERN_ERR, &pci_dev->dev,
-			"Invalid Mac address detected: %pM\n",
-		        dev->dev_addr);
-		dev_printk(KERN_ERR, &pci_dev->dev,
-			"Please complain to your hardware vendor. Switching to a random MAC.\n");
+		dev_err(&pci_dev->dev,
+			"Invalid MAC address detected: %pM - Please complain to your hardware vendor.\n",
+			dev->dev_addr);
 		random_ether_addr(dev->dev_addr);
+		dev_err(&pci_dev->dev,
+			"Using random MAC address: %pM\n", dev->dev_addr);
 	}
 
-	dprintk(KERN_DEBUG "%s: MAC Address %pM\n",
-		pci_name(pci_dev), dev->dev_addr);
-
 	/* set mac address */
 	nv_copy_mac_to_hw(dev);
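
Some BIOSes program the bogus address 01:23:45:67:89:ab, so probe validates the EEPROM MAC and substitutes a random one instead of failing; the reworked messages also log the substituted address so it can be matched against later output. The general pattern with the stock helpers:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

/* Validate the EEPROM-derived address; fall back to a random
 * locally-administered unicast MAC if it is invalid. */
static void fixup_mac(struct net_device *dev)
{
	if (!is_valid_ether_addr(dev->dev_addr)) {
		random_ether_addr(dev->dev_addr);
		netdev_err(dev, "using random MAC %pM\n", dev->dev_addr);
	}
}
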
 
@@ -5663,16 +5508,15 @@
 		writel(powerstate, base + NvRegPowerState2);
 	}
 
-	if (np->desc_ver == DESC_VER_1) {
+	if (np->desc_ver == DESC_VER_1)
 		np->tx_flags = NV_TX_VALID;
-	} else {
+	else
 		np->tx_flags = NV_TX2_VALID;
-	}
 
 	np->msi_flags = 0;
-	if ((id->driver_data & DEV_HAS_MSI) && msi) {
+	if ((id->driver_data & DEV_HAS_MSI) && msi)
 		np->msi_flags |= NV_MSI_CAPABLE;
-	}
+
 	if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
 		/* msix has had reported issues when modifying irqmask
 		   as in the case of napi, therefore, disable for now
@@ -5702,11 +5546,9 @@
 	if (id->driver_data & DEV_NEED_TIMERIRQ)
 		np->irqmask |= NVREG_IRQ_TIMER;
 	if (id->driver_data & DEV_NEED_LINKTIMER) {
-		dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev));
 		np->need_linktimer = 1;
 		np->link_timeout = jiffies + LINK_TIMEOUT;
 	} else {
-		dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev));
 		np->need_linktimer = 0;
 	}
 
@@ -5735,19 +5577,14 @@
 		    nv_mgmt_acquire_sema(dev) &&
 		    nv_mgmt_get_version(dev)) {
 			np->mac_in_use = 1;
-			if (np->mgmt_version > 0) {
+			if (np->mgmt_version > 0)
 				np->mac_in_use = readl(base + NvRegMgmtUnitControl) & NVREG_MGMTUNITCONTROL_INUSE;
-			}
-			dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n",
-				pci_name(pci_dev), np->mac_in_use);
 			/* management unit setup the phy already? */
 			if (np->mac_in_use &&
 			    ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
 			     NVREG_XMITCTL_SYNC_PHY_INIT)) {
 				/* phy is inited by mgmt unit */
 				phyinitialized = 1;
-				dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n",
-					pci_name(pci_dev));
 			} else {
 				/* we need to init the phy */
 			}
@@ -5773,8 +5610,6 @@
 		np->phy_model = id2 & PHYID2_MODEL_MASK;
 		id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
 		id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
-		dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
-			pci_name(pci_dev), id1, id2, phyaddr);
 		np->phyaddr = phyaddr;
 		np->phy_oui = id1 | id2;
 
@@ -5788,8 +5623,7 @@
 		break;
 	}
 	if (i == 33) {
-		dev_printk(KERN_INFO, &pci_dev->dev,
-			"open: Could not find a valid PHY.\n");
+		dev_info(&pci_dev->dev, "open: Could not find a valid PHY\n");
 		goto out_error;
 	}
 
@@ -5799,9 +5633,8 @@
 	} else {
 		/* see if it is a gigabit phy */
 		u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
-		if (mii_status & PHY_GIGABIT) {
+		if (mii_status & PHY_GIGABIT)
 			np->gigabit = PHY_GIGABIT;
-		}
 	}
 
 	/* set default link speed settings */
@@ -5811,37 +5644,27 @@
 
 	err = register_netdev(dev);
 	if (err) {
-		dev_printk(KERN_INFO, &pci_dev->dev,
-			   "unable to register netdev: %d\n", err);
+		dev_info(&pci_dev->dev, "unable to register netdev: %d\n", err);
 		goto out_error;
 	}
 
-	dev_printk(KERN_INFO, &pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, "
-		   "addr %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
-		   dev->name,
-		   np->phy_oui,
-		   np->phyaddr,
-		   dev->dev_addr[0],
-		   dev->dev_addr[1],
-		   dev->dev_addr[2],
-		   dev->dev_addr[3],
-		   dev->dev_addr[4],
-		   dev->dev_addr[5]);
+	dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
+		 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
 
-	dev_printk(KERN_INFO, &pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
-		   dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
-		   dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
-		   	"csum " : "",
-		   dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
-		   	"vlan " : "",
-		   id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
-		   id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
-		   id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
-		   np->gigabit == PHY_GIGABIT ? "gbit " : "",
-		   np->need_linktimer ? "lnktim " : "",
-		   np->msi_flags & NV_MSI_CAPABLE ? "msi " : "",
-		   np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "",
-		   np->desc_ver);
+	dev_info(&pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
+		 dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
+		 dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
+			"csum " : "",
+		 dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
+			"vlan " : "",
+		 id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
+		 id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
+		 id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
+		 np->gigabit == PHY_GIGABIT ? "gbit " : "",
+		 np->need_linktimer ? "lnktim " : "",
+		 np->msi_flags & NV_MSI_CAPABLE ? "msi " : "",
+		 np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "",
+		 np->desc_ver);
 
 	return 0;
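
The ten-argument dev_printk above shrinks to one dev_info because the kernel's vsprintf implements the %pM extension, which renders a six-byte buffer as a colon-separated MAC address:

#include <linux/device.h>
#include <linux/types.h>

/* %pM expands a u8[6] to the form "00:11:22:33:44:55". */
static void show_mac(struct device *d, const u8 *addr)
{
	dev_info(d, "addr %pM\n", addr);
}
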
 
@@ -5931,13 +5754,13 @@
 	int i;
 
 	if (netif_running(dev)) {
-		// Gross.
+		/* Gross. */
 		nv_close(dev);
 	}
 	netif_device_detach(dev);
 
 	/* save non-pci configuration space */
-	for (i = 0;i <= np->register_size/sizeof(u32); i++)
+	for (i = 0; i <= np->register_size/sizeof(u32); i++)
 		np->saved_config_space[i] = readl(base + i*sizeof(u32));
 
 	pci_save_state(pdev);
@@ -5960,7 +5783,7 @@
 	pci_enable_wake(pdev, PCI_D0, 0);
 
 	/* restore non-pci configuration space */
-	for (i = 0;i <= np->register_size/sizeof(u32); i++)
+	for (i = 0; i <= np->register_size/sizeof(u32); i++)
 		writel(np->saved_config_space[i], base+i*sizeof(u32));
 
 	if (np->driver_data & DEV_NEED_MSI_FIX)
@@ -5990,9 +5813,8 @@
 	 * If we really go for poweroff, we must not restore the MAC,
 	 * otherwise the MAC for WOL will be reversed at least on some boards.
 	 */
-	if (system_state != SYSTEM_POWER_OFF) {
+	if (system_state != SYSTEM_POWER_OFF)
 		nv_restore_mac_addr(pdev);
-	}
 
 	pci_disable_device(pdev);
 	/*
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 49e4ce1..d1bec62 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -577,11 +577,10 @@
 			irq_of_parse_and_map(np, 1);
 		priv->gfargrp[priv->num_grps].interruptError =
 			irq_of_parse_and_map(np,2);
-		if (priv->gfargrp[priv->num_grps].interruptTransmit < 0 ||
-			priv->gfargrp[priv->num_grps].interruptReceive < 0 ||
-			priv->gfargrp[priv->num_grps].interruptError < 0) {
+		if (priv->gfargrp[priv->num_grps].interruptTransmit == NO_IRQ ||
+		    priv->gfargrp[priv->num_grps].interruptReceive  == NO_IRQ ||
+		    priv->gfargrp[priv->num_grps].interruptError    == NO_IRQ)
 			return -EINVAL;
-		}
 	}
 
 	priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
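
The gianfar fix is a signedness bug: irq_of_parse_and_map() returns an unsigned virq and signals failure with NO_IRQ (0 on powerpc), so the old "< 0" comparisons could never fire. A sketch of the corrected check:

#include <linux/errno.h>
#include <linux/of_irq.h>

/* irq_of_parse_and_map() returns unsigned int, so "< 0" is always
 * false; failure must be tested against NO_IRQ instead. */
static int map_one_irq(struct device_node *np, int index,
		       unsigned int *virq_out)
{
	unsigned int virq = irq_of_parse_and_map(np, index);

	if (virq == NO_IRQ)
		return -EINVAL;
	*virq_out = virq;
	return 0;
}
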
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index 5c566eb..3bc8e27 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -635,9 +635,10 @@
 	if (wol->wolopts & ~WAKE_MAGIC)
 		return -EINVAL;
 
+	device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC);
+
 	spin_lock_irqsave(&priv->bflock, flags);
-	priv->wol_en = wol->wolopts & WAKE_MAGIC ? 1 : 0;
-	device_set_wakeup_enable(&dev->dev, priv->wol_en);
+	priv->wol_en = !!device_may_wakeup(&dev->dev);
 	spin_unlock_irqrestore(&priv->bflock, flags);
 
 	return 0;
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 385dc32..8f11d29 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -2871,7 +2871,6 @@
 	SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
 
 	netif_carrier_off(ndev);
-	netif_stop_queue(ndev);
 
 	err = register_netdev(ndev);
 	if (err) {
@@ -2951,7 +2950,7 @@
 
 	unregister_netdev(dev->ndev);
 
-	flush_scheduled_work();
+	cancel_work_sync(&dev->reset_work);
 
 	if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
 		tah_detach(dev->tah_dev, dev->tah_port);
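
ibm_newemac, mcs7780, iseries_veth, igb, igbvf and ixgb all make the same substitution below: flush_scheduled_work() drains the entire shared workqueue and can deadlock if a lock held here is also taken by unrelated work, whereas cancel_work_sync() stops just the driver's own item and waits for any running instance. The teardown shape, assuming a driver-private work struct:

#include <linux/workqueue.h>

struct my_priv {
	struct work_struct reset_work;
};

static void my_remove(struct my_priv *priv)
{
	/* cancel only our own work item (and wait for it if running)
	 * instead of draining the whole system workqueue */
	cancel_work_sync(&priv->reset_work);
	/* delayed variant: cancel_delayed_work_sync(&priv->dwork); */
}
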
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index c454b45..5522d45 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -729,11 +729,6 @@
 		sizeof(info->version) - 1);
 }
 
-static u32 netdev_get_link(struct net_device *dev)
-{
-	return 1;
-}
-
 static void ibmveth_set_rx_csum_flags(struct net_device *dev, u32 data)
 {
 	struct ibmveth_adapter *adapter = netdev_priv(dev);
@@ -918,7 +913,7 @@
 static const struct ethtool_ops netdev_ethtool_ops = {
 	.get_drvinfo		= netdev_get_drvinfo,
 	.get_settings		= netdev_get_settings,
-	.get_link		= netdev_get_link,
+	.get_link		= ethtool_op_get_link,
 	.set_tx_csum		= ibmveth_set_tx_csum,
 	.get_rx_csum		= ibmveth_get_rx_csum,
 	.set_rx_csum		= ibmveth_set_rx_csum,
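
ibmveth's stub unconditionally reported link-up; ethtool_op_get_link() returns netif_carrier_ok() instead, so drivers that maintain carrier state correctly need no handler of their own, just a table entry:

#include <linux/ethtool.h>

/* ethtool_op_get_link() is simply netif_carrier_ok(dev) under the
 * hood - no per-driver get_link implementation required. */
static const struct ethtool_ops my_ethtool_ops = {
	.get_link = ethtool_op_get_link,
};
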
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index ab9f675..bfa03db 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -36,22 +36,10 @@
 #include <net/pkt_sched.h>
 #include <net/net_namespace.h>
 
-#define TX_TIMEOUT  (2*HZ)
-
 #define TX_Q_LIMIT    32
 struct ifb_private {
 	struct tasklet_struct   ifb_tasklet;
 	int     tasklet_pending;
-	/* mostly debug stats leave in for now */
-	unsigned long   st_task_enter; /* tasklet entered */
-	unsigned long   st_txq_refl_try; /* transmit queue refill attempt */
-	unsigned long   st_rxq_enter; /* receive queue entered */
-	unsigned long   st_rx2tx_tran; /* receive to trasmit transfers */
-	unsigned long   st_rxq_notenter; /*receiveQ not entered, resched */
-	unsigned long   st_rx_frm_egr; /* received from egress path */
-	unsigned long   st_rx_frm_ing; /* received from ingress path */
-	unsigned long   st_rxq_check;
-	unsigned long   st_rxq_rsch;
 	struct sk_buff_head     rq;
 	struct sk_buff_head     tq;
 };
@@ -73,19 +61,12 @@
 	struct sk_buff *skb;
 
 	txq = netdev_get_tx_queue(_dev, 0);
-	dp->st_task_enter++;
 	if ((skb = skb_peek(&dp->tq)) == NULL) {
-		dp->st_txq_refl_try++;
 		if (__netif_tx_trylock(txq)) {
-			dp->st_rxq_enter++;
-			while ((skb = skb_dequeue(&dp->rq)) != NULL) {
-				skb_queue_tail(&dp->tq, skb);
-				dp->st_rx2tx_tran++;
-			}
+			skb_queue_splice_tail_init(&dp->rq, &dp->tq);
 			__netif_tx_unlock(txq);
 		} else {
 			/* reschedule */
-			dp->st_rxq_notenter++;
 			goto resched;
 		}
 	}
@@ -104,16 +85,16 @@
 			rcu_read_unlock();
 			dev_kfree_skb(skb);
 			stats->tx_dropped++;
+			if (skb_queue_len(&dp->tq) != 0)
+				goto resched;
 			break;
 		}
 		rcu_read_unlock();
 		skb->skb_iif = _dev->ifindex;
 
 		if (from & AT_EGRESS) {
-			dp->st_rx_frm_egr++;
 			dev_queue_xmit(skb);
 		} else if (from & AT_INGRESS) {
-			dp->st_rx_frm_ing++;
 			skb_pull(skb, skb->dev->hard_header_len);
 			netif_rx(skb);
 		} else
@@ -121,13 +102,11 @@
 	}
 
 	if (__netif_tx_trylock(txq)) {
-		dp->st_rxq_check++;
 		if ((skb = skb_peek(&dp->rq)) == NULL) {
 			dp->tasklet_pending = 0;
 			if (netif_queue_stopped(_dev))
 				netif_wake_queue(_dev);
 		} else {
-			dp->st_rxq_rsch++;
 			__netif_tx_unlock(txq);
 			goto resched;
 		}
@@ -182,7 +161,7 @@
 		netif_stop_queue(dev);
 	}
 
-	skb_queue_tail(&dp->rq, skb);
+	__skb_queue_tail(&dp->rq, skb);
 	if (!dp->tasklet_pending) {
 		dp->tasklet_pending = 1;
 		tasklet_schedule(&dp->ifb_tasklet);
@@ -197,8 +176,8 @@
 
 	tasklet_kill(&dp->ifb_tasklet);
 	netif_stop_queue(dev);
-	skb_queue_purge(&dp->rq);
-	skb_queue_purge(&dp->tq);
+	__skb_queue_purge(&dp->rq);
+	__skb_queue_purge(&dp->tq);
 	return 0;
 }
 
@@ -207,8 +186,8 @@
 	struct ifb_private *dp = netdev_priv(dev);
 
 	tasklet_init(&dp->ifb_tasklet, ri_tasklet, (unsigned long)dev);
-	skb_queue_head_init(&dp->rq);
-	skb_queue_head_init(&dp->tq);
+	__skb_queue_head_init(&dp->rq);
+	__skb_queue_head_init(&dp->tq);
 	netif_start_queue(dev);
 
 	return 0;
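
Two related ifb simplifications: the dequeue/enqueue loop collapses into one skb_queue_splice_tail_init() call, and because rq and tq are only ever touched under the device's tx queue lock, the lock-free double-underscore queue primitives (__skb_queue_tail() and friends) replace the locking ones, avoiding a redundant internal spinlock. The splice on its own, sketched:

#include <linux/skbuff.h>

/* Move every skb from 'from' onto the tail of 'to' in O(1), leaving
 * 'from' reinitialized empty - replaces a dequeue/enqueue loop. */
static void drain_queue(struct sk_buff_head *from, struct sk_buff_head *to)
{
	skb_queue_splice_tail_init(from, to);
}
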
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index 6222279..6319ed9 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -419,6 +419,9 @@
 #define E1000_ERR_SWFW_SYNC 13
 #define E1000_NOT_IMPLEMENTED 14
 #define E1000_ERR_MBX      15
+#define E1000_ERR_INVALID_ARGUMENT  16
+#define E1000_ERR_NO_SPACE          17
+#define E1000_ERR_NVM_PBA_SECTION   18
 
 /* Loop limit on how long we wait for auto-negotiation to complete */
 #define COPPER_LINK_UP_LIMIT              10
@@ -580,11 +583,15 @@
 
 /* Mask bits for fields in Word 0x1a of the NVM */
 
+/* length of string needed to store part num */
+#define E1000_PBANUM_LENGTH         11
+
 /* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
 #define NVM_SUM                    0xBABA
 
 #define NVM_PBA_OFFSET_0           8
 #define NVM_PBA_OFFSET_1           9
+#define NVM_PBA_PTR_GUARD          0xFAFA
 #define NVM_WORD_SIZE_BASE_SHIFT   6
 
 /* NVM Commands - Microwire */
diff --git a/drivers/net/igb/e1000_nvm.c b/drivers/net/igb/e1000_nvm.c
index d83b77fa..6b5cc2c 100644
--- a/drivers/net/igb/e1000_nvm.c
+++ b/drivers/net/igb/e1000_nvm.c
@@ -445,31 +445,112 @@
 }
 
 /**
- *  igb_read_part_num - Read device part number
+ *  igb_read_part_string - Read device part number
  *  @hw: pointer to the HW structure
  *  @part_num: pointer to device part number
+ *  @part_num_size: size of part number buffer
  *
  *  Reads the product board assembly (PBA) number from the EEPROM and stores
  *  the value in part_num.
  **/
-s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num)
+s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num, u32 part_num_size)
 {
-	s32  ret_val;
+	s32 ret_val;
 	u16 nvm_data;
+	u16 pointer;
+	u16 offset;
+	u16 length;
+
+	if (part_num == NULL) {
+		hw_dbg("PBA string buffer was null\n");
+		ret_val = E1000_ERR_INVALID_ARGUMENT;
+		goto out;
+	}
 
 	ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
 	if (ret_val) {
 		hw_dbg("NVM Read Error\n");
 		goto out;
 	}
-	*part_num = (u32)(nvm_data << 16);
 
-	ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &nvm_data);
+	ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pointer);
 	if (ret_val) {
 		hw_dbg("NVM Read Error\n");
 		goto out;
 	}
-	*part_num |= nvm_data;
+
+	/*
+	 * If nvm_data is not the pointer guard value, the PBA is stored in
+	 * the legacy format: "pointer" is actually the second data word of
+	 * the PBA number, and the two words decode into an ASCII string.
+	 */
+	if (nvm_data != NVM_PBA_PTR_GUARD) {
+		hw_dbg("NVM PBA number is not stored as string\n");
+
+		/* we will need 11 characters to store the PBA */
+		if (part_num_size < 11) {
+			hw_dbg("PBA string buffer too small\n");
+			return E1000_ERR_NO_SPACE;
+		}
+
+		/* extract hex string from data and pointer */
+		part_num[0] = (nvm_data >> 12) & 0xF;
+		part_num[1] = (nvm_data >> 8) & 0xF;
+		part_num[2] = (nvm_data >> 4) & 0xF;
+		part_num[3] = nvm_data & 0xF;
+		part_num[4] = (pointer >> 12) & 0xF;
+		part_num[5] = (pointer >> 8) & 0xF;
+		part_num[6] = '-';
+		part_num[7] = 0;
+		part_num[8] = (pointer >> 4) & 0xF;
+		part_num[9] = pointer & 0xF;
+
+		/* put a null character on the end of our string */
+		part_num[10] = '\0';
+
+		/* switch all the data but the '-' to hex char */
+		for (offset = 0; offset < 10; offset++) {
+			if (part_num[offset] < 0xA)
+				part_num[offset] += '0';
+			else if (part_num[offset] < 0x10)
+				part_num[offset] += 'A' - 0xA;
+		}
+
+		goto out;
+	}
+
+	ret_val = hw->nvm.ops.read(hw, pointer, 1, &length);
+	if (ret_val) {
+		hw_dbg("NVM Read Error\n");
+		goto out;
+	}
+
+	if (length == 0xFFFF || length == 0) {
+		hw_dbg("NVM PBA number section invalid length\n");
+		ret_val = E1000_ERR_NVM_PBA_SECTION;
+		goto out;
+	}
+	/* check if part_num buffer is big enough */
+	if (part_num_size < (((u32)length * 2) - 1)) {
+		hw_dbg("PBA string buffer too small\n");
+		ret_val = E1000_ERR_NO_SPACE;
+		goto out;
+	}
+
+	/* trim pba length from start of string */
+	pointer++;
+	length--;
+
+	for (offset = 0; offset < length; offset++) {
+		ret_val = hw->nvm.ops.read(hw, pointer + offset, 1, &nvm_data);
+		if (ret_val) {
+			hw_dbg("NVM Read Error\n");
+			goto out;
+		}
+		part_num[offset * 2] = (u8)(nvm_data >> 8);
+		part_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF);
+	}
+	part_num[offset * 2] = '\0';
 
 out:
 	return ret_val;
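
In the legacy layout the PBA lives packed as hex nibbles in two NVM words, so the code above splits each word into nibbles and then converts 0x0-0x9 to '0'-'9' and 0xA-0xF to 'A'-'F' in place. The conversion step in isolation:

#include <linux/types.h>

/* Map a 4-bit value to its ASCII hex digit. */
static char hex_nibble(u8 n)
{
	return n < 0xA ? '0' + n : 'A' + (n - 0xA);
}

/* Decode one 16-bit NVM word into four hex characters, MSB first. */
static void word_to_hex(u16 w, char out[4])
{
	out[0] = hex_nibble((w >> 12) & 0xF);
	out[1] = hex_nibble((w >> 8) & 0xF);
	out[2] = hex_nibble((w >> 4) & 0xF);
	out[3] = hex_nibble(w & 0xF);
}
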
diff --git a/drivers/net/igb/e1000_nvm.h b/drivers/net/igb/e1000_nvm.h
index 1041c34..29c956a 100644
--- a/drivers/net/igb/e1000_nvm.h
+++ b/drivers/net/igb/e1000_nvm.h
@@ -32,6 +32,8 @@
 void igb_release_nvm(struct e1000_hw *hw);
 s32  igb_read_mac_addr(struct e1000_hw *hw);
 s32  igb_read_part_num(struct e1000_hw *hw, u32 *part_num);
+s32  igb_read_part_string(struct e1000_hw *hw, u8 *part_num,
+                          u32 part_num_size);
 s32  igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
 s32  igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
 s32  igb_validate_nvm_checksum(struct e1000_hw *hw);
diff --git a/drivers/net/igb/e1000_phy.c b/drivers/net/igb/e1000_phy.c
index ddd036a..6694bf3 100644
--- a/drivers/net/igb/e1000_phy.c
+++ b/drivers/net/igb/e1000_phy.c
@@ -1757,11 +1757,12 @@
 	u16 phy_data, i, agc_value = 0;
 	u16 cur_agc_index, max_agc_index = 0;
 	u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
-	u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] =
-							 {IGP02E1000_PHY_AGC_A,
-							  IGP02E1000_PHY_AGC_B,
-							  IGP02E1000_PHY_AGC_C,
-							  IGP02E1000_PHY_AGC_D};
+	static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = {
+	       IGP02E1000_PHY_AGC_A,
+	       IGP02E1000_PHY_AGC_B,
+	       IGP02E1000_PHY_AGC_C,
+	       IGP02E1000_PHY_AGC_D
+	};
 
 	/* Read the AGC registers for all channels */
 	for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 892d196..62348fc 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -1729,12 +1729,13 @@
 	struct igb_adapter *adapter;
 	struct e1000_hw *hw;
 	u16 eeprom_data = 0;
+	s32 ret_val;
 	static int global_quad_port_a; /* global quad port a indication */
 	const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
 	unsigned long mmio_start, mmio_len;
 	int err, pci_using_dac;
 	u16 eeprom_apme_mask = IGB_EEPROM_APME;
-	u32 part_num;
+	u8 part_str[E1000_PBANUM_LENGTH];
 
 	/* Catch broken hardware that put the wrong VF device ID in
 	 * the PCIe SR-IOV capability.
@@ -2000,10 +2001,10 @@
 		   "unknown"),
 		 netdev->dev_addr);
 
-	igb_read_part_num(hw, &part_num);
-	dev_info(&pdev->dev, "%s: PBA No: %06x-%03x\n", netdev->name,
-		(part_num >> 8), (part_num & 0xff));
-
+	ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH);
+	if (ret_val)
+		strcpy(part_str, "Unknown");
+	dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
 	dev_info(&pdev->dev,
 		"Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
 		adapter->msix_entries ? "MSI-X" :
@@ -2049,13 +2050,16 @@
 	struct igb_adapter *adapter = netdev_priv(netdev);
 	struct e1000_hw *hw = &adapter->hw;
 
-	/* flush_scheduled work may reschedule our watchdog task, so
-	 * explicitly disable watchdog tasks from being rescheduled  */
+	/*
+	 * The watchdog timer may be rescheduled, so explicitly
+	 * prevent the watchdog from being rescheduled.
+	 */
 	set_bit(__IGB_DOWN, &adapter->state);
 	del_timer_sync(&adapter->watchdog_timer);
 	del_timer_sync(&adapter->phy_info_timer);
 
-	flush_scheduled_work();
+	cancel_work_sync(&adapter->reset_task);
+	cancel_work_sync(&adapter->watchdog_task);
 
 #ifdef CONFIG_IGB_DCA
 	if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
@@ -2436,10 +2440,9 @@
 	int size;
 
 	size = sizeof(struct igb_buffer) * tx_ring->count;
-	tx_ring->buffer_info = vmalloc(size);
+	tx_ring->buffer_info = vzalloc(size);
 	if (!tx_ring->buffer_info)
 		goto err;
-	memset(tx_ring->buffer_info, 0, size);
 
 	/* round up to nearest 4K */
 	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
@@ -2587,10 +2590,9 @@
 	int size, desc_len;
 
 	size = sizeof(struct igb_buffer) * rx_ring->count;
-	rx_ring->buffer_info = vmalloc(size);
+	rx_ring->buffer_info = vzalloc(size);
 	if (!rx_ring->buffer_info)
 		goto err;
-	memset(rx_ring->buffer_info, 0, size);
 
 	desc_len = sizeof(union e1000_adv_rx_desc);
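
The vmalloc()+memset() pairs here and in igbvf/ixgb below collapse into vzalloc(), the vmalloc analogue of kzalloc(): one call that returns zeroed, virtually contiguous memory. The idiom, sketched:

#include <linux/vmalloc.h>

struct buf_info {
	void *skb;
	unsigned long time_stamp;
};

static struct buf_info *alloc_ring_info(unsigned int count)
{
	/* zeroed on allocation - no follow-up memset() required */
	return vzalloc(count * sizeof(struct buf_info));
}
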
 
diff --git a/drivers/net/igbvf/Makefile b/drivers/net/igbvf/Makefile
index c2f150d..0fa3db3 100644
--- a/drivers/net/igbvf/Makefile
+++ b/drivers/net/igbvf/Makefile
@@ -1,7 +1,7 @@
 ################################################################################
 #
 # Intel(R) 82576 Virtual Function Linux driver
-# Copyright(c) 2009 Intel Corporation.
+# Copyright(c) 2009 - 2010 Intel Corporation.
 #
 # This program is free software; you can redistribute it and/or modify it
 # under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igbvf/defines.h b/drivers/net/igbvf/defines.h
index 88a4753..79f2604 100644
--- a/drivers/net/igbvf/defines.h
+++ b/drivers/net/igbvf/defines.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) 82576 Virtual Function Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igbvf/ethtool.c b/drivers/net/igbvf/ethtool.c
index 33add70..ed6e3d9 100644
--- a/drivers/net/igbvf/ethtool.c
+++ b/drivers/net/igbvf/ethtool.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) 82576 Virtual Function Linux driver
-  Copyright(c) 2009 Intel Corporation.
+  Copyright(c) 2009 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -110,11 +110,6 @@
 	return 0;
 }
 
-static u32 igbvf_get_link(struct net_device *netdev)
-{
-	return netif_carrier_ok(netdev);
-}
-
 static int igbvf_set_settings(struct net_device *netdev,
                               struct ethtool_cmd *ecmd)
 {
@@ -515,7 +510,7 @@
 	.get_msglevel		= igbvf_get_msglevel,
 	.set_msglevel		= igbvf_set_msglevel,
 	.nway_reset		= igbvf_nway_reset,
-	.get_link		= igbvf_get_link,
+	.get_link		= ethtool_op_get_link,
 	.get_eeprom_len		= igbvf_get_eeprom_len,
 	.get_eeprom		= igbvf_get_eeprom,
 	.set_eeprom		= igbvf_set_eeprom,
diff --git a/drivers/net/igbvf/igbvf.h b/drivers/net/igbvf/igbvf.h
index debeee2..9d4d63e 100644
--- a/drivers/net/igbvf/igbvf.h
+++ b/drivers/net/igbvf/igbvf.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) 82576 Virtual Function Linux driver
-  Copyright(c) 2009 Intel Corporation.
+  Copyright(c) 2009 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -126,7 +126,6 @@
 			unsigned int page_offset;
 		};
 	};
-	struct page *page;
 };
 
 union igbvf_desc {
diff --git a/drivers/net/igbvf/mbx.c b/drivers/net/igbvf/mbx.c
index 819a8ec..3d6f4cc 100644
--- a/drivers/net/igbvf/mbx.c
+++ b/drivers/net/igbvf/mbx.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) 82576 Virtual Function Linux driver
-  Copyright(c) 2009 Intel Corporation.
+  Copyright(c) 2009 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igbvf/mbx.h b/drivers/net/igbvf/mbx.h
index 4938609..c2883c4 100644
--- a/drivers/net/igbvf/mbx.h
+++ b/drivers/net/igbvf/mbx.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) 82576 Virtual Function Linux driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index 28af019..4fb023b 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) 82576 Virtual Function Linux driver
-  Copyright(c) 2009 Intel Corporation.
+  Copyright(c) 2009 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -44,12 +44,13 @@
 
 #include "igbvf.h"
 
-#define DRV_VERSION "1.0.0-k0"
+#define DRV_VERSION "1.0.8-k0"
 char igbvf_driver_name[] = "igbvf";
 const char igbvf_driver_version[] = DRV_VERSION;
 static const char igbvf_driver_string[] =
 				"Intel(R) Virtual Function Network Driver";
-static const char igbvf_copyright[] = "Copyright (c) 2009 Intel Corporation.";
+static const char igbvf_copyright[] =
+				"Copyright (c) 2009 - 2010 Intel Corporation.";
 
 static int igbvf_poll(struct napi_struct *napi, int budget);
 static void igbvf_reset(struct igbvf_adapter *);
@@ -429,10 +430,9 @@
 	int size;
 
 	size = sizeof(struct igbvf_buffer) * tx_ring->count;
-	tx_ring->buffer_info = vmalloc(size);
+	tx_ring->buffer_info = vzalloc(size);
 	if (!tx_ring->buffer_info)
 		goto err;
-	memset(tx_ring->buffer_info, 0, size);
 
 	/* round up to nearest 4K */
 	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
@@ -469,10 +469,9 @@
 	int size, desc_len;
 
 	size = sizeof(struct igbvf_buffer) * rx_ring->count;
-	rx_ring->buffer_info = vmalloc(size);
+	rx_ring->buffer_info = vzalloc(size);
 	if (!rx_ring->buffer_info)
 		goto err;
-	memset(rx_ring->buffer_info, 0, size);
 
 	desc_len = sizeof(union e1000_adv_rx_desc);
 
@@ -1851,8 +1850,6 @@
 
 	if (link) {
 		if (!netif_carrier_ok(netdev)) {
-			bool txb2b = 1;
-
 			mac->ops.get_link_up_info(&adapter->hw,
 			                          &adapter->link_speed,
 			                          &adapter->link_duplex);
@@ -1862,11 +1859,9 @@
 			adapter->tx_timeout_factor = 1;
 			switch (adapter->link_speed) {
 			case SPEED_10:
-				txb2b = 0;
 				adapter->tx_timeout_factor = 16;
 				break;
 			case SPEED_100:
-				txb2b = 0;
 				/* maybe add some timeout factor ? */
 				break;
 			}
@@ -2830,13 +2825,14 @@
 	struct e1000_hw *hw = &adapter->hw;
 
 	/*
-	 * flush_scheduled work may reschedule our watchdog task, so
-	 * explicitly disable watchdog tasks from being rescheduled
+	 * The watchdog timer may be rescheduled, so explicitly
+	 * prevent it from being rescheduled.
 	 */
 	set_bit(__IGBVF_DOWN, &adapter->state);
 	del_timer_sync(&adapter->watchdog_timer);
 
-	flush_scheduled_work();
+	cancel_work_sync(&adapter->reset_task);
+	cancel_work_sync(&adapter->watchdog_task);
 
 	unregister_netdev(netdev);
 
diff --git a/drivers/net/igbvf/regs.h b/drivers/net/igbvf/regs.h
index b9e24ed..77e18d3 100644
--- a/drivers/net/igbvf/regs.h
+++ b/drivers/net/igbvf/regs.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) 82576 Virtual Function Linux driver
-  Copyright(c) 2009 Intel Corporation.
+  Copyright(c) 2009 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igbvf/vf.c b/drivers/net/igbvf/vf.c
index a9a61ef..0cc13c6 100644
--- a/drivers/net/igbvf/vf.c
+++ b/drivers/net/igbvf/vf.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) 82576 Virtual Function Linux driver
-  Copyright(c) 2009 Intel Corporation.
+  Copyright(c) 2009 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igbvf/vf.h b/drivers/net/igbvf/vf.h
index 1e8ce37..c36ea21 100644
--- a/drivers/net/igbvf/vf.h
+++ b/drivers/net/igbvf/vf.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) 82576 Virtual Function Linux driver
-  Copyright(c) 2009 Intel Corporation.
+  Copyright(c) 2009 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index dc01980..aa93655 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -88,16 +88,14 @@
 	"IC PLUS IP1000 1000/100/10 based NIC",
 	"Sundance Technology ST2021 based NIC",
 	"Tamarack Microelectronics TC9020/9021 based NIC",
-	"Tamarack Microelectronics TC9020/9021 based NIC",
 	"D-Link NIC IP1000A"
 };
 
 static DEFINE_PCI_DEVICE_TABLE(ipg_pci_tbl) = {
 	{ PCI_VDEVICE(SUNDANCE,	0x1023), 0 },
 	{ PCI_VDEVICE(SUNDANCE,	0x2021), 1 },
-	{ PCI_VDEVICE(SUNDANCE,	0x1021), 2 },
-	{ PCI_VDEVICE(DLINK,	0x9021), 3 },
-	{ PCI_VDEVICE(DLINK,	0x4020), 4 },
+	{ PCI_VDEVICE(DLINK,	0x9021), 2 },
+	{ PCI_VDEVICE(DLINK,	0x4020), 3 },
 	{ 0, }
 };
 
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index 74b20f1..cc821de 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -959,7 +959,7 @@
 	if (!mcs)
 		return;
 
-	flush_scheduled_work();
+	cancel_work_sync(&mcs->work);
 
 	unregister_netdev(mcs->netdev);
 	free_netdev(mcs->netdev);
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
index 00b38bc..52a7c86 100644
--- a/drivers/net/irda/sh_sir.c
+++ b/drivers/net/irda/sh_sir.c
@@ -258,7 +258,7 @@
 
 	/* Baud Rate Error Correction x 10000 */
 	u32 rate_err_array[] = {
-		0000, 0625, 1250, 1875,
+		   0,  625, 1250, 1875,
 		2500, 3125, 3750, 4375,
 		5000, 5625, 6250, 6875,
 		7500, 8125, 8750, 9375,
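
The sh_sir table fix is easy to miss: a leading zero makes a C integer constant octal, so the original 0625 was really 405 decimal (and 0000 was just a confusing zero), corrupting one baud-rate error term. A standalone demonstration of the pitfall:

#include <stdio.h>

int main(void)
{
	int wrong = 0625;	/* leading zero => octal, i.e. 405 */
	int right =  625;

	printf("%d %d\n", wrong, right);	/* prints "405 625" */
	return 0;
}
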
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index 8df645e..9ece1fd 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -885,17 +885,8 @@
 	veth_kick_statemachine(cnx);
 	spin_unlock_irq(&cnx->lock);
 
-	/* There's a slim chance the reset code has just queued the
-	 * statemachine to run in five seconds. If so we need to cancel
-	 * that and requeue the work to run now. */
-	if (cancel_delayed_work(&cnx->statemachine_wq)) {
-		spin_lock_irq(&cnx->lock);
-		veth_kick_statemachine(cnx);
-		spin_unlock_irq(&cnx->lock);
-	}
-
-	/* Wait for the state machine to run. */
-	flush_scheduled_work();
+	/* ensure the state machine runs now and wait for it to complete */
+	flush_delayed_work_sync(&cnx->statemachine_wq);
 }
 
 static void veth_destroy_connection(struct veth_lpar_connection *cnx)
@@ -1009,15 +1000,10 @@
 	return 0;
 }
 
-static u32 veth_get_link(struct net_device *dev)
-{
-	return 1;
-}
-
 static const struct ethtool_ops ops = {
 	.get_drvinfo = veth_get_drvinfo,
 	.get_settings = veth_get_settings,
-	.get_link = veth_get_link,
+	.get_link = ethtool_op_get_link,
 };
 
 static const struct net_device_ops veth_netdev_ops = {
@@ -1605,7 +1591,7 @@
 	}
 	veth_dev[i] = dev;
 
-	port = (struct veth_port*)netdev_priv(dev);
+	port = netdev_priv(dev);
 
 	/* Start the state machine on each connection on this vlan. If we're
 	 * the first dev to do so this will commence link negotiation */
@@ -1658,15 +1644,14 @@
 	/* Disconnect our "irq" to stop events coming from the Hypervisor. */
 	HvLpEvent_unregisterHandler(HvLpEvent_Type_VirtualLan);
 
-	/* Make sure any work queued from Hypervisor callbacks is finished. */
-	flush_scheduled_work();
-
 	for (i = 0; i < HVMAXARCHITECTEDLPS; ++i) {
 		cnx = veth_cnx[i];
 
 		if (!cnx)
 			continue;
 
+		/* Cancel work queued from Hypervisor callbacks */
+		cancel_delayed_work_sync(&cnx->statemachine_wq);
 		/* Remove the connection from sysfs */
 		kobject_del(&cnx->kobject);
 		/* Drop the driver's reference to the connection */
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index caa8192..b021798 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -98,6 +98,8 @@
 static void ixgb_tx_timeout(struct net_device *dev);
 static void ixgb_tx_timeout_task(struct work_struct *work);
 
+static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter);
+static void ixgb_vlan_strip_disable(struct ixgb_adapter *adapter);
 static void ixgb_vlan_rx_register(struct net_device *netdev,
                                   struct vlan_group *grp);
 static void ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
@@ -525,7 +527,7 @@
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct ixgb_adapter *adapter = netdev_priv(netdev);
 
-	flush_scheduled_work();
+	cancel_work_sync(&adapter->tx_timeout_task);
 
 	unregister_netdev(netdev);
 
@@ -669,13 +671,12 @@
 	int size;
 
 	size = sizeof(struct ixgb_buffer) * txdr->count;
-	txdr->buffer_info = vmalloc(size);
+	txdr->buffer_info = vzalloc(size);
 	if (!txdr->buffer_info) {
 		netif_err(adapter, probe, adapter->netdev,
 			  "Unable to allocate transmit descriptor ring memory\n");
 		return -ENOMEM;
 	}
-	memset(txdr->buffer_info, 0, size);
 
 	/* round up to nearest 4K */
 
@@ -759,13 +760,12 @@
 	int size;
 
 	size = sizeof(struct ixgb_buffer) * rxdr->count;
-	rxdr->buffer_info = vmalloc(size);
+	rxdr->buffer_info = vzalloc(size);
 	if (!rxdr->buffer_info) {
 		netif_err(adapter, probe, adapter->netdev,
 			  "Unable to allocate receive descriptor ring\n");
 		return -ENOMEM;
 	}
-	memset(rxdr->buffer_info, 0, size);
 
 	/* Round up to nearest 4K */
 
@@ -1078,6 +1078,8 @@
 
 	if (netdev->flags & IFF_PROMISC) {
 		rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
+		/* disable VLAN filtering */
+		rctl &= ~IXGB_RCTL_CFIEN;
 		rctl &= ~IXGB_RCTL_VFE;
 	} else {
 		if (netdev->flags & IFF_ALLMULTI) {
@@ -1086,7 +1088,9 @@
 		} else {
 			rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE);
 		}
+		/* enable VLAN filtering */
 		rctl |= IXGB_RCTL_VFE;
+		rctl &= ~IXGB_RCTL_CFIEN;
 	}
 
 	if (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
@@ -1105,6 +1109,12 @@
 
 		ixgb_mc_addr_list_update(hw, mta, netdev_mc_count(netdev), 0);
 	}
+
+	if (netdev->features & NETIF_F_HW_VLAN_RX)
+		ixgb_vlan_strip_enable(adapter);
+	else
+		ixgb_vlan_strip_disable(adapter);
+
 }
 
 /**
@@ -2152,33 +2162,30 @@
 ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
 {
 	struct ixgb_adapter *adapter = netdev_priv(netdev);
-	u32 ctrl, rctl;
 
-	ixgb_irq_disable(adapter);
 	adapter->vlgrp = grp;
+}
 
-	if (grp) {
-		/* enable VLAN tag insert/strip */
-		ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
-		ctrl |= IXGB_CTRL0_VME;
-		IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
+static void
+ixgb_vlan_strip_enable(struct ixgb_adapter *adapter)
+{
+	u32 ctrl;
 
-		/* enable VLAN receive filtering */
+	/* enable VLAN tag insert/strip */
+	ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
+	ctrl |= IXGB_CTRL0_VME;
+	IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
+}
 
-		rctl = IXGB_READ_REG(&adapter->hw, RCTL);
-		rctl &= ~IXGB_RCTL_CFIEN;
-		IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
-	} else {
-		/* disable VLAN tag insert/strip */
+static void
+ixgb_vlan_strip_disable(struct ixgb_adapter *adapter)
+{
+	u32 ctrl;
 
-		ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
-		ctrl &= ~IXGB_CTRL0_VME;
-		IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
-	}
-
-	/* don't enable interrupts unless we are UP */
-	if (adapter->netdev->flags & IFF_UP)
-		ixgb_irq_enable(adapter);
+	/* disable VLAN tag insert/strip */
+	ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
+	ctrl &= ~IXGB_CTRL0_VME;
+	IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
 }
 
 static void
diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c
index 88a08f0..dd7fbeb 100644
--- a/drivers/net/ixgb/ixgb_param.c
+++ b/drivers/net/ixgb/ixgb_param.c
@@ -191,9 +191,9 @@
 		} r;
 		struct {	/* list_option info */
 			int nr;
-			struct ixgb_opt_list {
+			const struct ixgb_opt_list {
 				int i;
-				char *str;
+				const char *str;
 			} *p;
 		} l;
 	} arg;
@@ -226,7 +226,7 @@
 		break;
 	case list_option: {
 		int i;
-		struct ixgb_opt_list *ent;
+		const struct ixgb_opt_list *ent;
 
 		for (i = 0; i < opt->arg.l.nr; i++) {
 			ent = &opt->arg.l.p[i];
@@ -322,14 +322,15 @@
 	}
 	{ /* Flow Control */
 
-		struct ixgb_opt_list fc_list[] =
-			{{ ixgb_fc_none,	"Flow Control Disabled" },
-			 { ixgb_fc_rx_pause,"Flow Control Receive Only" },
-			 { ixgb_fc_tx_pause,"Flow Control Transmit Only" },
-			 { ixgb_fc_full,	"Flow Control Enabled" },
-			 { ixgb_fc_default, "Flow Control Hardware Default" }};
+		static const struct ixgb_opt_list fc_list[] = {
+		       { ixgb_fc_none, "Flow Control Disabled" },
+		       { ixgb_fc_rx_pause, "Flow Control Receive Only" },
+		       { ixgb_fc_tx_pause, "Flow Control Transmit Only" },
+		       { ixgb_fc_full, "Flow Control Enabled" },
+		       { ixgb_fc_default, "Flow Control Hardware Default" }
+		};
 
-		const struct ixgb_option opt = {
+		static const struct ixgb_option opt = {
 			.type = list_option,
 			.name = "Flow Control",
 			.err  = "reading default settings from EEPROM",
diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index 8f81efb..7d7387f 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -34,7 +34,7 @@
 
 ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
               ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
-              ixgbe_mbx.o
+              ixgbe_mbx.o ixgbe_x540.o
 
 ixgbe-$(CONFIG_IXGBE_DCB) +=  ixgbe_dcb.o ixgbe_dcb_82598.o \
                               ixgbe_dcb_82599.o ixgbe_dcb_nl.o
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index ed8703c..3ae30b8 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -61,10 +61,8 @@
 #define IXGBE_MIN_RXD			     64
 
 /* flow control */
-#define IXGBE_DEFAULT_FCRTL		0x10000
 #define IXGBE_MIN_FCRTL			   0x40
 #define IXGBE_MAX_FCRTL			0x7FF80
-#define IXGBE_DEFAULT_FCRTH		0x20000
 #define IXGBE_MIN_FCRTH			  0x600
 #define IXGBE_MAX_FCRTH			0x7FFF0
 #define IXGBE_DEFAULT_FCPAUSE		 0xFFFF
@@ -130,7 +128,9 @@
 	unsigned long time_stamp;
 	u16 length;
 	u16 next_to_watch;
-	u16 mapped_as_page;
+	unsigned int bytecount;
+	u16 gso_segs;
+	u8 mapped_as_page;
 };
 
 struct ixgbe_rx_buffer {
@@ -146,12 +146,56 @@
 	u64 bytes;
 };
 
+struct ixgbe_tx_queue_stats {
+	u64 restart_queue;
+	u64 tx_busy;
+	u64 completed;
+	u64 tx_done_old;
+};
+
+struct ixgbe_rx_queue_stats {
+	u64 rsc_count;
+	u64 rsc_flush;
+	u64 non_eop_descs;
+	u64 alloc_rx_page_failed;
+	u64 alloc_rx_buff_failed;
+};
+
+enum ixgbe_ring_state_t {
+	__IXGBE_TX_FDIR_INIT_DONE,
+	__IXGBE_TX_DETECT_HANG,
+	__IXGBE_HANG_CHECK_ARMED,
+	__IXGBE_RX_PS_ENABLED,
+	__IXGBE_RX_RSC_ENABLED,
+};
+
+#define ring_is_ps_enabled(ring) \
+	test_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
+#define set_ring_ps_enabled(ring) \
+	set_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
+#define clear_ring_ps_enabled(ring) \
+	clear_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
+#define check_for_tx_hang(ring) \
+	test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
+#define set_check_for_tx_hang(ring) \
+	set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
+#define clear_check_for_tx_hang(ring) \
+	clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
+#define ring_is_rsc_enabled(ring) \
+	test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
+#define set_ring_rsc_enabled(ring) \
+	set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
+#define clear_ring_rsc_enabled(ring) \
+	clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
 struct ixgbe_ring {
 	void *desc;			/* descriptor ring memory */
+	struct device *dev;             /* device for DMA mapping */
+	struct net_device *netdev;      /* netdev ring belongs to */
 	union {
 		struct ixgbe_tx_buffer *tx_buffer_info;
 		struct ixgbe_rx_buffer *rx_buffer_info;
 	};
+	unsigned long state;
 	u8 atr_sample_rate;
 	u8 atr_count;
 	u16 count;			/* amount of descriptors */
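
The ixgbe rework folds assorted per-ring booleans (packet-split, RSC, tx-hang detection) into a single "unsigned long state" word driven by the atomic set_bit/test_bit/clear_bit helpers, with a macro per flag; independent flags then need no extra locking. The shape of the idiom, with made-up names:

#include <linux/bitops.h>

enum my_ring_state {
	__MY_RING_RSC_ENABLED,	/* these are bit numbers, not masks */
	__MY_RING_DETECT_HANG,
};

struct my_ring {
	unsigned long state;	/* one atomic word of flags */
};

#define ring_is_rsc_enabled(r) \
	test_bit(__MY_RING_RSC_ENABLED, &(r)->state)
#define set_ring_rsc_enabled(r) \
	set_bit(__MY_RING_RSC_ENABLED, &(r)->state)
#define clear_ring_rsc_enabled(r) \
	clear_bit(__MY_RING_RSC_ENABLED, &(r)->state)
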
@@ -160,38 +204,30 @@
 	u16 next_to_clean;
 
 	u8 queue_index; /* needed for multiqueue queue management */
-
-#define IXGBE_RING_RX_PS_ENABLED                (u8)(1)
-	u8 flags;			/* per ring feature flags */
-	u16 head;
-	u16 tail;
-
-	unsigned int total_bytes;
-	unsigned int total_packets;
-
-#ifdef CONFIG_IXGBE_DCA
-	/* cpu for tx queue */
-	int cpu;
-#endif
-
-	u16 work_limit;			/* max work per interrupt */
-	u16 reg_idx;			/* holds the special value that gets
+	u8 reg_idx;			/* holds the special value that gets
 					 * the hardware register offset
 					 * associated with this ring, which is
 					 * different for DCB and RSS modes
 					 */
 
+	u16 work_limit;			/* max work per interrupt */
+
+	u8 __iomem *tail;
+
+	unsigned int total_bytes;
+	unsigned int total_packets;
+
 	struct ixgbe_queue_stats stats;
 	struct u64_stats_sync syncp;
+	union {
+		struct ixgbe_tx_queue_stats tx_stats;
+		struct ixgbe_rx_queue_stats rx_stats;
+	};
 	int numa_node;
-	unsigned long reinit_state;
-	u64 rsc_count;			/* stat for coalesced packets */
-	u64 rsc_flush;			/* stats for flushed packets */
-	u32 restart_queue;		/* track tx queue restarts */
-	u32 non_eop_descs;		/* track hardware descriptor chaining */
-
 	unsigned int size;		/* length in bytes */
 	dma_addr_t dma;			/* phys. address of descriptor ring */
+	struct rcu_head rcu;
+	struct ixgbe_q_vector *q_vector; /* back-pointer to host q_vector */
 } ____cacheline_internodealigned_in_smp;
 
 enum ixgbe_ring_f_enum {
@@ -237,6 +273,9 @@
 	unsigned int v_idx; /* index of q_vector within array, also used for
 	                     * finding the bit in EICR and friends that
 	                     * represents the vector for this ring */
+#ifdef CONFIG_IXGBE_DCA
+	int cpu;	    /* CPU for DCA */
+#endif
 	struct napi_struct napi;
 	DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */
 	DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */
@@ -246,6 +285,7 @@
 	u8 rx_itr;
 	u32 eitr;
 	cpumask_var_t affinity_mask;
+	char name[IFNAMSIZ + 9];
 };
 
 /* Helper macros to switch between ints/sec and what the register uses.
@@ -294,7 +334,6 @@
 	u16 bd_number;
 	struct work_struct reset_task;
 	struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
-	char name[MAX_MSIX_COUNT][IFNAMSIZ + 9];
 	struct ixgbe_dcb_config dcb_cfg;
 	struct ixgbe_dcb_config temp_dcb_cfg;
 	u8 dcb_set_bitmap;
@@ -417,6 +456,7 @@
 	int node;
 	struct work_struct check_overtemp_task;
 	u32 interrupt_event;
+	char lsc_int_name[IFNAMSIZ + 9];
 
 	/* SR-IOV */
 	DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
@@ -428,17 +468,25 @@
 	__IXGBE_TESTING,
 	__IXGBE_RESETTING,
 	__IXGBE_DOWN,
-	__IXGBE_FDIR_INIT_DONE,
 	__IXGBE_SFP_MODULE_NOT_FOUND
 };
 
+struct ixgbe_rsc_cb {
+	dma_addr_t dma;
+	u16 skb_cnt;
+	bool delay_unmap;
+};
+#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
+
 enum ixgbe_boards {
 	board_82598,
 	board_82599,
+	board_X540,
 };
 
 extern struct ixgbe_info ixgbe_82598_info;
 extern struct ixgbe_info ixgbe_82599_info;
+extern struct ixgbe_info ixgbe_X540_info;
 #ifdef CONFIG_IXGBE_DCB
 extern const struct dcbnl_rtnl_ops dcbnl_ops;
 extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
@@ -454,26 +502,24 @@
 extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
 extern void ixgbe_reset(struct ixgbe_adapter *adapter);
 extern void ixgbe_set_ethtool_ops(struct net_device *netdev);
-extern int ixgbe_setup_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
-extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
-extern void ixgbe_free_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
-extern void ixgbe_free_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *);
+extern int ixgbe_setup_rx_resources(struct ixgbe_ring *);
+extern int ixgbe_setup_tx_resources(struct ixgbe_ring *);
+extern void ixgbe_free_rx_resources(struct ixgbe_ring *);
+extern void ixgbe_free_tx_resources(struct ixgbe_ring *);
 extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
 extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
 extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
 extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
 extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
 extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *,
-					 struct net_device *,
 					 struct ixgbe_adapter *,
 					 struct ixgbe_ring *);
-extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *,
+extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
                                              struct ixgbe_tx_buffer *);
-extern void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
-                                   struct ixgbe_ring *rx_ring,
-                                   int cleaned_count);
+extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
 extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
 extern int ethtool_ioctl(struct ifreq *ifr);
+extern u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 index);
 extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
 extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc);
 extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc);
@@ -498,6 +544,10 @@
                                          u16 flex_byte);
 extern s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input,
                                       u8 l4type);
+extern void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
+                                   struct ixgbe_ring *ring);
+extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
+                               struct ixgbe_ring *ring);
 extern void ixgbe_set_rx_mode(struct net_device *netdev);
 #ifdef IXGBE_FCOE
 extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index 9c02d60..d0f1d9d 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -38,9 +38,6 @@
 #define IXGBE_82598_MC_TBL_SIZE  128
 #define IXGBE_82598_VFT_TBL_SIZE 128
 
-static s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
-                                             ixgbe_link_speed *speed,
-                                             bool *autoneg);
 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
                                          ixgbe_link_speed speed,
                                          bool autoneg,
@@ -156,7 +153,7 @@
 	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
 		mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
 		mac->ops.get_link_capabilities =
-		                  &ixgbe_get_copper_link_capabilities_82598;
+			&ixgbe_get_copper_link_capabilities_generic;
 	}
 
 	switch (hw->phy.type) {
@@ -274,37 +271,6 @@
 }
 
 /**
- *  ixgbe_get_copper_link_capabilities_82598 - Determines link capabilities
- *  @hw: pointer to hardware structure
- *  @speed: pointer to link speed
- *  @autoneg: boolean auto-negotiation value
- *
- *  Determines the link capabilities by reading the AUTOC register.
- **/
-static s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
-						    ixgbe_link_speed *speed,
-						    bool *autoneg)
-{
-	s32 status = IXGBE_ERR_LINK_SETUP;
-	u16 speed_ability;
-
-	*speed = 0;
-	*autoneg = true;
-
-	status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
-	                              &speed_ability);
-
-	if (status == 0) {
-		if (speed_ability & MDIO_SPEED_10G)
-		    *speed |= IXGBE_LINK_SPEED_10GB_FULL;
-		if (speed_ability & MDIO_PMA_SPEED_1000)
-		    *speed |= IXGBE_LINK_SPEED_1GB_FULL;
-	}
-
-	return status;
-}
-
-/**
  *  ixgbe_get_media_type_82598 - Determines media type
  *  @hw: pointer to hardware structure
  *
@@ -357,6 +323,7 @@
 	u32 fctrl_reg;
 	u32 rmcs_reg;
 	u32 reg;
+	u32 rx_pba_size;
 	u32 link_speed = 0;
 	bool link_up;
 
@@ -459,16 +426,18 @@
 
 	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
 	if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
-		if (hw->fc.send_xon) {
-			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
-			                (hw->fc.low_water | IXGBE_FCRTL_XONE));
-		} else {
-			IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num),
-			                hw->fc.low_water);
-		}
+		rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
+		rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
 
-		IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num),
-		                (hw->fc.high_water | IXGBE_FCRTH_FCEN));
+		reg = (rx_pba_size - hw->fc.low_water) << 6;
+		if (hw->fc.send_xon)
+			reg |= IXGBE_FCRTL_XONE;
+		IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), reg);
+
+		reg = (rx_pba_size - hw->fc.high_water) << 10;
+		reg |= IXGBE_FCRTH_FCEN;
+
+		IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), reg);
 	}
 
 	/* Configure pause time (2 TCs per register) */
@@ -1222,6 +1191,7 @@
 static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
 	.init_params		= &ixgbe_init_eeprom_params_generic,
 	.read			= &ixgbe_read_eerd_generic,
+	.calc_checksum          = &ixgbe_calc_eeprom_checksum_generic,
 	.validate_checksum	= &ixgbe_validate_eeprom_checksum_generic,
 	.update_checksum	= &ixgbe_update_eeprom_checksum_generic,
 };
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 0bd8fbb..6827ddd 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -56,9 +56,6 @@
                                ixgbe_link_speed speed,
                                bool autoneg,
                                bool autoneg_wait_to_complete);
-static s32 ixgbe_get_copper_link_capabilities_82599(struct ixgbe_hw *hw,
-                                             ixgbe_link_speed *speed,
-                                             bool *autoneg);
 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
                                          ixgbe_link_speed speed,
                                          bool autoneg,
@@ -68,9 +65,9 @@
 static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
 {
 	struct ixgbe_mac_info *mac = &hw->mac;
-	if (hw->phy.multispeed_fiber) {
-		/* Set up dual speed SFP+ support */
-		mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
+
+	/* enable the laser control functions for SFP+ fiber */
+	if (mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) {
 		mac->ops.disable_tx_laser =
 		                       &ixgbe_disable_tx_laser_multispeed_fiber;
 		mac->ops.enable_tx_laser =
@@ -80,6 +77,12 @@
 		mac->ops.disable_tx_laser = NULL;
 		mac->ops.enable_tx_laser = NULL;
 		mac->ops.flap_tx_laser = NULL;
+	}
+
+	if (hw->phy.multispeed_fiber) {
+		/* Set up dual speed SFP+ support */
+		mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
+	} else {
 		if ((mac->ops.get_media_type(hw) ==
 		     ixgbe_media_type_backplane) &&
 		    (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
@@ -93,6 +96,8 @@
 static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
 {
 	s32 ret_val = 0;
+	u32 reg_anlp1 = 0;
+	u32 i = 0;
 	u16 list_offset, data_offset, data_value;
 
 	if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
@@ -119,14 +124,34 @@
 			IXGBE_WRITE_FLUSH(hw);
 			hw->eeprom.ops.read(hw, ++data_offset, &data_value);
 		}
-		/* Now restart DSP by setting Restart_AN */
-		IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
-		    (IXGBE_READ_REG(hw, IXGBE_AUTOC) | IXGBE_AUTOC_AN_RESTART));
 
 		/* Release the semaphore */
 		ixgbe_release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
 		/* Delay obtaining semaphore again to allow FW access */
 		msleep(hw->eeprom.semaphore_delay);
+
+		/* Now restart DSP by setting Restart_AN and clearing LMS */
+		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw,
+		                IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) |
+		                IXGBE_AUTOC_AN_RESTART));
+
+		/* Wait for AN to leave state 0 */
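+		/* poll up to 10 times at ~4 ms intervals (~40 ms total) */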
+		for (i = 0; i < 10; i++) {
+			msleep(4);
+			reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1);
+			if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)
+				break;
+		}
+		if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) {
+			hw_dbg(hw, "sfp module setup not complete\n");
+			ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
+			goto setup_sfp_out;
+		}
+
+		/* Restart DSP by setting Restart_AN and return to SFI mode */
+		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
+		                IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL |
+		                IXGBE_AUTOC_AN_RESTART));
 	}
 
 setup_sfp_out:
@@ -174,7 +199,7 @@
 	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
 		mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
 		mac->ops.get_link_capabilities =
-		                  &ixgbe_get_copper_link_capabilities_82599;
+			&ixgbe_get_copper_link_capabilities_generic;
 	}
 
 	/* Set necessary function pointers based on phy type */
@@ -184,6 +209,10 @@
 		phy->ops.get_firmware_version =
 		             &ixgbe_get_phy_firmware_version_tnx;
 		break;
+	case ixgbe_phy_aq:
+		phy->ops.get_firmware_version =
+			&ixgbe_get_phy_firmware_version_generic;
+		break;
 	default:
 		break;
 	}
@@ -290,37 +319,6 @@
 }
 
 /**
- *  ixgbe_get_copper_link_capabilities_82599 - Determines link capabilities
- *  @hw: pointer to hardware structure
- *  @speed: pointer to link speed
- *  @autoneg: boolean auto-negotiation value
- *
- *  Determines the link capabilities by reading the AUTOC register.
- **/
-static s32 ixgbe_get_copper_link_capabilities_82599(struct ixgbe_hw *hw,
-                                                    ixgbe_link_speed *speed,
-                                                    bool *autoneg)
-{
-	s32 status = IXGBE_ERR_LINK_SETUP;
-	u16 speed_ability;
-
-	*speed = 0;
-	*autoneg = true;
-
-	status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
-	                              &speed_ability);
-
-	if (status == 0) {
-		if (speed_ability & MDIO_SPEED_10G)
-		    *speed |= IXGBE_LINK_SPEED_10GB_FULL;
-		if (speed_ability & MDIO_PMA_SPEED_1000)
-		    *speed |= IXGBE_LINK_SPEED_1GB_FULL;
-	}
-
-	return status;
-}
-
-/**
  *  ixgbe_get_media_type_82599 - Get media type
  *  @hw: pointer to hardware structure
  *
@@ -332,7 +330,8 @@
 
 	/* Detect if there is a copper PHY attached. */
 	if (hw->phy.type == ixgbe_phy_cu_unknown ||
-	    hw->phy.type == ixgbe_phy_tn) {
+	    hw->phy.type == ixgbe_phy_tn ||
+	    hw->phy.type == ixgbe_phy_aq) {
 		media_type = ixgbe_media_type_copper;
 		goto out;
 	}
@@ -342,11 +341,13 @@
 	case IXGBE_DEV_ID_82599_KX4_MEZZ:
 	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
 	case IXGBE_DEV_ID_82599_KR:
+	case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
 	case IXGBE_DEV_ID_82599_XAUI_LOM:
 		/* Default device ID is mezzanine card KX/KX4 */
 		media_type = ixgbe_media_type_backplane;
 		break;
 	case IXGBE_DEV_ID_82599_SFP:
+	case IXGBE_DEV_ID_82599_SFP_FCOE:
 	case IXGBE_DEV_ID_82599_SFP_EM:
 		media_type = ixgbe_media_type_fiber;
 		break;
@@ -1924,6 +1925,7 @@
 	hw->phy.ops.identify(hw);
 
 	if (hw->phy.type == ixgbe_phy_tn ||
+	    hw->phy.type == ixgbe_phy_aq ||
 	    hw->phy.type == ixgbe_phy_cu_unknown) {
 		hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD,
 				     &ext_ability);
@@ -2125,51 +2127,6 @@
 	return status;
 }
 
-/**
- *  ixgbe_get_wwn_prefix_82599 - Get alternative WWNN/WWPN prefix from
- *  the EEPROM
- *  @hw: pointer to hardware structure
- *  @wwnn_prefix: the alternative WWNN prefix
- *  @wwpn_prefix: the alternative WWPN prefix
- *
- *  This function will read the EEPROM from the alternative SAN MAC address
- *  block to check the support for the alternative WWNN/WWPN prefix support.
- **/
-static s32 ixgbe_get_wwn_prefix_82599(struct ixgbe_hw *hw, u16 *wwnn_prefix,
-                                      u16 *wwpn_prefix)
-{
-	u16 offset, caps;
-	u16 alt_san_mac_blk_offset;
-
-	/* clear output first */
-	*wwnn_prefix = 0xFFFF;
-	*wwpn_prefix = 0xFFFF;
-
-	/* check if alternative SAN MAC is supported */
-	hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
-	                    &alt_san_mac_blk_offset);
-
-	if ((alt_san_mac_blk_offset == 0) ||
-	    (alt_san_mac_blk_offset == 0xFFFF))
-		goto wwn_prefix_out;
-
-	/* check capability in alternative san mac address block */
-	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
-	hw->eeprom.ops.read(hw, offset, &caps);
-	if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
-		goto wwn_prefix_out;
-
-	/* get the corresponding prefix for WWNN/WWPN */
-	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
-	hw->eeprom.ops.read(hw, offset, wwnn_prefix);
-
-	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
-	hw->eeprom.ops.read(hw, offset, wwpn_prefix);
-
-wwn_prefix_out:
-	return 0;
-}
-
 static struct ixgbe_mac_operations mac_ops_82599 = {
 	.init_hw                = &ixgbe_init_hw_generic,
 	.reset_hw               = &ixgbe_reset_hw_82599,
@@ -2181,7 +2138,7 @@
 	.get_mac_addr           = &ixgbe_get_mac_addr_generic,
 	.get_san_mac_addr       = &ixgbe_get_san_mac_addr_generic,
 	.get_device_caps        = &ixgbe_get_device_caps_82599,
-	.get_wwn_prefix         = &ixgbe_get_wwn_prefix_82599,
+	.get_wwn_prefix         = &ixgbe_get_wwn_prefix_generic,
 	.stop_adapter           = &ixgbe_stop_adapter_generic,
 	.get_bus_info           = &ixgbe_get_bus_info_generic,
 	.set_lan_id             = &ixgbe_set_lan_id_multi_port_pcie,
@@ -2214,6 +2171,7 @@
 	.init_params            = &ixgbe_init_eeprom_params_generic,
 	.read                   = &ixgbe_read_eerd_generic,
 	.write                  = &ixgbe_write_eeprom_generic,
+	.calc_checksum          = &ixgbe_calc_eeprom_checksum_generic,
 	.validate_checksum      = &ixgbe_validate_eeprom_checksum_generic,
 	.update_checksum        = &ixgbe_update_eeprom_checksum_generic,
 };
@@ -2240,5 +2198,5 @@
 	.mac_ops                = &mac_ops_82599,
 	.eeprom_ops             = &eeprom_ops_82599,
 	.phy_ops                = &phy_ops_82599,
-	.mbx_ops                = &mbx_ops_82599,
+	.mbx_ops                = &mbx_ops_generic,
 };
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index e3eca13..cc11e42 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -45,14 +45,12 @@
 static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
 static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
 static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
-static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw);
 
 static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index);
 static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index);
 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
 static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
 static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num);
-static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
 
 /**
  *  ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
@@ -198,30 +196,110 @@
 }
 
 /**
- *  ixgbe_read_pba_num_generic - Reads part number from EEPROM
+ *  ixgbe_read_pba_string_generic - Reads part number string from EEPROM
  *  @hw: pointer to hardware structure
- *  @pba_num: stores the part number from the EEPROM
+ *  @pba_num: stores the part number string from the EEPROM
+ *  @pba_num_size: part number string buffer length
  *
- *  Reads the part number from the EEPROM.
+ *  Reads the part number string from the EEPROM.
  **/
-s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num)
+s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
+                                  u32 pba_num_size)
 {
 	s32 ret_val;
 	u16 data;
+	u16 pba_ptr;
+	u16 offset;
+	u16 length;
+
+	if (pba_num == NULL) {
+		hw_dbg(hw, "PBA string buffer was null\n");
+		return IXGBE_ERR_INVALID_ARGUMENT;
+	}
 
 	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
 	if (ret_val) {
 		hw_dbg(hw, "NVM Read Error\n");
 		return ret_val;
 	}
-	*pba_num = (u32)(data << 16);
 
-	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data);
+	ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
 	if (ret_val) {
 		hw_dbg(hw, "NVM Read Error\n");
 		return ret_val;
 	}
-	*pba_num |= data;
+
+	/*
+	 * if data is not the ptr guard, the PBA must be in legacy format,
+	 * which means pba_ptr is actually our second data word for the PBA
+	 * number and we can decode it into an ASCII string
+	 */
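+	/* e.g. (illustrative values) data = 0xE665 and pba_ptr = 0x6203
+	 * decode to the string "E66562-003"
+	 */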
+	if (data != IXGBE_PBANUM_PTR_GUARD) {
+		hw_dbg(hw, "NVM PBA number is not stored as string\n");
+
+		/* we will need 11 characters to store the PBA */
+		if (pba_num_size < 11) {
+			hw_dbg(hw, "PBA string buffer too small\n");
+			return IXGBE_ERR_NO_SPACE;
+		}
+
+		/* extract hex string from data and pba_ptr */
+		pba_num[0] = (data >> 12) & 0xF;
+		pba_num[1] = (data >> 8) & 0xF;
+		pba_num[2] = (data >> 4) & 0xF;
+		pba_num[3] = data & 0xF;
+		pba_num[4] = (pba_ptr >> 12) & 0xF;
+		pba_num[5] = (pba_ptr >> 8) & 0xF;
+		pba_num[6] = '-';
+		pba_num[7] = 0;
+		pba_num[8] = (pba_ptr >> 4) & 0xF;
+		pba_num[9] = pba_ptr & 0xF;
+
+		/* put a null character on the end of our string */
+		pba_num[10] = '\0';
+
+		/* switch all the data but the '-' to hex char */
+		for (offset = 0; offset < 10; offset++) {
+			if (pba_num[offset] < 0xA)
+				pba_num[offset] += '0';
+			else if (pba_num[offset] < 0x10)
+				pba_num[offset] += 'A' - 0xA;
+		}
+
+		return 0;
+	}
+
+	ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
+	if (ret_val) {
+		hw_dbg(hw, "NVM Read Error\n");
+		return ret_val;
+	}
+
+	if (length == 0xFFFF || length == 0) {
+		hw_dbg(hw, "NVM PBA number section invalid length\n");
+		return IXGBE_ERR_PBA_SECTION;
+	}
+
+	/* check if pba_num buffer is big enough */
+	if (pba_num_size < (((u32)length * 2) - 1)) {
+		hw_dbg(hw, "PBA string buffer too small\n");
+		return IXGBE_ERR_NO_SPACE;
+	}
+
+	/* trim pba length from start of string */
+	pba_ptr++;
+	length--;
+
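+	/* each EEPROM word holds two ASCII characters, high byte first */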
+	for (offset = 0; offset < length; offset++) {
+		ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
+		if (ret_val) {
+			hw_dbg(hw, "NVM Read Error\n");
+			return ret_val;
+		}
+		pba_num[offset * 2] = (u8)(data >> 8);
+		pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
+	}
+	pba_num[offset * 2] = '\0';
 
 	return 0;
 }
@@ -638,7 +716,7 @@
  *  Polls the status bit (bit 1) of the EERD or EEWR to determine when the
  *  read or write is done respectively.
  **/
-static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
+s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
 {
 	u32 i;
 	u32 reg;
@@ -1009,7 +1087,7 @@
  *  ixgbe_calc_eeprom_checksum - Calculates and returns the checksum
  *  @hw: pointer to hardware structure
  **/
-static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw)
+u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
 {
 	u16 i;
 	u16 j;
@@ -1072,7 +1150,7 @@
 	status = hw->eeprom.ops.read(hw, 0, &checksum);
 
 	if (status == 0) {
-		checksum = ixgbe_calc_eeprom_checksum(hw);
+		checksum = hw->eeprom.ops.calc_checksum(hw);
 
 		hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
 
@@ -1110,7 +1188,7 @@
 	status = hw->eeprom.ops.read(hw, 0, &checksum);
 
 	if (status == 0) {
-		checksum = ixgbe_calc_eeprom_checksum(hw);
+		checksum = hw->eeprom.ops.calc_checksum(hw);
 		status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
 		                            checksum);
 	} else {
@@ -1595,6 +1673,7 @@
 	u32 mflcn_reg, fccfg_reg;
 	u32 reg;
 	u32 rx_pba_size;
+	u32 fcrtl, fcrth;
 
 #ifdef CONFIG_DCB
 	if (hw->fc.requested_mode == ixgbe_fc_pfc)
@@ -1671,41 +1750,21 @@
 	IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
 	IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
 
-	reg = IXGBE_READ_REG(hw, IXGBE_MTQC);
-	/* Thresholds are different for link flow control when in DCB mode */
-	if (reg & IXGBE_MTQC_RT_ENA) {
-		rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
+	rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
+	rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
 
-		/* Always disable XON for LFC when in DCB mode */
-		reg = (rx_pba_size >> 5) & 0xFFE0;
-		IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), reg);
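+	/*
+	 * rx_pba_size and the water marks are in KB here; shifting by
+	 * 10 converts the thresholds to the byte granularity that the
+	 * FCRTH/FCRTL registers expect
+	 */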
+	fcrth = (rx_pba_size - hw->fc.high_water) << 10;
+	fcrtl = (rx_pba_size - hw->fc.low_water) << 10;
 
-		reg = (rx_pba_size >> 2) & 0xFFE0;
-		if (hw->fc.current_mode & ixgbe_fc_tx_pause)
-			reg |= IXGBE_FCRTH_FCEN;
-		IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), reg);
-	} else {
-		/*
-		 * Set up and enable Rx high/low water mark thresholds,
-		 * enable XON.
-		 */
-		if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
-			if (hw->fc.send_xon) {
-				IXGBE_WRITE_REG(hw,
-				              IXGBE_FCRTL_82599(packetbuf_num),
-			                      (hw->fc.low_water |
-				              IXGBE_FCRTL_XONE));
-			} else {
-				IXGBE_WRITE_REG(hw,
-				              IXGBE_FCRTL_82599(packetbuf_num),
-				              hw->fc.low_water);
-			}
-
-			IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num),
-			               (hw->fc.high_water | IXGBE_FCRTH_FCEN));
-		}
+	if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
+		fcrth |= IXGBE_FCRTH_FCEN;
+		if (hw->fc.send_xon)
+			fcrtl |= IXGBE_FCRTL_XONE;
 	}
 
+	IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), fcrth);
+	IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), fcrtl);
+
 	/* Configure pause time (2 TCs per register) */
 	reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
 	if ((packetbuf_num & 1) == 0)
@@ -2705,3 +2764,48 @@
 
 	return 0;
 }
+
+/**
+ *  ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
+ *  the EEPROM
+ *  @hw: pointer to hardware structure
+ *  @wwnn_prefix: the alternative WWNN prefix
+ *  @wwpn_prefix: the alternative WWPN prefix
+ *
+ *  This function reads the EEPROM from the alternative SAN MAC address
+ *  block to check for support of the alternative WWNN/WWPN prefix.
+ **/
+s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+                                 u16 *wwpn_prefix)
+{
+	u16 offset, caps;
+	u16 alt_san_mac_blk_offset;
+
+	/* clear output first */
+	*wwnn_prefix = 0xFFFF;
+	*wwpn_prefix = 0xFFFF;
+
+	/* check if alternative SAN MAC is supported */
+	hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
+	                    &alt_san_mac_blk_offset);
+
+	if ((alt_san_mac_blk_offset == 0) ||
+	    (alt_san_mac_blk_offset == 0xFFFF))
+		goto wwn_prefix_out;
+
+	/* check capability in alternative san mac address block */
+	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
+	hw->eeprom.ops.read(hw, offset, &caps);
+	if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
+		goto wwn_prefix_out;
+
+	/* get the corresponding prefix for WWNN/WWPN */
+	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
+	hw->eeprom.ops.read(hw, offset, wwnn_prefix);
+
+	offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
+	hw->eeprom.ops.read(hw, offset, wwpn_prefix);
+
+wwn_prefix_out:
+	return 0;
+}
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index 424c223..e1f980a 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -35,7 +35,8 @@
 s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
 s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
 s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
-s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num);
+s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
+                                  u32 pba_num_size);
 s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
 s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
 void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw);
@@ -49,9 +50,11 @@
 s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
 s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
                                        u16 *data);
+u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
 s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
                                            u16 *checksum_val);
 s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
+s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
 
 s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
                           u32 enable_addr);
@@ -81,7 +84,8 @@
 s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
                                  ixgbe_link_speed *speed,
                                  bool *link_up, bool link_up_wait_to_complete);
-
+s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
+                                 u16 *wwpn_prefix);
 s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
 s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
 
diff --git a/drivers/net/ixgbe/ixgbe_dcb.c b/drivers/net/ixgbe/ixgbe_dcb.c
index 0d44c64..d16c260 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ixgbe/ixgbe_dcb.c
@@ -42,7 +42,8 @@
  * It should be called only after the rules are checked by
  * ixgbe_dcb_check_config().
  */
-s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config,
+s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw,
+				   struct ixgbe_dcb_config *dcb_config,
 				   int max_frame, u8 direction)
 {
 	struct tc_bw_alloc *p;
@@ -124,7 +125,8 @@
 			 * credit may not be enough to send out a TSO
 			 * packet in descriptor plane arbitration.
 			 */
-			if (credit_max &&
+			if ((hw->mac.type == ixgbe_mac_82598EB) &&
+			    credit_max &&
 			    (credit_max < MINIMUM_CREDIT_FOR_TSO))
 				credit_max = MINIMUM_CREDIT_FOR_TSO;
 
@@ -150,10 +152,17 @@
                         struct ixgbe_dcb_config *dcb_config)
 {
 	s32 ret = 0;
-	if (hw->mac.type == ixgbe_mac_82598EB)
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
 		ret = ixgbe_dcb_hw_config_82598(hw, dcb_config);
-	else if (hw->mac.type == ixgbe_mac_82599EB)
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
 		ret = ixgbe_dcb_hw_config_82599(hw, dcb_config);
+		break;
+	default:
+		break;
+	}
 	return ret;
 }
 
diff --git a/drivers/net/ixgbe/ixgbe_dcb.h b/drivers/net/ixgbe/ixgbe_dcb.h
index 0208a87..1cfe38e 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ixgbe/ixgbe_dcb.h
@@ -150,7 +150,8 @@
 /* DCB driver APIs */
 
 /* DCB credits calculation */
-s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *, int, u8);
+s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *,
+				   struct ixgbe_dcb_config *, int, u8);
 
 /* DCB hw initialization */
 s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *);
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ixgbe/ixgbe_dcb_82598.c
index 50288bc..9a5e89c 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.c
@@ -256,21 +256,17 @@
 	 * for each traffic class.
 	 */
 	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
-		if (dcb_config->rx_pba_cfg == pba_equal) {
-			rx_pba_size = IXGBE_RXPBSIZE_64KB;
-		} else {
-			rx_pba_size = (i < 4) ? IXGBE_RXPBSIZE_80KB
-					      : IXGBE_RXPBSIZE_48KB;
-		}
+		rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
+		rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
+		reg = (rx_pba_size - hw->fc.low_water) << 10;
 
-		reg = ((rx_pba_size >> 5) &  0xFFF0);
 		if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx ||
 		    dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full)
 			reg |= IXGBE_FCRTL_XONE;
 
 		IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), reg);
 
-		reg = ((rx_pba_size >> 2) & 0xFFF0);
+		reg = (rx_pba_size - hw->fc.high_water) << 10;
 		if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx ||
 		    dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full)
 			reg |= IXGBE_FCRTH_FCEN;
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ixgbe/ixgbe_dcb_82599.c
index 05f2247..374e1f7 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.c
@@ -251,19 +251,17 @@
 
 	/* Configure PFC Tx thresholds per TC */
 	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
-		if (dcb_config->rx_pba_cfg == pba_equal)
-			rx_pba_size = IXGBE_RXPBSIZE_64KB;
-		else
-			rx_pba_size = (i < 4) ? IXGBE_RXPBSIZE_80KB
-			                      : IXGBE_RXPBSIZE_48KB;
+		rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
+		rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
 
-		reg = ((rx_pba_size >> 5) & 0xFFE0);
+		reg = (rx_pba_size - hw->fc.low_water) << 10;
+
 		if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full ||
 		    dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx)
 			reg |= IXGBE_FCRTL_XONE;
 		IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg);
 
-		reg = ((rx_pba_size >> 2) & 0xFFE0);
+		reg = (rx_pba_size - hw->fc.high_water) << 10;
 		if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full ||
 		    dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx)
 			reg |= IXGBE_FCRTH_FCEN;
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index b53b465..bf566e8 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -130,15 +130,21 @@
 			netdev->netdev_ops->ndo_stop(netdev);
 		ixgbe_clear_interrupt_scheme(adapter);
 
-		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+		adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
+		switch (adapter->hw.mac.type) {
+		case ixgbe_mac_82598EB:
 			adapter->last_lfc_mode = adapter->hw.fc.current_mode;
 			adapter->hw.fc.requested_mode = ixgbe_fc_none;
-		}
-		adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
-		if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+			break;
+		case ixgbe_mac_82599EB:
+		case ixgbe_mac_X540:
 			adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
 			adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+			break;
+		default:
+			break;
 		}
+
 		adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
 		ixgbe_init_interrupt_scheme(adapter);
 		if (netif_running(netdev))
@@ -155,8 +161,14 @@
 			adapter->dcb_cfg.pfc_mode_enable = false;
 			adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
 			adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
-			if (adapter->hw.mac.type == ixgbe_mac_82599EB)
+			switch (adapter->hw.mac.type) {
+			case ixgbe_mac_82599EB:
+			case ixgbe_mac_X540:
 				adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+				break;
+			default:
+				break;
+			}
 
 			ixgbe_init_interrupt_scheme(adapter);
 			if (netif_running(netdev))
@@ -178,9 +190,14 @@
 	for (i = 0; i < netdev->addr_len; i++)
 		perm_addr[i] = adapter->hw.mac.perm_addr[i];
 
-	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+	switch (adapter->hw.mac.type) {
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
 		for (j = 0; j < netdev->addr_len; j++, i++)
 			perm_addr[i] = adapter->hw.mac.san_addr[j];
+		break;
+	default:
+		break;
 	}
 }
 
@@ -366,15 +383,29 @@
 	}
 
 	if (adapter->dcb_cfg.pfc_mode_enable) {
-		if ((adapter->hw.mac.type != ixgbe_mac_82598EB) &&
-			(adapter->hw.fc.current_mode != ixgbe_fc_pfc))
-			adapter->last_lfc_mode = adapter->hw.fc.current_mode;
+		switch (adapter->hw.mac.type) {
+		case ixgbe_mac_82599EB:
+		case ixgbe_mac_X540:
+			if (adapter->hw.fc.current_mode != ixgbe_fc_pfc)
+				adapter->last_lfc_mode =
+				                  adapter->hw.fc.current_mode;
+			break;
+		default:
+			break;
+		}
 		adapter->hw.fc.requested_mode = ixgbe_fc_pfc;
 	} else {
-		if (adapter->hw.mac.type != ixgbe_mac_82598EB)
-			adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
-		else
+		switch (adapter->hw.mac.type) {
+		case ixgbe_mac_82598EB:
 			adapter->hw.fc.requested_mode = ixgbe_fc_none;
+			break;
+		case ixgbe_mac_82599EB:
+		case ixgbe_mac_X540:
+			adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
+			break;
+		default:
+			break;
+		}
 	}
 
 	if (adapter->dcb_set_bitmap & BIT_RESETLINK) {
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 3dc731c..23ff23e 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -185,6 +185,16 @@
 					     ADVERTISED_FIBRE);
 			ecmd->port = PORT_FIBRE;
 			ecmd->autoneg = AUTONEG_DISABLE;
+		} else if ((hw->device_id == IXGBE_DEV_ID_82599_COMBO_BACKPLANE) ||
+			   (hw->device_id == IXGBE_DEV_ID_82599_KX4_MEZZ)) {
+			ecmd->supported |= (SUPPORTED_1000baseT_Full |
+					    SUPPORTED_Autoneg |
+					    SUPPORTED_FIBRE);
+			ecmd->advertising = (ADVERTISED_10000baseT_Full |
+					     ADVERTISED_1000baseT_Full |
+					     ADVERTISED_Autoneg |
+					     ADVERTISED_FIBRE);
+			ecmd->port = PORT_FIBRE;
 		} else {
 			ecmd->supported |= (SUPPORTED_1000baseT_Full |
 					    SUPPORTED_FIBRE);
@@ -204,6 +214,7 @@
 	/* Get PHY type */
 	switch (adapter->hw.phy.type) {
 	case ixgbe_phy_tn:
+	case ixgbe_phy_aq:
 	case ixgbe_phy_cu_unknown:
 		/* Copper 10G-BASET */
 		ecmd->port = PORT_TP;
@@ -332,13 +343,6 @@
 	else
 		pause->autoneg = 1;
 
-#ifdef CONFIG_DCB
-	if (hw->fc.current_mode == ixgbe_fc_pfc) {
-		pause->rx_pause = 0;
-		pause->tx_pause = 0;
-	}
-
-#endif
 	if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
 		pause->rx_pause = 1;
 	} else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
@@ -346,6 +350,11 @@
 	} else if (hw->fc.current_mode == ixgbe_fc_full) {
 		pause->rx_pause = 1;
 		pause->tx_pause = 1;
+#ifdef CONFIG_DCB
+	} else if (hw->fc.current_mode == ixgbe_fc_pfc) {
+		pause->rx_pause = 0;
+		pause->tx_pause = 0;
+#endif
 	}
 }
 
@@ -363,7 +372,6 @@
 		return -EINVAL;
 
 #endif
-
 	fc = hw->fc;
 
 	if (pause->autoneg != AUTONEG_ENABLE)
@@ -412,11 +420,6 @@
 	else
 		adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
 
-	if (netif_running(netdev))
-		ixgbe_reinit_locked(adapter);
-	else
-		ixgbe_reset(adapter);
-
 	return 0;
 }
 
@@ -428,16 +431,21 @@
 static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	u32 feature_list;
 
-	if (data) {
-		netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
-		if (adapter->hw.mac.type == ixgbe_mac_82599EB)
-			netdev->features |= NETIF_F_SCTP_CSUM;
-	} else {
-		netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
-		if (adapter->hw.mac.type == ixgbe_mac_82599EB)
-			netdev->features &= ~NETIF_F_SCTP_CSUM;
+	feature_list = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
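+	/* SCTP checksum offload is only available on 82599 and X540 */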
+	switch (adapter->hw.mac.type) {
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+		feature_list |= NETIF_F_SCTP_CSUM;
+		break;
+	default:
+		break;
 	}
+	if (data)
+		netdev->features |= feature_list;
+	else
+		netdev->features &= ~feature_list;
 
 	return 0;
 }
@@ -530,10 +538,20 @@
 	regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1));
 	regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2));
 	regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3));
-	for (i = 0; i < 8; i++)
-		regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
-	for (i = 0; i < 8; i++)
-		regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
+	for (i = 0; i < 8; i++) {
+		switch (hw->mac.type) {
+		case ixgbe_mac_82598EB:
+			regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
+			regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
+			break;
+		case ixgbe_mac_82599EB:
+			regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
+			regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
+			break;
+		default:
+			break;
+		}
+	}
 	regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
 	regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);
 
@@ -615,6 +633,7 @@
 	regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
 	regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));
 
+	/* DCB */
 	regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);
 	regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
 	regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
@@ -820,9 +839,10 @@
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 	char firmware_version[32];
 
-	strncpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
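+	/* reserve the final byte for the NUL terminator */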
+	strncpy(drvinfo->driver, ixgbe_driver_name,
+	        sizeof(drvinfo->driver) - 1);
 	strncpy(drvinfo->version, ixgbe_driver_version,
-	        sizeof(drvinfo->version));
+	        sizeof(drvinfo->version) - 1);
 
 	snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d",
 	         (adapter->eeprom_version & 0xF000) >> 12,
@@ -905,13 +925,11 @@
 			memcpy(&temp_tx_ring[i], adapter->tx_ring[i],
 			       sizeof(struct ixgbe_ring));
 			temp_tx_ring[i].count = new_tx_count;
-			err = ixgbe_setup_tx_resources(adapter,
-			                               &temp_tx_ring[i]);
+			err = ixgbe_setup_tx_resources(&temp_tx_ring[i]);
 			if (err) {
 				while (i) {
 					i--;
-					ixgbe_free_tx_resources(adapter,
-					                      &temp_tx_ring[i]);
+					ixgbe_free_tx_resources(&temp_tx_ring[i]);
 				}
 				goto clear_reset;
 			}
@@ -930,13 +948,11 @@
 			memcpy(&temp_rx_ring[i], adapter->rx_ring[i],
 			       sizeof(struct ixgbe_ring));
 			temp_rx_ring[i].count = new_rx_count;
-			err = ixgbe_setup_rx_resources(adapter,
-			                               &temp_rx_ring[i]);
+			err = ixgbe_setup_rx_resources(&temp_rx_ring[i]);
 			if (err) {
 				while (i) {
 					i--;
-					ixgbe_free_rx_resources(adapter,
-					                      &temp_rx_ring[i]);
+					ixgbe_free_rx_resources(&temp_rx_ring[i]);
 				}
 				goto err_setup;
 			}
@@ -951,8 +967,7 @@
 		/* tx */
 		if (new_tx_count != adapter->tx_ring_count) {
 			for (i = 0; i < adapter->num_tx_queues; i++) {
-				ixgbe_free_tx_resources(adapter,
-				                        adapter->tx_ring[i]);
+				ixgbe_free_tx_resources(adapter->tx_ring[i]);
 				memcpy(adapter->tx_ring[i], &temp_tx_ring[i],
 				       sizeof(struct ixgbe_ring));
 			}
@@ -962,8 +977,7 @@
 		/* rx */
 		if (new_rx_count != adapter->rx_ring_count) {
 			for (i = 0; i < adapter->num_rx_queues; i++) {
-				ixgbe_free_rx_resources(adapter,
-				                        adapter->rx_ring[i]);
+				ixgbe_free_rx_resources(adapter->rx_ring[i]);
 				memcpy(adapter->rx_ring[i], &temp_rx_ring[i],
 				       sizeof(struct ixgbe_ring));
 			}
@@ -1144,7 +1158,7 @@
 #define TABLE64_TEST_HI	6
 
 /* default 82599 register test */
-static struct ixgbe_reg_test reg_test_82599[] = {
+static const struct ixgbe_reg_test reg_test_82599[] = {
 	{ IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
 	{ IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
 	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
@@ -1168,7 +1182,7 @@
 };
 
 /* default 82598 register test */
-static struct ixgbe_reg_test reg_test_82598[] = {
+static const struct ixgbe_reg_test reg_test_82598[] = {
 	{ IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
 	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
 	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
@@ -1195,18 +1209,22 @@
 	{ 0, 0, 0, 0 }
 };
 
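+/* test patterns shared by the REG_PATTERN_TEST macro below */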
+static const u32 register_test_patterns[] = {
+	0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
+};
+
 #define REG_PATTERN_TEST(R, M, W)                                             \
 {                                                                             \
 	u32 pat, val, before;                                                 \
-	const u32 _test[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \
-	for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {                       \
+	for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) {      \
 		before = readl(adapter->hw.hw_addr + R);                      \
-		writel((_test[pat] & W), (adapter->hw.hw_addr + R));          \
+		writel((register_test_patterns[pat] & W),                     \
+		       (adapter->hw.hw_addr + R));                            \
 		val = readl(adapter->hw.hw_addr + R);                         \
-		if (val != (_test[pat] & W & M)) {                            \
-			e_err(drv, "pattern test reg %04X failed: got "   \
-			      "0x%08X expected 0x%08X\n",		      \
-			      R, val, (_test[pat] & W & M));                \
+		if (val != (register_test_patterns[pat] & W & M)) {           \
+			e_err(drv, "pattern test reg %04X failed: got "       \
+			      "0x%08X expected 0x%08X\n",                     \
+			      R, val, (register_test_patterns[pat] & W & M)); \
 			*data = R;                                            \
 			writel(before, adapter->hw.hw_addr + R);              \
 			return 1;                                             \
@@ -1233,16 +1251,24 @@
 
 static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
 {
-	struct ixgbe_reg_test *test;
+	const struct ixgbe_reg_test *test;
 	u32 value, before, after;
 	u32 i, toggle;
 
-	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-		toggle = 0x7FFFF30F;
-		test = reg_test_82599;
-	} else {
+	switch (adapter->hw.mac.type) {
+	case ixgbe_mac_82598EB:
 		toggle = 0x7FFFF3FF;
 		test = reg_test_82598;
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+		toggle = 0x7FFFF30F;
+		test = reg_test_82599;
+		break;
+	default:
+		*data = 1;
+		return 1;
 	}
 
 	/*
@@ -1460,16 +1486,21 @@
 	reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
 	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl);
 
-	if (hw->mac.type == ixgbe_mac_82599EB) {
+	switch (hw->mac.type) {
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
 		reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
 		reg_ctl &= ~IXGBE_DMATXCTL_TE;
 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
+		break;
+	default:
+		break;
 	}
 
 	ixgbe_reset(adapter);
 
-	ixgbe_free_tx_resources(adapter, &adapter->test_tx_ring);
-	ixgbe_free_rx_resources(adapter, &adapter->test_rx_ring);
+	ixgbe_free_tx_resources(&adapter->test_tx_ring);
+	ixgbe_free_rx_resources(&adapter->test_rx_ring);
 }
 
 static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
@@ -1483,17 +1514,24 @@
 	/* Setup Tx descriptor ring and Tx buffers */
 	tx_ring->count = IXGBE_DEFAULT_TXD;
 	tx_ring->queue_index = 0;
+	tx_ring->dev = &adapter->pdev->dev;
+	tx_ring->netdev = adapter->netdev;
 	tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;
 	tx_ring->numa_node = adapter->node;
 
-	err = ixgbe_setup_tx_resources(adapter, tx_ring);
+	err = ixgbe_setup_tx_resources(tx_ring);
 	if (err)
 		return 1;
 
-	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+	switch (adapter->hw.mac.type) {
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
 		reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
 		reg_data |= IXGBE_DMATXCTL_TE;
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
+		break;
+	default:
+		break;
 	}
 
 	ixgbe_configure_tx_ring(adapter, tx_ring);
@@ -1501,11 +1539,13 @@
 	/* Setup Rx Descriptor ring and Rx buffers */
 	rx_ring->count = IXGBE_DEFAULT_RXD;
 	rx_ring->queue_index = 0;
+	rx_ring->dev = &adapter->pdev->dev;
+	rx_ring->netdev = adapter->netdev;
 	rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
 	rx_ring->rx_buf_len = IXGBE_RXBUFFER_2048;
 	rx_ring->numa_node = adapter->node;
 
-	err = ixgbe_setup_rx_resources(adapter, rx_ring);
+	err = ixgbe_setup_rx_resources(rx_ring);
 	if (err) {
 		ret_val = 4;
 		goto err_nomem;
@@ -1604,8 +1644,7 @@
 	return 13;
 }
 
-static u16 ixgbe_clean_test_rings(struct ixgbe_adapter *adapter,
-                                  struct ixgbe_ring *rx_ring,
+static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
                                   struct ixgbe_ring *tx_ring,
                                   unsigned int size)
 {
@@ -1627,7 +1666,7 @@
 		rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
 
 		/* unmap Rx buffer, will be remapped by alloc_rx_buffers */
-		dma_unmap_single(&adapter->pdev->dev,
+		dma_unmap_single(rx_ring->dev,
 		                 rx_buffer_info->dma,
 				 bufsz,
 				 DMA_FROM_DEVICE);
@@ -1639,7 +1678,7 @@
 
 		/* unmap buffer on Tx side */
 		tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
-		ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+		ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
 
 		/* increment Rx/Tx next to clean counters */
 		rx_ntc++;
@@ -1655,7 +1694,7 @@
 	}
 
 	/* re-map buffers to ring, store next to clean values */
-	ixgbe_alloc_rx_buffers(adapter, rx_ring, count);
+	ixgbe_alloc_rx_buffers(rx_ring, count);
 	rx_ring->next_to_clean = rx_ntc;
 	tx_ring->next_to_clean = tx_ntc;
 
@@ -1699,7 +1738,6 @@
 		for (i = 0; i < 64; i++) {
 			skb_get(skb);
 			tx_ret_val = ixgbe_xmit_frame_ring(skb,
-							   adapter->netdev,
 							   adapter,
 							   tx_ring);
 			if (tx_ret_val == NETDEV_TX_OK)
@@ -1714,8 +1752,7 @@
 		/* allow 200 milliseconds for packets to go from Tx to Rx */
 		msleep(200);
 
-		good_cnt = ixgbe_clean_test_rings(adapter, rx_ring,
-						  tx_ring, size);
+		good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size);
 		if (good_cnt != 64) {
 			ret_val = 13;
 			break;
@@ -1847,7 +1884,25 @@
 	struct ixgbe_hw *hw = &adapter->hw;
 	int retval = 1;
 
+	/* WOL not supported except for the following */
 	switch(hw->device_id) {
+	case IXGBE_DEV_ID_82599_SFP:
+		/* Only this subdevice supports WOL */
+		if (hw->subsystem_device_id != IXGBE_SUBDEV_ID_82599_SFP) {
+			wol->supported = 0;
+			break;
+		}
+		retval = 0;
+		break;
+	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
+		/* All except this subdevice support WOL */
+		if (hw->subsystem_device_id ==
+		    IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) {
+			wol->supported = 0;
+			break;
+		}
+		retval = 0;
+		break;
 	case IXGBE_DEV_ID_82599_KX4:
 		retval = 0;
 		break;
@@ -1985,6 +2040,41 @@
 	return 0;
 }
 
+/*
+ * this function must be called before setting the new value of
+ * rx_itr_setting
+ */
+static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter,
+			     struct ethtool_coalesce *ec)
+{
+	struct net_device *netdev = adapter->netdev;
+
+	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
+		return false;
+
+	/* if interrupt rate is too high then disable RSC */
+	if (ec->rx_coalesce_usecs != 1 &&
+	    ec->rx_coalesce_usecs <= 1000000/IXGBE_MAX_RSC_INT_RATE) {
+		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
+			e_info(probe, "rx-usecs set too low, "
+				      "disabling RSC\n");
+			adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
+			return true;
+		}
+	} else {
+		/* check the feature flag value and enable RSC if necessary */
+		if ((netdev->features & NETIF_F_LRO) &&
+		    !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
+			e_info(probe, "rx-usecs set to %d, "
+				      "re-enabling RSC\n",
+			       ec->rx_coalesce_usecs);
+			adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
+			return true;
+		}
+	}
+	return false;
+}
+
 static int ixgbe_set_coalesce(struct net_device *netdev,
                               struct ethtool_coalesce *ec)
 {
@@ -2002,17 +2092,14 @@
 		adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq;
 
 	if (ec->rx_coalesce_usecs > 1) {
-		u32 max_int;
-		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
-			max_int = IXGBE_MAX_RSC_INT_RATE;
-		else
-			max_int = IXGBE_MAX_INT_RATE;
-
 		/* check the limits */
-		if ((1000000/ec->rx_coalesce_usecs > max_int) ||
+		if ((1000000/ec->rx_coalesce_usecs > IXGBE_MAX_INT_RATE) ||
 		    (1000000/ec->rx_coalesce_usecs < IXGBE_MIN_INT_RATE))
 			return -EINVAL;
 
+		/* check the old value and enable RSC if necessary */
+		need_reset = ixgbe_update_rsc(adapter, ec);
+
 		/* store the value in ints/second */
 		adapter->rx_eitr_param = 1000000/ec->rx_coalesce_usecs;
 
@@ -2021,32 +2108,21 @@
 		/* clear the lower bit as its used for dynamic state */
 		adapter->rx_itr_setting &= ~1;
 	} else if (ec->rx_coalesce_usecs == 1) {
+		/* check the old value and enable RSC if necessary */
+		need_reset = ixgbe_update_rsc(adapter, ec);
+
 		/* 1 means dynamic mode */
 		adapter->rx_eitr_param = 20000;
 		adapter->rx_itr_setting = 1;
 	} else {
+		/* check the old value and enable RSC if necessary */
+		need_reset = ixgbe_update_rsc(adapter, ec);
 		/*
 		 * any other value means disable eitr, which is best
 		 * served by setting the interrupt rate very high
 		 */
 		adapter->rx_eitr_param = IXGBE_MAX_INT_RATE;
 		adapter->rx_itr_setting = 0;
-
-		/*
-		 * if hardware RSC is enabled, disable it when
-		 * setting low latency mode, to avoid errata, assuming
-		 * that when the user set low latency mode they want
-		 * it at the cost of anything else
-		 */
-		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
-			adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
-			if (netdev->features & NETIF_F_LRO) {
-				netdev->features &= ~NETIF_F_LRO;
-				e_info(probe, "rx-usecs set to 0, "
-				       "disabling RSC\n");
-			}
-			need_reset = true;
-		}
 	}
 
 	if (ec->tx_coalesce_usecs > 1) {
@@ -2127,34 +2203,45 @@
 	need_reset = (data & ETH_FLAG_RXVLAN) !=
 		     (netdev->features & NETIF_F_HW_VLAN_RX);
 
-	rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_LRO |
+	rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_LRO | ETH_FLAG_NTUPLE |
 					ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN);
 	if (rc)
 		return rc;
 
 	/* if state changes we need to update adapter->flags and reset */
-	if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) {
-		/*
-		 * cast both to bool and verify if they are set the same
-		 * but only enable RSC if itr is non-zero, as
-		 * itr=0 and RSC are mutually exclusive
-		 */
-		if (((!!(data & ETH_FLAG_LRO)) !=
-		     (!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) &&
-		    adapter->rx_itr_setting) {
+	if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
+	    (!!(data & ETH_FLAG_LRO) !=
+	     !!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) {
+		if ((data & ETH_FLAG_LRO) &&
+		    (!adapter->rx_itr_setting ||
+		     (adapter->rx_itr_setting > IXGBE_MAX_RSC_INT_RATE))) {
+			e_info(probe, "rx-usecs set too low, "
+				      "not enabling RSC.\n");
+		} else {
 			adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
 			switch (adapter->hw.mac.type) {
 			case ixgbe_mac_82599EB:
 				need_reset = true;
 				break;
+			case ixgbe_mac_X540: {
+				int i;
+				for (i = 0; i < adapter->num_rx_queues; i++) {
+					struct ixgbe_ring *ring =
+					                  adapter->rx_ring[i];
+					if (adapter->flags2 &
+					    IXGBE_FLAG2_RSC_ENABLED) {
+						ixgbe_configure_rscctl(adapter,
+						                       ring);
+					} else {
+						ixgbe_clear_rscctl(adapter,
+						                   ring);
+					}
+				}
+			}
+				break;
 			default:
 				break;
 			}
-		} else if (!adapter->rx_itr_setting) {
-			netdev->features &= ~NETIF_F_LRO;
-			if (data & ETH_FLAG_LRO)
-				e_info(probe, "rx-usecs set to 0, "
-				       "LRO/RSC cannot be enabled.\n");
 		}
 	}
 
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index 05efa6a..6342d48 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -68,7 +68,7 @@
 static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp)
 {
 	ddp->len = 0;
-	ddp->err = 0;
+	ddp->err = 1;
 	ddp->udl = NULL;
 	ddp->udp = 0UL;
 	ddp->sgl = NULL;
@@ -92,6 +92,7 @@
 	struct ixgbe_fcoe *fcoe;
 	struct ixgbe_adapter *adapter;
 	struct ixgbe_fcoe_ddp *ddp;
+	u32 fcbuff;
 
 	if (!netdev)
 		goto out_ddp_put;
@@ -115,7 +116,14 @@
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCBUFF, 0);
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
 				(xid | IXGBE_FCDMARW_WE));
+
+		/* guaranteed to be invalidated after 100us */
+		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
+				(xid | IXGBE_FCDMARW_RE));
+		fcbuff = IXGBE_READ_REG(&adapter->hw, IXGBE_FCBUFF);
 		spin_unlock_bh(&fcoe->lock);
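+		/* if the context is still marked valid, wait out the
+		 * guaranteed 100us invalidation window before unmapping
+		 */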
+		if (fcbuff & IXGBE_FCBUFF_VALID)
+			udelay(100);
 	}
 	if (ddp->sgl)
 		pci_unmap_sg(adapter->pdev, ddp->sgl, ddp->sgc,
@@ -168,6 +176,11 @@
 		return 0;
 	}
 
+	/* no DDP if we are already down or resetting */
+	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
+	    test_bit(__IXGBE_RESETTING, &adapter->state))
+		return 0;
+
 	fcoe = &adapter->fcoe;
 	if (!fcoe->pool) {
 		e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 2bd3eb4..ca9036d 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -52,13 +52,14 @@
 static const char ixgbe_driver_string[] =
 			      "Intel(R) 10 Gigabit PCI Express Network Driver";
 
-#define DRV_VERSION "2.0.84-k2"
+#define DRV_VERSION "3.0.12-k2"
 const char ixgbe_driver_version[] = DRV_VERSION;
 static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";
 
 static const struct ixgbe_info *ixgbe_info_tbl[] = {
 	[board_82598] = &ixgbe_82598_info,
 	[board_82599] = &ixgbe_82599_info,
+	[board_X540] = &ixgbe_X540_info,
 };
 
 /* ixgbe_pci_tbl - PCI Device ID Table
@@ -108,10 +109,16 @@
 	 board_82599 },
 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4),
 	 board_82599 },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE),
+	 board_82599 },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE),
+	 board_82599 },
 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM),
 	 board_82599 },
 	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE),
 	 board_82599 },
+	{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T),
+	 board_X540 },
 
 	/* required last entry */
 	{0, }
@@ -560,6 +567,7 @@
 		IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
 		break;
 	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
 		if (direction == -1) {
 			/* other causes */
 			msix_vector |= IXGBE_IVAR_ALLOC_VAL;
@@ -589,29 +597,34 @@
 {
 	u32 mask;
 
-	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+	switch (adapter->hw.mac.type) {
+	case ixgbe_mac_82598EB:
 		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
-	} else {
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
 		mask = (qmask & 0xFFFFFFFF);
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
 		mask = (qmask >> 32);
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
+		break;
+	default:
+		break;
 	}
 }
 
-void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
-				      struct ixgbe_tx_buffer
-				      *tx_buffer_info)
+void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
+				      struct ixgbe_tx_buffer *tx_buffer_info)
 {
 	if (tx_buffer_info->dma) {
 		if (tx_buffer_info->mapped_as_page)
-			dma_unmap_page(&adapter->pdev->dev,
+			dma_unmap_page(tx_ring->dev,
 				       tx_buffer_info->dma,
 				       tx_buffer_info->length,
 				       DMA_TO_DEVICE);
 		else
-			dma_unmap_single(&adapter->pdev->dev,
+			dma_unmap_single(tx_ring->dev,
 					 tx_buffer_info->dma,
 					 tx_buffer_info->length,
 					 DMA_TO_DEVICE);
@@ -626,92 +639,166 @@
 }
 
 /**
- * ixgbe_tx_xon_state - check the tx ring xon state
- * @adapter: the ixgbe adapter
- * @tx_ring: the corresponding tx_ring
+ * ixgbe_dcb_txq_to_tc - convert a reg index to a traffic class
+ * @adapter: driver private struct
+ * @reg_idx: reg idx of queue to query (0-127)
  *
- * If not in DCB mode, checks TFCS.TXOFF, otherwise, find out the
- * corresponding TC of this tx_ring when checking TFCS.
+ * Helper function to determine the traffic class index for a particular
+ * register index.
  *
- * Returns : true if in xon state (currently not paused)
+ * Returns : a TC index in the range 0-7 (or 0-3 when only four TCs are in use)
  */
-static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter,
-				      struct ixgbe_ring *tx_ring)
+u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 reg_idx)
 {
-	u32 txoff = IXGBE_TFCS_TXOFF;
+	int tc = -1;
+	int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
 
-#ifdef CONFIG_IXGBE_DCB
-	if (adapter->dcb_cfg.pfc_mode_enable) {
-		int tc;
-		int reg_idx = tx_ring->reg_idx;
-		int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
+	/* if DCB is not enabled the queues have no TC */
+	if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+		return tc;
 
-		switch (adapter->hw.mac.type) {
-		case ixgbe_mac_82598EB:
-			tc = reg_idx >> 2;
-			txoff = IXGBE_TFCS_TXOFF0;
+	/* check valid range */
+	if (reg_idx >= adapter->hw.mac.max_tx_queues)
+		return tc;
+
+	switch (adapter->hw.mac.type) {
+	case ixgbe_mac_82598EB:
+		tc = reg_idx >> 2;
+		break;
+	default:
+		if (dcb_i != 4 && dcb_i != 8)
 			break;
-		case ixgbe_mac_82599EB:
-			tc = 0;
-			txoff = IXGBE_TFCS_TXOFF;
-			if (dcb_i == 8) {
-				/* TC0, TC1 */
-				tc = reg_idx >> 5;
-				if (tc == 2) /* TC2, TC3 */
-					tc += (reg_idx - 64) >> 4;
-				else if (tc == 3) /* TC4, TC5, TC6, TC7 */
-					tc += 1 + ((reg_idx - 96) >> 3);
-			} else if (dcb_i == 4) {
-				/* TC0, TC1 */
-				tc = reg_idx >> 6;
-				if (tc == 1) {
-					tc += (reg_idx - 64) >> 5;
-					if (tc == 2) /* TC2, TC3 */
-						tc += (reg_idx - 96) >> 4;
-				}
-			}
+
+		/* if VMDq is enabled the lowest order bits determine TC */
+		if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
+				      IXGBE_FLAG_VMDQ_ENABLED)) {
+			tc = reg_idx & (dcb_i - 1);
 			break;
-		default:
-			tc = 0;
 		}
-		txoff <<= tc;
+
+		/*
+		 * Convert the reg_idx into the correct TC. This bitmask
+		 * targets the last full 32 ring traffic class and assigns
+		 * it a value of 1. From there the rest of the rings are
+		 * based on shifting the mask further up to include the
+		 * reg_idx / 16 and then reg_idx / 8. It assumes dcb_i
+		 * will only ever be 8 or 4 and that reg_idx will never
+		 * be greater than 128. The code without the power of 2
+		 * optimizations would be:
+		 * (((reg_idx % 32) + 32) * dcb_i) >> (9 - reg_idx / 32)
+		 */
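+		/* e.g. reg_idx = 100, dcb_i = 8:
+		 * ((100 & 0x1F) + 0x20) * 8 = 288, 288 >> (9 - 3) = 4, i.e. TC4
+		 */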
+		tc = ((reg_idx & 0x1F) + 0x20) * dcb_i;
+		tc >>= 9 - (reg_idx >> 5);
 	}
-#endif
-	return IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & txoff;
+
+	return tc;
 }
 
-static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
-				       struct ixgbe_ring *tx_ring,
-				       unsigned int eop)
+static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
+	struct ixgbe_hw_stats *hwstats = &adapter->stats;
+	u32 data = 0;
+	u32 xoff[8] = {0};
+	int i;
 
-	/* Detect a transmit hang in hardware, this serializes the
-	 * check with the clearing of time_stamp and movement of eop */
-	adapter->detect_tx_hung = false;
-	if (tx_ring->tx_buffer_info[eop].time_stamp &&
-	    time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
-	    ixgbe_tx_xon_state(adapter, tx_ring)) {
-		/* detected Tx unit hang */
-		union ixgbe_adv_tx_desc *tx_desc;
-		tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
-		e_err(drv, "Detected Tx Unit Hang\n"
-		      "  Tx Queue             <%d>\n"
-		      "  TDH, TDT             <%x>, <%x>\n"
-		      "  next_to_use          <%x>\n"
-		      "  next_to_clean        <%x>\n"
-		      "tx_buffer_info[next_to_clean]\n"
-		      "  time_stamp           <%lx>\n"
-		      "  jiffies              <%lx>\n",
-		      tx_ring->queue_index,
-		      IXGBE_READ_REG(hw, tx_ring->head),
-		      IXGBE_READ_REG(hw, tx_ring->tail),
-		      tx_ring->next_to_use, eop,
-		      tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
-		return true;
+	if ((hw->fc.current_mode == ixgbe_fc_full) ||
+	    (hw->fc.current_mode == ixgbe_fc_rx_pause)) {
+		switch (hw->mac.type) {
+		case ixgbe_mac_82598EB:
+			data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
+			break;
+		default:
+			data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
+		}
+		hwstats->lxoffrxc += data;
+
+		/*
+		 * if no xoff was received there is nothing to disarm;
+		 * otherwise the queues are paused by flow control, not
+		 * hung, so clear the hang check on every tx queue
+		 */
+		if (!data)
+			return;
+
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			clear_bit(__IXGBE_HANG_CHECK_ARMED,
+				  &adapter->tx_ring[i]->state);
+		return;
+	} else if (!(adapter->dcb_cfg.pfc_mode_enable))
+		return;
+
+	/* update stats for each tc, only valid with PFC enabled */
+	for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
+		switch (hw->mac.type) {
+		case ixgbe_mac_82598EB:
+			xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
+			break;
+		default:
+			xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
+		}
+		hwstats->pxoffrxc[i] += xoff[i];
 	}
 
-	return false;
+	/* disarm tx queues that have received xoff frames */
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
+		u32 tc = ixgbe_dcb_txq_to_tc(adapter, tx_ring->reg_idx);
+
+		if (xoff[tc])
+			clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
+	}
+}
+
+static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
+{
+	return ring->tx_stats.completed;
+}
+
+static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(ring->netdev);
+	struct ixgbe_hw *hw = &adapter->hw;
+
+	u32 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
+	u32 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));
+
+	if (head != tail)
+		return (head < tail) ?
+			tail - head : (tail + ring->count - head);
+
+	return 0;
+}
+
+static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
+{
+	u32 tx_done = ixgbe_get_tx_completed(tx_ring);
+	u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
+	u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
+	bool ret = false;
+
+	clear_check_for_tx_hang(tx_ring);
+
+	/*
+	 * Check for a hung queue, but be thorough. This verifies
+	 * that a transmit has been completed since the previous
+	 * check AND there is at least one packet pending. The
+	 * ARMED bit is set to indicate a potential hang. The
+	 * bit is cleared if a pause frame is received to remove
+	 * false hang detection due to PFC or 802.3x frames. By
+	 * requiring this to fail twice we avoid races with
+	 * pfc clearing the ARMED bit and conditions where we
+	 * run the check_tx_hang logic with a transmit completion
+	 * pending but without time to complete it yet.
+	 */
+	if ((tx_done_old == tx_done) && tx_pending) {
+		/* make sure it is true for two checks in a row */
+		ret = test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
+				       &tx_ring->state);
+	} else {
+		/* update completed stats and continue */
+		tx_ring->tx_stats.tx_done_old = tx_done;
+		/* reset the countdown */
+		clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
+	}
+
+	return ret;
 }
 
 #define IXGBE_MAX_TXD_PWR       14
@@ -734,11 +821,10 @@
 			       struct ixgbe_ring *tx_ring)
 {
 	struct ixgbe_adapter *adapter = q_vector->adapter;
-	struct net_device *netdev = adapter->netdev;
 	union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
 	struct ixgbe_tx_buffer *tx_buffer_info;
-	unsigned int i, eop, count = 0;
 	unsigned int total_bytes = 0, total_packets = 0;
+	u16 i, eop, count = 0;
 
 	i = tx_ring->next_to_clean;
 	eop = tx_ring->tx_buffer_info[i].next_to_watch;
@@ -749,147 +835,182 @@
 		bool cleaned = false;
 		rmb(); /* read buffer_info after eop_desc */
 		for ( ; !cleaned; count++) {
-			struct sk_buff *skb;
 			tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
 			tx_buffer_info = &tx_ring->tx_buffer_info[i];
-			cleaned = (i == eop);
-			skb = tx_buffer_info->skb;
-
-			if (cleaned && skb) {
-				unsigned int segs, bytecount;
-				unsigned int hlen = skb_headlen(skb);
-
-				/* gso_segs is currently only valid for tcp */
-				segs = skb_shinfo(skb)->gso_segs ?: 1;
-#ifdef IXGBE_FCOE
-				/* adjust for FCoE Sequence Offload */
-				if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
-				    && (skb->protocol == htons(ETH_P_FCOE)) &&
-				    skb_is_gso(skb)) {
-					hlen = skb_transport_offset(skb) +
-						sizeof(struct fc_frame_header) +
-						sizeof(struct fcoe_crc_eof);
-					segs = DIV_ROUND_UP(skb->len - hlen,
-						skb_shinfo(skb)->gso_size);
-				}
-#endif /* IXGBE_FCOE */
-				/* multiply data chunks by size of headers */
-				bytecount = ((segs - 1) * hlen) + skb->len;
-				total_packets += segs;
-				total_bytes += bytecount;
-			}
-
-			ixgbe_unmap_and_free_tx_resource(adapter,
-							 tx_buffer_info);
 
 			tx_desc->wb.status = 0;
+			cleaned = (i == eop);
 
 			i++;
 			if (i == tx_ring->count)
 				i = 0;
+
+			if (cleaned && tx_buffer_info->skb) {
+				total_bytes += tx_buffer_info->bytecount;
+				total_packets += tx_buffer_info->gso_segs;
+			}
+
+			ixgbe_unmap_and_free_tx_resource(tx_ring,
+							 tx_buffer_info);
 		}
 
+		tx_ring->tx_stats.completed++;
 		eop = tx_ring->tx_buffer_info[i].next_to_watch;
 		eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
 	}
 
 	tx_ring->next_to_clean = i;
-
-#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
-	if (unlikely(count && netif_carrier_ok(netdev) &&
-		     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
-		/* Make sure that anybody stopping the queue after this
-		 * sees the new next_to_clean.
-		 */
-		smp_mb();
-		if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
-		    !test_bit(__IXGBE_DOWN, &adapter->state)) {
-			netif_wake_subqueue(netdev, tx_ring->queue_index);
-			++tx_ring->restart_queue;
-		}
-	}
-
-	if (adapter->detect_tx_hung) {
-		if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
-			/* schedule immediate reset if we believe we hung */
-			e_info(probe, "tx hang %d detected, resetting "
-			       "adapter\n", adapter->tx_timeout_count + 1);
-			ixgbe_tx_timeout(adapter->netdev);
-		}
-	}
-
-	/* re-arm the interrupt */
-	if (count >= tx_ring->work_limit)
-		ixgbe_irq_rearm_queues(adapter, ((u64)1 << q_vector->v_idx));
-
 	tx_ring->total_bytes += total_bytes;
 	tx_ring->total_packets += total_packets;
 	u64_stats_update_begin(&tx_ring->syncp);
 	tx_ring->stats.packets += total_packets;
 	tx_ring->stats.bytes += total_bytes;
 	u64_stats_update_end(&tx_ring->syncp);
+
+	if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
+		/* dump ring state to help diagnose the hang */
+		struct ixgbe_hw *hw = &adapter->hw;
+		tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
+		e_err(drv, "Detected Tx Unit Hang\n"
+			"  Tx Queue             <%d>\n"
+			"  TDH, TDT             <%x>, <%x>\n"
+			"  next_to_use          <%x>\n"
+			"  next_to_clean        <%x>\n"
+			"tx_buffer_info[next_to_clean]\n"
+			"  time_stamp           <%lx>\n"
+			"  jiffies              <%lx>\n",
+			tx_ring->queue_index,
+			IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
+			IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
+			tx_ring->next_to_use, eop,
+			tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
+
+		netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+
+		e_info(probe,
+		       "tx hang %d detected on queue %d, resetting adapter\n",
+			adapter->tx_timeout_count + 1, tx_ring->queue_index);
+
+		/* schedule immediate reset if we believe we hung */
+		ixgbe_tx_timeout(adapter->netdev);
+
+		/* the adapter is about to reset, no point in enabling stuff */
+		return true;
+	}
+
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
+	if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
+		     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
+		/* Make sure that anybody stopping the queue after this
+		 * sees the new next_to_clean.
+		 */
+		smp_mb();
+		if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) &&
+		    !test_bit(__IXGBE_DOWN, &adapter->state)) {
+			netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index);
+			++tx_ring->tx_stats.restart_queue;
+		}
+	}
+
 	return count < tx_ring->work_limit;
 }
 
 #ifdef CONFIG_IXGBE_DCA
 static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
-				struct ixgbe_ring *rx_ring)
+				struct ixgbe_ring *rx_ring,
+				int cpu)
 {
+	struct ixgbe_hw *hw = &adapter->hw;
 	u32 rxctrl;
-	int cpu = get_cpu();
-	int q = rx_ring->reg_idx;
+	u8 reg_idx = rx_ring->reg_idx;
 
-	if (rx_ring->cpu != cpu) {
-		rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
-		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
-			rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
-			rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
-		} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-			rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
-			rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
-				   IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
-		}
-		rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
-		rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
-		rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
-		rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
-			    IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
-		rx_ring->cpu = cpu;
+	rxctrl = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(reg_idx));
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
+		rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
+		rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+		rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
+		rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
+			   IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
+		break;
+	default:
+		break;
 	}
-	put_cpu();
+	rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
+	rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
+	rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
+	rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
+		    IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
+	IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
 }
 
 static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
-				struct ixgbe_ring *tx_ring)
+				struct ixgbe_ring *tx_ring,
+				int cpu)
 {
-	u32 txctrl;
-	int cpu = get_cpu();
-	int q = tx_ring->reg_idx;
 	struct ixgbe_hw *hw = &adapter->hw;
+	u32 txctrl;
+	u8 reg_idx = tx_ring->reg_idx;
 
-	if (tx_ring->cpu != cpu) {
-		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
-			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(q));
-			txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
-			txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
-			txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
-			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(q), txctrl);
-		} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-			txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(q));
-			txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
-			txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
-				  IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
-			txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
-			IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(q), txctrl);
-		}
-		tx_ring->cpu = cpu;
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
+		txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(reg_idx));
+		txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
+		txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
+		txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
+		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(reg_idx), txctrl);
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+		txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx));
+		txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
+		txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
+			   IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
+		txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
+		txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+		IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx), txctrl);
+		break;
+	default:
+		break;
 	}
+}
+
+static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
+{
+	struct ixgbe_adapter *adapter = q_vector->adapter;
+	int cpu = get_cpu();
+	long r_idx;
+	int i;
+
+	if (q_vector->cpu == cpu)
+		goto out_no_update;
+
+	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+	for (i = 0; i < q_vector->txr_count; i++) {
+		ixgbe_update_tx_dca(adapter, adapter->tx_ring[r_idx], cpu);
+		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+				      r_idx + 1);
+	}
+
+	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+	for (i = 0; i < q_vector->rxr_count; i++) {
+		ixgbe_update_rx_dca(adapter, adapter->rx_ring[r_idx], cpu);
+		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+				      r_idx + 1);
+	}
+
+	q_vector->cpu = cpu;
+out_no_update:
 	put_cpu();
 }
 
 static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
 {
+	int num_q_vectors;
 	int i;
 
 	if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
@@ -898,22 +1019,25 @@
 	/* always use CB2 mode, difference is masked in the CB driver */
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
 
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		adapter->tx_ring[i]->cpu = -1;
-		ixgbe_update_tx_dca(adapter, adapter->tx_ring[i]);
-	}
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		adapter->rx_ring[i]->cpu = -1;
-		ixgbe_update_rx_dca(adapter, adapter->rx_ring[i]);
+	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+		num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+	else
+		num_q_vectors = 1;
+
+	for (i = 0; i < num_q_vectors; i++) {
+		adapter->q_vector[i]->cpu = -1;
+		ixgbe_update_dca(adapter->q_vector[i]);
 	}
 }
 
 static int __ixgbe_notify_dca(struct device *dev, void *data)
 {
-	struct net_device *netdev = dev_get_drvdata(dev);
-	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
 	unsigned long event = *(unsigned long *)data;
 
+	if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
+		return 0;
+
 	switch (event) {
 	case DCA_PROVIDER_ADD:
 		/* if we're already enabled, don't do it again */
@@ -1012,8 +1136,7 @@
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
 }
 
-static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
-					 struct ixgbe_ring *rx_ring, u32 val)
+static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
 {
 	/*
 	 * Force memory writes to complete before letting h/w
@@ -1022,72 +1145,81 @@
 	 * such as IA-64).
 	 */
 	wmb();
-	IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->reg_idx), val);
+	writel(val, rx_ring->tail);
 }
 
 /**
  * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
- * @adapter: address of board private structure
+ * @rx_ring: ring to place buffers on
+ * @cleaned_count: number of buffers to replace
  **/
-void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
-			    struct ixgbe_ring *rx_ring,
-			    int cleaned_count)
+void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
 {
-	struct net_device *netdev = adapter->netdev;
-	struct pci_dev *pdev = adapter->pdev;
 	union ixgbe_adv_rx_desc *rx_desc;
 	struct ixgbe_rx_buffer *bi;
-	unsigned int i;
-	unsigned int bufsz = rx_ring->rx_buf_len;
+	struct sk_buff *skb;
+	u16 i = rx_ring->next_to_use;
 
-	i = rx_ring->next_to_use;
-	bi = &rx_ring->rx_buffer_info[i];
+	/* do nothing if no valid netdev defined */
+	if (!rx_ring->netdev)
+		return;
 
 	while (cleaned_count--) {
 		rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
+		bi = &rx_ring->rx_buffer_info[i];
+		skb = bi->skb;
 
-		if (!bi->page_dma &&
-		    (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)) {
-			if (!bi->page) {
-				bi->page = netdev_alloc_page(netdev);
-				if (!bi->page) {
-					adapter->alloc_rx_page_failed++;
-					goto no_buffers;
-				}
-				bi->page_offset = 0;
-			} else {
-				/* use a half page if we're re-using */
-				bi->page_offset ^= (PAGE_SIZE / 2);
-			}
-
-			bi->page_dma = dma_map_page(&pdev->dev, bi->page,
-						    bi->page_offset,
-						    (PAGE_SIZE / 2),
-						    DMA_FROM_DEVICE);
-		}
-
-		if (!bi->skb) {
-			struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev,
-									bufsz);
-			bi->skb = skb;
-
+		if (!skb) {
+			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+							rx_ring->rx_buf_len);
 			if (!skb) {
-				adapter->alloc_rx_buff_failed++;
+				rx_ring->rx_stats.alloc_rx_buff_failed++;
 				goto no_buffers;
 			}
 			/* initialize queue mapping */
 			skb_record_rx_queue(skb, rx_ring->queue_index);
+			bi->skb = skb;
 		}
 
 		if (!bi->dma) {
-			bi->dma = dma_map_single(&pdev->dev,
-						 bi->skb->data,
+			bi->dma = dma_map_single(rx_ring->dev,
+						 skb->data,
 						 rx_ring->rx_buf_len,
 						 DMA_FROM_DEVICE);
+			if (dma_mapping_error(rx_ring->dev, bi->dma)) {
+				rx_ring->rx_stats.alloc_rx_buff_failed++;
+				bi->dma = 0;
+				goto no_buffers;
+			}
 		}
-		/* Refresh the desc even if buffer_addrs didn't change because
-		 * each write-back erases this info. */
-		if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+
+		if (ring_is_ps_enabled(rx_ring)) {
+			if (!bi->page) {
+				bi->page = netdev_alloc_page(rx_ring->netdev);
+				if (!bi->page) {
+					rx_ring->rx_stats.alloc_rx_page_failed++;
+					goto no_buffers;
+				}
+			}
+
+			if (!bi->page_dma) {
+				/* use a half page if we're re-using */
+				bi->page_offset ^= PAGE_SIZE / 2;
+				bi->page_dma = dma_map_page(rx_ring->dev,
+							    bi->page,
+							    bi->page_offset,
+							    PAGE_SIZE / 2,
+							    DMA_FROM_DEVICE);
+				if (dma_mapping_error(rx_ring->dev,
+						      bi->page_dma)) {
+					rx_ring->rx_stats.alloc_rx_page_failed++;
+					bi->page_dma = 0;
+					goto no_buffers;
+				}
+			}
+
+			/* Refresh the desc even if buffer_addrs didn't change
+			 * because each write-back erases this info. */
 			rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
 			rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
 		} else {
@@ -1098,56 +1230,48 @@
 		i++;
 		if (i == rx_ring->count)
 			i = 0;
-		bi = &rx_ring->rx_buffer_info[i];
 	}
 
 no_buffers:
 	if (rx_ring->next_to_use != i) {
 		rx_ring->next_to_use = i;
-		if (i-- == 0)
-			i = (rx_ring->count - 1);
-
-		ixgbe_release_rx_desc(&adapter->hw, rx_ring, i);
+		ixgbe_release_rx_desc(rx_ring, i);
 	}
 }
 
-static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
+static inline u16 ixgbe_get_hlen(union ixgbe_adv_rx_desc *rx_desc)
 {
-	return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
-}
-
-static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
-{
-	return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
-}
-
-static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
-{
-	return (le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
-		IXGBE_RXDADV_RSCCNT_MASK) >>
-		IXGBE_RXDADV_RSCCNT_SHIFT;
+	/* HW will not DMA in data larger than the given buffer, even if it
+	 * parses the (NFS, of course) header to be larger.  In that case, it
+	 * fills the header buffer and spills the rest into the page.
+	 */
+	u16 hdr_info = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info);
+	u16 hlen = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
+		    IXGBE_RXDADV_HDRBUFLEN_SHIFT;
+	if (hlen > IXGBE_RX_HDR_SIZE)
+		hlen = IXGBE_RX_HDR_SIZE;
+	return hlen;
 }
 
 /**
  * ixgbe_transform_rsc_queue - change rsc queue into a full packet
  * @skb: pointer to the last skb in the rsc queue
- * @count: pointer to number of packets coalesced in this context
  *
  * This function changes a queue full of hw rsc buffers into a completed
  * packet.  It uses the ->prev pointers to find the first packet and then
  * turns it into the frag list owner.
  **/
-static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
-							u64 *count)
+static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb)
 {
 	unsigned int frag_list_size = 0;
+	unsigned int skb_cnt = 1;
 
 	while (skb->prev) {
 		struct sk_buff *prev = skb->prev;
 		frag_list_size += skb->len;
 		skb->prev = NULL;
 		skb = prev;
-		*count += 1;
+		skb_cnt++;
 	}
 
 	skb_shinfo(skb)->frag_list = skb->next;
@@ -1155,68 +1279,59 @@
 	skb->len += frag_list_size;
 	skb->data_len += frag_list_size;
 	skb->truesize += frag_list_size;
+	IXGBE_RSC_CB(skb)->skb_cnt = skb_cnt;
+
 	return skb;
 }
 
-struct ixgbe_rsc_cb {
-	dma_addr_t dma;
-	bool delay_unmap;
-};
+static inline bool ixgbe_get_rsc_state(union ixgbe_adv_rx_desc *rx_desc)
+{
+	return !!(le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
+		IXGBE_RXDADV_RSCCNT_MASK);
+}
 
-#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
-
-static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
+static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 			       struct ixgbe_ring *rx_ring,
 			       int *work_done, int work_to_do)
 {
 	struct ixgbe_adapter *adapter = q_vector->adapter;
-	struct pci_dev *pdev = adapter->pdev;
 	union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
 	struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
 	struct sk_buff *skb;
-	unsigned int i, rsc_count = 0;
-	u32 len, staterr;
-	u16 hdr_info;
-	bool cleaned = false;
-	int cleaned_count = 0;
 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+	const int current_node = numa_node_id();
 #ifdef IXGBE_FCOE
 	int ddp_bytes = 0;
 #endif /* IXGBE_FCOE */
+	u32 staterr;
+	u16 i;
+	u16 cleaned_count = 0;
+	bool pkt_is_rsc = false;
 
 	i = rx_ring->next_to_clean;
 	rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
 	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
-	rx_buffer_info = &rx_ring->rx_buffer_info[i];
 
 	while (staterr & IXGBE_RXD_STAT_DD) {
 		u32 upper_len = 0;
-		if (*work_done >= work_to_do)
-			break;
-		(*work_done)++;
 
 		rmb(); /* read descriptor and rx_buffer_info after status DD */
-		if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
-			hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
-			len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
-			       IXGBE_RXDADV_HDRBUFLEN_SHIFT;
-			upper_len = le16_to_cpu(rx_desc->wb.upper.length);
-			if ((len > IXGBE_RX_HDR_SIZE) ||
-			    (upper_len && !(hdr_info & IXGBE_RXDADV_SPH)))
-				len = IXGBE_RX_HDR_SIZE;
-		} else {
-			len = le16_to_cpu(rx_desc->wb.upper.length);
-		}
 
-		cleaned = true;
+		rx_buffer_info = &rx_ring->rx_buffer_info[i];
+
 		skb = rx_buffer_info->skb;
-		prefetch(skb->data);
 		rx_buffer_info->skb = NULL;
+		prefetch(skb->data);
 
+		if (ring_is_rsc_enabled(rx_ring))
+			pkt_is_rsc = ixgbe_get_rsc_state(rx_desc);
+
+		/* if this is an skb from a previous receive, DMA will be 0 */
 		if (rx_buffer_info->dma) {
-			if ((adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
-			    (!(staterr & IXGBE_RXD_STAT_EOP)) &&
-				 (!(skb->prev))) {
+			u16 hlen;
+			if (pkt_is_rsc &&
+			    !(staterr & IXGBE_RXD_STAT_EOP) &&
+			    !skb->prev) {
 				/*
 				 * When HWRSC is enabled, delay unmapping
 				 * of the first packet. It carries the
@@ -1227,29 +1342,42 @@
 				IXGBE_RSC_CB(skb)->delay_unmap = true;
 				IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
 			} else {
-				dma_unmap_single(&pdev->dev,
+				dma_unmap_single(rx_ring->dev,
 						 rx_buffer_info->dma,
 						 rx_ring->rx_buf_len,
 						 DMA_FROM_DEVICE);
 			}
 			rx_buffer_info->dma = 0;
-			skb_put(skb, len);
+
+			if (ring_is_ps_enabled(rx_ring)) {
+				hlen = ixgbe_get_hlen(rx_desc);
+				upper_len = le16_to_cpu(rx_desc->wb.upper.length);
+			} else {
+				hlen = le16_to_cpu(rx_desc->wb.upper.length);
+			}
+
+			skb_put(skb, hlen);
+		} else {
+			/* assume packet split since header is unmapped */
+			upper_len = le16_to_cpu(rx_desc->wb.upper.length);
 		}
 
 		if (upper_len) {
-			dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
-				       PAGE_SIZE / 2, DMA_FROM_DEVICE);
+			dma_unmap_page(rx_ring->dev,
+				       rx_buffer_info->page_dma,
+				       PAGE_SIZE / 2,
+				       DMA_FROM_DEVICE);
 			rx_buffer_info->page_dma = 0;
 			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
 					   rx_buffer_info->page,
 					   rx_buffer_info->page_offset,
 					   upper_len);
 
-			if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
-			    (page_count(rx_buffer_info->page) != 1))
-				rx_buffer_info->page = NULL;
-			else
+			if ((page_count(rx_buffer_info->page) == 1) &&
+			    (page_to_nid(rx_buffer_info->page) == current_node))
 				get_page(rx_buffer_info->page);
+			else
+				rx_buffer_info->page = NULL;
 
 			skb->len += upper_len;
 			skb->data_len += upper_len;
@@ -1264,10 +1392,7 @@
 		prefetch(next_rxd);
 		cleaned_count++;
 
-		if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
-			rsc_count = ixgbe_get_rsc_count(rx_desc);
-
-		if (rsc_count) {
+		if (pkt_is_rsc) {
 			u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
 				     IXGBE_RXDADV_NEXTP_SHIFT;
 			next_buffer = &rx_ring->rx_buffer_info[nextp];
@@ -1275,32 +1400,8 @@
 			next_buffer = &rx_ring->rx_buffer_info[i];
 		}
 
-		if (staterr & IXGBE_RXD_STAT_EOP) {
-			if (skb->prev)
-				skb = ixgbe_transform_rsc_queue(skb,
-								&(rx_ring->rsc_count));
-			if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
-				if (IXGBE_RSC_CB(skb)->delay_unmap) {
-					dma_unmap_single(&pdev->dev,
-							 IXGBE_RSC_CB(skb)->dma,
-							 rx_ring->rx_buf_len,
-							 DMA_FROM_DEVICE);
-					IXGBE_RSC_CB(skb)->dma = 0;
-					IXGBE_RSC_CB(skb)->delay_unmap = false;
-				}
-				if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
-					rx_ring->rsc_count +=
-						skb_shinfo(skb)->nr_frags;
-				else
-					rx_ring->rsc_count++;
-				rx_ring->rsc_flush++;
-			}
-			u64_stats_update_begin(&rx_ring->syncp);
-			rx_ring->stats.packets++;
-			rx_ring->stats.bytes += skb->len;
-			u64_stats_update_end(&rx_ring->syncp);
-		} else {
-			if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+		if (!(staterr & IXGBE_RXD_STAT_EOP)) {
+			if (ring_is_ps_enabled(rx_ring)) {
 				rx_buffer_info->skb = next_buffer->skb;
 				rx_buffer_info->dma = next_buffer->dma;
 				next_buffer->skb = skb;
@@ -1309,12 +1410,45 @@
 				skb->next = next_buffer->skb;
 				skb->next->prev = skb;
 			}
-			rx_ring->non_eop_descs++;
+			rx_ring->rx_stats.non_eop_descs++;
 			goto next_desc;
 		}
 
+		if (skb->prev) {
+			skb = ixgbe_transform_rsc_queue(skb);
+			/* if we got here without RSC the packet is invalid */
+			if (!pkt_is_rsc) {
+				__pskb_trim(skb, 0);
+				rx_buffer_info->skb = skb;
+				goto next_desc;
+			}
+		}
+
+		if (ring_is_rsc_enabled(rx_ring)) {
+			if (IXGBE_RSC_CB(skb)->delay_unmap) {
+				dma_unmap_single(rx_ring->dev,
+						 IXGBE_RSC_CB(skb)->dma,
+						 rx_ring->rx_buf_len,
+						 DMA_FROM_DEVICE);
+				IXGBE_RSC_CB(skb)->dma = 0;
+				IXGBE_RSC_CB(skb)->delay_unmap = false;
+			}
+		}
+		if (pkt_is_rsc) {
+			if (ring_is_ps_enabled(rx_ring))
+				rx_ring->rx_stats.rsc_count +=
+					skb_shinfo(skb)->nr_frags;
+			else
+				rx_ring->rx_stats.rsc_count +=
+					IXGBE_RSC_CB(skb)->skb_cnt;
+			rx_ring->rx_stats.rsc_flush++;
+		}
+
+		/* ERR_MASK will only have valid bits if EOP set */
 		if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
-			dev_kfree_skb_irq(skb);
+			/* trim packet back to size 0 and recycle it */
+			__pskb_trim(skb, 0);
+			rx_buffer_info->skb = skb;
 			goto next_desc;
 		}
 
@@ -1324,7 +1458,7 @@
 		total_rx_bytes += skb->len;
 		total_rx_packets++;
 
-		skb->protocol = eth_type_trans(skb, adapter->netdev);
+		skb->protocol = eth_type_trans(skb, rx_ring->netdev);
 #ifdef IXGBE_FCOE
 		/* if ddp, not passing to ULD unless for FCP_RSP or error */
 		if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
@@ -1338,16 +1472,18 @@
 next_desc:
 		rx_desc->wb.upper.status_error = 0;
 
+		(*work_done)++;
+		if (*work_done >= work_to_do)
+			break;
+
 		/* return some buffers to hardware, one at a time is too slow */
 		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
-			ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
+			ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
 			cleaned_count = 0;
 		}
 
 		/* use prefetched values */
 		rx_desc = next_rxd;
-		rx_buffer_info = &rx_ring->rx_buffer_info[i];
-
 		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
 	}
 
@@ -1355,14 +1491,14 @@
 	cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
 
 	if (cleaned_count)
-		ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
+		ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
 
 #ifdef IXGBE_FCOE
 	/* include DDPed FCoE data */
 	if (ddp_bytes > 0) {
 		unsigned int mss;
 
-		mss = adapter->netdev->mtu - sizeof(struct fcoe_hdr) -
+		mss = rx_ring->netdev->mtu - sizeof(struct fcoe_hdr) -
 			sizeof(struct fc_frame_header) -
 			sizeof(struct fcoe_crc_eof);
 		if (mss > 512)
@@ -1374,8 +1510,10 @@
 
 	rx_ring->total_packets += total_rx_packets;
 	rx_ring->total_bytes += total_rx_bytes;
-
-	return cleaned;
+	u64_stats_update_begin(&rx_ring->syncp);
+	rx_ring->stats.packets += total_rx_packets;
+	rx_ring->stats.bytes += total_rx_bytes;
+	u64_stats_update_end(&rx_ring->syncp);
 }
 
 static int ixgbe_clean_rxonly(struct napi_struct *, int);
@@ -1389,7 +1527,7 @@
 static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_q_vector *q_vector;
-	int i, j, q_vectors, v_idx, r_idx;
+	int i, q_vectors, v_idx, r_idx;
 	u32 mask;
 
 	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
@@ -1405,8 +1543,8 @@
 				       adapter->num_rx_queues);
 
 		for (i = 0; i < q_vector->rxr_count; i++) {
-			j = adapter->rx_ring[r_idx]->reg_idx;
-			ixgbe_set_ivar(adapter, 0, j, v_idx);
+			u8 reg_idx = adapter->rx_ring[r_idx]->reg_idx;
+			ixgbe_set_ivar(adapter, 0, reg_idx, v_idx);
 			r_idx = find_next_bit(q_vector->rxr_idx,
 					      adapter->num_rx_queues,
 					      r_idx + 1);
@@ -1415,8 +1553,8 @@
 				       adapter->num_tx_queues);
 
 		for (i = 0; i < q_vector->txr_count; i++) {
-			j = adapter->tx_ring[r_idx]->reg_idx;
-			ixgbe_set_ivar(adapter, 1, j, v_idx);
+			u8 reg_idx = adapter->tx_ring[r_idx]->reg_idx;
+			ixgbe_set_ivar(adapter, 1, reg_idx, v_idx);
 			r_idx = find_next_bit(q_vector->txr_idx,
 					      adapter->num_tx_queues,
 					      r_idx + 1);
@@ -1447,11 +1585,19 @@
 		}
 	}
 
-	if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+	switch (adapter->hw.mac.type) {
+	case ixgbe_mac_82598EB:
 		ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
 			       v_idx);
-	else if (adapter->hw.mac.type == ixgbe_mac_82599EB)
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
 		ixgbe_set_ivar(adapter, -1, 1, v_idx);
+		break;
+
+	default:
+		break;
+	}
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
 
 	/* set up to autoclear timer, and the vectors */
@@ -1547,12 +1693,15 @@
 	int v_idx = q_vector->v_idx;
 	u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr);
 
-	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+	switch (adapter->hw.mac.type) {
+	case ixgbe_mac_82598EB:
 		/* must write high and low 16 bits to reset counter */
 		itr_reg |= (itr_reg << 16);
-	} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
 		/*
-		 * 82599 can support a value of zero, so allow it for
+		 * 82599 and X540 can support a value of zero, so allow it for
 		 * max interrupt rate, but there is an errata where it can
 		 * not be zero with RSC
 		 */
@@ -1565,6 +1714,9 @@
 		 * immediate assertion of the interrupt
 		 */
 		itr_reg |= IXGBE_EITR_CNT_WDIS;
+		break;
+	default:
+		break;
 	}
 	IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
 }
@@ -1572,14 +1724,13 @@
 static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
 {
 	struct ixgbe_adapter *adapter = q_vector->adapter;
+	int i, r_idx;
 	u32 new_itr;
 	u8 current_itr, ret_itr;
-	int i, r_idx;
-	struct ixgbe_ring *rx_ring, *tx_ring;
 
 	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
 	for (i = 0; i < q_vector->txr_count; i++) {
-		tx_ring = adapter->tx_ring[r_idx];
+		struct ixgbe_ring *tx_ring = adapter->tx_ring[r_idx];
 		ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
 					   q_vector->tx_itr,
 					   tx_ring->total_packets,
@@ -1594,7 +1745,7 @@
 
 	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
 	for (i = 0; i < q_vector->rxr_count; i++) {
-		rx_ring = adapter->rx_ring[r_idx];
+		struct ixgbe_ring *rx_ring = adapter->rx_ring[r_idx];
 		ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
 					   q_vector->rx_itr,
 					   rx_ring->total_packets,
@@ -1625,7 +1776,7 @@
 
 	if (new_itr != q_vector->eitr) {
 		/* do an exponential smoothing */
-		new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
+		new_itr = ((q_vector->eitr * 9) + new_itr)/10;
 
 		/* save the algorithm value here */
 		q_vector->eitr = new_itr;
@@ -1693,17 +1844,18 @@
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 
+	if (eicr & IXGBE_EICR_GPI_SDP2) {
+		/* Clear the interrupt */
+		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
+		if (!test_bit(__IXGBE_DOWN, &adapter->state))
+			schedule_work(&adapter->sfp_config_module_task);
+	}
+
 	if (eicr & IXGBE_EICR_GPI_SDP1) {
 		/* Clear the interrupt */
 		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
-		schedule_work(&adapter->multispeed_fiber_task);
-	} else if (eicr & IXGBE_EICR_GPI_SDP2) {
-		/* Clear the interrupt */
-		IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
-		schedule_work(&adapter->sfp_config_module_task);
-	} else {
-		/* Interrupt isn't for us... */
-		return;
+		if (!test_bit(__IXGBE_DOWN, &adapter->state))
+			schedule_work(&adapter->multispeed_fiber_task);
 	}
 }
 
@@ -1743,16 +1895,16 @@
 	if (eicr & IXGBE_EICR_MAILBOX)
 		ixgbe_msg_task(adapter);
 
-	if (hw->mac.type == ixgbe_mac_82598EB)
-		ixgbe_check_fan_failure(adapter, eicr);
-
-	if (hw->mac.type == ixgbe_mac_82599EB) {
+	switch (hw->mac.type) {
+	case ixgbe_mac_82599EB:
 		ixgbe_check_sfp_event(adapter, eicr);
-		adapter->interrupt_event = eicr;
 		if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
-		    ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)))
+		    ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
+			adapter->interrupt_event = eicr;
 			schedule_work(&adapter->check_overtemp_task);
-
+		}
+		/* now fallthrough to handle Flow Director */
+	case ixgbe_mac_X540:
 		/* Handle Flow Director Full threshold interrupt */
 		if (eicr & IXGBE_EICR_FLOW_DIR) {
 			int i;
@@ -1762,12 +1914,18 @@
 			for (i = 0; i < adapter->num_tx_queues; i++) {
 				struct ixgbe_ring *tx_ring =
 							    adapter->tx_ring[i];
-				if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
-						       &tx_ring->reinit_state))
+				if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
+						       &tx_ring->state))
 					schedule_work(&adapter->fdir_reinit_task);
 			}
 		}
+		break;
+	default:
+		break;
 	}
+
+	ixgbe_check_fan_failure(adapter, eicr);
+
 	if (!test_bit(__IXGBE_DOWN, &adapter->state))
 		IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
 
@@ -1778,15 +1936,24 @@
 					   u64 qmask)
 {
 	u32 mask;
+	struct ixgbe_hw *hw = &adapter->hw;
 
-	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
 		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
-	} else {
+		IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
 		mask = (qmask & 0xFFFFFFFF);
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask);
+		if (mask)
+			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
 		mask = (qmask >> 32);
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask);
+		if (mask)
+			IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
+		break;
+	default:
+		break;
 	}
 	/* skip the flush */
 }
@@ -1795,15 +1962,24 @@
 					    u64 qmask)
 {
 	u32 mask;
+	struct ixgbe_hw *hw = &adapter->hw;
 
-	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
 		mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask);
-	} else {
+		IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
 		mask = (qmask & 0xFFFFFFFF);
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), mask);
+		if (mask)
+			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
 		mask = (qmask >> 32);
-		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), mask);
+		if (mask)
+			IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
+		break;
+	default:
+		break;
 	}
 	/* skip the flush */
 }
@@ -1846,8 +2022,13 @@
 	int r_idx;
 	int i;
 
+#ifdef CONFIG_IXGBE_DCA
+	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+		ixgbe_update_dca(q_vector);
+#endif
+
 	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
-	for (i = 0;  i < q_vector->rxr_count; i++) {
+	for (i = 0; i < q_vector->rxr_count; i++) {
 		rx_ring = adapter->rx_ring[r_idx];
 		rx_ring->total_bytes = 0;
 		rx_ring->total_packets = 0;
@@ -1858,7 +2039,6 @@
 	if (!q_vector->rxr_count)
 		return IRQ_HANDLED;
 
-	/* disable interrupts on this vector only */
 	/* EIAM disabled interrupts (on this vector) for us */
 	napi_schedule(&q_vector->napi);
 
@@ -1917,13 +2097,14 @@
 	int work_done = 0;
 	long r_idx;
 
-	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
-	rx_ring = adapter->rx_ring[r_idx];
 #ifdef CONFIG_IXGBE_DCA
 	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
-		ixgbe_update_rx_dca(adapter, rx_ring);
+		ixgbe_update_dca(q_vector);
 #endif
 
+	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+	rx_ring = adapter->rx_ring[r_idx];
+
 	ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
 
 	/* If all Rx work done, exit the polling mode */
@@ -1957,13 +2138,14 @@
 	long r_idx;
 	bool tx_clean_complete = true;
 
+#ifdef CONFIG_IXGBE_DCA
+	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+		ixgbe_update_dca(q_vector);
+#endif
+
 	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
 	for (i = 0; i < q_vector->txr_count; i++) {
 		ring = adapter->tx_ring[r_idx];
-#ifdef CONFIG_IXGBE_DCA
-		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
-			ixgbe_update_tx_dca(adapter, ring);
-#endif
 		tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
 		r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
 				      r_idx + 1);
@@ -1976,10 +2158,6 @@
 	r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
 	for (i = 0; i < q_vector->rxr_count; i++) {
 		ring = adapter->rx_ring[r_idx];
-#ifdef CONFIG_IXGBE_DCA
-		if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
-			ixgbe_update_rx_dca(adapter, ring);
-#endif
 		ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
 		r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
 				      r_idx + 1);
@@ -2018,13 +2196,14 @@
 	int work_done = 0;
 	long r_idx;
 
-	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
-	tx_ring = adapter->tx_ring[r_idx];
 #ifdef CONFIG_IXGBE_DCA
 	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
-		ixgbe_update_tx_dca(adapter, tx_ring);
+		ixgbe_update_dca(q_vector);
 #endif
 
+	r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+	tx_ring = adapter->tx_ring[r_idx];
+
 	if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
 		work_done = budget;
 
@@ -2045,24 +2224,27 @@
 				     int r_idx)
 {
 	struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
+	struct ixgbe_ring *rx_ring = a->rx_ring[r_idx];
 
 	set_bit(r_idx, q_vector->rxr_idx);
 	q_vector->rxr_count++;
+	rx_ring->q_vector = q_vector;
 }
 
 static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
 				     int t_idx)
 {
 	struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
+	struct ixgbe_ring *tx_ring = a->tx_ring[t_idx];
 
 	set_bit(t_idx, q_vector->txr_idx);
 	q_vector->txr_count++;
+	tx_ring->q_vector = q_vector;
 }
 
 /**
  * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
  * @adapter: board private structure to initialize
- * @vectors: allotted vector count for descriptor rings
  *
  * This function maps descriptor rings to the queue-specific vectors
  * we were allotted through the MSI-X enabling code.  Ideally, we'd have
@@ -2070,9 +2252,9 @@
  * group the rings as "efficiently" as possible.  You would add new
  * mapping configurations in here.
  **/
-static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
-				      int vectors)
+static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter)
 {
+	int q_vectors;
 	int v_start = 0;
 	int rxr_idx = 0, txr_idx = 0;
 	int rxr_remaining = adapter->num_rx_queues;
@@ -2085,11 +2267,13 @@
 	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
 		goto out;
 
+	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
 	/*
 	 * The ideal configuration...
 	 * We have enough vectors to map one per queue.
 	 */
-	if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
+	if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
 		for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
 			map_vector_to_rxq(adapter, v_start, rxr_idx);
 
@@ -2105,23 +2289,20 @@
 	 * multiple queues per vector.
 	 */
 	/* Re-adjusting *qpv takes care of the remainder. */
-	for (i = v_start; i < vectors; i++) {
-		rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i);
+	for (i = v_start; i < q_vectors; i++) {
+		rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
 		for (j = 0; j < rqpv; j++) {
 			map_vector_to_rxq(adapter, i, rxr_idx);
 			rxr_idx++;
 			rxr_remaining--;
 		}
-	}
-	for (i = v_start; i < vectors; i++) {
-		tqpv = DIV_ROUND_UP(txr_remaining, vectors - i);
+		tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
 		for (j = 0; j < tqpv; j++) {
 			map_vector_to_txq(adapter, i, txr_idx);
 			txr_idx++;
 			txr_remaining--;
 		}
 	}
-
 out:
 	return err;
 }
@@ -2143,30 +2324,36 @@
 	/* Decrement for Other and TCP Timer vectors */
 	q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 
-	/* Map the Tx/Rx rings to the vectors we were allotted. */
-	err = ixgbe_map_rings_to_vectors(adapter, q_vectors);
+	err = ixgbe_map_rings_to_vectors(adapter);
 	if (err)
-		goto out;
+		return err;
 
-#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
-			 (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
-			 &ixgbe_msix_clean_many)
+#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count)        \
+					  ? &ixgbe_msix_clean_many : \
+			  (_v)->rxr_count ? &ixgbe_msix_clean_rx   : \
+			  (_v)->txr_count ? &ixgbe_msix_clean_tx   : \
+			  NULL)
 	for (vector = 0; vector < q_vectors; vector++) {
-		handler = SET_HANDLER(adapter->q_vector[vector]);
+		struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
+		handler = SET_HANDLER(q_vector);
 
 		if (handler == &ixgbe_msix_clean_rx) {
-			sprintf(adapter->name[vector], "%s-%s-%d",
-				netdev->name, "rx", ri++);
+			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+			         "%s-%s-%d", netdev->name, "rx", ri++);
 		} else if (handler == &ixgbe_msix_clean_tx) {
-			sprintf(adapter->name[vector], "%s-%s-%d",
-				netdev->name, "tx", ti++);
-		} else
-			sprintf(adapter->name[vector], "%s-%s-%d",
-				netdev->name, "TxRx", vector);
-
+			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+			         "%s-%s-%d", netdev->name, "tx", ti++);
+		} else if (handler == &ixgbe_msix_clean_many) {
+			snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+			         "%s-%s-%d", netdev->name, "TxRx", ri++);
+			ti++;
+		} else {
+			/* skip this unused q_vector */
+			continue;
+		}
 		err = request_irq(adapter->msix_entries[vector].vector,
-				  handler, 0, adapter->name[vector],
-				  adapter->q_vector[vector]);
+				  handler, 0, q_vector->name,
+				  q_vector);
 		if (err) {
 			e_err(probe, "request_irq failed for MSIX interrupt "
 			      "Error: %d\n", err);
@@ -2174,9 +2361,9 @@
 		}
 	}
 
-	sprintf(adapter->name[vector], "%s:lsc", netdev->name);
+	sprintf(adapter->lsc_int_name, "%s:lsc", netdev->name);
 	err = request_irq(adapter->msix_entries[vector].vector,
-			  ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
+			  ixgbe_msix_lsc, 0, adapter->lsc_int_name, netdev);
 	if (err) {
 		e_err(probe, "request_irq for msix_lsc failed: %d\n", err);
 		goto free_queue_irqs;
@@ -2192,17 +2379,16 @@
 	pci_disable_msix(adapter->pdev);
 	kfree(adapter->msix_entries);
 	adapter->msix_entries = NULL;
-out:
 	return err;
 }
 
 static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
 {
 	struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
-	u8 current_itr;
-	u32 new_itr = q_vector->eitr;
 	struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
 	struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
+	u32 new_itr = q_vector->eitr;
+	u8 current_itr;
 
 	q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
 					    q_vector->tx_itr,
@@ -2232,9 +2418,9 @@
 
 	if (new_itr != q_vector->eitr) {
 		/* do an exponential smoothing */
-		new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
+		new_itr = ((q_vector->eitr * 9) + new_itr)/10;
 
-		/* save the algorithm value here, not the smoothed one */
+		/* save the algorithm value here */
 		q_vector->eitr = new_itr;
 
 		ixgbe_write_eitr(q_vector);
@@ -2255,12 +2441,17 @@
 		mask |= IXGBE_EIMS_GPI_SDP0;
 	if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
 		mask |= IXGBE_EIMS_GPI_SDP1;
-	if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+	switch (adapter->hw.mac.type) {
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
 		mask |= IXGBE_EIMS_ECC;
 		mask |= IXGBE_EIMS_GPI_SDP1;
 		mask |= IXGBE_EIMS_GPI_SDP2;
 		if (adapter->num_vfs)
 			mask |= IXGBE_EIMS_MAILBOX;
+		break;
+	default:
+		break;
 	}
 	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
 	    adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
@@ -2316,13 +2507,20 @@
 	if (eicr & IXGBE_EICR_LSC)
 		ixgbe_check_lsc(adapter);
 
-	if (hw->mac.type == ixgbe_mac_82599EB)
+	switch (hw->mac.type) {
+	case ixgbe_mac_82599EB:
 		ixgbe_check_sfp_event(adapter, eicr);
+		if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
+		    ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
+			adapter->interrupt_event = eicr;
+			schedule_work(&adapter->check_overtemp_task);
+		}
+		break;
+	default:
+		break;
+	}
 
 	ixgbe_check_fan_failure(adapter, eicr);
-	if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
-	    ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)))
-		schedule_work(&adapter->check_overtemp_task);
 
 	if (napi_schedule_prep(&(q_vector->napi))) {
 		adapter->tx_ring[0]->total_packets = 0;
@@ -2415,14 +2613,20 @@
  **/
 static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
 {
-	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+	switch (adapter->hw.mac.type) {
+	case ixgbe_mac_82598EB:
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
-	} else {
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
 		if (adapter->num_vfs > 32)
 			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
+		break;
+	default:
+		break;
 	}
 	IXGBE_WRITE_FLUSH(&adapter->hw);
 	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
@@ -2468,7 +2672,7 @@
 	u64 tdba = ring->dma;
 	int wait_loop = 10;
 	u32 txdctl;
-	u16 reg_idx = ring->reg_idx;
+	u8 reg_idx = ring->reg_idx;
 
 	/* disable queue to avoid issues while updating state */
 	txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
@@ -2483,8 +2687,7 @@
 			ring->count * sizeof(union ixgbe_adv_tx_desc));
 	IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
 	IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
-	ring->head = IXGBE_TDH(reg_idx);
-	ring->tail = IXGBE_TDT(reg_idx);
+	ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx);
 
 	/* configure fetching thresholds */
 	if (adapter->rx_itr_setting == 0) {
@@ -2500,7 +2703,16 @@
 	}
 
 	/* reinitialize flow director state */
-	set_bit(__IXGBE_FDIR_INIT_DONE, &ring->reinit_state);
+	if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
+	    adapter->atr_sample_rate) {
+		ring->atr_sample_rate = adapter->atr_sample_rate;
+		ring->atr_count = 0;
+		set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
+	} else {
+		ring->atr_sample_rate = 0;
+	}
+
+	clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
 
 	/* enable queue */
 	txdctl |= IXGBE_TXDCTL_ENABLE;
@@ -2591,16 +2803,22 @@
 				   struct ixgbe_ring *rx_ring)
 {
 	u32 srrctl;
-	int index;
-	struct ixgbe_ring_feature *feature = adapter->ring_feature;
+	u8 reg_idx = rx_ring->reg_idx;
 
-	index = rx_ring->reg_idx;
-	if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
-		unsigned long mask;
-		mask = (unsigned long) feature[RING_F_RSS].mask;
-		index = index & mask;
+	switch (adapter->hw.mac.type) {
+	case ixgbe_mac_82598EB: {
+		struct ixgbe_ring_feature *feature = adapter->ring_feature;
+		const int mask = feature[RING_F_RSS].mask;
+		reg_idx = reg_idx & mask;
 	}
-	srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index));
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+	default:
+		break;
+	}
+
+	srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx));
 
 	srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
 	srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
@@ -2610,7 +2828,7 @@
 	srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
 		  IXGBE_SRRCTL_BSIZEHDR_MASK;
 
-	if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+	if (ring_is_ps_enabled(rx_ring)) {
 #if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
 		srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
 #else
@@ -2623,7 +2841,7 @@
 		srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
 	}
 
-	IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
+	IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx), srrctl);
 }
 
 static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
@@ -2693,19 +2911,36 @@
 }
 
 /**
+ * ixgbe_clear_rscctl - disable RSC for the indicated ring
+ * @adapter: address of board private structure
+ * @ring: structure containing ring specific data
+ **/
+void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
+                        struct ixgbe_ring *ring)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	u32 rscctrl;
+	u8 reg_idx = ring->reg_idx;
+
+	rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
+	rscctrl &= ~IXGBE_RSCCTL_RSCEN;
+	IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
+}
+
+/**
  * ixgbe_configure_rscctl - enable RSC for the indicated ring
  * @adapter:    address of board private structure
  * @index:      index of ring to set
  **/
-static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
+void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
 				   struct ixgbe_ring *ring)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 rscctrl;
 	int rx_buf_len;
-	u16 reg_idx = ring->reg_idx;
+	u8 reg_idx = ring->reg_idx;
 
-	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
+	if (!ring_is_rsc_enabled(ring))
 		return;
 
 	rx_buf_len = ring->rx_buf_len;
@@ -2716,7 +2951,7 @@
 	 * total size of max desc * buf_len is not greater
 	 * than 65535
 	 */
-	if (ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+	if (ring_is_ps_enabled(ring)) {
 #if (MAX_SKB_FRAGS > 16)
 		rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
 #elif (MAX_SKB_FRAGS > 8)
@@ -2769,9 +3004,9 @@
 				       struct ixgbe_ring *ring)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
-	int reg_idx = ring->reg_idx;
 	int wait_loop = IXGBE_MAX_RX_DESC_POLL;
 	u32 rxdctl;
+	u8 reg_idx = ring->reg_idx;
 
 	/* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */
 	if (hw->mac.type == ixgbe_mac_82598EB &&
@@ -2795,7 +3030,7 @@
 	struct ixgbe_hw *hw = &adapter->hw;
 	u64 rdba = ring->dma;
 	u32 rxdctl;
-	u16 reg_idx = ring->reg_idx;
+	u8 reg_idx = ring->reg_idx;
 
 	/* disable queue to avoid issues while updating state */
 	rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
@@ -2809,8 +3044,7 @@
 			ring->count * sizeof(union ixgbe_adv_rx_desc));
 	IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
 	IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
-	ring->head = IXGBE_RDH(reg_idx);
-	ring->tail = IXGBE_RDT(reg_idx);
+	ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx);
 
 	ixgbe_configure_srrctl(adapter, ring);
 	ixgbe_configure_rscctl(adapter, ring);
@@ -2832,7 +3066,7 @@
 	IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
 
 	ixgbe_rx_desc_queue_enable(adapter, ring);
-	ixgbe_alloc_rx_buffers(adapter, ring, IXGBE_DESC_UNUSED(ring));
+	ixgbe_alloc_rx_buffers(ring, IXGBE_DESC_UNUSED(ring));
 }
 
 static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
@@ -2955,24 +3189,32 @@
 		rx_ring->rx_buf_len = rx_buf_len;
 
 		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
-			rx_ring->flags |= IXGBE_RING_RX_PS_ENABLED;
+			set_ring_ps_enabled(rx_ring);
 		else
-			rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
+			clear_ring_ps_enabled(rx_ring);
+
+		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
+			set_ring_rsc_enabled(rx_ring);
+		else
+			clear_ring_rsc_enabled(rx_ring);
 
 #ifdef IXGBE_FCOE
 		if (netdev->features & NETIF_F_FCOE_MTU) {
 			struct ixgbe_ring_feature *f;
 			f = &adapter->ring_feature[RING_F_FCOE];
 			if ((i >= f->mask) && (i < f->mask + f->indices)) {
-				rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
+				clear_ring_ps_enabled(rx_ring);
 				if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)
 					rx_ring->rx_buf_len =
 						IXGBE_FCOE_JUMBO_FRAME_SIZE;
+			} else if (!ring_is_rsc_enabled(rx_ring) &&
+				   !ring_is_ps_enabled(rx_ring)) {
+				rx_ring->rx_buf_len =
+						IXGBE_FCOE_JUMBO_FRAME_SIZE;
 			}
 		}
 #endif /* IXGBE_FCOE */
 	}
-
 }
 
 static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
@@ -2995,6 +3237,7 @@
 		rdrxctl |= IXGBE_RDRXCTL_MVMEN;
 		break;
 	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
 		/* Disable RSC for ACK packets */
 		IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
 		   (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
@@ -3122,6 +3365,7 @@
 		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
 		break;
 	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
 		for (i = 0; i < adapter->num_rx_queues; i++) {
 			j = adapter->rx_ring[i]->reg_idx;
 			vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
@@ -3151,6 +3395,7 @@
 		IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
 		break;
 	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
 		for (i = 0; i < adapter->num_rx_queues; i++) {
 			j = adapter->rx_ring[i]->reg_idx;
 			vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
@@ -3348,8 +3593,6 @@
 {
 	struct ixgbe_hw *hw = &adapter->hw;
 	int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
-	u32 txdctl;
-	int i, j;
 
 	if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
 		if (hw->mac.type == ixgbe_mac_82598EB)
@@ -3365,25 +3608,18 @@
 		max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
 #endif
 
-	ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, max_frame,
+	ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
 					DCB_TX_CONFIG);
-	ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, max_frame,
+	ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
 					DCB_RX_CONFIG);
 
-	/* reconfigure the hardware */
-	ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg);
-
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		j = adapter->tx_ring[i]->reg_idx;
-		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
-		/* PThresh workaround for Tx hang with DFP enabled. */
-		txdctl |= 32;
-		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
-	}
 	/* Enable VLAN tag insert/strip */
 	adapter->netdev->features |= NETIF_F_HW_VLAN_RX;
 
 	hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
+
+	/* reconfigure the hardware */
+	ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
 }
 
 #endif
@@ -3515,8 +3751,9 @@
 		case ixgbe_mac_82598EB:
 			IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
 			break;
-		default:
 		case ixgbe_mac_82599EB:
+		case ixgbe_mac_X540:
+		default:
 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
 			IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
 			break;
@@ -3560,13 +3797,24 @@
 	else
 		ixgbe_configure_msi_and_legacy(adapter);
 
-	/* enable the optics */
-	if (hw->phy.multispeed_fiber)
+	/* enable the optics for both multi-speed fiber and 82599 SFP+ fiber */
+	if (hw->mac.ops.enable_tx_laser &&
+	    ((hw->phy.multispeed_fiber) ||
+	     ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
+	      (hw->mac.type == ixgbe_mac_82599EB))))
 		hw->mac.ops.enable_tx_laser(hw);
 
 	clear_bit(__IXGBE_DOWN, &adapter->state);
 	ixgbe_napi_enable_all(adapter);
 
+	if (ixgbe_is_sfp(hw)) {
+		ixgbe_sfp_link_config(adapter);
+	} else {
+		err = ixgbe_non_sfp_link_config(hw);
+		if (err)
+			e_err(probe, "link_config FAILED %d\n", err);
+	}
+
 	/* clear any pending interrupts, may auto mask */
 	IXGBE_READ_REG(hw, IXGBE_EICR);
 	ixgbe_irq_enable(adapter, true, true);
@@ -3589,26 +3837,8 @@
 	 * If we're not hot-pluggable SFP+, we just need to configure link
 	 * and bring it up.
 	 */
-	if (hw->phy.type == ixgbe_phy_unknown) {
-		err = hw->phy.ops.identify(hw);
-		if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
-			/*
-			 * Take the device down and schedule the sfp tasklet
-			 * which will unregister_netdev and log it.
-			 */
-			ixgbe_down(adapter);
-			schedule_work(&adapter->sfp_config_module_task);
-			return err;
-		}
-	}
-
-	if (ixgbe_is_sfp(hw)) {
-		ixgbe_sfp_link_config(adapter);
-	} else {
-		err = ixgbe_non_sfp_link_config(hw);
-		if (err)
-			e_err(probe, "link_config FAILED %d\n", err);
-	}
+	if (hw->phy.type == ixgbe_phy_unknown)
+		schedule_work(&adapter->sfp_config_module_task);
 
 	/* enable transmits */
 	netif_tx_start_all_queues(adapter->netdev);
@@ -3686,15 +3916,13 @@
 
 /**
  * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
- * @adapter: board private structure
  * @rx_ring: ring to free buffers from
  **/
-static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
-				struct ixgbe_ring *rx_ring)
+static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
 {
-	struct pci_dev *pdev = adapter->pdev;
+	struct device *dev = rx_ring->dev;
 	unsigned long size;
-	unsigned int i;
+	u16 i;
 
 	/* ring already cleared, nothing to do */
 	if (!rx_ring->rx_buffer_info)
@@ -3706,7 +3934,7 @@
 
 		rx_buffer_info = &rx_ring->rx_buffer_info[i];
 		if (rx_buffer_info->dma) {
-			dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
+			dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
 					 rx_ring->rx_buf_len,
 					 DMA_FROM_DEVICE);
 			rx_buffer_info->dma = 0;
@@ -3717,7 +3945,7 @@
 			do {
 				struct sk_buff *this = skb;
 				if (IXGBE_RSC_CB(this)->delay_unmap) {
-					dma_unmap_single(&pdev->dev,
+					dma_unmap_single(dev,
 							 IXGBE_RSC_CB(this)->dma,
 							 rx_ring->rx_buf_len,
 							 DMA_FROM_DEVICE);
@@ -3731,7 +3959,7 @@
 		if (!rx_buffer_info->page)
 			continue;
 		if (rx_buffer_info->page_dma) {
-			dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
+			dma_unmap_page(dev, rx_buffer_info->page_dma,
 				       PAGE_SIZE / 2, DMA_FROM_DEVICE);
 			rx_buffer_info->page_dma = 0;
 		}
@@ -3748,24 +3976,17 @@
 
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
-
-	if (rx_ring->head)
-		writel(0, adapter->hw.hw_addr + rx_ring->head);
-	if (rx_ring->tail)
-		writel(0, adapter->hw.hw_addr + rx_ring->tail);
 }
 
 /**
  * ixgbe_clean_tx_ring - Free Tx Buffers
- * @adapter: board private structure
  * @tx_ring: ring to be cleaned
  **/
-static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
-				struct ixgbe_ring *tx_ring)
+static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
 {
 	struct ixgbe_tx_buffer *tx_buffer_info;
 	unsigned long size;
-	unsigned int i;
+	u16 i;
 
 	/* ring already cleared, nothing to do */
 	if (!tx_ring->tx_buffer_info)
@@ -3774,7 +3995,7 @@
 	/* Free all the Tx ring sk_buffs */
 	for (i = 0; i < tx_ring->count; i++) {
 		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+		ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
 	}
 
 	size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
@@ -3785,11 +4006,6 @@
 
 	tx_ring->next_to_use = 0;
 	tx_ring->next_to_clean = 0;
-
-	if (tx_ring->head)
-		writel(0, adapter->hw.hw_addr + tx_ring->head);
-	if (tx_ring->tail)
-		writel(0, adapter->hw.hw_addr + tx_ring->tail);
 }
 
 /**
@@ -3801,7 +4017,7 @@
 	int i;
 
 	for (i = 0; i < adapter->num_rx_queues; i++)
-		ixgbe_clean_rx_ring(adapter, adapter->rx_ring[i]);
+		ixgbe_clean_rx_ring(adapter->rx_ring[i]);
 }
 
 /**
@@ -3813,7 +4029,7 @@
 	int i;
 
 	for (i = 0; i < adapter->num_tx_queues; i++)
-		ixgbe_clean_tx_ring(adapter, adapter->tx_ring[i]);
+		ixgbe_clean_tx_ring(adapter->tx_ring[i]);
 }
 
 void ixgbe_down(struct ixgbe_adapter *adapter)
@@ -3822,7 +4038,7 @@
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 rxctrl;
 	u32 txdctl;
-	int i, j;
+	int i;
 	int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 
 	/* signal that we are down to the interrupt handler */
@@ -3880,26 +4096,36 @@
 
 	/* disable transmits in the hardware now that interrupts are off */
 	for (i = 0; i < adapter->num_tx_queues; i++) {
-		j = adapter->tx_ring[i]->reg_idx;
-		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
-		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
+		u8 reg_idx = adapter->tx_ring[i]->reg_idx;
+		txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
+		IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx),
 				(txdctl & ~IXGBE_TXDCTL_ENABLE));
 	}
-	/* Disable the Tx DMA engine on 82599 */
+	/* Disable the Tx DMA engine on 82599 and X540 */
-	if (hw->mac.type == ixgbe_mac_82599EB)
+	switch (hw->mac.type) {
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
 		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
 				(IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
 				 ~IXGBE_DMATXCTL_TE));
-
-	/* power down the optics */
-	if (hw->phy.multispeed_fiber)
-		hw->mac.ops.disable_tx_laser(hw);
+		break;
+	default:
+		break;
+	}
 
 	/* clear n-tuple filters that are cached */
 	ethtool_ntuple_flush(netdev);
 
 	if (!pci_channel_offline(adapter->pdev))
 		ixgbe_reset(adapter);
+
+	/* power down the optics for multispeed fiber and 82599 SFP+ fiber */
+	if (hw->mac.ops.disable_tx_laser &&
+	    ((hw->phy.multispeed_fiber) ||
+	     ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
+	      (hw->mac.type == ixgbe_mac_82599EB))))
+		hw->mac.ops.disable_tx_laser(hw);
+
 	ixgbe_clean_all_tx_rings(adapter);
 	ixgbe_clean_all_rx_rings(adapter);
 
@@ -3924,10 +4150,8 @@
 	int tx_clean_complete, work_done = 0;
 
 #ifdef CONFIG_IXGBE_DCA
-	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
-		ixgbe_update_tx_dca(adapter, adapter->tx_ring[0]);
-		ixgbe_update_rx_dca(adapter, adapter->rx_ring[0]);
-	}
+	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+		ixgbe_update_dca(q_vector);
 #endif
 
 	tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]);
@@ -3955,6 +4179,8 @@
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
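+	/* count the timeout here, not in the reset task, so resets
+	 * triggered from other paths are not reported as Tx timeouts
+	 */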
+	adapter->tx_timeout_count++;
+
 	/* Do the reset outside of interrupt context */
 	schedule_work(&adapter->reset_task);
 }
@@ -3969,8 +4195,6 @@
 	    test_bit(__IXGBE_RESETTING, &adapter->state))
 		return;
 
-	adapter->tx_timeout_count++;
-
 	ixgbe_dump(adapter);
 	netdev_err(adapter->netdev, "Reset adapter\n");
 	ixgbe_reinit_locked(adapter);
@@ -4220,19 +4444,16 @@
 static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
 {
 	int i;
-	bool ret = false;
 
-	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
-		for (i = 0; i < adapter->num_rx_queues; i++)
-			adapter->rx_ring[i]->reg_idx = i;
-		for (i = 0; i < adapter->num_tx_queues; i++)
-			adapter->tx_ring[i]->reg_idx = i;
-		ret = true;
-	} else {
-		ret = false;
-	}
+	if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
+		return false;
 
-	return ret;
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		adapter->rx_ring[i]->reg_idx = i;
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		adapter->tx_ring[i]->reg_idx = i;
+
+	return true;
 }
 
 #ifdef CONFIG_IXGBE_DCB
@@ -4249,71 +4470,67 @@
 	bool ret = false;
 	int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
 
-	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
-		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
-			/* the number of queues is assumed to be symmetric */
-			for (i = 0; i < dcb_i; i++) {
-				adapter->rx_ring[i]->reg_idx = i << 3;
-				adapter->tx_ring[i]->reg_idx = i << 2;
+	if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+		return false;
+
+	/* the number of queues is assumed to be symmetric */
+	switch (adapter->hw.mac.type) {
+	case ixgbe_mac_82598EB:
+		for (i = 0; i < dcb_i; i++) {
+			adapter->rx_ring[i]->reg_idx = i << 3;
+			adapter->tx_ring[i]->reg_idx = i << 2;
+		}
+		ret = true;
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+		if (dcb_i == 8) {
+			/*
+			 * Tx TC0 starts at: descriptor queue 0
+			 * Tx TC1 starts at: descriptor queue 32
+			 * Tx TC2 starts at: descriptor queue 64
+			 * Tx TC3 starts at: descriptor queue 80
+			 * Tx TC4 starts at: descriptor queue 96
+			 * Tx TC5 starts at: descriptor queue 104
+			 * Tx TC6 starts at: descriptor queue 112
+			 * Tx TC7 starts at: descriptor queue 120
+			 *
+			 * Rx TC0-TC7 are offset by 16 queues each
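+			 *
+			 * The loops below compute exactly that layout:
+			 *   i = 0..2: Tx reg_idx = i << 5       ->   0,  32,  64
+			 *   i = 3..4: Tx reg_idx = (i + 2) << 4 ->  80,  96
+			 *   i = 5..7: Tx reg_idx = (i + 8) << 3 -> 104, 112, 120
+			 *   Rx reg_idx = i << 4                 ->   0, 16, ..., 112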
+			 */
+			for (i = 0; i < 3; i++) {
+				adapter->tx_ring[i]->reg_idx = i << 5;
+				adapter->rx_ring[i]->reg_idx = i << 4;
+			}
+			for ( ; i < 5; i++) {
+				adapter->tx_ring[i]->reg_idx = ((i + 2) << 4);
+				adapter->rx_ring[i]->reg_idx = i << 4;
+			}
+			for ( ; i < dcb_i; i++) {
+				adapter->tx_ring[i]->reg_idx = ((i + 8) << 3);
+				adapter->rx_ring[i]->reg_idx = i << 4;
 			}
 			ret = true;
-		} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-			if (dcb_i == 8) {
-				/*
-				 * Tx TC0 starts at: descriptor queue 0
-				 * Tx TC1 starts at: descriptor queue 32
-				 * Tx TC2 starts at: descriptor queue 64
-				 * Tx TC3 starts at: descriptor queue 80
-				 * Tx TC4 starts at: descriptor queue 96
-				 * Tx TC5 starts at: descriptor queue 104
-				 * Tx TC6 starts at: descriptor queue 112
-				 * Tx TC7 starts at: descriptor queue 120
-				 *
-				 * Rx TC0-TC7 are offset by 16 queues each
-				 */
-				for (i = 0; i < 3; i++) {
-					adapter->tx_ring[i]->reg_idx = i << 5;
-					adapter->rx_ring[i]->reg_idx = i << 4;
-				}
-				for ( ; i < 5; i++) {
-					adapter->tx_ring[i]->reg_idx =
-								 ((i + 2) << 4);
-					adapter->rx_ring[i]->reg_idx = i << 4;
-				}
-				for ( ; i < dcb_i; i++) {
-					adapter->tx_ring[i]->reg_idx =
-								 ((i + 8) << 3);
-					adapter->rx_ring[i]->reg_idx = i << 4;
-				}
-
-				ret = true;
-			} else if (dcb_i == 4) {
-				/*
-				 * Tx TC0 starts at: descriptor queue 0
-				 * Tx TC1 starts at: descriptor queue 64
-				 * Tx TC2 starts at: descriptor queue 96
-				 * Tx TC3 starts at: descriptor queue 112
-				 *
-				 * Rx TC0-TC3 are offset by 32 queues each
-				 */
-				adapter->tx_ring[0]->reg_idx = 0;
-				adapter->tx_ring[1]->reg_idx = 64;
-				adapter->tx_ring[2]->reg_idx = 96;
-				adapter->tx_ring[3]->reg_idx = 112;
-				for (i = 0 ; i < dcb_i; i++)
-					adapter->rx_ring[i]->reg_idx = i << 5;
-
-				ret = true;
-			} else {
-				ret = false;
-			}
-		} else {
-			ret = false;
+		} else if (dcb_i == 4) {
+			/*
+			 * Tx TC0 starts at: descriptor queue 0
+			 * Tx TC1 starts at: descriptor queue 64
+			 * Tx TC2 starts at: descriptor queue 96
+			 * Tx TC3 starts at: descriptor queue 112
+			 *
+			 * Rx TC0-TC3 are offset by 32 queues each
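+			 * (i << 5 gives Rx reg_idx 0, 32, 64, 96)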
+			 */
+			adapter->tx_ring[0]->reg_idx = 0;
+			adapter->tx_ring[1]->reg_idx = 64;
+			adapter->tx_ring[2]->reg_idx = 96;
+			adapter->tx_ring[3]->reg_idx = 112;
+			for (i = 0 ; i < dcb_i; i++)
+				adapter->rx_ring[i]->reg_idx = i << 5;
+			ret = true;
 		}
-	} else {
-		ret = false;
+		break;
+	default:
+		break;
 	}
-
 	return ret;
 }
 #endif
@@ -4353,55 +4570,55 @@
  */
 static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
 {
-	int i, fcoe_rx_i = 0, fcoe_tx_i = 0;
-	bool ret = false;
 	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
+	int i;
+	u8 fcoe_rx_i = 0, fcoe_tx_i = 0;
 
-	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
+	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
+		return false;
+
 #ifdef CONFIG_IXGBE_DCB
-		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
-			struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+	if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+		struct ixgbe_fcoe *fcoe = &adapter->fcoe;
 
-			ixgbe_cache_ring_dcb(adapter);
-			/* find out queues in TC for FCoE */
-			fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1;
-			fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1;
-			/*
-			 * In 82599, the number of Tx queues for each traffic
-			 * class for both 8-TC and 4-TC modes are:
-			 * TCs  : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7
-			 * 8 TCs:  32  32  16  16   8   8   8   8
-			 * 4 TCs:  64  64  32  32
-			 * We have max 8 queues for FCoE, where 8 the is
-			 * FCoE redirection table size. If TC for FCoE is
-			 * less than or equal to TC3, we have enough queues
-			 * to add max of 8 queues for FCoE, so we start FCoE
-			 * tx descriptor from the next one, i.e., reg_idx + 1.
-			 * If TC for FCoE is above TC3, implying 8 TC mode,
-			 * and we need 8 for FCoE, we have to take all queues
-			 * in that traffic class for FCoE.
-			 */
-			if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3))
-				fcoe_tx_i--;
-		}
-#endif /* CONFIG_IXGBE_DCB */
-		if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
-			if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
-			    (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
-				ixgbe_cache_ring_fdir(adapter);
-			else
-				ixgbe_cache_ring_rss(adapter);
-
-			fcoe_rx_i = f->mask;
-			fcoe_tx_i = f->mask;
-		}
-		for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
-			adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
-			adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
-		}
-		ret = true;
+		ixgbe_cache_ring_dcb(adapter);
+		/* find out queues in TC for FCoE */
+		fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1;
+		fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1;
+		/*
+		 * In 82599, the number of Tx queues for each traffic
+		 * class for both 8-TC and 4-TC modes are:
+		 * TCs  : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7
+		 * 8 TCs:  32  32  16  16   8   8   8   8
+		 * 4 TCs:  64  64  32  32
+		 * We have max 8 queues for FCoE, where 8 is the
+		 * FCoE redirection table size. If TC for FCoE is
+		 * less than or equal to TC3, we have enough queues
+		 * to add max of 8 queues for FCoE, so we start FCoE
+		 * Tx queue from the next one, i.e., reg_idx + 1.
+		 * If TC for FCoE is above TC3, implying 8 TC mode,
+		 * and we need 8 for FCoE, we have to take all queues
+		 * in that traffic class for FCoE.
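+		 * e.g. with 8 TCs and fcoe->tc = 4, the TC has only 8 Tx
+		 * queues, exactly the redirection table size, so the
+		 * decrement below moves fcoe_tx_i back to reg_idx itself.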
+		 */
+		if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3))
+			fcoe_tx_i--;
 	}
-	return ret;
+#endif /* CONFIG_IXGBE_DCB */
+	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
+		if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
+		    (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
+			ixgbe_cache_ring_fdir(adapter);
+		else
+			ixgbe_cache_ring_rss(adapter);
+
+		fcoe_rx_i = f->mask;
+		fcoe_tx_i = f->mask;
+	}
+	for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
+		adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
+		adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
+	}
+	return true;
 }
 
 #endif /* IXGBE_FCOE */
@@ -4470,65 +4687,55 @@
  **/
 static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
 {
-	int i;
-	int orig_node = adapter->node;
+	int rx = 0, tx = 0, nid = adapter->node;
 
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		struct ixgbe_ring *ring = adapter->tx_ring[i];
-		if (orig_node == -1) {
-			int cur_node = next_online_node(adapter->node);
-			if (cur_node == MAX_NUMNODES)
-				cur_node = first_online_node;
-			adapter->node = cur_node;
-		}
-		ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
-				    adapter->node);
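+	/* fall back to the first online node if no valid node is set */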
+	if (nid < 0 || !node_online(nid))
+		nid = first_online_node;
+
+	for (; tx < adapter->num_tx_queues; tx++) {
+		struct ixgbe_ring *ring;
+
+		ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid);
 		if (!ring)
-			ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
+			ring = kzalloc(sizeof(*ring), GFP_KERNEL);
 		if (!ring)
-			goto err_tx_ring_allocation;
+			goto err_allocation;
 		ring->count = adapter->tx_ring_count;
-		ring->queue_index = i;
-		ring->numa_node = adapter->node;
+		ring->queue_index = tx;
+		ring->numa_node = nid;
+		ring->dev = &adapter->pdev->dev;
+		ring->netdev = adapter->netdev;
 
-		adapter->tx_ring[i] = ring;
+		adapter->tx_ring[tx] = ring;
 	}
 
-	/* Restore the adapter's original node */
-	adapter->node = orig_node;
+	for (; rx < adapter->num_rx_queues; rx++) {
+		struct ixgbe_ring *ring;
 
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		struct ixgbe_ring *ring = adapter->rx_ring[i];
-		if (orig_node == -1) {
-			int cur_node = next_online_node(adapter->node);
-			if (cur_node == MAX_NUMNODES)
-				cur_node = first_online_node;
-			adapter->node = cur_node;
-		}
-		ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
-				    adapter->node);
+		ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid);
 		if (!ring)
-			ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
+			ring = kzalloc(sizeof(*ring), GFP_KERNEL);
 		if (!ring)
-			goto err_rx_ring_allocation;
+			goto err_allocation;
 		ring->count = adapter->rx_ring_count;
-		ring->queue_index = i;
-		ring->numa_node = adapter->node;
+		ring->queue_index = rx;
+		ring->numa_node = nid;
+		ring->dev = &adapter->pdev->dev;
+		ring->netdev = adapter->netdev;
 
-		adapter->rx_ring[i] = ring;
+		adapter->rx_ring[rx] = ring;
 	}
 
-	/* Restore the adapter's original node */
-	adapter->node = orig_node;
-
 	ixgbe_cache_ring_register(adapter);
 
 	return 0;
 
-err_rx_ring_allocation:
-	for (i = 0; i < adapter->num_tx_queues; i++)
-		kfree(adapter->tx_ring[i]);
-err_tx_ring_allocation:
+err_allocation:
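+	/* unwind only the rings allocated so far, newest first */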
+	while (tx)
+		kfree(adapter->tx_ring[--tx]);
+
+	while (rx)
+		kfree(adapter->rx_ring[--rx]);
 	return -ENOMEM;
 }
 
@@ -4750,6 +4957,11 @@
 	return err;
 }
 
+static void ring_free_rcu(struct rcu_head *head)
+{
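+	/* the rcu_head is embedded in struct ixgbe_ring, so recover the
+	 * enclosing ring with container_of() before freeing it
+	 */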
+	kfree(container_of(head, struct ixgbe_ring, rcu));
+}
+
 /**
  * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
  * @adapter: board private structure to clear interrupt scheme on
@@ -4766,10 +4978,18 @@
 		adapter->tx_ring[i] = NULL;
 	}
 	for (i = 0; i < adapter->num_rx_queues; i++) {
-		kfree(adapter->rx_ring[i]);
+		struct ixgbe_ring *ring = adapter->rx_ring[i];
+
+		/* ixgbe_get_stats64() might access this ring; we must wait
+		 * a grace period before freeing it.
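+		 * The reader side (ixgbe_get_stats64) runs under
+		 * rcu_read_lock() and re-checks the pointer, so it never
+		 * touches a freed ring.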
+		 */
+		call_rcu(&ring->rcu, ring_free_rcu);
 		adapter->rx_ring[i] = NULL;
 	}
 
+	adapter->num_tx_queues = 0;
+	adapter->num_rx_queues = 0;
+
 	ixgbe_free_q_vectors(adapter);
 	ixgbe_reset_interrupt_capability(adapter);
 }
@@ -4843,6 +5063,7 @@
 	int j;
 	struct tc_configuration *tc;
 #endif
+	int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
 
 	/* PCI config space info */
 
@@ -4857,11 +5078,14 @@
 	adapter->ring_feature[RING_F_RSS].indices = rss;
 	adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
 	adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
-	if (hw->mac.type == ixgbe_mac_82598EB) {
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
 		if (hw->device_id == IXGBE_DEV_ID_82598AT)
 			adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
 		adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
-	} else if (hw->mac.type == ixgbe_mac_82599EB) {
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
 		adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
 		adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
 		adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
@@ -4890,6 +5114,9 @@
 		adapter->fcoe.up = IXGBE_FCOE_DEFTC;
 #endif
 #endif /* IXGBE_FCOE */
+		break;
+	default:
+		break;
 	}
 
 #ifdef CONFIG_IXGBE_DCB
@@ -4919,8 +5146,8 @@
 #ifdef CONFIG_DCB
 	adapter->last_lfc_mode = hw->fc.current_mode;
 #endif
-	hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
-	hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
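+	/* flow control thresholds now scale with the maximum frame size */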
+	hw->fc.high_water = FC_HIGH_WATER(max_frame);
+	hw->fc.low_water = FC_LOW_WATER(max_frame);
 	hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
 	hw->fc.send_xon = true;
 	hw->fc.disable_fc_autoneg = false;
@@ -4958,30 +5185,27 @@
 
 /**
  * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
- * @adapter: board private structure
  * @tx_ring:    tx descriptor ring (for a specific queue) to setup
  *
  * Return 0 on success, negative on failure
  **/
-int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
-			     struct ixgbe_ring *tx_ring)
+int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
 {
-	struct pci_dev *pdev = adapter->pdev;
+	struct device *dev = tx_ring->dev;
 	int size;
 
 	size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
-	tx_ring->tx_buffer_info = vmalloc_node(size, tx_ring->numa_node);
+	tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node);
 	if (!tx_ring->tx_buffer_info)
-		tx_ring->tx_buffer_info = vmalloc(size);
+		tx_ring->tx_buffer_info = vzalloc(size);
 	if (!tx_ring->tx_buffer_info)
 		goto err;
-	memset(tx_ring->tx_buffer_info, 0, size);
 
 	/* round up to nearest 4K */
 	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
 	tx_ring->size = ALIGN(tx_ring->size, 4096);
 
-	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
 					   &tx_ring->dma, GFP_KERNEL);
 	if (!tx_ring->desc)
 		goto err;
@@ -4994,7 +5218,7 @@
 err:
 	vfree(tx_ring->tx_buffer_info);
 	tx_ring->tx_buffer_info = NULL;
-	e_err(probe, "Unable to allocate memory for the Tx descriptor ring\n");
+	dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
 	return -ENOMEM;
 }
 
@@ -5013,7 +5237,7 @@
 	int i, err = 0;
 
 	for (i = 0; i < adapter->num_tx_queues; i++) {
-		err = ixgbe_setup_tx_resources(adapter, adapter->tx_ring[i]);
+		err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
 		if (!err)
 			continue;
 		e_err(probe, "Allocation for Tx Queue %u failed\n", i);
@@ -5025,48 +5249,40 @@
 
 /**
  * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
- * @adapter: board private structure
  * @rx_ring:    rx descriptor ring (for a specific queue) to setup
  *
  * Returns 0 on success, negative on failure
  **/
-int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
-			     struct ixgbe_ring *rx_ring)
+int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
 {
-	struct pci_dev *pdev = adapter->pdev;
+	struct device *dev = rx_ring->dev;
 	int size;
 
 	size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
-	rx_ring->rx_buffer_info = vmalloc_node(size, adapter->node);
+	rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node);
 	if (!rx_ring->rx_buffer_info)
-		rx_ring->rx_buffer_info = vmalloc(size);
-	if (!rx_ring->rx_buffer_info) {
-		e_err(probe, "vmalloc allocation failed for the Rx "
-		      "descriptor ring\n");
-		goto alloc_failed;
-	}
-	memset(rx_ring->rx_buffer_info, 0, size);
+		rx_ring->rx_buffer_info = vzalloc(size);
+	if (!rx_ring->rx_buffer_info)
+		goto err;
 
 	/* Round up to nearest 4K */
 	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
 	rx_ring->size = ALIGN(rx_ring->size, 4096);
 
-	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
 					   &rx_ring->dma, GFP_KERNEL);
 
-	if (!rx_ring->desc) {
-		e_err(probe, "Memory allocation failed for the Rx "
-		      "descriptor ring\n");
-		vfree(rx_ring->rx_buffer_info);
-		goto alloc_failed;
-	}
+	if (!rx_ring->desc)
+		goto err;
 
 	rx_ring->next_to_clean = 0;
 	rx_ring->next_to_use = 0;
 
 	return 0;
-
-alloc_failed:
+err:
+	vfree(rx_ring->rx_buffer_info);
+	rx_ring->rx_buffer_info = NULL;
+	dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
 	return -ENOMEM;
 }
 
@@ -5080,13 +5296,12 @@
  *
  * Return 0 on success, negative on failure
  **/
-
 static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
 {
 	int i, err = 0;
 
 	for (i = 0; i < adapter->num_rx_queues; i++) {
-		err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]);
+		err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
 		if (!err)
 			continue;
 		e_err(probe, "Allocation for Rx Queue %u failed\n", i);
@@ -5098,23 +5313,23 @@
 
 /**
  * ixgbe_free_tx_resources - Free Tx Resources per Queue
- * @adapter: board private structure
  * @tx_ring: Tx descriptor ring for a specific queue
  *
  * Free all transmit software resources
  **/
-void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
-			     struct ixgbe_ring *tx_ring)
+void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
 {
-	struct pci_dev *pdev = adapter->pdev;
-
-	ixgbe_clean_tx_ring(adapter, tx_ring);
+	ixgbe_clean_tx_ring(tx_ring);
 
 	vfree(tx_ring->tx_buffer_info);
 	tx_ring->tx_buffer_info = NULL;
 
-	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
-			  tx_ring->dma);
+	/* if not set, then don't free */
+	if (!tx_ring->desc)
+		return;
+
+	dma_free_coherent(tx_ring->dev, tx_ring->size,
+			  tx_ring->desc, tx_ring->dma);
 
 	tx_ring->desc = NULL;
 }
@@ -5131,28 +5346,28 @@
 
 	for (i = 0; i < adapter->num_tx_queues; i++)
 		if (adapter->tx_ring[i]->desc)
-			ixgbe_free_tx_resources(adapter, adapter->tx_ring[i]);
+			ixgbe_free_tx_resources(adapter->tx_ring[i]);
 }
 
 /**
  * ixgbe_free_rx_resources - Free Rx Resources
- * @adapter: board private structure
  * @rx_ring: ring to clean the resources from
  *
  * Free all receive software resources
  **/
-void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
-			     struct ixgbe_ring *rx_ring)
+void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
 {
-	struct pci_dev *pdev = adapter->pdev;
-
-	ixgbe_clean_rx_ring(adapter, rx_ring);
+	ixgbe_clean_rx_ring(rx_ring);
 
 	vfree(rx_ring->rx_buffer_info);
 	rx_ring->rx_buffer_info = NULL;
 
-	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
-			  rx_ring->dma);
+	/* if not set, then don't free */
+	if (!rx_ring->desc)
+		return;
+
+	dma_free_coherent(rx_ring->dev, rx_ring->size,
+			  rx_ring->desc, rx_ring->dma);
 
 	rx_ring->desc = NULL;
 }
@@ -5169,7 +5384,7 @@
 
 	for (i = 0; i < adapter->num_rx_queues; i++)
 		if (adapter->rx_ring[i]->desc)
-			ixgbe_free_rx_resources(adapter, adapter->rx_ring[i]);
+			ixgbe_free_rx_resources(adapter->rx_ring[i]);
 }
 
 /**
@@ -5182,6 +5397,7 @@
 static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_hw *hw = &adapter->hw;
 	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
 
 	/* MTU < 68 is an error and causes problems on some kernels */
@@ -5192,6 +5408,9 @@
 	/* must set new MTU before calling down or up */
 	netdev->mtu = new_mtu;
 
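+	/* flow control thresholds must track the new maximum frame size */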
+	hw->fc.high_water = FC_HIGH_WATER(max_frame);
+	hw->fc.low_water = FC_LOW_WATER(max_frame);
+
 	if (netif_running(netdev))
 		ixgbe_reinit_locked(adapter);
 
@@ -5287,8 +5506,8 @@
 #ifdef CONFIG_PM
 static int ixgbe_resume(struct pci_dev *pdev)
 {
-	struct net_device *netdev = pci_get_drvdata(pdev);
-	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+	struct net_device *netdev = adapter->netdev;
 	u32 err;
 
 	pci_set_power_state(pdev, PCI_D0);
@@ -5319,7 +5538,7 @@
 	IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
 
 	if (netif_running(netdev)) {
-		err = ixgbe_open(adapter->netdev);
+		err = ixgbe_open(netdev);
 		if (err)
 			return err;
 	}
@@ -5332,8 +5551,8 @@
 
 static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
 {
-	struct net_device *netdev = pci_get_drvdata(pdev);
-	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+	struct net_device *netdev = adapter->netdev;
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 ctrl, fctrl;
 	u32 wufc = adapter->wol;
@@ -5350,6 +5569,8 @@
 		ixgbe_free_all_rx_resources(adapter);
 	}
 
+	ixgbe_clear_interrupt_scheme(adapter);
+
 #ifdef CONFIG_PM
 	retval = pci_save_state(pdev);
 	if (retval)
@@ -5376,15 +5597,20 @@
 		IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
 	}
 
-	if (wufc && hw->mac.type == ixgbe_mac_82599EB)
-		pci_wake_from_d3(pdev, true);
-	else
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
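+		/* 82598 does not support waking the system from D3 */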
 		pci_wake_from_d3(pdev, false);
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+		pci_wake_from_d3(pdev, !!wufc);
+		break;
+	default:
+		break;
+	}
 
 	*enable_wake = !!wufc;
 
-	ixgbe_clear_interrupt_scheme(adapter);
-
 	ixgbe_release_hw_control(adapter);
 
 	pci_disable_device(pdev);
@@ -5433,10 +5659,12 @@
 {
 	struct net_device *netdev = adapter->netdev;
 	struct ixgbe_hw *hw = &adapter->hw;
+	struct ixgbe_hw_stats *hwstats = &adapter->stats;
 	u64 total_mpc = 0;
 	u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
-	u64 non_eop_descs = 0, restart_queue = 0;
-	struct ixgbe_hw_stats *hwstats = &adapter->stats;
+	u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
+	u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
+	u64 bytes = 0, packets = 0;
 
 	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
 	    test_bit(__IXGBE_RESETTING, &adapter->state))
@@ -5449,21 +5677,41 @@
 			adapter->hw_rx_no_dma_resources +=
 				IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
 		for (i = 0; i < adapter->num_rx_queues; i++) {
-			rsc_count += adapter->rx_ring[i]->rsc_count;
-			rsc_flush += adapter->rx_ring[i]->rsc_flush;
+			rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
+			rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
 		}
 		adapter->rsc_total_count = rsc_count;
 		adapter->rsc_total_flush = rsc_flush;
 	}
 
-	/* gather some stats to the adapter struct that are per queue */
-	for (i = 0; i < adapter->num_tx_queues; i++)
-		restart_queue += adapter->tx_ring[i]->restart_queue;
-	adapter->restart_queue = restart_queue;
-
-	for (i = 0; i < adapter->num_rx_queues; i++)
-		non_eop_descs += adapter->rx_ring[i]->non_eop_descs;
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
+		non_eop_descs += rx_ring->rx_stats.non_eop_descs;
+		alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
+		alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
+		bytes += rx_ring->stats.bytes;
+		packets += rx_ring->stats.packets;
+	}
 	adapter->non_eop_descs = non_eop_descs;
+	adapter->alloc_rx_page_failed = alloc_rx_page_failed;
+	adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
+	netdev->stats.rx_bytes = bytes;
+	netdev->stats.rx_packets = packets;
+
+	bytes = 0;
+	packets = 0;
+	/* gather some stats to the adapter struct that are per queue */
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
+		restart_queue += tx_ring->tx_stats.restart_queue;
+		tx_busy += tx_ring->tx_stats.tx_busy;
+		bytes += tx_ring->stats.bytes;
+		packets += tx_ring->stats.packets;
+	}
+	adapter->restart_queue = restart_queue;
+	adapter->tx_busy = tx_busy;
+	netdev->stats.tx_bytes = bytes;
+	netdev->stats.tx_packets = packets;
 
 	hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
 	for (i = 0; i < 8; i++) {
@@ -5478,17 +5726,18 @@
 		hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
 		hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
 		hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
-		if (hw->mac.type == ixgbe_mac_82599EB) {
-			hwstats->pxonrxc[i] +=
-				IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
-			hwstats->pxoffrxc[i] +=
-				IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
-			hwstats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
-		} else {
+		switch (hw->mac.type) {
+		case ixgbe_mac_82598EB:
 			hwstats->pxonrxc[i] +=
 				IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
-			hwstats->pxoffrxc[i] +=
-				IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
+			break;
+		case ixgbe_mac_82599EB:
+		case ixgbe_mac_X540:
+			hwstats->pxonrxc[i] +=
+				IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
+			break;
+		default:
+			break;
 		}
 		hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
 		hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
@@ -5497,21 +5746,25 @@
 	/* work around hardware counting issue */
 	hwstats->gprc -= missed_rx;
 
+	ixgbe_update_xoff_received(adapter);
+
 	/* 82598 hardware only has a 32 bit counter in the high register */
-	if (hw->mac.type == ixgbe_mac_82599EB) {
-		u64 tmp;
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
+		hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
+		hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
+		hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
+		hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
+		break;
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
 		hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
-		tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF;
-						/* 4 high bits of GORC */
-		hwstats->gorc += (tmp << 32);
+		IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
 		hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
-		tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF;
-						/* 4 high bits of GOTC */
-		hwstats->gotc += (tmp << 32);
+		IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
 		hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
-		IXGBE_READ_REG(hw, IXGBE_TORH);	/* to clear */
+		IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
 		hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
-		hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
 		hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
 		hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
 #ifdef IXGBE_FCOE
@@ -5522,12 +5775,9 @@
 		hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
 		hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
 #endif /* IXGBE_FCOE */
-	} else {
-		hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
-		hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
-		hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
-		hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
-		hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
+		break;
+	default:
+		break;
 	}
 	bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
 	hwstats->bprc += bprc;
@@ -5700,8 +5950,8 @@
 
 	if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
 		for (i = 0; i < adapter->num_tx_queues; i++)
-			set_bit(__IXGBE_FDIR_INIT_DONE,
-				&(adapter->tx_ring[i]->reinit_state));
+			set_bit(__IXGBE_TX_FDIR_INIT_DONE,
+				&(adapter->tx_ring[i]->state));
 	} else {
 		e_err(probe, "failed to finish FDIR re-initialization, "
 		      "ignored adding FDIR ATR filters\n");
@@ -5763,17 +6013,27 @@
 		if (!netif_carrier_ok(netdev)) {
 			bool flow_rx, flow_tx;
 
-			if (hw->mac.type == ixgbe_mac_82599EB) {
-				u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
-				u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
-				flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
-				flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
-			} else {
+			switch (hw->mac.type) {
+			case ixgbe_mac_82598EB: {
 				u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
 				u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
 				flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
 				flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
 			}
+				break;
+			case ixgbe_mac_82599EB:
+			case ixgbe_mac_X540: {
+				u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+				u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
+				flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
+				flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
+			}
+				break;
+			default:
+				flow_tx = false;
+				flow_rx = false;
+				break;
+			}
 
 			e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
 			       (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
@@ -5787,7 +6047,10 @@
 			netif_carrier_on(netdev);
 		} else {
 			/* Force detection of hung controller */
-			adapter->detect_tx_hung = true;
+			for (i = 0; i < adapter->num_tx_queues; i++) {
+				tx_ring = adapter->tx_ring[i];
+				set_check_for_tx_hang(tx_ring);
+			}
 		}
 	} else {
 		adapter->link_up = false;
@@ -5823,7 +6086,7 @@
 
 static int ixgbe_tso(struct ixgbe_adapter *adapter,
 		     struct ixgbe_ring *tx_ring, struct sk_buff *skb,
-		     u32 tx_flags, u8 *hdr_len)
+		     u32 tx_flags, u8 *hdr_len, __be16 protocol)
 {
 	struct ixgbe_adv_tx_context_desc *context_desc;
 	unsigned int i;
@@ -5841,7 +6104,7 @@
 		l4len = tcp_hdrlen(skb);
 		*hdr_len += l4len;
 
-		if (skb->protocol == htons(ETH_P_IP)) {
+		if (protocol == htons(ETH_P_IP)) {
 			struct iphdr *iph = ip_hdr(skb);
 			iph->tot_len = 0;
 			iph->check = 0;
@@ -5880,7 +6143,7 @@
 		type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
 				   IXGBE_ADVTXD_DTYP_CTXT);
 
-		if (skb->protocol == htons(ETH_P_IP))
+		if (protocol == htons(ETH_P_IP))
 			type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
 		type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
 		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
@@ -5906,16 +6169,10 @@
 	return false;
 }
 
-static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb)
+static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb,
+		      __be16 protocol)
 {
 	u32 rtn = 0;
-	__be16 protocol;
-
-	if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
-		protocol = ((const struct vlan_ethhdr *)skb->data)->
-					h_vlan_encapsulated_proto;
-	else
-		protocol = skb->protocol;
 
 	switch (protocol) {
 	case cpu_to_be16(ETH_P_IP):
@@ -5943,7 +6200,7 @@
 	default:
 		if (unlikely(net_ratelimit()))
 			e_warn(probe, "partial checksum but proto=%x!\n",
-			       skb->protocol);
+			       protocol);
 		break;
 	}
 
@@ -5952,7 +6209,8 @@
 
 static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
 			  struct ixgbe_ring *tx_ring,
-			  struct sk_buff *skb, u32 tx_flags)
+			  struct sk_buff *skb, u32 tx_flags,
+			  __be16 protocol)
 {
 	struct ixgbe_adv_tx_context_desc *context_desc;
 	unsigned int i;
@@ -5981,7 +6239,7 @@
 				    IXGBE_ADVTXD_DTYP_CTXT);
 
 		if (skb->ip_summed == CHECKSUM_PARTIAL)
-			type_tucmd_mlhl |= ixgbe_psum(adapter, skb);
+			type_tucmd_mlhl |= ixgbe_psum(adapter, skb, protocol);
 
 		context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
 		/* use index zero for tx checksum offload */
@@ -6004,15 +6262,17 @@
 static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
 			struct ixgbe_ring *tx_ring,
 			struct sk_buff *skb, u32 tx_flags,
-			unsigned int first)
+			unsigned int first, const u8 hdr_len)
 {
-	struct pci_dev *pdev = adapter->pdev;
+	struct device *dev = tx_ring->dev;
 	struct ixgbe_tx_buffer *tx_buffer_info;
 	unsigned int len;
 	unsigned int total = skb->len;
 	unsigned int offset = 0, size, count = 0, i;
 	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
 	unsigned int f;
+	unsigned int bytecount = skb->len;
+	u16 gso_segs = 1;
 
 	i = tx_ring->next_to_use;
 
@@ -6027,10 +6287,10 @@
 
 		tx_buffer_info->length = size;
 		tx_buffer_info->mapped_as_page = false;
-		tx_buffer_info->dma = dma_map_single(&pdev->dev,
+		tx_buffer_info->dma = dma_map_single(dev,
 						     skb->data + offset,
 						     size, DMA_TO_DEVICE);
-		if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
+		if (dma_mapping_error(dev, tx_buffer_info->dma))
 			goto dma_error;
 		tx_buffer_info->time_stamp = jiffies;
 		tx_buffer_info->next_to_watch = i;
@@ -6063,12 +6323,12 @@
 			size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
 
 			tx_buffer_info->length = size;
-			tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev,
+			tx_buffer_info->dma = dma_map_page(dev,
 							   frag->page,
 							   offset, size,
 							   DMA_TO_DEVICE);
 			tx_buffer_info->mapped_as_page = true;
-			if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
+			if (dma_mapping_error(dev, tx_buffer_info->dma))
 				goto dma_error;
 			tx_buffer_info->time_stamp = jiffies;
 			tx_buffer_info->next_to_watch = i;
@@ -6082,6 +6342,19 @@
 			break;
 	}
 
+	if (tx_flags & IXGBE_TX_FLAGS_TSO)
+		gso_segs = skb_shinfo(skb)->gso_segs;
+#ifdef IXGBE_FCOE
+	/* adjust for FCoE Sequence Offload */
+	else if (tx_flags & IXGBE_TX_FLAGS_FSO)
+		gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
+					skb_shinfo(skb)->gso_size);
+#endif /* IXGBE_FCOE */
+	/* each additional segment carries its own copy of the headers */
+	bytecount += (gso_segs - 1) * hdr_len;
+
+	tx_ring->tx_buffer_info[i].bytecount = bytecount;
+	tx_ring->tx_buffer_info[i].gso_segs = gso_segs;
 	tx_ring->tx_buffer_info[i].skb = skb;
 	tx_ring->tx_buffer_info[first].next_to_watch = i;
 
@@ -6103,14 +6376,13 @@
 			i += tx_ring->count;
 		i--;
 		tx_buffer_info = &tx_ring->tx_buffer_info[i];
-		ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+		ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
 	}
 
 	return 0;
 }
 
-static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
-			   struct ixgbe_ring *tx_ring,
+static void ixgbe_tx_queue(struct ixgbe_ring *tx_ring,
 			   int tx_flags, int count, u32 paylen, u8 hdr_len)
 {
 	union ixgbe_adv_tx_desc *tx_desc = NULL;
@@ -6175,60 +6447,46 @@
 	wmb();
 
 	tx_ring->next_to_use = i;
-	writel(i, adapter->hw.hw_addr + tx_ring->tail);
+	writel(i, tx_ring->tail);
 }
 
 static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
-		      int queue, u32 tx_flags)
+		      u8 queue, u32 tx_flags, __be16 protocol)
 {
 	struct ixgbe_atr_input atr_input;
-	struct tcphdr *th;
 	struct iphdr *iph = ip_hdr(skb);
 	struct ethhdr *eth = (struct ethhdr *)skb->data;
-	u16 vlan_id, src_port, dst_port, flex_bytes;
-	u32 src_ipv4_addr, dst_ipv4_addr;
-	u8 l4type = 0;
+	struct tcphdr *th;
+	u16 vlan_id;
 
-	/* Right now, we support IPv4 only */
-	if (skb->protocol != htons(ETH_P_IP))
+	/* Right now, we support IPv4 w/ TCP only */
+	if (protocol != htons(ETH_P_IP) ||
+	    iph->protocol != IPPROTO_TCP)
 		return;
-	/* check if we're UDP or TCP */
-	if (iph->protocol == IPPROTO_TCP) {
-		th = tcp_hdr(skb);
-		src_port = th->source;
-		dst_port = th->dest;
-		l4type |= IXGBE_ATR_L4TYPE_TCP;
-		/* l4type IPv4 type is 0, no need to assign */
-	} else {
-		/* Unsupported L4 header, just bail here */
-		return;
-	}
 
 	memset(&atr_input, 0, sizeof(struct ixgbe_atr_input));
 
 	vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >>
 		   IXGBE_TX_FLAGS_VLAN_SHIFT;
-	src_ipv4_addr = iph->saddr;
-	dst_ipv4_addr = iph->daddr;
-	flex_bytes = eth->h_proto;
+
+	th = tcp_hdr(skb);
 
 	ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id);
-	ixgbe_atr_set_src_port_82599(&atr_input, dst_port);
-	ixgbe_atr_set_dst_port_82599(&atr_input, src_port);
-	ixgbe_atr_set_flex_byte_82599(&atr_input, flex_bytes);
-	ixgbe_atr_set_l4type_82599(&atr_input, l4type);
+	ixgbe_atr_set_src_port_82599(&atr_input, th->dest);
+	ixgbe_atr_set_dst_port_82599(&atr_input, th->source);
+	ixgbe_atr_set_flex_byte_82599(&atr_input, eth->h_proto);
+	ixgbe_atr_set_l4type_82599(&atr_input, IXGBE_ATR_L4TYPE_TCP);
 	/* src and dst are inverted, think how the receiver sees them */
-	ixgbe_atr_set_src_ipv4_82599(&atr_input, dst_ipv4_addr);
-	ixgbe_atr_set_dst_ipv4_82599(&atr_input, src_ipv4_addr);
+	ixgbe_atr_set_src_ipv4_82599(&atr_input, iph->daddr);
+	ixgbe_atr_set_dst_ipv4_82599(&atr_input, iph->saddr);
 
 	/* This assumes the Rx queue and Tx queue are bound to the same CPU */
 	ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue);
 }
 
-static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
-				 struct ixgbe_ring *tx_ring, int size)
+static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
 {
-	netif_stop_subqueue(netdev, tx_ring->queue_index);
+	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
 	/* Herbert's original patch had:
 	 *  smp_mb__after_netif_stop_queue();
 	 * but since that doesn't exist yet, just open code it. */
@@ -6240,27 +6498,29 @@
 		return -EBUSY;
 
 	/* A reprieve! - use start_queue because it doesn't call schedule */
-	netif_start_subqueue(netdev, tx_ring->queue_index);
-	++tx_ring->restart_queue;
+	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+	++tx_ring->tx_stats.restart_queue;
 	return 0;
 }
 
-static int ixgbe_maybe_stop_tx(struct net_device *netdev,
-			      struct ixgbe_ring *tx_ring, int size)
+static int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
 {
 	if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
 		return 0;
-	return __ixgbe_maybe_stop_tx(netdev, tx_ring, size);
+	return __ixgbe_maybe_stop_tx(tx_ring, size);
 }
 
 static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
 {
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
 	int txq = smp_processor_id();
-
 #ifdef IXGBE_FCOE
-	if ((skb->protocol == htons(ETH_P_FCOE)) ||
-	    (skb->protocol == htons(ETH_P_FIP))) {
+	__be16 protocol;
+
+	protocol = vlan_get_protocol(skb);
+
+	if ((protocol == htons(ETH_P_FCOE)) ||
+	    (protocol == htons(ETH_P_FIP))) {
 		if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
 			txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
 			txq += adapter->ring_feature[RING_F_FCOE].mask;
@@ -6292,10 +6552,11 @@
 	return skb_tx_hash(dev, skb);
 }
 
-netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev,
+netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 			  struct ixgbe_adapter *adapter,
 			  struct ixgbe_ring *tx_ring)
 {
+	struct net_device *netdev = tx_ring->netdev;
 	struct netdev_queue *txq;
 	unsigned int first;
 	unsigned int tx_flags = 0;
@@ -6303,6 +6564,9 @@
 	int tso;
 	int count = 0;
 	unsigned int f;
+	__be16 protocol;
+
+	protocol = vlan_get_protocol(skb);
 
 	if (vlan_tx_tag_present(skb)) {
 		tx_flags |= vlan_tx_tag_get(skb);
@@ -6323,8 +6587,8 @@
 	/* for FCoE with DCB, we force the priority to what
 	 * was specified by the switch */
 	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED &&
-	    (skb->protocol == htons(ETH_P_FCOE) ||
-	     skb->protocol == htons(ETH_P_FIP))) {
+	    (protocol == htons(ETH_P_FCOE) ||
+	     protocol == htons(ETH_P_FIP))) {
 #ifdef CONFIG_IXGBE_DCB
 		if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
 			tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
@@ -6334,7 +6598,7 @@
 		}
 #endif
 		/* flag for FCoE offloads */
-		if (skb->protocol == htons(ETH_P_FCOE))
+		if (protocol == htons(ETH_P_FCOE))
 			tx_flags |= IXGBE_TX_FLAGS_FCOE;
 	}
 #endif
@@ -6350,8 +6614,8 @@
 	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
 		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
 
-	if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
-		adapter->tx_busy++;
+	if (ixgbe_maybe_stop_tx(tx_ring, count)) {
+		tx_ring->tx_stats.tx_busy++;
 		return NETDEV_TX_BUSY;
 	}
 
@@ -6368,9 +6632,10 @@
 			tx_flags |= IXGBE_TX_FLAGS_FSO;
 #endif /* IXGBE_FCOE */
 	} else {
-		if (skb->protocol == htons(ETH_P_IP))
+		if (protocol == htons(ETH_P_IP))
 			tx_flags |= IXGBE_TX_FLAGS_IPV4;
-		tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
+		tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len,
+				protocol);
 		if (tso < 0) {
 			dev_kfree_skb_any(skb);
 			return NETDEV_TX_OK;
@@ -6378,30 +6643,30 @@
 
 		if (tso)
 			tx_flags |= IXGBE_TX_FLAGS_TSO;
-		else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
+		else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags,
+				       protocol) &&
 			 (skb->ip_summed == CHECKSUM_PARTIAL))
 			tx_flags |= IXGBE_TX_FLAGS_CSUM;
 	}
 
-	count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first);
+	count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first, hdr_len);
 	if (count) {
 		/* add the ATR filter if ATR is on */
 		if (tx_ring->atr_sample_rate) {
 			++tx_ring->atr_count;
 			if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
-			     test_bit(__IXGBE_FDIR_INIT_DONE,
-				      &tx_ring->reinit_state)) {
+			     test_bit(__IXGBE_TX_FDIR_INIT_DONE,
+				      &tx_ring->state)) {
 				ixgbe_atr(adapter, skb, tx_ring->queue_index,
-					  tx_flags);
+					  tx_flags, protocol);
 				tx_ring->atr_count = 0;
 			}
 		}
 		txq = netdev_get_tx_queue(netdev, tx_ring->queue_index);
 		txq->tx_bytes += skb->len;
 		txq->tx_packets++;
-		ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len,
-			       hdr_len);
-		ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
+		ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len);
+		ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
 
 	} else {
 		dev_kfree_skb_any(skb);
@@ -6418,7 +6683,7 @@
 	struct ixgbe_ring *tx_ring;
 
 	tx_ring = adapter->tx_ring[skb->queue_mapping];
-	return ixgbe_xmit_frame_ring(skb, netdev, adapter, tx_ring);
+	return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
 }
 
 /**
@@ -6559,20 +6824,23 @@
 
 	/* accurate rx/tx bytes/packets stats */
 	dev_txq_stats_fold(netdev, stats);
+	rcu_read_lock();
 	for (i = 0; i < adapter->num_rx_queues; i++) {
-		struct ixgbe_ring *ring = adapter->rx_ring[i];
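+		/* the ring may be freed via call_rcu() during a reconfigure;
+		 * read the pointer once and skip it if already gone
+		 */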
+		struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
 		u64 bytes, packets;
 		unsigned int start;
 
-		do {
-			start = u64_stats_fetch_begin_bh(&ring->syncp);
-			packets = ring->stats.packets;
-			bytes   = ring->stats.bytes;
-		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
-		stats->rx_packets += packets;
-		stats->rx_bytes   += bytes;
+		if (ring) {
+			do {
+				start = u64_stats_fetch_begin_bh(&ring->syncp);
+				packets = ring->stats.packets;
+				bytes   = ring->stats.bytes;
+			} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+			stats->rx_packets += packets;
+			stats->rx_bytes   += bytes;
+		}
 	}
-
+	rcu_read_unlock();
 	/* following stats updated by ixgbe_watchdog_task() */
 	stats->multicast	= netdev->stats.multicast;
 	stats->rx_errors	= netdev->stats.rx_errors;
@@ -6687,11 +6955,12 @@
 	const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
 	static int cards_found;
 	int i, err, pci_using_dac;
+	u8 part_str[IXGBE_PBANUM_LENGTH];
 	unsigned int indices = num_possible_cpus();
 #ifdef IXGBE_FCOE
 	u16 device_caps;
 #endif
-	u32 part_num, eec;
+	u32 eec;
 
 	/* Catch broken hardware that put the wrong VF device ID in
 	 * the PCIe SR-IOV capability.
@@ -6754,8 +7023,8 @@
 
 	SET_NETDEV_DEV(netdev, &pdev->dev);
 
-	pci_set_drvdata(pdev, netdev);
 	adapter = netdev_priv(netdev);
+	pci_set_drvdata(pdev, adapter);
 
 	adapter->netdev = netdev;
 	adapter->pdev = pdev;
@@ -6778,7 +7047,7 @@
 	netdev->netdev_ops = &ixgbe_netdev_ops;
 	ixgbe_set_ethtool_ops(netdev);
 	netdev->watchdog_timeo = 5 * HZ;
-	strcpy(netdev->name, pci_name(pdev));
+	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
 
 	adapter->bd_number = cards_found;
 
@@ -6828,8 +7097,14 @@
 		goto err_sw_init;
 
 	/* Make it possible for the adapter to be woken up via WOL */
-	if (adapter->hw.mac.type == ixgbe_mac_82599EB)
+	switch (adapter->hw.mac.type) {
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
 		IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
+		break;
+	default:
+		break;
+	}
 
 	/*
 	 * If there is a fan on this device and it has failed log the
@@ -6937,8 +7212,11 @@
 		goto err_eeprom;
 	}
 
-	/* power down the optics */
-	if (hw->phy.multispeed_fiber)
+	/* power down the optics for multispeed fiber and 82599 SFP+ fiber */
+	if (hw->mac.ops.disable_tx_laser &&
+	    ((hw->phy.multispeed_fiber) ||
+	     ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
+	      (hw->mac.type == ixgbe_mac_82599EB))))
 		hw->mac.ops.disable_tx_laser(hw);
 
 	init_timer(&adapter->watchdog_timer);
@@ -6953,6 +7231,18 @@
 		goto err_sw_init;
 
 	switch (pdev->device) {
+	case IXGBE_DEV_ID_82599_SFP:
+		/* Only this subdevice supports WOL */
+		if (pdev->subsystem_device == IXGBE_SUBDEV_ID_82599_SFP)
+			adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
+			                IXGBE_WUFC_MC | IXGBE_WUFC_BC);
+		break;
+	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
+		/* All except this subdevice support WOL */
+		if (pdev->subsystem_device != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
+			adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
+			                IXGBE_WUFC_MC | IXGBE_WUFC_BC);
+		break;
 	case IXGBE_DEV_ID_82599_KX4:
 		adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
 				IXGBE_WUFC_MC | IXGBE_WUFC_BC);
@@ -6976,16 +7266,17 @@
 		    hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" :
 		    "Unknown"),
 		   netdev->dev_addr);
-	ixgbe_read_pba_num_generic(hw, &part_num);
+
+	err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH);
+	if (err)
+		strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH);
 	if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
-		e_dev_info("MAC: %d, PHY: %d, SFP+: %d, "
-			   "PBA No: %06x-%03x\n",
+		e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
 			   hw->mac.type, hw->phy.type, hw->phy.sfp_type,
-			   (part_num >> 8), (part_num & 0xff));
+			   part_str);
 	else
-		e_dev_info("MAC: %d, PHY: %d, PBA No: %06x-%03x\n",
-			   hw->mac.type, hw->phy.type,
-			   (part_num >> 8), (part_num & 0xff));
+		e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
+			   hw->mac.type, hw->phy.type, part_str);
 
 	if (hw->bus.width <= ixgbe_bus_width_pcie_x4) {
 		e_dev_warn("PCI-Express bandwidth available for this card is "
@@ -7078,17 +7369,19 @@
  **/
 static void __devexit ixgbe_remove(struct pci_dev *pdev)
 {
-	struct net_device *netdev = pci_get_drvdata(pdev);
-	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+	struct net_device *netdev = adapter->netdev;
 
 	set_bit(__IXGBE_DOWN, &adapter->state);
-	/* clear the module not found bit to make sure the worker won't
-	 * reschedule
+
+	/*
+	 * The timers may rearm themselves, so explicitly prevent them
+	 * from being rescheduled.
 	 */
 	clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
 	del_timer_sync(&adapter->watchdog_timer);
-
 	del_timer_sync(&adapter->sfp_timer);
+
 	cancel_work_sync(&adapter->watchdog_task);
 	cancel_work_sync(&adapter->sfp_task);
 	cancel_work_sync(&adapter->multispeed_fiber_task);
@@ -7096,7 +7389,8 @@
 	if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
 	    adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
 		cancel_work_sync(&adapter->fdir_reinit_task);
-	flush_scheduled_work();
+	if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
+		cancel_work_sync(&adapter->check_overtemp_task);
 
 #ifdef CONFIG_IXGBE_DCA
 	if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
@@ -7149,8 +7443,8 @@
 static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
 						pci_channel_state_t state)
 {
-	struct net_device *netdev = pci_get_drvdata(pdev);
-	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+	struct net_device *netdev = adapter->netdev;
 
 	netif_device_detach(netdev);
 
@@ -7173,8 +7467,7 @@
  */
 static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
 {
-	struct net_device *netdev = pci_get_drvdata(pdev);
-	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
 	pci_ers_result_t result;
 	int err;
 
@@ -7212,8 +7505,8 @@
  */
 static void ixgbe_io_resume(struct pci_dev *pdev)
 {
-	struct net_device *netdev = pci_get_drvdata(pdev);
-	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+	struct net_device *netdev = adapter->netdev;
 
 	if (netif_running(netdev)) {
 		if (ixgbe_up(adapter)) {
@@ -7278,6 +7571,7 @@
 	dca_unregister_notify(&dca_notifier);
 #endif
 	pci_unregister_driver(&ixgbe_driver);
+	rcu_barrier(); /* Wait for completion of call_rcu() callbacks */
 }
 
 #ifdef CONFIG_IXGBE_DCA
diff --git a/drivers/net/ixgbe/ixgbe_mbx.c b/drivers/net/ixgbe/ixgbe_mbx.c
index 471f0f2..027c628 100644
--- a/drivers/net/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ixgbe/ixgbe_mbx.c
@@ -319,8 +319,14 @@
 	u32 vflre = 0;
 	s32 ret_val = IXGBE_ERR_MBX;
 
-	if (hw->mac.type == ixgbe_mac_82599EB)
+	switch (hw->mac.type) {
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
 		vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset));
+		break;
+	default:
+		break;
+	}
 
 	if (vflre & (1 << vf_shift)) {
 		ret_val = 0;
@@ -439,22 +445,26 @@
 {
 	struct ixgbe_mbx_info *mbx = &hw->mbx;
 
-	if (hw->mac.type != ixgbe_mac_82599EB)
-		return;
+	switch (hw->mac.type) {
+	case ixgbe_mac_82599EB:
+	case ixgbe_mac_X540:
+		mbx->timeout = 0;
+		mbx->usec_delay = 0;
 
-	mbx->timeout = 0;
-	mbx->usec_delay = 0;
+		mbx->size = IXGBE_VFMAILBOX_SIZE;
 
-	mbx->size = IXGBE_VFMAILBOX_SIZE;
-
-	mbx->stats.msgs_tx = 0;
-	mbx->stats.msgs_rx = 0;
-	mbx->stats.reqs = 0;
-	mbx->stats.acks = 0;
-	mbx->stats.rsts = 0;
+		mbx->stats.msgs_tx = 0;
+		mbx->stats.msgs_rx = 0;
+		mbx->stats.reqs = 0;
+		mbx->stats.acks = 0;
+		mbx->stats.rsts = 0;
+		break;
+	default:
+		break;
+	}
 }
 
-struct ixgbe_mbx_operations mbx_ops_82599 = {
+struct ixgbe_mbx_operations mbx_ops_generic = {
 	.read                   = ixgbe_read_mbx_pf,
 	.write                  = ixgbe_write_mbx_pf,
 	.read_posted            = ixgbe_read_posted_mbx,
diff --git a/drivers/net/ixgbe/ixgbe_mbx.h b/drivers/net/ixgbe/ixgbe_mbx.h
index 7e0d08f..3df9b15 100644
--- a/drivers/net/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ixgbe/ixgbe_mbx.h
@@ -88,6 +88,6 @@
 s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
 void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
 
-extern struct ixgbe_mbx_operations mbx_ops_82599;
+extern struct ixgbe_mbx_operations mbx_ops_generic;
 
 #endif /* _IXGBE_MBX_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index 6c0d42e..8f7123e 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -115,6 +115,9 @@
 	case TN1010_PHY_ID:
 		phy_type = ixgbe_phy_tn;
 		break;
+	case X540_PHY_ID:
+		phy_type = ixgbe_phy_aq;
+		break;
 	case QT2022_PHY_ID:
 		phy_type = ixgbe_phy_qt;
 		break;
@@ -425,6 +428,39 @@
 }
 
 /**
+ * ixgbe_get_copper_link_capabilities_generic - Determines link capabilities
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @autoneg: pointer to boolean, set true when auto-negotiation is supported
+ *
+ * Determines the link capabilities by reading the AUTOC register.
+ */
+s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
+                                               ixgbe_link_speed *speed,
+                                               bool *autoneg)
+{
+	s32 status = IXGBE_ERR_LINK_SETUP;
+	u16 speed_ability;
+
+	*speed = 0;
+	*autoneg = true;
+
+	status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
+	                              &speed_ability);
+
+	if (status == 0) {
+		if (speed_ability & MDIO_SPEED_10G)
+			*speed |= IXGBE_LINK_SPEED_10GB_FULL;
+		if (speed_ability & MDIO_PMA_SPEED_1000)
+			*speed |= IXGBE_LINK_SPEED_1GB_FULL;
+		if (speed_ability & MDIO_PMA_SPEED_100)
+			*speed |= IXGBE_LINK_SPEED_100_FULL;
+	}
+
+	return status;
+}
+
+/**
  *  ixgbe_reset_phy_nl - Performs a PHY reset
  *  @hw: pointer to hardware structure
  **/
@@ -1378,6 +1414,22 @@
 }
 
 /**
+ *  ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version
+ *  @hw: pointer to hardware structure
+ *  @firmware_version: pointer to the PHY Firmware Version
+ **/
+s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
+                                           u16 *firmware_version)
+{
+	s32 status = 0;
+
+	status = hw->phy.ops.read_reg(hw, AQ_FW_REV, MDIO_MMD_VEND1,
+	                              firmware_version);
+
+	return status;
+}
+
+/**
 *  ixgbe_tn_check_overtemp - Checks if an overtemp occurred.
  *  @hw: pointer to hardware structure
  *
diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h
index fb3898f..e2c6b7e 100644
--- a/drivers/net/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ixgbe/ixgbe_phy.h
@@ -96,6 +96,9 @@
                                        ixgbe_link_speed speed,
                                        bool autoneg,
                                        bool autoneg_wait_to_complete);
+s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
+                                               ixgbe_link_speed *speed,
+                                               bool *autoneg);
 
 /* PHY specific */
 s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
@@ -103,6 +106,8 @@
                              bool *link_up);
 s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
                                        u16 *firmware_version);
+s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
+                                           u16 *firmware_version);
 
 s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
 s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
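
A hypothetical caller (not from the patch) showing how the new generic
copper helper reports capabilities as a speed bitmask plus an autoneg flag:

	static void example_show_copper_speeds(struct ixgbe_hw *hw)
	{
		ixgbe_link_speed speed;
		bool autoneg;

		/* returns 0 on success, an error code if the MDIO read fails */
		if (ixgbe_get_copper_link_capabilities_generic(hw, &speed,
							       &autoneg))
			return;

		if (speed & IXGBE_LINK_SPEED_10GB_FULL)
			hw_dbg(hw, "10GBASE-T capable\n");
		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
			hw_dbg(hw, "1000BASE-T capable\n");
		if (speed & IXGBE_LINK_SPEED_100_FULL)
			hw_dbg(hw, "100BASE-TX capable\n");
	}
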
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
index 5428153..6e3e94b 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ixgbe/ixgbe_sriov.c
@@ -68,7 +68,7 @@
 	 * addresses
 	 */
 	for (i = 0; i < entries; i++) {
-		vfinfo->vf_mc_hashes[i] = hash_list[i];;
+		vfinfo->vf_mc_hashes[i] = hash_list[i];
 	}
 
 	for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
@@ -178,8 +178,7 @@
 int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
 {
 	unsigned char vf_mac_addr[6];
-	struct net_device *netdev = pci_get_drvdata(pdev);
-	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
 	unsigned int vfn = (event_mask & 0x3f);
 
 	bool enable = ((event_mask & 0x10000000U) != 0);
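
To make the mask arithmetic in ixgbe_vf_configuration() concrete (the value
below is illustrative only):

	/*
	 * event_mask = 0x10000005:
	 *   vfn    = 0x10000005 & 0x3f        -> 5    (VF index)
	 *   enable = 0x10000005 & 0x10000000U -> set  (bit 28: VF enabled)
	 */
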
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index d3cc6ce..59f6d0a 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -54,9 +54,14 @@
 #define IXGBE_DEV_ID_82599_T3_LOM        0x151C
 #define IXGBE_DEV_ID_82599_CX4           0x10F9
 #define IXGBE_DEV_ID_82599_SFP           0x10FB
+#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE       0x152A
+#define IXGBE_DEV_ID_82599_SFP_FCOE      0x1529
+#define IXGBE_SUBDEV_ID_82599_SFP        0x11A9
 #define IXGBE_DEV_ID_82599_SFP_EM        0x1507
 #define IXGBE_DEV_ID_82599_XAUI_LOM      0x10FC
 #define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8
+#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ  0x000C
+#define IXGBE_DEV_ID_X540T               0x1528
 
 /* General Registers */
 #define IXGBE_CTRL      0x00000
@@ -994,8 +999,10 @@
 /* PHY IDs*/
 #define TN1010_PHY_ID    0x00A19410
 #define TNX_FW_REV       0xB
+#define X540_PHY_ID      0x01540200
 #define QT2022_PHY_ID    0x0043A400
 #define ATH_PHY_ID       0x03429050
+#define AQ_FW_REV        0x20
 
 /* PHY Types */
 #define IXGBE_M88E1145_E_PHY_ID  0x01410CD0
@@ -1463,6 +1470,8 @@
 #define IXGBE_ANLP1_PAUSE               0x0C00
 #define IXGBE_ANLP1_SYM_PAUSE           0x0400
 #define IXGBE_ANLP1_ASM_PAUSE           0x0800
+#define IXGBE_ANLP1_AN_STATE_MASK       0x000f0000
 
 /* SW Semaphore Register bitmasks */
 #define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
@@ -1491,6 +1500,7 @@
 #define IXGBE_EEC_PRES      0x00000100 /* EEPROM Present */
 #define IXGBE_EEC_ARD       0x00000200 /* EEPROM Auto Read Done */
 #define IXGBE_EEC_FLUP      0x00800000 /* Flash update command */
+#define IXGBE_EEC_SEC1VAL   0x02000000 /* Sector 1 Valid */
 #define IXGBE_EEC_FLUDONE   0x04000000 /* Flash update done */
 /* EEPROM Addressing bits based on type (0-small, 1-large) */
 #define IXGBE_EEC_ADDR_SIZE 0x00000400
@@ -1500,12 +1510,18 @@
 #define IXGBE_EEPROM_WORD_SIZE_SHIFT  6
 #define IXGBE_EEPROM_OPCODE_BITS      8
 
+/* Part Number String Length */
+#define IXGBE_PBANUM_LENGTH 11
+
 /* Checksum and EEPROM pointers */
+#define IXGBE_PBANUM_PTR_GUARD  0xFAFA
 #define IXGBE_EEPROM_CHECKSUM   0x3F
 #define IXGBE_EEPROM_SUM        0xBABA
 #define IXGBE_PCIE_ANALOG_PTR   0x03
 #define IXGBE_ATLAS0_CONFIG_PTR 0x04
+#define IXGBE_PHY_PTR           0x04
 #define IXGBE_ATLAS1_CONFIG_PTR 0x05
+#define IXGBE_OPTION_ROM_PTR    0x05
 #define IXGBE_PCIE_GENERAL_PTR  0x06
 #define IXGBE_PCIE_CONFIG0_PTR  0x07
 #define IXGBE_PCIE_CONFIG1_PTR  0x08
@@ -2113,6 +2129,14 @@
 #define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000
 #define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000
 
+/* Flow Control Macros */
+#define PAUSE_RTT	8
+#define PAUSE_MTU(MTU)	((MTU + 1024 - 1) / 1024)
+
+#define FC_HIGH_WATER(MTU) ((((PAUSE_RTT + PAUSE_MTU(MTU)) * 144) + 99) / 100 +\
+				PAUSE_MTU(MTU))
+#define FC_LOW_WATER(MTU)  (2 * (2 * PAUSE_MTU(MTU) + PAUSE_RTT))
+
 /* Software ATR hash keys */
 #define IXGBE_ATR_BUCKET_HASH_KEY    0xE214AD3D
 #define IXGBE_ATR_SIGNATURE_HASH_KEY 0x14364D17
@@ -2164,6 +2188,7 @@
 enum ixgbe_eeprom_type {
 	ixgbe_eeprom_uninitialized = 0,
 	ixgbe_eeprom_spi,
+	ixgbe_flash,
 	ixgbe_eeprom_none /* No NVM support */
 };
 
@@ -2171,12 +2196,14 @@
 	ixgbe_mac_unknown = 0,
 	ixgbe_mac_82598EB,
 	ixgbe_mac_82599EB,
+	ixgbe_mac_X540,
 	ixgbe_num_macs
 };
 
 enum ixgbe_phy_type {
 	ixgbe_phy_unknown = 0,
 	ixgbe_phy_tn,
+	ixgbe_phy_aq,
 	ixgbe_phy_cu_unknown,
 	ixgbe_phy_qt,
 	ixgbe_phy_xaui,
@@ -2405,6 +2432,7 @@
 	s32 (*write)(struct ixgbe_hw *, u16, u16);
 	s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
 	s32 (*update_checksum)(struct ixgbe_hw *);
+	u16 (*calc_checksum)(struct ixgbe_hw *);
 };
 
 struct ixgbe_mac_operations {
@@ -2574,6 +2602,7 @@
 	u16				subsystem_vendor_id;
 	u8				revision_id;
 	bool				adapter_stopped;
+	bool				force_full_reset;
 };
 
 struct ixgbe_info {
@@ -2614,6 +2643,9 @@
 #define IXGBE_ERR_NO_SPACE                      -25
 #define IXGBE_ERR_OVERTEMP                      -26
 #define IXGBE_ERR_RAR_INDEX                     -27
+#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE        -30
+#define IXGBE_ERR_PBA_SECTION                   -31
+#define IXGBE_ERR_INVALID_ARGUMENT              -32
 #define IXGBE_NOT_IMPLEMENTED                   0x7FFFFFFF
 
 #endif /* _IXGBE_TYPE_H_ */
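
The new flow-control macros are pure integer arithmetic; for a standard
1500-byte MTU they evaluate as follows (a worked example, all divisions
truncating):

	/*
	 * PAUSE_MTU(1500)     = (1500 + 1023) / 1024           = 2
	 * FC_HIGH_WATER(1500) = (((8 + 2) * 144) + 99) / 100 + 2
	 *                     = 1539 / 100 + 2                  = 17
	 * FC_LOW_WATER(1500)  = 2 * (2 * 2 + 8)                 = 24
	 */
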
diff --git a/drivers/net/ixgbe/ixgbe_x540.c b/drivers/net/ixgbe/ixgbe_x540.c
new file mode 100644
index 0000000..cf88515
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_x540.c
@@ -0,0 +1,722 @@
+/*******************************************************************************
+
+  Intel 10 Gigabit PCI Express Linux driver
+  Copyright(c) 1999 - 2010 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+
+#include "ixgbe.h"
+#include "ixgbe_phy.h"
+#include "ixgbe_mbx.h"
+
+#define IXGBE_X540_MAX_TX_QUEUES 128
+#define IXGBE_X540_MAX_RX_QUEUES 128
+#define IXGBE_X540_RAR_ENTRIES   128
+#define IXGBE_X540_MC_TBL_SIZE   128
+#define IXGBE_X540_VFT_TBL_SIZE  128
+
+static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
+static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
+static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
+static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
+static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw);
+static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw);
+
+static enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
+{
+	return ixgbe_media_type_copper;
+}
+
+static s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
+{
+	struct ixgbe_mac_info *mac = &hw->mac;
+
+	/* Call PHY identify routine to get the phy type */
+	ixgbe_identify_phy_generic(hw);
+
+	mac->mcft_size = IXGBE_X540_MC_TBL_SIZE;
+	mac->vft_size = IXGBE_X540_VFT_TBL_SIZE;
+	mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES;
+	mac->max_rx_queues = IXGBE_X540_MAX_RX_QUEUES;
+	mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES;
+	mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
+
+	return 0;
+}
+
+/**
+ *  ixgbe_setup_mac_link_X540 - Set the auto advertised capabilities
+ *  @hw: pointer to hardware structure
+ *  @speed: new link speed
+ *  @autoneg: true if autonegotiation enabled
+ *  @autoneg_wait_to_complete: true when waiting for completion is needed
+ **/
+static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
+                                     ixgbe_link_speed speed, bool autoneg,
+                                     bool autoneg_wait_to_complete)
+{
+	return hw->phy.ops.setup_link_speed(hw, speed, autoneg,
+	                                    autoneg_wait_to_complete);
+}
+
+/**
+ *  ixgbe_reset_hw_X540 - Perform hardware reset
+ *  @hw: pointer to hardware structure
+ *
+ *  Resets the hardware by resetting the transmit and receive units, masks
+ *  and clears all interrupts, perform a PHY reset, and perform a link (MAC)
+ *  reset.
+ **/
+static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
+{
+	ixgbe_link_speed link_speed;
+	s32 status = 0;
+	u32 ctrl;
+	u32 ctrl_ext;
+	u32 reset_bit;
+	u32 i;
+	u32 autoc;
+	u32 autoc2;
+	bool link_up = false;
+
+	/* Call adapter stop to disable tx/rx and clear interrupts */
+	hw->mac.ops.stop_adapter(hw);
+
+	/*
+	 * Prevent the PCI-E bus from hanging by disabling PCI-E master
+	 * access and verify no pending requests before reset
+	 */
+	status = ixgbe_disable_pcie_master(hw);
+	if (status != 0) {
+		status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
+		hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
+	}
+
+	/*
+	 * Issue global reset to the MAC.  Needs to be SW reset if link is up.
+	 * If link reset is used when link is up, it might reset the PHY when
+	 * mng is using it.  If link is down or the flag to force full link
+	 * reset is set, then perform link reset.
+	 */
+	if (hw->force_full_reset) {
+		reset_bit = IXGBE_CTRL_LNK_RST;
+	} else {
+		hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+		if (!link_up)
+			reset_bit = IXGBE_CTRL_LNK_RST;
+		else
+			reset_bit = IXGBE_CTRL_RST;
+	}
+
+	ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+	IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | reset_bit));
+	IXGBE_WRITE_FLUSH(hw);
+
+	/* Poll for reset bit to self-clear indicating reset is complete */
+	for (i = 0; i < 10; i++) {
+		udelay(1);
+		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
+		if (!(ctrl & reset_bit))
+			break;
+	}
+	if (ctrl & reset_bit) {
+		status = IXGBE_ERR_RESET_FAILED;
+		hw_dbg(hw, "Reset polling failed to complete.\n");
+	}
+
+	/* Clear PF Reset Done bit so PF/VF Mail Ops can work */
+	ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+	ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
+	IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+
+	msleep(50);
+
+	/* Set the Rx packet buffer size. */
+	IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT);
+
+	/* Store the permanent mac address */
+	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+	/*
+	 * Store the original AUTOC/AUTOC2 values if they have not been
+	 * stored off yet.  Otherwise restore the stored original
+	 * values since the reset operation sets back to defaults.
+	 */
+	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+	autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
+	if (hw->mac.orig_link_settings_stored == false) {
+		hw->mac.orig_autoc = autoc;
+		hw->mac.orig_autoc2 = autoc2;
+		hw->mac.orig_link_settings_stored = true;
+	} else {
+		if (autoc != hw->mac.orig_autoc)
+			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
+			                IXGBE_AUTOC_AN_RESTART));
+
+		if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
+		    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
+			autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
+			autoc2 |= (hw->mac.orig_autoc2 &
+			           IXGBE_AUTOC2_UPPER_MASK);
+			IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
+		}
+	}
+
+	/*
+	 * Store MAC address from RAR0, clear receive address registers, and
+	 * clear the multicast table.  Also reset num_rar_entries to 128,
+	 * since we modify this value when programming the SAN MAC address.
+	 */
+	hw->mac.num_rar_entries = 128;
+	hw->mac.ops.init_rx_addrs(hw);
+
+	/* Store the permanent mac address */
+	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
+
+	/* Store the permanent SAN mac address */
+	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
+
+	/* Add the SAN MAC address to the RAR only if it's a valid address */
+	if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
+		hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
+		                    hw->mac.san_addr, 0, IXGBE_RAH_AV);
+
+		/* Reserve the last RAR for the SAN MAC address */
+		hw->mac.num_rar_entries--;
+	}
+
+	/* Store the alternative WWNN/WWPN prefix */
+	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
+	                           &hw->mac.wwpn_prefix);
+
+	return status;
+}
+
+/**
+ *  ixgbe_get_supported_physical_layer_X540 - Returns physical layer type
+ *  @hw: pointer to hardware structure
+ *
+ *  Determines physical layer capabilities of the current configuration.
+ **/
+static u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw)
+{
+	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
+	u16 ext_ability = 0;
+
+	hw->phy.ops.identify(hw);
+
+	hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD,
+			     &ext_ability);
+	if (ext_ability & MDIO_PMA_EXTABLE_10GBT)
+		physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
+	if (ext_ability & MDIO_PMA_EXTABLE_1000BT)
+		physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
+	if (ext_ability & MDIO_PMA_EXTABLE_100BTX)
+		physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
+
+	return physical_layer;
+}
+
+/**
+ * ixgbe_init_eeprom_params_X540 - Initialize EEPROM params
+ * @hw: pointer to hardware structure
+ **/
+static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
+{
+	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
+	u32 eec;
+	u16 eeprom_size;
+
+	if (eeprom->type == ixgbe_eeprom_uninitialized) {
+		eeprom->semaphore_delay = 10;
+		eeprom->type = ixgbe_flash;
+
+		eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+		eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
+		                    IXGBE_EEC_SIZE_SHIFT);
+		eeprom->word_size = 1 << (eeprom_size +
+		                          IXGBE_EEPROM_WORD_SIZE_SHIFT);
+
+		hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
+		        eeprom->type, eeprom->word_size);
+	}
+
+	return 0;
+}
+
+/**
+ * ixgbe_read_eerd_X540 - Read EEPROM word using EERD
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to read
+ * @data: word read from the EEPROM
+ **/
+static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
+{
+	s32 status;
+
+	if (ixgbe_acquire_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM) == 0)
+		status = ixgbe_read_eerd_generic(hw, offset, data);
+	else
+		status = IXGBE_ERR_SWFW_SYNC;
+
+	ixgbe_release_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM);
+	return status;
+}
+
+/**
+ * ixgbe_write_eewr_X540 - Write EEPROM word using EEWR
+ * @hw: pointer to hardware structure
+ * @offset: offset of word in the EEPROM to write
+ * @data: word to write to the EEPROM
+ *
+ * Write a 16 bit word to the EEPROM using the EEWR register.
+ **/
+static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
+{
+	u32 eewr;
+	s32 status;
+
+	hw->eeprom.ops.init_params(hw);
+
+	if (offset >= hw->eeprom.word_size) {
+		status = IXGBE_ERR_EEPROM;
+		goto out;
+	}
+
+	eewr = (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) |
+	       (data << IXGBE_EEPROM_RW_REG_DATA) |
+	       IXGBE_EEPROM_RW_REG_START;
+
+	if (ixgbe_acquire_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM) == 0) {
+		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
+		if (status != 0) {
+			hw_dbg(hw, "Eeprom write EEWR timed out\n");
+			goto out;
+		}
+
+		IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
+
+		status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
+		if (status != 0) {
+			hw_dbg(hw, "Eeprom write EEWR timed out\n");
+			goto out;
+		}
+	} else {
+		status = IXGBE_ERR_SWFW_SYNC;
+	}
+
+out:
+	ixgbe_release_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM);
+	return status;
+}
+
+/**
+ * ixgbe_calc_eeprom_checksum_X540 - Calculates and returns the checksum
+ * @hw: pointer to hardware structure
+ **/
+static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
+{
+	u16 i;
+	u16 j;
+	u16 checksum = 0;
+	u16 length = 0;
+	u16 pointer = 0;
+	u16 word = 0;
+
+	/* Include 0x0-0x3F in the checksum */
+	for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
+		if (hw->eeprom.ops.read(hw, i, &word) != 0) {
+			hw_dbg(hw, "EEPROM read failed\n");
+			break;
+		}
+		checksum += word;
+	}
+
+	/*
+	 * Include all data from pointers 0x3, 0x6-0xE.  This excludes the
+	 * FW, PHY module, and PCIe Expansion/Option ROM pointers.
+	 */
+	for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
+		if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
+			continue;
+
+		if (hw->eeprom.ops.read(hw, i, &pointer) != 0) {
+			hw_dbg(hw, "EEPROM read failed\n");
+			break;
+		}
+
+		/* Skip pointer section if the pointer is invalid. */
+		if (pointer == 0xFFFF || pointer == 0 ||
+		    pointer >= hw->eeprom.word_size)
+			continue;
+
+		if (hw->eeprom.ops.read(hw, pointer, &length) != 0) {
+			hw_dbg(hw, "EEPROM read failed\n");
+			break;
+		}
+
+		/* Skip pointer section if length is invalid. */
+		if (length == 0xFFFF || length == 0 ||
+		    (pointer + length) >= hw->eeprom.word_size)
+			continue;
+
+		for (j = pointer+1; j <= pointer+length; j++) {
+			if (hw->eeprom.ops.read(hw, j, &word) != 0) {
+				hw_dbg(hw, "EEPROM read failed\n");
+				break;
+			}
+			checksum += word;
+		}
+	}
+
+	checksum = (u16)IXGBE_EEPROM_SUM - checksum;
+
+	return checksum;
+}
+
+/**
+ * ixgbe_update_eeprom_checksum_X540 - Updates the EEPROM checksum and flash
+ * @hw: pointer to hardware structure
+ *
+ * After writing the EEPROM to shadow RAM using the EEWR register, software
+ * calculates the checksum, updates it in the EEPROM and instructs the
+ * hardware to update the flash.
+ **/
+static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
+{
+	s32 status;
+
+	status = ixgbe_update_eeprom_checksum_generic(hw);
+
+	if (status == 0)
+		status = ixgbe_update_flash_X540(hw);
+
+	return status;
+}
+
+/**
+ * ixgbe_update_flash_X540 - Instruct HW to copy EEPROM to Flash device
+ * @hw: pointer to hardware structure
+ *
+ * Set FLUP (bit 23) of the EEC register to instruct Hardware to copy
+ * EEPROM from shadow RAM to the flash device.
+ **/
+static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
+{
+	u32 flup;
+	s32 status = IXGBE_ERR_EEPROM;
+
+	status = ixgbe_poll_flash_update_done_X540(hw);
+	if (status == IXGBE_ERR_EEPROM) {
+		hw_dbg(hw, "Flash update time out\n");
+		goto out;
+	}
+
+	flup = IXGBE_READ_REG(hw, IXGBE_EEC) | IXGBE_EEC_FLUP;
+	IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
+
+	status = ixgbe_poll_flash_update_done_X540(hw);
+	if (status == 0)
+		hw_dbg(hw, "Flash update complete\n");
+	else
+		hw_dbg(hw, "Flash update time out\n");
+
+	if (hw->revision_id == 0) {
+		flup = IXGBE_READ_REG(hw, IXGBE_EEC);
+
+		if (flup & IXGBE_EEC_SEC1VAL) {
+			flup |= IXGBE_EEC_FLUP;
+			IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
+		}
+
+		status = ixgbe_poll_flash_update_done_X540(hw);
+		if (status == 0)
+			hw_dbg(hw, "Flash update complete\n");
+		else
+			hw_dbg(hw, "Flash update time out\n");
+
+	}
+out:
+	return status;
+}
+
+/**
+ * ixgbe_poll_flash_update_done_X540 - Poll flash update status
+ * @hw: pointer to hardware structure
+ *
+ * Polls the FLUDONE (bit 26) of the EEC Register to determine when the
+ * flash update is done.
+ **/
+static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
+{
+	u32 i;
+	u32 reg;
+	s32 status = IXGBE_ERR_EEPROM;
+
+	for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) {
+		reg = IXGBE_READ_REG(hw, IXGBE_EEC);
+		if (reg & IXGBE_EEC_FLUDONE) {
+			status = 0;
+			break;
+		}
+		udelay(5);
+	}
+	return status;
+}
+
+/**
+ * ixgbe_acquire_swfw_sync_X540 - Acquire SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to acquire
+ *
+ * Acquires the SWFW semaphore through the SW_FW_SYNC register for
+ * the specified function (CSR, PHY0, PHY1, NVM, Flash)
+ **/
+static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
+{
+	u32 swfw_sync;
+	u32 swmask = mask;
+	u32 fwmask = mask << 5;
+	u32 hwmask = 0;
+	u32 timeout = 200;
+	u32 i;
+
+	if (swmask == IXGBE_GSSR_EEP_SM)
+		hwmask = IXGBE_GSSR_FLASH_SM;
+
+	for (i = 0; i < timeout; i++) {
+		/*
+		 * SW NVM semaphore bit is used for access to all
+		 * SW_FW_SYNC bits (not just NVM)
+		 */
+		if (ixgbe_get_swfw_sync_semaphore(hw))
+			return IXGBE_ERR_SWFW_SYNC;
+
+		swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+		if (!(swfw_sync & (fwmask | swmask | hwmask))) {
+			swfw_sync |= swmask;
+			IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+			ixgbe_release_swfw_sync_semaphore(hw);
+			break;
+		} else {
+			/*
+			 * Firmware currently using resource (fwmask),
+			 * hardware currently using resource (hwmask),
+			 * or other software thread currently using
+			 * resource (swmask)
+			 */
+			ixgbe_release_swfw_sync_semaphore(hw);
+			msleep(5);
+		}
+	}
+
+	/*
+	 * If the resource is not released by the FW/HW, the SW can assume that
+	 * the FW/HW has malfunctioned. In that case the SW should set the
+	 * SW bit(s) of the requested resource(s) while ignoring the
+	 * corresponding FW/HW bits in the SW_FW_SYNC register.
+	 */
+	if (i >= timeout) {
+		swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+		if (swfw_sync & (fwmask | hwmask)) {
+			if (ixgbe_get_swfw_sync_semaphore(hw))
+				return IXGBE_ERR_SWFW_SYNC;
+
+			swfw_sync |= swmask;
+			IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+			ixgbe_release_swfw_sync_semaphore(hw);
+		}
+	}
+
+	msleep(5);
+	return 0;
+}
+
+/**
+ * ixgbe_release_swfw_sync_X540 - Release SWFW semaphore
+ * @hw: pointer to hardware structure
+ * @mask: Mask to specify which semaphore to release
+ *
+ * Releases the SWFW semaphore through the SW_FW_SYNC register
+ * for the specified function (CSR, PHY0, PHY1, NVM, Flash)
+ **/
+static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
+{
+	u32 swfw_sync;
+	u32 swmask = mask;
+
+	ixgbe_get_swfw_sync_semaphore(hw);
+
+	swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+	swfw_sync &= ~swmask;
+	IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
+
+	ixgbe_release_swfw_sync_semaphore(hw);
+	msleep(5);
+}
+
+/**
+ * ixgbe_get_swfw_sync_semaphore - Get hardware semaphore
+ * @hw: pointer to hardware structure
+ *
+ * Acquires the hardware semaphores so SW/FW can gain control of shared
+ * resources
+ **/
+static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
+{
+	s32 status = IXGBE_ERR_EEPROM;
+	u32 timeout = 2000;
+	u32 i;
+	u32 swsm;
+
+	/* Get SMBI software semaphore between device drivers first */
+	for (i = 0; i < timeout; i++) {
+		/*
+		 * If the SMBI bit is 0 when we read it, then the bit will be
+		 * set and we have the semaphore
+		 */
+		swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+		if (!(swsm & IXGBE_SWSM_SMBI)) {
+			status = 0;
+			break;
+		}
+		udelay(50);
+	}
+
+	/* Now get the semaphore between SW/FW through the REGSMP bit */
+	if (status == 0) {
+		for (i = 0; i < timeout; i++) {
+			swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+			if (!(swsm & IXGBE_SWFW_REGSMP))
+				break;
+
+			udelay(50);
+		}
+	} else {
+		hw_dbg(hw, "Software semaphore SMBI between device drivers "
+		           "not granted.\n");
+	}
+
+	return status;
+}
+
+/**
+ * ixgbe_release_swfw_sync_semaphore - Release hardware semaphore
+ * @hw: pointer to hardware structure
+ *
+ * This function clears hardware semaphore bits.
+ **/
+static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
+{
+	u32 swsm;
+
+	/* Release both semaphores by writing 0 to the bits REGSMP and SMBI */
+
+	swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+	swsm &= ~IXGBE_SWSM_SMBI;
+	IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
+
+	swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+	swsm &= ~IXGBE_SWFW_REGSMP;
+	IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swsm);
+
+	IXGBE_WRITE_FLUSH(hw);
+}
+
+static struct ixgbe_mac_operations mac_ops_X540 = {
+	.init_hw                = &ixgbe_init_hw_generic,
+	.reset_hw               = &ixgbe_reset_hw_X540,
+	.start_hw               = &ixgbe_start_hw_generic,
+	.clear_hw_cntrs         = &ixgbe_clear_hw_cntrs_generic,
+	.get_media_type         = &ixgbe_get_media_type_X540,
+	.get_supported_physical_layer =
+                                  &ixgbe_get_supported_physical_layer_X540,
+	.enable_rx_dma          = &ixgbe_enable_rx_dma_generic,
+	.get_mac_addr           = &ixgbe_get_mac_addr_generic,
+	.get_san_mac_addr       = &ixgbe_get_san_mac_addr_generic,
+	.get_device_caps        = NULL,
+	.get_wwn_prefix         = &ixgbe_get_wwn_prefix_generic,
+	.stop_adapter           = &ixgbe_stop_adapter_generic,
+	.get_bus_info           = &ixgbe_get_bus_info_generic,
+	.set_lan_id             = &ixgbe_set_lan_id_multi_port_pcie,
+	.read_analog_reg8       = NULL,
+	.write_analog_reg8      = NULL,
+	.setup_link             = &ixgbe_setup_mac_link_X540,
+	.check_link             = &ixgbe_check_mac_link_generic,
+	.get_link_capabilities  = &ixgbe_get_copper_link_capabilities_generic,
+	.led_on                 = &ixgbe_led_on_generic,
+	.led_off                = &ixgbe_led_off_generic,
+	.blink_led_start        = &ixgbe_blink_led_start_generic,
+	.blink_led_stop         = &ixgbe_blink_led_stop_generic,
+	.set_rar                = &ixgbe_set_rar_generic,
+	.clear_rar              = &ixgbe_clear_rar_generic,
+	.set_vmdq               = &ixgbe_set_vmdq_generic,
+	.clear_vmdq             = &ixgbe_clear_vmdq_generic,
+	.init_rx_addrs          = &ixgbe_init_rx_addrs_generic,
+	.update_uc_addr_list    = &ixgbe_update_uc_addr_list_generic,
+	.update_mc_addr_list    = &ixgbe_update_mc_addr_list_generic,
+	.enable_mc              = &ixgbe_enable_mc_generic,
+	.disable_mc             = &ixgbe_disable_mc_generic,
+	.clear_vfta             = &ixgbe_clear_vfta_generic,
+	.set_vfta               = &ixgbe_set_vfta_generic,
+	.fc_enable              = &ixgbe_fc_enable_generic,
+	.init_uta_tables        = &ixgbe_init_uta_tables_generic,
+	.setup_sfp              = NULL,
+};
+
+static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
+	.init_params            = &ixgbe_init_eeprom_params_X540,
+	.read                   = &ixgbe_read_eerd_X540,
+	.write                  = &ixgbe_write_eewr_X540,
+	.calc_checksum		= &ixgbe_calc_eeprom_checksum_X540,
+	.validate_checksum      = &ixgbe_validate_eeprom_checksum_generic,
+	.update_checksum        = &ixgbe_update_eeprom_checksum_X540,
+};
+
+static struct ixgbe_phy_operations phy_ops_X540 = {
+	.identify               = &ixgbe_identify_phy_generic,
+	.identify_sfp           = &ixgbe_identify_sfp_module_generic,
+	.init			= NULL,
+	.reset                  = &ixgbe_reset_phy_generic,
+	.read_reg               = &ixgbe_read_phy_reg_generic,
+	.write_reg              = &ixgbe_write_phy_reg_generic,
+	.setup_link             = &ixgbe_setup_phy_link_generic,
+	.setup_link_speed       = &ixgbe_setup_phy_link_speed_generic,
+	.read_i2c_byte          = &ixgbe_read_i2c_byte_generic,
+	.write_i2c_byte         = &ixgbe_write_i2c_byte_generic,
+	.read_i2c_eeprom        = &ixgbe_read_i2c_eeprom_generic,
+	.write_i2c_eeprom       = &ixgbe_write_i2c_eeprom_generic,
+	.check_overtemp         = &ixgbe_tn_check_overtemp,
+};
+
+struct ixgbe_info ixgbe_X540_info = {
+	.mac                    = ixgbe_mac_X540,
+	.get_invariants         = &ixgbe_get_invariants_X540,
+	.mac_ops                = &mac_ops_X540,
+	.eeprom_ops             = &eeprom_ops_X540,
+	.phy_ops                = &phy_ops_X540,
+	.mbx_ops                = &mbx_ops_generic,
+};
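
The X540 checksum code follows the usual Intel NVM convention: every
covered word plus the stored checksum word must sum, modulo 2^16, to
IXGBE_EEPROM_SUM (0xBABA), which is why ixgbe_calc_eeprom_checksum_X540()
returns IXGBE_EEPROM_SUM minus the running sum. A validation sketch under
that assumption (example_checksum_ok is a hypothetical helper):

	static bool example_checksum_ok(u16 sum_of_words, u16 stored_checksum)
	{
		/* valid when all words, checksum included, sum to 0xBABA */
		return (u16)(sum_of_words + stored_checksum) == IXGBE_EEPROM_SUM;
	}
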
diff --git a/drivers/net/ixgbevf/Makefile b/drivers/net/ixgbevf/Makefile
index dd4e0d2..1f35d22 100644
--- a/drivers/net/ixgbevf/Makefile
+++ b/drivers/net/ixgbevf/Makefile
@@ -1,7 +1,7 @@
 ################################################################################
 #
 # Intel 82599 Virtual Function driver
-# Copyright(c) 1999 - 2009 Intel Corporation.
+# Copyright(c) 1999 - 2010 Intel Corporation.
 #
 # This program is free software; you can redistribute it and/or modify it
 # under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbevf/defines.h b/drivers/net/ixgbevf/defines.h
index ca2c81f..f8a807d 100644
--- a/drivers/net/ixgbevf/defines.h
+++ b/drivers/net/ixgbevf/defines.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 82599 Virtual Function driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbevf/ethtool.c b/drivers/net/ixgbevf/ethtool.c
index 4cc817a..fa29b3c 100644
--- a/drivers/net/ixgbevf/ethtool.c
+++ b/drivers/net/ixgbevf/ethtool.c
@@ -544,7 +544,7 @@
 #define TABLE64_TEST_HI	6
 
 /* default VF register test */
-static struct ixgbevf_reg_test reg_test_vf[] = {
+static const struct ixgbevf_reg_test reg_test_vf[] = {
 	{ IXGBE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
 	{ IXGBE_VFRDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
 	{ IXGBE_VFRDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
@@ -557,19 +557,23 @@
 	{ 0, 0, 0, 0 }
 };
 
+static const u32 register_test_patterns[] = {
+	0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
+};
+
 #define REG_PATTERN_TEST(R, M, W)                                             \
 {                                                                             \
 	u32 pat, val, before;                                                 \
-	const u32 _test[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \
-	for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {                       \
+	for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) {      \
 		before = readl(adapter->hw.hw_addr + R);                      \
-		writel((_test[pat] & W), (adapter->hw.hw_addr + R));          \
+		writel((register_test_patterns[pat] & W),                     \
+		       (adapter->hw.hw_addr + R));                            \
 		val = readl(adapter->hw.hw_addr + R);                         \
-		if (val != (_test[pat] & W & M)) {                            \
+		if (val != (register_test_patterns[pat] & W & M)) {           \
 			hw_dbg(&adapter->hw,                                  \
 			"pattern test reg %04X failed: got "                  \
 			"0x%08X expected 0x%08X\n",                           \
-			R, val, (_test[pat] & W & M));                        \
+			R, val, (register_test_patterns[pat] & W & M));       \
 			*data = R;                                            \
 			writel(before, adapter->hw.hw_addr + R);              \
 			return 1;                                             \
@@ -596,7 +600,7 @@
 
 static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
 {
-	struct ixgbevf_reg_test *test;
+	const struct ixgbevf_reg_test *test;
 	u32 i;
 
 	test = reg_test_vf;
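
Open-coded for readability, one iteration of the REG_PATTERN_TEST macro
does the following (a sketch, not the driver's code; mask models M, the
readable bits, and write models W, the writable bits):

	static bool example_pattern_test_once(void __iomem *reg, u32 pat,
					      u32 mask, u32 write)
	{
		u32 before = readl(reg);
		u32 val;

		writel(pat & write, reg);
		val = readl(reg);
		if (val != (pat & write & mask)) {
			/* restore the register on mismatch, as the macro does */
			writel(before, reg);
			return false;
		}
		return true;
	}
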
diff --git a/drivers/net/ixgbevf/ixgbevf.h b/drivers/net/ixgbevf/ixgbevf.h
index da4033c..0cd6abc 100644
--- a/drivers/net/ixgbevf/ixgbevf.h
+++ b/drivers/net/ixgbevf/ixgbevf.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 82599 Virtual Function driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
index dc03c96..809e38c 100644
--- a/drivers/net/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 82599 Virtual Function driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -51,9 +51,10 @@
 static const char ixgbevf_driver_string[] =
 	"Intel(R) 82599 Virtual Function";
 
-#define DRV_VERSION "1.0.0-k0"
+#define DRV_VERSION "1.0.12-k0"
 const char ixgbevf_driver_version[] = DRV_VERSION;
-static char ixgbevf_copyright[] = "Copyright (c) 2009 Intel Corporation.";
+static char ixgbevf_copyright[] =
+	"Copyright (c) 2009 - 2010 Intel Corporation.";
 
 static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
 	[board_82599_vf] = &ixgbevf_vf_info,
@@ -2488,10 +2489,9 @@
 	int size;
 
 	size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
-	tx_ring->tx_buffer_info = vmalloc(size);
+	tx_ring->tx_buffer_info = vzalloc(size);
 	if (!tx_ring->tx_buffer_info)
 		goto err;
-	memset(tx_ring->tx_buffer_info, 0, size);
 
 	/* round up to nearest 4K */
 	tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
@@ -2555,14 +2555,13 @@
 	int size;
 
 	size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
-	rx_ring->rx_buffer_info = vmalloc(size);
+	rx_ring->rx_buffer_info = vzalloc(size);
 	if (!rx_ring->rx_buffer_info) {
 		hw_dbg(&adapter->hw,
 		       "Unable to vmalloc buffer memory for "
 		       "the receive descriptor ring\n");
 		goto alloc_failed;
 	}
-	memset(rx_ring->rx_buffer_info, 0, size);
 
 	/* Round up to nearest 4K */
 	rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
@@ -3424,10 +3423,6 @@
 	if (hw->mac.ops.get_bus_info)
 		hw->mac.ops.get_bus_info(hw);
 
-
-	netif_carrier_off(netdev);
-	netif_tx_stop_all_queues(netdev);
-
 	strcpy(netdev->name, "eth%d");
 
 	err = register_netdev(netdev);
@@ -3436,6 +3431,8 @@
 
 	adapter->netdev_registered = true;
 
+	netif_carrier_off(netdev);
+
 	ixgbevf_init_last_counter_stats(adapter);
 
 	/* print the MAC address */
@@ -3487,10 +3484,9 @@
 
 	del_timer_sync(&adapter->watchdog_timer);
 
+	cancel_work_sync(&adapter->reset_task);
 	cancel_work_sync(&adapter->watchdog_task);
 
-	flush_scheduled_work();
-
 	if (adapter->netdev_registered) {
 		unregister_netdev(netdev);
 		adapter->netdev_registered = false;
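
The vzalloc() conversions in this file (and in netxen further below) rely
on vzalloc(size) being equivalent to vmalloc(size) followed by
memset(ptr, 0, size); collapsing the pair removes the chance of forgetting
the memset:

	/* before: allocate, then remember to zero */
	buf = vmalloc(size);
	if (!buf)
		return -ENOMEM;
	memset(buf, 0, size);

	/* after: zeroed on successful allocation */
	buf = vzalloc(size);
	if (!buf)
		return -ENOMEM;
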
diff --git a/drivers/net/ixgbevf/mbx.c b/drivers/net/ixgbevf/mbx.c
index 84ac486..7a88331 100644
--- a/drivers/net/ixgbevf/mbx.c
+++ b/drivers/net/ixgbevf/mbx.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 82599 Virtual Function driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbevf/mbx.h b/drivers/net/ixgbevf/mbx.h
index 8c063be..b2b5bf5 100644
--- a/drivers/net/ixgbevf/mbx.h
+++ b/drivers/net/ixgbevf/mbx.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 82599 Virtual Function driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbevf/regs.h b/drivers/net/ixgbevf/regs.h
index 12f7596..fb80ca1 100644
--- a/drivers/net/ixgbevf/regs.h
+++ b/drivers/net/ixgbevf/regs.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 82599 Virtual Function driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbevf/vf.c b/drivers/net/ixgbevf/vf.c
index bfe42c1..971019d 100644
--- a/drivers/net/ixgbevf/vf.c
+++ b/drivers/net/ixgbevf/vf.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 82599 Virtual Function driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbevf/vf.h b/drivers/net/ixgbevf/vf.h
index 61f9dc8..144c99d 100644
--- a/drivers/net/ixgbevf/vf.h
+++ b/drivers/net/ixgbevf/vf.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 82599 Virtual Function driver
-  Copyright(c) 1999 - 2009 Intel Corporation.
+  Copyright(c) 1999 - 2010 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index d85edf3..2411e72 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -2076,12 +2076,11 @@
 	}
 
 	if (new_mtu > 1900) {
-		netdev->features &= ~(NETIF_F_HW_CSUM |
-				NETIF_F_TSO |
-				NETIF_F_TSO6);
+		netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+				NETIF_F_TSO | NETIF_F_TSO6);
 	} else {
 		if (test_bit(JME_FLAG_TXCSUM, &jme->flags))
-			netdev->features |= NETIF_F_HW_CSUM;
+			netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
 		if (test_bit(JME_FLAG_TSO, &jme->flags))
 			netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
 	}
@@ -2514,10 +2513,12 @@
 	if (on) {
 		set_bit(JME_FLAG_TXCSUM, &jme->flags);
 		if (netdev->mtu <= 1900)
-			netdev->features |= NETIF_F_HW_CSUM;
+			netdev->features |=
+				NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
 	} else {
 		clear_bit(JME_FLAG_TXCSUM, &jme->flags);
-		netdev->features &= ~NETIF_F_HW_CSUM;
+		netdev->features &=
+				~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
 	}
 
 	return 0;
@@ -2797,7 +2798,8 @@
 	netdev->netdev_ops = &jme_netdev_ops;
 	netdev->ethtool_ops		= &jme_ethtool_ops;
 	netdev->watchdog_timeo		= TX_TIMEOUT;
-	netdev->features		=	NETIF_F_HW_CSUM |
+	netdev->features		=	NETIF_F_IP_CSUM |
+						NETIF_F_IPV6_CSUM |
 						NETIF_F_SG |
 						NETIF_F_TSO |
 						NETIF_F_TSO6 |
@@ -2955,11 +2957,7 @@
 	 * Tell stack that we are not ready to work until open()
 	 */
 	netif_carrier_off(netdev);
-	netif_stop_queue(netdev);
 
-	/*
-	 * Register netdev
-	 */
 	rc = register_netdev(netdev);
 	if (rc) {
 		pr_err("Cannot register net device\n");
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c
index 51919fc..0fa4a98 100644
--- a/drivers/net/ks8851.c
+++ b/drivers/net/ks8851.c
@@ -1545,6 +1545,37 @@
 
 /* driver bus management functions */
 
+#ifdef CONFIG_PM
+static int ks8851_suspend(struct spi_device *spi, pm_message_t state)
+{
+	struct ks8851_net *ks = dev_get_drvdata(&spi->dev);
+	struct net_device *dev = ks->netdev;
+
+	if (netif_running(dev)) {
+		netif_device_detach(dev);
+		ks8851_net_stop(dev);
+	}
+
+	return 0;
+}
+
+static int ks8851_resume(struct spi_device *spi)
+{
+	struct ks8851_net *ks = dev_get_drvdata(&spi->dev);
+	struct net_device *dev = ks->netdev;
+
+	if (netif_running(dev)) {
+		ks8851_net_open(dev);
+		netif_device_attach(dev);
+	}
+
+	return 0;
+}
+#else
+#define ks8851_suspend NULL
+#define ks8851_resume NULL
+#endif
+
 static int __devinit ks8851_probe(struct spi_device *spi)
 {
 	struct net_device *ndev;
@@ -1679,6 +1710,8 @@
 	},
 	.probe = ks8851_probe,
 	.remove = __devexit_p(ks8851_remove),
+	.suspend = ks8851_suspend,
+	.resume = ks8851_resume,
 };
 
 static int __init ks8851_init(void)
diff --git a/drivers/net/lance.c b/drivers/net/lance.c
index f06296b..02336ed 100644
--- a/drivers/net/lance.c
+++ b/drivers/net/lance.c
@@ -207,7 +207,7 @@
 #define LANCE_BUS_IF 0x16
 #define LANCE_TOTAL_SIZE 0x18
 
-#define TX_TIMEOUT	20
+#define TX_TIMEOUT	(HZ/5)
 
 /* The LANCE Rx and Tx ring descriptors. */
 struct lance_rx_head {
diff --git a/drivers/net/lib82596.c b/drivers/net/lib82596.c
index c27f429..9e04289 100644
--- a/drivers/net/lib82596.c
+++ b/drivers/net/lib82596.c
@@ -161,7 +161,7 @@
 #define	 RX_SUSPEND	0x0030
 #define	 RX_ABORT	0x0040
 
-#define TX_TIMEOUT	5
+#define TX_TIMEOUT	(HZ/20)
 
 
 struct i596_reg {
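
The TX_TIMEOUT changes in lance.c and lib82596.c fix a units problem:
watchdog_timeo is measured in jiffies, so a bare literal scales with
CONFIG_HZ, while a fraction of HZ pins the wall-clock value:

	/*
	 * HZ = 1000:  HZ/20 = 50 jiffies = 50 ms   (intended)
	 * HZ =  100:  HZ/20 =  5 jiffies = 50 ms   (still intended)
	 * old literal 5:  5 ms at HZ=1000 but 50 ms at HZ=100
	 */
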
diff --git a/drivers/net/lib8390.c b/drivers/net/lib8390.c
index e7030ce..da74db4 100644
--- a/drivers/net/lib8390.c
+++ b/drivers/net/lib8390.c
@@ -203,7 +203,7 @@
 static int __ei_open(struct net_device *dev)
 {
 	unsigned long flags;
-	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+	struct ei_device *ei_local = netdev_priv(dev);
 
 	if (dev->watchdog_timeo <= 0)
 		 dev->watchdog_timeo = TX_TIMEOUT;
@@ -231,7 +231,7 @@
  */
 static int __ei_close(struct net_device *dev)
 {
-	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+	struct ei_device *ei_local = netdev_priv(dev);
 	unsigned long flags;
 
 	/*
@@ -256,7 +256,7 @@
 static void __ei_tx_timeout(struct net_device *dev)
 {
 	unsigned long e8390_base = dev->base_addr;
-	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+	struct ei_device *ei_local = netdev_priv(dev);
 	int txsr, isr, tickssofar = jiffies - dev_trans_start(dev);
 	unsigned long flags;
 
@@ -303,7 +303,7 @@
 				   struct net_device *dev)
 {
 	unsigned long e8390_base = dev->base_addr;
-	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+	struct ei_device *ei_local = netdev_priv(dev);
 	int send_length = skb->len, output_page;
 	unsigned long flags;
 	char buf[ETH_ZLEN];
@@ -592,7 +592,7 @@
 static void ei_tx_intr(struct net_device *dev)
 {
 	unsigned long e8390_base = dev->base_addr;
-	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+	struct ei_device *ei_local = netdev_priv(dev);
 	int status = ei_inb(e8390_base + EN0_TSR);
 
 	ei_outb_p(ENISR_TX, e8390_base + EN0_ISR); /* Ack intr. */
@@ -675,7 +675,7 @@
 static void ei_receive(struct net_device *dev)
 {
 	unsigned long e8390_base = dev->base_addr;
-	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+	struct ei_device *ei_local = netdev_priv(dev);
 	unsigned char rxing_page, this_frame, next_frame;
 	unsigned short current_offset;
 	int rx_pkt_count = 0;
@@ -879,7 +879,7 @@
 static struct net_device_stats *__ei_get_stats(struct net_device *dev)
 {
 	unsigned long ioaddr = dev->base_addr;
-	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+	struct ei_device *ei_local = netdev_priv(dev);
 	unsigned long flags;
 
 	/* If the card is stopped, just return the present stats. */
@@ -927,7 +927,7 @@
 {
 	unsigned long e8390_base = dev->base_addr;
 	int i;
-	struct ei_device *ei_local = (struct ei_device*)netdev_priv(dev);
+	struct ei_device *ei_local = netdev_priv(dev);
 
 	if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI)))
 	{
@@ -981,7 +981,7 @@
 static void __ei_set_multicast_list(struct net_device *dev)
 {
 	unsigned long flags;
-	struct ei_device *ei_local = (struct ei_device*)netdev_priv(dev);
+	struct ei_device *ei_local = netdev_priv(dev);
 
 	spin_lock_irqsave(&ei_local->page_lock, flags);
 	do_set_multicast_list(dev);
@@ -998,7 +998,7 @@
 
 static void ethdev_setup(struct net_device *dev)
 {
-	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+	struct ei_device *ei_local = netdev_priv(dev);
 	if (ei_debug > 1)
 		printk(version);
 
@@ -1036,7 +1036,7 @@
 static void __NS8390_init(struct net_device *dev, int startp)
 {
 	unsigned long e8390_base = dev->base_addr;
-	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+	struct ei_device *ei_local = netdev_priv(dev);
 	int i;
 	int endcfg = ei_local->word16
 	    ? (0x48 | ENDCFG_WTS | (ei_local->bigendian ? ENDCFG_BOS : 0))
@@ -1099,7 +1099,7 @@
 								int start_page)
 {
 	unsigned long e8390_base = dev->base_addr;
- 	struct ei_device *ei_local __attribute((unused)) = (struct ei_device *) netdev_priv(dev);
+ 	struct ei_device *ei_local __attribute((unused)) = netdev_priv(dev);
 
 	ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD);
 
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 0fc9dc7..6ed577b 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -38,6 +38,7 @@
 	struct hlist_head	vlan_hash[MACVLAN_HASH_SIZE];
 	struct list_head	vlans;
 	struct rcu_head		rcu;
+	bool 			passthru;
 };
 
 #define macvlan_port_get_rcu(dev) \
@@ -169,6 +170,7 @@
 			macvlan_broadcast(skb, port, NULL,
 					  MACVLAN_MODE_PRIVATE |
 					  MACVLAN_MODE_VEPA    |
+					  MACVLAN_MODE_PASSTHRU|
 					  MACVLAN_MODE_BRIDGE);
 		else if (src->mode == MACVLAN_MODE_VEPA)
 			/* flood to everyone except source */
@@ -185,7 +187,10 @@
 		return skb;
 	}
 
-	vlan = macvlan_hash_lookup(port, eth->h_dest);
+	if (port->passthru)
+		vlan = list_first_entry(&port->vlans, struct macvlan_dev, list);
+	else
+		vlan = macvlan_hash_lookup(port, eth->h_dest);
 	if (vlan == NULL)
 		return skb;
 
@@ -243,18 +248,22 @@
 netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
 			       struct net_device *dev)
 {
-	int i = skb_get_queue_mapping(skb);
-	struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
 	unsigned int len = skb->len;
 	int ret;
+	const struct macvlan_dev *vlan = netdev_priv(dev);
 
 	ret = macvlan_queue_xmit(skb, dev);
 	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
-		txq->tx_packets++;
-		txq->tx_bytes += len;
-	} else
-		txq->tx_dropped++;
+		struct macvlan_pcpu_stats *pcpu_stats;
 
+		pcpu_stats = this_cpu_ptr(vlan->pcpu_stats);
+		u64_stats_update_begin(&pcpu_stats->syncp);
+		pcpu_stats->tx_packets++;
+		pcpu_stats->tx_bytes += len;
+		u64_stats_update_end(&pcpu_stats->syncp);
+	} else {
+		this_cpu_inc(vlan->pcpu_stats->tx_dropped);
+	}
 	return ret;
 }
 EXPORT_SYMBOL_GPL(macvlan_start_xmit);
@@ -284,6 +293,11 @@
 	struct net_device *lowerdev = vlan->lowerdev;
 	int err;
 
+	if (vlan->port->passthru) {
+		dev_set_promiscuity(lowerdev, 1);
+		goto hash_add;
+	}
+
 	err = -EBUSY;
 	if (macvlan_addr_busy(vlan->port, dev->dev_addr))
 		goto out;
@@ -296,6 +310,8 @@
 		if (err < 0)
 			goto del_unicast;
 	}
+
+hash_add:
 	macvlan_hash_add(vlan);
 	return 0;
 
@@ -310,12 +326,18 @@
 	struct macvlan_dev *vlan = netdev_priv(dev);
 	struct net_device *lowerdev = vlan->lowerdev;
 
+	if (vlan->port->passthru) {
+		dev_set_promiscuity(lowerdev, -1);
+		goto hash_del;
+	}
+
 	dev_mc_unsync(lowerdev, dev);
 	if (dev->flags & IFF_ALLMULTI)
 		dev_set_allmulti(lowerdev, -1);
 
 	dev_uc_del(lowerdev, dev->dev_addr);
 
+hash_del:
 	macvlan_hash_del(vlan);
 	return 0;
 }
@@ -414,14 +436,15 @@
 	dev->state		= (dev->state & ~MACVLAN_STATE_MASK) |
 				  (lowerdev->state & MACVLAN_STATE_MASK);
 	dev->features 		= lowerdev->features & MACVLAN_FEATURES;
+	dev->features		|= NETIF_F_LLTX;
 	dev->gso_max_size	= lowerdev->gso_max_size;
 	dev->iflink		= lowerdev->ifindex;
 	dev->hard_header_len	= lowerdev->hard_header_len;
 
 	macvlan_set_lockdep_class(dev);
 
-	vlan->rx_stats = alloc_percpu(struct macvlan_rx_stats);
-	if (!vlan->rx_stats)
+	vlan->pcpu_stats = alloc_percpu(struct macvlan_pcpu_stats);
+	if (!vlan->pcpu_stats)
 		return -ENOMEM;
 
 	return 0;
@@ -431,7 +454,7 @@
 {
 	struct macvlan_dev *vlan = netdev_priv(dev);
 
-	free_percpu(vlan->rx_stats);
+	free_percpu(vlan->pcpu_stats);
 }
 
 static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
@@ -439,33 +462,38 @@
 {
 	struct macvlan_dev *vlan = netdev_priv(dev);
 
-	dev_txq_stats_fold(dev, stats);
-
-	if (vlan->rx_stats) {
-		struct macvlan_rx_stats *p, accum = {0};
-		u64 rx_packets, rx_bytes, rx_multicast;
+	if (vlan->pcpu_stats) {
+		struct macvlan_pcpu_stats *p;
+		u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
+		u32 rx_errors = 0, tx_dropped = 0;
 		unsigned int start;
 		int i;
 
 		for_each_possible_cpu(i) {
-			p = per_cpu_ptr(vlan->rx_stats, i);
+			p = per_cpu_ptr(vlan->pcpu_stats, i);
 			do {
 				start = u64_stats_fetch_begin_bh(&p->syncp);
 				rx_packets	= p->rx_packets;
 				rx_bytes	= p->rx_bytes;
 				rx_multicast	= p->rx_multicast;
+				tx_packets	= p->tx_packets;
+				tx_bytes	= p->tx_bytes;
 			} while (u64_stats_fetch_retry_bh(&p->syncp, start));
-			accum.rx_packets	+= rx_packets;
-			accum.rx_bytes		+= rx_bytes;
-			accum.rx_multicast	+= rx_multicast;
-			/* rx_errors is an ulong, updated without syncp protection */
-			accum.rx_errors		+= p->rx_errors;
+
+			stats->rx_packets	+= rx_packets;
+			stats->rx_bytes		+= rx_bytes;
+			stats->multicast	+= rx_multicast;
+			stats->tx_packets	+= tx_packets;
+			stats->tx_bytes		+= tx_bytes;
+			/* rx_errors & tx_dropped are u32, updated
+			 * without syncp protection.
+			 */
+			rx_errors	+= p->rx_errors;
+			tx_dropped	+= p->tx_dropped;
 		}
-		stats->rx_packets = accum.rx_packets;
-		stats->rx_bytes   = accum.rx_bytes;
-		stats->rx_errors  = accum.rx_errors;
-		stats->rx_dropped = accum.rx_errors;
-		stats->multicast  = accum.rx_multicast;
+		stats->rx_errors	= rx_errors;
+		stats->rx_dropped	= rx_errors;
+		stats->tx_dropped	= tx_dropped;
 	}
 	return stats;
 }
@@ -549,6 +577,7 @@
 	if (port == NULL)
 		return -ENOMEM;
 
+	port->passthru = false;
 	port->dev = dev;
 	INIT_LIST_HEAD(&port->vlans);
 	for (i = 0; i < MACVLAN_HASH_SIZE; i++)
@@ -593,6 +622,7 @@
 		case MACVLAN_MODE_PRIVATE:
 		case MACVLAN_MODE_VEPA:
 		case MACVLAN_MODE_BRIDGE:
+		case MACVLAN_MODE_PASSTHRU:
 			break;
 		default:
 			return -EINVAL;
@@ -601,25 +631,6 @@
 	return 0;
 }
 
-static int macvlan_get_tx_queues(struct net *net,
-				 struct nlattr *tb[],
-				 unsigned int *num_tx_queues,
-				 unsigned int *real_num_tx_queues)
-{
-	struct net_device *real_dev;
-
-	if (!tb[IFLA_LINK])
-		return -EINVAL;
-
-	real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
-	if (!real_dev)
-		return -ENODEV;
-
-	*num_tx_queues      = real_dev->num_tx_queues;
-	*real_num_tx_queues = real_dev->real_num_tx_queues;
-	return 0;
-}
-
 int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
 			   struct nlattr *tb[], struct nlattr *data[],
 			   int (*receive)(struct sk_buff *skb),
@@ -661,6 +672,10 @@
 	}
 	port = macvlan_port_get(lowerdev);
 
+	/* Only 1 macvlan device can be created in passthru mode */
+	if (port->passthru)
+		return -EINVAL;
+
 	vlan->lowerdev = lowerdev;
 	vlan->dev      = dev;
 	vlan->port     = port;
@@ -671,6 +686,13 @@
 	if (data && data[IFLA_MACVLAN_MODE])
 		vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
 
+	if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
+		if (!list_empty(&port->vlans))
+			return -EINVAL;
+		port->passthru = true;
+		memcpy(dev->dev_addr, lowerdev->dev_addr, ETH_ALEN);
+	}
+
 	err = register_netdevice(dev);
 	if (err < 0)
 		goto destroy_port;
@@ -743,7 +765,6 @@
 {
 	/* common fields */
 	ops->priv_size		= sizeof(struct macvlan_dev);
-	ops->get_tx_queues	= macvlan_get_tx_queues;
 	ops->validate		= macvlan_validate;
 	ops->maxtype		= IFLA_MACVLAN_MAX;
 	ops->policy		= macvlan_policy;
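
The macvlan stats rework above is an instance of the standard per-CPU
u64_stats pattern: writers bump per-CPU counters inside a
u64_stats_update_begin()/end() pair, and readers retry with
u64_stats_fetch_begin_bh()/u64_stats_fetch_retry_bh() until they observe a
consistent snapshot. Distilled to its essentials (field names as in the
patch; surrounding declarations elided):

	/* writer, per CPU, lock-free */
	struct macvlan_pcpu_stats *s = this_cpu_ptr(vlan->pcpu_stats);

	u64_stats_update_begin(&s->syncp);
	s->tx_packets++;
	s->tx_bytes += len;
	u64_stats_update_end(&s->syncp);

	/* reader, retried until the seqcount is stable */
	do {
		start = u64_stats_fetch_begin_bh(&s->syncp);
		packets = s->tx_packets;
	} while (u64_stats_fetch_retry_bh(&s->syncp, start));
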
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index dd2b6a7..02076e1 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -1514,11 +1514,6 @@
 	return genphy_restart_aneg(mp->phy);
 }
 
-static u32 mv643xx_eth_get_link(struct net_device *dev)
-{
-	return !!netif_carrier_ok(dev);
-}
-
 static int
 mv643xx_eth_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
 {
@@ -1658,7 +1653,7 @@
 	.set_settings		= mv643xx_eth_set_settings,
 	.get_drvinfo		= mv643xx_eth_get_drvinfo,
 	.nway_reset		= mv643xx_eth_nway_reset,
-	.get_link		= mv643xx_eth_get_link,
+	.get_link		= ethtool_op_get_link,
 	.get_coalesce		= mv643xx_eth_get_coalesce,
 	.set_coalesce		= mv643xx_eth_set_coalesce,
 	.get_ringparam		= mv643xx_eth_get_ringparam,
@@ -2983,7 +2978,7 @@
 	unregister_netdev(mp->dev);
 	if (mp->phy != NULL)
 		phy_detach(mp->phy);
-	flush_scheduled_work();
+	cancel_work_sync(&mp->tx_timeout_task);
 	free_netdev(mp->dev);
 
 	platform_set_drvdata(pdev, NULL);
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 8524cc4..1ce0207 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -4067,7 +4067,7 @@
 	if (mgp == NULL)
 		return;
 
-	flush_scheduled_work();
+	cancel_work_sync(&mgp->watchdog_work);
 	netdev = mgp->dev;
 	unregister_netdev(netdev);
 
diff --git a/drivers/net/ne-h8300.c b/drivers/net/ne-h8300.c
index e0b0ef1..30be8c6 100644
--- a/drivers/net/ne-h8300.c
+++ b/drivers/net/ne-h8300.c
@@ -86,7 +86,7 @@
 
 static int __init init_reg_offset(struct net_device *dev,unsigned long base_addr)
 {
-	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+	struct ei_device *ei_local = netdev_priv(dev);
 	int i;
 	unsigned char bus_width;
 
@@ -218,7 +218,7 @@
 	int start_page, stop_page;
 	int reg0, ret;
 	static unsigned version_printed;
-	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+	struct ei_device *ei_local = netdev_priv(dev);
 	unsigned char bus_width;
 
 	if (!request_region(ioaddr, NE_IO_EXTENT, DRV_NAME))
@@ -371,7 +371,7 @@
 static void ne_reset_8390(struct net_device *dev)
 {
 	unsigned long reset_start_time = jiffies;
-	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+	struct ei_device *ei_local = netdev_priv(dev);
 
 	if (ei_debug > 1)
 		printk(KERN_DEBUG "resetting the 8390 t=%ld...", jiffies);
@@ -397,7 +397,7 @@
 
 static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
 {
-	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+	struct ei_device *ei_local = netdev_priv(dev);
 	/* This *shouldn't* happen. If it does, it's the last thing you'll see */
 
 	if (ei_status.dmaing)
@@ -437,7 +437,7 @@
 
 static void ne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
 {
-	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+	struct ei_device *ei_local = netdev_priv(dev);
 #ifdef NE_SANITY_CHECK
 	int xfer_count = count;
 #endif
@@ -507,7 +507,7 @@
 static void ne_block_output(struct net_device *dev, int count,
 		const unsigned char *buf, const int start_page)
 {
-	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+	struct ei_device *ei_local = netdev_priv(dev);
 	unsigned long dma_start;
 #ifdef NE_SANITY_CHECK
 	int retries = 0;
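
The casts dropped throughout this file (and in axnet_cs, qla3xxx and qlge later in this series) are redundant: netdev_priv() returns void *, and C converts void * to any object-pointer type implicitly. A standalone illustration of the language rule; the struct and helper here are stand-ins, not kernel code:

	#include <stdlib.h>

	struct ei_device { int word16; };

	/* Stand-in for netdev_priv(), which likewise returns void *. */
	static void *fake_netdev_priv(void *dev) { return dev; }

	int main(void)
	{
		void *dev = malloc(sizeof(struct ei_device));

		/* No cast needed: void * converts implicitly in C. */
		struct ei_device *ei_local = fake_netdev_priv(dev);

		ei_local->word16 = 1;
		free(dev);
		return 0;
	}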
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 95fe552..731077d 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -214,13 +214,12 @@
 	tx_ring->num_desc = adapter->num_txd;
 	tx_ring->txq = netdev_get_tx_queue(netdev, 0);
 
-	cmd_buf_arr = vmalloc(TX_BUFF_RINGSIZE(tx_ring));
+	cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring));
 	if (cmd_buf_arr == NULL) {
 		dev_err(&pdev->dev, "%s: failed to allocate cmd buffer ring\n",
 		       netdev->name);
 		goto err_out;
 	}
-	memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
 	tx_ring->cmd_buf_arr = cmd_buf_arr;
 
 	recv_ctx = &adapter->recv_ctx;
@@ -279,8 +278,7 @@
 			break;
 
 		}
-		rds_ring->rx_buf_arr = (struct netxen_rx_buffer *)
-			vmalloc(RCV_BUFF_RINGSIZE(rds_ring));
+		rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring));
 		if (rds_ring->rx_buf_arr == NULL) {
 			printk(KERN_ERR "%s: Failed to allocate "
 				"rx buffer ring %d\n",
@@ -288,7 +286,6 @@
 			/* free whatever was already allocated */
 			goto err_out;
 		}
-		memset(rds_ring->rx_buf_arr, 0, RCV_BUFF_RINGSIZE(rds_ring));
 		INIT_LIST_HEAD(&rds_ring->free_list);
 		/*
 		 * Now go through all of them, set reference handles
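
The vzalloc() conversions in this hunk (and in pch_gbe, qlcnic and pptp below) are behaviour-preserving: vzalloc() exists precisely to fold the vmalloc()-then-memset() pair into one call, and pptp's open-coded __vmalloc(..., GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL) is the same thing spelled by hand. A sketch of the equivalence (kernel-style, illustrative only):

	#include <linux/vmalloc.h>
	#include <linux/string.h>

	/* Before: allocate, then zero by hand. */
	static void *ring_alloc_old(size_t size)
	{
		void *p = vmalloc(size);

		if (p)
			memset(p, 0, size);
		return p;
	}

	/* After: vzalloc() hands back already-zeroed memory. */
	static void *ring_alloc_new(size_t size)
	{
		return vzalloc(size);
	}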
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index a75ba95..ceeaac9 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -41,9 +41,6 @@
 MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Converged Ethernet Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
-MODULE_FIRMWARE(NX_P2_MN_ROMIMAGE_NAME);
-MODULE_FIRMWARE(NX_P3_CT_ROMIMAGE_NAME);
-MODULE_FIRMWARE(NX_P3_MN_ROMIMAGE_NAME);
 MODULE_FIRMWARE(NX_UNIFIED_ROMIMAGE_NAME);
 
 char netxen_nic_driver_name[] = "netxen_nic";
@@ -1280,6 +1277,7 @@
 	int i = 0, err;
 	int pci_func_id = PCI_FUNC(pdev->devfn);
 	uint8_t revision_id;
+	u32 val;
 
 	if (pdev->revision >= NX_P3_A0 && pdev->revision <= NX_P3_B1) {
 		pr_warning("%s: chip revisions between 0x%x-0x%x "
@@ -1355,8 +1353,9 @@
 		break;
 	}
 
-	if (reset_devices) {
-		if (adapter->portnum == 0) {
+	if (adapter->portnum == 0) {
+		val = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
+		if (val != 0xffffffff && val != 0) {
 			NXWR32(adapter, NX_CRB_DEV_REF_COUNT, 0);
 			adapter->need_fw_reset = 1;
 		}
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 781e368..f64c424 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -9917,7 +9917,7 @@
 	if (!netif_running(dev))
 		return 0;
 
-	flush_scheduled_work();
+	flush_work_sync(&np->reset_task);
 	niu_netif_stop(np);
 
 	del_timer_sync(&np->timer);
diff --git a/drivers/net/pch_gbe/pch_gbe_ethtool.c b/drivers/net/pch_gbe/pch_gbe_ethtool.c
index c8cc32c..c8c873b 100644
--- a/drivers/net/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/pch_gbe/pch_gbe_ethtool.c
@@ -469,18 +469,6 @@
 }
 
 /**
- * pch_gbe_get_tx_csum - Report whether transmit checksums are turned on or off
- * @netdev:  Network interface device structure
- * Returns
- *	true(1):  Checksum On
- *	false(0): Checksum Off
- */
-static u32 pch_gbe_get_tx_csum(struct net_device *netdev)
-{
-	return (netdev->features & NETIF_F_HW_CSUM) != 0;
-}
-
-/**
  * pch_gbe_set_tx_csum - Turn transmit checksums on or off
  * @netdev: Network interface device structure
  * @data:   Checksum on[true] or off[false]
@@ -493,11 +481,7 @@
 	struct pch_gbe_adapter *adapter = netdev_priv(netdev);
 
 	adapter->tx_csum = data;
-	if (data)
-		netdev->features |= NETIF_F_HW_CSUM;
-	else
-		netdev->features &= ~NETIF_F_HW_CSUM;
-	return 0;
+	return ethtool_op_set_tx_ipv6_csum(netdev, data);
 }
 
 /**
@@ -572,7 +556,6 @@
 	.set_pauseparam = pch_gbe_set_pauseparam,
 	.get_rx_csum = pch_gbe_get_rx_csum,
 	.set_rx_csum = pch_gbe_set_rx_csum,
-	.get_tx_csum = pch_gbe_get_tx_csum,
 	.set_tx_csum = pch_gbe_set_tx_csum,
 	.get_strings = pch_gbe_get_strings,
 	.get_ethtool_stats = pch_gbe_get_ethtool_stats,
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c
index 472056b..d735530 100644
--- a/drivers/net/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/pch_gbe/pch_gbe_main.c
@@ -1,6 +1,6 @@
 /*
  * Copyright (C) 1999 - 2010 Intel Corporation.
- * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD.
+ * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD.
  *
  * This code was derived from the Intel e1000e Linux driver.
  *
@@ -1523,12 +1523,11 @@
 	int desNo;
 
 	size = (int)sizeof(struct pch_gbe_buffer) * tx_ring->count;
-	tx_ring->buffer_info = vmalloc(size);
+	tx_ring->buffer_info = vzalloc(size);
 	if (!tx_ring->buffer_info) {
 		pr_err("Unable to allocate memory for the buffer infomation\n");
 		return -ENOMEM;
 	}
-	memset(tx_ring->buffer_info, 0, size);
 
 	tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
 
@@ -1573,12 +1572,11 @@
 	int desNo;
 
 	size = (int)sizeof(struct pch_gbe_buffer) * rx_ring->count;
-	rx_ring->buffer_info = vmalloc(size);
+	rx_ring->buffer_info = vzalloc(size);
 	if (!rx_ring->buffer_info) {
 		pr_err("Unable to allocate memory for the receive descriptor ring\n");
 		return -ENOMEM;
 	}
-	memset(rx_ring->buffer_info, 0, size);
 	rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
 	rx_ring->desc =	dma_alloc_coherent(&pdev->dev, rx_ring->size,
 					   &rx_ring->dma, GFP_KERNEL);
@@ -2321,7 +2319,7 @@
 	netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
 	netif_napi_add(netdev, &adapter->napi,
 		       pch_gbe_napi_poll, PCH_GBE_RX_WEIGHT);
-	netdev->features = NETIF_F_HW_CSUM | NETIF_F_GRO;
+	netdev->features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_GRO;
 	pch_gbe_set_ethtool_ops(netdev);
 
 	pch_gbe_mac_reset_hw(&adapter->hw);
@@ -2360,9 +2358,9 @@
 	pch_gbe_check_options(adapter);
 
 	if (adapter->tx_csum)
-		netdev->features |= NETIF_F_HW_CSUM;
+		netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
 	else
-		netdev->features &= ~NETIF_F_HW_CSUM;
+		netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
 
 	/* initialize the wol settings based on the eeprom settings */
 	adapter->wake_up_evt = PCH_GBE_WL_INIT_SETTING;
@@ -2464,8 +2462,8 @@
 module_init(pch_gbe_init_module);
 module_exit(pch_gbe_exit_module);
 
-MODULE_DESCRIPTION("OKI semiconductor PCH Gigabit ethernet Driver");
-MODULE_AUTHOR("OKI semiconductor, <masa-korg@dsn.okisemi.com>");
+MODULE_DESCRIPTION("EG20T PCH Gigabit ethernet Driver");
+MODULE_AUTHOR("OKI SEMICONDUCTOR, <toshiharu-linux@dsn.okisemi.com>");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 MODULE_DEVICE_TABLE(pci, pch_gbe_pcidev_id);
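
The checksum-feature change above trades the catch-all flag for protocol-specific ones: NETIF_F_HW_CSUM tells the stack the NIC can checksum any protocol, while NETIF_F_IP_CSUM and NETIF_F_IPV6_CSUM promise only TCP/UDP over IPv4/IPv6, which is presumably what this hardware actually supports. Restating the narrower advertisement from the hunk:

	/*
	 * Advertise only what the hardware can offload, so the stack
	 * never hands the driver a packet it cannot checksum.
	 */
	netdev->features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_GRO;

The matching ethtool hook, ethtool_op_set_tx_ipv6_csum(), sets or clears that same flag pair, which is why the driver's open-coded get/set helpers could be dropped.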
diff --git a/drivers/net/pch_gbe/pch_gbe_param.c b/drivers/net/pch_gbe/pch_gbe_param.c
index 2510146..ef0996a 100644
--- a/drivers/net/pch_gbe/pch_gbe_param.c
+++ b/drivers/net/pch_gbe/pch_gbe_param.c
@@ -434,8 +434,8 @@
 			.err  = "using default of "
 				__MODULE_STRING(PCH_GBE_DEFAULT_TXD),
 			.def  = PCH_GBE_DEFAULT_TXD,
-			.arg  = { .r = { .min = PCH_GBE_MIN_TXD } },
-			.arg  = { .r = { .max = PCH_GBE_MAX_TXD } }
+			.arg  = { .r = { .min = PCH_GBE_MIN_TXD,
+					 .max = PCH_GBE_MAX_TXD } }
 		};
 		struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
 		tx_ring->count = TxDescriptors;
@@ -450,8 +450,8 @@
 			.err  = "using default of "
 				__MODULE_STRING(PCH_GBE_DEFAULT_RXD),
 			.def  = PCH_GBE_DEFAULT_RXD,
-			.arg  = { .r = { .min = PCH_GBE_MIN_RXD } },
-			.arg  = { .r = { .max = PCH_GBE_MAX_RXD } }
+			.arg  = { .r = { .min = PCH_GBE_MIN_RXD,
+					 .max = PCH_GBE_MAX_RXD } }
 		};
 		struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
 		rx_ring->count = RxDescriptors;
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index d2e166e..1a0eb128 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -111,13 +111,14 @@
 
 typedef struct axnet_dev_t {
 	struct pcmcia_device	*p_dev;
-    caddr_t		base;
-    struct timer_list	watchdog;
-    int			stale, fast_poll;
-    u_short		link_status;
-    u_char		duplex_flag;
-    int			phy_id;
-    int			flags;
+	caddr_t	base;
+	struct timer_list	watchdog;
+	int	stale, fast_poll;
+	u_short	link_status;
+	u_char	duplex_flag;
+	int	phy_id;
+	int	flags;
+	int	active_low;
 } axnet_dev_t;
 
 static inline axnet_dev_t *PRIV(struct net_device *dev)
@@ -322,6 +323,8 @@
     if (info->flags & IS_AX88790)
 	outb(0x10, dev->base_addr + AXNET_GPIO);  /* select Internal PHY */
 
+    info->active_low = 0;
+
     for (i = 0; i < 32; i++) {
 	j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1);
 	j2 = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 2);
@@ -329,15 +332,18 @@
 	if ((j != 0) && (j != 0xffff)) break;
     }
 
-    /* Maybe PHY is in power down mode. (PPD_SET = 1) 
-       Bit 2 of CCSR is active low. */ 
     if (i == 32) {
+	/* Maybe PHY is in power down mode. (PPD_SET = 1)
+	   Bit 2 of CCSR is active low. */
 	pcmcia_write_config_byte(link, CISREG_CCSR, 0x04);
 	for (i = 0; i < 32; i++) {
 	    j = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 1);
 	    j2 = mdio_read(dev->base_addr + AXNET_MII_EEP, i, 2);
 	    if (j == j2) continue;
-	    if ((j != 0) && (j != 0xffff)) break;
+	    if ((j != 0) && (j != 0xffff)) {
+		info->active_low = 1;
+		break;
+	    }
 	}
     }
 
@@ -383,8 +389,12 @@
 static int axnet_resume(struct pcmcia_device *link)
 {
 	struct net_device *dev = link->priv;
+	axnet_dev_t *info = PRIV(dev);
 
 	if (link->open) {
+		if (info->active_low == 1)
+			pcmcia_write_config_byte(link, CISREG_CCSR, 0x04);
+
 		axnet_reset_8390(dev);
 		AX88190_init(dev, 1);
 		netif_device_attach(dev);
@@ -865,7 +875,7 @@
 static int ax_open(struct net_device *dev)
 {
 	unsigned long flags;
-	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+	struct ei_device *ei_local = netdev_priv(dev);
 
 	/*
 	 *	Grab the page lock so we own the register set, then call
@@ -916,7 +926,7 @@
 static void axnet_tx_timeout(struct net_device *dev)
 {
 	long e8390_base = dev->base_addr;
-	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+	struct ei_device *ei_local = netdev_priv(dev);
 	int txsr, isr, tickssofar = jiffies - dev_trans_start(dev);
 	unsigned long flags;
 
@@ -963,7 +973,7 @@
 					  struct net_device *dev)
 {
 	long e8390_base = dev->base_addr;
-	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+	struct ei_device *ei_local = netdev_priv(dev);
 	int length, send_length, output_page;
 	unsigned long flags;
 	u8 packet[ETH_ZLEN];
@@ -1260,7 +1270,7 @@
 static void ei_tx_intr(struct net_device *dev)
 {
 	long e8390_base = dev->base_addr;
-	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+	struct ei_device *ei_local = netdev_priv(dev);
 	int status = inb(e8390_base + EN0_TSR);
     
 	/*
@@ -1344,7 +1354,7 @@
 static void ei_receive(struct net_device *dev)
 {
 	long e8390_base = dev->base_addr;
-	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+	struct ei_device *ei_local = netdev_priv(dev);
 	unsigned char rxing_page, this_frame, next_frame;
 	unsigned short current_offset;
 	int rx_pkt_count = 0;
@@ -1529,7 +1539,7 @@
 static struct net_device_stats *get_stats(struct net_device *dev)
 {
 	long ioaddr = dev->base_addr;
-	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+	struct ei_device *ei_local = netdev_priv(dev);
 	unsigned long flags;
     
 	/* If the card is stopped, just return the present stats. */
@@ -1578,7 +1588,7 @@
 {
 	long e8390_base = dev->base_addr;
 	int i;
-	struct ei_device *ei_local = (struct ei_device*)netdev_priv(dev);
+	struct ei_device *ei_local = netdev_priv(dev);
 
 	if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI))) {
 		memset(ei_local->mcfilter, 0, 8);
@@ -1636,7 +1646,7 @@
 {
 	axnet_dev_t *info = PRIV(dev);
 	long e8390_base = dev->base_addr;
-	struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev);
+	struct ei_device *ei_local = netdev_priv(dev);
 	int i;
 	int endcfg = ei_local->word16 ? (0x48 | ENDCFG_WTS) : 0x48;
     
@@ -1702,7 +1712,7 @@
 								int start_page)
 {
 	long e8390_base = dev->base_addr;
- 	struct ei_device *ei_local __attribute((unused)) = (struct ei_device *) netdev_priv(dev);
+ 	struct ei_device *ei_local __attribute((unused)) = netdev_priv(dev);
     
 	if (inb_p(e8390_base) & E8390_TRANS) 
 	{
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index f0bd1a1..e8b9c53 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -30,11 +30,14 @@
 #include <linux/ethtool.h>
 #include <linux/phy.h>
 #include <linux/marvell_phy.h>
+#include <linux/of.h>
 
 #include <asm/io.h>
 #include <asm/irq.h>
 #include <asm/uaccess.h>
 
+#define MII_MARVELL_PHY_PAGE		22
+
 #define MII_M1011_IEVENT		0x13
 #define MII_M1011_IEVENT_CLEAR		0x0000
 
@@ -80,7 +83,6 @@
 #define MII_88E1121_PHY_LED_CTRL	16
 #define MII_88E1121_PHY_LED_PAGE	3
 #define MII_88E1121_PHY_LED_DEF		0x0030
-#define MII_88E1121_PHY_PAGE		22
 
 #define MII_M1011_PHY_STATUS		0x11
 #define MII_M1011_PHY_STATUS_1000	0x8000
@@ -186,13 +188,94 @@
 	return 0;
 }
 
+#ifdef CONFIG_OF_MDIO
+/*
+ * Set and/or override some configuration registers based on the
+ * marvell,reg-init property stored in the of_node for the phydev.
+ *
+ * marvell,reg-init = <reg-page reg mask value>,...;
+ *
+ * There may be one or more sets of <reg-page reg mask value>:
+ *
+ * reg-page: which register bank to use.
+ * reg: the register.
+ * mask: if non-zero, ANDed with existing register value.
+ * value: ORed with the masked value and written to the register.
+ *
+ */
+static int marvell_of_reg_init(struct phy_device *phydev)
+{
+	const __be32 *paddr;
+	int len, i, saved_page, current_page, page_changed, ret;
+
+	if (!phydev->dev.of_node)
+		return 0;
+
+	paddr = of_get_property(phydev->dev.of_node, "marvell,reg-init", &len);
+	if (!paddr || len < (4 * sizeof(*paddr)))
+		return 0;
+
+	saved_page = phy_read(phydev, MII_MARVELL_PHY_PAGE);
+	if (saved_page < 0)
+		return saved_page;
+	page_changed = 0;
+	current_page = saved_page;
+
+	ret = 0;
+	len /= sizeof(*paddr);
+	for (i = 0; i < len - 3; i += 4) {
+		u16 reg_page = be32_to_cpup(paddr + i);
+		u16 reg = be32_to_cpup(paddr + i + 1);
+		u16 mask = be32_to_cpup(paddr + i + 2);
+		u16 val_bits = be32_to_cpup(paddr + i + 3);
+		int val;
+
+		if (reg_page != current_page) {
+			current_page = reg_page;
+			page_changed = 1;
+			ret = phy_write(phydev, MII_MARVELL_PHY_PAGE, reg_page);
+			if (ret < 0)
+				goto err;
+		}
+
+		val = 0;
+		if (mask) {
+			val = phy_read(phydev, reg);
+			if (val < 0) {
+				ret = val;
+				goto err;
+			}
+			val &= mask;
+		}
+		val |= val_bits;
+
+		ret = phy_write(phydev, reg, val);
+		if (ret < 0)
+			goto err;
+
+	}
+err:
+	if (page_changed) {
+		i = phy_write(phydev, MII_MARVELL_PHY_PAGE, saved_page);
+		if (ret == 0)
+			ret = i;
+	}
+	return ret;
+}
+#else
+static int marvell_of_reg_init(struct phy_device *phydev)
+{
+	return 0;
+}
+#endif /* CONFIG_OF_MDIO */
+
 static int m88e1121_config_aneg(struct phy_device *phydev)
 {
 	int err, oldpage, mscr;
 
-	oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE);
+	oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
 
-	err = phy_write(phydev, MII_88E1121_PHY_PAGE,
+	err = phy_write(phydev, MII_MARVELL_PHY_PAGE,
 			MII_88E1121_PHY_MSCR_PAGE);
 	if (err < 0)
 		return err;
@@ -218,7 +301,7 @@
 			return err;
 	}
 
-	phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage);
+	phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
 
 	err = phy_write(phydev, MII_BMCR, BMCR_RESET);
 	if (err < 0)
@@ -229,11 +312,11 @@
 	if (err < 0)
 		return err;
 
-	oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE);
+	oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
 
-	phy_write(phydev, MII_88E1121_PHY_PAGE, MII_88E1121_PHY_LED_PAGE);
+	phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_88E1121_PHY_LED_PAGE);
 	phy_write(phydev, MII_88E1121_PHY_LED_CTRL, MII_88E1121_PHY_LED_DEF);
-	phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage);
+	phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
 
 	err = genphy_config_aneg(phydev);
 
@@ -244,9 +327,9 @@
 {
 	int err, oldpage, mscr;
 
-	oldpage = phy_read(phydev, MII_88E1121_PHY_PAGE);
+	oldpage = phy_read(phydev, MII_MARVELL_PHY_PAGE);
 
-	err = phy_write(phydev, MII_88E1121_PHY_PAGE,
+	err = phy_write(phydev, MII_MARVELL_PHY_PAGE,
 			MII_88E1121_PHY_MSCR_PAGE);
 	if (err < 0)
 		return err;
@@ -258,7 +341,7 @@
 	if (err < 0)
 		return err;
 
-	err = phy_write(phydev, MII_88E1121_PHY_PAGE, oldpage);
+	err = phy_write(phydev, MII_MARVELL_PHY_PAGE, oldpage);
 	if (err < 0)
 		return err;
 
@@ -368,6 +451,9 @@
 			return err;
 	}
 
+	err = marvell_of_reg_init(phydev);
+	if (err < 0)
+		return err;
 
 	err = phy_write(phydev, MII_BMCR, BMCR_RESET);
 	if (err < 0)
@@ -398,7 +484,7 @@
 	int err;
 
 	/* Change address */
-	err = phy_write(phydev, 0x16, 0x0002);
+	err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0002);
 	if (err < 0)
 		return err;
 
@@ -408,7 +494,7 @@
 		return err;
 
 	/* Change address */
-	err = phy_write(phydev, 0x16, 0x0003);
+	err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0003);
 	if (err < 0)
 		return err;
 
@@ -420,8 +506,42 @@
 	if (err < 0)
 		return err;
 
+	err = marvell_of_reg_init(phydev);
+	if (err < 0)
+		return err;
+
 	/* Reset address */
-	err = phy_write(phydev, 0x16, 0x0);
+	err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0);
+	if (err < 0)
+		return err;
+
+	err = phy_write(phydev, MII_BMCR, BMCR_RESET);
+	if (err < 0)
+		return err;
+
+	return 0;
+}
+
+static int m88e1149_config_init(struct phy_device *phydev)
+{
+	int err;
+
+	/* Change address */
+	err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0002);
+	if (err < 0)
+		return err;
+
+	/* Enable 1000 Mbit */
+	err = phy_write(phydev, 0x15, 0x1048);
+	if (err < 0)
+		return err;
+
+	err = marvell_of_reg_init(phydev);
+	if (err < 0)
+		return err;
+
+	/* Reset address */
+	err = phy_write(phydev, MII_MARVELL_PHY_PAGE, 0x0);
 	if (err < 0)
 		return err;
 
@@ -491,6 +611,10 @@
 		}
 	}
 
+	err = marvell_of_reg_init(phydev);
+	if (err < 0)
+		return err;
+
 	return 0;
 }
 
@@ -685,6 +809,19 @@
 		.driver = { .owner = THIS_MODULE },
 	},
 	{
+		.phy_id = MARVELL_PHY_ID_88E1149R,
+		.phy_id_mask = MARVELL_PHY_ID_MASK,
+		.name = "Marvell 88E1149R",
+		.features = PHY_GBIT_FEATURES,
+		.flags = PHY_HAS_INTERRUPT,
+		.config_init = &m88e1149_config_init,
+		.config_aneg = &m88e1118_config_aneg,
+		.read_status = &genphy_read_status,
+		.ack_interrupt = &marvell_ack_interrupt,
+		.config_intr = &marvell_config_intr,
+		.driver = { .owner = THIS_MODULE },
+	},
+	{
 		.phy_id = MARVELL_PHY_ID_88E1240,
 		.phy_id_mask = MARVELL_PHY_ID_MASK,
 		.name = "Marvell 88E1240",
@@ -735,6 +872,7 @@
 	{ 0x01410e10, 0xfffffff0 },
 	{ 0x01410cb0, 0xfffffff0 },
 	{ 0x01410cd0, 0xfffffff0 },
+	{ 0x01410e50, 0xfffffff0 },
 	{ 0x01410e30, 0xfffffff0 },
 	{ 0x01410e90, 0xfffffff0 },
 	{ }
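
For context, marvell_of_reg_init() expects the PHY's device-tree node to carry one or more <reg-page reg mask value> quadruples. The fragment below is hypothetical: the node name, pages and register values are invented purely to illustrate the encoding (a zero mask means the old register contents are ignored, per the comment in the code):

	ethernet-phy@0 {
		reg = <0>;
		/* page 3, reg 0x10: discard old value, write 0x1177 */
		/* page 0, reg 0x16: keep bits in 0xff00, OR in 0x0012 */
		marvell,reg-init = <3 0x10 0 0x1177>,
				   <0 0x16 0xff00 0x0012>;
	};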
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 7670aac..a8445c7 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -47,11 +47,11 @@
 	pr_info("PHY: %s - Link is %s", dev_name(&phydev->dev),
 			phydev->link ? "Up" : "Down");
 	if (phydev->link)
-		printk(" - %d/%s", phydev->speed,
+		printk(KERN_CONT " - %d/%s", phydev->speed,
 				DUPLEX_FULL == phydev->duplex ?
 				"Full" : "Half");
 
-	printk("\n");
+	printk(KERN_CONT "\n");
 }
 EXPORT_SYMBOL(phy_print_status);
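
The KERN_CONT fix above matters because a printk() without an explicit level starts a new log record at the default loglevel instead of appending; KERN_CONT marks the fragment as a continuation of the line in progress. The same pattern in miniature (the variables here are illustrative):

	pr_info("link is %s", up ? "Up" : "Down");
	if (up)
		printk(KERN_CONT " - %d Mb/s", speed);	/* same line */
	printk(KERN_CONT "\n");				/* terminate it */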
 
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 09cf56d..b708f68 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -1136,8 +1136,7 @@
 		   a four-byte PPP header on each packet */
 		*skb_push(skb, 2) = 1;
 		if (ppp->pass_filter &&
-		    sk_run_filter(skb, ppp->pass_filter,
-				  ppp->pass_len) == 0) {
+		    sk_run_filter(skb, ppp->pass_filter) == 0) {
 			if (ppp->debug & 1)
 				printk(KERN_DEBUG "PPP: outbound frame not passed\n");
 			kfree_skb(skb);
@@ -1145,8 +1144,7 @@
 		}
 		/* if this packet passes the active filter, record the time */
 		if (!(ppp->active_filter &&
-		      sk_run_filter(skb, ppp->active_filter,
-				    ppp->active_len) == 0))
+		      sk_run_filter(skb, ppp->active_filter) == 0))
 			ppp->last_xmit = jiffies;
 		skb_pull(skb, 2);
 #else
@@ -1758,8 +1756,7 @@
 
 			*skb_push(skb, 2) = 0;
 			if (ppp->pass_filter &&
-			    sk_run_filter(skb, ppp->pass_filter,
-					  ppp->pass_len) == 0) {
+			    sk_run_filter(skb, ppp->pass_filter) == 0) {
 				if (ppp->debug & 1)
 					printk(KERN_DEBUG "PPP: inbound frame "
 					       "not passed\n");
@@ -1767,8 +1764,7 @@
 				return;
 			}
 			if (!(ppp->active_filter &&
-			      sk_run_filter(skb, ppp->active_filter,
-					    ppp->active_len) == 0))
+			      sk_run_filter(skb, ppp->active_filter) == 0))
 				ppp->last_recv = jiffies;
 			__skb_pull(skb, 2);
 		} else
@@ -2584,16 +2580,16 @@
 	 */
 	dev_net_set(dev, net);
 
-	ret = -EEXIST;
 	mutex_lock(&pn->all_ppp_mutex);
 
 	if (unit < 0) {
 		unit = unit_get(&pn->units_idr, ppp);
 		if (unit < 0) {
-			*retp = unit;
+			ret = unit;
 			goto out2;
 		}
 	} else {
+		ret = -EEXIST;
 		if (unit_find(&pn->units_idr, unit))
 			goto out2; /* unit already exists */
 		/*
@@ -2668,10 +2664,10 @@
 		ppp->closing = 1;
 		ppp_unlock(ppp);
 		unregister_netdev(ppp->dev);
+		unit_put(&pn->units_idr, ppp->file.index);
 	} else
 		ppp_unlock(ppp);
 
-	unit_put(&pn->units_idr, ppp->file.index);
 	ppp->file.dead = 1;
 	ppp->owner = NULL;
 	wake_up_interruptible(&ppp->file.rwait);
@@ -2859,8 +2855,7 @@
  * by holding all_ppp_mutex
  */
 
-/* associate pointer with specified number */
-static int unit_set(struct idr *p, void *ptr, int n)
+static int __unit_alloc(struct idr *p, void *ptr, int n)
 {
 	int unit, err;
 
@@ -2871,10 +2866,24 @@
 	}
 
 	err = idr_get_new_above(p, ptr, n, &unit);
-	if (err == -EAGAIN)
-		goto again;
+	if (err < 0) {
+		if (err == -EAGAIN)
+			goto again;
+		return err;
+	}
 
-	if (unit != n) {
+	return unit;
+}
+
+/* associate pointer with specified number */
+static int unit_set(struct idr *p, void *ptr, int n)
+{
+	int unit;
+
+	unit = __unit_alloc(p, ptr, n);
+	if (unit < 0)
+		return unit;
+	else if (unit != n) {
 		idr_remove(p, unit);
 		return -EINVAL;
 	}
@@ -2885,19 +2894,7 @@
 /* get new free unit number and associate pointer with it */
 static int unit_get(struct idr *p, void *ptr)
 {
-	int unit, err;
-
-again:
-	if (!idr_pre_get(p, GFP_KERNEL)) {
-		printk(KERN_ERR "PPP: No free memory for idr\n");
-		return -ENOMEM;
-	}
-
-	err = idr_get_new_above(p, ptr, 0, &unit);
-	if (err == -EAGAIN)
-		goto again;
-
-	return unit;
+	return __unit_alloc(p, ptr, 0);
 }
 
 /* put unit number back to a pool */
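
Both unit_set() and unit_get() above now funnel through __unit_alloc(), which wraps the idr_pre_get()/idr_get_new_above() retry loop of this era's IDR API and, unlike the old unit_get(), returns errors other than -EAGAIN to the caller instead of a stale unit number. The pattern, condensed (names are illustrative):

	static int alloc_id(struct idr *idr, void *ptr, int floor)
	{
		int id, err;

	again:
		if (!idr_pre_get(idr, GFP_KERNEL))	/* preload nodes */
			return -ENOMEM;
		err = idr_get_new_above(idr, ptr, floor, &id);
		if (err == -EAGAIN)			/* lost a race */
			goto again;
		return err < 0 ? err : id;
	}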
diff --git a/drivers/net/pptp.c b/drivers/net/pptp.c
index ccbc913..164cfad 100644
--- a/drivers/net/pptp.c
+++ b/drivers/net/pptp.c
@@ -277,7 +277,7 @@
 	iph->tos      = 0;
 	iph->daddr    = rt->rt_dst;
 	iph->saddr    = rt->rt_src;
-	iph->ttl      = dst_metric(&rt->dst, RTAX_HOPLIMIT);
+	iph->ttl      = ip4_dst_hoplimit(&rt->dst);
 	iph->tot_len  = htons(skb->len);
 
 	skb_dst_drop(skb);
@@ -673,8 +673,7 @@
 	int err = 0;
 	pr_info("PPTP driver version " PPTP_DRIVER_VERSION "\n");
 
-	callid_sock = __vmalloc((MAX_CALLID + 1) * sizeof(void *),
-		GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
+	callid_sock = vzalloc((MAX_CALLID + 1) * sizeof(void *));
 	if (!callid_sock) {
 		pr_err("PPTP: cann't allocate memory\n");
 		return -ENOMEM;
diff --git a/drivers/net/pxa168_eth.c b/drivers/net/pxa168_eth.c
index 18c0297..1b63c8a 100644
--- a/drivers/net/pxa168_eth.c
+++ b/drivers/net/pxa168_eth.c
@@ -1450,16 +1450,11 @@
 	strncpy(info->bus_info, "N/A", 32);
 }
 
-static u32 pxa168_get_link(struct net_device *dev)
-{
-	return !!netif_carrier_ok(dev);
-}
-
 static const struct ethtool_ops pxa168_ethtool_ops = {
 	.get_settings = pxa168_get_settings,
 	.set_settings = pxa168_set_settings,
 	.get_drvinfo = pxa168_get_drvinfo,
-	.get_link = pxa168_get_link,
+	.get_link = ethtool_op_get_link,
 };
 
 static const struct net_device_ops pxa168_eth_netdev_ops = {
@@ -1607,7 +1602,7 @@
 	mdiobus_unregister(pep->smi_bus);
 	mdiobus_free(pep->smi_bus);
 	unregister_netdev(dev);
-	flush_scheduled_work();
+	cancel_work_sync(&pep->tx_timeout_task);
 	free_netdev(dev);
 	platform_set_drvdata(pdev, NULL);
 	return 0;
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 7496ed2..1a3584e 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -2467,7 +2467,7 @@
 static netdev_tx_t ql3xxx_send(struct sk_buff *skb,
 			       struct net_device *ndev)
 {
-	struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
+	struct ql3_adapter *qdev = netdev_priv(ndev);
 	struct ql3xxx_port_registers __iomem *port_regs =
 			qdev->mem_map_registers;
 	struct ql_tx_buf_cb *tx_cb;
@@ -3390,7 +3390,7 @@
 
 static void ql_display_dev_info(struct net_device *ndev)
 {
-	struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
+	struct ql3_adapter *qdev = netdev_priv(ndev);
 	struct pci_dev *pdev = qdev->pdev;
 
 	netdev_info(ndev,
@@ -3573,7 +3573,7 @@
 
 static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
 {
-	struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
+	struct ql3_adapter *qdev = netdev_priv(ndev);
 	struct ql3xxx_port_registers __iomem *port_regs =
 			qdev->mem_map_registers;
 	struct sockaddr *addr = p;
@@ -3608,7 +3608,7 @@
 
 static void ql3xxx_tx_timeout(struct net_device *ndev)
 {
-	struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev);
+	struct ql3_adapter *qdev = netdev_priv(ndev);
 
 	netdev_err(ndev, "Resetting...\n");
 	/*
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index 8ecc170..f267da4 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -1,25 +1,8 @@
 /*
- * Copyright (C) 2009 - QLogic Corporation.
- * All rights reserved.
+ * QLogic qlcnic NIC Driver
+ * Copyright (c)  2009-2010 QLogic Corporation
  *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA  02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution
- * in the file called "COPYING".
- *
+ * See LICENSE.qlcnic for copyright and licensing details.
  */
 
 #ifndef _QLCNIC_H_
@@ -51,8 +34,8 @@
 
 #define _QLCNIC_LINUX_MAJOR 5
 #define _QLCNIC_LINUX_MINOR 0
-#define _QLCNIC_LINUX_SUBVERSION 11
-#define QLCNIC_LINUX_VERSIONID  "5.0.11"
+#define _QLCNIC_LINUX_SUBVERSION 13
+#define QLCNIC_LINUX_VERSIONID  "5.0.13"
 #define QLCNIC_DRV_IDC_VER  0x01
 #define QLCNIC_DRIVER_VERSION  ((_QLCNIC_LINUX_MAJOR << 16) |\
 		 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -798,7 +781,6 @@
 #define QLCNIC_H2C_OPCODE_GET_NET_STATS 		16
 #define QLCNIC_H2C_OPCODE_PROXY_UPDATE_P2V		17
 #define QLCNIC_H2C_OPCODE_CONFIG_IPADDR 		18
-#define QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK		19
 #define QLCNIC_H2C_OPCODE_PROXY_STOP_DONE		20
 #define QLCNIC_H2C_OPCODE_GET_LINKEVENT 		21
 #define QLCNIC_C2C_OPCODE				22
@@ -923,6 +905,7 @@
 #define QLCNIC_MACSPOOF			0x200
 #define QLCNIC_MAC_OVERRIDE_DISABLED	0x400
 #define QLCNIC_PROMISC_DISABLED		0x800
+#define QLCNIC_NEED_FLR			0x1000
 #define QLCNIC_IS_MSI_FAMILY(adapter) \
 	((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
 
@@ -1126,8 +1109,7 @@
 /* Return codes for Error handling */
 #define QL_STATUS_INVALID_PARAM	-1
 
-#define MAX_BW			100
-#define MIN_BW			1
+#define MAX_BW			100	/* % of link speed */
 #define MAX_VLAN_ID		4095
 #define MIN_VLAN_ID		2
 #define MAX_TX_QUEUES		1
@@ -1135,7 +1117,7 @@
 #define DEFAULT_MAC_LEARN	1
 
 #define IS_VALID_VLAN(vlan)	(vlan >= MIN_VLAN_ID && vlan < MAX_VLAN_ID)
-#define IS_VALID_BW(bw)		(bw >= MIN_BW && bw <= MAX_BW)
+#define IS_VALID_BW(bw)		(bw <= MAX_BW)
 #define IS_VALID_TX_QUEUES(que)	(que > 0 && que <= MAX_TX_QUEUES)
 #define IS_VALID_RX_QUEUES(que)	(que > 0 && que <= MAX_RX_QUEUES)
 
@@ -1314,21 +1296,15 @@
 int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter);
 void qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
 		struct qlcnic_host_tx_ring *tx_ring);
-void qlcnic_clear_ilb_mode(struct qlcnic_adapter *adapter);
-int qlcnic_set_ilb_mode(struct qlcnic_adapter *adapter);
 void qlcnic_fetch_mac(struct qlcnic_adapter *, u32, u32, u8, u8 *);
 
 /* Functions from qlcnic_main.c */
-int qlcnic_request_quiscent_mode(struct qlcnic_adapter *adapter);
-void qlcnic_clear_quiscent_mode(struct qlcnic_adapter *adapter);
 int qlcnic_reset_context(struct qlcnic_adapter *);
 u32 qlcnic_issue_cmd(struct qlcnic_adapter *adapter,
 	u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd);
 void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings);
 int qlcnic_diag_alloc_res(struct net_device *netdev, int test);
-int qlcnic_check_loopback_buff(unsigned char *data);
 netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
-void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring);
 
 /* Management functions */
 int qlcnic_get_mac_address(struct qlcnic_adapter *, u8*);
@@ -1377,6 +1353,8 @@
 		"3200 Series Single Port 10Gb Intelligent Ethernet Adapter"},
 	{0x1077, 0x8020, 0x103c, 0x3733,
 		"NC523SFP 10Gb 2-port Server Adapter"},
+	{0x1077, 0x8020, 0x103c, 0x3346,
+		"CN1000Q Dual Port Converged Network Adapter"},
 	{0x1077, 0x8020, 0x0, 0x0, "cLOM8214 1/10GbE Controller"},
 };
 
diff --git a/drivers/net/qlcnic/qlcnic_ctx.c b/drivers/net/qlcnic/qlcnic_ctx.c
index 1cdc05d..27631f2 100644
--- a/drivers/net/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/qlcnic/qlcnic_ctx.c
@@ -1,25 +1,8 @@
 /*
- * Copyright (C) 2009 - QLogic Corporation.
- * All rights reserved.
+ * QLogic qlcnic NIC Driver
+ * Copyright (c)  2009-2010 QLogic Corporation
  *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA  02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution
- * in the file called "COPYING".
- *
+ * See LICENSE.qlcnic for copyright and licensing details.
  */
 
 #include "qlcnic.h"
@@ -480,6 +463,11 @@
 {
 	int err;
 
+	if (adapter->flags & QLCNIC_NEED_FLR) {
+		pci_reset_function(adapter->pdev);
+		adapter->flags &= ~QLCNIC_NEED_FLR;
+	}
+
 	err = qlcnic_fw_cmd_create_rx_ctx(adapter);
 	if (err)
 		return err;
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
index ec21d24..0eaf31b 100644
--- a/drivers/net/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -1,25 +1,8 @@
 /*
- * Copyright (C) 2009 - QLogic Corporation.
- * All rights reserved.
+ * QLogic qlcnic NIC Driver
+ * Copyright (c)  2009-2010 QLogic Corporation
  *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA  02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution
- * in the file called "COPYING".
- *
+ * See LICENSE.qlcnic for copyright and licensing details.
  */
 
 #include <linux/types.h>
@@ -101,8 +84,7 @@
 static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
 	"Register_Test_on_offline",
 	"Link_Test_on_offline",
-	"Interrupt_Test_offline",
-	"Loopback_Test_offline"
+	"Interrupt_Test_offline"
 };
 
 #define QLCNIC_TEST_LEN	ARRAY_SIZE(qlcnic_gstrings_test)
@@ -643,104 +625,6 @@
 	}
 }
 
-#define QLC_ILB_PKT_SIZE 64
-#define QLC_NUM_ILB_PKT	16
-#define QLC_ILB_MAX_RCV_LOOP 10
-
-static void qlcnic_create_loopback_buff(unsigned char *data)
-{
-	unsigned char random_data[] = {0xa8, 0x06, 0x45, 0x00};
-	memset(data, 0x4e, QLC_ILB_PKT_SIZE);
-	memset(data, 0xff, 12);
-	memcpy(data + 12, random_data, sizeof(random_data));
-}
-
-int qlcnic_check_loopback_buff(unsigned char *data)
-{
-	unsigned char buff[QLC_ILB_PKT_SIZE];
-	qlcnic_create_loopback_buff(buff);
-	return memcmp(data, buff, QLC_ILB_PKT_SIZE);
-}
-
-static int qlcnic_do_ilb_test(struct qlcnic_adapter *adapter)
-{
-	struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
-	struct qlcnic_host_sds_ring *sds_ring = &recv_ctx->sds_rings[0];
-	struct sk_buff *skb;
-	int i, loop, cnt = 0;
-
-	for (i = 0; i < QLC_NUM_ILB_PKT; i++) {
-		skb = dev_alloc_skb(QLC_ILB_PKT_SIZE);
-		qlcnic_create_loopback_buff(skb->data);
-		skb_put(skb, QLC_ILB_PKT_SIZE);
-
-		adapter->diag_cnt = 0;
-		qlcnic_xmit_frame(skb, adapter->netdev);
-
-		loop = 0;
-		do {
-			msleep(1);
-			qlcnic_process_rcv_ring_diag(sds_ring);
-		} while (loop++ < QLC_ILB_MAX_RCV_LOOP &&
-			 !adapter->diag_cnt);
-
-		dev_kfree_skb_any(skb);
-
-		if (!adapter->diag_cnt)
-			dev_warn(&adapter->pdev->dev, "ILB Test: %dth packet"
-				" not recevied\n", i + 1);
-		else
-			cnt++;
-	}
-	if (cnt != i) {
-		dev_warn(&adapter->pdev->dev, "ILB Test failed\n");
-		return -1;
-	}
-	return 0;
-}
-
-static int qlcnic_loopback_test(struct net_device *netdev)
-{
-	struct qlcnic_adapter *adapter = netdev_priv(netdev);
-	int max_sds_rings = adapter->max_sds_rings;
-	int ret;
-
-	if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
-		dev_warn(&adapter->pdev->dev, "Loopback test not supported"
-				"for non privilege function\n");
-		return 0;
-	}
-
-	if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
-		return -EIO;
-
-	if (qlcnic_request_quiscent_mode(adapter)) {
-		clear_bit(__QLCNIC_RESETTING, &adapter->state);
-		return -EIO;
-	}
-
-	ret = qlcnic_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST);
-	if (ret)
-		goto clear_it;
-
-	ret = qlcnic_set_ilb_mode(adapter);
-	if (ret)
-		goto done;
-
-	ret = qlcnic_do_ilb_test(adapter);
-
-	qlcnic_clear_ilb_mode(adapter);
-
-done:
-	qlcnic_diag_free_res(netdev, max_sds_rings);
-
-clear_it:
-	qlcnic_clear_quiscent_mode(adapter);
-	adapter->max_sds_rings = max_sds_rings;
-	clear_bit(__QLCNIC_RESETTING, &adapter->state);
-	return ret;
-}
-
 static int qlcnic_irq_test(struct net_device *netdev)
 {
 	struct qlcnic_adapter *adapter = netdev_priv(netdev);
@@ -793,9 +677,6 @@
 		if (data[2])
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
-		data[3] = qlcnic_loopback_test(dev);
-		if (data[3])
-			eth_test->flags |= ETH_TEST_FL_FAILED;
 
 	}
 }
@@ -925,9 +806,10 @@
 
 		dev->features &= ~NETIF_F_LRO;
 		qlcnic_send_lro_cleanup(adapter);
+		dev_info(&adapter->pdev->dev,
+					"disabling LRO as rx_csum is off\n");
 	}
 	adapter->rx_csum = !!data;
-	dev_info(&adapter->pdev->dev, "disabling LRO as rx_csum is off\n");
 	return 0;
 }
 
diff --git a/drivers/net/qlcnic/qlcnic_hdr.h b/drivers/net/qlcnic/qlcnic_hdr.h
index 4290b80..19328e0 100644
--- a/drivers/net/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/qlcnic/qlcnic_hdr.h
@@ -1,25 +1,8 @@
 /*
- * Copyright (C) 2009 - QLogic Corporation.
- * All rights reserved.
+ * QLogic qlcnic NIC Driver
+ * Copyright (c)  2009-2010 QLogic Corporation
  *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA  02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution
- * in the file called "COPYING".
- *
+ * See LICENSE.qlcnic for copyright and licensing details.
  */
 
 #ifndef __QLCNIC_HDR_H_
@@ -722,7 +705,7 @@
 #define QLCNIC_DEV_NPAR_OPER		1 /* NPAR Operational */
 #define QLCNIC_DEV_NPAR_OPER_TIMEO	30 /* Operational time out */
 
-#define QLC_DEV_CHECK_ACTIVE(VAL, FN)		((VAL) &= (1 << (FN * 4)))
+#define QLC_DEV_CHECK_ACTIVE(VAL, FN)		((VAL) & (1 << (FN * 4)))
 #define QLC_DEV_SET_REF_CNT(VAL, FN)		((VAL) |= (1 << (FN * 4)))
 #define QLC_DEV_CLR_REF_CNT(VAL, FN)		((VAL) &= ~(1 << (FN * 4)))
 #define QLC_DEV_SET_RST_RDY(VAL, FN)		((VAL) |= (1 << (FN * 4)))
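
The one-character QLC_DEV_CHECK_ACTIVE fix above removes a destructive side effect: with &=, merely testing whether a function was active rewrote the caller's copy of the register value, clearing every other function's bit. A standalone demonstration:

	#include <stdio.h>

	/* Buggy: yields the tested bit but also clobbers VAL. */
	#define CHECK_ACTIVE_BUGGY(VAL, FN)	((VAL) &= (1 << ((FN) * 4)))
	/* Fixed: a pure test with no side effect. */
	#define CHECK_ACTIVE(VAL, FN)		((VAL) & (1 << ((FN) * 4)))

	int main(void)
	{
		unsigned int val = 0x11;	/* functions 0 and 1 active */

		(void)CHECK_ACTIVE_BUGGY(val, 0);
		printf("after buggy test: 0x%x\n", val);	/* 0x1  */

		val = 0x11;
		(void)CHECK_ACTIVE(val, 0);
		printf("after fixed test: 0x%x\n", val);	/* 0x11 */
		return 0;
	}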
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c
index 7a47a2a..c9c4bf14 100644
--- a/drivers/net/qlcnic/qlcnic_hw.c
+++ b/drivers/net/qlcnic/qlcnic_hw.c
@@ -1,25 +1,8 @@
 /*
- * Copyright (C) 2009 - QLogic Corporation.
- * All rights reserved.
+ * QLogic qlcnic NIC Driver
+ * Copyright (c)  2009-2010 QLogic Corporation
  *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA  02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution
- * in the file called "COPYING".
- *
+ * See LICENSE.qlcnic for copyright and licensing details.
  */
 
 #include "qlcnic.h"
@@ -1234,56 +1217,3 @@
 
 	return rv;
 }
-
-static int qlcnic_set_fw_loopback(struct qlcnic_adapter *adapter, u32 flag)
-{
-	struct qlcnic_nic_req	req;
-	int			rv;
-	u64			word;
-
-	memset(&req, 0, sizeof(struct qlcnic_nic_req));
-	req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
-
-	word = QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK |
-			((u64)adapter->portnum << 16);
-	req.req_hdr = cpu_to_le64(word);
-	req.words[0] = cpu_to_le64(flag);
-
-	rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
-	if (rv)
-		dev_err(&adapter->pdev->dev,
-			"%sting loopback mode failed.\n",
-					flag ? "Set" : "Reset");
-	return rv;
-}
-
-int qlcnic_set_ilb_mode(struct qlcnic_adapter *adapter)
-{
-	if (qlcnic_set_fw_loopback(adapter, 1))
-		return -EIO;
-
-	if (qlcnic_nic_set_promisc(adapter,
-				VPORT_MISS_MODE_ACCEPT_ALL)) {
-		qlcnic_set_fw_loopback(adapter, 0);
-		return -EIO;
-	}
-
-	msleep(1000);
-	return 0;
-}
-
-void qlcnic_clear_ilb_mode(struct qlcnic_adapter *adapter)
-{
-	int mode = VPORT_MISS_MODE_DROP;
-	struct net_device *netdev = adapter->netdev;
-
-	qlcnic_set_fw_loopback(adapter, 0);
-
-	if (netdev->flags & IFF_PROMISC)
-		mode = VPORT_MISS_MODE_ACCEPT_ALL;
-	else if (netdev->flags & IFF_ALLMULTI)
-		mode = VPORT_MISS_MODE_ACCEPT_MULTI;
-
-	qlcnic_nic_set_promisc(adapter, mode);
-	msleep(1000);
-}
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
index 0d180c6..9b9c7c3 100644
--- a/drivers/net/qlcnic/qlcnic_init.c
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -1,25 +1,8 @@
 /*
- * Copyright (C) 2009 - QLogic Corporation.
- * All rights reserved.
+ * QLogic qlcnic NIC Driver
+ * Copyright (c)  2009-2010 QLogic Corporation
  *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA  02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution
- * in the file called "COPYING".
- *
+ * See LICENSE.qlcnic for copyright and licensing details.
  */
 
 #include <linux/netdevice.h>
@@ -236,12 +219,11 @@
 	tx_ring->num_desc = adapter->num_txd;
 	tx_ring->txq = netdev_get_tx_queue(netdev, 0);
 
-	cmd_buf_arr = vmalloc(TX_BUFF_RINGSIZE(tx_ring));
+	cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring));
 	if (cmd_buf_arr == NULL) {
 		dev_err(&netdev->dev, "failed to allocate cmd buffer ring\n");
 		goto err_out;
 	}
-	memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
 	tx_ring->cmd_buf_arr = cmd_buf_arr;
 
 	recv_ctx = &adapter->recv_ctx;
@@ -275,14 +257,12 @@
 				rds_ring->dma_size + NET_IP_ALIGN;
 			break;
 		}
-		rds_ring->rx_buf_arr = (struct qlcnic_rx_buffer *)
-			vmalloc(RCV_BUFF_RINGSIZE(rds_ring));
+		rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring));
 		if (rds_ring->rx_buf_arr == NULL) {
 			dev_err(&netdev->dev, "Failed to allocate "
 				"rx buffer ring %d\n", ring);
 			goto err_out;
 		}
-		memset(rds_ring->rx_buf_arr, 0, RCV_BUFF_RINGSIZE(rds_ring));
 		INIT_LIST_HEAD(&rds_ring->free_list);
 		/*
 		 * Now go through all of them, set reference handles
@@ -1693,99 +1673,6 @@
 	spin_unlock(&rds_ring->lock);
 }
 
-static void dump_skb(struct sk_buff *skb)
-{
-	int i;
-	unsigned char *data = skb->data;
-
-	for (i = 0; i < skb->len; i++) {
-		printk("%02x ", data[i]);
-		if ((i & 0x0f) == 8)
-			printk("\n");
-	}
-}
-
-static struct qlcnic_rx_buffer *
-qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter,
-		struct qlcnic_host_sds_ring *sds_ring,
-		int ring, u64 sts_data0)
-{
-	struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
-	struct qlcnic_rx_buffer *buffer;
-	struct sk_buff *skb;
-	struct qlcnic_host_rds_ring *rds_ring;
-	int index, length, cksum, pkt_offset;
-
-	if (unlikely(ring >= adapter->max_rds_rings))
-		return NULL;
-
-	rds_ring = &recv_ctx->rds_rings[ring];
-
-	index = qlcnic_get_sts_refhandle(sts_data0);
-	if (unlikely(index >= rds_ring->num_desc))
-		return NULL;
-
-	buffer = &rds_ring->rx_buf_arr[index];
-
-	length = qlcnic_get_sts_totallength(sts_data0);
-	cksum  = qlcnic_get_sts_status(sts_data0);
-	pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
-
-	skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
-	if (!skb)
-		return buffer;
-
-	if (length > rds_ring->skb_size)
-		skb_put(skb, rds_ring->skb_size);
-	else
-		skb_put(skb, length);
-
-	if (pkt_offset)
-		skb_pull(skb, pkt_offset);
-
-	if (!qlcnic_check_loopback_buff(skb->data))
-		adapter->diag_cnt++;
-	else
-		dump_skb(skb);
-
-	dev_kfree_skb_any(skb);
-	adapter->stats.rx_pkts++;
-	adapter->stats.rxbytes += length;
-
-	return buffer;
-}
-
-void
-qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
-{
-	struct qlcnic_adapter *adapter = sds_ring->adapter;
-	struct status_desc *desc;
-	struct qlcnic_rx_buffer *rxbuf;
-	u64 sts_data0;
-
-	int opcode, ring, desc_cnt;
-	u32 consumer = sds_ring->consumer;
-
-	desc = &sds_ring->desc_head[consumer];
-	sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
-
-	if (!(sts_data0 & STATUS_OWNER_HOST))
-		return;
-
-	desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
-	opcode = qlcnic_get_sts_opcode(sts_data0);
-
-	ring = qlcnic_get_sts_type(sts_data0);
-	rxbuf = qlcnic_process_rcv_diag(adapter, sds_ring,
-					ring, sts_data0);
-
-	desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
-	consumer = get_next_index(consumer, sds_ring->num_desc);
-
-	sds_ring->consumer = consumer;
-	writel(consumer, sds_ring->crb_sts_consumer);
-}
-
 void
 qlcnic_fetch_mac(struct qlcnic_adapter *adapter, u32 off1, u32 off2,
 			u8 alt_mac, u8 *mac)
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index 7a298cd..788850e 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -1,25 +1,8 @@
 /*
- * Copyright (C) 2009 - QLogic Corporation.
- * All rights reserved.
+ * QLogic qlcnic NIC Driver
+ * Copyright (c)  2009-2010 QLogic Corporation
  *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
- * MA  02111-1307, USA.
- *
- * The full GNU General Public License is included in this distribution
- * in the file called "COPYING".
- *
+ * See LICENSE.qlcnic for copyright and licensing details.
  */
 
 #include <linux/slab.h>
@@ -1450,7 +1433,6 @@
 	netdev->irq = adapter->msix_entries[0].vector;
 
 	netif_carrier_off(netdev);
-	netif_stop_queue(netdev);
 
 	err = register_netdev(netdev);
 	if (err) {
@@ -1486,6 +1468,7 @@
 	uint8_t revision_id;
 	uint8_t pci_using_dac;
 	char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
+	u32 val;
 
 	err = pci_enable_device(pdev);
 	if (err)
@@ -1547,6 +1530,10 @@
 	if (err)
 		goto err_out_iounmap;
 
+	val = QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE);
+	if (QLC_DEV_CHECK_ACTIVE(val, adapter->portnum))
+		adapter->flags |= QLCNIC_NEED_FLR;
+
 	err = adapter->nic_ops->start_firmware(adapter);
 	if (err) {
 		dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n");
@@ -2855,61 +2842,6 @@
 	qlcnic_api_unlock(adapter);
 }
 
-/* Caller should held RESETTING bit.
- * This should be call in sync with qlcnic_request_quiscent_mode.
- */
-void qlcnic_clear_quiscent_mode(struct qlcnic_adapter *adapter)
-{
-	qlcnic_clr_drv_state(adapter);
-	qlcnic_api_lock(adapter);
-	QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
-	qlcnic_api_unlock(adapter);
-}
-
-/* Caller should held RESETTING bit.
- */
-int qlcnic_request_quiscent_mode(struct qlcnic_adapter *adapter)
-{
-	u8 timeo = adapter->dev_init_timeo / 2;
-	u32 state;
-
-	if (qlcnic_api_lock(adapter))
-		return -EIO;
-
-	state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
-	if (state != QLCNIC_DEV_READY)
-		return -EIO;
-
-	QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_QUISCENT);
-	qlcnic_api_unlock(adapter);
-	QLCDB(adapter, DRV, "NEED QUISCENT state set\n");
-	qlcnic_idc_debug_info(adapter, 0);
-
-	qlcnic_set_drv_state(adapter, QLCNIC_DEV_NEED_QUISCENT);
-
-	do {
-		msleep(2000);
-		state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
-		if (state == QLCNIC_DEV_QUISCENT)
-			return 0;
-		if (!qlcnic_check_drv_state(adapter)) {
-			if (qlcnic_api_lock(adapter))
-				return -EIO;
-			QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
-							QLCNIC_DEV_QUISCENT);
-			qlcnic_api_unlock(adapter);
-			QLCDB(adapter, DRV, "QUISCENT mode set\n");
-			return 0;
-		}
-	} while (--timeo);
-
-	dev_err(&adapter->pdev->dev, "Failed to quiesce device, DRV_STATE=%08x"
-		" DRV_ACTIVE=%08x\n", QLCRD32(adapter, QLCNIC_CRB_DRV_STATE),
-		QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE));
-	qlcnic_clear_quiscent_mode(adapter);
-	return -EIO;
-}
-
 /*Transit to RESET state from READY state only */
 static void
 qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
@@ -3588,9 +3520,12 @@
 		case QLCNIC_PORT_DEFAULTS:
 			if (QLC_DEV_GET_DRV(op_mode, pci_func) !=
 						QLCNIC_NON_PRIV_FUNC) {
-				esw_cfg[i].mac_anti_spoof = 0;
-				esw_cfg[i].mac_override = 1;
-				esw_cfg[i].promisc_mode = 1;
+				if (esw_cfg[i].mac_anti_spoof != 0)
+					return QL_STATUS_INVALID_PARAM;
+				if (esw_cfg[i].mac_override != 1)
+					return QL_STATUS_INVALID_PARAM;
+				if (esw_cfg[i].promisc_mode != 1)
+					return QL_STATUS_INVALID_PARAM;
 			}
 			break;
 		case QLCNIC_ADD_VLAN:
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 2282139..bdb8fe8 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -16,7 +16,7 @@
  */
 #define DRV_NAME  	"qlge"
 #define DRV_STRING 	"QLogic 10 Gigabit PCI-E Ethernet Driver "
-#define DRV_VERSION	"v1.00.00.25.00.00-01"
+#define DRV_VERSION	"v1.00.00.27.00.00-01"
 
 #define WQ_ADDR_ALIGN	0x3	/* 4 byte alignment */
 
@@ -2221,6 +2221,7 @@
 int ql_unpause_mpi_risc(struct ql_adapter *qdev);
 int ql_pause_mpi_risc(struct ql_adapter *qdev);
 int ql_hard_reset_mpi_risc(struct ql_adapter *qdev);
+int ql_soft_reset_mpi_risc(struct ql_adapter *qdev);
 int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
 		u32 ram_addr, int word_count);
 int ql_core_dump(struct ql_adapter *qdev,
@@ -2236,6 +2237,7 @@
 int ql_mb_get_port_cfg(struct ql_adapter *qdev);
 int ql_mb_set_port_cfg(struct ql_adapter *qdev);
 int ql_wait_fifo_empty(struct ql_adapter *qdev);
+void ql_get_dump(struct ql_adapter *qdev, void *buff);
 void ql_gen_reg_dump(struct ql_adapter *qdev,
 			struct ql_reg_dump *mpi_coredump);
 netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c
index 4747492..fca804f 100644
--- a/drivers/net/qlge/qlge_dbg.c
+++ b/drivers/net/qlge/qlge_dbg.c
@@ -1317,9 +1317,28 @@
 	status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
 	if (status)
 		return;
+}
 
-	if (test_bit(QL_FRC_COREDUMP, &qdev->flags))
+void ql_get_dump(struct ql_adapter *qdev, void *buff)
+{
+	/*
+	 * If the force-dump flag is set, the core dump has already
+	 * been taken and is stored in our internal buffer; just start
+	 * the spool to dump it to the log file, and copy only a
+	 * snapshot of the general registers to the user's buffer.
+	 * Otherwise, when force is not set, take the complete dump
+	 * straight into the user's buffer.
+	 */
+
+	if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) {
+		if (!ql_core_dump(qdev, buff))
+			ql_soft_reset_mpi_risc(qdev);
+		else
+			netif_err(qdev, drv, qdev->ndev, "coredump failed!\n");
+	} else {
+		ql_gen_reg_dump(qdev, buff);
 		ql_get_core_dump(qdev);
+	}
 }
 
 /* Coredump to messages log file using separate worker thread */
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
index 4892d64..8149cc9 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -375,7 +375,10 @@
 	strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
 	drvinfo->n_stats = 0;
 	drvinfo->testinfo_len = 0;
-	drvinfo->regdump_len = 0;
+	if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
+		drvinfo->regdump_len = sizeof(struct ql_mpi_coredump);
+	else
+		drvinfo->regdump_len = sizeof(struct ql_reg_dump);
 	drvinfo->eedump_len = 0;
 }
 
@@ -547,7 +550,12 @@
 
 static int ql_get_regs_len(struct net_device *ndev)
 {
-	return sizeof(struct ql_reg_dump);
+	struct ql_adapter *qdev = netdev_priv(ndev);
+
+	if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
+		return sizeof(struct ql_mpi_coredump);
+	else
+		return sizeof(struct ql_reg_dump);
 }
 
 static void ql_get_regs(struct net_device *ndev,
@@ -555,7 +563,12 @@
 {
 	struct ql_adapter *qdev = netdev_priv(ndev);
 
-	ql_gen_reg_dump(qdev, p);
+	ql_get_dump(qdev, p);
+	qdev->core_is_dumped = 0;
+	if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
+		regs->len = sizeof(struct ql_mpi_coredump);
+	else
+		regs->len = sizeof(struct ql_reg_dump);
 }
 
 static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index c30e0fe..e4dbbbf 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -62,15 +62,15 @@
 /* NETIF_MSG_PKTDATA | */
     NETIF_MSG_HW | NETIF_MSG_WOL | 0;
 
-static int debug = 0x00007fff;	/* defaults above */
-module_param(debug, int, 0);
+static int debug = -1;	/* defaults above */
+module_param(debug, int, 0664);
 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
 
 #define MSIX_IRQ 0
 #define MSI_IRQ 1
 #define LEG_IRQ 2
 static int qlge_irq_type = MSIX_IRQ;
-module_param(qlge_irq_type, int, MSIX_IRQ);
+module_param(qlge_irq_type, int, 0664);
 MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
 
 static int qlge_mpi_coredump;
@@ -3844,7 +3844,7 @@
 
 static void ql_display_dev_info(struct net_device *ndev)
 {
-	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
+	struct ql_adapter *qdev = netdev_priv(ndev);
 
 	netif_info(qdev, probe, qdev->ndev,
 		   "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
@@ -4264,7 +4264,7 @@
 
 static void qlge_set_multicast_list(struct net_device *ndev)
 {
-	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
+	struct ql_adapter *qdev = netdev_priv(ndev);
 	struct netdev_hw_addr *ha;
 	int i, status;
 
@@ -4354,7 +4354,7 @@
 
 static int qlge_set_mac_address(struct net_device *ndev, void *p)
 {
-	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
+	struct ql_adapter *qdev = netdev_priv(ndev);
 	struct sockaddr *addr = p;
 	int status;
 
@@ -4377,7 +4377,7 @@
 
 static void qlge_tx_timeout(struct net_device *ndev)
 {
-	struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
+	struct ql_adapter *qdev = netdev_priv(ndev);
 	ql_queue_asic_error(qdev);
 }
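
The module_param() fixes above are worth spelling out: the macro's third argument is the sysfs permission mode for /sys/module/qlge/parameters/<name>, not a default value, so the old module_param(qlge_irq_type, int, MSIX_IRQ) passed the constant 0 as a mode and merely hid the parameter from sysfs (the default comes from the variable's initializer). A minimal, hypothetical module skeleton showing the intended usage:

	#include <linux/module.h>

	static int qlge_irq_type = 0;		/* the default lives here */
	module_param(qlge_irq_type, int, 0664);	/* third arg = sysfs mode */
	MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");

	static int __init demo_init(void)
	{
		pr_info("qlge_irq_type = %d\n", qlge_irq_type);
		return 0;
	}

	static void __exit demo_exit(void)
	{
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");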
 
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index 0e7c7c7..100a462 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -87,7 +87,7 @@
 	return status;
 }
 
-static int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
+int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
 {
 	int status;
 	status = ql_write_mpi_reg(qdev, 0x00001010, 1);
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index d88ce9f..98d792c 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -846,10 +846,10 @@
 	else
 		tp->features &= ~RTL_FEATURE_WOL;
 	__rtl8169_set_wol(tp, wol->wolopts);
-	device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
-
 	spin_unlock_irq(&tp->lock);
 
+	device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
+
 	return 0;
 }
 
@@ -2931,7 +2931,7 @@
 		.hw_start	= rtl_hw_start_8168,
 		.region		= 2,
 		.align		= 8,
-		.intr_event	= SYSErr | RxFIFOOver | LinkChg | RxOverflow |
+		.intr_event	= SYSErr | LinkChg | RxOverflow |
 				  TxErr | TxOK | RxOK | RxErr,
 		.napi_event	= TxErr | TxOK | RxOK | RxOverflow,
 		.features	= RTL_FEATURE_GMII | RTL_FEATURE_MSI,
@@ -3240,7 +3240,7 @@
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct rtl8169_private *tp = netdev_priv(dev);
 
-	flush_scheduled_work();
+	cancel_delayed_work_sync(&tp->task);
 
 	unregister_netdev(dev);
 
@@ -4440,8 +4440,7 @@
 	u32 status = opts1 & RxProtoMask;
 
 	if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
-	    ((status == RxProtoUDP) && !(opts1 & UDPFail)) ||
-	    ((status == RxProtoIP) && !(opts1 & IPFail)))
+	    ((status == RxProtoUDP) && !(opts1 & UDPFail)))
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 	else
 		skb_checksum_none_assert(skb);
@@ -4588,7 +4587,8 @@
 		}
 
 		/* Workaround for rx fifo overflow */
-		if (unlikely(status & RxFIFOOver)) {
+		if (unlikely(status & RxFIFOOver) &&
+		    (tp->mac_version == RTL_GIGA_MAC_VER_11)) {
 			netif_stop_queue(dev);
 			rtl8169_tx_timeout(dev);
 			break;
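The set_wol hunk above moves device_set_wakeup_enable() outside the tp->lock
critical section: the function may sleep, and sleeping is not allowed while
holding a spinlock with interrupts disabled. The resulting shape, in outline:

	spin_lock_irq(&tp->lock);
	__rtl8169_set_wol(tp, wol->wolopts);	/* touch hardware state only */
	spin_unlock_irq(&tp->lock);

	/* may sleep, so it runs only after the lock is dropped */
	device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);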
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index ecc25aa..39c17ce 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -88,14 +88,14 @@
 #include "s2io.h"
 #include "s2io-regs.h"
 
-#define DRV_VERSION "2.0.26.27"
+#define DRV_VERSION "2.0.26.28"
 
 /* S2io Driver name & version. */
-static char s2io_driver_name[] = "Neterion";
-static char s2io_driver_version[] = DRV_VERSION;
+static const char s2io_driver_name[] = "Neterion";
+static const char s2io_driver_version[] = DRV_VERSION;
 
-static int rxd_size[2] = {32, 48};
-static int rxd_count[2] = {127, 85};
+static const int rxd_size[2] = {32, 48};
+static const int rxd_count[2] = {127, 85};
 
 static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
 {
@@ -3598,10 +3598,12 @@
 	val64 = readq(&bar0->pif_rd_swapper_fb);
 	if (val64 != 0x0123456789ABCDEFULL) {
 		int i = 0;
-		u64 value[] = { 0xC30000C3C30000C3ULL,   /* FE=1, SE=1 */
-				0x8100008181000081ULL,  /* FE=1, SE=0 */
-				0x4200004242000042ULL,  /* FE=0, SE=1 */
-				0};                     /* FE=0, SE=0 */
+		static const u64 value[] = {
+			0xC30000C3C30000C3ULL,	/* FE=1, SE=1 */
+			0x8100008181000081ULL,	/* FE=1, SE=0 */
+			0x4200004242000042ULL,	/* FE=0, SE=1 */
+			0			/* FE=0, SE=0 */
+		};
 
 		while (i < 4) {
 			writeq(value[i], &bar0->swapper_ctrl);
@@ -3627,10 +3629,12 @@
 
 	if (val64 != valt) {
 		int i = 0;
-		u64 value[] = { 0x00C3C30000C3C300ULL,  /* FE=1, SE=1 */
-				0x0081810000818100ULL,  /* FE=1, SE=0 */
-				0x0042420000424200ULL,  /* FE=0, SE=1 */
-				0};                     /* FE=0, SE=0 */
+		static const u64 value[] = {
+			0x00C3C30000C3C300ULL,	/* FE=1, SE=1 */
+			0x0081810000818100ULL,	/* FE=1, SE=0 */
+			0x0042420000424200ULL,	/* FE=0, SE=1 */
+			0			/* FE=0, SE=0 */
+		};
 
 		while (i < 4) {
 			writeq((value[i] | valr), &bar0->swapper_ctrl);
@@ -5568,30 +5572,27 @@
 	struct s2io_nic *sp = netdev_priv(dev);
 	int i, tx_desc_count = 0, rx_desc_count = 0;
 
-	if (sp->rxd_mode == RXD_MODE_1)
+	if (sp->rxd_mode == RXD_MODE_1) {
 		ering->rx_max_pending = MAX_RX_DESC_1;
-	else if (sp->rxd_mode == RXD_MODE_3B)
+		ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
+	} else {
 		ering->rx_max_pending = MAX_RX_DESC_2;
-
-	ering->tx_max_pending = MAX_TX_DESC;
-	for (i = 0 ; i < sp->config.tx_fifo_num ; i++)
-		tx_desc_count += sp->config.tx_cfg[i].fifo_len;
-
-	DBG_PRINT(INFO_DBG, "max txds: %d\n", sp->config.max_txds);
-	ering->tx_pending = tx_desc_count;
-	rx_desc_count = 0;
-	for (i = 0 ; i < sp->config.rx_ring_num ; i++)
-		rx_desc_count += sp->config.rx_cfg[i].num_rxd;
-
-	ering->rx_pending = rx_desc_count;
+		ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
+	}
 
 	ering->rx_mini_max_pending = 0;
-	ering->rx_mini_pending = 0;
-	if (sp->rxd_mode == RXD_MODE_1)
-		ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
-	else if (sp->rxd_mode == RXD_MODE_3B)
-		ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
+	ering->tx_max_pending = MAX_TX_DESC;
+
+	for (i = 0; i < sp->config.rx_ring_num; i++)
+		rx_desc_count += sp->config.rx_cfg[i].num_rxd;
+	ering->rx_pending = rx_desc_count;
 	ering->rx_jumbo_pending = rx_desc_count;
+	ering->rx_mini_pending = 0;
+
+	for (i = 0; i < sp->config.tx_fifo_num; i++)
+		tx_desc_count += sp->config.tx_cfg[i].fifo_len;
+	ering->tx_pending = tx_desc_count;
+	DBG_PRINT(INFO_DBG, "max txds: %d\n", sp->config.max_txds);
 }
 
 /**
@@ -7692,6 +7693,8 @@
 static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
 			    u8 *dev_multiq)
 {
+	int i;
+
 	if ((tx_fifo_num > MAX_TX_FIFOS) || (tx_fifo_num < 1)) {
 		DBG_PRINT(ERR_DBG, "Requested number of tx fifos "
 			  "(%d) not supported\n", tx_fifo_num);
@@ -7750,6 +7753,15 @@
 		DBG_PRINT(ERR_DBG, "Defaulting to 1-buffer mode\n");
 		rx_ring_mode = 1;
 	}
+
+	for (i = 0; i < MAX_RX_RINGS; i++)
+		if (rx_ring_sz[i] > MAX_RX_BLOCKS_PER_RING) {
+			DBG_PRINT(ERR_DBG, "Requested rx ring size not "
+				  "supported\nDefaulting to %d\n",
+				  MAX_RX_BLOCKS_PER_RING);
+			rx_ring_sz[i] = MAX_RX_BLOCKS_PER_RING;
+		}
+
 	return SUCCESS;
 }
 
@@ -8321,8 +8333,7 @@
 
 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
 {
-	struct net_device *dev =
-		(struct net_device *)pci_get_drvdata(pdev);
+	struct net_device *dev = pci_get_drvdata(pdev);
 	struct s2io_nic *sp;
 
 	if (dev == NULL) {
@@ -8330,9 +8341,11 @@
 		return;
 	}
 
-	flush_scheduled_work();
-
 	sp = netdev_priv(dev);
+
+	cancel_work_sync(&sp->rst_timer_task);
+	cancel_work_sync(&sp->set_link_task);
+
 	unregister_netdev(dev);
 
 	free_shared_mem(sp);
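Several s2io hunks above add "static const" to lookup tables. A plain local
array with an initialiser is typically rebuilt on the stack on every call,
whereas "static const" data is emitted once into .rodata. A trivial
illustration (hypothetical function, not from this patch):

	u64 swapper_value(int i)
	{
		/* built at compile time, stored in read-only data */
		static const u64 value[] = {
			0xC30000C3C30000C3ULL,	/* FE=1, SE=1 */
			0x8100008181000081ULL,	/* FE=1, SE=0 */
			0x4200004242000042ULL,	/* FE=0, SE=1 */
			0			/* FE=0, SE=0 */
		};
		return value[i];
	}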
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 00b8614..7d16030 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -355,13 +355,12 @@
 #define FIFO_OTHER_MAX_NUM			1
 
 
-#define MAX_RX_DESC_1  (MAX_RX_RINGS * MAX_RX_BLOCKS_PER_RING * 127 )
-#define MAX_RX_DESC_2  (MAX_RX_RINGS * MAX_RX_BLOCKS_PER_RING * 85 )
-#define MAX_RX_DESC_3  (MAX_RX_RINGS * MAX_RX_BLOCKS_PER_RING * 85 )
+#define MAX_RX_DESC_1  (MAX_RX_RINGS * MAX_RX_BLOCKS_PER_RING * 128)
+#define MAX_RX_DESC_2  (MAX_RX_RINGS * MAX_RX_BLOCKS_PER_RING * 86)
 #define MAX_TX_DESC    (MAX_AVAILABLE_TXDS)
 
 /* FIFO mappings for all possible number of fifos configured */
-static int fifo_map[][MAX_TX_FIFOS] = {
+static const int fifo_map[][MAX_TX_FIFOS] = {
 	{0, 0, 0, 0, 0, 0, 0, 0},
 	{0, 0, 0, 0, 1, 1, 1, 1},
 	{0, 0, 0, 1, 1, 1, 2, 2},
@@ -372,7 +371,7 @@
 	{0, 1, 2, 3, 4, 5, 6, 7},
 };
 
-static u16 fifo_selector[MAX_TX_FIFOS] = {0, 1, 3, 3, 7, 7, 7, 7};
+static const u16 fifo_selector[MAX_TX_FIFOS] = {0, 1, 3, 3, 7, 7, 7, 7};
 
 /* Maintains Per FIFO related information. */
 struct tx_fifo_config {
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
index 417adf3..76290a8 100644
--- a/drivers/net/sc92031.c
+++ b/drivers/net/sc92031.c
@@ -1449,7 +1449,8 @@
 	dev->irq = pdev->irq;
 
 	/* faked with skb_copy_and_csum_dev */
-	dev->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA;
+	dev->features = NETIF_F_SG | NETIF_F_HIGHDMA |
+		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
 
 	dev->netdev_ops		= &sc92031_netdev_ops;
 	dev->watchdog_timeo	= TX_TIMEOUT;
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 05df20e..2166c1d 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -23,7 +23,6 @@
 #include <linux/gfp.h>
 #include "net_driver.h"
 #include "efx.h"
-#include "mdio_10g.h"
 #include "nic.h"
 
 #include "mcdi.h"
@@ -197,7 +196,9 @@
 
 static void efx_remove_channels(struct efx_nic *efx);
 static void efx_remove_port(struct efx_nic *efx);
+static void efx_init_napi(struct efx_nic *efx);
 static void efx_fini_napi(struct efx_nic *efx);
+static void efx_fini_napi_channel(struct efx_channel *channel);
 static void efx_fini_struct(struct efx_nic *efx);
 static void efx_start_all(struct efx_nic *efx);
 static void efx_stop_all(struct efx_nic *efx);
@@ -335,8 +336,10 @@
 
 	/* Disable interrupts and wait for ISRs to complete */
 	efx_nic_disable_interrupts(efx);
-	if (efx->legacy_irq)
+	if (efx->legacy_irq) {
 		synchronize_irq(efx->legacy_irq);
+		efx->legacy_irq_enabled = false;
+	}
 	if (channel->irq)
 		synchronize_irq(channel->irq);
 
@@ -351,6 +354,8 @@
 	efx_channel_processed(channel);
 
 	napi_enable(&channel->napi_str);
+	if (efx->legacy_irq)
+		efx->legacy_irq_enabled = true;
 	efx_nic_enable_interrupts(efx);
 }
 
@@ -426,6 +431,7 @@
 
 		*channel = *old_channel;
 
+		channel->napi_dev = NULL;
 		memset(&channel->eventq, 0, sizeof(channel->eventq));
 
 		rx_queue = &channel->rx_queue;
@@ -736,9 +742,13 @@
 	if (rc)
 		goto rollback;
 
+	efx_init_napi(efx);
+
 	/* Destroy old channels */
-	for (i = 0; i < efx->n_channels; i++)
+	for (i = 0; i < efx->n_channels; i++) {
+		efx_fini_napi_channel(other_channel[i]);
 		efx_remove_channel(other_channel[i]);
+	}
 out:
 	/* Free unused channel structures */
 	for (i = 0; i < efx->n_channels; i++)
@@ -910,6 +920,7 @@
 
 static int efx_probe_port(struct efx_nic *efx)
 {
+	unsigned char *perm_addr;
 	int rc;
 
 	netif_dbg(efx, probe, efx->net_dev, "create port\n");
@@ -923,11 +934,12 @@
 		return rc;
 
 	/* Sanity check MAC address */
-	if (is_valid_ether_addr(efx->mac_address)) {
-		memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN);
+	perm_addr = efx->net_dev->perm_addr;
+	if (is_valid_ether_addr(perm_addr)) {
+		memcpy(efx->net_dev->dev_addr, perm_addr, ETH_ALEN);
 	} else {
 		netif_err(efx, probe, efx->net_dev, "invalid MAC address %pM\n",
-			  efx->mac_address);
+			  perm_addr);
 		if (!allow_bad_hwaddr) {
 			rc = -EINVAL;
 			goto err;
@@ -1400,6 +1412,8 @@
 		efx_start_channel(channel);
 	}
 
+	if (efx->legacy_irq)
+		efx->legacy_irq_enabled = true;
 	efx_nic_enable_interrupts(efx);
 
 	/* Switch to event based MCDI completions after enabling interrupts.
@@ -1460,8 +1474,10 @@
 
 	/* Disable interrupts and wait for ISR to complete */
 	efx_nic_disable_interrupts(efx);
-	if (efx->legacy_irq)
+	if (efx->legacy_irq) {
 		synchronize_irq(efx->legacy_irq);
+		efx->legacy_irq_enabled = false;
+	}
 	efx_for_each_channel(channel, efx) {
 		if (channel->irq)
 			synchronize_irq(channel->irq);
@@ -1593,7 +1609,7 @@
  *
  **************************************************************************/
 
-static int efx_init_napi(struct efx_nic *efx)
+static void efx_init_napi(struct efx_nic *efx)
 {
 	struct efx_channel *channel;
 
@@ -1602,18 +1618,21 @@
 		netif_napi_add(channel->napi_dev, &channel->napi_str,
 			       efx_poll, napi_weight);
 	}
-	return 0;
+}
+
+static void efx_fini_napi_channel(struct efx_channel *channel)
+{
+	if (channel->napi_dev)
+		netif_napi_del(&channel->napi_str);
+	channel->napi_dev = NULL;
 }
 
 static void efx_fini_napi(struct efx_nic *efx)
 {
 	struct efx_channel *channel;
 
-	efx_for_each_channel(channel, efx) {
-		if (channel->napi_dev)
-			netif_napi_del(&channel->napi_str);
-		channel->napi_dev = NULL;
-	}
+	efx_for_each_channel(channel, efx)
+		efx_fini_napi_channel(channel);
 }
 
 /**************************************************************************
@@ -1962,7 +1981,6 @@
 
 	efx_stop_all(efx);
 	mutex_lock(&efx->mac_lock);
-	mutex_lock(&efx->spi_lock);
 
 	efx_fini_channels(efx);
 	if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
@@ -2004,7 +2022,6 @@
 	efx_init_channels(efx);
 	efx_restore_filters(efx);
 
-	mutex_unlock(&efx->spi_lock);
 	mutex_unlock(&efx->mac_lock);
 
 	efx_start_all(efx);
@@ -2014,7 +2031,6 @@
 fail:
 	efx->port_initialized = false;
 
-	mutex_unlock(&efx->spi_lock);
 	mutex_unlock(&efx->mac_lock);
 
 	return rc;
@@ -2202,8 +2218,6 @@
 	/* Initialise common structures */
 	memset(efx, 0, sizeof(*efx));
 	spin_lock_init(&efx->biu_lock);
-	mutex_init(&efx->mdio_lock);
-	mutex_init(&efx->spi_lock);
 #ifdef CONFIG_SFC_MTD
 	INIT_LIST_HEAD(&efx->mtd_list);
 #endif
@@ -2335,9 +2349,7 @@
 	if (rc)
 		goto fail1;
 
-	rc = efx_init_napi(efx);
-	if (rc)
-		goto fail2;
+	efx_init_napi(efx);
 
 	rc = efx->type->init(efx);
 	if (rc) {
@@ -2368,7 +2380,6 @@
 	efx->type->fini(efx);
  fail3:
 	efx_fini_napi(efx);
- fail2:
 	efx_remove_all(efx);
  fail1:
 	return rc;
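Two efx.c hunks above bracket interrupt control with the new
efx->legacy_irq_enabled flag: it is cleared after
efx_nic_disable_interrupts()/synchronize_irq() and set again just before
efx_nic_enable_interrupts(), presumably so the shared legacy IRQ handler can
recognise interrupts that arrive while the driver considers the line
disabled. The recurring pattern:

	efx_nic_disable_interrupts(efx);
	if (efx->legacy_irq) {
		synchronize_irq(efx->legacy_irq);	/* wait out a running ISR */
		efx->legacy_irq_enabled = false;
	}
	/* ... quiescent work ... */
	if (efx->legacy_irq)
		efx->legacy_irq_enabled = true;
	efx_nic_enable_interrupts(efx);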
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index 10a1bf4..003fdb3 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -74,9 +74,8 @@
 				    bool replace);
 extern int efx_filter_remove_filter(struct efx_nic *efx,
 				    struct efx_filter_spec *spec);
-extern void efx_filter_table_clear(struct efx_nic *efx,
-				   enum efx_filter_table_id table_id,
-				   enum efx_filter_priority priority);
+extern void efx_filter_clear_rx(struct efx_nic *efx,
+				enum efx_filter_priority priority);
 
 /* Channels */
 extern void efx_process_channel_now(struct efx_channel *channel);
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index edb9d16..0e8bb19 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -11,14 +11,13 @@
 #include <linux/netdevice.h>
 #include <linux/ethtool.h>
 #include <linux/rtnetlink.h>
+#include <linux/in.h>
 #include "net_driver.h"
 #include "workarounds.h"
 #include "selftest.h"
 #include "efx.h"
 #include "filter.h"
 #include "nic.h"
-#include "spi.h"
-#include "mdio_10g.h"
 
 struct ethtool_string {
 	char name[ETH_GSTRING_LEN];
@@ -560,12 +559,8 @@
 	if (rc)
 		return rc;
 
-	if (!(data & ETH_FLAG_NTUPLE)) {
-		efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_IP,
-				       EFX_FILTER_PRI_MANUAL);
-		efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_MAC,
-				       EFX_FILTER_PRI_MANUAL);
-	}
+	if (!(data & ETH_FLAG_NTUPLE))
+		efx_filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
 
 	return 0;
 }
@@ -584,6 +579,9 @@
 		goto fail1;
 	}
 
+	netif_info(efx, drv, efx->net_dev, "starting %sline testing\n",
+		   (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
+
 	/* We need rx buffers and interrupts. */
 	already_up = (efx->net_dev->flags & IFF_UP);
 	if (!already_up) {
@@ -602,9 +600,9 @@
 	if (!already_up)
 		dev_close(efx->net_dev);
 
-	netif_dbg(efx, drv, efx->net_dev, "%s %sline self-tests\n",
-		  rc == 0 ? "passed" : "failed",
-		  (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
+	netif_info(efx, drv, efx->net_dev, "%s %sline self-tests\n",
+		   rc == 0 ? "passed" : "failed",
+		   (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
 
  fail2:
  fail1:
@@ -622,68 +620,6 @@
 	return mdio45_nway_restart(&efx->mdio);
 }
 
-static u32 efx_ethtool_get_link(struct net_device *net_dev)
-{
-	struct efx_nic *efx = netdev_priv(net_dev);
-
-	return efx->link_state.up;
-}
-
-static int efx_ethtool_get_eeprom_len(struct net_device *net_dev)
-{
-	struct efx_nic *efx = netdev_priv(net_dev);
-	struct efx_spi_device *spi = efx->spi_eeprom;
-
-	if (!spi)
-		return 0;
-	return min(spi->size, EFX_EEPROM_BOOTCONFIG_END) -
-		min(spi->size, EFX_EEPROM_BOOTCONFIG_START);
-}
-
-static int efx_ethtool_get_eeprom(struct net_device *net_dev,
-				  struct ethtool_eeprom *eeprom, u8 *buf)
-{
-	struct efx_nic *efx = netdev_priv(net_dev);
-	struct efx_spi_device *spi = efx->spi_eeprom;
-	size_t len;
-	int rc;
-
-	rc = mutex_lock_interruptible(&efx->spi_lock);
-	if (rc)
-		return rc;
-	rc = falcon_spi_read(efx, spi,
-			     eeprom->offset + EFX_EEPROM_BOOTCONFIG_START,
-			     eeprom->len, &len, buf);
-	mutex_unlock(&efx->spi_lock);
-
-	eeprom->magic = EFX_ETHTOOL_EEPROM_MAGIC;
-	eeprom->len = len;
-	return rc;
-}
-
-static int efx_ethtool_set_eeprom(struct net_device *net_dev,
-				  struct ethtool_eeprom *eeprom, u8 *buf)
-{
-	struct efx_nic *efx = netdev_priv(net_dev);
-	struct efx_spi_device *spi = efx->spi_eeprom;
-	size_t len;
-	int rc;
-
-	if (eeprom->magic != EFX_ETHTOOL_EEPROM_MAGIC)
-		return -EINVAL;
-
-	rc = mutex_lock_interruptible(&efx->spi_lock);
-	if (rc)
-		return rc;
-	rc = falcon_spi_write(efx, spi,
-			      eeprom->offset + EFX_EEPROM_BOOTCONFIG_START,
-			      eeprom->len, &len, buf);
-	mutex_unlock(&efx->spi_lock);
-
-	eeprom->len = len;
-	return rc;
-}
-
 static int efx_ethtool_get_coalesce(struct net_device *net_dev,
 				    struct ethtool_coalesce *coalesce)
 {
@@ -978,6 +914,7 @@
 	struct ethhdr *mac_entry = &ntuple->fs.h_u.ether_spec;
 	struct ethhdr *mac_mask = &ntuple->fs.m_u.ether_spec;
 	struct efx_filter_spec filter;
+	int rc;
 
 	/* Range-check action */
 	if (ntuple->fs.action < ETHTOOL_RXNTUPLE_ACTION_CLEAR ||
@@ -987,9 +924,16 @@
 	if (~ntuple->fs.data_mask)
 		return -EINVAL;
 
+	efx_filter_init_rx(&filter, EFX_FILTER_PRI_MANUAL, 0,
+			   (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP) ?
+			   0xfff : ntuple->fs.action);
+
 	switch (ntuple->fs.flow_type) {
 	case TCP_V4_FLOW:
-	case UDP_V4_FLOW:
+	case UDP_V4_FLOW: {
+		u8 proto = (ntuple->fs.flow_type == TCP_V4_FLOW ?
+			    IPPROTO_TCP : IPPROTO_UDP);
+
 		/* Must match all of destination, */
 		if (ip_mask->ip4dst | ip_mask->pdst)
 			return -EINVAL;
@@ -1001,7 +945,22 @@
 		/* and nothing else */
 		if ((u8)~ip_mask->tos | (u16)~ntuple->fs.vlan_tag_mask)
 			return -EINVAL;
+
+		if (!ip_mask->ip4src)
+			rc = efx_filter_set_ipv4_full(&filter, proto,
+						      ip_entry->ip4dst,
+						      ip_entry->pdst,
+						      ip_entry->ip4src,
+						      ip_entry->psrc);
+		else
+			rc = efx_filter_set_ipv4_local(&filter, proto,
+						       ip_entry->ip4dst,
+						       ip_entry->pdst);
+		if (rc)
+			return rc;
 		break;
+	}
+
 	case ETHER_FLOW:
 		/* Must match all of destination, */
 		if (!is_zero_ether_addr(mac_mask->h_dest))
@@ -1014,58 +973,24 @@
 		if (!is_broadcast_ether_addr(mac_mask->h_source) ||
 		    mac_mask->h_proto != htons(0xffff))
 			return -EINVAL;
+
+		rc = efx_filter_set_eth_local(
+			&filter,
+			(ntuple->fs.vlan_tag_mask == 0xf000) ?
+			ntuple->fs.vlan_tag : EFX_FILTER_VID_UNSPEC,
+			mac_entry->h_dest);
+		if (rc)
+			return rc;
 		break;
+
 	default:
 		return -EINVAL;
 	}
 
-	filter.priority = EFX_FILTER_PRI_MANUAL;
-	filter.flags = 0;
-
-	switch (ntuple->fs.flow_type) {
-	case TCP_V4_FLOW:
-		if (!ip_mask->ip4src)
-			efx_filter_set_rx_tcp_full(&filter,
-						   htonl(ip_entry->ip4src),
-						   htons(ip_entry->psrc),
-						   htonl(ip_entry->ip4dst),
-						   htons(ip_entry->pdst));
-		else
-			efx_filter_set_rx_tcp_wild(&filter,
-						   htonl(ip_entry->ip4dst),
-						   htons(ip_entry->pdst));
-		break;
-	case UDP_V4_FLOW:
-		if (!ip_mask->ip4src)
-			efx_filter_set_rx_udp_full(&filter,
-						   htonl(ip_entry->ip4src),
-						   htons(ip_entry->psrc),
-						   htonl(ip_entry->ip4dst),
-						   htons(ip_entry->pdst));
-		else
-			efx_filter_set_rx_udp_wild(&filter,
-						   htonl(ip_entry->ip4dst),
-						   htons(ip_entry->pdst));
-		break;
-	case ETHER_FLOW:
-		if (ntuple->fs.vlan_tag_mask == 0xf000)
-			efx_filter_set_rx_mac_full(&filter,
-						   ntuple->fs.vlan_tag & 0xfff,
-						   mac_entry->h_dest);
-		else
-			efx_filter_set_rx_mac_wild(&filter, mac_entry->h_dest);
-		break;
-	}
-
-	if (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_CLEAR) {
+	if (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_CLEAR)
 		return efx_filter_remove_filter(efx, &filter);
-	} else {
-		if (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP)
-			filter.dmaq_id = 0xfff;
-		else
-			filter.dmaq_id = ntuple->fs.action;
+	else
 		return efx_filter_insert_filter(efx, &filter, true);
-	}
 }
 
 static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev,
@@ -1115,10 +1040,7 @@
 	.get_msglevel		= efx_ethtool_get_msglevel,
 	.set_msglevel		= efx_ethtool_set_msglevel,
 	.nway_reset		= efx_ethtool_nway_reset,
-	.get_link		= efx_ethtool_get_link,
-	.get_eeprom_len		= efx_ethtool_get_eeprom_len,
-	.get_eeprom		= efx_ethtool_get_eeprom,
-	.set_eeprom		= efx_ethtool_set_eeprom,
+	.get_link		= ethtool_op_get_link,
 	.get_coalesce		= efx_ethtool_get_coalesce,
 	.set_coalesce		= efx_ethtool_set_coalesce,
 	.get_ringparam		= efx_ethtool_get_ringparam,
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 267019b..70e4f7d 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -24,7 +24,6 @@
 #include "nic.h"
 #include "regs.h"
 #include "io.h"
-#include "mdio_10g.h"
 #include "phy.h"
 #include "workarounds.h"
 
@@ -255,7 +254,6 @@
 	/* Input validation */
 	if (len > FALCON_SPI_MAX_LEN)
 		return -EINVAL;
-	BUG_ON(!mutex_is_locked(&efx->spi_lock));
 
 	/* Check that previous command is not still running */
 	rc = falcon_spi_poll(efx);
@@ -719,6 +717,7 @@
 			     int prtad, int devad, u16 addr, u16 value)
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
+	struct falcon_nic_data *nic_data = efx->nic_data;
 	efx_oword_t reg;
 	int rc;
 
@@ -726,7 +725,7 @@
 		   "writing MDIO %d register %d.%d with 0x%04x\n",
 		    prtad, devad, addr, value);
 
-	mutex_lock(&efx->mdio_lock);
+	mutex_lock(&nic_data->mdio_lock);
 
 	/* Check MDIO not currently being accessed */
 	rc = falcon_gmii_wait(efx);
@@ -762,7 +761,7 @@
 	}
 
 out:
-	mutex_unlock(&efx->mdio_lock);
+	mutex_unlock(&nic_data->mdio_lock);
 	return rc;
 }
 
@@ -771,10 +770,11 @@
 			    int prtad, int devad, u16 addr)
 {
 	struct efx_nic *efx = netdev_priv(net_dev);
+	struct falcon_nic_data *nic_data = efx->nic_data;
 	efx_oword_t reg;
 	int rc;
 
-	mutex_lock(&efx->mdio_lock);
+	mutex_lock(&nic_data->mdio_lock);
 
 	/* Check MDIO not currently being accessed */
 	rc = falcon_gmii_wait(efx);
@@ -813,7 +813,7 @@
 	}
 
 out:
-	mutex_unlock(&efx->mdio_lock);
+	mutex_unlock(&nic_data->mdio_lock);
 	return rc;
 }
 
@@ -841,6 +841,7 @@
 	}
 
 	/* Fill out MDIO structure and loopback modes */
+	mutex_init(&nic_data->mdio_lock);
 	efx->mdio.mdio_read = falcon_mdio_read;
 	efx->mdio.mdio_write = falcon_mdio_write;
 	rc = efx->phy_op->probe(efx);
@@ -880,6 +881,41 @@
 	efx_nic_free_buffer(efx, &efx->stats_buffer);
 }
 
+/* Global events are basically PHY events */
+static bool
+falcon_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
+{
+	struct efx_nic *efx = channel->efx;
+	struct falcon_nic_data *nic_data = efx->nic_data;
+
+	if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
+	    EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
+	    EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR))
+		/* Ignored */
+		return true;
+
+	if ((efx_nic_rev(efx) == EFX_REV_FALCON_B0) &&
+	    EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
+		nic_data->xmac_poll_required = true;
+		return true;
+	}
+
+	if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ?
+	    EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
+	    EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
+		netif_err(efx, rx_err, efx->net_dev,
+			  "channel %d seen global RX_RESET event. Resetting.\n",
+			  channel->channel);
+
+		atomic_inc(&efx->rx_reset);
+		efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
+				   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
+		return true;
+	}
+
+	return false;
+}
+
 /**************************************************************************
  *
  * Falcon test code
@@ -889,6 +925,7 @@
 static int
 falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
 {
+	struct falcon_nic_data *nic_data = efx->nic_data;
 	struct falcon_nvconfig *nvconfig;
 	struct efx_spi_device *spi;
 	void *region;
@@ -896,8 +933,11 @@
 	__le16 *word, *limit;
 	u32 csum;
 
-	spi = efx->spi_flash ? efx->spi_flash : efx->spi_eeprom;
-	if (!spi)
+	if (efx_spi_present(&nic_data->spi_flash))
+		spi = &nic_data->spi_flash;
+	else if (efx_spi_present(&nic_data->spi_eeprom))
+		spi = &nic_data->spi_eeprom;
+	else
 		return -EINVAL;
 
 	region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL);
@@ -905,12 +945,13 @@
 		return -ENOMEM;
 	nvconfig = region + FALCON_NVCONFIG_OFFSET;
 
-	mutex_lock(&efx->spi_lock);
+	mutex_lock(&nic_data->spi_lock);
 	rc = falcon_spi_read(efx, spi, 0, FALCON_NVCONFIG_END, NULL, region);
-	mutex_unlock(&efx->spi_lock);
+	mutex_unlock(&nic_data->spi_lock);
 	if (rc) {
 		netif_err(efx, hw, efx->net_dev, "Failed to read %s\n",
-			  efx->spi_flash ? "flash" : "EEPROM");
+			  efx_spi_present(&nic_data->spi_flash) ?
+			  "flash" : "EEPROM");
 		rc = -EIO;
 		goto out;
 	}
@@ -1012,7 +1053,7 @@
 
 /* Resets NIC to known state.  This routine must be called in process
  * context and is allowed to sleep. */
-static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
+static int __falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
 {
 	struct falcon_nic_data *nic_data = efx->nic_data;
 	efx_oword_t glb_ctl_reg_ker;
@@ -1108,6 +1149,18 @@
 	return rc;
 }
 
+static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
+{
+	struct falcon_nic_data *nic_data = efx->nic_data;
+	int rc;
+
+	mutex_lock(&nic_data->spi_lock);
+	rc = __falcon_reset_hw(efx, method);
+	mutex_unlock(&nic_data->spi_lock);
+
+	return rc;
+}
+
 static void falcon_monitor(struct efx_nic *efx)
 {
 	bool link_changed;
@@ -1189,16 +1242,11 @@
 	return -ETIMEDOUT;
 }
 
-static int falcon_spi_device_init(struct efx_nic *efx,
-				  struct efx_spi_device **spi_device_ret,
+static void falcon_spi_device_init(struct efx_nic *efx,
+				  struct efx_spi_device *spi_device,
 				  unsigned int device_id, u32 device_type)
 {
-	struct efx_spi_device *spi_device;
-
 	if (device_type != 0) {
-		spi_device = kzalloc(sizeof(*spi_device), GFP_KERNEL);
-		if (!spi_device)
-			return -ENOMEM;
 		spi_device->device_id = device_id;
 		spi_device->size =
 			1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE);
@@ -1215,27 +1263,15 @@
 			1 << SPI_DEV_TYPE_FIELD(device_type,
 						SPI_DEV_TYPE_BLOCK_SIZE);
 	} else {
-		spi_device = NULL;
+		spi_device->size = 0;
 	}
-
-	kfree(*spi_device_ret);
-	*spi_device_ret = spi_device;
-	return 0;
-}
-
-static void falcon_remove_spi_devices(struct efx_nic *efx)
-{
-	kfree(efx->spi_eeprom);
-	efx->spi_eeprom = NULL;
-	kfree(efx->spi_flash);
-	efx->spi_flash = NULL;
 }
 
 /* Extract non-volatile configuration */
 static int falcon_probe_nvconfig(struct efx_nic *efx)
 {
+	struct falcon_nic_data *nic_data = efx->nic_data;
 	struct falcon_nvconfig *nvconfig;
-	int board_rev;
 	int rc;
 
 	nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL);
@@ -1243,55 +1279,32 @@
 		return -ENOMEM;
 
 	rc = falcon_read_nvram(efx, nvconfig);
-	if (rc == -EINVAL) {
-		netif_err(efx, probe, efx->net_dev,
-			  "NVRAM is invalid therefore using defaults\n");
-		efx->phy_type = PHY_TYPE_NONE;
-		efx->mdio.prtad = MDIO_PRTAD_NONE;
-		board_rev = 0;
-		rc = 0;
-	} else if (rc) {
-		goto fail1;
-	} else {
-		struct falcon_nvconfig_board_v2 *v2 = &nvconfig->board_v2;
-		struct falcon_nvconfig_board_v3 *v3 = &nvconfig->board_v3;
+	if (rc)
+		goto out;
 
-		efx->phy_type = v2->port0_phy_type;
-		efx->mdio.prtad = v2->port0_phy_addr;
-		board_rev = le16_to_cpu(v2->board_revision);
+	efx->phy_type = nvconfig->board_v2.port0_phy_type;
+	efx->mdio.prtad = nvconfig->board_v2.port0_phy_addr;
 
-		if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
-			rc = falcon_spi_device_init(
-				efx, &efx->spi_flash, FFE_AB_SPI_DEVICE_FLASH,
-				le32_to_cpu(v3->spi_device_type
-					    [FFE_AB_SPI_DEVICE_FLASH]));
-			if (rc)
-				goto fail2;
-			rc = falcon_spi_device_init(
-				efx, &efx->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM,
-				le32_to_cpu(v3->spi_device_type
-					    [FFE_AB_SPI_DEVICE_EEPROM]));
-			if (rc)
-				goto fail2;
-		}
+	if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
+		falcon_spi_device_init(
+			efx, &nic_data->spi_flash, FFE_AB_SPI_DEVICE_FLASH,
+			le32_to_cpu(nvconfig->board_v3
+				    .spi_device_type[FFE_AB_SPI_DEVICE_FLASH]));
+		falcon_spi_device_init(
+			efx, &nic_data->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM,
+			le32_to_cpu(nvconfig->board_v3
+				    .spi_device_type[FFE_AB_SPI_DEVICE_EEPROM]));
 	}
 
 	/* Read the MAC addresses */
-	memcpy(efx->mac_address, nvconfig->mac_address[0], ETH_ALEN);
+	memcpy(efx->net_dev->perm_addr, nvconfig->mac_address[0], ETH_ALEN);
 
 	netif_dbg(efx, probe, efx->net_dev, "PHY is %d phy_id %d\n",
 		  efx->phy_type, efx->mdio.prtad);
 
-	rc = falcon_probe_board(efx, board_rev);
-	if (rc)
-		goto fail2;
-
-	kfree(nvconfig);
-	return 0;
-
- fail2:
-	falcon_remove_spi_devices(efx);
- fail1:
+	rc = falcon_probe_board(efx,
+				le16_to_cpu(nvconfig->board_v2.board_revision));
+out:
 	kfree(nvconfig);
 	return rc;
 }
@@ -1299,6 +1312,7 @@
 /* Probe all SPI devices on the NIC */
 static void falcon_probe_spi_devices(struct efx_nic *efx)
 {
+	struct falcon_nic_data *nic_data = efx->nic_data;
 	efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
 	int boot_dev;
 
@@ -1327,12 +1341,14 @@
 		efx_writeo(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
 	}
 
+	mutex_init(&nic_data->spi_lock);
+
 	if (boot_dev == FFE_AB_SPI_DEVICE_FLASH)
-		falcon_spi_device_init(efx, &efx->spi_flash,
+		falcon_spi_device_init(efx, &nic_data->spi_flash,
 				       FFE_AB_SPI_DEVICE_FLASH,
 				       default_flash_type);
 	if (boot_dev == FFE_AB_SPI_DEVICE_EEPROM)
-		falcon_spi_device_init(efx, &efx->spi_eeprom,
+		falcon_spi_device_init(efx, &nic_data->spi_eeprom,
 				       FFE_AB_SPI_DEVICE_EEPROM,
 				       large_eeprom_type);
 }
@@ -1397,7 +1413,7 @@
 	}
 
 	/* Now we can reset the NIC */
-	rc = falcon_reset_hw(efx, RESET_TYPE_ALL);
+	rc = __falcon_reset_hw(efx, RESET_TYPE_ALL);
 	if (rc) {
 		netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
 		goto fail3;
@@ -1419,8 +1435,11 @@
 
 	/* Read in the non-volatile configuration */
 	rc = falcon_probe_nvconfig(efx);
-	if (rc)
+	if (rc) {
+		if (rc == -EINVAL)
+			netif_err(efx, probe, efx->net_dev, "NVRAM is invalid\n");
 		goto fail5;
+	}
 
 	/* Initialise I2C adapter */
 	board = falcon_board(efx);
@@ -1452,7 +1471,6 @@
 	BUG_ON(i2c_del_adapter(&board->i2c_adap));
 	memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
  fail5:
-	falcon_remove_spi_devices(efx);
 	efx_nic_free_buffer(efx, &efx->irq_status);
  fail4:
  fail3:
@@ -1606,10 +1624,9 @@
 	BUG_ON(rc);
 	memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
 
-	falcon_remove_spi_devices(efx);
 	efx_nic_free_buffer(efx, &efx->irq_status);
 
-	falcon_reset_hw(efx, RESET_TYPE_ALL);
+	__falcon_reset_hw(efx, RESET_TYPE_ALL);
 
 	/* Release the second function after the reset */
 	if (nic_data->pci_dev2) {
@@ -1720,6 +1737,7 @@
 	.reset = falcon_reset_hw,
 	.probe_port = falcon_probe_port,
 	.remove_port = falcon_remove_port,
+	.handle_global_event = falcon_handle_global_event,
 	.prepare_flush = falcon_prepare_flush,
 	.update_stats = falcon_update_nic_stats,
 	.start_stats = falcon_start_nic_stats,
@@ -1760,6 +1778,7 @@
 	.reset = falcon_reset_hw,
 	.probe_port = falcon_probe_port,
 	.remove_port = falcon_remove_port,
+	.handle_global_event = falcon_handle_global_event,
 	.prepare_flush = falcon_prepare_flush,
 	.update_stats = falcon_update_nic_stats,
 	.start_stats = falcon_start_nic_stats,
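falcon_handle_global_event() is wired into both efx_nic_type operation tables
above. The dispatch site is not in this patch, but the shape implied by the
bool return value is that the generic event loop lets the NIC type claim
global events first and falls back to common handling otherwise — roughly
(names illustrative, assumed):

	if (efx->type->handle_global_event &&
	    efx->type->handle_global_event(channel, &event))
		return;		/* claimed by NIC-specific code */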
diff --git a/drivers/net/sfc/falcon_boards.c b/drivers/net/sfc/falcon_boards.c
index cfc6a5b..2dd16f0 100644
--- a/drivers/net/sfc/falcon_boards.c
+++ b/drivers/net/sfc/falcon_boards.c
@@ -13,8 +13,6 @@
 #include "phy.h"
 #include "efx.h"
 #include "nic.h"
-#include "regs.h"
-#include "io.h"
 #include "workarounds.h"
 
 /* Macros for unpacking the board revision */
@@ -30,17 +28,28 @@
 #define FALCON_BOARD_SFN4112F 0x52
 
 /* Board temperature is about 15°C above ambient when air flow is
- * limited. */
+ * limited.  The maximum acceptable ambient temperature varies
+ * depending on the PHY specifications but the critical temperature
+ * above which we should shut down to avoid damage is 80°C. */
 #define FALCON_BOARD_TEMP_BIAS	15
+#define FALCON_BOARD_TEMP_CRIT	(80 + FALCON_BOARD_TEMP_BIAS)
 
 /* SFC4000 datasheet says: 'The maximum permitted junction temperature
  * is 125°C; the thermal design of the environment for the SFC4000
  * should aim to keep this well below 100°C.' */
+#define FALCON_JUNC_TEMP_MIN	0
 #define FALCON_JUNC_TEMP_MAX	90
+#define FALCON_JUNC_TEMP_CRIT	125
 
 /*****************************************************************************
  * Support for LM87 sensor chip used on several boards
  */
+#define LM87_REG_TEMP_HW_INT_LOCK	0x13
+#define LM87_REG_TEMP_HW_EXT_LOCK	0x14
+#define LM87_REG_TEMP_HW_INT		0x17
+#define LM87_REG_TEMP_HW_EXT		0x18
+#define LM87_REG_TEMP_EXT1		0x26
+#define LM87_REG_TEMP_INT		0x27
 #define LM87_REG_ALARMS1		0x41
 #define LM87_REG_ALARMS2		0x42
 #define LM87_IN_LIMITS(nr, _min, _max)			\
@@ -57,6 +66,27 @@
 
 #if defined(CONFIG_SENSORS_LM87) || defined(CONFIG_SENSORS_LM87_MODULE)
 
+static int efx_poke_lm87(struct i2c_client *client, const u8 *reg_values)
+{
+	while (*reg_values) {
+		u8 reg = *reg_values++;
+		u8 value = *reg_values++;
+		int rc = i2c_smbus_write_byte_data(client, reg, value);
+		if (rc)
+			return rc;
+	}
+	return 0;
+}
+
+static const u8 falcon_lm87_common_regs[] = {
+	LM87_REG_TEMP_HW_INT_LOCK, FALCON_BOARD_TEMP_CRIT,
+	LM87_REG_TEMP_HW_INT, FALCON_BOARD_TEMP_CRIT,
+	LM87_TEMP_EXT1_LIMITS(FALCON_JUNC_TEMP_MIN, FALCON_JUNC_TEMP_MAX),
+	LM87_REG_TEMP_HW_EXT_LOCK, FALCON_JUNC_TEMP_CRIT,
+	LM87_REG_TEMP_HW_EXT, FALCON_JUNC_TEMP_CRIT,
+	0
+};
+
 static int efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info,
 			 const u8 *reg_values)
 {
@@ -67,13 +97,16 @@
 	if (!client)
 		return -EIO;
 
-	while (*reg_values) {
-		u8 reg = *reg_values++;
-		u8 value = *reg_values++;
-		rc = i2c_smbus_write_byte_data(client, reg, value);
-		if (rc)
-			goto err;
-	}
+	/* Read-to-clear alarm/interrupt status */
+	i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1);
+	i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2);
+
+	rc = efx_poke_lm87(client, reg_values);
+	if (rc)
+		goto err;
+	rc = efx_poke_lm87(client, falcon_lm87_common_regs);
+	if (rc)
+		goto err;
 
 	board->hwmon_client = client;
 	return 0;
@@ -91,36 +124,56 @@
 static int efx_check_lm87(struct efx_nic *efx, unsigned mask)
 {
 	struct i2c_client *client = falcon_board(efx)->hwmon_client;
-	s32 alarms1, alarms2;
+	bool temp_crit, elec_fault, is_failure;
+	u16 alarms;
+	s32 reg;
 
 	/* If link is up then do not monitor temperature */
 	if (EFX_WORKAROUND_7884(efx) && efx->link_state.up)
 		return 0;
 
-	alarms1 = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1);
-	alarms2 = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2);
-	if (alarms1 < 0)
-		return alarms1;
-	if (alarms2 < 0)
-		return alarms2;
-	alarms1 &= mask;
-	alarms2 &= mask >> 8;
-	if (alarms1 || alarms2) {
-		netif_err(efx, hw, efx->net_dev,
-			  "LM87 detected a hardware failure (status %02x:%02x)"
-			  "%s%s%s\n",
-			  alarms1, alarms2,
-			  (alarms1 & LM87_ALARM_TEMP_INT) ?
-			  "; board is overheating" : "",
-			  (alarms1 & LM87_ALARM_TEMP_EXT1) ?
-			  "; controller is overheating" : "",
-			  (alarms1 & ~(LM87_ALARM_TEMP_INT | LM87_ALARM_TEMP_EXT1)
-			   || alarms2) ?
-			  "; electrical fault" : "");
-		return -ERANGE;
-	}
+	reg = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1);
+	if (reg < 0)
+		return reg;
+	alarms = reg;
+	reg = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2);
+	if (reg < 0)
+		return reg;
+	alarms |= reg << 8;
+	alarms &= mask;
 
-	return 0;
+	temp_crit = false;
+	if (alarms & LM87_ALARM_TEMP_INT) {
+		reg = i2c_smbus_read_byte_data(client, LM87_REG_TEMP_INT);
+		if (reg < 0)
+			return reg;
+		if (reg > FALCON_BOARD_TEMP_CRIT)
+			temp_crit = true;
+	}
+	if (alarms & LM87_ALARM_TEMP_EXT1) {
+		reg = i2c_smbus_read_byte_data(client, LM87_REG_TEMP_EXT1);
+		if (reg < 0)
+			return reg;
+		if (reg > FALCON_JUNC_TEMP_CRIT)
+			temp_crit = true;
+	}
+	elec_fault = alarms & ~(LM87_ALARM_TEMP_INT | LM87_ALARM_TEMP_EXT1);
+	is_failure = temp_crit || elec_fault;
+
+	if (alarms)
+		netif_err(efx, hw, efx->net_dev,
+			  "LM87 detected a hardware %s (status %02x:%02x)"
+			  "%s%s%s%s\n",
+			  is_failure ? "failure" : "problem",
+			  alarms & 0xff, alarms >> 8,
+			  (alarms & LM87_ALARM_TEMP_INT) ?
+			  "; board is overheating" : "",
+			  (alarms & LM87_ALARM_TEMP_EXT1) ?
+			  "; controller is overheating" : "",
+			  temp_crit ? "; reached critical temperature" : "",
+			  elec_fault ? "; electrical fault" : "");
+
+	return is_failure ? -ERANGE : 0;
 }
 
 #else /* !CONFIG_SENSORS_LM87 */
@@ -325,7 +378,7 @@
 		new_mode = old_mode & ~PHY_MODE_SPECIAL;
 	else
 		new_mode = PHY_MODE_SPECIAL;
-	if (old_mode == new_mode) {
+	if (!((old_mode ^ new_mode) & PHY_MODE_SPECIAL)) {
 		err = 0;
 	} else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) {
 		err = -EBUSY;
@@ -362,10 +415,11 @@
 
 static int sfe4001_check_hw(struct efx_nic *efx)
 {
+	struct falcon_nic_data *nic_data = efx->nic_data;
 	s32 status;
 
 	/* If XAUI link is up then do not monitor */
-	if (EFX_WORKAROUND_7884(efx) && !efx->xmac_poll_required)
+	if (EFX_WORKAROUND_7884(efx) && !nic_data->xmac_poll_required)
 		return 0;
 
 	/* Check the powered status of the PHY. Lack of power implies that
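With the 15°C bias, the hardware shutdown limits programmed above work out to
80 + 15 = 95 for the board sensor and 125 for the junction sensor. The
reg_values tables that efx_poke_lm87() consumes follow a simple convention:
flat (register, value) pairs terminated by a register of 0. For example,
using the macro values above (illustrative table, not from this patch):

	static const u8 example_lm87_regs[] = {
		0x13, 95,	/* LM87_REG_TEMP_HW_INT_LOCK = FALCON_BOARD_TEMP_CRIT */
		0x17, 95,	/* LM87_REG_TEMP_HW_INT */
		0		/* register 0 ends the list */
	};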
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index b31f595..b49e843 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -16,7 +16,6 @@
 #include "io.h"
 #include "mac.h"
 #include "mdio_10g.h"
-#include "phy.h"
 #include "workarounds.h"
 
 /**************************************************************************
@@ -88,6 +87,7 @@
 
 static void falcon_ack_status_intr(struct efx_nic *efx)
 {
+	struct falcon_nic_data *nic_data = efx->nic_data;
 	efx_oword_t reg;
 
 	if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx))
@@ -99,7 +99,7 @@
 
 	/* We can only use this interrupt to signal the negative edge of
 	 * xaui_align [we have to poll the positive edge]. */
-	if (efx->xmac_poll_required)
+	if (nic_data->xmac_poll_required)
 		return;
 
 	efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
@@ -277,12 +277,14 @@
 
 static int falcon_reconfigure_xmac(struct efx_nic *efx)
 {
+	struct falcon_nic_data *nic_data = efx->nic_data;
+
 	falcon_reconfigure_xgxs_core(efx);
 	falcon_reconfigure_xmac_core(efx);
 
 	falcon_reconfigure_mac_wrapper(efx);
 
-	efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
+	nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
 	falcon_ack_status_intr(efx);
 
 	return 0;
@@ -350,11 +352,13 @@
 
 void falcon_poll_xmac(struct efx_nic *efx)
 {
+	struct falcon_nic_data *nic_data = efx->nic_data;
+
 	if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up ||
-	    !efx->xmac_poll_required)
+	    !nic_data->xmac_poll_required)
 		return;
 
-	efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
+	nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
 	falcon_ack_status_intr(efx);
 }
 
diff --git a/drivers/net/sfc/filter.c b/drivers/net/sfc/filter.c
index 52cb608..d4722c4 100644
--- a/drivers/net/sfc/filter.c
+++ b/drivers/net/sfc/filter.c
@@ -7,6 +7,7 @@
  * by the Free Software Foundation, incorporated herein by reference.
  */
 
+#include <linux/in.h>
 #include "efx.h"
 #include "filter.h"
 #include "io.h"
@@ -26,19 +27,26 @@
  */
 #define FILTER_CTL_SRCH_MAX 200
 
+enum efx_filter_table_id {
+	EFX_FILTER_TABLE_RX_IP = 0,
+	EFX_FILTER_TABLE_RX_MAC,
+	EFX_FILTER_TABLE_COUNT,
+};
+
 struct efx_filter_table {
+	enum efx_filter_table_id id;
 	u32		offset;		/* address of table relative to BAR */
 	unsigned	size;		/* number of entries */
 	unsigned	step;		/* step between entries */
 	unsigned	used;		/* number currently used */
 	unsigned long	*used_bitmap;
 	struct efx_filter_spec *spec;
+	unsigned	search_depth[EFX_FILTER_TYPE_COUNT];
 };
 
 struct efx_filter_state {
 	spinlock_t	lock;
 	struct efx_filter_table table[EFX_FILTER_TABLE_COUNT];
-	unsigned	search_depth[EFX_FILTER_TYPE_COUNT];
 };
 
 /* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
@@ -65,68 +73,203 @@
 }
 
 static enum efx_filter_table_id
-efx_filter_type_table_id(enum efx_filter_type type)
+efx_filter_spec_table_id(const struct efx_filter_spec *spec)
 {
-	BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_RX_TCP_FULL >> 2));
-	BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_RX_TCP_WILD >> 2));
-	BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_RX_UDP_FULL >> 2));
-	BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_RX_UDP_WILD >> 2));
-	BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_RX_MAC_FULL >> 2));
-	BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_RX_MAC_WILD >> 2));
-	return type >> 2;
+	BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_TCP_FULL >> 2));
+	BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_TCP_WILD >> 2));
+	BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_FULL >> 2));
+	BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_WILD >> 2));
+	BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_FULL >> 2));
+	BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_WILD >> 2));
+	EFX_BUG_ON_PARANOID(spec->type == EFX_FILTER_UNSPEC);
+	return spec->type >> 2;
 }
 
-static void
-efx_filter_table_reset_search_depth(struct efx_filter_state *state,
-				    enum efx_filter_table_id table_id)
+static struct efx_filter_table *
+efx_filter_spec_table(struct efx_filter_state *state,
+		      const struct efx_filter_spec *spec)
 {
-	memset(state->search_depth + (table_id << 2), 0,
-	       sizeof(state->search_depth[0]) << 2);
+	if (spec->type == EFX_FILTER_UNSPEC)
+		return NULL;
+	else
+		return &state->table[efx_filter_spec_table_id(spec)];
+}
+
+static void efx_filter_table_reset_search_depth(struct efx_filter_table *table)
+{
+	memset(table->search_depth, 0, sizeof(table->search_depth));
 }
 
 static void efx_filter_push_rx_limits(struct efx_nic *efx)
 {
 	struct efx_filter_state *state = efx->filter_state;
+	struct efx_filter_table *table;
 	efx_oword_t filter_ctl;
 
 	efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
 
+	table = &state->table[EFX_FILTER_TABLE_RX_IP];
 	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
-			    state->search_depth[EFX_FILTER_RX_TCP_FULL] +
+			    table->search_depth[EFX_FILTER_TCP_FULL] +
 			    FILTER_CTL_SRCH_FUDGE_FULL);
 	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
-			    state->search_depth[EFX_FILTER_RX_TCP_WILD] +
+			    table->search_depth[EFX_FILTER_TCP_WILD] +
 			    FILTER_CTL_SRCH_FUDGE_WILD);
 	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
-			    state->search_depth[EFX_FILTER_RX_UDP_FULL] +
+			    table->search_depth[EFX_FILTER_UDP_FULL] +
 			    FILTER_CTL_SRCH_FUDGE_FULL);
 	EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
-			    state->search_depth[EFX_FILTER_RX_UDP_WILD] +
+			    table->search_depth[EFX_FILTER_UDP_WILD] +
 			    FILTER_CTL_SRCH_FUDGE_WILD);
 
-	if (state->table[EFX_FILTER_TABLE_RX_MAC].size) {
+	table = &state->table[EFX_FILTER_TABLE_RX_MAC];
+	if (table->size) {
 		EFX_SET_OWORD_FIELD(
 			filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
-			state->search_depth[EFX_FILTER_RX_MAC_FULL] +
+			table->search_depth[EFX_FILTER_MAC_FULL] +
 			FILTER_CTL_SRCH_FUDGE_FULL);
 		EFX_SET_OWORD_FIELD(
 			filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
-			state->search_depth[EFX_FILTER_RX_MAC_WILD] +
+			table->search_depth[EFX_FILTER_MAC_WILD] +
 			FILTER_CTL_SRCH_FUDGE_WILD);
 	}
 
 	efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
 }
 
+static inline void __efx_filter_set_ipv4(struct efx_filter_spec *spec,
+					 __be32 host1, __be16 port1,
+					 __be32 host2, __be16 port2)
+{
+	spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
+	spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
+	spec->data[2] = ntohl(host2);
+}
+
+/**
+ * efx_filter_set_ipv4_local - specify IPv4 host, transport protocol and port
+ * @spec: Specification to initialise
+ * @proto: Transport layer protocol number
+ * @host: Local host address (network byte order)
+ * @port: Local port (network byte order)
+ */
+int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
+			      __be32 host, __be16 port)
+{
+	__be32 host1;
+	__be16 port1;
+
+	EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));
+
+	/* This cannot currently be combined with other filtering */
+	if (spec->type != EFX_FILTER_UNSPEC)
+		return -EPROTONOSUPPORT;
+
+	if (port == 0)
+		return -EINVAL;
+
+	switch (proto) {
+	case IPPROTO_TCP:
+		spec->type = EFX_FILTER_TCP_WILD;
+		break;
+	case IPPROTO_UDP:
+		spec->type = EFX_FILTER_UDP_WILD;
+		break;
+	default:
+		return -EPROTONOSUPPORT;
+	}
+
+	/* Filter is constructed in terms of source and destination,
+	 * with the odd wrinkle that the ports are swapped in a UDP
+	 * wildcard filter.  We need to convert from local and remote
+	 * (= zero for wildcard) addresses.
+	 */
+	host1 = 0;
+	if (proto != IPPROTO_UDP) {
+		port1 = 0;
+	} else {
+		port1 = port;
+		port = 0;
+	}
+
+	__efx_filter_set_ipv4(spec, host1, port1, host, port);
+	return 0;
+}
+
+/**
+ * efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports
+ * @spec: Specification to initialise
+ * @proto: Transport layer protocol number
+ * @host: Local host address (network byte order)
+ * @port: Local port (network byte order)
+ * @rhost: Remote host address (network byte order)
+ * @rport: Remote port (network byte order)
+ */
+int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
+			     __be32 host, __be16 port,
+			     __be32 rhost, __be16 rport)
+{
+	EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));
+
+	/* This cannot currently be combined with other filtering */
+	if (spec->type != EFX_FILTER_UNSPEC)
+		return -EPROTONOSUPPORT;
+
+	if (port == 0 || rport == 0)
+		return -EINVAL;
+
+	switch (proto) {
+	case IPPROTO_TCP:
+		spec->type = EFX_FILTER_TCP_FULL;
+		break;
+	case IPPROTO_UDP:
+		spec->type = EFX_FILTER_UDP_FULL;
+		break;
+	default:
+		return -EPROTONOSUPPORT;
+	}
+
+	__efx_filter_set_ipv4(spec, rhost, rport, host, port);
+	return 0;
+}
+
+/**
+ * efx_filter_set_eth_local - specify local Ethernet address and optional VID
+ * @spec: Specification to initialise
+ * @vid: VLAN ID to match, or %EFX_FILTER_VID_UNSPEC
+ * @addr: Local Ethernet MAC address
+ */
+int efx_filter_set_eth_local(struct efx_filter_spec *spec,
+			     u16 vid, const u8 *addr)
+{
+	EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));
+
+	/* This cannot currently be combined with other filtering */
+	if (spec->type != EFX_FILTER_UNSPEC)
+		return -EPROTONOSUPPORT;
+
+	if (vid == EFX_FILTER_VID_UNSPEC) {
+		spec->type = EFX_FILTER_MAC_WILD;
+		spec->data[0] = 0;
+	} else {
+		spec->type = EFX_FILTER_MAC_FULL;
+		spec->data[0] = vid;
+	}
+
+	spec->data[1] = addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5];
+	spec->data[2] = addr[0] << 8 | addr[1];
+	return 0;
+}
+
 /* Build a filter entry and return its n-tuple key. */
 static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
 {
 	u32 data3;
 
-	switch (efx_filter_type_table_id(spec->type)) {
+	switch (efx_filter_spec_table_id(spec)) {
 	case EFX_FILTER_TABLE_RX_IP: {
-		bool is_udp = (spec->type == EFX_FILTER_RX_UDP_FULL ||
-			       spec->type == EFX_FILTER_RX_UDP_WILD);
+		bool is_udp = (spec->type == EFX_FILTER_UDP_FULL ||
+			       spec->type == EFX_FILTER_UDP_WILD);
 		EFX_POPULATE_OWORD_7(
 			*filter,
 			FRF_BZ_RSS_EN,
@@ -143,7 +286,7 @@
 	}
 
 	case EFX_FILTER_TABLE_RX_MAC: {
-		bool is_wild = spec->type == EFX_FILTER_RX_MAC_WILD;
+		bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
 		EFX_POPULATE_OWORD_8(
 			*filter,
 			FRF_CZ_RMFT_RSS_EN,
@@ -206,6 +349,14 @@
 	return filter_idx;
 }
 
+/* Construct/deconstruct external filter IDs */
+
+static inline int
+efx_filter_make_id(enum efx_filter_table_id table_id, unsigned index)
+{
+	return table_id << 16 | index;
+}
+
 /**
  * efx_filter_insert_filter - add or replace a filter
  * @efx: NIC in which to insert the filter
@@ -213,30 +364,28 @@
  * @replace: Flag for whether the specified filter may replace a filter
  *	with an identical match expression and equal or lower priority
  *
- * On success, return the filter index within its table.
+ * On success, return the filter ID.
  * On failure, return a negative error code.
  */
 int efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
 			     bool replace)
 {
 	struct efx_filter_state *state = efx->filter_state;
-	enum efx_filter_table_id table_id =
-		efx_filter_type_table_id(spec->type);
-	struct efx_filter_table *table = &state->table[table_id];
+	struct efx_filter_table *table = efx_filter_spec_table(state, spec);
 	struct efx_filter_spec *saved_spec;
 	efx_oword_t filter;
 	int filter_idx, depth;
 	u32 key;
 	int rc;
 
-	if (table->size == 0)
+	if (!table || table->size == 0)
 		return -EINVAL;
 
 	key = efx_filter_build(&filter, spec);
 
 	netif_vdbg(efx, hw, efx->net_dev,
 		   "%s: type %d search_depth=%d", __func__, spec->type,
-		   state->search_depth[spec->type]);
+		   table->search_depth[spec->type]);
 
 	spin_lock_bh(&state->lock);
 
@@ -263,8 +412,8 @@
 	}
 	*saved_spec = *spec;
 
-	if (state->search_depth[spec->type] < depth) {
-		state->search_depth[spec->type] = depth;
+	if (table->search_depth[spec->type] < depth) {
+		table->search_depth[spec->type] = depth;
 		efx_filter_push_rx_limits(efx);
 	}
 
@@ -273,6 +422,7 @@
 	netif_vdbg(efx, hw, efx->net_dev,
 		   "%s: filter type %d index %d rxq %u set",
 		   __func__, spec->type, filter_idx, spec->dmaq_id);
+	rc = efx_filter_make_id(table->id, filter_idx);
 
 out:
 	spin_unlock_bh(&state->lock);
@@ -306,15 +456,16 @@
 int efx_filter_remove_filter(struct efx_nic *efx, struct efx_filter_spec *spec)
 {
 	struct efx_filter_state *state = efx->filter_state;
-	enum efx_filter_table_id table_id =
-		efx_filter_type_table_id(spec->type);
-	struct efx_filter_table *table = &state->table[table_id];
+	struct efx_filter_table *table = efx_filter_spec_table(state, spec);
 	struct efx_filter_spec *saved_spec;
 	efx_oword_t filter;
 	int filter_idx, depth;
 	u32 key;
 	int rc;
 
+	if (!table)
+		return -EINVAL;
+
 	key = efx_filter_build(&filter, spec);
 
 	spin_lock_bh(&state->lock);
@@ -332,7 +483,7 @@
 
 	efx_filter_table_clear_entry(efx, table, filter_idx);
 	if (table->used == 0)
-		efx_filter_table_reset_search_depth(state, table_id);
+		efx_filter_table_reset_search_depth(table);
 	rc = 0;
 
 out:
@@ -340,15 +491,9 @@
 	return rc;
 }
 
-/**
- * efx_filter_table_clear - remove filters from a table by priority
- * @efx: NIC from which to remove the filters
- * @table_id: Table from which to remove the filters
- * @priority: Maximum priority to remove
- */
-void efx_filter_table_clear(struct efx_nic *efx,
-			    enum efx_filter_table_id table_id,
-			    enum efx_filter_priority priority)
+static void efx_filter_table_clear(struct efx_nic *efx,
+				   enum efx_filter_table_id table_id,
+				   enum efx_filter_priority priority)
 {
 	struct efx_filter_state *state = efx->filter_state;
 	struct efx_filter_table *table = &state->table[table_id];
@@ -360,11 +505,22 @@
 		if (table->spec[filter_idx].priority <= priority)
 			efx_filter_table_clear_entry(efx, table, filter_idx);
 	if (table->used == 0)
-		efx_filter_table_reset_search_depth(state, table_id);
+		efx_filter_table_reset_search_depth(table);
 
 	spin_unlock_bh(&state->lock);
 }
 
+/**
+ * efx_filter_clear_rx - remove RX filters by priority
+ * @efx: NIC from which to remove the filters
+ * @priority: Maximum priority to remove
+ */
+void efx_filter_clear_rx(struct efx_nic *efx, enum efx_filter_priority priority)
+{
+	efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_IP, priority);
+	efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_MAC, priority);
+}
+
 /* Restore filter state after reset */
 void efx_restore_filters(struct efx_nic *efx)
 {
@@ -407,6 +563,7 @@
 
 	if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
 		table = &state->table[EFX_FILTER_TABLE_RX_IP];
+		table->id = EFX_FILTER_TABLE_RX_IP;
 		table->offset = FR_BZ_RX_FILTER_TBL0;
 		table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
 		table->step = FR_BZ_RX_FILTER_TBL0_STEP;
@@ -414,6 +571,7 @@
 
 	if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
 		table = &state->table[EFX_FILTER_TABLE_RX_MAC];
+		table->id = EFX_FILTER_TABLE_RX_MAC;
 		table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
 		table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
 		table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;
@@ -428,10 +586,9 @@
 					     GFP_KERNEL);
 		if (!table->used_bitmap)
 			goto fail;
-		table->spec = vmalloc(table->size * sizeof(*table->spec));
+		table->spec = vzalloc(table->size * sizeof(*table->spec));
 		if (!table->spec)
 			goto fail;
-		memset(table->spec, 0, table->size * sizeof(*table->spec));
 	}
 
 	return 0;
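__efx_filter_set_ipv4() above packs two (host, port) pairs into the 96-bit
spec->data[] array: data[0] holds the low 16 bits of host1 above port1,
data[1] holds port2 above the high 16 bits of host1, and data[2] is host2.
Worked with illustrative values — host1 = 192.168.0.1, port1 = 80,
host2 = 10.0.0.2, port2 = 12345 — the result is:

	spec->data[0] = 0x00010050;	/* host1 low 16 | port1 (80 = 0x0050) */
	spec->data[1] = 0x3039c0a8;	/* port2 (12345 = 0x3039) | host1 high 16 */
	spec->data[2] = 0x0a000002;	/* host2 = 10.0.0.2 */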
diff --git a/drivers/net/sfc/filter.h b/drivers/net/sfc/filter.h
index a53319d..872f213 100644
--- a/drivers/net/sfc/filter.h
+++ b/drivers/net/sfc/filter.h
@@ -12,31 +12,27 @@
 
 #include <linux/types.h>
 
-enum efx_filter_table_id {
-	EFX_FILTER_TABLE_RX_IP = 0,
-	EFX_FILTER_TABLE_RX_MAC,
-	EFX_FILTER_TABLE_COUNT,
-};
-
 /**
  * enum efx_filter_type - type of hardware filter
- * @EFX_FILTER_RX_TCP_FULL: RX, matching TCP/IPv4 4-tuple
- * @EFX_FILTER_RX_TCP_WILD: RX, matching TCP/IPv4 destination (host, port)
- * @EFX_FILTER_RX_UDP_FULL: RX, matching UDP/IPv4 4-tuple
- * @EFX_FILTER_RX_UDP_WILD: RX, matching UDP/IPv4 destination (host, port)
- * @EFX_FILTER_RX_MAC_FULL: RX, matching Ethernet destination MAC address, VID
- * @EFX_FILTER_RX_MAC_WILD: RX, matching Ethernet destination MAC address
+ * @EFX_FILTER_TCP_FULL: Matching TCP/IPv4 4-tuple
+ * @EFX_FILTER_TCP_WILD: Matching TCP/IPv4 destination (host, port)
+ * @EFX_FILTER_UDP_FULL: Matching UDP/IPv4 4-tuple
+ * @EFX_FILTER_UDP_WILD: Matching UDP/IPv4 destination (host, port)
+ * @EFX_FILTER_MAC_FULL: Matching Ethernet destination MAC address, VID
+ * @EFX_FILTER_MAC_WILD: Matching Ethernet destination MAC address
+ * @EFX_FILTER_UNSPEC: Match type is unspecified
  *
- * Falcon NICs only support the RX TCP/IPv4 and UDP/IPv4 filter types.
+ * Falcon NICs only support the TCP/IPv4 and UDP/IPv4 filter types.
  */
 enum efx_filter_type {
-	EFX_FILTER_RX_TCP_FULL = 0,
-	EFX_FILTER_RX_TCP_WILD,
-	EFX_FILTER_RX_UDP_FULL,
-	EFX_FILTER_RX_UDP_WILD,
-	EFX_FILTER_RX_MAC_FULL = 4,
-	EFX_FILTER_RX_MAC_WILD,
-	EFX_FILTER_TYPE_COUNT,
+	EFX_FILTER_TCP_FULL = 0,
+	EFX_FILTER_TCP_WILD,
+	EFX_FILTER_UDP_FULL,
+	EFX_FILTER_UDP_WILD,
+	EFX_FILTER_MAC_FULL = 4,
+	EFX_FILTER_MAC_WILD,
+	EFX_FILTER_TYPE_COUNT,		/* number of specific types */
+	EFX_FILTER_UNSPEC = 0xf,
 };
 
 /**
@@ -63,13 +59,13 @@
  * @EFX_FILTER_FLAG_RX_OVERRIDE_IP: Enables a MAC filter to override
  *	any IP filter that matches the same packet.  By default, IP
  *	filters take precedence.
- *
- * Currently, no flags are defined for TX filters.
+ * @EFX_FILTER_FLAG_RX: Filter is for RX
  */
 enum efx_filter_flags {
 	EFX_FILTER_FLAG_RX_RSS = 0x01,
 	EFX_FILTER_FLAG_RX_SCATTER = 0x02,
 	EFX_FILTER_FLAG_RX_OVERRIDE_IP = 0x04,
+	EFX_FILTER_FLAG_RX = 0x08,
 };
 
 /**
@@ -91,99 +87,26 @@
 	u32	data[3];
 };
 
-/**
- * efx_filter_set_rx_tcp_full - specify RX filter with TCP/IPv4 full match
- * @spec: Specification to initialise
- * @shost: Source host address (host byte order)
- * @sport: Source port (host byte order)
- * @dhost: Destination host address (host byte order)
- * @dport: Destination port (host byte order)
- */
-static inline void
-efx_filter_set_rx_tcp_full(struct efx_filter_spec *spec,
-			   u32 shost, u16 sport, u32 dhost, u16 dport)
+static inline void efx_filter_init_rx(struct efx_filter_spec *spec,
+				      enum efx_filter_priority priority,
+				      enum efx_filter_flags flags,
+				      unsigned rxq_id)
 {
-	spec->type = EFX_FILTER_RX_TCP_FULL;
-	spec->data[0] = sport | shost << 16;
-	spec->data[1] = dport << 16 | shost >> 16;
-	spec->data[2] = dhost;
+	spec->type = EFX_FILTER_UNSPEC;
+	spec->priority = priority;
+	spec->flags = EFX_FILTER_FLAG_RX | flags;
+	spec->dmaq_id = rxq_id;
 }
 
-/**
- * efx_filter_set_rx_tcp_wild - specify RX filter with TCP/IPv4 wildcard match
- * @spec: Specification to initialise
- * @dhost: Destination host address (host byte order)
- * @dport: Destination port (host byte order)
- */
-static inline void
-efx_filter_set_rx_tcp_wild(struct efx_filter_spec *spec, u32 dhost, u16 dport)
-{
-	spec->type = EFX_FILTER_RX_TCP_WILD;
-	spec->data[0] = 0;
-	spec->data[1] = dport << 16;
-	spec->data[2] = dhost;
-}
-
-/**
- * efx_filter_set_rx_udp_full - specify RX filter with UDP/IPv4 full match
- * @spec: Specification to initialise
- * @shost: Source host address (host byte order)
- * @sport: Source port (host byte order)
- * @dhost: Destination host address (host byte order)
- * @dport: Destination port (host byte order)
- */
-static inline void
-efx_filter_set_rx_udp_full(struct efx_filter_spec *spec,
-			   u32 shost, u16 sport, u32 dhost, u16 dport)
-{
-	spec->type = EFX_FILTER_RX_UDP_FULL;
-	spec->data[0] = sport | shost << 16;
-	spec->data[1] = dport << 16 | shost >> 16;
-	spec->data[2] = dhost;
-}
-
-/**
- * efx_filter_set_rx_udp_wild - specify RX filter with UDP/IPv4 wildcard match
- * @spec: Specification to initialise
- * @dhost: Destination host address (host byte order)
- * @dport: Destination port (host byte order)
- */
-static inline void
-efx_filter_set_rx_udp_wild(struct efx_filter_spec *spec, u32 dhost, u16 dport)
-{
-	spec->type = EFX_FILTER_RX_UDP_WILD;
-	spec->data[0] = dport;
-	spec->data[1] = 0;
-	spec->data[2] = dhost;
-}
-
-/**
- * efx_filter_set_rx_mac_full - specify RX filter with MAC full match
- * @spec: Specification to initialise
- * @vid: VLAN ID
- * @addr: Destination MAC address
- */
-static inline void efx_filter_set_rx_mac_full(struct efx_filter_spec *spec,
-					      u16 vid, const u8 *addr)
-{
-	spec->type = EFX_FILTER_RX_MAC_FULL;
-	spec->data[0] = vid;
-	spec->data[1] = addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5];
-	spec->data[2] = addr[0] << 8 | addr[1];
-}
-
-/**
- * efx_filter_set_rx_mac_full - specify RX filter with MAC wildcard match
- * @spec: Specification to initialise
- * @addr: Destination MAC address
- */
-static inline void efx_filter_set_rx_mac_wild(struct efx_filter_spec *spec,
-					      const u8 *addr)
-{
-	spec->type = EFX_FILTER_RX_MAC_WILD;
-	spec->data[0] = 0;
-	spec->data[1] = addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5];
-	spec->data[2] = addr[0] << 8 | addr[1];
-}
+extern int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
+				     __be32 host, __be16 port);
+extern int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
+				    __be32 host, __be16 port,
+				    __be32 rhost, __be16 rport);
+extern int efx_filter_set_eth_local(struct efx_filter_spec *spec,
+				    u16 vid, const u8 *addr);
+enum {
+	EFX_FILTER_VID_UNSPEC = 0xffff,
+};
 
 #endif /* EFX_FILTER_H */
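
The type-specific initialisers removed above are replaced by a two-step API:
efx_filter_init_rx() fills in the generic fields, then one of the
efx_filter_set_*() setters narrows the match.  A hedged sketch of a caller,
assuming the driver's existing EFX_FILTER_PRI_MANUAL priority and
efx_filter_insert_filter() entry point (both from elsewhere in the driver,
not this hunk), with an illustrative address and port:

	/* Sketch only: steer TCP traffic for 192.168.0.1:80 to RX queue 0.
	 * efx_filter_init_rx() and efx_filter_set_ipv4_local() are declared
	 * in the hunk above; the priority constant and insert call are
	 * assumed from the rest of the driver. */
	static int example_steer_http(struct efx_nic *efx)
	{
		struct efx_filter_spec spec;
		int rc;

		efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL,
				   EFX_FILTER_FLAG_RX_SCATTER, 0);
		rc = efx_filter_set_ipv4_local(&spec, IPPROTO_TCP,
					       htonl(0xc0a80001), htons(80));
		if (rc)
			return rc;
		return efx_filter_insert_filter(efx, &spec, false);
	}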
diff --git a/drivers/net/sfc/io.h b/drivers/net/sfc/io.h
index 85a99fe..6da4ae2 100644
--- a/drivers/net/sfc/io.h
+++ b/drivers/net/sfc/io.h
@@ -22,28 +22,39 @@
  *
  * Notes on locking strategy:
  *
- * Most NIC registers require 16-byte (or 8-byte, for SRAM) atomic writes
- * which necessitates locking.
- * Under normal operation few writes to NIC registers are made and these
- * registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and TX_DESC_UPD_REG) are special
- * cased to allow 4-byte (hence lockless) accesses.
+ * Most CSRs are 128-bit (oword) and therefore cannot be read or
+ * written atomically.  Access from the host is buffered by the Bus
+ * Interface Unit (BIU).  Whenever the host reads from the lowest
+ * address of such a register, or from the address of a different such
+ * register, the BIU latches the register's value.  Subsequent reads
+ * from higher addresses of the same register will read the latched
+ * value.  Whenever the host writes part of such a register, the BIU
+ * collects the written value and does not write to the underlying
+ * register until all 4 dwords have been written.  A similar buffering
+ * scheme applies to host access to the NIC's 64-bit SRAM.
  *
- * It *is* safe to write to these 4-byte registers in the middle of an
- * access to an 8-byte or 16-byte register.  We therefore use a
- * spinlock to protect accesses to the larger registers, but no locks
- * for the 4-byte registers.
+ * Access to different CSRs and 64-bit SRAM words must be serialised,
+ * since interleaved access can result in lost writes or lost
+ * information from read-to-clear fields.  We use efx_nic::biu_lock
+ * for this.  (We could use separate locks for read and write, but
+ * this is not normally a performance bottleneck.)
  *
- * A write barrier is needed to ensure that DW3 is written after DW0/1/2
- * due to the way the 16byte registers are "collected" in the BIU.
+ * The DMA descriptor pointers (RX_DESC_UPD and TX_DESC_UPD) are
+ * 128-bit but are special-cased in the BIU to avoid the need for
+ * locking in the host:
  *
- * We also lock when carrying out reads, to ensure consistency of the
- * data (made possible since the BIU reads all 128 bits into a cache).
- * Reads are very rare, so this isn't a significant performance
- * impact.  (Most data transferred from NIC to host is DMAed directly
- * into host memory).
- *
- * I/O BAR access uses locks for both reads and writes (but is only provided
- * for testing purposes).
+ * - They are write-only.
+ * - The semantics of writing to these registers are such that
+ *   replacing the low 96 bits with zero does not affect functionality.
+ * - If the host writes to the last dword address of such a register
+ *   (i.e. the high 32 bits) the underlying register will always be
+ *   written.  If the collector does not hold values for the low 96
+ *   bits of the register, they will be written as zero.  Writing to
+ *   the last qword does not have this effect and must not be done.
+ * - If the host writes to the address of any other part of such a
+ *   register while the collector already holds values for some other
+ *   register, the write is discarded and the collector maintains its
+ *   current state.
  */
 
 #if BITS_PER_LONG == 64
@@ -72,7 +83,7 @@
 	return (__force __le32)__raw_readl(efx->membase + reg);
 }
 
-/* Writes to a normal 16-byte Efx register, locking as appropriate. */
+/* Write a normal 128-bit CSR, locking as appropriate. */
 static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,
 			      unsigned int reg)
 {
@@ -85,21 +96,18 @@
 	spin_lock_irqsave(&efx->biu_lock, flags);
 #ifdef EFX_USE_QWORD_IO
 	_efx_writeq(efx, value->u64[0], reg + 0);
-	wmb();
 	_efx_writeq(efx, value->u64[1], reg + 8);
 #else
 	_efx_writed(efx, value->u32[0], reg + 0);
 	_efx_writed(efx, value->u32[1], reg + 4);
 	_efx_writed(efx, value->u32[2], reg + 8);
-	wmb();
 	_efx_writed(efx, value->u32[3], reg + 12);
 #endif
 	mmiowb();
 	spin_unlock_irqrestore(&efx->biu_lock, flags);
 }
 
-/* Write an 8-byte NIC SRAM entry through the supplied mapping,
- * locking as appropriate. */
+/* Write 64-bit SRAM through the supplied mapping, locking as appropriate. */
 static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
 				   efx_qword_t *value, unsigned int index)
 {
@@ -115,36 +123,25 @@
 	__raw_writeq((__force u64)value->u64[0], membase + addr);
 #else
 	__raw_writel((__force u32)value->u32[0], membase + addr);
-	wmb();
 	__raw_writel((__force u32)value->u32[1], membase + addr + 4);
 #endif
 	mmiowb();
 	spin_unlock_irqrestore(&efx->biu_lock, flags);
 }
 
-/* Write dword to NIC register that allows partial writes
- *
- * Some registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and
- * TX_DESC_UPD_REG) can be written to as a single dword.  This allows
- * for lockless writes.
- */
+/* Write a 32-bit CSR or the last dword of a special 128-bit CSR */
 static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value,
 			      unsigned int reg)
 {
 	netif_vdbg(efx, hw, efx->net_dev,
-		   "writing partial register %x with "EFX_DWORD_FMT"\n",
+		   "writing register %x with "EFX_DWORD_FMT"\n",
 		   reg, EFX_DWORD_VAL(*value));
 
 	/* No lock required */
 	_efx_writed(efx, value->u32[0], reg);
 }
 
-/* Read from a NIC register
- *
- * This reads an entire 16-byte register in one go, locking as
- * appropriate.  It is essential to read the first dword first, as this
- * prompts the NIC to load the current value into the shadow register.
- */
+/* Read a 128-bit CSR, locking as appropriate. */
 static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
 			     unsigned int reg)
 {
@@ -152,7 +149,6 @@
 
 	spin_lock_irqsave(&efx->biu_lock, flags);
 	value->u32[0] = _efx_readd(efx, reg + 0);
-	rmb();
 	value->u32[1] = _efx_readd(efx, reg + 4);
 	value->u32[2] = _efx_readd(efx, reg + 8);
 	value->u32[3] = _efx_readd(efx, reg + 12);
@@ -163,8 +159,7 @@
 		   EFX_OWORD_VAL(*value));
 }
 
-/* Read an 8-byte SRAM entry through supplied mapping,
- * locking as appropriate. */
+/* Read 64-bit SRAM through the supplied mapping, locking as appropriate. */
 static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
 				  efx_qword_t *value, unsigned int index)
 {
@@ -176,7 +171,6 @@
 	value->u64[0] = (__force __le64)__raw_readq(membase + addr);
 #else
 	value->u32[0] = (__force __le32)__raw_readl(membase + addr);
-	rmb();
 	value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
 #endif
 	spin_unlock_irqrestore(&efx->biu_lock, flags);
@@ -186,7 +180,7 @@
 		   addr, EFX_QWORD_VAL(*value));
 }
 
-/* Read dword from register that allows partial writes (sic) */
+/* Read a 32-bit CSR or SRAM */
 static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value,
 				unsigned int reg)
 {
@@ -196,28 +190,28 @@
 		   reg, EFX_DWORD_VAL(*value));
 }
 
-/* Write to a register forming part of a table */
+/* Write a 128-bit CSR forming part of a table */
 static inline void efx_writeo_table(struct efx_nic *efx, efx_oword_t *value,
 				      unsigned int reg, unsigned int index)
 {
 	efx_writeo(efx, value, reg + index * sizeof(efx_oword_t));
 }
 
-/* Read to a register forming part of a table */
+/* Read a 128-bit CSR forming part of a table */
 static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value,
 				     unsigned int reg, unsigned int index)
 {
 	efx_reado(efx, value, reg + index * sizeof(efx_oword_t));
 }
 
-/* Write to a dword register forming part of a table */
+/* Write a 32-bit CSR forming part of a table, or 32-bit SRAM */
 static inline void efx_writed_table(struct efx_nic *efx, efx_dword_t *value,
 				       unsigned int reg, unsigned int index)
 {
 	efx_writed(efx, value, reg + index * sizeof(efx_oword_t));
 }
 
-/* Read from a dword register forming part of a table */
+/* Read a 32-bit CSR forming part of a table, or 32-bit SRAM */
 static inline void efx_readd_table(struct efx_nic *efx, efx_dword_t *value,
 				   unsigned int reg, unsigned int index)
 {
@@ -231,29 +225,54 @@
 #define EFX_PAGED_REG(page, reg) \
 	((page) * EFX_PAGE_BLOCK_SIZE + (reg))
 
-/* As for efx_writeo(), but for a page-mapped register. */
-static inline void efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
-				   unsigned int reg, unsigned int page)
+/* Write the whole of RX_DESC_UPD or TX_DESC_UPD */
+static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
+				    unsigned int reg, unsigned int page)
 {
-	efx_writeo(efx, value, EFX_PAGED_REG(page, reg));
-}
+	reg = EFX_PAGED_REG(page, reg);
 
-/* As for efx_writed(), but for a page-mapped register. */
-static inline void efx_writed_page(struct efx_nic *efx, efx_dword_t *value,
-				   unsigned int reg, unsigned int page)
+	netif_vdbg(efx, hw, efx->net_dev,
+		   "writing register %x with " EFX_OWORD_FMT "\n", reg,
+		   EFX_OWORD_VAL(*value));
+
+#ifdef EFX_USE_QWORD_IO
+	_efx_writeq(efx, value->u64[0], reg + 0);
+#else
+	_efx_writed(efx, value->u32[0], reg + 0);
+	_efx_writed(efx, value->u32[1], reg + 4);
+#endif
+	_efx_writed(efx, value->u32[2], reg + 8);
+	_efx_writed(efx, value->u32[3], reg + 12);
+}
+#define efx_writeo_page(efx, value, reg, page)				\
+	_efx_writeo_page(efx, value,					\
+			 reg +						\
+			 BUILD_BUG_ON_ZERO((reg) != 0x830 && (reg) != 0xa10), \
+			 page)
+
+/* Write a page-mapped 32-bit CSR (EVQ_RPTR or the high bits of
+ * RX_DESC_UPD or TX_DESC_UPD)
+ */
+static inline void _efx_writed_page(struct efx_nic *efx, efx_dword_t *value,
+				    unsigned int reg, unsigned int page)
 {
 	efx_writed(efx, value, EFX_PAGED_REG(page, reg));
 }
+#define efx_writed_page(efx, value, reg, page)				\
+	_efx_writed_page(efx, value,					\
+			 reg +						\
+			 BUILD_BUG_ON_ZERO((reg) != 0x400 && (reg) != 0x83c \
+					   && (reg) != 0xa1c),		\
+			 page)
 
-/* Write dword to page-mapped register with an extra lock.
- *
- * As for efx_writed_page(), but for a register that suffers from
- * SFC bug 3181. Take out a lock so the BIU collector cannot be
- * confused. */
-static inline void efx_writed_page_locked(struct efx_nic *efx,
-					  efx_dword_t *value,
-					  unsigned int reg,
-					  unsigned int page)
+/* Write TIMER_COMMAND.  This is a page-mapped 32-bit CSR, but a bug
+ * in the BIU means that writes to TIMER_COMMAND[0] invalidate the
+ * collector register.
+ */
+static inline void _efx_writed_page_locked(struct efx_nic *efx,
+					   efx_dword_t *value,
+					   unsigned int reg,
+					   unsigned int page)
 {
 	unsigned long flags __attribute__ ((unused));
 
@@ -265,5 +284,9 @@
 		efx_writed(efx, value, EFX_PAGED_REG(page, reg));
 	}
 }
+#define efx_writed_page_locked(efx, value, reg, page)			\
+	_efx_writed_page_locked(efx, value,				\
+				reg + BUILD_BUG_ON_ZERO((reg) != 0x420), \
+				page)
 
 #endif /* EFX_IO_H */
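
The efx_writeo_page(), efx_writed_page() and efx_writed_page_locked()
wrappers above use BUILD_BUG_ON_ZERO() so that passing any register other
than the few known-safe ones becomes a compile-time error rather than a
runtime hazard.  A self-contained sketch of the technique; the macro body
mirrors the definition in <linux/kernel.h>, and the register number is
illustrative:

	/* BUILD_BUG_ON_ZERO(e) evaluates to 0 when e is false; when e is
	 * true it declares a negative-width bitfield, which cannot
	 * compile.  Mirrored here so the sketch stands alone. */
	#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); }))

	#define SAFE_REG 0x830			/* illustrative address */

	extern void raw_write(unsigned int reg);

	/* The check adds 0 to the argument, so code generation is
	 * unchanged, but an unsafe register breaks the build. */
	#define checked_write(reg) \
		raw_write((reg) + BUILD_BUG_ON_ZERO((reg) != SAFE_REG))

	/* checked_write(SAFE_REG) compiles; checked_write(0x840) fails
	 * with a negative-bitfield-width error. */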
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
index 12cf910..b716e82 100644
--- a/drivers/net/sfc/mcdi.c
+++ b/drivers/net/sfc/mcdi.c
@@ -381,7 +381,7 @@
 				  -rc);
 			efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
 		} else
-			netif_err(efx, hw, efx->net_dev,
+			netif_dbg(efx, hw, efx->net_dev,
 				  "MC command 0x%x inlen %d failed rc=%d\n",
 				  cmd, (int)inlen, -rc);
 	}
@@ -463,6 +463,7 @@
 		if (mcdi->mode == MCDI_MODE_EVENTS) {
 			mcdi->resprc = rc;
 			mcdi->resplen = 0;
+			++mcdi->credits;
 		}
 	} else
 		/* Nobody was waiting for an MCDI request, so trigger a reset */
diff --git a/drivers/net/sfc/mcdi_phy.c b/drivers/net/sfc/mcdi_phy.c
index c992742..0e97eed 100644
--- a/drivers/net/sfc/mcdi_phy.c
+++ b/drivers/net/sfc/mcdi_phy.c
@@ -16,7 +16,6 @@
 #include "phy.h"
 #include "mcdi.h"
 #include "mcdi_pcol.h"
-#include "mdio_10g.h"
 #include "nic.h"
 #include "selftest.h"
 
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c
index 98d9460..56b0266 100644
--- a/drivers/net/sfc/mdio_10g.c
+++ b/drivers/net/sfc/mdio_10g.c
@@ -15,7 +15,6 @@
 #include "net_driver.h"
 #include "mdio_10g.h"
 #include "workarounds.h"
-#include "nic.h"
 
 unsigned efx_mdio_id_oui(u32 id)
 {
diff --git a/drivers/net/sfc/mtd.c b/drivers/net/sfc/mtd.c
index 02e54b4..d386274 100644
--- a/drivers/net/sfc/mtd.c
+++ b/drivers/net/sfc/mtd.c
@@ -321,14 +321,15 @@
 	struct efx_mtd *efx_mtd = mtd->priv;
 	const struct efx_spi_device *spi = efx_mtd->spi;
 	struct efx_nic *efx = efx_mtd->efx;
+	struct falcon_nic_data *nic_data = efx->nic_data;
 	int rc;
 
-	rc = mutex_lock_interruptible(&efx->spi_lock);
+	rc = mutex_lock_interruptible(&nic_data->spi_lock);
 	if (rc)
 		return rc;
 	rc = falcon_spi_read(efx, spi, part->offset + start, len,
 			     retlen, buffer);
-	mutex_unlock(&efx->spi_lock);
+	mutex_unlock(&nic_data->spi_lock);
 	return rc;
 }
 
@@ -337,13 +338,14 @@
 	struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
 	struct efx_mtd *efx_mtd = mtd->priv;
 	struct efx_nic *efx = efx_mtd->efx;
+	struct falcon_nic_data *nic_data = efx->nic_data;
 	int rc;
 
-	rc = mutex_lock_interruptible(&efx->spi_lock);
+	rc = mutex_lock_interruptible(&nic_data->spi_lock);
 	if (rc)
 		return rc;
 	rc = efx_spi_erase(part, part->offset + start, len);
-	mutex_unlock(&efx->spi_lock);
+	mutex_unlock(&nic_data->spi_lock);
 	return rc;
 }
 
@@ -354,14 +356,15 @@
 	struct efx_mtd *efx_mtd = mtd->priv;
 	const struct efx_spi_device *spi = efx_mtd->spi;
 	struct efx_nic *efx = efx_mtd->efx;
+	struct falcon_nic_data *nic_data = efx->nic_data;
 	int rc;
 
-	rc = mutex_lock_interruptible(&efx->spi_lock);
+	rc = mutex_lock_interruptible(&nic_data->spi_lock);
 	if (rc)
 		return rc;
 	rc = falcon_spi_write(efx, spi, part->offset + start, len,
 			      retlen, buffer);
-	mutex_unlock(&efx->spi_lock);
+	mutex_unlock(&nic_data->spi_lock);
 	return rc;
 }
 
@@ -370,11 +373,12 @@
 	struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
 	struct efx_mtd *efx_mtd = mtd->priv;
 	struct efx_nic *efx = efx_mtd->efx;
+	struct falcon_nic_data *nic_data = efx->nic_data;
 	int rc;
 
-	mutex_lock(&efx->spi_lock);
+	mutex_lock(&nic_data->spi_lock);
 	rc = efx_spi_slow_wait(part, true);
-	mutex_unlock(&efx->spi_lock);
+	mutex_unlock(&nic_data->spi_lock);
 	return rc;
 }
 
@@ -387,35 +391,67 @@
 
 static int falcon_mtd_probe(struct efx_nic *efx)
 {
-	struct efx_spi_device *spi = efx->spi_flash;
+	struct falcon_nic_data *nic_data = efx->nic_data;
+	struct efx_spi_device *spi;
 	struct efx_mtd *efx_mtd;
-	int rc;
+	int rc = -ENODEV;
 
 	ASSERT_RTNL();
 
-	if (!spi || spi->size <= FALCON_FLASH_BOOTCODE_START)
-		return -ENODEV;
+	spi = &nic_data->spi_flash;
+	if (efx_spi_present(spi) && spi->size > FALCON_FLASH_BOOTCODE_START) {
+		efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]),
+				  GFP_KERNEL);
+		if (!efx_mtd)
+			return -ENOMEM;
 
-	efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]),
-			  GFP_KERNEL);
-	if (!efx_mtd)
-		return -ENOMEM;
+		efx_mtd->spi = spi;
+		efx_mtd->name = "flash";
+		efx_mtd->ops = &falcon_mtd_ops;
 
-	efx_mtd->spi = spi;
-	efx_mtd->name = "flash";
-	efx_mtd->ops = &falcon_mtd_ops;
+		efx_mtd->n_parts = 1;
+		efx_mtd->part[0].mtd.type = MTD_NORFLASH;
+		efx_mtd->part[0].mtd.flags = MTD_CAP_NORFLASH;
+		efx_mtd->part[0].mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START;
+		efx_mtd->part[0].mtd.erasesize = spi->erase_size;
+		efx_mtd->part[0].offset = FALCON_FLASH_BOOTCODE_START;
+		efx_mtd->part[0].type_name = "sfc_flash_bootrom";
 
-	efx_mtd->n_parts = 1;
-	efx_mtd->part[0].mtd.type = MTD_NORFLASH;
-	efx_mtd->part[0].mtd.flags = MTD_CAP_NORFLASH;
-	efx_mtd->part[0].mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START;
-	efx_mtd->part[0].mtd.erasesize = spi->erase_size;
-	efx_mtd->part[0].offset = FALCON_FLASH_BOOTCODE_START;
-	efx_mtd->part[0].type_name = "sfc_flash_bootrom";
+		rc = efx_mtd_probe_device(efx, efx_mtd);
+		if (rc) {
+			kfree(efx_mtd);
+			return rc;
+		}
+	}
 
-	rc = efx_mtd_probe_device(efx, efx_mtd);
-	if (rc)
-		kfree(efx_mtd);
+	spi = &nic_data->spi_eeprom;
+	if (efx_spi_present(spi) && spi->size > EFX_EEPROM_BOOTCONFIG_START) {
+		efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]),
+				  GFP_KERNEL);
+		if (!efx_mtd)
+			return -ENOMEM;
+
+		efx_mtd->spi = spi;
+		efx_mtd->name = "EEPROM";
+		efx_mtd->ops = &falcon_mtd_ops;
+
+		efx_mtd->n_parts = 1;
+		efx_mtd->part[0].mtd.type = MTD_RAM;
+		efx_mtd->part[0].mtd.flags = MTD_CAP_RAM;
+		efx_mtd->part[0].mtd.size =
+			min(spi->size, EFX_EEPROM_BOOTCONFIG_END) -
+			EFX_EEPROM_BOOTCONFIG_START;
+		efx_mtd->part[0].mtd.erasesize = spi->erase_size;
+		efx_mtd->part[0].offset = EFX_EEPROM_BOOTCONFIG_START;
+		efx_mtd->part[0].type_name = "sfc_bootconfig";
+
+		rc = efx_mtd_probe_device(efx, efx_mtd);
+		if (rc) {
+			kfree(efx_mtd);
+			return rc;
+		}
+	}
+
 	return rc;
 }
 
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 0a7e26d..76f2fb1 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -142,6 +142,12 @@
  * @flushed: Used when handling queue flushing
  * @read_count: Current read pointer.
  *	This is the number of buffers that have been removed from both rings.
+ * @old_write_count: The value of @write_count when last checked.
+ *	This is here for performance reasons.  The xmit path will
+ *	only get the up-to-date value of @write_count if this
+ *	variable indicates that the queue is empty.  This is to
+ *	avoid cache-line ping-pong between the xmit path and the
+ *	completion path.
  * @stopped: Stopped count.
  *	Set if this TX queue is currently stopping its port.
  * @insert_count: Current insert pointer
@@ -163,6 +169,10 @@
  * @tso_long_headers: Number of packets with headers too long for standard
  *	blocks
  * @tso_packets: Number of packets via the TSO xmit path
+ * @pushes: Number of times the TX push feature has been used
+ * @empty_read_count: If the completion path has seen the queue as empty
+ *	and the transmission path has not yet checked this, the value of
+ *	@read_count bitwise-ORed with %EFX_EMPTY_COUNT_VALID; otherwise 0.
  */
 struct efx_tx_queue {
 	/* Members which don't change on the fast path */
@@ -177,6 +187,7 @@
 
 	/* Members used mainly on the completion path */
 	unsigned int read_count ____cacheline_aligned_in_smp;
+	unsigned int old_write_count;
 	int stopped;
 
 	/* Members used only on the xmit path */
@@ -187,6 +198,11 @@
 	unsigned int tso_bursts;
 	unsigned int tso_long_headers;
 	unsigned int tso_packets;
+	unsigned int pushes;
+
+	/* Members shared between paths and sometimes updated */
+	unsigned int empty_read_count ____cacheline_aligned_in_smp;
+#define EFX_EMPTY_COUNT_VALID 0x80000000
 };
 
 /**
@@ -621,14 +637,13 @@
  * @pci_dev: The PCI device
  * @type: Controller type attributes
  * @legacy_irq: IRQ number
+ * @legacy_irq_enabled: Are IRQs enabled on NIC (INT_EN_KER register)?
  * @workqueue: Workqueue for port reconfigures and the HW monitor.
  *	Work items do not hold and must not acquire RTNL.
  * @workqueue_name: Name of workqueue
  * @reset_work: Scheduled reset workitem
- * @monitor_work: Hardware monitor workitem
  * @membase_phys: Memory BAR value as physical address
  * @membase: Memory BAR value
- * @biu_lock: BIU (bus interface unit) lock
  * @interrupt_mode: Interrupt mode
  * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
  * @irq_rx_moderation: IRQ moderation time for RX event queues
@@ -647,23 +662,14 @@
  * @n_tx_channels: Number of channels used for TX
  * @rx_buffer_len: RX buffer length
  * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
+ * @rx_hash_key: Toeplitz hash key for RSS
  * @rx_indir_table: Indirection table for RSS
  * @int_error_count: Number of internal errors seen recently
  * @int_error_expire: Time at which error count will be expired
  * @irq_status: Interrupt status buffer
- * @last_irq_cpu: Last CPU to handle interrupt.
- *	This register is written with the SMP processor ID whenever an
- *	interrupt is handled.  It is used by efx_nic_test_interrupt()
- *	to verify that an interrupt has occurred.
  * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0
  * @fatal_irq_level: IRQ level (bit number) used for serious errors
- * @spi_flash: SPI flash device
- *	This field will be %NULL if no flash device is present (or for Siena).
- * @spi_eeprom: SPI EEPROM device
- *	This field will be %NULL if no EEPROM device is present (or for Siena).
- * @spi_lock: SPI bus lock
  * @mtd_list: List of MTDs attached to the NIC
- * @n_rx_nodesc_drop_cnt: RX no descriptor drop count
  * @nic_data: Hardware dependent state
  * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode,
  *	@port_inhibited, efx_monitor() and efx_reconfigure_port()
@@ -676,21 +682,14 @@
  * @port_initialized: Port initialized?
  * @net_dev: Operating system network device. Consider holding the rtnl lock
  * @rx_checksum_enabled: RX checksumming enabled
- * @mac_stats: MAC statistics. These include all statistics the MACs
- *	can provide.  Generic code converts these into a standard
- *	&struct net_device_stats.
  * @stats_buffer: DMA buffer for statistics
- * @stats_lock: Statistics update lock. Serialises statistics fetches
  * @mac_op: MAC interface
- * @mac_address: Permanent MAC address
  * @phy_type: PHY type
- * @mdio_lock: MDIO lock
  * @phy_op: PHY interface
  * @phy_data: PHY private data (including PHY-specific stats)
  * @mdio: PHY MDIO interface
  * @mdio_bus: PHY MDIO bus ID (only used by Siena)
  * @phy_mode: PHY operating mode. Serialised by @mac_lock.
- * @xmac_poll_required: XMAC link state needs polling
  * @link_advertising: Autonegotiation advertising flags
  * @link_state: Current state of the link
  * @n_link_state_changes: Number of times the link has changed state
@@ -701,21 +700,34 @@
  * @loopback_mode: Loopback status
  * @loopback_modes: Supported loopback mode bitmask
  * @loopback_selftest: Offline self-test private state
+ * @monitor_work: Hardware monitor workitem
+ * @biu_lock: BIU (bus interface unit) lock
+ * @last_irq_cpu: Last CPU to handle interrupt.
+ *	This field is written with the SMP processor ID whenever an
+ *	interrupt is handled.  It is used by efx_nic_test_interrupt()
+ *	to verify that an interrupt has occurred.
+ * @n_rx_nodesc_drop_cnt: RX no descriptor drop count
+ * @mac_stats: MAC statistics. These include all statistics the MACs
+ *	can provide.  Generic code converts these into a standard
+ *	&struct net_device_stats.
+ * @stats_lock: Statistics update lock. Serialises statistics fetches
  *
  * This is stored in the private area of the &struct net_device.
  */
 struct efx_nic {
+	/* The following fields should be written very rarely */
+
 	char name[IFNAMSIZ];
 	struct pci_dev *pci_dev;
 	const struct efx_nic_type *type;
 	int legacy_irq;
+	bool legacy_irq_enabled;
 	struct workqueue_struct *workqueue;
 	char workqueue_name[16];
 	struct work_struct reset_work;
-	struct delayed_work monitor_work;
 	resource_size_t membase_phys;
 	void __iomem *membase;
-	spinlock_t biu_lock;
+
 	enum efx_int_mode interrupt_mode;
 	bool irq_rx_adaptive;
 	unsigned int irq_rx_moderation;
@@ -742,19 +754,13 @@
 	unsigned long int_error_expire;
 
 	struct efx_buffer irq_status;
-	volatile signed int last_irq_cpu;
 	unsigned irq_zero_count;
 	unsigned fatal_irq_level;
 
-	struct efx_spi_device *spi_flash;
-	struct efx_spi_device *spi_eeprom;
-	struct mutex spi_lock;
 #ifdef CONFIG_SFC_MTD
 	struct list_head mtd_list;
 #endif
 
-	unsigned n_rx_nodesc_drop_cnt;
-
 	void *nic_data;
 
 	struct mutex mac_lock;
@@ -766,22 +772,17 @@
 	struct net_device *net_dev;
 	bool rx_checksum_enabled;
 
-	struct efx_mac_stats mac_stats;
 	struct efx_buffer stats_buffer;
-	spinlock_t stats_lock;
 
 	struct efx_mac_operations *mac_op;
-	unsigned char mac_address[ETH_ALEN];
 
 	unsigned int phy_type;
-	struct mutex mdio_lock;
 	struct efx_phy_operations *phy_op;
 	void *phy_data;
 	struct mdio_if_info mdio;
 	unsigned int mdio_bus;
 	enum efx_phy_mode phy_mode;
 
-	bool xmac_poll_required;
 	u32 link_advertising;
 	struct efx_link_state link_state;
 	unsigned int n_link_state_changes;
@@ -797,6 +798,15 @@
 	void *loopback_selftest;
 
 	struct efx_filter_state *filter_state;
+
+	/* The following fields may be written more often */
+
+	struct delayed_work monitor_work ____cacheline_aligned_in_smp;
+	spinlock_t biu_lock;
+	volatile signed int last_irq_cpu;
+	unsigned n_rx_nodesc_drop_cnt;
+	struct efx_mac_stats mac_stats;
+	spinlock_t stats_lock;
 };
 
 static inline int efx_dev_registered(struct efx_nic *efx)
@@ -829,6 +839,7 @@
  *	be called while the controller is uninitialised.
  * @probe_port: Probe the MAC and PHY
  * @remove_port: Free resources allocated by probe_port()
+ * @handle_global_event: Handle a "global" event (may be %NULL)
  * @prepare_flush: Prepare the hardware for flushing the DMA queues
  * @update_stats: Update statistics not provided by event handling
  * @start_stats: Start the regular fetching of statistics
@@ -873,6 +884,7 @@
 	int (*reset)(struct efx_nic *efx, enum reset_type method);
 	int (*probe_port)(struct efx_nic *efx);
 	void (*remove_port)(struct efx_nic *efx);
+	bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *);
 	void (*prepare_flush)(struct efx_nic *efx);
 	void (*update_stats)(struct efx_nic *efx);
 	void (*start_stats)(struct efx_nic *efx);
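
The @old_write_count and @empty_read_count documentation above describes a
lock-free handshake whose two halves land in nic.c and tx.c later in this
patch.  A condensed, self-contained sketch of the protocol, with field names
shortened and the driver's smp_mb() noted in comments (ACCESS_ONCE mirrored
from <linux/compiler.h>):

	#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
	#define EFX_EMPTY_COUNT_VALID 0x80000000u

	struct txq {
		unsigned int read_count, write_count;
		unsigned int old_write_count, empty_read_count;
	};

	/* Completion path (cf. efx_xmit_done() in tx.c below): only when
	 * the queue drains do we fetch the up-to-date write_count, so the
	 * xmit path's cache line is not pulled on every completion. */
	static void completion_side(struct txq *q)
	{
		if ((int)(q->read_count - q->old_write_count) >= 0) {
			q->old_write_count = ACCESS_ONCE(q->write_count);
			if (q->read_count == q->old_write_count) {
				/* smp_mb() here in the real code */
				q->empty_read_count =
					q->read_count | EFX_EMPTY_COUNT_VALID;
			}
		}
	}

	/* Xmit path (cf. efx_may_push_tx_desc() in nic.c below): push a
	 * descriptor only if the queue was seen empty at exactly our
	 * current write position. */
	static int may_push(struct txq *q)
	{
		unsigned int empty = ACCESS_ONCE(q->empty_read_count);

		if (empty == 0)
			return 0;
		q->empty_read_count = 0;
		return ((empty ^ q->write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
	}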
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index 41c36b9..da38659 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -362,6 +362,35 @@
 			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
 }
 
+/* Write pointer and first descriptor for TX descriptor ring */
+static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue,
+				    const efx_qword_t *txd)
+{
+	unsigned write_ptr;
+	efx_oword_t reg;
+
+	BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
+	BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);
+
+	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
+	EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
+			     FRF_AZ_TX_DESC_WPTR, write_ptr);
+	reg.qword[0] = *txd;
+	efx_writeo_page(tx_queue->efx, &reg,
+			FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
+}
+
+static inline bool
+efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
+{
+	unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
+
+	if (empty_read_count == 0)
+		return false;
+
+	tx_queue->empty_read_count = 0;
+	return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
+}
 
 /* For each entry inserted into the software descriptor ring, create a
  * descriptor in the hardware TX descriptor ring (in host memory), and
@@ -373,6 +402,7 @@
 	struct efx_tx_buffer *buffer;
 	efx_qword_t *txd;
 	unsigned write_ptr;
+	unsigned old_write_count = tx_queue->write_count;
 
 	BUG_ON(tx_queue->write_count == tx_queue->insert_count);
 
@@ -391,7 +421,15 @@
 	} while (tx_queue->write_count != tx_queue->insert_count);
 
 	wmb(); /* Ensure descriptors are written before they are fetched */
-	efx_notify_tx_desc(tx_queue);
+
+	if (efx_may_push_tx_desc(tx_queue, old_write_count)) {
+		txd = efx_tx_desc(tx_queue,
+				  old_write_count & tx_queue->ptr_mask);
+		efx_push_tx_desc(tx_queue, txd);
+		++tx_queue->pushes;
+	} else {
+		efx_notify_tx_desc(tx_queue);
+	}
 }
 
 /* Allocate hardware resources for a TX queue */
@@ -894,46 +932,6 @@
 			  channel->channel, EFX_QWORD_VAL(*event));
 }
 
-/* Global events are basically PHY events */
-static void
-efx_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
-{
-	struct efx_nic *efx = channel->efx;
-	bool handled = false;
-
-	if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
-	    EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
-	    EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR)) {
-		/* Ignored */
-		handled = true;
-	}
-
-	if ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) &&
-	    EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
-		efx->xmac_poll_required = true;
-		handled = true;
-	}
-
-	if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ?
-	    EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
-	    EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
-		netif_err(efx, rx_err, efx->net_dev,
-			  "channel %d seen global RX_RESET event. Resetting.\n",
-			  channel->channel);
-
-		atomic_inc(&efx->rx_reset);
-		efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
-				   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
-		handled = true;
-	}
-
-	if (!handled)
-		netif_err(efx, hw, efx->net_dev,
-			  "channel %d unknown global event "
-			  EFX_QWORD_FMT "\n", channel->channel,
-			  EFX_QWORD_VAL(*event));
-}
-
 static void
 efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
 {
@@ -1050,15 +1048,17 @@
 		case FSE_AZ_EV_CODE_DRV_GEN_EV:
 			efx_handle_generated_event(channel, &event);
 			break;
-		case FSE_AZ_EV_CODE_GLOBAL_EV:
-			efx_handle_global_event(channel, &event);
-			break;
 		case FSE_AZ_EV_CODE_DRIVER_EV:
 			efx_handle_driver_event(channel, &event);
 			break;
 		case FSE_CZ_EV_CODE_MCDI_EV:
 			efx_mcdi_process_event(channel, &event);
 			break;
+		case FSE_AZ_EV_CODE_GLOBAL_EV:
+			if (efx->type->handle_global_event &&
+			    efx->type->handle_global_event(channel, &event))
+				break;
+			/* else fall through */
 		default:
 			netif_err(channel->efx, hw, channel->efx->net_dev,
 				  "channel %d unknown event type %d (data "
@@ -1418,6 +1418,12 @@
 	u32 queues;
 	int syserr;
 
+	/* Could this be ours?  If interrupts are disabled then the
+	 * channel state may not be valid.
+	 */
+	if (!efx->legacy_irq_enabled)
+		return result;
+
 	/* Read the ISR which also ACKs the interrupts */
 	efx_readd(efx, &reg, FR_BZ_INT_ISR0);
 	queues = EFX_EXTRACT_DWORD(reg, 0, 31);
@@ -1664,7 +1670,7 @@
 	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
 	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
 	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
-	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 0);
+	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
 	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
 	/* Enable SW_EV to inherit in char driver - assume harmless here */
 	EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
index 0438dc9..eb05869 100644
--- a/drivers/net/sfc/nic.h
+++ b/drivers/net/sfc/nic.h
@@ -15,6 +15,7 @@
 #include "net_driver.h"
 #include "efx.h"
 #include "mcdi.h"
+#include "spi.h"
 
 /*
  * Falcon hardware control
@@ -113,6 +114,11 @@
  * @stats_pending: Is there a pending DMA of MAC statistics.
  * @stats_timer: A timer for regularly fetching MAC statistics.
  * @stats_dma_done: Pointer to the flag which indicates DMA completion.
+ * @spi_flash: SPI flash device
+ * @spi_eeprom: SPI EEPROM device
+ * @spi_lock: SPI bus lock
+ * @mdio_lock: MDIO bus lock
+ * @xmac_poll_required: XMAC link state needs polling
  */
 struct falcon_nic_data {
 	struct pci_dev *pci_dev2;
@@ -121,6 +127,11 @@
 	bool stats_pending;
 	struct timer_list stats_timer;
 	u32 *stats_dma_done;
+	struct efx_spi_device spi_flash;
+	struct efx_spi_device spi_eeprom;
+	struct mutex spi_lock;
+	struct mutex mdio_lock;
+	bool xmac_poll_required;
 };
 
 static inline struct falcon_board *falcon_board(struct efx_nic *efx)
@@ -135,7 +146,6 @@
  * @fw_build: Firmware build number
  * @mcdi: Management-Controller-to-Driver Interface
  * @wol_filter_id: Wake-on-LAN packet filter id
- * @ipv6_rss_key: Toeplitz hash key for IPv6 RSS
  */
 struct siena_nic_data {
 	u64 fw_version;
diff --git a/drivers/net/sfc/qt202x_phy.c b/drivers/net/sfc/qt202x_phy.c
index 68813d1..ea3ae00 100644
--- a/drivers/net/sfc/qt202x_phy.c
+++ b/drivers/net/sfc/qt202x_phy.c
@@ -41,6 +41,8 @@
 #define PCS_UC_STATUS_LBN	0
 #define PCS_UC_STATUS_WIDTH	8
 #define PCS_UC_STATUS_FW_SAVE	0x20
+#define PMA_PMD_MODE_REG	0xc301
+#define PMA_PMD_RXIN_SEL_LBN	6
 #define PMA_PMD_FTX_CTRL2_REG	0xc309
 #define PMA_PMD_FTX_STATIC_LBN	13
 #define PMA_PMD_VEND1_REG	0xc001
@@ -282,6 +284,10 @@
 	 * slow) reload of the firmware image (the microcontroller's code
 	 * memory is not affected by the microcontroller reset). */
 	efx_mdio_write(efx, 1, 0xc317, 0x00ff);
+	/* PMA/PMD loopback sets RXIN to inverse polarity and the firmware
+	 * restart doesn't reset it. We need to do that ourselves. */
+	efx_mdio_set_flag(efx, 1, PMA_PMD_MODE_REG,
+			  1 << PMA_PMD_RXIN_SEL_LBN, false);
 	efx_mdio_write(efx, 1, 0xc300, 0x0002);
 	msleep(20);
 
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 6d0959b..3925fd6 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -37,7 +37,7 @@
  * This driver supports two methods for allocating and using RX buffers:
  * each RX buffer may be backed by an skb or by an order-n page.
  *
- * When LRO is in use then the second method has a lower overhead,
+ * When GRO is in use then the second method has a lower overhead,
  * since we don't have to allocate then free skbs on reassembled frames.
  *
  * Values:
@@ -50,25 +50,25 @@
  *
  *   - Since pushing and popping descriptors are separated by the rx_queue
  *     size, the watermarks should be ~rxd_size.
- *   - The performance win by using page-based allocation for LRO is less
- *     than the performance hit of using page-based allocation of non-LRO,
+ *   - The performance win from using page-based allocation for GRO is less
+ *     than the performance hit of using page-based allocation for non-GRO,
  *     so the watermarks should reflect this.
  *
  * Per channel we maintain a single variable, updated by each channel:
  *
- *   rx_alloc_level += (lro_performed ? RX_ALLOC_FACTOR_LRO :
+ *   rx_alloc_level += (gro_performed ? RX_ALLOC_FACTOR_GRO :
  *                      RX_ALLOC_FACTOR_SKB)
  * Per NAPI poll interval, we constrain rx_alloc_level to 0..MAX (which
  * limits the hysteresis), and update the allocation strategy:
  *
- *   rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_LRO ?
+ *   rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_GRO ?
  *                      RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
  */
 static int rx_alloc_method = RX_ALLOC_METHOD_AUTO;
 
-#define RX_ALLOC_LEVEL_LRO 0x2000
+#define RX_ALLOC_LEVEL_GRO 0x2000
 #define RX_ALLOC_LEVEL_MAX 0x3000
-#define RX_ALLOC_FACTOR_LRO 1
+#define RX_ALLOC_FACTOR_GRO 1
 #define RX_ALLOC_FACTOR_SKB (-2)
 
 /* This is the percentage fill level below which new RX descriptors
@@ -441,19 +441,19 @@
 	efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
 }
 
-/* Pass a received packet up through the generic LRO stack
+/* Pass a received packet up through the generic GRO stack
  *
  * Handles driverlink veto, and passes the fragment up via
- * the appropriate LRO method
+ * the appropriate GRO method
  */
-static void efx_rx_packet_lro(struct efx_channel *channel,
+static void efx_rx_packet_gro(struct efx_channel *channel,
 			      struct efx_rx_buffer *rx_buf,
 			      bool checksummed)
 {
 	struct napi_struct *napi = &channel->napi_str;
 	gro_result_t gro_result;
 
-	/* Pass the skb/page into the LRO engine */
+	/* Pass the skb/page into the GRO engine */
 	if (rx_buf->page) {
 		struct efx_nic *efx = channel->efx;
 		struct page *page = rx_buf->page;
@@ -499,7 +499,7 @@
 	if (gro_result == GRO_NORMAL) {
 		channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
 	} else if (gro_result != GRO_DROP) {
-		channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO;
+		channel->rx_alloc_level += RX_ALLOC_FACTOR_GRO;
 		channel->irq_mod_score += 2;
 	}
 }
@@ -605,7 +605,7 @@
 	}
 
 	if (likely(checksummed || rx_buf->page)) {
-		efx_rx_packet_lro(channel, rx_buf, checksummed);
+		efx_rx_packet_gro(channel, rx_buf, checksummed);
 		return;
 	}
 
@@ -628,7 +628,7 @@
 {
 	enum efx_rx_alloc_method method = rx_alloc_method;
 
-	/* Only makes sense to use page based allocation if LRO is enabled */
+	/* Only makes sense to use page based allocation if GRO is enabled */
 	if (!(channel->efx->net_dev->features & NETIF_F_GRO)) {
 		method = RX_ALLOC_METHOD_SKB;
 	} else if (method == RX_ALLOC_METHOD_AUTO) {
@@ -639,7 +639,7 @@
 			channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX;
 
 		/* Decide on the allocation method */
-		method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_LRO) ?
+		method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_GRO) ?
 			  RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB);
 	}
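
The rx_alloc_level heuristic renamed above (LRO to GRO) is a simple
hysteresis: GRO-merged frames nudge the level up by 1, non-merged frames
push it down by 2, and once per NAPI poll the level is clamped and compared
against a threshold.  A toy model using only the constants and update rule
from this file (function names in the comments refer to the hunks above):

	#define RX_ALLOC_LEVEL_GRO	0x2000
	#define RX_ALLOC_LEVEL_MAX	0x3000
	#define RX_ALLOC_FACTOR_GRO	1
	#define RX_ALLOC_FACTOR_SKB	(-2)

	static int rx_alloc_level;

	/* Per received frame, cf. efx_rx_packet_gro() above */
	static void account_frame(int gro_merged)
	{
		rx_alloc_level += gro_merged ? RX_ALLOC_FACTOR_GRO
					     : RX_ALLOC_FACTOR_SKB;
	}

	/* Per NAPI poll, cf. the allocation-strategy update above:
	 * constrain the level to 0..MAX, then pick the method. */
	static int use_page_allocation(void)
	{
		if (rx_alloc_level < 0)
			rx_alloc_level = 0;
		else if (rx_alloc_level > RX_ALLOC_LEVEL_MAX)
			rx_alloc_level = RX_ALLOC_LEVEL_MAX;
		return rx_alloc_level > RX_ALLOC_LEVEL_GRO;
	}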
 
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index 45236f5..bf84561 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -194,13 +194,7 @@
 
 static int siena_probe_nvconfig(struct efx_nic *efx)
 {
-	int rc;
-
-	rc = efx_mcdi_get_board_cfg(efx, efx->mac_address, NULL);
-	if (rc)
-		return rc;
-
-	return 0;
+	return efx_mcdi_get_board_cfg(efx, efx->net_dev->perm_addr, NULL);
 }
 
 static int siena_probe_nic(struct efx_nic *efx)
@@ -562,7 +556,7 @@
 		if (nic_data->wol_filter_id != -1)
 			efx_mcdi_wol_filter_remove(efx,
 						   nic_data->wol_filter_id);
-		rc = efx_mcdi_wol_filter_set_magic(efx, efx->mac_address,
+		rc = efx_mcdi_wol_filter_set_magic(efx, efx->net_dev->dev_addr,
 						   &nic_data->wol_filter_id);
 		if (rc)
 			goto fail;
diff --git a/drivers/net/sfc/spi.h b/drivers/net/sfc/spi.h
index 8bf4fce..879b7f6 100644
--- a/drivers/net/sfc/spi.h
+++ b/drivers/net/sfc/spi.h
@@ -61,6 +61,11 @@
 	unsigned int block_size;
 };
 
+static inline bool efx_spi_present(const struct efx_spi_device *spi)
+{
+	return spi->size != 0;
+}
+
 int falcon_spi_cmd(struct efx_nic *efx,
 		   const struct efx_spi_device *spi, unsigned int command,
 		   int address, const void* in, void *out, size_t len);
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index 1bc6c48..f102912 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -15,9 +15,7 @@
 #include "mdio_10g.h"
 #include "nic.h"
 #include "phy.h"
-#include "regs.h"
 #include "workarounds.h"
-#include "selftest.h"
 
 /* We expect these MMDs to be in the package. */
 #define TENXPRESS_REQUIRED_DEVS (MDIO_DEVS_PMAPMD	| \
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 1172698..bdb92b4 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -240,8 +240,7 @@
 				 * of read_count. */
 				smp_mb();
 				tx_queue->old_read_count =
-					*(volatile unsigned *)
-					&tx_queue->read_count;
+					ACCESS_ONCE(tx_queue->read_count);
 				fill_level = (tx_queue->insert_count
 					      - tx_queue->old_read_count);
 				q_space = efx->txq_entries - 1 - fill_level;
@@ -401,6 +400,7 @@
 {
 	unsigned fill_level;
 	struct efx_nic *efx = tx_queue->efx;
+	struct netdev_queue *queue;
 
 	EFX_BUG_ON_PARANOID(index > tx_queue->ptr_mask);
 
@@ -417,12 +417,25 @@
 
 			/* Do this under netif_tx_lock(), to avoid racing
 			 * with efx_xmit(). */
-			netif_tx_lock(efx->net_dev);
+			queue = netdev_get_tx_queue(
+				efx->net_dev,
+				tx_queue->queue / EFX_TXQ_TYPES);
+			__netif_tx_lock(queue, smp_processor_id());
 			if (tx_queue->stopped) {
 				tx_queue->stopped = 0;
 				efx_wake_queue(tx_queue->channel);
 			}
-			netif_tx_unlock(efx->net_dev);
+			__netif_tx_unlock(queue);
+		}
+	}
+
+	/* Check whether the hardware queue is now empty */
+	if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
+		tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
+		if (tx_queue->read_count == tx_queue->old_write_count) {
+			smp_mb();
+			tx_queue->empty_read_count =
+				tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
 		}
 	}
 }
@@ -470,8 +483,10 @@
 
 	tx_queue->insert_count = 0;
 	tx_queue->write_count = 0;
+	tx_queue->old_write_count = 0;
 	tx_queue->read_count = 0;
 	tx_queue->old_read_count = 0;
+	tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
 	BUG_ON(tx_queue->stopped);
 
 	/* Set up TX descriptor ring */
@@ -760,7 +775,7 @@
 			 * stopped from the access of read_count. */
 			smp_mb();
 			tx_queue->old_read_count =
-				*(volatile unsigned *)&tx_queue->read_count;
+				ACCESS_ONCE(tx_queue->read_count);
 			fill_level = (tx_queue->insert_count
 				      - tx_queue->old_read_count);
 			q_space = efx->txq_entries - 1 - fill_level;
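
The two hunks above replace an open-coded volatile access with
ACCESS_ONCE(), which expands to exactly the same cast but names the intent:
force one real load of @read_count rather than letting the compiler cache or
refetch it.  A sketch of the equivalence (macro body as defined in
<linux/compiler.h>):

	#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

	unsigned int read_count, old_read_count;

	/* Before: open-coded volatile cast */
	old_read_count = *(volatile unsigned *)&read_count;

	/* After: identical code generation, clearer intent */
	old_read_count = ACCESS_ONCE(read_count);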
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 50259df..819c175 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -45,9 +45,9 @@
 	u32 ioaddr = ndev->base_addr;
 
 	if (mdp->duplex) /* Full */
-		ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
+		writel(readl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
 	else		/* Half */
-		ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
+		writel(readl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
 }
 
 static void sh_eth_set_rate(struct net_device *ndev)
@@ -57,10 +57,10 @@
 
 	switch (mdp->speed) {
 	case 10: /* 10BASE */
-		ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_RTM, ioaddr + ECMR);
+		writel(readl(ioaddr + ECMR) & ~ECMR_RTM, ioaddr + ECMR);
 		break;
 	case 100:/* 100BASE */
-		ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_RTM, ioaddr + ECMR);
+		writel(readl(ioaddr + ECMR) | ECMR_RTM, ioaddr + ECMR);
 		break;
 	default:
 		break;
@@ -96,9 +96,9 @@
 	u32 ioaddr = ndev->base_addr;
 
 	if (mdp->duplex) /* Full */
-		ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
+		writel(readl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
 	else		/* Half */
-		ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
+		writel(readl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
 }
 
 static void sh_eth_set_rate(struct net_device *ndev)
@@ -108,10 +108,10 @@
 
 	switch (mdp->speed) {
 	case 10: /* 10BASE */
-		ctrl_outl(0, ioaddr + RTRATE);
+		writel(0, ioaddr + RTRATE);
 		break;
 	case 100:/* 100BASE */
-		ctrl_outl(1, ioaddr + RTRATE);
+		writel(1, ioaddr + RTRATE);
 		break;
 	default:
 		break;
@@ -143,7 +143,7 @@
 static void sh_eth_chip_reset(struct net_device *ndev)
 {
 	/* reset device */
-	ctrl_outl(ARSTR_ARSTR, ARSTR);
+	writel(ARSTR_ARSTR, ARSTR);
 	mdelay(1);
 }
 
@@ -152,10 +152,10 @@
 	u32 ioaddr = ndev->base_addr;
 	int cnt = 100;
 
-	ctrl_outl(EDSR_ENALL, ioaddr + EDSR);
-	ctrl_outl(ctrl_inl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
+	writel(EDSR_ENALL, ioaddr + EDSR);
+	writel(readl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
 	while (cnt > 0) {
-		if (!(ctrl_inl(ioaddr + EDMR) & 0x3))
+		if (!(readl(ioaddr + EDMR) & 0x3))
 			break;
 		mdelay(1);
 		cnt--;
@@ -164,14 +164,14 @@
 		printk(KERN_ERR "Device reset fail\n");
 
 	/* Table Init */
-	ctrl_outl(0x0, ioaddr + TDLAR);
-	ctrl_outl(0x0, ioaddr + TDFAR);
-	ctrl_outl(0x0, ioaddr + TDFXR);
-	ctrl_outl(0x0, ioaddr + TDFFR);
-	ctrl_outl(0x0, ioaddr + RDLAR);
-	ctrl_outl(0x0, ioaddr + RDFAR);
-	ctrl_outl(0x0, ioaddr + RDFXR);
-	ctrl_outl(0x0, ioaddr + RDFFR);
+	writel(0x0, ioaddr + TDLAR);
+	writel(0x0, ioaddr + TDFAR);
+	writel(0x0, ioaddr + TDFXR);
+	writel(0x0, ioaddr + TDFFR);
+	writel(0x0, ioaddr + RDLAR);
+	writel(0x0, ioaddr + RDFAR);
+	writel(0x0, ioaddr + RDFXR);
+	writel(0x0, ioaddr + RDFFR);
 }
 
 static void sh_eth_set_duplex(struct net_device *ndev)
@@ -180,9 +180,9 @@
 	u32 ioaddr = ndev->base_addr;
 
 	if (mdp->duplex) /* Full */
-		ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
+		writel(readl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
 	else		/* Half */
-		ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
+		writel(readl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
 }
 
 static void sh_eth_set_rate(struct net_device *ndev)
@@ -192,13 +192,13 @@
 
 	switch (mdp->speed) {
 	case 10: /* 10BASE */
-		ctrl_outl(GECMR_10, ioaddr + GECMR);
+		writel(GECMR_10, ioaddr + GECMR);
 		break;
 	case 100:/* 100BASE */
-		ctrl_outl(GECMR_100, ioaddr + GECMR);
+		writel(GECMR_100, ioaddr + GECMR);
 		break;
 	case 1000: /* 1000BASE */
-		ctrl_outl(GECMR_1000, ioaddr + GECMR);
+		writel(GECMR_1000, ioaddr + GECMR);
 		break;
 	default:
 		break;
@@ -283,9 +283,9 @@
 {
 	u32 ioaddr = ndev->base_addr;
 
-	ctrl_outl(ctrl_inl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
+	writel(readl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
 	mdelay(3);
-	ctrl_outl(ctrl_inl(ioaddr + EDMR) & ~EDMR_SRST, ioaddr + EDMR);
+	writel(readl(ioaddr + EDMR) & ~EDMR_SRST, ioaddr + EDMR);
 }
 #endif
 
@@ -336,10 +336,10 @@
 {
 	u32 ioaddr = ndev->base_addr;
 
-	ctrl_outl((ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
+	writel((ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
 		  (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]),
 		  ioaddr + MAHR);
-	ctrl_outl((ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]),
+	writel((ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]),
 		  ioaddr + MALR);
 }
 
@@ -358,12 +358,12 @@
 	if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
 		memcpy(ndev->dev_addr, mac, 6);
 	} else {
-		ndev->dev_addr[0] = (ctrl_inl(ioaddr + MAHR) >> 24);
-		ndev->dev_addr[1] = (ctrl_inl(ioaddr + MAHR) >> 16) & 0xFF;
-		ndev->dev_addr[2] = (ctrl_inl(ioaddr + MAHR) >> 8) & 0xFF;
-		ndev->dev_addr[3] = (ctrl_inl(ioaddr + MAHR) & 0xFF);
-		ndev->dev_addr[4] = (ctrl_inl(ioaddr + MALR) >> 8) & 0xFF;
-		ndev->dev_addr[5] = (ctrl_inl(ioaddr + MALR) & 0xFF);
+		ndev->dev_addr[0] = (readl(ioaddr + MAHR) >> 24);
+		ndev->dev_addr[1] = (readl(ioaddr + MAHR) >> 16) & 0xFF;
+		ndev->dev_addr[2] = (readl(ioaddr + MAHR) >> 8) & 0xFF;
+		ndev->dev_addr[3] = (readl(ioaddr + MAHR) & 0xFF);
+		ndev->dev_addr[4] = (readl(ioaddr + MALR) >> 8) & 0xFF;
+		ndev->dev_addr[5] = (readl(ioaddr + MALR) & 0xFF);
 	}
 }
 
@@ -379,19 +379,19 @@
 /* PHY bit set */
 static void bb_set(u32 addr, u32 msk)
 {
-	ctrl_outl(ctrl_inl(addr) | msk, addr);
+	writel(readl(addr) | msk, addr);
 }
 
 /* PHY bit clear */
 static void bb_clr(u32 addr, u32 msk)
 {
-	ctrl_outl((ctrl_inl(addr) & ~msk), addr);
+	writel((readl(addr) & ~msk), addr);
 }
 
 /* PHY bit read */
 static int bb_read(u32 addr, u32 msk)
 {
-	return (ctrl_inl(addr) & msk) != 0;
+	return (readl(addr) & msk) != 0;
 }
 
 /* Data I/O pin control */
@@ -506,9 +506,9 @@
 		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
 		/* Rx descriptor address set */
 		if (i == 0) {
-			ctrl_outl(mdp->rx_desc_dma, ioaddr + RDLAR);
+			writel(mdp->rx_desc_dma, ioaddr + RDLAR);
 #if defined(CONFIG_CPU_SUBTYPE_SH7763)
-			ctrl_outl(mdp->rx_desc_dma, ioaddr + RDFAR);
+			writel(mdp->rx_desc_dma, ioaddr + RDFAR);
 #endif
 		}
 	}
@@ -528,9 +528,9 @@
 		txdesc->buffer_length = 0;
 		if (i == 0) {
 			/* Tx descriptor address set */
-			ctrl_outl(mdp->tx_desc_dma, ioaddr + TDLAR);
+			writel(mdp->tx_desc_dma, ioaddr + TDLAR);
 #if defined(CONFIG_CPU_SUBTYPE_SH7763)
-			ctrl_outl(mdp->tx_desc_dma, ioaddr + TDFAR);
+			writel(mdp->tx_desc_dma, ioaddr + TDFAR);
 #endif
 		}
 	}
@@ -623,71 +623,71 @@
 	/* Descriptor format */
 	sh_eth_ring_format(ndev);
 	if (mdp->cd->rpadir)
-		ctrl_outl(mdp->cd->rpadir_value, ioaddr + RPADIR);
+		writel(mdp->cd->rpadir_value, ioaddr + RPADIR);
 
 	/* all sh_eth int mask */
-	ctrl_outl(0, ioaddr + EESIPR);
+	writel(0, ioaddr + EESIPR);
 
 #if defined(__LITTLE_ENDIAN__)
 	if (mdp->cd->hw_swap)
-		ctrl_outl(EDMR_EL, ioaddr + EDMR);
+		writel(EDMR_EL, ioaddr + EDMR);
 	else
 #endif
-		ctrl_outl(0, ioaddr + EDMR);
+		writel(0, ioaddr + EDMR);
 
 	/* FIFO size set */
-	ctrl_outl(mdp->cd->fdr_value, ioaddr + FDR);
-	ctrl_outl(0, ioaddr + TFTR);
+	writel(mdp->cd->fdr_value, ioaddr + FDR);
+	writel(0, ioaddr + TFTR);
 
 	/* Frame recv control */
-	ctrl_outl(mdp->cd->rmcr_value, ioaddr + RMCR);
+	writel(mdp->cd->rmcr_value, ioaddr + RMCR);
 
 	rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5;
 	tx_int_var = mdp->tx_int_var = DESC_I_TINT2;
-	ctrl_outl(rx_int_var | tx_int_var, ioaddr + TRSCER);
+	writel(rx_int_var | tx_int_var, ioaddr + TRSCER);
 
 	if (mdp->cd->bculr)
-		ctrl_outl(0x800, ioaddr + BCULR);	/* Burst sycle set */
+		writel(0x800, ioaddr + BCULR);	/* Burst cycle set */
 
-	ctrl_outl(mdp->cd->fcftr_value, ioaddr + FCFTR);
+	writel(mdp->cd->fcftr_value, ioaddr + FCFTR);
 
 	if (!mdp->cd->no_trimd)
-		ctrl_outl(0, ioaddr + TRIMD);
+		writel(0, ioaddr + TRIMD);
 
 	/* Recv frame limit set register */
-	ctrl_outl(RFLR_VALUE, ioaddr + RFLR);
+	writel(RFLR_VALUE, ioaddr + RFLR);
 
-	ctrl_outl(ctrl_inl(ioaddr + EESR), ioaddr + EESR);
-	ctrl_outl(mdp->cd->eesipr_value, ioaddr + EESIPR);
+	writel(readl(ioaddr + EESR), ioaddr + EESR);
+	writel(mdp->cd->eesipr_value, ioaddr + EESIPR);
 
 	/* PAUSE Prohibition */
-	val = (ctrl_inl(ioaddr + ECMR) & ECMR_DM) |
+	val = (readl(ioaddr + ECMR) & ECMR_DM) |
 		ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;
 
-	ctrl_outl(val, ioaddr + ECMR);
+	writel(val, ioaddr + ECMR);
 
 	if (mdp->cd->set_rate)
 		mdp->cd->set_rate(ndev);
 
 	/* E-MAC Status Register clear */
-	ctrl_outl(mdp->cd->ecsr_value, ioaddr + ECSR);
+	writel(mdp->cd->ecsr_value, ioaddr + ECSR);
 
 	/* E-MAC Interrupt Enable register */
-	ctrl_outl(mdp->cd->ecsipr_value, ioaddr + ECSIPR);
+	writel(mdp->cd->ecsipr_value, ioaddr + ECSIPR);
 
 	/* Set MAC address */
 	update_mac_address(ndev);
 
 	/* mask reset */
 	if (mdp->cd->apr)
-		ctrl_outl(APR_AP, ioaddr + APR);
+		writel(APR_AP, ioaddr + APR);
 	if (mdp->cd->mpr)
-		ctrl_outl(MPR_MP, ioaddr + MPR);
+		writel(MPR_MP, ioaddr + MPR);
 	if (mdp->cd->tpauser)
-		ctrl_outl(TPAUSER_UNLIMITED, ioaddr + TPAUSER);
+		writel(TPAUSER_UNLIMITED, ioaddr + TPAUSER);
 
 	/* Setting the Rx mode will start the Rx process. */
-	ctrl_outl(EDRRR_R, ioaddr + EDRRR);
+	writel(EDRRR_R, ioaddr + EDRRR);
 
 	netif_start_queue(ndev);
 
@@ -811,8 +811,8 @@
 
 	/* Restart Rx engine if stopped. */
 	/* If we don't need to check status, don't. -KDU */
-	if (!(ctrl_inl(ndev->base_addr + EDRRR) & EDRRR_R))
-		ctrl_outl(EDRRR_R, ndev->base_addr + EDRRR);
+	if (!(readl(ndev->base_addr + EDRRR) & EDRRR_R))
+		writel(EDRRR_R, ndev->base_addr + EDRRR);
 
 	return 0;
 }
@@ -827,8 +827,8 @@
 	u32 mask;
 
 	if (intr_status & EESR_ECI) {
-		felic_stat = ctrl_inl(ioaddr + ECSR);
-		ctrl_outl(felic_stat, ioaddr + ECSR);	/* clear int */
+		felic_stat = readl(ioaddr + ECSR);
+		writel(felic_stat, ioaddr + ECSR);	/* clear int */
 		if (felic_stat & ECSR_ICD)
 			mdp->stats.tx_carrier_errors++;
 		if (felic_stat & ECSR_LCHNG) {
@@ -839,25 +839,25 @@
 				else
 					link_stat = PHY_ST_LINK;
 			} else {
-				link_stat = (ctrl_inl(ioaddr + PSR));
+				link_stat = (readl(ioaddr + PSR));
 				if (mdp->ether_link_active_low)
 					link_stat = ~link_stat;
 			}
 			if (!(link_stat & PHY_ST_LINK)) {
 				/* Link Down : disable tx and rx */
-				ctrl_outl(ctrl_inl(ioaddr + ECMR) &
+				writel(readl(ioaddr + ECMR) &
 					  ~(ECMR_RE | ECMR_TE), ioaddr + ECMR);
 			} else {
 				/* Link Up */
-				ctrl_outl(ctrl_inl(ioaddr + EESIPR) &
+				writel(readl(ioaddr + EESIPR) &
 					  ~DMAC_M_ECI, ioaddr + EESIPR);
 				/*clear int */
-				ctrl_outl(ctrl_inl(ioaddr + ECSR),
+				writel(readl(ioaddr + ECSR),
 					  ioaddr + ECSR);
-				ctrl_outl(ctrl_inl(ioaddr + EESIPR) |
+				writel(readl(ioaddr + EESIPR) |
 					  DMAC_M_ECI, ioaddr + EESIPR);
 				/* enable tx and rx */
-				ctrl_outl(ctrl_inl(ioaddr + ECMR) |
+				writel(readl(ioaddr + ECMR) |
 					  (ECMR_RE | ECMR_TE), ioaddr + ECMR);
 			}
 		}
@@ -888,8 +888,8 @@
 		/* Receive Descriptor Empty int */
 		mdp->stats.rx_over_errors++;
 
-		if (ctrl_inl(ioaddr + EDRRR) ^ EDRRR_R)
-			ctrl_outl(EDRRR_R, ioaddr + EDRRR);
+		if (readl(ioaddr + EDRRR) ^ EDRRR_R)
+			writel(EDRRR_R, ioaddr + EDRRR);
 		dev_err(&ndev->dev, "Receive Descriptor Empty\n");
 	}
 	if (intr_status & EESR_RFE) {
@@ -903,7 +903,7 @@
 		mask &= ~EESR_ADE;
 	if (intr_status & mask) {
 		/* Tx error */
-		u32 edtrr = ctrl_inl(ndev->base_addr + EDTRR);
+		u32 edtrr = readl(ndev->base_addr + EDTRR);
 		/* dmesg */
 		dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
 				intr_status, mdp->cur_tx);
@@ -915,7 +915,7 @@
 		/* SH7712 BUG */
 		if (edtrr ^ EDTRR_TRNS) {
 			/* tx dma start */
-			ctrl_outl(EDTRR_TRNS, ndev->base_addr + EDTRR);
+			writel(EDTRR_TRNS, ndev->base_addr + EDTRR);
 		}
 		/* wakeup */
 		netif_wake_queue(ndev);
@@ -934,12 +934,12 @@
 	spin_lock(&mdp->lock);
 
 	/* Get interrupt status */
-	intr_status = ctrl_inl(ioaddr + EESR);
+	intr_status = readl(ioaddr + EESR);
 	/* Clear interrupt */
 	if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
 			EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
 			cd->tx_check | cd->eesr_err_check)) {
-		ctrl_outl(intr_status, ioaddr + EESR);
+		writel(intr_status, ioaddr + EESR);
 		ret = IRQ_HANDLED;
 	} else
 		goto other_irq;
@@ -1000,7 +1000,7 @@
 				mdp->cd->set_rate(ndev);
 		}
 		if (mdp->link == PHY_DOWN) {
-			ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_TXF)
+			writel((readl(ioaddr + ECMR) & ~ECMR_TXF)
 					| ECMR_DM, ioaddr + ECMR);
 			new_state = 1;
 			mdp->link = phydev->link;
@@ -1125,7 +1125,7 @@
 
 	/* warning message out. */
 	printk(KERN_WARNING "%s: transmit timed out, status %8.8x,"
-	       " resetting...\n", ndev->name, (int)ctrl_inl(ioaddr + EESR));
+	       " resetting...\n", ndev->name, (int)readl(ioaddr + EESR));
 
 	/* tx_errors count up */
 	mdp->stats.tx_errors++;
@@ -1196,8 +1196,8 @@
 
 	mdp->cur_tx++;
 
-	if (!(ctrl_inl(ndev->base_addr + EDTRR) & EDTRR_TRNS))
-		ctrl_outl(EDTRR_TRNS, ndev->base_addr + EDTRR);
+	if (!(readl(ndev->base_addr + EDTRR) & EDTRR_TRNS))
+		writel(EDTRR_TRNS, ndev->base_addr + EDTRR);
 
 	return NETDEV_TX_OK;
 }
@@ -1212,11 +1212,11 @@
 	netif_stop_queue(ndev);
 
 	/* Disable interrupts by clearing the interrupt mask. */
-	ctrl_outl(0x0000, ioaddr + EESIPR);
+	writel(0x0000, ioaddr + EESIPR);
 
 	/* Stop the chip's Tx and Rx processes. */
-	ctrl_outl(0, ioaddr + EDTRR);
-	ctrl_outl(0, ioaddr + EDRRR);
+	writel(0, ioaddr + EDTRR);
+	writel(0, ioaddr + EDRRR);
 
 	/* PHY Disconnect */
 	if (mdp->phydev) {
@@ -1251,20 +1251,20 @@
 
 	pm_runtime_get_sync(&mdp->pdev->dev);
 
-	mdp->stats.tx_dropped += ctrl_inl(ioaddr + TROCR);
-	ctrl_outl(0, ioaddr + TROCR);	/* (write clear) */
-	mdp->stats.collisions += ctrl_inl(ioaddr + CDCR);
-	ctrl_outl(0, ioaddr + CDCR);	/* (write clear) */
-	mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + LCCR);
-	ctrl_outl(0, ioaddr + LCCR);	/* (write clear) */
+	mdp->stats.tx_dropped += readl(ioaddr + TROCR);
+	writel(0, ioaddr + TROCR);	/* (write clear) */
+	mdp->stats.collisions += readl(ioaddr + CDCR);
+	writel(0, ioaddr + CDCR);	/* (write clear) */
+	mdp->stats.tx_carrier_errors += readl(ioaddr + LCCR);
+	writel(0, ioaddr + LCCR);	/* (write clear) */
 #if defined(CONFIG_CPU_SUBTYPE_SH7763)
-	mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CERCR);/* CERCR */
-	ctrl_outl(0, ioaddr + CERCR);	/* (write clear) */
-	mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CEECR);/* CEECR */
-	ctrl_outl(0, ioaddr + CEECR);	/* (write clear) */
+	mdp->stats.tx_carrier_errors += readl(ioaddr + CERCR);/* CERCR */
+	writel(0, ioaddr + CERCR);	/* (write clear) */
+	mdp->stats.tx_carrier_errors += readl(ioaddr + CEECR);/* CEECR */
+	writel(0, ioaddr + CEECR);	/* (write clear) */
 #else
-	mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CNDCR);
-	ctrl_outl(0, ioaddr + CNDCR);	/* (write clear) */
+	mdp->stats.tx_carrier_errors += readl(ioaddr + CNDCR);
+	writel(0, ioaddr + CNDCR);	/* (write clear) */
 #endif
 	pm_runtime_put_sync(&mdp->pdev->dev);
 
@@ -1295,11 +1295,11 @@
 
 	if (ndev->flags & IFF_PROMISC) {
 		/* Set promiscuous. */
-		ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_MCT) | ECMR_PRM,
+		writel((readl(ioaddr + ECMR) & ~ECMR_MCT) | ECMR_PRM,
 			  ioaddr + ECMR);
 	} else {
 		/* Normal, unicast/broadcast-only mode. */
-		ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_PRM) | ECMR_MCT,
+		writel((readl(ioaddr + ECMR) & ~ECMR_PRM) | ECMR_MCT,
 			  ioaddr + ECMR);
 	}
 }
@@ -1307,30 +1307,30 @@
 /* SuperH's TSU register init function */
 static void sh_eth_tsu_init(u32 ioaddr)
 {
-	ctrl_outl(0, ioaddr + TSU_FWEN0);	/* Disable forward(0->1) */
-	ctrl_outl(0, ioaddr + TSU_FWEN1);	/* Disable forward(1->0) */
-	ctrl_outl(0, ioaddr + TSU_FCM);	/* forward fifo 3k-3k */
-	ctrl_outl(0xc, ioaddr + TSU_BSYSL0);
-	ctrl_outl(0xc, ioaddr + TSU_BSYSL1);
-	ctrl_outl(0, ioaddr + TSU_PRISL0);
-	ctrl_outl(0, ioaddr + TSU_PRISL1);
-	ctrl_outl(0, ioaddr + TSU_FWSL0);
-	ctrl_outl(0, ioaddr + TSU_FWSL1);
-	ctrl_outl(TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, ioaddr + TSU_FWSLC);
+	writel(0, ioaddr + TSU_FWEN0);	/* Disable forward(0->1) */
+	writel(0, ioaddr + TSU_FWEN1);	/* Disable forward(1->0) */
+	writel(0, ioaddr + TSU_FCM);	/* forward fifo 3k-3k */
+	writel(0xc, ioaddr + TSU_BSYSL0);
+	writel(0xc, ioaddr + TSU_BSYSL1);
+	writel(0, ioaddr + TSU_PRISL0);
+	writel(0, ioaddr + TSU_PRISL1);
+	writel(0, ioaddr + TSU_FWSL0);
+	writel(0, ioaddr + TSU_FWSL1);
+	writel(TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, ioaddr + TSU_FWSLC);
 #if defined(CONFIG_CPU_SUBTYPE_SH7763)
-	ctrl_outl(0, ioaddr + TSU_QTAG0);	/* Disable QTAG(0->1) */
-	ctrl_outl(0, ioaddr + TSU_QTAG1);	/* Disable QTAG(1->0) */
+	writel(0, ioaddr + TSU_QTAG0);	/* Disable QTAG(0->1) */
+	writel(0, ioaddr + TSU_QTAG1);	/* Disable QTAG(1->0) */
 #else
-	ctrl_outl(0, ioaddr + TSU_QTAGM0);	/* Disable QTAG(0->1) */
-	ctrl_outl(0, ioaddr + TSU_QTAGM1);	/* Disable QTAG(1->0) */
+	writel(0, ioaddr + TSU_QTAGM0);	/* Disable QTAG(0->1) */
+	writel(0, ioaddr + TSU_QTAGM1);	/* Disable QTAG(1->0) */
 #endif
-	ctrl_outl(0, ioaddr + TSU_FWSR);	/* all interrupt status clear */
-	ctrl_outl(0, ioaddr + TSU_FWINMK);	/* Disable all interrupt */
-	ctrl_outl(0, ioaddr + TSU_TEN);	/* Disable all CAM entry */
-	ctrl_outl(0, ioaddr + TSU_POST1);	/* Disable CAM entry [ 0- 7] */
-	ctrl_outl(0, ioaddr + TSU_POST2);	/* Disable CAM entry [ 8-15] */
-	ctrl_outl(0, ioaddr + TSU_POST3);	/* Disable CAM entry [16-23] */
-	ctrl_outl(0, ioaddr + TSU_POST4);	/* Disable CAM entry [24-31] */
+	writel(0, ioaddr + TSU_FWSR);	/* all interrupt status clear */
+	writel(0, ioaddr + TSU_FWINMK);	/* Disable all interrupt */
+	writel(0, ioaddr + TSU_TEN);	/* Disable all CAM entry */
+	writel(0, ioaddr + TSU_POST1);	/* Disable CAM entry [ 0- 7] */
+	writel(0, ioaddr + TSU_POST2);	/* Disable CAM entry [ 8-15] */
+	writel(0, ioaddr + TSU_POST3);	/* Disable CAM entry [16-23] */
+	writel(0, ioaddr + TSU_POST4);	/* Disable CAM entry [24-31] */
 }
 #endif /* SH_ETH_HAS_TSU */
 
@@ -1552,7 +1552,6 @@
 
 	sh_mdio_release(ndev);
 	unregister_netdev(ndev);
-	flush_scheduled_work();
 	pm_runtime_disable(&pdev->dev);
 	free_netdev(ndev);
 	platform_set_drvdata(pdev, NULL);
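The hunks above convert the SH-specific ctrl_inl()/ctrl_outl() accessors to
the generic readl()/writel() MMIO helpers, which work on any architecture.
A minimal sketch of the generic pattern (REG_STATUS is a hypothetical
register offset):

	void __iomem *base = ioremap(res->start, resource_size(res));
	u32 status = readl(base + REG_STATUS);	/* 32-bit register read */
	writel(0, base + REG_STATUS);		/* write-clear */
	iounmap(base);

Note the generic accessors take a void __iomem * cookie rather than a plain
integer address.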
diff --git a/drivers/net/sh_eth.h b/drivers/net/sh_eth.h
index 8b47763..efa6422 100644
--- a/drivers/net/sh_eth.h
+++ b/drivers/net/sh_eth.h
@@ -26,7 +26,6 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/spinlock.h>
-#include <linux/workqueue.h>
 #include <linux/netdevice.h>
 #include <linux/phy.h>
 
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index a5d6a6b..3406ed8 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -1915,9 +1915,10 @@
 static void __devexit sis190_remove_one(struct pci_dev *pdev)
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
+	struct sis190_private *tp = netdev_priv(dev);
 
 	sis190_mii_remove(dev);
-	flush_scheduled_work();
+	cancel_work_sync(&tp->phy_task);
 	unregister_netdev(dev);
 	sis190_release_board(pdev);
 	pci_set_drvdata(pdev, NULL);
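flush_scheduled_work() waits for every item on the shared workqueue and can
deadlock when the caller holds a lock that one of those unrelated items
needs; cancelling only the driver's own work item is the safer replacement.
A minimal sketch (names hypothetical):

	struct foo_private {
		struct work_struct phy_task;
	};

	static void foo_remove(struct foo_private *tp)
	{
		/* returns once phy_task is neither queued nor running */
		cancel_work_sync(&tp->phy_task);
	}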
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index bfec2e0..8c1404b 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -3858,7 +3858,6 @@
 
 	/* device is off until link detection */
 	netif_carrier_off(dev);
-	netif_stop_queue(dev);
 
 	return dev;
 }
@@ -4013,8 +4012,6 @@
 	if (!hw)
 		return;
 
-	flush_scheduled_work();
-
 	dev1 = hw->dev[1];
 	if (dev1)
 		unregister_netdev(dev1);
diff --git a/drivers/net/smsc911x.h b/drivers/net/smsc911x.h
index 52f38e1..50f712e 100644
--- a/drivers/net/smsc911x.h
+++ b/drivers/net/smsc911x.h
@@ -22,7 +22,7 @@
 #define __SMSC911X_H__
 
 #define TX_FIFO_LOW_THRESHOLD	((u32)1600)
-#define SMSC911X_EEPROM_SIZE	((u32)7)
+#define SMSC911X_EEPROM_SIZE	((u32)128)
 #define USE_DEBUG		0
 
 /* This is the maximum number of packets to be received every
diff --git a/drivers/net/stmmac/stmmac.h b/drivers/net/stmmac/stmmac.h
index 79bdc2e..5f06c47 100644
--- a/drivers/net/stmmac/stmmac.h
+++ b/drivers/net/stmmac/stmmac.h
@@ -20,7 +20,7 @@
   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
 *******************************************************************************/
 
-#define DRV_MODULE_VERSION	"Apr_2010"
+#define DRV_MODULE_VERSION	"Nov_2010"
 #include <linux/platform_device.h>
 #include <linux/stmmac.h>
 
@@ -37,7 +37,6 @@
 	unsigned int cur_tx;
 	unsigned int dirty_tx;
 	unsigned int dma_tx_size;
-	int tx_coe;
 	int tx_coalesce;
 
 	struct dma_desc *dma_rx ;
@@ -48,7 +47,6 @@
 	struct sk_buff_head rx_recycle;
 
 	struct net_device *dev;
-	int is_gmac;
 	dma_addr_t dma_rx_phy;
 	unsigned int dma_rx_size;
 	unsigned int dma_buf_sz;
@@ -60,14 +58,11 @@
 	struct napi_struct napi;
 
 	phy_interface_t phy_interface;
-	int pbl;
-	int bus_id;
 	int phy_addr;
 	int phy_mask;
 	int (*phy_reset) (void *priv);
-	void (*fix_mac_speed) (void *priv, unsigned int speed);
-	void (*bus_setup)(void __iomem *ioaddr);
-	void *bsp_priv;
+	int rx_coe;
+	int no_csum_insertion;
 
 	int phy_irq;
 	struct phy_device *phydev;
@@ -77,47 +72,20 @@
 	unsigned int flow_ctrl;
 	unsigned int pause;
 	struct mii_bus *mii;
-	int mii_clk_csr;
 
 	u32 msg_enable;
 	spinlock_t lock;
 	int wolopts;
 	int wolenabled;
-	int shutdown;
 #ifdef CONFIG_STMMAC_TIMER
 	struct stmmac_timer *tm;
 #endif
 #ifdef STMMAC_VLAN_TAG_USED
 	struct vlan_group *vlgrp;
 #endif
-	int enh_desc;
-	int rx_coe;
-	int bugged_jumbo;
-	int no_csum_insertion;
+	struct plat_stmmacenet_data *plat;
 };
 
-#ifdef CONFIG_STM_DRIVERS
-#include <linux/stm/pad.h>
-static inline int stmmac_claim_resource(struct platform_device *pdev)
-{
-	int ret = 0;
-	struct plat_stmmacenet_data *plat_dat = pdev->dev.platform_data;
-
-	/* Pad routing setup */
-	if (IS_ERR(devm_stm_pad_claim(&pdev->dev, plat_dat->pad_config,
-			dev_name(&pdev->dev)))) {
-		printk(KERN_ERR "%s: Failed to request pads!\n", __func__);
-		ret = -ENODEV;
-	}
-	return ret;
-}
-#else
-static inline int stmmac_claim_resource(struct platform_device *pdev)
-{
-	return 0;
-}
-#endif
-
 extern int stmmac_mdio_unregister(struct net_device *ndev);
 extern int stmmac_mdio_register(struct net_device *ndev);
 extern void stmmac_set_ethtool_ops(struct net_device *netdev);
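Instead of mirroring every platform field (bus_id, pbl, tx_coe, ...) into
the private structure, the driver now keeps a single pointer to the
platform data. A sketch of the resulting access pattern:

	priv->plat = pdev->dev.platform_data;
	...
	if (priv->plat->has_gmac)
		device = dwmac1000_setup(priv->ioaddr);

This shrinks struct stmmac_priv and makes it obvious which settings come
from the board description.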
diff --git a/drivers/net/stmmac/stmmac_ethtool.c b/drivers/net/stmmac/stmmac_ethtool.c
index 6d65482..fd719ed 100644
--- a/drivers/net/stmmac/stmmac_ethtool.c
+++ b/drivers/net/stmmac/stmmac_ethtool.c
@@ -94,7 +94,7 @@
 {
 	struct stmmac_priv *priv = netdev_priv(dev);
 
-	if (!priv->is_gmac)
+	if (!priv->plat->has_gmac)
 		strcpy(info->driver, MAC100_ETHTOOL_NAME);
 	else
 		strcpy(info->driver, GMAC_ETHTOOL_NAME);
@@ -176,7 +176,7 @@
 
 	memset(reg_space, 0x0, REG_SPACE_SIZE);
 
-	if (!priv->is_gmac) {
+	if (!priv->plat->has_gmac) {
 		/* MAC registers */
 		for (i = 0; i < 12; i++)
 			reg_space[i] = readl(priv->ioaddr + (i * 4));
@@ -197,16 +197,6 @@
 	}
 }
 
-static int stmmac_ethtool_set_tx_csum(struct net_device *netdev, u32 data)
-{
-	if (data)
-		netdev->features |= NETIF_F_HW_CSUM;
-	else
-		netdev->features &= ~NETIF_F_HW_CSUM;
-
-	return 0;
-}
-
 static u32 stmmac_ethtool_get_rx_csum(struct net_device *dev)
 {
 	struct stmmac_priv *priv = netdev_priv(dev);
@@ -370,7 +360,7 @@
 	.get_link = ethtool_op_get_link,
 	.get_rx_csum = stmmac_ethtool_get_rx_csum,
 	.get_tx_csum = ethtool_op_get_tx_csum,
-	.set_tx_csum = stmmac_ethtool_set_tx_csum,
+	.set_tx_csum = ethtool_op_set_tx_ipv6_csum,
 	.get_sg = ethtool_op_get_sg,
 	.set_sg = ethtool_op_set_sg,
 	.get_pauseparam = stmmac_get_pauseparam,
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 06bc603..20f803d 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -186,6 +186,18 @@
 	return priv->dirty_tx + priv->dma_tx_size - priv->cur_tx - 1;
 }
 
+/* On some ST platforms, some HW system configuration registers have to be
+ * set according to the link speed negotiated.
+ */
+static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
+{
+	struct phy_device *phydev = priv->phydev;
+
+	if (likely(priv->plat->fix_mac_speed))
+		priv->plat->fix_mac_speed(priv->plat->bsp_priv,
+					  phydev->speed);
+}
+
 /**
  * stmmac_adjust_link
  * @dev: net device structure
@@ -228,15 +240,13 @@
 			new_state = 1;
 			switch (phydev->speed) {
 			case 1000:
-				if (likely(priv->is_gmac))
+				if (likely(priv->plat->has_gmac))
 					ctrl &= ~priv->hw->link.port;
-				if (likely(priv->fix_mac_speed))
-					priv->fix_mac_speed(priv->bsp_priv,
-							    phydev->speed);
+				stmmac_hw_fix_mac_speed(priv);
 				break;
 			case 100:
 			case 10:
-				if (priv->is_gmac) {
+				if (priv->plat->has_gmac) {
 					ctrl |= priv->hw->link.port;
 					if (phydev->speed == SPEED_100) {
 						ctrl |= priv->hw->link.speed;
@@ -246,9 +256,7 @@
 				} else {
 					ctrl &= ~priv->hw->link.port;
 				}
-				if (likely(priv->fix_mac_speed))
-					priv->fix_mac_speed(priv->bsp_priv,
-							    phydev->speed);
+				stmmac_hw_fix_mac_speed(priv);
 				break;
 			default:
 				if (netif_msg_link(priv))
@@ -305,7 +313,7 @@
 		return 0;
 	}
 
-	snprintf(bus_id, MII_BUS_ID_SIZE, "%x", priv->bus_id);
+	snprintf(bus_id, MII_BUS_ID_SIZE, "%x", priv->plat->bus_id);
 	snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
 		 priv->phy_addr);
 	pr_debug("stmmac_init_phy:  trying to attach to %s\n", phy_id);
@@ -552,7 +560,7 @@
  */
 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
 {
-	if (likely((priv->tx_coe) && (!priv->no_csum_insertion))) {
+	if (likely((priv->plat->tx_coe) && (!priv->no_csum_insertion))) {
 		/* In case of GMAC, SF mode has to be enabled
 		 * to perform the TX COE. This depends on:
 		 * 1) TX COE if actually supported
@@ -814,7 +822,7 @@
 	init_dma_desc_rings(dev);
 
 	/* DMA initialization and SW reset */
-	if (unlikely(priv->hw->dma->init(priv->ioaddr, priv->pbl,
+	if (unlikely(priv->hw->dma->init(priv->ioaddr, priv->plat->pbl,
 					 priv->dma_tx_phy,
 					 priv->dma_rx_phy) < 0)) {
 
@@ -825,19 +833,17 @@
 	/* Copy the MAC addr into the HW  */
 	priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0);
 	/* If required, perform hw setup of the bus. */
-	if (priv->bus_setup)
-		priv->bus_setup(priv->ioaddr);
+	if (priv->plat->bus_setup)
+		priv->plat->bus_setup(priv->ioaddr);
 	/* Initialize the MAC Core */
 	priv->hw->mac->core_init(priv->ioaddr);
 
 	priv->rx_coe = priv->hw->mac->rx_coe(priv->ioaddr);
 	if (priv->rx_coe)
 		pr_info("stmmac: Rx Checksum Offload Engine supported\n");
-	if (priv->tx_coe)
+	if (priv->plat->tx_coe)
 		pr_info("\tTX Checksum insertion supported\n");
 
-	priv->shutdown = 0;
-
 	/* Initialise the MMC (if present) to disable all interrupts. */
 	writel(0xffffffff, priv->ioaddr + MMC_HIGH_INTR_MASK);
 	writel(0xffffffff, priv->ioaddr + MMC_LOW_INTR_MASK);
@@ -943,7 +949,7 @@
 	       skb, skb->len);
 
 	segs = skb_gso_segment(skb, priv->dev->features & ~NETIF_F_TSO);
-	if (unlikely(IS_ERR(segs)))
+	if (IS_ERR(segs))
 		goto sw_tso_end;
 
 	do {
@@ -1042,7 +1048,8 @@
 		return stmmac_sw_tso(priv, skb);
 
 	if (likely((skb->ip_summed == CHECKSUM_PARTIAL))) {
-		if (unlikely((!priv->tx_coe) || (priv->no_csum_insertion)))
+		if (unlikely((!priv->plat->tx_coe) ||
+			     (priv->no_csum_insertion)))
 			skb_checksum_help(skb);
 		else
 			csum_insertion = 1;
@@ -1146,7 +1153,7 @@
 					   DMA_FROM_DEVICE);
 
 			(p + entry)->des2 = priv->rx_skbuff_dma[entry];
-			if (unlikely(priv->is_gmac)) {
+			if (unlikely(priv->plat->has_gmac)) {
 				if (bfsize >= BUF_SIZE_8KiB)
 					(p + entry)->des3 =
 					    (p + entry)->des2 + BUF_SIZE_8KiB;
@@ -1356,7 +1363,7 @@
 		return -EBUSY;
 	}
 
-	if (priv->is_gmac)
+	if (priv->plat->has_gmac)
 		max_mtu = JUMBO_LEN;
 	else
 		max_mtu = ETH_DATA_LEN;
@@ -1370,7 +1377,7 @@
 	 * needs to have the Tx COE disabled for oversized frames
 	 * (due to limited buffer sizes). In this case we disable
 	 * the TX csum insertion in the TDES and not use SF. */
-	if ((priv->bugged_jumbo) && (priv->dev->mtu > ETH_DATA_LEN))
+	if ((priv->plat->bugged_jumbo) && (priv->dev->mtu > ETH_DATA_LEN))
 		priv->no_csum_insertion = 1;
 	else
 		priv->no_csum_insertion = 0;
@@ -1390,7 +1397,7 @@
 		return IRQ_NONE;
 	}
 
-	if (priv->is_gmac)
+	if (priv->plat->has_gmac)
 		/* To handle GMAC own interrupts */
 		priv->hw->mac->host_irq_status((void __iomem *) dev->base_addr);
 
@@ -1487,7 +1494,8 @@
 	dev->netdev_ops = &stmmac_netdev_ops;
 	stmmac_set_ethtool_ops(dev);
 
-	dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA);
+	dev->features |= NETIF_F_SG | NETIF_F_HIGHDMA |
+		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
 	dev->watchdog_timeo = msecs_to_jiffies(watchdog);
 #ifdef STMMAC_VLAN_TAG_USED
 	/* Both mac100 and gmac support receive VLAN tag detection */
@@ -1509,6 +1517,8 @@
 		pr_warning("\tno valid MAC address;"
 			"please, use ifconfig or nwhwconfig!\n");
 
+	spin_lock_init(&priv->lock);
+
 	ret = register_netdev(dev);
 	if (ret) {
 		pr_err("%s: ERROR %i registering the device\n",
@@ -1518,9 +1528,7 @@
 
 	DBG(probe, DEBUG, "%s: Scatter/Gather: %s - HW checksums: %s\n",
 	    dev->name, (dev->features & NETIF_F_SG) ? "on" : "off",
-	    (dev->features & NETIF_F_HW_CSUM) ? "on" : "off");
-
-	spin_lock_init(&priv->lock);
+	    (dev->features & NETIF_F_IP_CSUM) ? "on" : "off");
 
 	return ret;
 }
@@ -1536,7 +1544,7 @@
 
 	struct mac_device_info *device;
 
-	if (priv->is_gmac)
+	if (priv->plat->has_gmac)
 		device = dwmac1000_setup(priv->ioaddr);
 	else
 		device = dwmac100_setup(priv->ioaddr);
@@ -1544,7 +1552,7 @@
 	if (!device)
 		return -ENOMEM;
 
-	if (priv->enh_desc) {
+	if (priv->plat->enh_desc) {
 		device->desc = &enh_desc_ops;
 		pr_info("\tEnhanced descriptor structure\n");
 	} else
@@ -1598,7 +1606,7 @@
 		plat_dat->bus_id);
 
 	/* Check that this phy is for the MAC being initialised */
-	if (priv->bus_id != plat_dat->bus_id)
+	if (priv->plat->bus_id != plat_dat->bus_id)
 		return 0;
 
 	/* OK, this PHY is connected to the MAC.
@@ -1634,7 +1642,7 @@
 	struct resource *res;
 	void __iomem *addr = NULL;
 	struct net_device *ndev = NULL;
-	struct stmmac_priv *priv;
+	struct stmmac_priv *priv = NULL;
 	struct plat_stmmacenet_data *plat_dat;
 
 	pr_info("STMMAC driver:\n\tplatform registration... ");
@@ -1683,13 +1691,9 @@
 	priv->device = &(pdev->dev);
 	priv->dev = ndev;
 	plat_dat = pdev->dev.platform_data;
-	priv->bus_id = plat_dat->bus_id;
-	priv->pbl = plat_dat->pbl;	/* TLI */
-	priv->mii_clk_csr = plat_dat->clk_csr;
-	priv->tx_coe = plat_dat->tx_coe;
-	priv->bugged_jumbo = plat_dat->bugged_jumbo;
-	priv->is_gmac = plat_dat->has_gmac;	/* GMAC is on board */
-	priv->enh_desc = plat_dat->enh_desc;
+
+	priv->plat = plat_dat;
+
 	priv->ioaddr = addr;
 
 	/* PMT module is not integrated in all the MAC devices. */
@@ -1703,10 +1707,12 @@
 	/* Set the I/O base addr */
 	ndev->base_addr = (unsigned long)addr;
 
-	/* Verify embedded resource for the platform */
-	ret = stmmac_claim_resource(pdev);
-	if (ret < 0)
-		goto out;
+	/* Custom initialisation */
+	if (priv->plat->init) {
+		ret = priv->plat->init(pdev);
+		if (unlikely(ret))
+			goto out;
+	}
 
 	/* MAC HW device detection */
 	ret = stmmac_mac_device_setup(ndev);
@@ -1727,16 +1733,12 @@
 		goto out;
 	}
 
-	priv->fix_mac_speed = plat_dat->fix_mac_speed;
-	priv->bus_setup = plat_dat->bus_setup;
-	priv->bsp_priv = plat_dat->bsp_priv;
-
 	pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n"
 	       "\tIO base addr: 0x%p)\n", ndev->name, pdev->name,
 	       pdev->id, ndev->irq, addr);
 
 	/* MDIO bus Registration */
-	pr_debug("\tMDIO bus (id: %d)...", priv->bus_id);
+	pr_debug("\tMDIO bus (id: %d)...", priv->plat->bus_id);
 	ret = stmmac_mdio_register(ndev);
 	if (ret < 0)
 		goto out;
@@ -1744,6 +1746,9 @@
 
 out:
 	if (ret < 0) {
+		if (priv->plat->exit)
+			priv->plat->exit(pdev);
+
 		platform_set_drvdata(pdev, NULL);
 		release_mem_region(res->start, resource_size(res));
 		if (addr != NULL)
@@ -1777,6 +1782,9 @@
 
 	stmmac_mdio_unregister(ndev);
 
+	if (priv->plat->exit)
+		priv->plat->exit(pdev);
+
 	platform_set_drvdata(pdev, NULL);
 	unregister_netdev(ndev);
 
@@ -1790,70 +1798,55 @@
 }
 
 #ifdef CONFIG_PM
-static int stmmac_suspend(struct platform_device *pdev, pm_message_t state)
+static int stmmac_suspend(struct device *dev)
 {
-	struct net_device *dev = platform_get_drvdata(pdev);
-	struct stmmac_priv *priv = netdev_priv(dev);
+	struct net_device *ndev = dev_get_drvdata(dev);
+	struct stmmac_priv *priv = netdev_priv(ndev);
 	int dis_ic = 0;
 
-	if (!dev || !netif_running(dev))
+	if (!ndev || !netif_running(ndev))
 		return 0;
 
 	spin_lock(&priv->lock);
 
-	if (state.event == PM_EVENT_SUSPEND) {
-		netif_device_detach(dev);
-		netif_stop_queue(dev);
-		if (priv->phydev)
-			phy_stop(priv->phydev);
+	netif_device_detach(ndev);
+	netif_stop_queue(ndev);
+	if (priv->phydev)
+		phy_stop(priv->phydev);
 
 #ifdef CONFIG_STMMAC_TIMER
-		priv->tm->timer_stop();
-		if (likely(priv->tm->enable))
-			dis_ic = 1;
+	priv->tm->timer_stop();
+	if (likely(priv->tm->enable))
+		dis_ic = 1;
 #endif
-		napi_disable(&priv->napi);
+	napi_disable(&priv->napi);
 
-		/* Stop TX/RX DMA */
-		priv->hw->dma->stop_tx(priv->ioaddr);
-		priv->hw->dma->stop_rx(priv->ioaddr);
-		/* Clear the Rx/Tx descriptors */
-		priv->hw->desc->init_rx_desc(priv->dma_rx, priv->dma_rx_size,
-					     dis_ic);
-		priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
+	/* Stop TX/RX DMA */
+	priv->hw->dma->stop_tx(priv->ioaddr);
+	priv->hw->dma->stop_rx(priv->ioaddr);
+	/* Clear the Rx/Tx descriptors */
+	priv->hw->desc->init_rx_desc(priv->dma_rx, priv->dma_rx_size,
+				     dis_ic);
+	priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
 
-		/* Enable Power down mode by programming the PMT regs */
-		if (device_can_wakeup(priv->device))
-			priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
-		else
-			stmmac_disable_mac(priv->ioaddr);
-	} else {
-		priv->shutdown = 1;
-		/* Although this can appear slightly redundant it actually
-		 * makes fast the standby operation and guarantees the driver
-		 * working if hibernation is on media. */
-		stmmac_release(dev);
-	}
+	/* Enable Power down mode by programming the PMT regs */
+	if (device_may_wakeup(priv->device))
+		priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
+	else
+		stmmac_disable_mac(priv->ioaddr);
 
 	spin_unlock(&priv->lock);
 	return 0;
 }
 
-static int stmmac_resume(struct platform_device *pdev)
+static int stmmac_resume(struct device *dev)
 {
-	struct net_device *dev = platform_get_drvdata(pdev);
-	struct stmmac_priv *priv = netdev_priv(dev);
+	struct net_device *ndev = dev_get_drvdata(dev);
+	struct stmmac_priv *priv = netdev_priv(ndev);
 
-	if (!netif_running(dev))
+	if (!netif_running(ndev))
 		return 0;
 
-	if (priv->shutdown) {
-		/* Re-open the interface and re-init the MAC/DMA
-		   and the rings (i.e. on hibernation stage) */
-		stmmac_open(dev);
-		return 0;
-	}
-
 	spin_lock(&priv->lock);
 
 	/* The Power Down bit, in the PM register, is cleared
@@ -1861,10 +1854,10 @@
 	 * is received. Anyway, it's better to manually clear
 	 * this bit because it can generate problems while resuming
 	 * from other devices (e.g. serial console). */
-	if (device_can_wakeup(priv->device))
+	if (device_may_wakeup(priv->device))
 		priv->hw->mac->pmt(priv->ioaddr, 0);
 
-	netif_device_attach(dev);
+	netif_device_attach(ndev);
 
 	/* Enable the MAC and DMA */
 	stmmac_enable_mac(priv->ioaddr);
@@ -1872,31 +1865,59 @@
 	priv->hw->dma->start_rx(priv->ioaddr);
 
 #ifdef CONFIG_STMMAC_TIMER
-	priv->tm->timer_start(tmrate);
+	if (likely(priv->tm->enable))
+		priv->tm->timer_start(tmrate);
 #endif
 	napi_enable(&priv->napi);
 
 	if (priv->phydev)
 		phy_start(priv->phydev);
 
-	netif_start_queue(dev);
+	netif_start_queue(ndev);
 
 	spin_unlock(&priv->lock);
 	return 0;
 }
-#endif
 
-static struct platform_driver stmmac_driver = {
-	.driver = {
-		   .name = STMMAC_RESOURCE_NAME,
-		   },
-	.probe = stmmac_dvr_probe,
-	.remove = stmmac_dvr_remove,
-#ifdef CONFIG_PM
+static int stmmac_freeze(struct device *dev)
+{
+	struct net_device *ndev = dev_get_drvdata(dev);
+
+	if (!ndev || !netif_running(ndev))
+		return 0;
+
+	return stmmac_release(ndev);
+}
+
+static int stmmac_restore(struct device *dev)
+{
+	struct net_device *ndev = dev_get_drvdata(dev);
+
+	if (!ndev || !netif_running(ndev))
+		return 0;
+
+	return stmmac_open(ndev);
+}
+
+static const struct dev_pm_ops stmmac_pm_ops = {
 	.suspend = stmmac_suspend,
 	.resume = stmmac_resume,
-#endif
+	.freeze = stmmac_freeze,
+	.thaw = stmmac_restore,
+	.restore = stmmac_restore,
+};
+#else
+static const struct dev_pm_ops stmmac_pm_ops;
+#endif /* CONFIG_PM */
 
+static struct platform_driver stmmac_driver = {
+	.probe = stmmac_dvr_probe,
+	.remove = stmmac_dvr_remove,
+	.driver = {
+		.name = STMMAC_RESOURCE_NAME,
+		.owner = THIS_MODULE,
+		.pm = &stmmac_pm_ops,
+	},
 };
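Switching from the legacy platform suspend/resume callbacks to struct
dev_pm_ops lets the PM core distinguish ordinary suspend from the
hibernation phases (freeze/thaw/restore), which is what removes the old
priv->shutdown special-casing above. For a driver whose freeze path is the
same as its suspend path, the SIMPLE_DEV_PM_OPS() helper gives a compact
sketch (callback names hypothetical):

	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

	static struct platform_driver foo_driver = {
		.driver = {
			.name	= "foo",
			.owner	= THIS_MODULE,
			.pm	= &foo_pm_ops,
		},
	};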
 
 /**
diff --git a/drivers/net/stmmac/stmmac_mdio.c b/drivers/net/stmmac/stmmac_mdio.c
index d744161..234b406 100644
--- a/drivers/net/stmmac/stmmac_mdio.c
+++ b/drivers/net/stmmac/stmmac_mdio.c
@@ -53,7 +53,7 @@
 	int data;
 	u16 regValue = (((phyaddr << 11) & (0x0000F800)) |
 			((phyreg << 6) & (0x000007C0)));
-	regValue |= MII_BUSY | ((priv->mii_clk_csr & 7) << 2);
+	regValue |= MII_BUSY | ((priv->plat->clk_csr & 7) << 2);
 
 	do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1);
 	writel(regValue, priv->ioaddr + mii_address);
@@ -85,7 +85,7 @@
 	    (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0)))
 	    | MII_WRITE;
 
-	value |= MII_BUSY | ((priv->mii_clk_csr & 7) << 2);
+	value |= MII_BUSY | ((priv->plat->clk_csr & 7) << 2);
 
 
 	/* Wait until any existing MII operation is complete */
@@ -114,7 +114,7 @@
 
 	if (priv->phy_reset) {
 		pr_debug("stmmac_mdio_reset: calling phy_reset\n");
-		priv->phy_reset(priv->bsp_priv);
+		priv->phy_reset(priv->plat->bsp_priv);
 	}
 
 	/* This is a workaround for problems with the STE101P PHY.
@@ -157,7 +157,7 @@
 	new_bus->read = &stmmac_mdio_read;
 	new_bus->write = &stmmac_mdio_write;
 	new_bus->reset = &stmmac_mdio_reset;
-	snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", priv->bus_id);
+	snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", priv->plat->bus_id);
 	new_bus->priv = ndev;
 	new_bus->irq = irqlist;
 	new_bus->phy_mask = priv->phy_mask;
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 4ceb3cf..9e992ca 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -2380,10 +2380,8 @@
 	 */
 	mutex_unlock(&gp->pm_mutex);
 
-	/* Wait for a pending reset task to complete */
-	while (gp->reset_task_pending)
-		yield();
-	flush_scheduled_work();
+	/* Wait for the pending reset task to complete */
+	flush_work_sync(&gp->reset_task);
 
 	/* Shut the PHY down eventually and setup WOL */
 	gem_stop_phy(gp, gp->asleep_wol);
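flush_work_sync() is used here rather than cancel_work_sync() because a
reset that is already pending should still complete before the PHY is shut
down; cancellation fits when the work's effects are no longer wanted, as in
the following hunk. Both are per-item and, unlike flush_scheduled_work(),
never wait on unrelated work sharing the queue:

	flush_work_sync(&gp->reset_task);	/* let a pending reset finish */
	cancel_work_sync(&gp->reset_task);	/* or drop it entirely */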
@@ -2928,10 +2926,8 @@
 		/* We shouldn't need any locking here */
 		gem_get_cell(gp);
 
-		/* Wait for a pending reset task to complete */
-		while (gp->reset_task_pending)
-			yield();
-		flush_scheduled_work();
+		/* Cancel reset task */
+		cancel_work_sync(&gp->reset_task);
 
 		/* Shut the PHY down */
 		gem_stop_phy(gp, 0);
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 2cf84e5..767e1e2 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -1295,17 +1295,9 @@
 	strcpy(info->version, "2.02");
 }
 
-static u32 sparc_lance_get_link(struct net_device *dev)
-{
-	/* We really do not keep track of this, but this
-	 * is better than not reporting anything at all.
-	 */
-	return 1;
-}
-
 static const struct ethtool_ops sparc_lance_ethtool_ops = {
 	.get_drvinfo		= sparc_lance_get_drvinfo,
-	.get_link		= sparc_lance_get_link,
+	.get_link		= ethtool_op_get_link,
 };
 
 static const struct net_device_ops sparc_lance_ops = {
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 30ccbb6..57e19fb 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -32,6 +32,7 @@
 #include <linux/etherdevice.h>
 #include <linux/skbuff.h>
 #include <linux/ethtool.h>
+#include <linux/mdio.h>
 #include <linux/mii.h>
 #include <linux/phy.h>
 #include <linux/brcmphy.h>
@@ -69,10 +70,10 @@
 
 #define DRV_MODULE_NAME		"tg3"
 #define TG3_MAJ_NUM			3
-#define TG3_MIN_NUM			115
+#define TG3_MIN_NUM			116
 #define DRV_MODULE_VERSION	\
 	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE	"October 14, 2010"
+#define DRV_MODULE_RELDATE	"December 3, 2010"
 
 #define TG3_DEF_MAC_MODE	0
 #define TG3_DEF_RX_MODE		0
@@ -1769,9 +1770,9 @@
 
 	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
 	    current_link_up == 1 &&
-	    (tp->link_config.active_speed == SPEED_1000 ||
-	     (tp->link_config.active_speed == SPEED_100 &&
-	      tp->link_config.active_duplex == DUPLEX_FULL))) {
+	    tp->link_config.active_duplex == DUPLEX_FULL &&
+	    (tp->link_config.active_speed == SPEED_100 ||
+	     tp->link_config.active_speed == SPEED_1000)) {
 		u32 eeectl;
 
 		if (tp->link_config.active_speed == SPEED_1000)
@@ -1781,7 +1782,8 @@
 
 		tw32(TG3_CPMU_EEE_CTRL, eeectl);
 
-		tg3_phy_cl45_read(tp, 0x7, TG3_CL45_D7_EEERES_STAT, &val);
+		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
+				  TG3_CL45_D7_EEERES_STAT, &val);
 
 		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
 		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
@@ -2728,12 +2730,10 @@
 		     (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
 			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
 
-		if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
-			mac_mode |= tp->mac_mode &
-				    (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
-			if (mac_mode & MAC_MODE_APE_TX_EN)
-				mac_mode |= MAC_MODE_TDE_ENABLE;
-		}
+		if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
+			mac_mode |= MAC_MODE_APE_TX_EN |
+				    MAC_MODE_APE_RX_EN |
+				    MAC_MODE_TDE_ENABLE;
 
 		tw32_f(MAC_MODE, mac_mode);
 		udelay(100);
@@ -2969,7 +2969,7 @@
 	}
 
 	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
-		u32 val = 0;
+		u32 val;
 
 		tw32(TG3_CPMU_EEE_MODE,
 		     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
@@ -2986,19 +2986,18 @@
 			tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2,
 					 val | MII_TG3_DSP_CH34TP2_HIBW01);
 
+		val = 0;
 		if (tp->link_config.autoneg == AUTONEG_ENABLE) {
 			/* Advertise 100-BaseTX EEE ability */
 			if (tp->link_config.advertising &
-			    (ADVERTISED_100baseT_Half |
-			     ADVERTISED_100baseT_Full))
-				val |= TG3_CL45_D7_EEEADV_CAP_100TX;
+			    ADVERTISED_100baseT_Full)
+				val |= MDIO_AN_EEE_ADV_100TX;
 			/* Advertise 1000-BaseT EEE ability */
 			if (tp->link_config.advertising &
-			    (ADVERTISED_1000baseT_Half |
-			     ADVERTISED_1000baseT_Full))
-				val |= TG3_CL45_D7_EEEADV_CAP_1000T;
+			    ADVERTISED_1000baseT_Full)
+				val |= MDIO_AN_EEE_ADV_1000T;
 		}
-		tg3_phy_cl45_write(tp, 0x7, TG3_CL45_D7_EEEADV_CAP, val);
+		tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
 
 		/* Turn off SM_DSP clock. */
 		val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
@@ -5763,7 +5762,7 @@
 	dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
 
 	if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
-	    !mss && skb->len > ETH_DATA_LEN)
+	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
 		base_flags |= TXD_FLAG_JMB_PKT;
 
 	tg3_set_txd(tnapi, entry, mapping, len, base_flags,
@@ -5997,7 +5996,7 @@
 #endif
 
 	if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
-	    !mss && skb->len > ETH_DATA_LEN)
+	    !mss && skb->len > VLAN_ETH_FRAME_LEN)
 		base_flags |= TXD_FLAG_JMB_PKT;
 
 	len = skb_headlen(skb);
@@ -6339,13 +6338,13 @@
 	kfree(tpr->rx_jmb_buffers);
 	tpr->rx_jmb_buffers = NULL;
 	if (tpr->rx_std) {
-		pci_free_consistent(tp->pdev, TG3_RX_STD_RING_BYTES(tp),
-				    tpr->rx_std, tpr->rx_std_mapping);
+		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
+				  tpr->rx_std, tpr->rx_std_mapping);
 		tpr->rx_std = NULL;
 	}
 	if (tpr->rx_jmb) {
-		pci_free_consistent(tp->pdev, TG3_RX_JMB_RING_BYTES(tp),
-				    tpr->rx_jmb, tpr->rx_jmb_mapping);
+		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
+				  tpr->rx_jmb, tpr->rx_jmb_mapping);
 		tpr->rx_jmb = NULL;
 	}
 }
@@ -6358,8 +6357,10 @@
 	if (!tpr->rx_std_buffers)
 		return -ENOMEM;
 
-	tpr->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_STD_RING_BYTES(tp),
-					   &tpr->rx_std_mapping);
+	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
+					 TG3_RX_STD_RING_BYTES(tp),
+					 &tpr->rx_std_mapping,
+					 GFP_KERNEL);
 	if (!tpr->rx_std)
 		goto err_out;
 
@@ -6370,9 +6371,10 @@
 		if (!tpr->rx_jmb_buffers)
 			goto err_out;
 
-		tpr->rx_jmb = pci_alloc_consistent(tp->pdev,
-						   TG3_RX_JMB_RING_BYTES(tp),
-						   &tpr->rx_jmb_mapping);
+		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
+						 TG3_RX_JMB_RING_BYTES(tp),
+						 &tpr->rx_jmb_mapping,
+						 GFP_KERNEL);
 		if (!tpr->rx_jmb)
 			goto err_out;
 	}
@@ -6491,7 +6493,7 @@
 		struct tg3_napi *tnapi = &tp->napi[i];
 
 		if (tnapi->tx_ring) {
-			pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
+			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
 				tnapi->tx_ring, tnapi->tx_desc_mapping);
 			tnapi->tx_ring = NULL;
 		}
@@ -6500,25 +6502,26 @@
 		tnapi->tx_buffers = NULL;
 
 		if (tnapi->rx_rcb) {
-			pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
-					    tnapi->rx_rcb,
-					    tnapi->rx_rcb_mapping);
+			dma_free_coherent(&tp->pdev->dev,
+					  TG3_RX_RCB_RING_BYTES(tp),
+					  tnapi->rx_rcb,
+					  tnapi->rx_rcb_mapping);
 			tnapi->rx_rcb = NULL;
 		}
 
 		tg3_rx_prodring_fini(tp, &tnapi->prodring);
 
 		if (tnapi->hw_status) {
-			pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
-					    tnapi->hw_status,
-					    tnapi->status_mapping);
+			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
+					  tnapi->hw_status,
+					  tnapi->status_mapping);
 			tnapi->hw_status = NULL;
 		}
 	}
 
 	if (tp->hw_stats) {
-		pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
-				    tp->hw_stats, tp->stats_mapping);
+		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
+				  tp->hw_stats, tp->stats_mapping);
 		tp->hw_stats = NULL;
 	}
 }
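pci_alloc_consistent() is a thin wrapper that hard-codes GFP_ATOMIC;
converting to the generic DMA API makes the allocation context explicit
(GFP_KERNEL here) and works for non-PCI buses too. The pairing, in sketch
form:

	buf = dma_alloc_coherent(&pdev->dev, size, &mapping, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	...
	dma_free_coherent(&pdev->dev, size, buf, mapping);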
@@ -6531,9 +6534,10 @@
 {
 	int i;
 
-	tp->hw_stats = pci_alloc_consistent(tp->pdev,
-					    sizeof(struct tg3_hw_stats),
-					    &tp->stats_mapping);
+	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
+					  sizeof(struct tg3_hw_stats),
+					  &tp->stats_mapping,
+					  GFP_KERNEL);
 	if (!tp->hw_stats)
 		goto err_out;
 
@@ -6543,9 +6547,10 @@
 		struct tg3_napi *tnapi = &tp->napi[i];
 		struct tg3_hw_status *sblk;
 
-		tnapi->hw_status = pci_alloc_consistent(tp->pdev,
-							TG3_HW_STATUS_SIZE,
-							&tnapi->status_mapping);
+		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
+						      TG3_HW_STATUS_SIZE,
+						      &tnapi->status_mapping,
+						      GFP_KERNEL);
 		if (!tnapi->hw_status)
 			goto err_out;
 
@@ -6566,9 +6571,10 @@
 			if (!tnapi->tx_buffers)
 				goto err_out;
 
-			tnapi->tx_ring = pci_alloc_consistent(tp->pdev,
-							      TG3_TX_RING_BYTES,
-						       &tnapi->tx_desc_mapping);
+			tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
+							    TG3_TX_RING_BYTES,
+							&tnapi->tx_desc_mapping,
+							    GFP_KERNEL);
 			if (!tnapi->tx_ring)
 				goto err_out;
 		}
@@ -6601,9 +6607,10 @@
 		if (!i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS))
 			continue;
 
-		tnapi->rx_rcb = pci_alloc_consistent(tp->pdev,
-						     TG3_RX_RCB_RING_BYTES(tp),
-						     &tnapi->rx_rcb_mapping);
+		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
+						   TG3_RX_RCB_RING_BYTES(tp),
+						   &tnapi->rx_rcb_mapping,
+						   GFP_KERNEL);
 		if (!tnapi->rx_rcb)
 			goto err_out;
 
@@ -6987,7 +6994,7 @@
 
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
 		if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
-			pcie_set_readrq(tp->pdev, 4096);
+			pcie_set_readrq(tp->pdev, tp->pcie_readrq);
 		else {
 			pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
 					      tp->pci_cacheline_sz);
@@ -7181,7 +7188,7 @@
 				      tp->pcie_cap + PCI_EXP_DEVCTL,
 				      val16);
 
-		pcie_set_readrq(tp->pdev, 4096);
+		pcie_set_readrq(tp->pdev, tp->pcie_readrq);
 
 		/* Clear error status */
 		pci_write_config_word(tp->pdev,
@@ -7222,19 +7229,21 @@
 		tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
 	}
 
+	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
+		tp->mac_mode = MAC_MODE_APE_TX_EN |
+			       MAC_MODE_APE_RX_EN |
+			       MAC_MODE_TDE_ENABLE;
+
 	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
-		tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
-		tw32_f(MAC_MODE, tp->mac_mode);
+		tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
+		val = tp->mac_mode;
 	} else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
-		tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
-		tw32_f(MAC_MODE, tp->mac_mode);
-	} else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
-		tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
-		if (tp->mac_mode & MAC_MODE_APE_TX_EN)
-			tp->mac_mode |= MAC_MODE_TDE_ENABLE;
-		tw32_f(MAC_MODE, tp->mac_mode);
+		tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
+		val = tp->mac_mode;
 	} else
-		tw32_f(MAC_MODE, 0);
+		val = 0;
+
+	tw32_f(MAC_MODE, val);
 	udelay(40);
 
 	tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
@@ -7801,6 +7810,37 @@
 	if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
 		tg3_abort_hw(tp, 1);
 
+	/* Enable MAC control of LPI */
+	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
+		tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
+		       TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
+		       TG3_CPMU_EEE_LNKIDL_UART_IDL);
+
+		tw32_f(TG3_CPMU_EEE_CTRL,
+		       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
+
+		val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
+		      TG3_CPMU_EEEMD_LPI_IN_TX |
+		      TG3_CPMU_EEEMD_LPI_IN_RX |
+		      TG3_CPMU_EEEMD_EEE_ENABLE;
+
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
+			val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
+
+		if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
+			val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
+
+		tw32_f(TG3_CPMU_EEE_MODE, val);
+
+		tw32_f(TG3_CPMU_EEE_DBTMR1,
+		       TG3_CPMU_DBTMR1_PCIEXIT_2047US |
+		       TG3_CPMU_DBTMR1_LNKIDLE_2047US);
+
+		tw32_f(TG3_CPMU_EEE_DBTMR2,
+		       TG3_CPMU_DBTMR1_APE_TX_2047US |
+		       TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
+	}
+
 	if (reset_phy)
 		tg3_phy_reset(tp);
 
@@ -7860,18 +7900,21 @@
 		tw32(GRC_MODE, grc_mode);
 	}
 
-	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
-		u32 grc_mode = tr32(GRC_MODE);
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
+		if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
+			u32 grc_mode = tr32(GRC_MODE);
 
-		/* Access the lower 1K of PL PCIE block registers. */
-		val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
-		tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
+			/* Access the lower 1K of PL PCIE block registers. */
+			val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
+			tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
 
-		val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5);
-		tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
-		     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
+			val = tr32(TG3_PCIE_TLDLPL_PORT +
+				   TG3_PCIE_PL_LO_PHYCTL5);
+			tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
+			     val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
 
-		tw32(GRC_MODE, grc_mode);
+			tw32(GRC_MODE, grc_mode);
+		}
 
 		val = tr32(TG3_CPMU_LSPD_10MB_CLK);
 		val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
@@ -7879,22 +7922,6 @@
 		tw32(TG3_CPMU_LSPD_10MB_CLK, val);
 	}
 
-	/* Enable MAC control of LPI */
-	if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
-		tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
-		       TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
-		       TG3_CPMU_EEE_LNKIDL_UART_IDL);
-
-		tw32_f(TG3_CPMU_EEE_CTRL,
-		       TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
-
-		tw32_f(TG3_CPMU_EEE_MODE,
-		       TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
-		       TG3_CPMU_EEEMD_LPI_IN_TX |
-		       TG3_CPMU_EEEMD_LPI_IN_RX |
-		       TG3_CPMU_EEEMD_EEE_ENABLE);
-	}
-
 	/* This works around an issue with Athlon chipsets on
 	 * B3 tigon3 silicon.  This bit has no effect on any
 	 * other revision.  But do not set this on PCI Express
@@ -8162,8 +8189,7 @@
 		      RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
 		      RDMAC_MODE_LNGREAD_ENAB);
 
-	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
-	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
 		rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
 
 	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
@@ -8203,6 +8229,10 @@
 	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
 	    (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
 		val = tr32(TG3_RDMA_RSRVCTRL_REG);
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
+			val &= ~TG3_RDMA_RSRVCTRL_TXMRGN_MASK;
+			val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B;
+		}
 		tw32(TG3_RDMA_RSRVCTRL_REG,
 		     val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
 	}
@@ -8280,7 +8310,7 @@
 	}
 
 	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
-		tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
+		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
 	else
 		tp->mac_mode = 0;
 	tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
@@ -9031,8 +9061,14 @@
 		pci_disable_msix(tp->pdev);
 		return false;
 	}
-	if (tp->irq_cnt > 1)
+
+	if (tp->irq_cnt > 1) {
 		tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS;
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
+			tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS;
+			netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
+		}
+	}
 
 	return true;
 }
@@ -12411,8 +12447,9 @@
 		if (cfg2 & (1 << 18))
 			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
 
-		if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
-		      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
+		if (((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) ||
+		    ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
+		      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX))) &&
 		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
 			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
 
@@ -12548,9 +12585,11 @@
 		}
 	}
 
-	if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
-	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
-	     tp->pci_chip_rev_id != CHIPREV_ID_57765_A0))
+	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
+	    ((tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
+	      tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
+	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
+	      tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
 		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
 
 	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
@@ -13359,7 +13398,45 @@
 
 		tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
 
-		pcie_set_readrq(tp->pdev, 4096);
+		tp->pcie_readrq = 4096;
+		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
+			u16 word;
+
+			pci_read_config_word(tp->pdev,
+					     tp->pcie_cap + PCI_EXP_LNKSTA,
+					     &word);
+			switch (word & PCI_EXP_LNKSTA_CLS) {
+			case PCI_EXP_LNKSTA_CLS_2_5GB:
+				word &= PCI_EXP_LNKSTA_NLW;
+				word >>= PCI_EXP_LNKSTA_NLW_SHIFT;
+				switch (word) {
+				case 2:
+					tp->pcie_readrq = 2048;
+					break;
+				case 4:
+					tp->pcie_readrq = 1024;
+					break;
+				}
+				break;
+
+			case PCI_EXP_LNKSTA_CLS_5_0GB:
+				word &= PCI_EXP_LNKSTA_NLW;
+				word >>= PCI_EXP_LNKSTA_NLW_SHIFT;
+				switch (word) {
+				case 1:
+					tp->pcie_readrq = 2048;
+					break;
+				case 2:
+					tp->pcie_readrq = 1024;
+					break;
+				case 4:
+					tp->pcie_readrq = 512;
+					break;
+				}
+			}
+		}
+
+		pcie_set_readrq(tp->pdev, tp->pcie_readrq);
 
 		pci_read_config_word(tp->pdev,
 				     tp->pcie_cap + PCI_EXP_LNKCTL,
@@ -13722,8 +13799,7 @@
 
 	/* Preserve the APE MAC_MODE bits */
 	if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
-		tp->mac_mode = tr32(MAC_MODE) |
-			       MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
+		tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
 	else
 		tp->mac_mode = TG3_DEF_MAC_MODE;
 
@@ -14159,7 +14235,8 @@
 	u32 *buf, saved_dma_rwctrl;
 	int ret = 0;
 
-	buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
+	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
+				 &buf_dma, GFP_KERNEL);
 	if (!buf) {
 		ret = -ENOMEM;
 		goto out_nofree;
@@ -14343,7 +14420,7 @@
 	}
 
 out:
-	pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
+	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
 out_nofree:
 	return ret;
 }
@@ -14957,7 +15034,7 @@
 		if (tp->fw)
 			release_firmware(tp->fw);
 
-		flush_scheduled_work();
+		cancel_work_sync(&tp->reset_task);
 
 		if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
 			tg3_phy_fini(tp);
@@ -14996,7 +15073,7 @@
 	if (!netif_running(dev))
 		return 0;
 
-	flush_scheduled_work();
+	flush_work_sync(&tp->reset_task);
 	tg3_phy_stop(tp);
 	tg3_netif_stop(tp);
 
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 4a19748..d62c8d9 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -1094,13 +1094,19 @@
 /* 0x3664 --> 0x36b0 unused */
 
 #define TG3_CPMU_EEE_MODE		0x000036b0
-#define TG3_CPMU_EEEMD_ERLY_L1_XIT_DET	 0x00000008
-#define TG3_CPMU_EEEMD_LPI_ENABLE	 0x00000080
-#define TG3_CPMU_EEEMD_LPI_IN_TX	 0x00000100
-#define TG3_CPMU_EEEMD_LPI_IN_RX	 0x00000200
-#define TG3_CPMU_EEEMD_EEE_ENABLE	 0x00100000
-/* 0x36b4 --> 0x36b8 unused */
-
+#define  TG3_CPMU_EEEMD_APE_TX_DET_EN	 0x00000004
+#define  TG3_CPMU_EEEMD_ERLY_L1_XIT_DET	 0x00000008
+#define  TG3_CPMU_EEEMD_SND_IDX_DET_EN	 0x00000040
+#define  TG3_CPMU_EEEMD_LPI_ENABLE	 0x00000080
+#define  TG3_CPMU_EEEMD_LPI_IN_TX	 0x00000100
+#define  TG3_CPMU_EEEMD_LPI_IN_RX	 0x00000200
+#define  TG3_CPMU_EEEMD_EEE_ENABLE	 0x00100000
+#define TG3_CPMU_EEE_DBTMR1		0x000036b4
+#define  TG3_CPMU_DBTMR1_PCIEXIT_2047US	 0x07ff0000
+#define  TG3_CPMU_DBTMR1_LNKIDLE_2047US	 0x000070ff
+#define TG3_CPMU_EEE_DBTMR2		0x000036b8
+#define  TG3_CPMU_DBTMR1_APE_TX_2047US	 0x07ff0000
+#define  TG3_CPMU_DBTMR2_TXIDXEQ_2047US	 0x000070ff
 #define TG3_CPMU_EEE_LNKIDL_CTRL	0x000036bc
 #define  TG3_CPMU_EEE_LNKIDL_PCIE_NL0	 0x01000000
 #define  TG3_CPMU_EEE_LNKIDL_UART_IDL	 0x00000004
@@ -1327,6 +1333,8 @@
 
 #define TG3_RDMA_RSRVCTRL_REG		0x00004900
 #define TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX	 0x00000004
+#define TG3_RDMA_RSRVCTRL_TXMRGN_320B	 0x28000000
+#define TG3_RDMA_RSRVCTRL_TXMRGN_MASK	 0xffe00000
 /* 0x4904 --> 0x4910 unused */
 
 #define TG3_LSO_RD_DMA_CRPTEN_CTRL	0x00004910
@@ -2170,9 +2178,6 @@
 #define MII_TG3_TEST1_CRC_EN		0x8000
 
 /* Clause 45 expansion registers */
-#define TG3_CL45_D7_EEEADV_CAP		0x003c
-#define TG3_CL45_D7_EEEADV_CAP_100TX	0x0002
-#define TG3_CL45_D7_EEEADV_CAP_1000T	0x0004
 #define TG3_CL45_D7_EEERES_STAT		0x803e
 #define TG3_CL45_D7_EEERES_STAT_LP_100TX	0x0002
 #define TG3_CL45_D7_EEERES_STAT_LP_1000T	0x0004
@@ -2562,10 +2567,6 @@
 	DEFINE_DMA_UNMAP_ADDR(mapping);
 };
 
-struct tg3_config_info {
-	u32				flags;
-};
-
 struct tg3_link_config {
 	/* Describes what we're trying to get. */
 	u32				advertising;
@@ -2713,17 +2714,17 @@
 	u32				last_irq_tag;
 	u32				int_mbox;
 	u32				coal_now;
-	u32				tx_prod;
-	u32				tx_cons;
-	u32				tx_pending;
-	u32				prodmbox;
 
-	u32				consmbox;
+	u32				consmbox ____cacheline_aligned;
 	u32				rx_rcb_ptr;
 	u16				*rx_rcb_prod_idx;
 	struct tg3_rx_prodring_set	prodring;
-
 	struct tg3_rx_buffer_desc	*rx_rcb;
+
+	u32				tx_prod	____cacheline_aligned;
+	u32				tx_cons;
+	u32				tx_pending;
+	u32				prodmbox;
 	struct tg3_tx_buffer_desc	*tx_ring;
 	struct ring_info		*tx_buffers;
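____cacheline_aligned starts the annotated member on a new cache line.
Splitting the RX-consumer fields from the TX-producer fields this way keeps
the two hot paths from bouncing a single shared line between CPUs (false
sharing). A minimal sketch (names hypothetical):

	struct queue_hot {
		u32 rx_cons ____cacheline_aligned;	/* RX path only */
		u32 tx_prod ____cacheline_aligned;	/* TX path only */
	};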
 
@@ -2946,6 +2947,7 @@
 	int				pcix_cap;
 	int				pcie_cap;
 	};
+	int				pcie_readrq;
 
 	struct mii_bus			*mdio_bus;
 	int				mdio_irq[PHY_MAX_ADDR];
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index 28e1ffb..c78a505 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -2021,7 +2021,6 @@
 	de->media_timer.data = (unsigned long) de;
 
 	netif_carrier_off(dev);
-	netif_stop_queue(dev);
 
 	/* wake up device, assign resources */
 	rc = pci_enable_device(pdev);
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index a9f7d5d..7064e03 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -688,9 +688,6 @@
 
 	DMFE_DBUG(0, "dmfe_start_xmit", 0);
 
-	/* Resource flag check */
-	netif_stop_queue(dev);
-
 	/* Too large packet check */
 	if (skb->len > MAX_PACKET_SIZE) {
 		pr_err("big packet = %d\n", (u16)skb->len);
@@ -698,6 +695,9 @@
 		return NETDEV_TX_OK;
 	}
 
+	/* Resource flag check */
+	netif_stop_queue(dev);
+
 	spin_lock_irqsave(&db->lock, flags);
 
 	/* No Tx resource check, it never happens normally */
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index a4c3f57..acbdab3 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -2050,12 +2050,16 @@
 
 	ugeth_vdbg("%s: IN", __func__);
 
+	/*
+	 * Tell the kernel the link is down.
+	 * Must be done before disabling the controller
+	 * or deadlock may happen.
+	 */
+	phy_stop(phydev);
+
 	/* Disable the controller */
 	ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
 
-	/* Tell the kernel the link is down */
-	phy_stop(phydev);
-
 	/* Mask all interrupts */
 	out_be32(ugeth->uccf->p_uccm, 0x00000000);
 
@@ -2065,9 +2069,6 @@
 	/* Disable Rx and Tx */
 	clrbits32(&ug_regs->maccfg1, MACCFG1_ENABLE_RX | MACCFG1_ENABLE_TX);
 
-	phy_disconnect(ugeth->phydev);
-	ugeth->phydev = NULL;
-
 	ucc_geth_memclean(ugeth);
 }
 
@@ -3550,7 +3551,10 @@
 
 	napi_disable(&ugeth->napi);
 
+	cancel_work_sync(&ugeth->timeout_work);
 	ucc_geth_stop(ugeth);
+	phy_disconnect(ugeth->phydev);
+	ugeth->phydev = NULL;
 
 	free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev);
 
@@ -3579,8 +3583,12 @@
 		 * Must reset MAC *and* PHY. This is done by reopening
 		 * the device.
 		 */
-		ucc_geth_close(dev);
-		ucc_geth_open(dev);
+		netif_tx_stop_all_queues(dev);
+		ucc_geth_stop(ugeth);
+		ucc_geth_init_mac(ugeth);
+		/* Must start PHY here */
+		phy_start(ugeth->phydev);
+		netif_tx_start_all_queues(dev);
 	}
 
 	netif_tx_schedule_all(dev);
@@ -3594,7 +3602,6 @@
 {
 	struct ucc_geth_private *ugeth = netdev_priv(dev);
 
-	netif_carrier_off(dev);
 	schedule_work(&ugeth->timeout_work);
 }
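Because ndo_tx_timeout runs in atomic context (under the netdev watchdog),
the actual recovery is deferred to a work item, and the close path must
cancel that work before tearing the device down, as the earlier hunk does
with cancel_work_sync(). In sketch form (names hypothetical):

	static void foo_tx_timeout(struct net_device *dev)
	{
		struct foo_priv *priv = netdev_priv(dev);

		schedule_work(&priv->timeout_work); /* defer to process context */
	}

	static int foo_close(struct net_device *dev)
	{
		struct foo_priv *priv = netdev_priv(dev);

		cancel_work_sync(&priv->timeout_work); /* cannot run after this */
		/* ...stop the hardware... */
		return 0;
	}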
 
diff --git a/drivers/net/ucc_geth.h b/drivers/net/ucc_geth.h
index 05a9558..055b87a 100644
--- a/drivers/net/ucc_geth.h
+++ b/drivers/net/ucc_geth.h
@@ -899,7 +899,8 @@
 #define UCC_GETH_UTFS_INIT                      512	/* Tx virtual FIFO size
 							 */
 #define UCC_GETH_UTFET_INIT                     256	/* 1/2 utfs */
-#define UCC_GETH_UTFTT_INIT                     512
+#define UCC_GETH_UTFTT_INIT                     256	/* 1/2 utfs
+							   due to errata */
 /* Gigabit Ethernet (1000 Mbps) */
 #define UCC_GETH_URFS_GIGA_INIT                 4096/*2048*/	/* Rx virtual
 								   FIFO size */
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 52ffabe..6f600cc 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -196,6 +196,25 @@
 	  IEEE 802 "local assignment" bit is set in the address, a "usbX"
 	  name is used instead.
 
+config USB_NET_CDC_NCM
+	tristate "CDC NCM support"
+	depends on USB_USBNET
+	default y
+	help
+	  This driver provides support for CDC NCM (Network Control Model
+	  Device USB Class Specification). The CDC NCM specification is
+	  available from <http://www.usb.org/>.
+
+	  Say "y" to link the driver statically, or "m" to build a
+	  dynamically linked module.
+
+	  This driver should work with at least the following devices:
+	    * ST-Ericsson M700 LTE FDD/TDD Mobile Broadband Modem (ref. design)
+	    * ST-Ericsson M5730 HSPA+ Mobile Broadband Modem (reference design)
+	    * ST-Ericsson M570 HSPA+ Mobile Broadband Modem (reference design)
+	    * ST-Ericsson M343 HSPA Mobile Broadband Modem (reference design)
+	    * Ericsson F5521gw Mobile Broadband Module
+
 config USB_NET_DM9601
 	tristate "Davicom DM9601 based USB 1.1 10/100 ethernet devices"
 	depends on USB_USBNET
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index a19b025..cac1703 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -26,4 +26,5 @@
 obj-$(CONFIG_USB_IPHETH)	+= ipheth.o
 obj-$(CONFIG_USB_SIERRA_NET)	+= sierra_net.o
 obj-$(CONFIG_USB_NET_CX82310_ETH)	+= cx82310_eth.o
+obj-$(CONFIG_USB_NET_CDC_NCM)	+= cdc_ncm.o
 
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
new file mode 100644
index 0000000..593c104
--- /dev/null
+++ b/drivers/net/usb/cdc_ncm.c
@@ -0,0 +1,1213 @@
+/*
+ * cdc_ncm.c
+ *
+ * Copyright (C) ST-Ericsson 2010
+ * Contact: Alexey Orishko <alexey.orishko@stericsson.com>
+ * Original author: Hans Petter Selasky <hans.petter.selasky@stericsson.com>
+ *
+ * USB Host Driver for Network Control Model (NCM)
+ * http://www.usb.org/developers/devclass_docs/NCM10.zip
+ *
+ * The NCM encoding, decoding and initialization logic
+ * derives from FreeBSD 8.x. if_cdce.c and if_cdcereg.h
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose this file to be licensed under the terms
+ * of the GNU General Public License (GPL) Version 2 or the 2-clause
+ * BSD license listed below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/ctype.h>
+#include <linux/ethtool.h>
+#include <linux/workqueue.h>
+#include <linux/mii.h>
+#include <linux/crc32.h>
+#include <linux/usb.h>
+#include <linux/version.h>
+#include <linux/timer.h>
+#include <linux/spinlock.h>
+#include <linux/atomic.h>
+#include <linux/usb/usbnet.h>
+#include <linux/usb/cdc.h>
+
+#define	DRIVER_VERSION				"30-Nov-2010"
+
+/* CDC NCM subclass 3.2.1 */
+#define USB_CDC_NCM_NDP16_LENGTH_MIN		0x10
+
+/* Maximum NTB length */
+#define	CDC_NCM_NTB_MAX_SIZE_TX			16384	/* bytes */
+#define	CDC_NCM_NTB_MAX_SIZE_RX			16384	/* bytes */
+
+/* Minimum value for MaxDatagramSize, ch. 6.2.9 */
+#define	CDC_NCM_MIN_DATAGRAM_SIZE		1514	/* bytes */
+
+#define	CDC_NCM_MIN_TX_PKT			512	/* bytes */
+
+/* Default value for MaxDatagramSize */
+#define	CDC_NCM_MAX_DATAGRAM_SIZE		2048	/* bytes */
+
+/*
+ * Maximum number of datagrams in the NCM Datagram Pointer Table, not
+ * counting the terminating NULL entry. Any additional datagrams in an
+ * NTB are discarded.
+ */
+#define	CDC_NCM_DPT_DATAGRAMS_MAX		32
+
+/* Restart the timer if the number of datagrams is less than the given value */
+#define	CDC_NCM_RESTART_TIMER_DATAGRAM_CNT	3
+
+/* The following macro defines the minimum header space */
+#define	CDC_NCM_MIN_HDR_SIZE \
+	(sizeof(struct usb_cdc_ncm_nth16) + sizeof(struct usb_cdc_ncm_ndp16) + \
+	(CDC_NCM_DPT_DATAGRAMS_MAX + 1) * sizeof(struct usb_cdc_ncm_dpe16))
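+/*
+ * With the NTB16 structure sizes (12-byte NTH16, 8-byte NDP16 and 4 bytes
+ * per DPE16 entry) this evaluates to 12 + 8 + 33 * 4 = 152 bytes of
+ * header space, assuming the NCM 1.0 structure layouts.
+ */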
+
+struct connection_speed_change {
+	__le32	USBitRate; /* holds 3GPP downlink value, bits per second */
+	__le32	DSBitRate; /* holds 3GPP uplink value, bits per second */
+} __attribute__ ((packed));
+
+struct cdc_ncm_data {
+	struct usb_cdc_ncm_nth16 nth16;
+	struct usb_cdc_ncm_ndp16 ndp16;
+	struct usb_cdc_ncm_dpe16 dpe16[CDC_NCM_DPT_DATAGRAMS_MAX + 1];
+};
+
+struct cdc_ncm_ctx {
+	struct cdc_ncm_data rx_ncm;
+	struct cdc_ncm_data tx_ncm;
+	struct usb_cdc_ncm_ntb_parameters ncm_parm;
+	struct timer_list tx_timer;
+
+	const struct usb_cdc_ncm_desc *func_desc;
+	const struct usb_cdc_header_desc *header_desc;
+	const struct usb_cdc_union_desc *union_desc;
+	const struct usb_cdc_ether_desc *ether_desc;
+
+	struct net_device *netdev;
+	struct usb_device *udev;
+	struct usb_host_endpoint *in_ep;
+	struct usb_host_endpoint *out_ep;
+	struct usb_host_endpoint *status_ep;
+	struct usb_interface *intf;
+	struct usb_interface *control;
+	struct usb_interface *data;
+
+	struct sk_buff *tx_curr_skb;
+	struct sk_buff *tx_rem_skb;
+
+	spinlock_t mtx;
+
+	u32 tx_timer_pending;
+	u32 tx_curr_offset;
+	u32 tx_curr_last_offset;
+	u32 tx_curr_frame_num;
+	u32 rx_speed;
+	u32 tx_speed;
+	u32 rx_max;
+	u32 tx_max;
+	u32 max_datagram_size;
+	u16 tx_max_datagrams;
+	u16 tx_remainder;
+	u16 tx_modulus;
+	u16 tx_ndp_modulus;
+	u16 tx_seq;
+	u16 connected;
+	u8 data_claimed;
+	u8 control_claimed;
+};
+
+static void cdc_ncm_tx_timeout(unsigned long arg);
+static const struct driver_info cdc_ncm_info;
+static struct usb_driver cdc_ncm_driver;
+static struct ethtool_ops cdc_ncm_ethtool_ops;
+
+static const struct usb_device_id cdc_devs[] = {
+	{ USB_INTERFACE_INFO(USB_CLASS_COMM,
+		USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
+		.driver_info = (unsigned long)&cdc_ncm_info,
+	},
+	{
+	},
+};
+
+MODULE_DEVICE_TABLE(usb, cdc_devs);
+
+static void
+cdc_ncm_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
+{
+	struct usbnet *dev = netdev_priv(net);
+
+	strncpy(info->driver, dev->driver_name, sizeof(info->driver));
+	strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
+	strncpy(info->fw_version, dev->driver_info->description,
+		sizeof(info->fw_version));
+	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
+}
+
+static int
+cdc_ncm_do_request(struct cdc_ncm_ctx *ctx, struct usb_cdc_notification *req,
+		   void *data, u16 flags, u16 *actlen, u16 timeout)
+{
+	int err;
+
+	err = usb_control_msg(ctx->udev, (req->bmRequestType & USB_DIR_IN) ?
+				usb_rcvctrlpipe(ctx->udev, 0) :
+				usb_sndctrlpipe(ctx->udev, 0),
+				req->bNotificationType, req->bmRequestType,
+				req->wValue,
+				req->wIndex, data,
+				req->wLength, timeout);
+
+	if (err < 0) {
+		if (actlen)
+			*actlen = 0;
+		return err;
+	}
+
+	if (actlen)
+		*actlen = err;
+
+	return 0;
+}
+
+static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
+{
+	struct usb_cdc_notification req;
+	u32 val;
+	__le16 max_datagram_size;
+	u8 flags;
+	u8 iface_no;
+	int err;
+
+	iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
+
+	req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE;
+	req.bNotificationType = USB_CDC_GET_NTB_PARAMETERS;
+	req.wValue = 0;
+	req.wIndex = cpu_to_le16(iface_no);
+	req.wLength = cpu_to_le16(sizeof(ctx->ncm_parm));
+
+	err = cdc_ncm_do_request(ctx, &req, &ctx->ncm_parm, 0, NULL, 1000);
+	if (err) {
+		pr_debug("failed GET_NTB_PARAMETERS\n");
+		return 1;
+	}
+
+	/* read correct set of parameters according to device mode */
+	ctx->rx_max = le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize);
+	ctx->tx_max = le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize);
+	ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder);
+	ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor);
+	ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment);
+
+	if (ctx->func_desc != NULL)
+		flags = ctx->func_desc->bmNetworkCapabilities;
+	else
+		flags = 0;
+
+	pr_debug("dwNtbInMaxSize=%u dwNtbOutMaxSize=%u "
+		 "wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u "
+		 "wNdpOutAlignment=%u flags=0x%x\n",
+		 ctx->rx_max, ctx->tx_max, ctx->tx_remainder, ctx->tx_modulus,
+		 ctx->tx_ndp_modulus, flags);
+
+	/* max count of tx datagrams without terminating NULL entry */
+	ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX;
+
+	/* verify maximum size of received NTB in bytes */
+	if ((ctx->rx_max <
+	    (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) ||
+	    (ctx->rx_max > CDC_NCM_NTB_MAX_SIZE_RX)) {
+		pr_debug("Using default maximum receive length=%d\n",
+						CDC_NCM_NTB_MAX_SIZE_RX);
+		ctx->rx_max = CDC_NCM_NTB_MAX_SIZE_RX;
+	}
+
+	/* verify maximum size of transmitted NTB in bytes */
+	if ((ctx->tx_max <
+	    (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) ||
+	    (ctx->tx_max > CDC_NCM_NTB_MAX_SIZE_TX)) {
+		pr_debug("Using default maximum transmit length=%d\n",
+						CDC_NCM_NTB_MAX_SIZE_TX);
+		ctx->tx_max = CDC_NCM_NTB_MAX_SIZE_TX;
+	}
+
+	/*
+	 * verify that the structure alignment is:
+	 * - power of two
+	 * - not greater than the maximum transmit length
+	 * - not less than four bytes
+	 */
+	val = ctx->tx_ndp_modulus;
+
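+	/*
+	 * For a non-zero val, ((-val) & val) isolates the lowest set bit,
+	 * so "val != ((-val) & val)" rejects any value that is not a power
+	 * of two: 64 passes (64 & -64 == 64), 96 fails (96 & -96 == 32).
+	 */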
+	if ((val < USB_CDC_NCM_NDP_ALIGN_MIN_SIZE) ||
+	    (val != ((-val) & val)) || (val >= ctx->tx_max)) {
+		pr_debug("Using default alignment: 4 bytes\n");
+		ctx->tx_ndp_modulus = USB_CDC_NCM_NDP_ALIGN_MIN_SIZE;
+	}
+
+	/*
+	 * verify that the payload alignment is:
+	 * - power of two
+	 * - not greater than the maximum transmit length
+	 * - not less than four bytes
+	 */
+	val = ctx->tx_modulus;
+
+	if ((val < USB_CDC_NCM_NDP_ALIGN_MIN_SIZE) ||
+	    (val != ((-val) & val)) || (val >= ctx->tx_max)) {
+		pr_debug("Using default transmit modulus: 4 bytes\n");
+		ctx->tx_modulus = USB_CDC_NCM_NDP_ALIGN_MIN_SIZE;
+	}
+
+	/* verify the payload remainder */
+	if (ctx->tx_remainder >= ctx->tx_modulus) {
+		pr_debug("Using default transmit remainder: 0 bytes\n");
+		ctx->tx_remainder = 0;
+	}
+
+	/*
+	 * Adjust TX-remainder according to the NCM specification: the
+	 * modulus/remainder pair aligns the start of the IP header,
+	 * which begins ETH_HLEN bytes into each datagram.
+	 */
+	ctx->tx_remainder = ((ctx->tx_remainder - ETH_HLEN) &
+						(ctx->tx_modulus - 1));
+
+	/* additional configuration */
+
+	/* set CRC Mode */
+	req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE;
+	req.bNotificationType = USB_CDC_SET_CRC_MODE;
+	req.wValue = cpu_to_le16(USB_CDC_NCM_CRC_NOT_APPENDED);
+	req.wIndex = cpu_to_le16(iface_no);
+	req.wLength = 0;
+
+	err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
+	if (err)
+		pr_debug("Setting CRC mode off failed\n");
+
+	/* set NTB format */
+	req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE;
+	req.bNotificationType = USB_CDC_SET_NTB_FORMAT;
+	req.wValue = cpu_to_le16(USB_CDC_NCM_NTB16_FORMAT);
+	req.wIndex = cpu_to_le16(iface_no);
+	req.wLength = 0;
+
+	err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
+	if (err)
+		pr_debug("Setting NTB format to 16-bit failed\n");
+
+	/* set Max Datagram Size (MTU) */
+	req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE;
+	req.bNotificationType = USB_CDC_GET_MAX_DATAGRAM_SIZE;
+	req.wValue = 0;
+	req.wIndex = cpu_to_le16(iface_no);
+	req.wLength = cpu_to_le16(2);
+
+	err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, 0, NULL, 1000);
+	if (err) {
+		pr_debug(" GET_MAX_DATAGRAM_SIZE failed, using size=%u\n",
+			 CDC_NCM_MIN_DATAGRAM_SIZE);
+		/* use default */
+		ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
+	} else {
+		ctx->max_datagram_size = le16_to_cpu(max_datagram_size);
+
+		if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE)
+			ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
+		else if (ctx->max_datagram_size > CDC_NCM_MAX_DATAGRAM_SIZE)
+			ctx->max_datagram_size = CDC_NCM_MAX_DATAGRAM_SIZE;
+	}
+
+	if (ctx->netdev->mtu != (ctx->max_datagram_size - ETH_HLEN))
+		ctx->netdev->mtu = ctx->max_datagram_size - ETH_HLEN;
+
+	return 0;
+}
+
+static void
+cdc_ncm_find_endpoints(struct cdc_ncm_ctx *ctx, struct usb_interface *intf)
+{
+	struct usb_host_endpoint *e;
+	u8 ep;
+
+	for (ep = 0; ep < intf->cur_altsetting->desc.bNumEndpoints; ep++) {
+
+		e = intf->cur_altsetting->endpoint + ep;
+		switch (e->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
+		case USB_ENDPOINT_XFER_INT:
+			if (usb_endpoint_dir_in(&e->desc)) {
+				if (ctx->status_ep == NULL)
+					ctx->status_ep = e;
+			}
+			break;
+
+		case USB_ENDPOINT_XFER_BULK:
+			if (usb_endpoint_dir_in(&e->desc)) {
+				if (ctx->in_ep == NULL)
+					ctx->in_ep = e;
+			} else {
+				if (ctx->out_ep == NULL)
+					ctx->out_ep = e;
+			}
+			break;
+
+		default:
+			break;
+		}
+	}
+}
+
+static void cdc_ncm_free(struct cdc_ncm_ctx *ctx)
+{
+	if (ctx == NULL)
+		return;
+
+	del_timer_sync(&ctx->tx_timer);
+
+	if (ctx->data_claimed) {
+		usb_set_intfdata(ctx->data, NULL);
+		usb_driver_release_interface(driver_of(ctx->intf), ctx->data);
+	}
+
+	if (ctx->control_claimed) {
+		usb_set_intfdata(ctx->control, NULL);
+		usb_driver_release_interface(driver_of(ctx->intf),
+								ctx->control);
+	}
+
+	if (ctx->tx_rem_skb != NULL) {
+		dev_kfree_skb_any(ctx->tx_rem_skb);
+		ctx->tx_rem_skb = NULL;
+	}
+
+	if (ctx->tx_curr_skb != NULL) {
+		dev_kfree_skb_any(ctx->tx_curr_skb);
+		ctx->tx_curr_skb = NULL;
+	}
+
+	kfree(ctx);
+}
+
+static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+	struct cdc_ncm_ctx *ctx;
+	struct usb_driver *driver;
+	u8 *buf;
+	int len;
+	int temp;
+	u8 iface_no;
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (ctx == NULL)
+		goto error;
+
+	init_timer(&ctx->tx_timer);
+	spin_lock_init(&ctx->mtx);
+	ctx->netdev = dev->net;
+
+	/* store ctx pointer in device data field */
+	dev->data[0] = (unsigned long)ctx;
+
+	/* get some pointers */
+	driver = driver_of(intf);
+	buf = intf->cur_altsetting->extra;
+	len = intf->cur_altsetting->extralen;
+
+	ctx->udev = dev->udev;
+	ctx->intf = intf;
+
+	/* parse through descriptors associated with control interface */
+	while ((len > 0) && (buf[0] > 2) && (buf[0] <= len)) {
+
+		if (buf[1] != USB_DT_CS_INTERFACE)
+			goto advance;
+
+		switch (buf[2]) {
+		case USB_CDC_UNION_TYPE:
+			if (buf[0] < sizeof(*(ctx->union_desc)))
+				break;
+
+			ctx->union_desc =
+					(const struct usb_cdc_union_desc *)buf;
+
+			ctx->control = usb_ifnum_to_if(dev->udev,
+					ctx->union_desc->bMasterInterface0);
+			ctx->data = usb_ifnum_to_if(dev->udev,
+					ctx->union_desc->bSlaveInterface0);
+			break;
+
+		case USB_CDC_ETHERNET_TYPE:
+			if (buf[0] < sizeof(*(ctx->ether_desc)))
+				break;
+
+			ctx->ether_desc =
+					(const struct usb_cdc_ether_desc *)buf;
+
+			dev->hard_mtu =
+				le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
+
+			if (dev->hard_mtu <
+			    (CDC_NCM_MIN_DATAGRAM_SIZE - ETH_HLEN))
+				dev->hard_mtu =
+					CDC_NCM_MIN_DATAGRAM_SIZE - ETH_HLEN;
+
+			else if (dev->hard_mtu >
+				 (CDC_NCM_MAX_DATAGRAM_SIZE - ETH_HLEN))
+				dev->hard_mtu =
+					CDC_NCM_MAX_DATAGRAM_SIZE - ETH_HLEN;
+			break;
+
+		case USB_CDC_NCM_TYPE:
+			if (buf[0] < sizeof(*(ctx->func_desc)))
+				break;
+
+			ctx->func_desc = (const struct usb_cdc_ncm_desc *)buf;
+			break;
+
+		default:
+			break;
+		}
+advance:
+		/* advance to next descriptor */
+		temp = buf[0];
+		buf += temp;
+		len -= temp;
+	}
+
+	/* check if we got everything */
+	if ((ctx->control == NULL) || (ctx->data == NULL) ||
+	    (ctx->ether_desc == NULL))
+		goto error;
+
+	/* claim interfaces, if any */
+	if (ctx->data != intf) {
+		temp = usb_driver_claim_interface(driver, ctx->data, dev);
+		if (temp)
+			goto error;
+		ctx->data_claimed = 1;
+	}
+
+	if (ctx->control != intf) {
+		temp = usb_driver_claim_interface(driver, ctx->control, dev);
+		if (temp)
+			goto error;
+		ctx->control_claimed = 1;
+	}
+
+	iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber;
+
+	/* reset data interface */
+	temp = usb_set_interface(dev->udev, iface_no, 0);
+	if (temp)
+		goto error;
+
+	/* initialize data interface */
+	if (cdc_ncm_setup(ctx))
+		goto error;
+
+	/* configure data interface */
+	temp = usb_set_interface(dev->udev, iface_no, 1);
+	if (temp)
+		goto error;
+
+	cdc_ncm_find_endpoints(ctx, ctx->data);
+	cdc_ncm_find_endpoints(ctx, ctx->control);
+
+	if ((ctx->in_ep == NULL) || (ctx->out_ep == NULL) ||
+	    (ctx->status_ep == NULL))
+		goto error;
+
+	dev->net->ethtool_ops = &cdc_ncm_ethtool_ops;
+
+	usb_set_intfdata(ctx->data, dev);
+	usb_set_intfdata(ctx->control, dev);
+	usb_set_intfdata(ctx->intf, dev);
+
+	temp = usbnet_get_ethernet_addr(dev, ctx->ether_desc->iMACAddress);
+	if (temp)
+		goto error;
+
+	dev_info(&dev->udev->dev, "MAC-Address: %pM\n", dev->net->dev_addr);
+
+	dev->in = usb_rcvbulkpipe(dev->udev,
+		ctx->in_ep->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
+	dev->out = usb_sndbulkpipe(dev->udev,
+		ctx->out_ep->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
+	dev->status = ctx->status_ep;
+	dev->rx_urb_size = ctx->rx_max;
+
+	/*
+	 * We should get an event when the network connection is "connected" or
+	 * "disconnected". Set the network connection in the "disconnected"
+	 * state (carrier OFF) during attach, so that the IP network stack does
+	 * not start IPv6 neighbour discovery or other traffic prematurely.
+	 */
+	netif_carrier_off(dev->net);
+	ctx->tx_speed = ctx->rx_speed = 0;
+	return 0;
+
+error:
+	cdc_ncm_free((struct cdc_ncm_ctx *)dev->data[0]);
+	dev->data[0] = 0;
+	dev_info(&dev->udev->dev, "Descriptor failure\n");
+	return -ENODEV;
+}
+
+static void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf)
+{
+	struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+	struct usb_driver *driver;
+
+	if (ctx == NULL)
+		return;		/* no setup */
+
+	driver = driver_of(intf);
+
+	usb_set_intfdata(ctx->data, NULL);
+	usb_set_intfdata(ctx->control, NULL);
+	usb_set_intfdata(ctx->intf, NULL);
+
+	/* release interfaces, if any */
+	if (ctx->data_claimed) {
+		usb_driver_release_interface(driver, ctx->data);
+		ctx->data_claimed = 0;
+	}
+
+	if (ctx->control_claimed) {
+		usb_driver_release_interface(driver, ctx->control);
+		ctx->control_claimed = 0;
+	}
+
+	cdc_ncm_free(ctx);
+}
+
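+/* zero the byte range [first, end) in a buffer holding at most max bytes */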
+static void cdc_ncm_zero_fill(u8 *ptr, u32 first, u32 end, u32 max)
+{
+	if (first >= max)
+		return;
+	if (first >= end)
+		return;
+	if (end > max)
+		end = max;
+	memset(ptr + first, 0, end - first);
+}
+
+static struct sk_buff *
+cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
+{
+	struct sk_buff *skb_out;
+	u32 rem;
+	u32 offset;
+	u32 last_offset;
+	u32 index;
+	u16 n = 0;
+	u8 timeout = 0;
+
+	/* if there is a remaining skb, it gets priority */
+	if (skb != NULL)
+		swap(skb, ctx->tx_rem_skb);
+	else
+		timeout = 1;
+
+	/*
+	 * +----------------+
+	 * | skb_out        |
+	 * +----------------+
+	 *           ^ offset
+	 *        ^ last_offset
+	 */
+
+	/* check if we are resuming an OUT skb */
+	if (ctx->tx_curr_skb != NULL) {
+		/* pop variables */
+		skb_out = ctx->tx_curr_skb;
+		offset = ctx->tx_curr_offset;
+		last_offset = ctx->tx_curr_last_offset;
+		n = ctx->tx_curr_frame_num;
+
+	} else {
+		/* reset variables */
+		skb_out = alloc_skb(ctx->tx_max, GFP_ATOMIC);
+		if (skb_out == NULL) {
+			if (skb != NULL) {
+				dev_kfree_skb_any(skb);
+				ctx->netdev->stats.tx_dropped++;
+			}
+			goto exit_no_skb;
+		}
+
+		/* make room for NTH and NDP */
+		offset = ALIGN(sizeof(struct usb_cdc_ncm_nth16),
+					ctx->tx_ndp_modulus) +
+					sizeof(struct usb_cdc_ncm_ndp16) +
+					(ctx->tx_max_datagrams + 1) *
+					sizeof(struct usb_cdc_ncm_dpe16);
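+		/* the extra DPE slot holds the terminating NULL entry */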
+
+		/* store last valid offset before alignment */
+		last_offset = offset;
+		/* align first Datagram offset correctly */
+		offset = ALIGN(offset, ctx->tx_modulus) + ctx->tx_remainder;
+		/* zero buffer till the first IP datagram */
+		cdc_ncm_zero_fill(skb_out->data, 0, offset, offset);
+		n = 0;
+		ctx->tx_curr_frame_num = 0;
+	}
+
+	for (; n < ctx->tx_max_datagrams; n++) {
+		/* check if end of transmit buffer is reached */
+		if (offset >= ctx->tx_max)
+			break;
+
+		/* compute maximum buffer size */
+		rem = ctx->tx_max - offset;
+
+		if (skb == NULL) {
+			skb = ctx->tx_rem_skb;
+			ctx->tx_rem_skb = NULL;
+
+			/* check for end of skb */
+			if (skb == NULL)
+				break;
+		}
+
+		if (skb->len > rem) {
+			if (n == 0) {
+				/* won't fit, MTU problem? */
+				dev_kfree_skb_any(skb);
+				skb = NULL;
+				ctx->netdev->stats.tx_dropped++;
+			} else {
+				/* no room for skb - store for later */
+				if (ctx->tx_rem_skb != NULL) {
+					dev_kfree_skb_any(ctx->tx_rem_skb);
+					ctx->netdev->stats.tx_dropped++;
+				}
+				ctx->tx_rem_skb = skb;
+				skb = NULL;
+
+				/* loop one more time */
+				timeout = 1;
+			}
+			break;
+		}
+
+		memcpy(((u8 *)skb_out->data) + offset, skb->data, skb->len);
+
+		ctx->tx_ncm.dpe16[n].wDatagramLength = cpu_to_le16(skb->len);
+		ctx->tx_ncm.dpe16[n].wDatagramIndex = cpu_to_le16(offset);
+
+		/* update offset */
+		offset += skb->len;
+
+		/* store last valid offset before alignment */
+		last_offset = offset;
+
+		/* align offset correctly */
+		offset = ALIGN(offset, ctx->tx_modulus) + ctx->tx_remainder;
+
+		/* zero padding */
+		cdc_ncm_zero_fill(skb_out->data, last_offset, offset,
+								ctx->tx_max);
+		dev_kfree_skb_any(skb);
+		skb = NULL;
+	}
+
+	/* free up any dangling skb */
+	if (skb != NULL) {
+		dev_kfree_skb_any(skb);
+		skb = NULL;
+		ctx->netdev->stats.tx_dropped++;
+	}
+
+	ctx->tx_curr_frame_num = n;
+
+	if (n == 0) {
+		/* wait for more frames */
+		/* push variables */
+		ctx->tx_curr_skb = skb_out;
+		ctx->tx_curr_offset = offset;
+		ctx->tx_curr_last_offset = last_offset;
+		goto exit_no_skb;
+
+	} else if ((n < ctx->tx_max_datagrams) && (timeout == 0)) {
+		/* wait for more frames */
+		/* push variables */
+		ctx->tx_curr_skb = skb_out;
+		ctx->tx_curr_offset = offset;
+		ctx->tx_curr_last_offset = last_offset;
+		/* set the pending count */
+		if (n < CDC_NCM_RESTART_TIMER_DATAGRAM_CNT)
+			ctx->tx_timer_pending = 2;
+		goto exit_no_skb;
+
+	} else {
+		/* frame goes out */
+		/* variables will be reset at next call */
+	}
+
+	/* check for overflow */
+	if (last_offset > ctx->tx_max)
+		last_offset = ctx->tx_max;
+
+	/* revert offset */
+	offset = last_offset;
+
+	/*
+	 * If the amount of collected data is less than or equal to
+	 * CDC_NCM_MIN_TX_PKT bytes, send the buffer as is. With more data,
+	 * pad up to a full-size NTB: for a USB HS mobile device with a DMA
+	 * engine it is more efficient to receive a full-size NTB than to
+	 * cancel the DMA transfer for a short packet.
+	 */
+	if (offset > CDC_NCM_MIN_TX_PKT)
+		offset = ctx->tx_max;
+
+	/* final zero padding */
+	cdc_ncm_zero_fill(skb_out->data, last_offset, offset, ctx->tx_max);
+
+	/* store last offset */
+	last_offset = offset;
+
+	if ((last_offset < ctx->tx_max) && ((last_offset %
+			le16_to_cpu(ctx->out_ep->desc.wMaxPacketSize)) == 0)) {
+		/* force short packet */
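+		/* the zero byte ensures the transfer ends in a short packet */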
+		*(((u8 *)skb_out->data) + last_offset) = 0;
+		last_offset++;
+	}
+
+	/* zero the rest of the DPEs plus the last NULL entry */
+	for (; n <= CDC_NCM_DPT_DATAGRAMS_MAX; n++) {
+		ctx->tx_ncm.dpe16[n].wDatagramLength = 0;
+		ctx->tx_ncm.dpe16[n].wDatagramIndex = 0;
+	}
+
+	/* fill out 16-bit NTB header */
+	ctx->tx_ncm.nth16.dwSignature = cpu_to_le32(USB_CDC_NCM_NTH16_SIGN);
+	ctx->tx_ncm.nth16.wHeaderLength =
+					cpu_to_le16(sizeof(ctx->tx_ncm.nth16));
+	ctx->tx_ncm.nth16.wSequence = cpu_to_le16(ctx->tx_seq);
+	ctx->tx_ncm.nth16.wBlockLength = cpu_to_le16(last_offset);
+	index = ALIGN(sizeof(struct usb_cdc_ncm_nth16), ctx->tx_ndp_modulus);
+	ctx->tx_ncm.nth16.wFpIndex = cpu_to_le16(index);
+
+	memcpy(skb_out->data, &(ctx->tx_ncm.nth16), sizeof(ctx->tx_ncm.nth16));
+	ctx->tx_seq++;
+
+	/* fill out 16-bit NDP table */
+	ctx->tx_ncm.ndp16.dwSignature =
+				cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN);
+	rem = sizeof(ctx->tx_ncm.ndp16) + ((ctx->tx_curr_frame_num + 1) *
+					sizeof(struct usb_cdc_ncm_dpe16));
+	ctx->tx_ncm.ndp16.wLength = cpu_to_le16(rem);
+	ctx->tx_ncm.ndp16.wNextFpIndex = 0; /* reserved */
+
+	memcpy(((u8 *)skb_out->data) + index,
+						&(ctx->tx_ncm.ndp16),
+						sizeof(ctx->tx_ncm.ndp16));
+
+	memcpy(((u8 *)skb_out->data) + index +
+					sizeof(ctx->tx_ncm.ndp16),
+					&(ctx->tx_ncm.dpe16),
+					(ctx->tx_curr_frame_num + 1) *
+					sizeof(struct usb_cdc_ncm_dpe16));
+
+	/* set frame length */
+	skb_put(skb_out, last_offset);
+
+	/* return skb */
+	ctx->tx_curr_skb = NULL;
+	return skb_out;
+
+exit_no_skb:
+	return NULL;
+}
+
+static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx)
+{
+	/* start timer, if not already started */
+	if (timer_pending(&ctx->tx_timer) == 0) {
+		ctx->tx_timer.function = &cdc_ncm_tx_timeout;
+		ctx->tx_timer.data = (unsigned long)ctx;
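+		/* expire in 1 ms worth of jiffies, but at least one jiffy */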
+		ctx->tx_timer.expires = jiffies + ((HZ + 999) / 1000);
+		add_timer(&ctx->tx_timer);
+	}
+}
+
+static void cdc_ncm_tx_timeout(unsigned long arg)
+{
+	struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)arg;
+	u8 restart;
+
+	spin_lock(&ctx->mtx);
+	if (ctx->tx_timer_pending != 0) {
+		ctx->tx_timer_pending--;
+		restart = 1;
+	} else
+		restart = 0;
+
+	spin_unlock(&ctx->mtx);
+
+	if (restart)
+		cdc_ncm_tx_timeout_start(ctx);
+	else if (ctx->netdev != NULL)
+		usbnet_start_xmit(NULL, ctx->netdev);
+}
+
+static struct sk_buff *
+cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
+{
+	struct sk_buff *skb_out;
+	struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
+	u8 need_timer = 0;
+
+	/*
+	 * The Ethernet API we are using does not support transmitting
+	 * multiple Ethernet frames in a single call. This driver will
+	 * accumulate multiple Ethernet frames and send out a larger
+	 * USB frame when the USB buffer is full or when a one-jiffy
+	 * timeout expires.
+	 */
+	if (ctx == NULL)
+		goto error;
+
+	spin_lock(&ctx->mtx);
+	skb_out = cdc_ncm_fill_tx_frame(ctx, skb);
+	if (ctx->tx_curr_skb != NULL)
+		need_timer = 1;
+	spin_unlock(&ctx->mtx);
+
+	/* Start timer, if there is a remaining skb */
+	if (need_timer)
+		cdc_ncm_tx_timeout_start(ctx);
+
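+	/* usbnet skips tx_packets accounting for FLAG_MULTI_PACKET drivers */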
+	if (skb_out)
+		dev->net->stats.tx_packets += ctx->tx_curr_frame_num;
+	return skb_out;
+
+error:
+	if (skb != NULL)
+		dev_kfree_skb_any(skb);
+
+	return NULL;
+}
+
+static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
+{
+	struct sk_buff *skb;
+	struct cdc_ncm_ctx *ctx;
+	int sumlen;
+	int actlen;
+	int temp;
+	int nframes;
+	int x;
+	int offset;
+
+	ctx = (struct cdc_ncm_ctx *)dev->data[0];
+	if (ctx == NULL)
+		goto error;
+
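+	/* actlen is the number of bytes received; sumlen bounds wBlockLength */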
+	actlen = skb_in->len;
+	sumlen = CDC_NCM_NTB_MAX_SIZE_RX;
+
+	if (actlen < (sizeof(ctx->rx_ncm.nth16) + sizeof(ctx->rx_ncm.ndp16))) {
+		pr_debug("frame too short\n");
+		goto error;
+	}
+
+	memcpy(&(ctx->rx_ncm.nth16), ((u8 *)skb_in->data),
+						sizeof(ctx->rx_ncm.nth16));
+
+	if (le32_to_cpu(ctx->rx_ncm.nth16.dwSignature) !=
+	    USB_CDC_NCM_NTH16_SIGN) {
+		pr_debug("invalid NTH16 signature <%u>\n",
+			 le32_to_cpu(ctx->rx_ncm.nth16.dwSignature));
+		goto error;
+	}
+
+	temp = le16_to_cpu(ctx->rx_ncm.nth16.wBlockLength);
+	if (temp > sumlen) {
+		pr_debug("unsupported NTB block length %u/%u\n", temp, sumlen);
+		goto error;
+	}
+
+	temp = le16_to_cpu(ctx->rx_ncm.nth16.wFpIndex);
+	if ((temp + sizeof(ctx->rx_ncm.ndp16)) > actlen) {
+		pr_debug("invalid DPT16 index\n");
+		goto error;
+	}
+
+	memcpy(&(ctx->rx_ncm.ndp16), ((u8 *)skb_in->data) + temp,
+						sizeof(ctx->rx_ncm.ndp16));
+
+	if (le32_to_cpu(ctx->rx_ncm.ndp16.dwSignature) !=
+	    USB_CDC_NCM_NDP16_NOCRC_SIGN) {
+		pr_debug("invalid DPT16 signature <%u>\n",
+			 le32_to_cpu(ctx->rx_ncm.ndp16.dwSignature));
+		goto error;
+	}
+
+	if (le16_to_cpu(ctx->rx_ncm.ndp16.wLength) <
+	    USB_CDC_NCM_NDP16_LENGTH_MIN) {
+		pr_debug("invalid DPT16 length <%u>\n",
+			 le32_to_cpu(ctx->rx_ncm.ndp16.dwSignature));
+		goto error;
+	}
+
+	nframes = ((le16_to_cpu(ctx->rx_ncm.ndp16.wLength) -
+					sizeof(struct usb_cdc_ncm_ndp16)) /
+					sizeof(struct usb_cdc_ncm_dpe16));
+	nframes--; /* we process NDP entries except for the last one */
+
+	pr_debug("nframes = %u\n", nframes);
+
+	temp += sizeof(ctx->rx_ncm.ndp16);
+
+	if ((temp + nframes * (sizeof(struct usb_cdc_ncm_dpe16))) > actlen) {
+		pr_debug("Invalid nframes = %d\n", nframes);
+		goto error;
+	}
+
+	if (nframes > CDC_NCM_DPT_DATAGRAMS_MAX) {
+		pr_debug("Truncating number of frames from %u to %u\n",
+					nframes, CDC_NCM_DPT_DATAGRAMS_MAX);
+		nframes = CDC_NCM_DPT_DATAGRAMS_MAX;
+	}
+
+	memcpy(&(ctx->rx_ncm.dpe16), ((u8 *)skb_in->data) + temp,
+				nframes * (sizeof(struct usb_cdc_ncm_dpe16)));
+
+	for (x = 0; x < nframes; x++) {
+		offset = le16_to_cpu(ctx->rx_ncm.dpe16[x].wDatagramIndex);
+		temp = le16_to_cpu(ctx->rx_ncm.dpe16[x].wDatagramLength);
+
+		/*
+		 * CDC NCM ch. 3.7:
+		 * all entries after the first NULL entry are to be ignored
+		 */
+		if ((offset == 0) || (temp == 0)) {
+			if (!x)
+				goto error; /* empty NTB */
+			break;
+		}
+
+		/* sanity checking */
+		if (((offset + temp) > actlen) ||
+		    (temp > CDC_NCM_MAX_DATAGRAM_SIZE) || (temp < ETH_HLEN)) {
+			pr_debug("invalid frame detected (ignored)"
+				"offset[%u]=%u, length=%u, skb=%p\n",
+							x, offset, temp, skb);
+			if (!x)
+				goto error;
+			break;
+
+		} else {
+			skb = skb_clone(skb_in, GFP_ATOMIC);
+			if (skb == NULL)
+				goto error;
+			skb->len = temp;
+			skb->data = ((u8 *)skb_in->data) + offset;
+			skb_set_tail_pointer(skb, temp);
+			usbnet_skb_return(dev, skb);
+		}
+	}
+	return 1;
+error:
+	return 0;
+}
+
+static void
+cdc_ncm_speed_change(struct cdc_ncm_ctx *ctx,
+		     struct connection_speed_change *data)
+{
+	uint32_t rx_speed = le32_to_cpu(data->USBitRate);
+	uint32_t tx_speed = le32_to_cpu(data->DSBitRate);
+
+	/*
+	 * Currently the usbnet API does not support reporting the actual
+	 * device speed, so print it to the kernel log instead.
+	 */
+	if ((tx_speed != ctx->tx_speed) || (rx_speed != ctx->rx_speed)) {
+		ctx->tx_speed = tx_speed;
+		ctx->rx_speed = rx_speed;
+
+		if ((tx_speed > 1000000) && (rx_speed > 1000000)) {
+			printk(KERN_INFO KBUILD_MODNAME
+				": %s: %u mbit/s downlink "
+				"%u mbit/s uplink\n",
+				ctx->netdev->name,
+				(unsigned int)(rx_speed / 1000000U),
+				(unsigned int)(tx_speed / 1000000U));
+		} else {
+			printk(KERN_INFO KBUILD_MODNAME
+				": %s: %u kbit/s downlink "
+				"%u kbit/s uplink\n",
+				ctx->netdev->name,
+				(unsigned int)(rx_speed / 1000U),
+				(unsigned int)(tx_speed / 1000U));
+		}
+	}
+}
+
+static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
+{
+	struct cdc_ncm_ctx *ctx;
+	struct usb_cdc_notification *event;
+
+	ctx = (struct cdc_ncm_ctx *)dev->data[0];
+
+	if (urb->actual_length < sizeof(*event))
+		return;
+
+	/* test for split data in 8-byte chunks */
+	if (test_and_clear_bit(EVENT_STS_SPLIT, &dev->flags)) {
+		cdc_ncm_speed_change(ctx,
+		      (struct connection_speed_change *)urb->transfer_buffer);
+		return;
+	}
+
+	event = urb->transfer_buffer;
+
+	switch (event->bNotificationType) {
+	case USB_CDC_NOTIFY_NETWORK_CONNECTION:
+		/*
+		 * According to the CDC NCM specification ch. 7.1, the
+		 * USB_CDC_NOTIFY_NETWORK_CONNECTION notification shall be
+		 * sent by the device after USB_CDC_NOTIFY_SPEED_CHANGE.
+		 */
+		ctx->connected = event->wValue;
+
+		printk(KERN_INFO KBUILD_MODNAME ": %s: network connection:"
+			" %sconnected\n",
+			ctx->netdev->name, ctx->connected ? "" : "dis");
+
+		if (ctx->connected)
+			netif_carrier_on(dev->net);
+		else {
+			netif_carrier_off(dev->net);
+			ctx->tx_speed = ctx->rx_speed = 0;
+		}
+		break;
+
+	case USB_CDC_NOTIFY_SPEED_CHANGE:
+		if (urb->actual_length <
+		    (sizeof(*event) + sizeof(struct connection_speed_change)))
+			set_bit(EVENT_STS_SPLIT, &dev->flags);
+		else
+			cdc_ncm_speed_change(ctx,
+				(struct connection_speed_change *) &event[1]);
+		break;
+
+	default:
+		dev_err(&dev->udev->dev, "NCM: unexpected "
+			"notification 0x%02x!\n", event->bNotificationType);
+		break;
+	}
+}
+
+static int cdc_ncm_check_connect(struct usbnet *dev)
+{
+	struct cdc_ncm_ctx *ctx;
+
+	ctx = (struct cdc_ncm_ctx *)dev->data[0];
+	if (ctx == NULL)
+		return 1;	/* disconnected */
+
+	return !ctx->connected;
+}
+
+static int
+cdc_ncm_probe(struct usb_interface *udev, const struct usb_device_id *prod)
+{
+	return usbnet_probe(udev, prod);
+}
+
+static void cdc_ncm_disconnect(struct usb_interface *intf)
+{
+	struct usbnet *dev = usb_get_intfdata(intf);
+
+	if (dev == NULL)
+		return;		/* already disconnected */
+
+	usbnet_disconnect(intf);
+}
+
+static int cdc_ncm_manage_power(struct usbnet *dev, int status)
+{
+	dev->intf->needs_remote_wakeup = status;
+	return 0;
+}
+
+static const struct driver_info cdc_ncm_info = {
+	.description = "CDC NCM",
+	.flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET,
+	.bind = cdc_ncm_bind,
+	.unbind = cdc_ncm_unbind,
+	.check_connect = cdc_ncm_check_connect,
+	.manage_power = cdc_ncm_manage_power,
+	.status = cdc_ncm_status,
+	.rx_fixup = cdc_ncm_rx_fixup,
+	.tx_fixup = cdc_ncm_tx_fixup,
+};
+
+static struct usb_driver cdc_ncm_driver = {
+	.name = "cdc_ncm",
+	.id_table = cdc_devs,
+	.probe = cdc_ncm_probe,
+	.disconnect = cdc_ncm_disconnect,
+	.suspend = usbnet_suspend,
+	.resume = usbnet_resume,
+	.supports_autosuspend = 1,
+};
+
+static struct ethtool_ops cdc_ncm_ethtool_ops = {
+	.get_drvinfo = cdc_ncm_get_drvinfo,
+	.get_link = usbnet_get_link,
+	.get_msglevel = usbnet_get_msglevel,
+	.set_msglevel = usbnet_set_msglevel,
+	.get_settings = usbnet_get_settings,
+	.set_settings = usbnet_set_settings,
+	.nway_reset = usbnet_nway_reset,
+};
+
+static int __init cdc_ncm_init(void)
+{
+	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION "\n");
+	return usb_register(&cdc_ncm_driver);
+}
+
+module_init(cdc_ncm_init);
+
+static void __exit cdc_ncm_exit(void)
+{
+	usb_deregister(&cdc_ncm_driver);
+}
+
+module_exit(cdc_ncm_exit);
+
+MODULE_AUTHOR("Hans Petter Selasky");
+MODULE_DESCRIPTION("USB CDC NCM host driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index b154a94..93c6b5f 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -1745,7 +1745,6 @@
 			    unsigned int cmd, unsigned long arg)
 {
 	struct hso_serial *serial =  get_serial_by_tty(tty);
-	void __user *uarg = (void __user *)arg;
 	int ret = 0;
 	D4("IOCTL cmd: %d, arg: %ld", cmd, arg);
 
@@ -2994,12 +2993,14 @@
 
 	case HSO_INTF_BULK:
 		/* It's a regular bulk interface */
-		if (((port_spec & HSO_PORT_MASK) == HSO_PORT_NETWORK) &&
-		    !disable_net)
-			hso_dev = hso_create_net_device(interface, port_spec);
-		else
+		if ((port_spec & HSO_PORT_MASK) == HSO_PORT_NETWORK) {
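+			/* honour the disable_net module parameter */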
+			if (!disable_net)
+				hso_dev =
+				    hso_create_net_device(interface, port_spec);
+		} else {
 			hso_dev =
 			    hso_create_bulk_serial_device(interface, port_spec);
+		}
 		if (!hso_dev)
 			goto exit;
 		break;
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index b2bcf99..7d42f9a 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -363,7 +363,7 @@
 
 	/* Paranoid */
 	if (skb->len > IPHETH_BUF_SIZE) {
-		WARN(1, "%s: skb too large: %d bytes", __func__, skb->len);
+		WARN(1, "%s: skb too large: %d bytes\n", __func__, skb->len);
 		dev->net->stats.tx_dropped++;
 		dev_kfree_skb_irq(skb);
 		return NETDEV_TX_OK;
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 6710f09..ef36676 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -359,7 +359,7 @@
 
 static int mdio_read(struct net_device *dev, int phy_id, int loc)
 {
-	pegasus_t *pegasus = (pegasus_t *) netdev_priv(dev);
+	pegasus_t *pegasus = netdev_priv(dev);
 	u16 res;
 
 	read_mii_word(pegasus, phy_id, loc, &res);
@@ -397,7 +397,7 @@
 
 static void mdio_write(struct net_device *dev, int phy_id, int loc, int val)
 {
-	pegasus_t *pegasus = (pegasus_t *) netdev_priv(dev);
+	pegasus_t *pegasus = netdev_priv(dev);
 
 	write_mii_word(pegasus, phy_id, loc, val);
 }
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index d1ac15c..ed1b432 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -802,10 +802,9 @@
 
 	dev_dbg(&dev->udev->dev, "%s", __func__);
 
-	/* Kill the timer then flush the work queue */
+	/* kill the timer and work */
 	del_timer_sync(&priv->sync_timer);
-
-	flush_scheduled_work();
+	cancel_work_sync(&priv->sierra_net_kevent);
 
 	/* tell modem we are going away */
 	status = sierra_net_send_cmd(dev, priv->shdwn_msg,
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index ca7fc9d..ed9a416 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -45,6 +45,7 @@
 #include <linux/usb/usbnet.h>
 #include <linux/slab.h>
 #include <linux/kernel.h>
+#include <linux/pm_runtime.h>
 
 #define DRIVER_VERSION		"22-Aug-2005"
 
@@ -390,14 +391,19 @@
 		goto error;
 	// else network stack removes extra byte if we forced a short packet
 
-	if (skb->len)
-		usbnet_skb_return (dev, skb);
-	else {
-		netif_dbg(dev, rx_err, dev->net, "drop\n");
-error:
-		dev->net->stats.rx_errors++;
-		skb_queue_tail (&dev->done, skb);
+	if (skb->len) {
+		/* all data was already cloned from skb inside the driver */
+		if (dev->driver_info->flags & FLAG_MULTI_PACKET)
+			dev_kfree_skb_any(skb);
+		else
+			usbnet_skb_return(dev, skb);
+		return;
 	}
+
+	netif_dbg(dev, rx_err, dev->net, "drop\n");
+error:
+	dev->net->stats.rx_errors++;
+	skb_queue_tail(&dev->done, skb);
 }
 
 /*-------------------------------------------------------------------------*/
@@ -970,7 +976,8 @@
 	struct usbnet		*dev = entry->dev;
 
 	if (urb->status == 0) {
-		dev->net->stats.tx_packets++;
+		if (!(dev->driver_info->flags & FLAG_MULTI_PACKET))
+			dev->net->stats.tx_packets++;
 		dev->net->stats.tx_bytes += entry->length;
 	} else {
 		dev->net->stats.tx_errors++;
@@ -1043,8 +1050,13 @@
 	if (info->tx_fixup) {
 		skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
 		if (!skb) {
-			netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
-			goto drop;
+			if (netif_msg_tx_err(dev)) {
+				netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
+				goto drop;
+			} else {
+				/* cdc_ncm collected the packet; wait for more */
+				goto not_drop;
+			}
 		}
 	}
 	length = skb->len;
@@ -1066,13 +1078,18 @@
 	/* don't assume the hardware handles USB_ZERO_PACKET
 	 * NOTE:  strictly conforming cdc-ether devices should expect
 	 * the ZLP here, but ignore the one-byte packet.
+	 * NOTE2: CDC NCM specification is different from CDC ECM when
+	 * handling ZLP/short packets, so cdc_ncm driver will make short
+	 * packet itself if needed.
 	 */
 	if (length % dev->maxpacket == 0) {
 		if (!(info->flags & FLAG_SEND_ZLP)) {
-			urb->transfer_buffer_length++;
-			if (skb_tailroom(skb)) {
-				skb->data[skb->len] = 0;
-				__skb_put(skb, 1);
+			if (!(info->flags & FLAG_MULTI_PACKET)) {
+				urb->transfer_buffer_length++;
+				if (skb_tailroom(skb)) {
+					skb->data[skb->len] = 0;
+					__skb_put(skb, 1);
+				}
 			}
 		} else
 			urb->transfer_flags |= URB_ZERO_PACKET;
@@ -1121,6 +1138,7 @@
 		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", retval);
 drop:
 		dev->net->stats.tx_dropped++;
+not_drop:
 		if (skb)
 			dev_kfree_skb_any (skb);
 		usb_free_urb (urb);
@@ -1230,8 +1248,7 @@
 	net = dev->net;
 	unregister_netdev (net);
 
-	/* we don't hold rtnl here ... */
-	flush_scheduled_work ();
+	cancel_work_sync(&dev->kevent);
 
 	if (dev->driver_info->unbind)
 		dev->driver_info->unbind (dev, intf);
@@ -1273,6 +1290,16 @@
 	struct usb_device		*xdev;
 	int				status;
 	const char			*name;
+	struct usb_driver	*driver = to_usb_driver(udev->dev.driver);
+
+	/* usbnet already took usb runtime pm, so have to enable the feature
+	 * for usb interface, otherwise usb_autopm_get_interface may return
+	 * failure if USB_SUSPEND(RUNTIME_PM) is enabled.
+	 */
+	if (!driver->supports_autosuspend) {
+		driver->supports_autosuspend = 1;
+		pm_runtime_enable(&udev->dev);
+	}
 
 	name = udev->dev.driver->name;
 	info = (struct driver_info *) prod->driver_info;
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 4930f9d..5e7f069 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -30,8 +30,8 @@
 */
 
 #define DRV_NAME	"via-rhine"
-#define DRV_VERSION	"1.4.3"
-#define DRV_RELDATE	"2007-03-06"
+#define DRV_VERSION	"1.5.0"
+#define DRV_RELDATE	"2010-10-09"
 
 
 /* A few user-configurable values.
@@ -100,6 +100,7 @@
 #include <linux/mii.h>
 #include <linux/ethtool.h>
 #include <linux/crc32.h>
+#include <linux/if_vlan.h>
 #include <linux/bitops.h>
 #include <linux/workqueue.h>
 #include <asm/processor.h>	/* Processor type for cache alignment. */
@@ -133,6 +134,9 @@
 MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
 MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
 
+#define MCAM_SIZE	32
+#define VCAM_SIZE	32
+
 /*
 		Theory of Operation
 
@@ -279,15 +283,16 @@
 /* Offsets to the device registers. */
 enum register_offsets {
 	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
-	ChipCmd1=0x09,
+	ChipCmd1=0x09, TQWake=0x0A,
 	IntrStatus=0x0C, IntrEnable=0x0E,
 	MulticastFilter0=0x10, MulticastFilter1=0x14,
 	RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
-	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E,
+	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
 	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
 	ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
 	RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
 	StickyHW=0x83, IntrStatus2=0x84,
+	CamMask=0x88, CamCon=0x92, CamAddr=0x93,
 	WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
 	WOLcrClr1=0xA6, WOLcgClr=0xA7,
 	PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
@@ -299,6 +304,40 @@
 	BackCaptureEffect=0x04, BackRandom=0x08
 };
 
+/* Bits in the TxConfig (TCR) register */
+enum tcr_bits {
+	TCR_PQEN=0x01,
+	TCR_LB0=0x02,		/* loopback[0] */
+	TCR_LB1=0x04,		/* loopback[1] */
+	TCR_OFSET=0x08,
+	TCR_RTGOPT=0x10,
+	TCR_RTFT0=0x20,
+	TCR_RTFT1=0x40,
+	TCR_RTSF=0x80,
+};
+
+/* Bits in the CamCon (CAMC) register */
+enum camcon_bits {
+	CAMC_CAMEN=0x01,
+	CAMC_VCAMSL=0x02,
+	CAMC_CAMWR=0x04,
+	CAMC_CAMRD=0x08,
+};
+
+/* Bits in the PCIBusConfig1 (BCR1) register */
+enum bcr1_bits {
+	BCR1_POT0=0x01,
+	BCR1_POT1=0x02,
+	BCR1_POT2=0x04,
+	BCR1_CTFT0=0x08,
+	BCR1_CTFT1=0x10,
+	BCR1_CTSF=0x20,
+	BCR1_TXQNOBK=0x40,	/* for VT6105 */
+	BCR1_VIDFR=0x80,	/* for VT6105 */
+	BCR1_MED0=0x40,		/* for VT6102 */
+	BCR1_MED1=0x80,		/* for VT6102 */
+};
+
 #ifdef USE_MMIO
 /* Registers we check that mmio and reg are the same. */
 static const int mmio_verify_registers[] = {
@@ -356,6 +395,11 @@
 	DescOwn=0x80000000
 };
 
+/* Bits in *_desc.*_length */
+enum desc_length_bits {
+	DescTag=0x00010000
+};
+
 /* Bits in ChipCmd. */
 enum chip_cmd_bits {
 	CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
@@ -365,6 +409,9 @@
 };
 
 struct rhine_private {
+	/* Bit mask for configured VLAN ids */
+	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+
 	/* Descriptor rings */
 	struct rx_desc *rx_ring;
 	struct tx_desc *tx_ring;
@@ -405,6 +452,23 @@
 	void __iomem *base;
 };
 
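+/* read-modify-write helpers for the memory-mapped chip registers */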
+#define BYTE_REG_BITS_ON(x, p)      do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
+#define WORD_REG_BITS_ON(x, p)      do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
+#define DWORD_REG_BITS_ON(x, p)     do { iowrite32((ioread32((p))|(x)), (p)); } while (0)
+
+#define BYTE_REG_BITS_IS_ON(x, p)   (ioread8((p)) & (x))
+#define WORD_REG_BITS_IS_ON(x, p)   (ioread16((p)) & (x))
+#define DWORD_REG_BITS_IS_ON(x, p)  (ioread32((p)) & (x))
+
+#define BYTE_REG_BITS_OFF(x, p)     do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
+#define WORD_REG_BITS_OFF(x, p)     do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
+#define DWORD_REG_BITS_OFF(x, p)    do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)
+
+#define BYTE_REG_BITS_SET(x, m, p)   do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
+#define WORD_REG_BITS_SET(x, m, p)   do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
+#define DWORD_REG_BITS_SET(x, m, p)  do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)
+
 static int  mdio_read(struct net_device *dev, int phy_id, int location);
 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
 static int  rhine_open(struct net_device *dev);
@@ -422,6 +486,14 @@
 static const struct ethtool_ops netdev_ethtool_ops;
 static int  rhine_close(struct net_device *dev);
 static void rhine_shutdown (struct pci_dev *pdev);
+static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
+static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
+static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr);
+static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr);
+static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask);
+static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask);
+static void rhine_init_cam_filter(struct net_device *dev);
+static void rhine_update_vcam(struct net_device *dev);
 
 #define RHINE_WAIT_FOR(condition) do {					\
 	int i=1024;							\
@@ -629,6 +701,8 @@
 	.ndo_set_mac_address 	 = eth_mac_addr,
 	.ndo_do_ioctl		 = netdev_ioctl,
 	.ndo_tx_timeout 	 = rhine_tx_timeout,
+	.ndo_vlan_rx_add_vid	 = rhine_vlan_rx_add_vid,
+	.ndo_vlan_rx_kill_vid	 = rhine_vlan_rx_kill_vid,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	 = rhine_poll,
 #endif
@@ -795,6 +869,10 @@
 	if (rp->quirks & rqRhineI)
 		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
 
+	if (pdev->revision >= VT6105M)
+		dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
+		NETIF_F_HW_VLAN_FILTER;
+
 	/* dev->name not defined before register_netdev()! */
 	rc = register_netdev(dev);
 	if (rc)
@@ -1040,6 +1118,167 @@
 		       netif_carrier_ok(mii->dev));
 }
 
+/**
+ * rhine_set_cam - set CAM multicast filters
+ * @ioaddr: register block of this Rhine
+ * @idx: multicast CAM index [0..MCAM_SIZE-1]
+ * @addr: multicast address (6 bytes)
+ *
+ * Load addresses into multicast filters.
+ */
+static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr)
+{
+	int i;
+
+	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
+	wmb();
+
+	/* Paranoid -- idx out of range should never happen */
+	idx &= (MCAM_SIZE - 1);
+
+	iowrite8((u8) idx, ioaddr + CamAddr);
+
+	for (i = 0; i < 6; i++, addr++)
+		iowrite8(*addr, ioaddr + MulticastFilter0 + i);
+	udelay(10);
+	wmb();
+
+	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
+	udelay(10);
+
+	iowrite8(0, ioaddr + CamCon);
+}
+
+/**
+ * rhine_set_vlan_cam - set CAM VLAN filters
+ * @ioaddr: register block of this Rhine
+ * @idx: VLAN CAM index [0..VCAM_SIZE-1]
+ * @addr: VLAN ID (2 bytes)
+ *
+ * Load addresses into VLAN filters.
+ */
+static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr)
+{
+	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
+	wmb();
+
+	/* Paranoid -- idx out of range should never happen */
+	idx &= (VCAM_SIZE - 1);
+
+	iowrite8((u8) idx, ioaddr + CamAddr);
+
+	iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6);
+	udelay(10);
+	wmb();
+
+	iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
+	udelay(10);
+
+	iowrite8(0, ioaddr + CamCon);
+}
+
+/**
+ * rhine_set_cam_mask - set multicast CAM mask
+ * @ioaddr: register block of this Rhine
+ * @mask: multicast CAM mask
+ *
+ * Mask sets multicast filters active/inactive.
+ */
+static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
+{
+	iowrite8(CAMC_CAMEN, ioaddr + CamCon);
+	wmb();
+
+	/* write mask */
+	iowrite32(mask, ioaddr + CamMask);
+
+	/* disable CAMEN */
+	iowrite8(0, ioaddr + CamCon);
+}
+
+/**
+ * rhine_set_vlan_cam_mask - set VLAN CAM mask
+ * @ioaddr: register block of this Rhine
+ * @mask: VLAN CAM mask
+ *
+ * Mask sets VLAN filters active/inactive.
+ */
+static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
+{
+	iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
+	wmb();
+
+	/* write mask */
+	iowrite32(mask, ioaddr + CamMask);
+
+	/* disable CAMEN */
+	iowrite8(0, ioaddr + CamCon);
+}
+
+/**
+ * rhine_init_cam_filter - initialize CAM filters
+ * @dev: network device
+ *
+ * Initialize (disable) hardware VLAN and multicast support on this
+ * Rhine.
+ */
+static void rhine_init_cam_filter(struct net_device *dev)
+{
+	struct rhine_private *rp = netdev_priv(dev);
+	void __iomem *ioaddr = rp->base;
+
+	/* Disable all CAMs */
+	rhine_set_vlan_cam_mask(ioaddr, 0);
+	rhine_set_cam_mask(ioaddr, 0);
+
+	/* disable hardware VLAN support */
+	BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
+	BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
+}
+
+/**
+ * rhine_update_vcam - update VLAN CAM filters
+ * @dev: network device
+ *
+ * Update VLAN CAM filters to match configuration change.
+ */
+static void rhine_update_vcam(struct net_device *dev)
+{
+	struct rhine_private *rp = netdev_priv(dev);
+	void __iomem *ioaddr = rp->base;
+	u16 vid;
+	u32 vCAMmask = 0;	/* 32 vCAMs (6105M and better) */
+	unsigned int i = 0;
+
+	for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
+		rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
+		vCAMmask |= 1 << i;
+		if (++i >= VCAM_SIZE)
+			break;
+	}
+	rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
+}
+
+static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
+{
+	struct rhine_private *rp = netdev_priv(dev);
+
+	spin_lock_irq(&rp->lock);
+	set_bit(vid, rp->active_vlans);
+	rhine_update_vcam(dev);
+	spin_unlock_irq(&rp->lock);
+}
+
+static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+{
+	struct rhine_private *rp = netdev_priv(dev);
+
+	spin_lock_irq(&rp->lock);
+	clear_bit(vid, rp->active_vlans);
+	rhine_update_vcam(dev);
+	spin_unlock_irq(&rp->lock);
+}
+
 static void init_registers(struct net_device *dev)
 {
 	struct rhine_private *rp = netdev_priv(dev);
@@ -1061,6 +1300,9 @@
 
 	rhine_set_rx_mode(dev);
 
+	if (rp->pdev->revision >= VT6105M)
+		rhine_init_cam_filter(dev);
+
 	napi_enable(&rp->napi);
 
 	/* Enable interrupts by setting the interrupt mask. */
@@ -1276,16 +1518,28 @@
 	rp->tx_ring[entry].desc_length =
 		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
 
+	if (unlikely(vlan_tx_tag_present(skb))) {
+		rp->tx_ring[entry].tx_status = cpu_to_le32((vlan_tx_tag_get(skb)) << 16);
+		/* request tagging */
+		rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
+	} else
+		rp->tx_ring[entry].tx_status = 0;
+
 	/* lock eth irq */
 	spin_lock_irqsave(&rp->lock, flags);
 	wmb();
-	rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
+	rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
 	wmb();
 
 	rp->cur_tx++;
 
 	/* Non-x86 Todo: explicitly flush cache lines here. */
 
+	if (vlan_tx_tag_present(skb))
+		/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
+		BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
+
 	/* Wake the potentially-idle transmit channel */
 	iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
 	       ioaddr + ChipCmd1);
@@ -1437,6 +1691,21 @@
 	spin_unlock(&rp->lock);
 }
 
+/**
+ * rhine_get_vlan_tci - extract TCI from Rx data buffer
+ * @skb: pointer to sk_buff
+ * @data_size: used data area of the buffer including CRC
+ *
+ * If hardware VLAN tag extraction is enabled and the chip indicates a 802.1Q
+ * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte
+ * aligned following the CRC.
+ */
+static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
+{
+	u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
+	return ntohs(*(u16 *)trailer);
+}
+
 /* Process up to limit frames from receive ring */
 static int rhine_rx(struct net_device *dev, int limit)
 {
@@ -1454,6 +1723,7 @@
 	for (count = 0; count < limit; ++count) {
 		struct rx_desc *desc = rp->rx_head_desc;
 		u32 desc_status = le32_to_cpu(desc->rx_status);
+		u32 desc_length = le32_to_cpu(desc->desc_length);
 		int data_size = desc_status >> 16;
 
 		if (desc_status & DescOwn)
@@ -1498,6 +1768,7 @@
 			struct sk_buff *skb = NULL;
 			/* Length should omit the CRC */
 			int pkt_len = data_size - 4;
+			u16 vlan_tci = 0;
 
 			/* Check if the packet is long enough to accept without
 			   copying to a minimally-sized skbuff. */
@@ -1532,7 +1803,14 @@
 						 rp->rx_buf_sz,
 						 PCI_DMA_FROMDEVICE);
 			}
+
+			if (unlikely(desc_length & DescTag))
+				vlan_tci = rhine_get_vlan_tci(skb, data_size);
+
 			skb->protocol = eth_type_trans(skb, dev);
+
+			if (unlikely(desc_length & DescTag))
+				__vlan_hwaccel_put_tag(skb, vlan_tci);
 			netif_receive_skb(skb);
 			dev->stats.rx_bytes += pkt_len;
 			dev->stats.rx_packets++;
@@ -1596,6 +1874,11 @@
 
 		iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
 		       ioaddr + ChipCmd);
+
+		if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
+			/* Tx queues are bits 7-0 (first Tx queue: bit 7) */
+			BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
+
 		iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
 		       ioaddr + ChipCmd1);
 		IOSYNC;
@@ -1631,7 +1914,7 @@
 	}
 	if (intr_status & IntrTxUnderrun) {
 		if (rp->tx_thresh < 0xE0)
-			iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
+			BYTE_REG_BITS_SET((rp->tx_thresh += 0x20), 0x80, ioaddr + TxConfig);
 		if (debug > 1)
 			printk(KERN_INFO "%s: Transmitter underrun, Tx "
 			       "threshold now %2.2x.\n",
@@ -1646,7 +1929,7 @@
 	    (intr_status & (IntrTxAborted |
 	     IntrTxUnderrun | IntrTxDescRace)) == 0) {
 		if (rp->tx_thresh < 0xE0) {
-			iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
+			BYTE_REG_BITS_SET((rp->tx_thresh += 0x20), 0x80, ioaddr + TxConfig);
 		}
 		if (debug > 1)
 			printk(KERN_INFO "%s: Unspecified error. Tx "
@@ -1688,7 +1971,8 @@
 	struct rhine_private *rp = netdev_priv(dev);
 	void __iomem *ioaddr = rp->base;
 	u32 mc_filter[2];	/* Multicast hash filter */
-	u8 rx_mode;		/* Note: 0x02=accept runt, 0x01=accept errs */
+	u8 rx_mode = 0x0C;	/* Note: 0x02=accept runt, 0x01=accept errs */
+	struct netdev_hw_addr *ha;
 
 	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
 		rx_mode = 0x1C;
@@ -1699,10 +1983,18 @@
 		/* Too many to match, or accept all multicasts. */
 		iowrite32(0xffffffff, ioaddr + MulticastFilter0);
 		iowrite32(0xffffffff, ioaddr + MulticastFilter1);
-		rx_mode = 0x0C;
+	} else if (rp->pdev->revision >= VT6105M) {
+		int i = 0;
+		u32 mCAMmask = 0;	/* 32 mCAMs (6105M and better) */
+		netdev_for_each_mc_addr(ha, dev) {
+			if (i == MCAM_SIZE)
+				break;
+			rhine_set_cam(ioaddr, i, ha->addr);
+			mCAMmask |= 1 << i;
+			i++;
+		}
+		rhine_set_cam_mask(ioaddr, mCAMmask);
 	} else {
-		struct netdev_hw_addr *ha;
-
 		memset(mc_filter, 0, sizeof(mc_filter));
 		netdev_for_each_mc_addr(ha, dev) {
 			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
@@ -1711,9 +2003,15 @@
 		}
 		iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
 		iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
-		rx_mode = 0x0C;
 	}
-	iowrite8(rp->rx_thresh | rx_mode, ioaddr + RxConfig);
+	/* enable/disable VLAN receive filtering */
+	if (rp->pdev->revision >= VT6105M) {
+		if (dev->flags & IFF_PROMISC)
+			BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
+		else
+			BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
+	}
+	BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
 }
 
 static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
@@ -1966,7 +2264,7 @@
 	if (!netif_running(dev))
 		return 0;
 
-        if (request_irq(dev->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev))
+	if (request_irq(dev->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev))
 		printk(KERN_ERR "via-rhine %s: request_irq failed\n", dev->name);
 
 	ret = pci_set_power_state(pdev, PCI_D0);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index bb6b67f..b6d4028 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -986,9 +986,15 @@
 		goto unregister;
 	}
 
-	vi->status = VIRTIO_NET_S_LINK_UP;
-	virtnet_update_status(vi);
-	netif_carrier_on(dev);
+	/* Assume link up if device can't report link status,
+	   otherwise get link status from config. */
+	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
+		netif_carrier_off(dev);
+		virtnet_update_status(vi);
+	} else {
+		vi->status = VIRTIO_NET_S_LINK_UP;
+		netif_carrier_on(dev);
+	}
 
 	pr_debug("virtnet: registered device %s\n", dev->name);
 	return 0;
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 21314e0..0169be7 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -44,6 +44,9 @@
 
 static atomic_t devices_found;
 
+#define VMXNET3_MAX_DEVICES 10
+static int enable_mq = 1;
+static int irq_share_mode;
 
 /*
  *    Enable/Disable the given intr
@@ -99,7 +102,7 @@
 static bool
 vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
 {
-	return netif_queue_stopped(adapter->netdev);
+	return tq->stopped;
 }
 
 
@@ -107,7 +110,7 @@
 vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
 {
 	tq->stopped = false;
-	netif_start_queue(adapter->netdev);
+	netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
 }
 
 
@@ -115,7 +118,7 @@
 vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
 {
 	tq->stopped = false;
-	netif_wake_queue(adapter->netdev);
+	netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
 }
 
 
@@ -124,7 +127,7 @@
 {
 	tq->stopped = true;
 	tq->num_stop++;
-	netif_stop_queue(adapter->netdev);
+	netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
 }
 
 
@@ -135,6 +138,7 @@
 vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
 {
 	u32 ret;
+	int i;
 
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
 	ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
@@ -145,22 +149,28 @@
 		if (!netif_carrier_ok(adapter->netdev))
 			netif_carrier_on(adapter->netdev);
 
-		if (affectTxQueue)
-			vmxnet3_tq_start(&adapter->tx_queue, adapter);
+		if (affectTxQueue) {
+			for (i = 0; i < adapter->num_tx_queues; i++)
+				vmxnet3_tq_start(&adapter->tx_queue[i],
+						 adapter);
+		}
 	} else {
 		printk(KERN_INFO "%s: NIC Link is Down\n",
 		       adapter->netdev->name);
 		if (netif_carrier_ok(adapter->netdev))
 			netif_carrier_off(adapter->netdev);
 
-		if (affectTxQueue)
-			vmxnet3_tq_stop(&adapter->tx_queue, adapter);
+		if (affectTxQueue) {
+			for (i = 0; i < adapter->num_tx_queues; i++)
+				vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
+		}
 	}
 }
 
 static void
 vmxnet3_process_events(struct vmxnet3_adapter *adapter)
 {
+	int i;
 	u32 events = le32_to_cpu(adapter->shared->ecr);
 	if (!events)
 		return;
@@ -176,16 +186,18 @@
 		VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
 				       VMXNET3_CMD_GET_QUEUE_STATUS);
 
-		if (adapter->tqd_start->status.stopped) {
-			printk(KERN_ERR "%s: tq error 0x%x\n",
-			       adapter->netdev->name,
-			       le32_to_cpu(adapter->tqd_start->status.error));
-		}
-		if (adapter->rqd_start->status.stopped) {
-			printk(KERN_ERR "%s: rq error 0x%x\n",
-			       adapter->netdev->name,
-			       adapter->rqd_start->status.error);
-		}
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			if (adapter->tqd_start[i].status.stopped)
+				dev_err(&adapter->netdev->dev,
+					"%s: tq[%d] error 0x%x\n",
+					adapter->netdev->name, i, le32_to_cpu(
+					adapter->tqd_start[i].status.error));
+		for (i = 0; i < adapter->num_rx_queues; i++)
+			if (adapter->rqd_start[i].status.stopped)
+				dev_err(&adapter->netdev->dev,
+					"%s: rq[%d] error 0x%x\n",
+					adapter->netdev->name, i,
+					adapter->rqd_start[i].status.error);
 
 		schedule_work(&adapter->work);
 	}
@@ -410,7 +422,7 @@
 }
 
 
-void
+static void
 vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
 		   struct vmxnet3_adapter *adapter)
 {
@@ -437,6 +449,17 @@
 }
 
 
+/* Destroy all tx queues */
+void
+vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
+}
+
+
 static void
 vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
 		struct vmxnet3_adapter *adapter)
@@ -518,6 +541,14 @@
 	return -ENOMEM;
 }
 
+static void
+vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
+}
 
 /*
  *    starting from ring->next2fill, allocate rx buffers for the given ring
@@ -732,6 +763,17 @@
 }
 
 
+/* Init all tx queues */
+static void
+vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
+}
+
+
 /*
  *    parse and copy relevant protocol headers:
  *      For a tso pkt, relevant headers are L2/3/4 including options
@@ -903,6 +945,21 @@
 		}
 	}
 
+	spin_lock_irqsave(&tq->tx_lock, flags);
+
+	if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
+		tq->stats.tx_ring_full++;
+		dev_dbg(&adapter->netdev->dev,
+			"tx queue stopped on %s, next2comp %u"
+			" next2fill %u\n", adapter->netdev->name,
+			tq->tx_ring.next2comp, tq->tx_ring.next2fill);
+
+		vmxnet3_tq_stop(tq, adapter);
+		spin_unlock_irqrestore(&tq->tx_lock, flags);
+		return NETDEV_TX_BUSY;
+	}
+
 	ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter);
 	if (ret >= 0) {
 		BUG_ON(ret <= 0 && ctx.copy_size != 0);
@@ -926,20 +983,6 @@
 		goto drop_pkt;
 	}
 
-	spin_lock_irqsave(&tq->tx_lock, flags);
-
-	if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
-		tq->stats.tx_ring_full++;
-		dev_dbg(&adapter->netdev->dev,
-			"tx queue stopped on %s, next2comp %u"
-			" next2fill %u\n", adapter->netdev->name,
-			tq->tx_ring.next2comp, tq->tx_ring.next2fill);
-
-		vmxnet3_tq_stop(tq, adapter);
-		spin_unlock_irqrestore(&tq->tx_lock, flags);
-		return NETDEV_TX_BUSY;
-	}
-
 	/* fill tx descs related to addr & len */
 	vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);
 
@@ -1000,7 +1043,8 @@
 	if (le32_to_cpu(tq->shared->txNumDeferred) >=
 					le32_to_cpu(tq->shared->txThreshold)) {
 		tq->shared->txNumDeferred = 0;
-		VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_TXPROD,
+		VMXNET3_WRITE_BAR0_REG(adapter,
+				       VMXNET3_REG_TXPROD + tq->qid * 8,
 				       tq->tx_ring.next2fill);
 	}
 
@@ -1020,7 +1064,10 @@
 {
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
 
-	return vmxnet3_tq_xmit(skb, &adapter->tx_queue, adapter, netdev);
+	BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
+	return vmxnet3_tq_xmit(skb,
+			       &adapter->tx_queue[skb->queue_mapping],
+			       adapter, netdev);
 }
 
 
@@ -1106,9 +1153,9 @@
 			break;
 		}
 		num_rxd++;
-
+		BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
 		idx = rcd->rxdIdx;
-		ring_idx = rcd->rqID == rq->qid ? 0 : 1;
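+		/* rqIDs below num_rx_queues map to ring 0, the rest to ring 1 */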
+		ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
 		vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
 				  &rxCmdDesc);
 		rbi = rq->buf_info[ring_idx] + idx;
@@ -1260,6 +1307,16 @@
 }
 
 
+static void
+vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
+}
+
+
 void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
 			struct vmxnet3_adapter *adapter)
 {
@@ -1351,6 +1408,25 @@
 
 
 static int
+vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter)
+{
+	int i, err = 0;
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
+		if (unlikely(err)) {
+			dev_err(&adapter->netdev->dev, "%s: failed to "
+				"initialize rx queue%i\n",
+				adapter->netdev->name, i);
+			break;
+		}
+	}
+	return err;
+}
+
+
+static int
 vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
 {
 	int i;
@@ -1398,32 +1474,176 @@
 
 
 static int
+vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
+{
+	int i, err = 0;
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
+		if (unlikely(err)) {
+			dev_err(&adapter->netdev->dev,
+				"%s: failed to create rx queue%i\n",
+				adapter->netdev->name, i);
+			goto err_out;
+		}
+	}
+	return err;
+err_out:
+	vmxnet3_rq_destroy_all(adapter);
+	return err;
+}
+
+/* Multiple queue aware polling function for tx and rx */
+
+static int
 vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
 {
+	int rcd_done = 0, i;
 	if (unlikely(adapter->shared->ecr))
 		vmxnet3_process_events(adapter);
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);
 
-	vmxnet3_tq_tx_complete(&adapter->tx_queue, adapter);
-	return vmxnet3_rq_rx_complete(&adapter->rx_queue, adapter, budget);
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
+						   adapter, budget);
+	return rcd_done;
 }
 
 
 static int
 vmxnet3_poll(struct napi_struct *napi, int budget)
 {
-	struct vmxnet3_adapter *adapter = container_of(napi,
-					  struct vmxnet3_adapter, napi);
+	struct vmxnet3_rx_queue *rx_queue = container_of(napi,
+					  struct vmxnet3_rx_queue, napi);
 	int rxd_done;
 
-	rxd_done = vmxnet3_do_poll(adapter, budget);
+	rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);
 
 	if (rxd_done < budget) {
 		napi_complete(napi);
-		vmxnet3_enable_intr(adapter, 0);
+		vmxnet3_enable_all_intrs(rx_queue->adapter);
 	}
 	return rxd_done;
 }
 
+/*
+ * NAPI polling function for MSI-X mode with multiple Rx queues
+ * Returns the # of the NAPI credit consumed (# of rx descriptors processed)
+ */
+
+static int
+vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
+{
+	struct vmxnet3_rx_queue *rq = container_of(napi,
+						struct vmxnet3_rx_queue, napi);
+	struct vmxnet3_adapter *adapter = rq->adapter;
+	int rxd_done;
+
+	/* When sharing interrupt with corresponding tx queue, process
+	 * tx completions in that queue as well
+	 */
+	if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
+		struct vmxnet3_tx_queue *tq =
+				&adapter->tx_queue[rq - adapter->rx_queue];
+		vmxnet3_tq_tx_complete(tq, adapter);
+	}
+
+	rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
+
+	if (rxd_done < budget) {
+		napi_complete(napi);
+		vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
+	}
+	return rxd_done;
+}
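+
+/* Note: with BUDDYSHARE the i-th rx queue is paired with the i-th tx queue,
+ * so indexing tx_queue[] by (rq - adapter->rx_queue) above is safe only
+ * because probe falls back to DONTSHARE when the two queue counts differ.
+ */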
+
+
+#ifdef CONFIG_PCI_MSI
+
+/*
+ * Handle completion interrupts on tx queues
+ * Returns whether or not the intr is handled
+ */
+
+static irqreturn_t
+vmxnet3_msix_tx(int irq, void *data)
+{
+	struct vmxnet3_tx_queue *tq = data;
+	struct vmxnet3_adapter *adapter = tq->adapter;
+
+	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
+		vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);
+
+	/* Handle the case where only one irq is allocated for all tx queues */
+	if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
+		int i;
+		for (i = 0; i < adapter->num_tx_queues; i++) {
+			struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i];
+			vmxnet3_tq_tx_complete(txq, adapter);
+		}
+	} else {
+		vmxnet3_tq_tx_complete(tq, adapter);
+	}
+	vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);
+
+	return IRQ_HANDLED;
+}
+
+
+/*
+ * Handle completion interrupts on rx queues. Returns whether or not the
+ * intr is handled
+ */
+
+static irqreturn_t
+vmxnet3_msix_rx(int irq, void *data)
+{
+	struct vmxnet3_rx_queue *rq = data;
+	struct vmxnet3_adapter *adapter = rq->adapter;
+
+	/* disable intr if needed */
+	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
+		vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
+	napi_schedule(&rq->napi);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ *----------------------------------------------------------------------------
+ *
+ * vmxnet3_msix_event --
+ *
+ *    vmxnet3 msix event intr handler
+ *
+ * Result:
+ *    whether or not the intr is handled
+ *
+ *----------------------------------------------------------------------------
+ */
+
+static irqreturn_t
+vmxnet3_msix_event(int irq, void *data)
+{
+	struct net_device *dev = data;
+	struct vmxnet3_adapter *adapter = netdev_priv(dev);
+
+	/* disable intr if needed */
+	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
+		vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);
+
+	if (adapter->shared->ecr)
+		vmxnet3_process_events(adapter);
+
+	vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);
+
+	return IRQ_HANDLED;
+}
+
+#endif /* CONFIG_PCI_MSI  */
+
 
 /* Interrupt handler for vmxnet3  */
 static irqreturn_t
@@ -1432,7 +1652,7 @@
 	struct net_device *dev = dev_id;
 	struct vmxnet3_adapter *adapter = netdev_priv(dev);
 
-	if (unlikely(adapter->intr.type == VMXNET3_IT_INTX)) {
+	if (adapter->intr.type == VMXNET3_IT_INTX) {
 		u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
 		if (unlikely(icr == 0))
 			/* not ours */
@@ -1442,77 +1662,144 @@
 
 	/* disable intr if needed */
 	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
-		vmxnet3_disable_intr(adapter, 0);
+		vmxnet3_disable_all_intrs(adapter);
 
-	napi_schedule(&adapter->napi);
+	napi_schedule(&adapter->rx_queue[0].napi);
 
 	return IRQ_HANDLED;
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
 
-
 /* netpoll callback. */
 static void
 vmxnet3_netpoll(struct net_device *netdev)
 {
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
-	int irq;
 
-#ifdef CONFIG_PCI_MSI
-	if (adapter->intr.type == VMXNET3_IT_MSIX)
-		irq = adapter->intr.msix_entries[0].vector;
-	else
-#endif
-		irq = adapter->pdev->irq;
+	if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
+		vmxnet3_disable_all_intrs(adapter);
 
-	disable_irq(irq);
-	vmxnet3_intr(irq, netdev);
-	enable_irq(irq);
+	vmxnet3_do_poll(adapter, adapter->rx_queue[0].rx_ring[0].size);
+	vmxnet3_enable_all_intrs(adapter);
+
 }
-#endif
+#endif	/* CONFIG_NET_POLL_CONTROLLER */
 
 static int
 vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
 {
-	int err;
+	struct vmxnet3_intr *intr = &adapter->intr;
+	int err = 0, i;
+	int vector = 0;
 
 #ifdef CONFIG_PCI_MSI
 	if (adapter->intr.type == VMXNET3_IT_MSIX) {
-		/* we only use 1 MSI-X vector */
-		err = request_irq(adapter->intr.msix_entries[0].vector,
-				  vmxnet3_intr, 0, adapter->netdev->name,
-				  adapter->netdev);
-	} else if (adapter->intr.type == VMXNET3_IT_MSI) {
+		for (i = 0; i < adapter->num_tx_queues; i++) {
+			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
+				sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
+					adapter->netdev->name, vector);
+				err = request_irq(
+					      intr->msix_entries[vector].vector,
+					      vmxnet3_msix_tx, 0,
+					      adapter->tx_queue[i].name,
+					      &adapter->tx_queue[i]);
+			} else {
+				sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
+					adapter->netdev->name, vector);
+			}
+			if (err) {
+				dev_err(&adapter->netdev->dev,
+					"Failed to request irq for MSIX, %s, "
+					"error %d\n",
+					adapter->tx_queue[i].name, err);
+				return err;
+			}
+
+			/* Handle the case where only 1 MSI-X vector was
+			 * allocated for all tx queues.
+			 */
+			if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
+				for (; i < adapter->num_tx_queues; i++)
+					adapter->tx_queue[i].comp_ring.intr_idx
+								= vector;
+				vector++;
+				break;
+			} else {
+				adapter->tx_queue[i].comp_ring.intr_idx
+								= vector++;
+			}
+		}
+		if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
+			vector = 0;
+
+		for (i = 0; i < adapter->num_rx_queues; i++) {
+			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
+				sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
+					adapter->netdev->name, vector);
+			else
+				sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
+					adapter->netdev->name, vector);
+			err = request_irq(intr->msix_entries[vector].vector,
+					  vmxnet3_msix_rx, 0,
+					  adapter->rx_queue[i].name,
+					  &(adapter->rx_queue[i]));
+			if (err) {
+				printk(KERN_ERR "Failed to request irq for MSIX"
+				       ", %s, error %d\n",
+				       adapter->rx_queue[i].name, err);
+				return err;
+			}
+
+			adapter->rx_queue[i].comp_ring.intr_idx = vector++;
+		}
+
+		sprintf(intr->event_msi_vector_name, "%s-event-%d",
+			adapter->netdev->name, vector);
+		err = request_irq(intr->msix_entries[vector].vector,
+				  vmxnet3_msix_event, 0,
+				  intr->event_msi_vector_name, adapter->netdev);
+		intr->event_intr_idx = vector;
+
+	} else if (intr->type == VMXNET3_IT_MSI) {
+		adapter->num_rx_queues = 1;
 		err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
 				  adapter->netdev->name, adapter->netdev);
-	} else
+	} else {
 #endif
-	{
+		adapter->num_rx_queues = 1;
 		err = request_irq(adapter->pdev->irq, vmxnet3_intr,
 				  IRQF_SHARED, adapter->netdev->name,
 				  adapter->netdev);
+#ifdef CONFIG_PCI_MSI
 	}
-
-	if (err)
+#endif
+	intr->num_intrs = vector + 1;
+	if (err) {
 		printk(KERN_ERR "Failed to request irq %s (intr type:%d), error"
-		       ":%d\n", adapter->netdev->name, adapter->intr.type, err);
+		       ":%d\n", adapter->netdev->name, intr->type, err);
+	} else {
+		/* Number of rx queues will not change after this */
+		for (i = 0; i < adapter->num_rx_queues; i++) {
+			struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
+			rq->qid = i;
+			rq->qid2 = i + adapter->num_rx_queues;
+		}
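+		/* The qid/qid2 values assigned above tag ring 0 and ring 1
+		 * completions, letting vmxnet3_rq_rx_complete() recover the
+		 * ring index from rcd->rqID.
+		 */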
 
 
-	if (!err) {
-		int i;
 		/* init our intr settings */
-		for (i = 0; i < adapter->intr.num_intrs; i++)
-			adapter->intr.mod_levels[i] = UPT1_IML_ADAPTIVE;
-
-		/* next setup intr index for all intr sources */
-		adapter->tx_queue.comp_ring.intr_idx = 0;
-		adapter->rx_queue.comp_ring.intr_idx = 0;
-		adapter->intr.event_intr_idx = 0;
+		for (i = 0; i < intr->num_intrs; i++)
+			intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
+		if (adapter->intr.type != VMXNET3_IT_MSIX) {
+			adapter->intr.event_intr_idx = 0;
+			for (i = 0; i < adapter->num_tx_queues; i++)
+				adapter->tx_queue[i].comp_ring.intr_idx = 0;
+			adapter->rx_queue[0].comp_ring.intr_idx = 0;
+		}
 
 		printk(KERN_INFO "%s: intr type %u, mode %u, %u vectors "
-		       "allocated\n", adapter->netdev->name, adapter->intr.type,
-		       adapter->intr.mask_mode, adapter->intr.num_intrs);
+		       "allocated\n", adapter->netdev->name, intr->type,
+		       intr->mask_mode, intr->num_intrs);
 	}
 
 	return err;
@@ -1522,18 +1809,32 @@
 static void
 vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
 {
-	BUG_ON(adapter->intr.type == VMXNET3_IT_AUTO ||
-	       adapter->intr.num_intrs <= 0);
+	struct vmxnet3_intr *intr = &adapter->intr;
+	BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0);
 
-	switch (adapter->intr.type) {
+	switch (intr->type) {
 #ifdef CONFIG_PCI_MSI
 	case VMXNET3_IT_MSIX:
 	{
-		int i;
+		int i, vector = 0;
 
-		for (i = 0; i < adapter->intr.num_intrs; i++)
-			free_irq(adapter->intr.msix_entries[i].vector,
-				 adapter->netdev);
+		if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
+			for (i = 0; i < adapter->num_tx_queues; i++) {
+				free_irq(intr->msix_entries[vector++].vector,
+					 &(adapter->tx_queue[i]));
+				if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
+					break;
+			}
+		}
+
+		for (i = 0; i < adapter->num_rx_queues; i++) {
+			free_irq(intr->msix_entries[vector++].vector,
+				 &(adapter->rx_queue[i]));
+		}
+
+		free_irq(intr->msix_entries[vector].vector,
+			 adapter->netdev);
+		BUG_ON(vector >= intr->num_intrs);
 		break;
 	}
 #endif
@@ -1727,6 +2028,15 @@
 	kfree(new_table);
 }
 
+void
+vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter);
+}
+
 
 /*
  *   Set up driver_shared based on settings in adapter.
@@ -1774,40 +2084,72 @@
 	devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
 	devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
 	devRead->misc.queueDescLen = cpu_to_le32(
-				     sizeof(struct Vmxnet3_TxQueueDesc) +
-				     sizeof(struct Vmxnet3_RxQueueDesc));
+		adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
+		adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc));
 
 	/* tx queue settings */
-	BUG_ON(adapter->tx_queue.tx_ring.base == NULL);
-
-	devRead->misc.numTxQueues = 1;
-	tqc = &adapter->tqd_start->conf;
-	tqc->txRingBasePA   = cpu_to_le64(adapter->tx_queue.tx_ring.basePA);
-	tqc->dataRingBasePA = cpu_to_le64(adapter->tx_queue.data_ring.basePA);
-	tqc->compRingBasePA = cpu_to_le64(adapter->tx_queue.comp_ring.basePA);
-	tqc->ddPA           = cpu_to_le64(virt_to_phys(
-						adapter->tx_queue.buf_info));
-	tqc->txRingSize     = cpu_to_le32(adapter->tx_queue.tx_ring.size);
-	tqc->dataRingSize   = cpu_to_le32(adapter->tx_queue.data_ring.size);
-	tqc->compRingSize   = cpu_to_le32(adapter->tx_queue.comp_ring.size);
-	tqc->ddLen          = cpu_to_le32(sizeof(struct vmxnet3_tx_buf_info) *
-			      tqc->txRingSize);
-	tqc->intrIdx        = adapter->tx_queue.comp_ring.intr_idx;
+	devRead->misc.numTxQueues =  adapter->num_tx_queues;
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct vmxnet3_tx_queue	*tq = &adapter->tx_queue[i];
+		BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL);
+		tqc = &adapter->tqd_start[i].conf;
+		tqc->txRingBasePA   = cpu_to_le64(tq->tx_ring.basePA);
+		tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
+		tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
+		tqc->ddPA           = cpu_to_le64(virt_to_phys(tq->buf_info));
+		tqc->txRingSize     = cpu_to_le32(tq->tx_ring.size);
+		tqc->dataRingSize   = cpu_to_le32(tq->data_ring.size);
+		tqc->compRingSize   = cpu_to_le32(tq->comp_ring.size);
+		tqc->ddLen          = cpu_to_le32(
+					sizeof(struct vmxnet3_tx_buf_info) *
+					tqc->txRingSize);
+		tqc->intrIdx        = tq->comp_ring.intr_idx;
+	}
 
 	/* rx queue settings */
-	devRead->misc.numRxQueues = 1;
-	rqc = &adapter->rqd_start->conf;
-	rqc->rxRingBasePA[0] = cpu_to_le64(adapter->rx_queue.rx_ring[0].basePA);
-	rqc->rxRingBasePA[1] = cpu_to_le64(adapter->rx_queue.rx_ring[1].basePA);
-	rqc->compRingBasePA  = cpu_to_le64(adapter->rx_queue.comp_ring.basePA);
-	rqc->ddPA            = cpu_to_le64(virt_to_phys(
-						adapter->rx_queue.buf_info));
-	rqc->rxRingSize[0]   = cpu_to_le32(adapter->rx_queue.rx_ring[0].size);
-	rqc->rxRingSize[1]   = cpu_to_le32(adapter->rx_queue.rx_ring[1].size);
-	rqc->compRingSize    = cpu_to_le32(adapter->rx_queue.comp_ring.size);
-	rqc->ddLen           = cpu_to_le32(sizeof(struct vmxnet3_rx_buf_info) *
-			       (rqc->rxRingSize[0] + rqc->rxRingSize[1]));
-	rqc->intrIdx         = adapter->rx_queue.comp_ring.intr_idx;
+	devRead->misc.numRxQueues = adapter->num_rx_queues;
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct vmxnet3_rx_queue	*rq = &adapter->rx_queue[i];
+		rqc = &adapter->rqd_start[i].conf;
+		rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
+		rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
+		rqc->compRingBasePA  = cpu_to_le64(rq->comp_ring.basePA);
+		rqc->ddPA            = cpu_to_le64(virt_to_phys(
+							rq->buf_info));
+		rqc->rxRingSize[0]   = cpu_to_le32(rq->rx_ring[0].size);
+		rqc->rxRingSize[1]   = cpu_to_le32(rq->rx_ring[1].size);
+		rqc->compRingSize    = cpu_to_le32(rq->comp_ring.size);
+		rqc->ddLen           = cpu_to_le32(
+					sizeof(struct vmxnet3_rx_buf_info) *
+					(rqc->rxRingSize[0] +
+					 rqc->rxRingSize[1]));
+		rqc->intrIdx         = rq->comp_ring.intr_idx;
+	}
+
+#ifdef VMXNET3_RSS
+	memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf));
+
+	if (adapter->rss) {
+		struct UPT1_RSSConf *rssConf = adapter->rss_conf;
+		devRead->misc.uptFeatures |= UPT1_F_RSS;
+		devRead->misc.numRxQueues = adapter->num_rx_queues;
+		rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 |
+				    UPT1_RSS_HASH_TYPE_IPV4 |
+				    UPT1_RSS_HASH_TYPE_TCP_IPV6 |
+				    UPT1_RSS_HASH_TYPE_IPV6;
+		rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
+		rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
+		rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
+		get_random_bytes(&rssConf->hashKey[0], rssConf->hashKeySize);
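+		/* Spread flows round-robin across rx queues; e.g. with 4
+		 * queues the 32-entry indirection table reads 0,1,2,3,0,1,...
+		 */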
+		for (i = 0; i < rssConf->indTableSize; i++)
+			rssConf->indTable[i] = i % adapter->num_rx_queues;
+
+		devRead->rssConfDesc.confVer = 1;
+		devRead->rssConfDesc.confLen = sizeof(*rssConf);
+		devRead->rssConfDesc.confPA  = virt_to_phys(rssConf);
+	}
+
+#endif /* VMXNET3_RSS */
 
 	/* intr settings */
 	devRead->intrConf.autoMask = adapter->intr.mask_mode ==
@@ -1829,18 +2171,18 @@
 int
 vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
 {
-	int err;
+	int err, i;
 	u32 ret;
 
-	dev_dbg(&adapter->netdev->dev,
-		"%s: skb_buf_size %d, rx_buf_per_pkt %d, ring sizes"
-		" %u %u %u\n", adapter->netdev->name, adapter->skb_buf_size,
-		adapter->rx_buf_per_pkt, adapter->tx_queue.tx_ring.size,
-		adapter->rx_queue.rx_ring[0].size,
-		adapter->rx_queue.rx_ring[1].size);
+	dev_dbg(&adapter->netdev->dev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
+		" ring sizes %u %u %u\n", adapter->netdev->name,
+		adapter->skb_buf_size, adapter->rx_buf_per_pkt,
+		adapter->tx_queue[0].tx_ring.size,
+		adapter->rx_queue[0].rx_ring[0].size,
+		adapter->rx_queue[0].rx_ring[1].size);
 
-	vmxnet3_tq_init(&adapter->tx_queue, adapter);
-	err = vmxnet3_rq_init(&adapter->rx_queue, adapter);
+	vmxnet3_tq_init_all(adapter);
+	err = vmxnet3_rq_init_all(adapter);
 	if (err) {
 		printk(KERN_ERR "Failed to init rx queue for %s: error %d\n",
 		       adapter->netdev->name, err);
@@ -1870,10 +2212,15 @@
 		err = -EINVAL;
 		goto activate_err;
 	}
-	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_RXPROD,
-			       adapter->rx_queue.rx_ring[0].next2fill);
-	VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_RXPROD2,
-			       adapter->rx_queue.rx_ring[1].next2fill);
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		VMXNET3_WRITE_BAR0_REG(adapter,
+				VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN,
+				adapter->rx_queue[i].rx_ring[0].next2fill);
+		VMXNET3_WRITE_BAR0_REG(adapter, (VMXNET3_REG_RXPROD2 +
+				(i * VMXNET3_REG_ALIGN)),
+				adapter->rx_queue[i].rx_ring[1].next2fill);
+	}
 
 	/* Apply the rx filter settings last. */
 	vmxnet3_set_mc(adapter->netdev);
@@ -1883,8 +2230,8 @@
 	 * tx queue if the link is up.
 	 */
 	vmxnet3_check_link(adapter, true);
-
-	napi_enable(&adapter->napi);
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		napi_enable(&adapter->rx_queue[i].napi);
 	vmxnet3_enable_all_intrs(adapter);
 	clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
 	return 0;
@@ -1896,7 +2243,7 @@
 irq_err:
 rq_err:
 	/* free up buffers we allocated */
-	vmxnet3_rq_cleanup(&adapter->rx_queue, adapter);
+	vmxnet3_rq_cleanup_all(adapter);
 	return err;
 }
 
@@ -1911,6 +2258,7 @@
 int
 vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
 {
+	int i;
 	if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
 		return 0;
 
@@ -1919,13 +2267,14 @@
 			       VMXNET3_CMD_QUIESCE_DEV);
 	vmxnet3_disable_all_intrs(adapter);
 
-	napi_disable(&adapter->napi);
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		napi_disable(&adapter->rx_queue[i].napi);
 	netif_tx_disable(adapter->netdev);
 	adapter->link_speed = 0;
 	netif_carrier_off(adapter->netdev);
 
-	vmxnet3_tq_cleanup(&adapter->tx_queue, adapter);
-	vmxnet3_rq_cleanup(&adapter->rx_queue, adapter);
+	vmxnet3_tq_cleanup_all(adapter);
+	vmxnet3_rq_cleanup_all(adapter);
 	vmxnet3_free_irqs(adapter);
 	return 0;
 }
@@ -2047,7 +2396,9 @@
 static void
 vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
 {
-	size_t sz;
+	size_t sz, i, ring0_size, ring1_size, comp_size;
+	struct vmxnet3_rx_queue	*rq = &adapter->rx_queue[0];
 
 	if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
 				    VMXNET3_MAX_ETH_HDR_SIZE) {
@@ -2069,11 +2420,19 @@
 	 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
 	 */
 	sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
-	adapter->rx_queue.rx_ring[0].size = (adapter->rx_queue.rx_ring[0].size +
-					     sz - 1) / sz * sz;
-	adapter->rx_queue.rx_ring[0].size = min_t(u32,
-					    adapter->rx_queue.rx_ring[0].size,
-					    VMXNET3_RX_RING_MAX_SIZE / sz * sz);
+	ring0_size = adapter->rx_queue[0].rx_ring[0].size;
+	ring0_size = (ring0_size + sz - 1) / sz * sz;
+	ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
+			   sz * sz);
+	ring1_size = adapter->rx_queue[0].rx_ring[1].size;
+	comp_size = ring0_size + ring1_size;
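+	/* Worked example (assuming the driver's usual 32-entry ring
+	 * alignment): rx_buf_per_pkt = 3 gives sz = 96, so a requested ring0
+	 * size of 1024 rounds up to 1056 and is capped to the largest
+	 * multiple of 96 not above VMXNET3_RX_RING_MAX_SIZE.
+	 */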
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		rq = &adapter->rx_queue[i];
+		rq->rx_ring[0].size = ring0_size;
+		rq->rx_ring[1].size = ring1_size;
+		rq->comp_ring.size = comp_size;
+	}
 }
 
 
@@ -2081,29 +2440,53 @@
 vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
 		      u32 rx_ring_size, u32 rx_ring2_size)
 {
-	int err;
+	int err = 0, i;
 
-	adapter->tx_queue.tx_ring.size   = tx_ring_size;
-	adapter->tx_queue.data_ring.size = tx_ring_size;
-	adapter->tx_queue.comp_ring.size = tx_ring_size;
-	adapter->tx_queue.shared = &adapter->tqd_start->ctrl;
-	adapter->tx_queue.stopped = true;
-	err = vmxnet3_tq_create(&adapter->tx_queue, adapter);
-	if (err)
-		return err;
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct vmxnet3_tx_queue	*tq = &adapter->tx_queue[i];
+		tq->tx_ring.size   = tx_ring_size;
+		tq->data_ring.size = tx_ring_size;
+		tq->comp_ring.size = tx_ring_size;
+		tq->shared = &adapter->tqd_start[i].ctrl;
+		tq->stopped = true;
+		tq->adapter = adapter;
+		tq->qid = i;
+		err = vmxnet3_tq_create(tq, adapter);
+		/*
+		 * Too late to change num_tx_queues: we cannot proceed with
+		 * fewer tx queues than were requested.
+		 */
+		if (err)
+			goto queue_err;
+	}
 
-	adapter->rx_queue.rx_ring[0].size = rx_ring_size;
-	adapter->rx_queue.rx_ring[1].size = rx_ring2_size;
+	adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
+	adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
 	vmxnet3_adjust_rx_ring_size(adapter);
-	adapter->rx_queue.comp_ring.size  = adapter->rx_queue.rx_ring[0].size +
-					    adapter->rx_queue.rx_ring[1].size;
-	adapter->rx_queue.qid  = 0;
-	adapter->rx_queue.qid2 = 1;
-	adapter->rx_queue.shared = &adapter->rqd_start->ctrl;
-	err = vmxnet3_rq_create(&adapter->rx_queue, adapter);
-	if (err)
-		vmxnet3_tq_destroy(&adapter->tx_queue, adapter);
-
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
+		/* qid and qid2 for rx queues will be assigned later, when the
+		 * number of rx queues is finalized after allocating intrs.
+		 */
+		rq->shared = &adapter->rqd_start[i].ctrl;
+		rq->adapter = adapter;
+		err = vmxnet3_rq_create(rq, adapter);
+		if (err) {
+			if (i == 0) {
+				printk(KERN_ERR "Could not allocate any rx "
+				       "queues. Aborting.\n");
+				goto queue_err;
+			} else {
+				printk(KERN_INFO "Number of rx queues changed "
+				       "to %d.\n", i);
+				adapter->num_rx_queues = i;
+				err = 0;
+				break;
+			}
+		}
+	}
+	return err;
+queue_err:
+	vmxnet3_tq_destroy_all(adapter);
 	return err;
 }
 
@@ -2111,11 +2494,12 @@
 vmxnet3_open(struct net_device *netdev)
 {
 	struct vmxnet3_adapter *adapter;
-	int err;
+	int err, i;
 
 	adapter = netdev_priv(netdev);
 
-	spin_lock_init(&adapter->tx_queue.tx_lock);
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		spin_lock_init(&adapter->tx_queue[i].tx_lock);
 
 	err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE,
 				    VMXNET3_DEF_RX_RING_SIZE,
@@ -2130,8 +2514,8 @@
 	return 0;
 
 activate_err:
-	vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
-	vmxnet3_tq_destroy(&adapter->tx_queue, adapter);
+	vmxnet3_rq_destroy_all(adapter);
+	vmxnet3_tq_destroy_all(adapter);
 queue_err:
 	return err;
 }
@@ -2151,8 +2535,8 @@
 
 	vmxnet3_quiesce_dev(adapter);
 
-	vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
-	vmxnet3_tq_destroy(&adapter->tx_queue, adapter);
+	vmxnet3_rq_destroy_all(adapter);
+	vmxnet3_tq_destroy_all(adapter);
 
 	clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
 
@@ -2164,6 +2548,8 @@
 void
 vmxnet3_force_close(struct vmxnet3_adapter *adapter)
 {
+	int i;
+
 	/*
 	 * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
 	 * vmxnet3_close() will deadlock.
@@ -2171,7 +2557,8 @@
 	BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));
 
 	/* we need to enable NAPI, otherwise dev_close will deadlock */
-	napi_enable(&adapter->napi);
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		napi_enable(&adapter->rx_queue[i].napi);
 	dev_close(adapter->netdev);
 }
 
@@ -2202,14 +2589,11 @@
 		vmxnet3_reset_dev(adapter);
 
 		/* we need to re-create the rx queue based on the new mtu */
-		vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
+		vmxnet3_rq_destroy_all(adapter);
 		vmxnet3_adjust_rx_ring_size(adapter);
-		adapter->rx_queue.comp_ring.size  =
-					adapter->rx_queue.rx_ring[0].size +
-					adapter->rx_queue.rx_ring[1].size;
-		err = vmxnet3_rq_create(&adapter->rx_queue, adapter);
+		err = vmxnet3_rq_create_all(adapter);
 		if (err) {
-			printk(KERN_ERR "%s: failed to re-create rx queue,"
+			printk(KERN_ERR "%s: failed to re-create rx queues,"
 				" error %d. Closing it.\n", netdev->name, err);
 			goto out;
 		}
@@ -2274,6 +2658,55 @@
 	mac[5] = (tmp >> 8) & 0xff;
 }
 
+#ifdef CONFIG_PCI_MSI
+
+/*
+ * Enable MSI-X vectors.
+ * Returns:
+ *	0 on successful enabling of required vectors,
+ *	VMXNET3_LINUX_MIN_MSIX_VECT when only the minimum number of required
+ *	 vectors could be enabled,
+ *	otherwise the number of vectors which could be enabled (this number
+ *	 is smaller than VMXNET3_LINUX_MIN_MSIX_VECT)
+ */
+
+static int
+vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter,
+			     int vectors)
+{
+	int err = 0, vector_threshold;
+	vector_threshold = VMXNET3_LINUX_MIN_MSIX_VECT;
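+
+	/* Note: pci_enable_msix() returns 0 on success, a negative errno on
+	 * hard failure, or the number of vectors that could be allocated;
+	 * in the last case the loop retries with the three-vector minimum.
+	 */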
+
+	while (vectors >= vector_threshold) {
+		err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries,
+				      vectors);
+		if (!err) {
+			adapter->intr.num_intrs = vectors;
+			return 0;
+		} else if (err < 0) {
+			printk(KERN_ERR "Failed to enable MSI-X for %s, error"
+			       " %d\n",	adapter->netdev->name, err);
+			vectors = 0;
+		} else if (err < vector_threshold) {
+			break;
+		} else {
+			/* If the required number of MSI-X vectors cannot be
+			 * enabled, try enabling 3 of them: one each for tx,
+			 * rx and event.
+			 */
+			printk(KERN_ERR "Failed to enable %d MSI-X for %s, try"
+			       " %d instead\n", vectors, adapter->netdev->name,
+			       vector_threshold);
+			vectors = vector_threshold;
+		}
+	}
+
+	printk(KERN_INFO "Number of MSI-X interrupts which can be allocated "
+	       "is lower than the minimum threshold required.\n");
+	return err;
+}
+
+
+#endif /* CONFIG_PCI_MSI */
 
 static void
 vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
@@ -2293,16 +2726,47 @@
 
 #ifdef CONFIG_PCI_MSI
 	if (adapter->intr.type == VMXNET3_IT_MSIX) {
-		int err;
+		int vector, err = 0;
 
-		adapter->intr.msix_entries[0].entry = 0;
-		err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries,
-				      VMXNET3_LINUX_MAX_MSIX_VECT);
-		if (!err) {
-			adapter->intr.num_intrs = 1;
-			adapter->intr.type = VMXNET3_IT_MSIX;
+		adapter->intr.num_intrs = (adapter->share_intr ==
+					   VMXNET3_INTR_TXSHARE) ? 1 :
+					   adapter->num_tx_queues;
+		adapter->intr.num_intrs += (adapter->share_intr ==
+					   VMXNET3_INTR_BUDDYSHARE) ? 0 :
+					   adapter->num_rx_queues;
+		adapter->intr.num_intrs += 1;		/* for link event */
+
+		adapter->intr.num_intrs = (adapter->intr.num_intrs >
+					   VMXNET3_LINUX_MIN_MSIX_VECT
+					   ? adapter->intr.num_intrs :
+					   VMXNET3_LINUX_MIN_MSIX_VECT);
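+		/* e.g. with 4 tx and 4 rx queues this requests 9 vectors with
+		 * DONTSHARE (4 + 4 + 1), 6 with TXSHARE (1 + 4 + 1) and 5
+		 * with BUDDYSHARE (4 + 0 + 1).
+		 */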
+
+		for (vector = 0; vector < adapter->intr.num_intrs; vector++)
+			adapter->intr.msix_entries[vector].entry = vector;
+
+		err = vmxnet3_acquire_msix_vectors(adapter,
+						   adapter->intr.num_intrs);
+		/* If we cannot allocate one MSI-X vector per queue
+		 * then limit the number of rx queues to 1
+		 */
+		if (err == VMXNET3_LINUX_MIN_MSIX_VECT) {
+			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
+			    || adapter->num_rx_queues != 2) {
+				adapter->share_intr = VMXNET3_INTR_TXSHARE;
+				printk(KERN_ERR "Number of rx queues : 1\n");
+				adapter->num_rx_queues = 1;
+				adapter->intr.num_intrs =
+						VMXNET3_LINUX_MIN_MSIX_VECT;
+			}
 			return;
 		}
+		if (!err)
+			return;
+
+		/* If we cannot allocate MSI-X vectors use only one rx queue */
+		printk(KERN_INFO "Failed to enable MSI-X for %s, error %d. "
+		       "#rx queues : 1, try MSI\n", adapter->netdev->name, err);
+
 		adapter->intr.type = VMXNET3_IT_MSI;
 	}
 
@@ -2310,12 +2774,15 @@
 		int err;
 		err = pci_enable_msi(adapter->pdev);
 		if (!err) {
+			adapter->num_rx_queues = 1;
 			adapter->intr.num_intrs = 1;
 			return;
 		}
 	}
 #endif /* CONFIG_PCI_MSI */
 
+	adapter->num_rx_queues = 1;
+	printk(KERN_INFO "Using INTx interrupt, #Rx queues: 1.\n");
 	adapter->intr.type = VMXNET3_IT_INTX;
 
 	/* INT-X related setting */
@@ -2343,6 +2810,7 @@
 
 	printk(KERN_ERR "%s: tx hang\n", adapter->netdev->name);
 	schedule_work(&adapter->work);
+	netif_wake_queue(adapter->netdev);
 }
 
 
@@ -2399,8 +2867,29 @@
 	struct net_device *netdev;
 	struct vmxnet3_adapter *adapter;
 	u8 mac[ETH_ALEN];
+	int size;
+	int num_tx_queues;
+	int num_rx_queues;
 
-	netdev = alloc_etherdev(sizeof(struct vmxnet3_adapter));
+#ifdef VMXNET3_RSS
+	if (enable_mq)
+		num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
+				    (int)num_online_cpus());
+	else
+#endif
+		num_rx_queues = 1;
+
+	if (enable_mq)
+		num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
+				    (int)num_online_cpus());
+	else
+		num_tx_queues = 1;
+
+	netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
+				   max(num_tx_queues, num_rx_queues));
+	printk(KERN_INFO "# of Tx queues : %d, # of Rx queues : %d\n",
+	       num_tx_queues, num_rx_queues);
+
 	if (!netdev) {
 		printk(KERN_ERR "Failed to alloc ethernet device for adapter "
 			"%s\n",	pci_name(pdev));
@@ -2422,9 +2911,12 @@
 		goto err_alloc_shared;
 	}
 
-	adapter->tqd_start = pci_alloc_consistent(adapter->pdev,
-			     sizeof(struct Vmxnet3_TxQueueDesc) +
-			     sizeof(struct Vmxnet3_RxQueueDesc),
+	adapter->num_rx_queues = num_rx_queues;
+	adapter->num_tx_queues = num_tx_queues;
+
+	size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
+	size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
+	adapter->tqd_start = pci_alloc_consistent(adapter->pdev, size,
 			     &adapter->queue_desc_pa);
 
 	if (!adapter->tqd_start) {
@@ -2433,8 +2925,8 @@
 		err = -ENOMEM;
 		goto err_alloc_queue_desc;
 	}
-	adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start
-							    + 1);
+	adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
+							adapter->num_tx_queues);
 
 	adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL);
 	if (adapter->pm_conf == NULL) {
@@ -2444,6 +2936,17 @@
 		goto err_alloc_pm;
 	}
 
+#ifdef VMXNET3_RSS
+
+	adapter->rss_conf = kmalloc(sizeof(struct UPT1_RSSConf), GFP_KERNEL);
+	if (adapter->rss_conf == NULL) {
+		printk(KERN_ERR "Failed to allocate memory for %s\n",
+		       pci_name(pdev));
+		err = -ENOMEM;
+		goto err_alloc_rss;
+	}
+#endif /* VMXNET3_RSS */
+
 	err = vmxnet3_alloc_pci_resources(adapter, &dma64);
 	if (err < 0)
 		goto err_alloc_pci;
@@ -2471,18 +2974,48 @@
 	vmxnet3_declare_features(adapter, dma64);
 
 	adapter->dev_number = atomic_read(&devices_found);
+
+	adapter->share_intr = irq_share_mode;
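+	/* BUDDYSHARE pairs tx queue i with rx queue i on a single vector,
+	 * which only works when the two queue counts match.
+	 */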
+	if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE &&
+	    adapter->num_tx_queues != adapter->num_rx_queues)
+		adapter->share_intr = VMXNET3_INTR_DONTSHARE;
+
 	vmxnet3_alloc_intr_resources(adapter);
 
+#ifdef VMXNET3_RSS
+	if (adapter->num_rx_queues > 1 &&
+	    adapter->intr.type == VMXNET3_IT_MSIX) {
+		adapter->rss = true;
+		printk(KERN_INFO "RSS is enabled.\n");
+	} else {
+		adapter->rss = false;
+	}
+#endif
+
 	vmxnet3_read_mac_addr(adapter, mac);
 	memcpy(netdev->dev_addr,  mac, netdev->addr_len);
 
 	netdev->netdev_ops = &vmxnet3_netdev_ops;
-	netdev->watchdog_timeo = 5 * HZ;
 	vmxnet3_set_ethtool_ops(netdev);
+	netdev->watchdog_timeo = 5 * HZ;
 
 	INIT_WORK(&adapter->work, vmxnet3_reset_work);
 
-	netif_napi_add(netdev, &adapter->napi, vmxnet3_poll, 64);
+	if (adapter->intr.type == VMXNET3_IT_MSIX) {
+		int i;
+		for (i = 0; i < adapter->num_rx_queues; i++) {
+			netif_napi_add(adapter->netdev,
+				       &adapter->rx_queue[i].napi,
+				       vmxnet3_poll_rx_only, 64);
+		}
+	} else {
+		netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
+			       vmxnet3_poll, 64);
+	}
+
+	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
+	netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
+
 	SET_NETDEV_DEV(netdev, &pdev->dev);
 	err = register_netdev(netdev);
 
@@ -2502,11 +3035,14 @@
 err_ver:
 	vmxnet3_free_pci_resources(adapter);
 err_alloc_pci:
+#ifdef VMXNET3_RSS
+	kfree(adapter->rss_conf);
+err_alloc_rss:
+#endif
 	kfree(adapter->pm_conf);
 err_alloc_pm:
-	pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_TxQueueDesc) +
-			    sizeof(struct Vmxnet3_RxQueueDesc),
-			    adapter->tqd_start, adapter->queue_desc_pa);
+	pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
+			    adapter->queue_desc_pa);
 err_alloc_queue_desc:
 	pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
 			    adapter->shared, adapter->shared_pa);
@@ -2522,17 +3058,32 @@
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+	int size = 0;
+	int num_rx_queues;
 
-	flush_scheduled_work();
+#ifdef VMXNET3_RSS
+	if (enable_mq)
+		num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
+				    (int)num_online_cpus());
+	else
+#endif
+		num_rx_queues = 1;
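+	/* This mirrors the probe-time computation: adapter->num_rx_queues may
+	 * have been reduced since (e.g. on MSI-X failure), while the queue
+	 * descriptor area freed below was sized with the original value.
+	 */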
+
+	cancel_work_sync(&adapter->work);
 
 	unregister_netdev(netdev);
 
 	vmxnet3_free_intr_resources(adapter);
 	vmxnet3_free_pci_resources(adapter);
+#ifdef VMXNET3_RSS
+	kfree(adapter->rss_conf);
+#endif
 	kfree(adapter->pm_conf);
-	pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_TxQueueDesc) +
-			    sizeof(struct Vmxnet3_RxQueueDesc),
-			    adapter->tqd_start, adapter->queue_desc_pa);
+
+	size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
+	size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
+	pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
+			    adapter->queue_desc_pa);
 	pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
 			    adapter->shared, adapter->shared_pa);
 	free_netdev(netdev);
@@ -2563,7 +3114,7 @@
 	vmxnet3_free_intr_resources(adapter);
 
 	netif_device_detach(netdev);
-	netif_stop_queue(netdev);
+	netif_tx_stop_all_queues(netdev);
 
 	/* Create wake-up filters. */
 	pmConf = adapter->pm_conf;
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index b79070b..8e17fc8 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -151,44 +151,42 @@
 	struct UPT1_TxStats *devTxStats;
 	struct UPT1_RxStats *devRxStats;
 	struct net_device_stats *net_stats = &netdev->stats;
+	int i;
 
 	adapter = netdev_priv(netdev);
 
 	/* Collect the dev stats into the shared area */
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
 
-	/* Assuming that we have a single queue device */
-	devTxStats = &adapter->tqd_start->stats;
-	devRxStats = &adapter->rqd_start->stats;
-
-	/* Get access to the driver stats per queue */
-	drvTxStats = &adapter->tx_queue.stats;
-	drvRxStats = &adapter->rx_queue.stats;
-
 	memset(net_stats, 0, sizeof(*net_stats));
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		devTxStats = &adapter->tqd_start[i].stats;
+		drvTxStats = &adapter->tx_queue[i].stats;
+		net_stats->tx_packets += devTxStats->ucastPktsTxOK +
+					devTxStats->mcastPktsTxOK +
+					devTxStats->bcastPktsTxOK;
+		net_stats->tx_bytes += devTxStats->ucastBytesTxOK +
+				      devTxStats->mcastBytesTxOK +
+				      devTxStats->bcastBytesTxOK;
+		net_stats->tx_errors += devTxStats->pktsTxError;
+		net_stats->tx_dropped += drvTxStats->drop_total;
+	}
 
-	net_stats->rx_packets = devRxStats->ucastPktsRxOK +
-				devRxStats->mcastPktsRxOK +
-				devRxStats->bcastPktsRxOK;
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		devRxStats = &adapter->rqd_start[i].stats;
+		drvRxStats = &adapter->rx_queue[i].stats;
+		net_stats->rx_packets += devRxStats->ucastPktsRxOK +
+					devRxStats->mcastPktsRxOK +
+					devRxStats->bcastPktsRxOK;
 
-	net_stats->tx_packets = devTxStats->ucastPktsTxOK +
-				devTxStats->mcastPktsTxOK +
-				devTxStats->bcastPktsTxOK;
+		net_stats->rx_bytes += devRxStats->ucastBytesRxOK +
+				      devRxStats->mcastBytesRxOK +
+				      devRxStats->bcastBytesRxOK;
 
-	net_stats->rx_bytes = devRxStats->ucastBytesRxOK +
-			      devRxStats->mcastBytesRxOK +
-			      devRxStats->bcastBytesRxOK;
-
-	net_stats->tx_bytes = devTxStats->ucastBytesTxOK +
-			      devTxStats->mcastBytesTxOK +
-			      devTxStats->bcastBytesTxOK;
-
-	net_stats->rx_errors = devRxStats->pktsRxError;
-	net_stats->tx_errors = devTxStats->pktsTxError;
-	net_stats->rx_dropped = drvRxStats->drop_total;
-	net_stats->tx_dropped = drvTxStats->drop_total;
-	net_stats->multicast =  devRxStats->mcastPktsRxOK;
-
+		net_stats->rx_errors += devRxStats->pktsRxError;
+		net_stats->rx_dropped += drvRxStats->drop_total;
+		net_stats->multicast +=  devRxStats->mcastPktsRxOK;
+	}
 	return net_stats;
 }
 
@@ -307,24 +305,26 @@
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
 	u8 *base;
 	int i;
+	int j = 0;
 
 	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
 
 	/* this does assume each counter is 64-bit wide */
+	/* TODO: change this for multiple queues */
 
-	base = (u8 *)&adapter->tqd_start->stats;
+	base = (u8 *)&adapter->tqd_start[j].stats;
 	for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++)
 		*buf++ = *(u64 *)(base + vmxnet3_tq_dev_stats[i].offset);
 
-	base = (u8 *)&adapter->tx_queue.stats;
+	base = (u8 *)&adapter->tx_queue[j].stats;
 	for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++)
 		*buf++ = *(u64 *)(base + vmxnet3_tq_driver_stats[i].offset);
 
-	base = (u8 *)&adapter->rqd_start->stats;
+	base = (u8 *)&adapter->rqd_start[j].stats;
 	for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
 		*buf++ = *(u64 *)(base + vmxnet3_rq_dev_stats[i].offset);
 
-	base = (u8 *)&adapter->rx_queue.stats;
+	base = (u8 *)&adapter->rx_queue[j].stats;
 	for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++)
 		*buf++ = *(u64 *)(base + vmxnet3_rq_driver_stats[i].offset);
 
@@ -339,6 +339,7 @@
 {
 	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
 	u32 *buf = p;
+	int i = 0;
 
 	memset(p, 0, vmxnet3_get_regs_len(netdev));
 
@@ -347,28 +348,29 @@
 	/* Update vmxnet3_get_regs_len if we want to dump more registers */
 
 	/* make each ring use multiple of 16 bytes */
-	buf[0] = adapter->tx_queue.tx_ring.next2fill;
-	buf[1] = adapter->tx_queue.tx_ring.next2comp;
-	buf[2] = adapter->tx_queue.tx_ring.gen;
+	/* TODO: change this for multiple queues */
+	buf[0] = adapter->tx_queue[i].tx_ring.next2fill;
+	buf[1] = adapter->tx_queue[i].tx_ring.next2comp;
+	buf[2] = adapter->tx_queue[i].tx_ring.gen;
 	buf[3] = 0;
 
-	buf[4] = adapter->tx_queue.comp_ring.next2proc;
-	buf[5] = adapter->tx_queue.comp_ring.gen;
-	buf[6] = adapter->tx_queue.stopped;
+	buf[4] = adapter->tx_queue[i].comp_ring.next2proc;
+	buf[5] = adapter->tx_queue[i].comp_ring.gen;
+	buf[6] = adapter->tx_queue[i].stopped;
 	buf[7] = 0;
 
-	buf[8] = adapter->rx_queue.rx_ring[0].next2fill;
-	buf[9] = adapter->rx_queue.rx_ring[0].next2comp;
-	buf[10] = adapter->rx_queue.rx_ring[0].gen;
+	buf[8] = adapter->rx_queue[i].rx_ring[0].next2fill;
+	buf[9] = adapter->rx_queue[i].rx_ring[0].next2comp;
+	buf[10] = adapter->rx_queue[i].rx_ring[0].gen;
 	buf[11] = 0;
 
-	buf[12] = adapter->rx_queue.rx_ring[1].next2fill;
-	buf[13] = adapter->rx_queue.rx_ring[1].next2comp;
-	buf[14] = adapter->rx_queue.rx_ring[1].gen;
+	buf[12] = adapter->rx_queue[i].rx_ring[1].next2fill;
+	buf[13] = adapter->rx_queue[i].rx_ring[1].next2comp;
+	buf[14] = adapter->rx_queue[i].rx_ring[1].gen;
 	buf[15] = 0;
 
-	buf[16] = adapter->rx_queue.comp_ring.next2proc;
-	buf[17] = adapter->rx_queue.comp_ring.gen;
+	buf[16] = adapter->rx_queue[i].comp_ring.next2proc;
+	buf[17] = adapter->rx_queue[i].comp_ring.gen;
 	buf[18] = 0;
 	buf[19] = 0;
 }
@@ -435,8 +437,10 @@
 	param->rx_mini_max_pending = 0;
 	param->rx_jumbo_max_pending = 0;
 
-	param->rx_pending = adapter->rx_queue.rx_ring[0].size;
-	param->tx_pending = adapter->tx_queue.tx_ring.size;
+	param->rx_pending = adapter->rx_queue[0].rx_ring[0].size *
+			    adapter->num_rx_queues;
+	param->tx_pending = adapter->tx_queue[0].tx_ring.size *
+			    adapter->num_tx_queues;
 	param->rx_mini_pending = 0;
 	param->rx_jumbo_pending = 0;
 }
@@ -480,8 +484,8 @@
 							   sz) != 0)
 		return -EINVAL;
 
-	if (new_tx_ring_size == adapter->tx_queue.tx_ring.size &&
-			new_rx_ring_size == adapter->rx_queue.rx_ring[0].size) {
+	if (new_tx_ring_size == adapter->tx_queue[0].tx_ring.size &&
+	    new_rx_ring_size == adapter->rx_queue[0].rx_ring[0].size) {
 		return 0;
 	}
 
@@ -498,11 +502,12 @@
 
 		/* recreate the rx queue and the tx queue based on the
 		 * new sizes */
-		vmxnet3_tq_destroy(&adapter->tx_queue, adapter);
-		vmxnet3_rq_destroy(&adapter->rx_queue, adapter);
+		vmxnet3_tq_destroy_all(adapter);
+		vmxnet3_rq_destroy_all(adapter);
 
 		err = vmxnet3_create_queues(adapter, new_tx_ring_size,
 			new_rx_ring_size, VMXNET3_DEF_RX_RING_SIZE);
 		if (err) {
 			/* failed, most likely because of OOM, try default
 			 * size */
@@ -535,6 +540,66 @@
 }
 
 
+static int
+vmxnet3_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
+		  void *rules)
+{
+	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+	switch (info->cmd) {
+	case ETHTOOL_GRXRINGS:
+		info->data = adapter->num_rx_queues;
+		return 0;
+	}
+	return -EOPNOTSUPP;
+}
+
+#ifdef VMXNET3_RSS
+static int
+vmxnet3_get_rss_indir(struct net_device *netdev,
+		      struct ethtool_rxfh_indir *p)
+{
+	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+	struct UPT1_RSSConf *rssConf = adapter->rss_conf;
+	unsigned int n = min_t(unsigned int, p->size, rssConf->indTableSize);
+
+	p->size = rssConf->indTableSize;
+	while (n--)
+		p->ring_index[n] = rssConf->indTable[n];
+	return 0;
+}
+
+static int
+vmxnet3_set_rss_indir(struct net_device *netdev,
+		      const struct ethtool_rxfh_indir *p)
+{
+	unsigned int i;
+	struct vmxnet3_adapter *adapter = netdev_priv(netdev);
+	struct UPT1_RSSConf *rssConf = adapter->rss_conf;
+
+	if (p->size != rssConf->indTableSize)
+		return -EINVAL;
+	for (i = 0; i < rssConf->indTableSize; i++) {
+		/*
+		 * Return with error code if any of the queue indices
+		 * is out of range
+		 */
+		if (p->ring_index[i] < 0 ||
+		    p->ring_index[i] >= adapter->num_rx_queues)
+			return -EINVAL;
+	}
+
+	for (i = 0; i < rssConf->indTableSize; i++)
+		rssConf->indTable[i] = p->ring_index[i];
+
+	VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
+			       VMXNET3_CMD_UPDATE_RSSIDT);
+
+	return 0;
+}
+#endif
+
 static struct ethtool_ops vmxnet3_ethtool_ops = {
 	.get_settings      = vmxnet3_get_settings,
 	.get_drvinfo       = vmxnet3_get_drvinfo,
@@ -558,6 +623,11 @@
 	.get_ethtool_stats = vmxnet3_get_ethtool_stats,
 	.get_ringparam     = vmxnet3_get_ringparam,
 	.set_ringparam     = vmxnet3_set_ringparam,
+	.get_rxnfc         = vmxnet3_get_rxnfc,
+#ifdef VMXNET3_RSS
+	.get_rxfh_indir    = vmxnet3_get_rss_indir,
+	.set_rxfh_indir    = vmxnet3_set_rss_indir,
+#endif
 };
 
 void vmxnet3_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index edf2288..7fadeed 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -68,11 +68,15 @@
 /*
  * Version numbers
  */
-#define VMXNET3_DRIVER_VERSION_STRING   "1.0.14.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING   "1.0.16.0-k"
 
 /* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM      0x01000E00
+#define VMXNET3_DRIVER_VERSION_NUM      0x01001000
 
+#if defined(CONFIG_PCI_MSI)
+	/* RSS only makes sense if MSI-X is supported. */
+	#define VMXNET3_RSS
+#endif
 
 /*
  * Capabilities
@@ -218,16 +222,19 @@
 };
 
 struct vmxnet3_tx_queue {
+	char			name[IFNAMSIZ + 8]; /* To identify interrupt */
+	struct vmxnet3_adapter		*adapter;
 	spinlock_t                      tx_lock;
 	struct vmxnet3_cmd_ring         tx_ring;
-	struct vmxnet3_tx_buf_info     *buf_info;
+	struct vmxnet3_tx_buf_info      *buf_info;
 	struct vmxnet3_tx_data_ring     data_ring;
 	struct vmxnet3_comp_ring        comp_ring;
-	struct Vmxnet3_TxQueueCtrl            *shared;
+	struct Vmxnet3_TxQueueCtrl      *shared;
 	struct vmxnet3_tq_driver_stats  stats;
 	bool                            stopped;
 	int                             num_stop;  /* # of times the queue is
 						    * stopped */
+	int				qid;
 } __attribute__((__aligned__(SMP_CACHE_BYTES)));
 
 enum vmxnet3_rx_buf_type {
@@ -259,6 +266,9 @@
 };
 
 struct vmxnet3_rx_queue {
+	char			name[IFNAMSIZ + 8]; /* To identify interrupt */
+	struct vmxnet3_adapter	  *adapter;
+	struct napi_struct        napi;
 	struct vmxnet3_cmd_ring   rx_ring[2];
 	struct vmxnet3_comp_ring  comp_ring;
 	struct vmxnet3_rx_ctx     rx_ctx;
@@ -271,7 +281,16 @@
 	struct vmxnet3_rq_driver_stats  stats;
 } __attribute__((__aligned__(SMP_CACHE_BYTES)));
 
-#define VMXNET3_LINUX_MAX_MSIX_VECT     1
+#define VMXNET3_DEVICE_MAX_TX_QUEUES 8
+#define VMXNET3_DEVICE_MAX_RX_QUEUES 8   /* Keep this value as a power of 2 */
+
+/* Should be less than UPT1_RSS_MAX_IND_TABLE_SIZE */
+#define VMXNET3_RSS_IND_TABLE_SIZE (VMXNET3_DEVICE_MAX_RX_QUEUES * 4)
+
+#define VMXNET3_LINUX_MAX_MSIX_VECT     (VMXNET3_DEVICE_MAX_TX_QUEUES + \
+					 VMXNET3_DEVICE_MAX_RX_QUEUES + 1)
+#define VMXNET3_LINUX_MIN_MSIX_VECT     3    /* 1 for each : tx, rx and event */
+
 
 struct vmxnet3_intr {
 	enum vmxnet3_intr_mask_mode  mask_mode;
@@ -279,27 +298,32 @@
 	u8  num_intrs;			/* # of intr vectors */
 	u8  event_intr_idx;		/* idx of the intr vector for event */
 	u8  mod_levels[VMXNET3_LINUX_MAX_MSIX_VECT]; /* moderation level */
+	char	event_msi_vector_name[IFNAMSIZ + 11];
 #ifdef CONFIG_PCI_MSI
 	struct msix_entry msix_entries[VMXNET3_LINUX_MAX_MSIX_VECT];
 #endif
 };
 
+/* Interrupt sharing schemes, share_intr */
+#define VMXNET3_INTR_BUDDYSHARE 0    /* Corresponding tx,rx queues share irq */
+#define VMXNET3_INTR_TXSHARE 1	     /* All tx queues share one irq */
+#define VMXNET3_INTR_DONTSHARE 2     /* each queue has its own irq */
+
+
 #define VMXNET3_STATE_BIT_RESETTING   0
 #define VMXNET3_STATE_BIT_QUIESCED    1
 struct vmxnet3_adapter {
-	struct vmxnet3_tx_queue         tx_queue;
-	struct vmxnet3_rx_queue         rx_queue;
-	struct napi_struct              napi;
-	struct vlan_group              *vlan_grp;
-
-	struct vmxnet3_intr             intr;
-
-	struct Vmxnet3_DriverShared    *shared;
-	struct Vmxnet3_PMConf          *pm_conf;
-	struct Vmxnet3_TxQueueDesc     *tqd_start;     /* first tx queue desc */
-	struct Vmxnet3_RxQueueDesc     *rqd_start;     /* first rx queue desc */
-	struct net_device              *netdev;
-	struct pci_dev                 *pdev;
+	struct vmxnet3_tx_queue		tx_queue[VMXNET3_DEVICE_MAX_TX_QUEUES];
+	struct vmxnet3_rx_queue		rx_queue[VMXNET3_DEVICE_MAX_RX_QUEUES];
+	struct vlan_group		*vlan_grp;
+	struct vmxnet3_intr		intr;
+	struct Vmxnet3_DriverShared	*shared;
+	struct Vmxnet3_PMConf		*pm_conf;
+	struct Vmxnet3_TxQueueDesc	*tqd_start;     /* all tx queue desc */
+	struct Vmxnet3_RxQueueDesc	*rqd_start;	/* all rx queue desc */
+	struct net_device		*netdev;
+	struct net_device_stats		net_stats;
+	struct pci_dev			*pdev;
 
 	u8			__iomem *hw_addr0; /* for BAR 0 */
 	u8			__iomem *hw_addr1; /* for BAR 1 */
@@ -308,6 +332,12 @@
 	bool				rxcsum;
 	bool				lro;
 	bool				jumbo_frame;
+#ifdef VMXNET3_RSS
+	struct UPT1_RSSConf		*rss_conf;
+	bool				rss;
+#endif
+	u32				num_rx_queues;
+	u32				num_tx_queues;
 
 	/* rx buffer related */
 	unsigned			skb_buf_size;
@@ -327,6 +357,7 @@
 	unsigned long  state;    /* VMXNET3_STATE_BIT_xxx */
 
 	int dev_number;
+	int share_intr;
 };
 
 #define VMXNET3_WRITE_BAR0_REG(adapter, reg, val)  \
@@ -366,12 +397,10 @@
 vmxnet3_reset_dev(struct vmxnet3_adapter *adapter);
 
 void
-vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
-		   struct vmxnet3_adapter *adapter);
+vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter);
 
 void
-vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
-		   struct vmxnet3_adapter *adapter);
+vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter);
 
 int
 vmxnet3_create_queues(struct vmxnet3_adapter *adapter,
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index 906a3ca3..01c05f5 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -19,343 +19,95 @@
 
 #include "vxge-traffic.h"
 #include "vxge-config.h"
+#include "vxge-main.h"
 
-static enum vxge_hw_status
-__vxge_hw_fifo_create(
-	struct __vxge_hw_vpath_handle *vpath_handle,
-	struct vxge_hw_fifo_attr *attr);
-
-static enum vxge_hw_status
-__vxge_hw_fifo_abort(
-	struct __vxge_hw_fifo *fifoh);
-
-static enum vxge_hw_status
-__vxge_hw_fifo_reset(
-	struct __vxge_hw_fifo *ringh);
-
-static enum vxge_hw_status
-__vxge_hw_fifo_delete(
-	struct __vxge_hw_vpath_handle *vpath_handle);
-
-static struct __vxge_hw_blockpool_entry *
-__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *hldev,
-			u32 size);
-
-static void
-__vxge_hw_blockpool_block_free(struct __vxge_hw_device *hldev,
-			struct __vxge_hw_blockpool_entry *entry);
-
-static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
-					void *block_addr,
-					u32 length,
-					struct pci_dev *dma_h,
-					struct pci_dev *acc_handle);
-
-static enum vxge_hw_status
-__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
-			struct __vxge_hw_blockpool  *blockpool,
-			u32 pool_size,
-			u32 pool_max);
-
-static void
-__vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool  *blockpool);
-
-static void *
-__vxge_hw_blockpool_malloc(struct __vxge_hw_device *hldev,
-			u32 size,
-			struct vxge_hw_mempool_dma *dma_object);
-
-static void
-__vxge_hw_blockpool_free(struct __vxge_hw_device *hldev,
-			void *memblock,
-			u32 size,
-			struct vxge_hw_mempool_dma *dma_object);
-
-
-static struct __vxge_hw_channel*
-__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
-			enum __vxge_hw_channel_type type, u32 length,
-			u32 per_dtr_space, void *userdata);
-
-static void
-__vxge_hw_channel_free(
-	struct __vxge_hw_channel *channel);
-
-static enum vxge_hw_status
-__vxge_hw_channel_initialize(
-	struct __vxge_hw_channel *channel);
-
-static enum vxge_hw_status
-__vxge_hw_channel_reset(
-	struct __vxge_hw_channel *channel);
-
-static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp);
-
-static enum vxge_hw_status
-__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config);
-
-static enum vxge_hw_status
-__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config);
-
-static void
-__vxge_hw_device_id_get(struct __vxge_hw_device *hldev);
-
-static void
-__vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev);
-
-static enum vxge_hw_status
-__vxge_hw_vpath_card_info_get(
-	u32 vp_id,
-	struct vxge_hw_vpath_reg __iomem *vpath_reg,
-	struct vxge_hw_device_hw_info *hw_info);
-
-static enum vxge_hw_status
-__vxge_hw_device_initialize(struct __vxge_hw_device *hldev);
-
-static void
-__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev);
-
-static enum vxge_hw_status
-__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev);
-
-static enum vxge_hw_status
-__vxge_hw_device_register_poll(
-	void __iomem	*reg,
-	u64 mask, u32 max_millis);
-
-static inline enum vxge_hw_status
-__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
-			  u64 mask, u32 max_millis)
-{
-	__vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
-	wmb();
-
-	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
-	wmb();
-
-	return  __vxge_hw_device_register_poll(addr, mask, max_millis);
+#define VXGE_HW_VPATH_STATS_PIO_READ(offset) {				\
+	status = __vxge_hw_vpath_stats_access(vpath,			\
+					      VXGE_HW_STATS_OP_READ,	\
+					      offset,			\
+					      &val64);			\
+	if (status != VXGE_HW_OK)					\
+		return status;						\
 }
 
-static struct vxge_hw_mempool*
-__vxge_hw_mempool_create(struct __vxge_hw_device *devh, u32 memblock_size,
-			 u32 item_size,	u32 private_size, u32 items_initial,
-			 u32 items_max,	struct vxge_hw_mempool_cbs *mp_callback,
-			 void *userdata);
-static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool);
-
-static enum vxge_hw_status
-__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
-			  struct vxge_hw_vpath_stats_hw_info *hw_stats);
-
-static enum vxge_hw_status
-vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vpath_handle);
-
-static enum vxge_hw_status
-__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg);
-
-static u64
-__vxge_hw_vpath_pci_func_mode_get(u32  vp_id,
-				  struct vxge_hw_vpath_reg __iomem *vpath_reg);
-
-static u32
-__vxge_hw_vpath_func_id_get(u32 vp_id, struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg);
-
-static enum vxge_hw_status
-__vxge_hw_vpath_addr_get(u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
-			 u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN]);
-
-static enum vxge_hw_status
-__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath);
-
-
-static enum vxge_hw_status
-__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *devh, u32 vp_id);
-
-static enum vxge_hw_status
-__vxge_hw_vpath_fw_ver_get(u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
-			   struct vxge_hw_device_hw_info *hw_info);
-
-static enum vxge_hw_status
-__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *devh, u32 vp_id);
-
 static void
-__vxge_hw_vp_terminate(struct __vxge_hw_device *devh, u32 vp_id);
-
-static enum vxge_hw_status
-__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
-			     u32 operation, u32 offset,	u64 *stat);
-
-static enum vxge_hw_status
-__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath	*vpath,
-				  struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats);
-
-static enum vxge_hw_status
-__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath	*vpath,
-				  struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats);
-
-/*
- * __vxge_hw_channel_allocate - Allocate memory for channel
- * This function allocates required memory for the channel and various arrays
- * in the channel
- */
-struct __vxge_hw_channel*
-__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
-			   enum __vxge_hw_channel_type type,
-	u32 length, u32 per_dtr_space, void *userdata)
+vxge_hw_vpath_set_zero_rx_frm_len(struct vxge_hw_vpath_reg __iomem *vp_reg)
 {
-	struct __vxge_hw_channel *channel;
-	struct __vxge_hw_device *hldev;
-	int size = 0;
-	u32 vp_id;
+	u64 val64;
 
-	hldev = vph->vpath->hldev;
-	vp_id = vph->vpath->vp_id;
-
-	switch (type) {
-	case VXGE_HW_CHANNEL_TYPE_FIFO:
-		size = sizeof(struct __vxge_hw_fifo);
-		break;
-	case VXGE_HW_CHANNEL_TYPE_RING:
-		size = sizeof(struct __vxge_hw_ring);
-		break;
-	default:
-		break;
-	}
-
-	channel = kzalloc(size, GFP_KERNEL);
-	if (channel == NULL)
-		goto exit0;
-	INIT_LIST_HEAD(&channel->item);
-
-	channel->common_reg = hldev->common_reg;
-	channel->first_vp_id = hldev->first_vp_id;
-	channel->type = type;
-	channel->devh = hldev;
-	channel->vph = vph;
-	channel->userdata = userdata;
-	channel->per_dtr_space = per_dtr_space;
-	channel->length = length;
-	channel->vp_id = vp_id;
-
-	channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
-	if (channel->work_arr == NULL)
-		goto exit1;
-
-	channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
-	if (channel->free_arr == NULL)
-		goto exit1;
-	channel->free_ptr = length;
-
-	channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
-	if (channel->reserve_arr == NULL)
-		goto exit1;
-	channel->reserve_ptr = length;
-	channel->reserve_top = 0;
-
-	channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
-	if (channel->orig_arr == NULL)
-		goto exit1;
-
-	return channel;
-exit1:
-	__vxge_hw_channel_free(channel);
-
-exit0:
-	return NULL;
+	val64 = readq(&vp_reg->rxmac_vcfg0);
+	val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
+	writeq(val64, &vp_reg->rxmac_vcfg0);
+	val64 = readq(&vp_reg->rxmac_vcfg0);
 }
 
 /*
- * __vxge_hw_channel_free - Free memory allocated for channel
- * This function deallocates memory from the channel and various arrays
- * in the channel
+ * vxge_hw_vpath_wait_receive_idle - Wait for Rx to become idle
  */
-void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
+int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id)
 {
-	kfree(channel->work_arr);
-	kfree(channel->free_arr);
-	kfree(channel->reserve_arr);
-	kfree(channel->orig_arr);
-	kfree(channel);
-}
-
-/*
- * __vxge_hw_channel_initialize - Initialize a channel
- * This function initializes a channel by properly setting the
- * various references
- */
-enum vxge_hw_status
-__vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
-{
-	u32 i;
+	struct vxge_hw_vpath_reg __iomem *vp_reg;
 	struct __vxge_hw_virtualpath *vpath;
+	u64 val64, rxd_count, rxd_spat;
+	int count = 0, total_count = 0;
 
-	vpath = channel->vph->vpath;
+	vpath = &hldev->virtual_paths[vp_id];
+	vp_reg = vpath->vp_reg;
 
-	if ((channel->reserve_arr != NULL) && (channel->orig_arr != NULL)) {
-		for (i = 0; i < channel->length; i++)
-			channel->orig_arr[i] = channel->reserve_arr[i];
-	}
+	vxge_hw_vpath_set_zero_rx_frm_len(vp_reg);
 
-	switch (channel->type) {
-	case VXGE_HW_CHANNEL_TYPE_FIFO:
-		vpath->fifoh = (struct __vxge_hw_fifo *)channel;
-		channel->stats = &((struct __vxge_hw_fifo *)
-				channel)->stats->common_stats;
-		break;
-	case VXGE_HW_CHANNEL_TYPE_RING:
-		vpath->ringh = (struct __vxge_hw_ring *)channel;
-		channel->stats = &((struct __vxge_hw_ring *)
-				channel)->stats->common_stats;
-		break;
-	default:
-		break;
-	}
+	/* Check that the ring controller for this vpath has enough free RxDs
+	 * to send frames to the host.  This is done by reading the
+	 * PRC_RXD_DOORBELL_VPn register and comparing the read value to the
+	 * RXD_SPAT value for the vpath.
+	 */
+	val64 = readq(&vp_reg->prc_cfg6);
+	rxd_spat = VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val64) + 1;
+	/* Use a factor of 2 when comparing rxd_count against rxd_spat for some
+	 * leg room.
+	 */
+	rxd_spat *= 2;
 
-	return VXGE_HW_OK;
+	do {
+		mdelay(1);
+
+		rxd_count = readq(&vp_reg->prc_rxd_doorbell);
+
+		/* Check that the ring controller for this vpath does
+		 * not have any frame in its pipeline.
+		 */
+		val64 = readq(&vp_reg->frm_in_progress_cnt);
+		if ((rxd_count <= rxd_spat) || (val64 > 0))
+			count = 0;
+		else
+			count++;
+		total_count++;
+	} while ((count < VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT) &&
+			(total_count < VXGE_HW_MAX_POLLING_COUNT));
+
+	if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
+		printk(KERN_ALERT "%s: still receiving traffic, aborting wait\n",
+			__func__);
+
+	return total_count;
 }
 
-/*
- * __vxge_hw_channel_reset - Resets a channel
- * This function resets a channel by properly setting the various references
+/* vxge_hw_device_wait_receive_idle - This function waits until all frames
+ * stored in the frame buffer for each vpath assigned to the given
+ * function (hldev) have been sent to the host.
  */
-enum vxge_hw_status
-__vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
+void vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev)
 {
-	u32 i;
+	int i, total_count = 0;
 
-	for (i = 0; i < channel->length; i++) {
-		if (channel->reserve_arr != NULL)
-			channel->reserve_arr[i] = channel->orig_arr[i];
-		if (channel->free_arr != NULL)
-			channel->free_arr[i] = NULL;
-		if (channel->work_arr != NULL)
-			channel->work_arr[i] = NULL;
+	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
+		if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
+			continue;
+
+		total_count += vxge_hw_vpath_wait_receive_idle(hldev, i);
+		if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
+			break;
 	}
-	channel->free_ptr = channel->length;
-	channel->reserve_ptr = channel->length;
-	channel->reserve_top = 0;
-	channel->post_index = 0;
-	channel->compl_index = 0;
-
-	return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_device_pci_e_init
- * Initialize certain PCI/PCI-X configuration registers
- * with recommended values. Save config space for future hw resets.
- */
-void
-__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
-{
-	u16 cmd = 0;
-
-	/* Set the PErr Repconse bit and SERR in PCI command register. */
-	pci_read_config_word(hldev->pdev, PCI_COMMAND, &cmd);
-	cmd |= 0x140;
-	pci_write_config_word(hldev->pdev, PCI_COMMAND, cmd);
-
-	pci_save_state(hldev->pdev);
 }
 
 /*
@@ -390,7 +142,360 @@
 	return ret;
 }
 
- /* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
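+/*
+ * __vxge_hw_pio_mem_write64 - Write a 64-bit value as two 32-bit PIO
+ * writes, lower word first, with write barriers in between, then poll
+ * the register via __vxge_hw_device_register_poll for completion.
+ */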
+static inline enum vxge_hw_status
+__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
+			  u64 mask, u32 max_millis)
+{
+	__vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
+	wmb();
+	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
+	wmb();
+
+	return __vxge_hw_device_register_poll(addr, mask, max_millis);
+}
+
+static enum vxge_hw_status
+vxge_hw_vpath_fw_api(struct __vxge_hw_virtualpath *vpath, u32 action,
+		     u32 fw_memo, u32 offset, u64 *data0, u64 *data1,
+		     u64 *steer_ctrl)
+{
+	struct vxge_hw_vpath_reg __iomem *vp_reg;
+	enum vxge_hw_status status;
+	u64 val64;
+	u32 retry = 0, max_retry = 100;
+
+	vp_reg = vpath->vp_reg;
+
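+	/* When the vpath is open, firmware accesses are serialized by the
+	 * per-vpath lock, and the retry count is kept small so the lock is
+	 * not held for long.
+	 */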
+	if (vpath->vp_open) {
+		max_retry = 3;
+		spin_lock(&vpath->lock);
+	}
+
+	writeq(*data0, &vp_reg->rts_access_steer_data0);
+	writeq(*data1, &vp_reg->rts_access_steer_data1);
+	wmb();
+
+	val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
+		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(fw_memo) |
+		VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset) |
+		VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
+		*steer_ctrl;
+
+	status = __vxge_hw_pio_mem_write64(val64,
+					   &vp_reg->rts_access_steer_ctrl,
+					   VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
+					   VXGE_HW_DEF_DEVICE_POLL_MILLIS);
+
+	/* The __vxge_hw_device_register_poll can udelay for a significant
+	 * amount of time, blocking other processes from the CPU.  If it
+	 * delays for ~5secs, an NMI error can occur.  A way around this is
+	 * to give up the processor via msleep, but this is not allowed while
+	 * under lock.  So, only allow it to sleep for ~4secs if open.
+	 * Otherwise, delay for 1sec and sleep for 20ms between polls until
+	 * the firmware operation has completed or timed out.
+	 */
+	while ((status != VXGE_HW_OK) && retry++ < max_retry) {
+		if (!vpath->vp_open)
+			msleep(20);
+		status = __vxge_hw_device_register_poll(
+					&vp_reg->rts_access_steer_ctrl,
+					VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
+					VXGE_HW_DEF_DEVICE_POLL_MILLIS);
+	}
+
+	if (status != VXGE_HW_OK)
+		goto out;
+
+	val64 = readq(&vp_reg->rts_access_steer_ctrl);
+	if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
+		*data0 = readq(&vp_reg->rts_access_steer_data0);
+		*data1 = readq(&vp_reg->rts_access_steer_data1);
+		*steer_ctrl = val64;
+	} else
+		status = VXGE_HW_FAIL;
+
+out:
+	if (vpath->vp_open)
+		spin_unlock(&vpath->lock);
+	return status;
+}
+
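+/*
+ * vxge_hw_upgrade_read_version - Read the firmware version through the
+ * FW-upgrade memo API and return its major, minor and build numbers.
+ */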
+enum vxge_hw_status
+vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
+			     u32 *minor, u32 *build)
+{
+	u64 data0 = 0, data1 = 0, steer_ctrl = 0;
+	struct __vxge_hw_virtualpath *vpath;
+	enum vxge_hw_status status;
+
+	vpath = &hldev->virtual_paths[hldev->first_vp_id];
+
+	status = vxge_hw_vpath_fw_api(vpath,
+				      VXGE_HW_FW_UPGRADE_ACTION,
+				      VXGE_HW_FW_UPGRADE_MEMO,
+				      VXGE_HW_FW_UPGRADE_OFFSET_READ,
+				      &data0, &data1, &steer_ctrl);
+	if (status != VXGE_HW_OK)
+		return status;
+
+	*major = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
+	*minor = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
+	*build = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);
+
+	return status;
+}
+
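+/*
+ * vxge_hw_flash_fw - Commit the transferred firmware image to flash.
+ * The low 7 bits of the returned action field signal success (1) or
+ * failure.
+ */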
+enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev)
+{
+	u64 data0 = 0, data1 = 0, steer_ctrl = 0;
+	struct __vxge_hw_virtualpath *vpath;
+	enum vxge_hw_status status;
+	u32 ret;
+
+	vpath = &hldev->virtual_paths[hldev->first_vp_id];
+
+	status = vxge_hw_vpath_fw_api(vpath,
+				      VXGE_HW_FW_UPGRADE_ACTION,
+				      VXGE_HW_FW_UPGRADE_MEMO,
+				      VXGE_HW_FW_UPGRADE_OFFSET_COMMIT,
+				      &data0, &data1, &steer_ctrl);
+	if (status != VXGE_HW_OK) {
+		vxge_debug_init(VXGE_ERR, "%s: FW upgrade failed", __func__);
+		goto exit;
+	}
+
+	ret = VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(steer_ctrl) & 0x7F;
+	if (ret != 1) {
+		vxge_debug_init(VXGE_ERR, "%s: FW commit failed with error %d",
+				__func__, ret);
+		status = VXGE_HW_FAIL;
+	}
+
+exit:
+	return status;
+}
+
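+/*
+ * vxge_update_fw_image - Transfer a firmware image to the adapter.
+ * Sends the upgrade-start command, then streams the image 16 bytes
+ * (two 64-bit words) at a time, acting on the skip/done/error codes
+ * the firmware returns after each block.
+ */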
+enum vxge_hw_status
+vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *fwdata, int size)
+{
+	u64 data0 = 0, data1 = 0, steer_ctrl = 0;
+	struct __vxge_hw_virtualpath *vpath;
+	enum vxge_hw_status status;
+	int ret_code, sec_code;
+
+	vpath = &hldev->virtual_paths[hldev->first_vp_id];
+
+	/* send upgrade start command */
+	status = vxge_hw_vpath_fw_api(vpath,
+				      VXGE_HW_FW_UPGRADE_ACTION,
+				      VXGE_HW_FW_UPGRADE_MEMO,
+				      VXGE_HW_FW_UPGRADE_OFFSET_START,
+				      &data0, &data1, &steer_ctrl);
+	if (status != VXGE_HW_OK) {
+		vxge_debug_init(VXGE_ERR, "%s: Upgrade start cmd failed",
+				__func__);
+		return status;
+	}
+
+	/* Transfer fw image to adapter 16 bytes at a time */
+	for (; size > 0; size -= VXGE_HW_FW_UPGRADE_BLK_SIZE) {
+		steer_ctrl = 0;
+
+		/* The next 128bits of fwdata to be loaded onto the adapter */
+		data0 = *((u64 *)fwdata);
+		data1 = *((u64 *)fwdata + 1);
+
+		status = vxge_hw_vpath_fw_api(vpath,
+					      VXGE_HW_FW_UPGRADE_ACTION,
+					      VXGE_HW_FW_UPGRADE_MEMO,
+					      VXGE_HW_FW_UPGRADE_OFFSET_SEND,
+					      &data0, &data1, &steer_ctrl);
+		if (status != VXGE_HW_OK) {
+			vxge_debug_init(VXGE_ERR, "%s: Upgrade send failed",
+					__func__);
+			goto out;
+		}
+
+		ret_code = VXGE_HW_UPGRADE_GET_RET_ERR_CODE(data0);
+		switch (ret_code) {
+		case VXGE_HW_FW_UPGRADE_OK:
+			/* All OK, send next 16 bytes. */
+			break;
+		case VXGE_FW_UPGRADE_BYTES2SKIP:
+			/* skip bytes in the stream */
+			fwdata += (data0 >> 8) & 0xFFFFFFFF;
+			break;
+		case VXGE_HW_FW_UPGRADE_DONE:
+			goto out;
+		case VXGE_HW_FW_UPGRADE_ERR:
+			sec_code = VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(data0);
+			switch (sec_code) {
+			case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1:
+			case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7:
+				printk(KERN_ERR
+				       "corrupted data from .ncf file\n");
+				break;
+			case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3:
+			case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4:
+			case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5:
+			case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6:
+			case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8:
+				printk(KERN_ERR "invalid .ncf file\n");
+				break;
+			case VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW:
+				printk(KERN_ERR "buffer overflow\n");
+				break;
+			case VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH:
+				printk(KERN_ERR "failed to flash the image\n");
+				break;
+			case VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN:
+				printk(KERN_ERR
+				       "generic error. Unknown error type\n");
+				break;
+			default:
+				printk(KERN_ERR "Unknown error of type %d\n",
+				       sec_code);
+				break;
+			}
+			status = VXGE_HW_FAIL;
+			goto out;
+		default:
+			printk(KERN_ERR "Unknown FW error: %d\n", ret_code);
+			status = VXGE_HW_FAIL;
+			goto out;
+		}
+		/* point to next 16 bytes */
+		fwdata += VXGE_HW_FW_UPGRADE_BLK_SIZE;
+	}
+out:
+	return status;
+}
+
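+/*
+ * vxge_hw_vpath_eprom_img_ver_get - Read the validity, index, type and
+ * revision of each of the adapter's EPROM images.
+ */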
+enum vxge_hw_status
+vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
+				struct eprom_image *img)
+{
+	u64 data0 = 0, data1 = 0, steer_ctrl = 0;
+	struct __vxge_hw_virtualpath *vpath;
+	enum vxge_hw_status status;
+	int i;
+
+	vpath = &hldev->virtual_paths[hldev->first_vp_id];
+
+	for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
+		data0 = VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(i);
+		data1 = steer_ctrl = 0;
+
+		status = vxge_hw_vpath_fw_api(vpath,
+			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
+			VXGE_HW_FW_API_GET_EPROM_REV,
+			0, &data0, &data1, &steer_ctrl);
+		if (status != VXGE_HW_OK)
+			break;
+
+		img[i].is_valid = VXGE_HW_GET_EPROM_IMAGE_VALID(data0);
+		img[i].index = VXGE_HW_GET_EPROM_IMAGE_INDEX(data0);
+		img[i].type = VXGE_HW_GET_EPROM_IMAGE_TYPE(data0);
+		img[i].version = VXGE_HW_GET_EPROM_IMAGE_REV(data0);
+	}
+
+	return status;
+}
+
+/*
+ * __vxge_hw_channel_free - Free memory allocated for channel
+ * This function deallocates the memory for the channel and the various
+ * arrays in the channel
+ */
+static void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
+{
+	kfree(channel->work_arr);
+	kfree(channel->free_arr);
+	kfree(channel->reserve_arr);
+	kfree(channel->orig_arr);
+	kfree(channel);
+}
+
+/*
+ * __vxge_hw_channel_initialize - Initialize a channel
+ * This function initializes a channel by properly setting the
+ * various references
+ */
+static enum vxge_hw_status
+__vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
+{
+	u32 i;
+	struct __vxge_hw_virtualpath *vpath;
+
+	vpath = channel->vph->vpath;
+
+	if ((channel->reserve_arr != NULL) && (channel->orig_arr != NULL)) {
+		for (i = 0; i < channel->length; i++)
+			channel->orig_arr[i] = channel->reserve_arr[i];
+	}
+
+	switch (channel->type) {
+	case VXGE_HW_CHANNEL_TYPE_FIFO:
+		vpath->fifoh = (struct __vxge_hw_fifo *)channel;
+		channel->stats = &((struct __vxge_hw_fifo *)
+				channel)->stats->common_stats;
+		break;
+	case VXGE_HW_CHANNEL_TYPE_RING:
+		vpath->ringh = (struct __vxge_hw_ring *)channel;
+		channel->stats = &((struct __vxge_hw_ring *)
+				channel)->stats->common_stats;
+		break;
+	default:
+		break;
+	}
+
+	return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_channel_reset - Resets a channel
+ * This function resets a channel by properly setting the various references
+ */
+static enum vxge_hw_status
+__vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
+{
+	u32 i;
+
+	for (i = 0; i < channel->length; i++) {
+		if (channel->reserve_arr != NULL)
+			channel->reserve_arr[i] = channel->orig_arr[i];
+		if (channel->free_arr != NULL)
+			channel->free_arr[i] = NULL;
+		if (channel->work_arr != NULL)
+			channel->work_arr[i] = NULL;
+	}
+	channel->free_ptr = channel->length;
+	channel->reserve_ptr = channel->length;
+	channel->reserve_top = 0;
+	channel->post_index = 0;
+	channel->compl_index = 0;
+
+	return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_device_pci_e_init
+ * Initialize certain PCI/PCI-X configuration registers
+ * with recommended values. Save config space for future hw resets.
+ */
+static void __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
+{
+	u16 cmd = 0;
+
+	/* Set the PErr Response bit and SERR in the PCI command register. */
+	pci_read_config_word(hldev->pdev, PCI_COMMAND, &cmd);
+	cmd |= 0x140;
+	pci_write_config_word(hldev->pdev, PCI_COMMAND, cmd);
+
+	pci_save_state(hldev->pdev);
+}
+
+/* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
  * in progress
  * This routine checks the vpath reset in progress register is turned zero
  */
@@ -405,6 +510,60 @@
 }
 
 /*
+ * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section.
+ * Set the swapper bits appropriately for the legacy section.
+ */
+static enum vxge_hw_status
+__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
+{
+	u64 val64;
+	enum vxge_hw_status status = VXGE_HW_OK;
+
+	val64 = readq(&legacy_reg->toc_swapper_fb);
+
+	wmb();
+
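+	/* The value read back from toc_swapper_fb shows how the host sees
+	 * the fixed initial pattern; the byte-swap and bit-flip enables
+	 * are set accordingly.
+	 */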
+	switch (val64) {
+	case VXGE_HW_SWAPPER_INITIAL_VALUE:
+		return status;
+
+	case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
+		writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
+			&legacy_reg->pifm_rd_swap_en);
+		writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
+			&legacy_reg->pifm_rd_flip_en);
+		writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
+			&legacy_reg->pifm_wr_swap_en);
+		writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
+			&legacy_reg->pifm_wr_flip_en);
+		break;
+
+	case VXGE_HW_SWAPPER_BYTE_SWAPPED:
+		writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
+			&legacy_reg->pifm_rd_swap_en);
+		writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
+			&legacy_reg->pifm_wr_swap_en);
+		break;
+
+	case VXGE_HW_SWAPPER_BIT_FLIPPED:
+		writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
+			&legacy_reg->pifm_rd_flip_en);
+		writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
+			&legacy_reg->pifm_wr_flip_en);
+		break;
+	}
+
+	wmb();
+
+	val64 = readq(&legacy_reg->toc_swapper_fb);
+
+	if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
+		status = VXGE_HW_ERR_SWAPPER_CTRL;
+
+	return status;
+}
+
+/*
  * __vxge_hw_device_toc_get
  * This routine sets the swapper and reads the toc pointer and returns the
  * memory mapped address of the toc
@@ -435,7 +594,7 @@
  * register location pointers in the device object. It waits until the ric is
  * completed initializing registers.
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
 {
 	u64 val64;
@@ -496,26 +655,6 @@
 }
 
 /*
- * __vxge_hw_device_id_get
- * This routine returns sets the device id and revision numbers into the device
- * structure
- */
-void __vxge_hw_device_id_get(struct __vxge_hw_device *hldev)
-{
-	u64 val64;
-
-	val64 = readq(&hldev->common_reg->titan_asic_id);
-	hldev->device_id =
-		(u16)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_DEVICE_ID(val64);
-
-	hldev->major_revision =
-		(u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MAJOR_REVISION(val64);
-
-	hldev->minor_revision =
-		(u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MINOR_REVISION(val64);
-}
-
-/*
  * __vxge_hw_device_access_rights_get: Get Access Rights of the driver
  * This routine returns the Access Rights of the driver
  */
@@ -568,10 +707,25 @@
 }
 
 /*
+ * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
+ * Returns the function number of the vpath.
+ */
+static u32
+__vxge_hw_vpath_func_id_get(struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
+{
+	u64 val64;
+
+	val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);
+
+	return
+	 (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
+}
+
+/*
  * __vxge_hw_device_host_info_get
  * This routine returns the host type assignments
  */
-void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
+static void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
 {
 	u64 val64;
 	u32 i;
@@ -584,16 +738,18 @@
 	hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments);
 
 	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
 		if (!(hldev->vpath_assignments & vxge_mBIT(i)))
 			continue;
 
 		hldev->func_id =
-			__vxge_hw_vpath_func_id_get(i, hldev->vpmgmt_reg[i]);
+			__vxge_hw_vpath_func_id_get(hldev->vpmgmt_reg[i]);
 
 		hldev->access_rights = __vxge_hw_device_access_rights_get(
 			hldev->host_type, hldev->func_id);
 
+		hldev->virtual_paths[i].vp_open = VXGE_HW_VP_NOT_OPEN;
+		hldev->virtual_paths[i].vp_reg = hldev->vpath_reg[i];
+
 		hldev->first_vp_id = i;
 		break;
 	}
@@ -634,7 +790,8 @@
  * __vxge_hw_device_initialize
  * Initialize Titan-V hardware.
  */
-enum vxge_hw_status __vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
+static enum vxge_hw_status
+__vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
 {
 	enum vxge_hw_status status = VXGE_HW_OK;
 
@@ -650,6 +807,196 @@
 	return status;
 }
 
+/*
+ * __vxge_hw_vpath_fw_ver_get - Get the fw version
+ * Returns FW Version
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_fw_ver_get(struct __vxge_hw_virtualpath *vpath,
+			   struct vxge_hw_device_hw_info *hw_info)
+{
+	struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
+	struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
+	struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
+	struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
+	u64 data0, data1 = 0, steer_ctrl = 0;
+	enum vxge_hw_status status;
+
+	status = vxge_hw_vpath_fw_api(vpath,
+			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
+			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
+			0, &data0, &data1, &steer_ctrl);
+	if (status != VXGE_HW_OK)
+		goto exit;
+
+	fw_date->day =
+	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(data0);
+	fw_date->month =
+	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(data0);
+	fw_date->year =
+	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(data0);
+
+	snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
+		 fw_date->month, fw_date->day, fw_date->year);
+
+	fw_version->major =
+	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
+	fw_version->minor =
+	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
+	fw_version->build =
+	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);
+
+	snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
+		 fw_version->major, fw_version->minor, fw_version->build);
+
+	flash_date->day =
+	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data1);
+	flash_date->month =
+	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data1);
+	flash_date->year =
+	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data1);
+
+	snprintf(flash_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
+		 flash_date->month, flash_date->day, flash_date->year);
+
+	flash_version->major =
+	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data1);
+	flash_version->minor =
+	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data1);
+	flash_version->build =
+	    (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data1);
+
+	snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
+		 flash_version->major, flash_version->minor,
+		 flash_version->build);
+
+exit:
+	return status;
+}
+
+/*
+ * __vxge_hw_vpath_card_info_get - Get the serial numbers,
+ * part number and product description.
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_card_info_get(struct __vxge_hw_virtualpath *vpath,
+			      struct vxge_hw_device_hw_info *hw_info)
+{
+	enum vxge_hw_status status;
+	u64 data0, data1 = 0, steer_ctrl = 0;
+	u8 *serial_number = hw_info->serial_number;
+	u8 *part_number = hw_info->part_number;
+	u8 *product_desc = hw_info->product_desc;
+	u32 i, j = 0;
+
+	data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER;
+
+	status = vxge_hw_vpath_fw_api(vpath,
+			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
+			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
+			0, &data0, &data1, &steer_ctrl);
+	if (status != VXGE_HW_OK)
+		return status;
+
+	((u64 *)serial_number)[0] = be64_to_cpu(data0);
+	((u64 *)serial_number)[1] = be64_to_cpu(data1);
+
+	data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER;
+	data1 = steer_ctrl = 0;
+
+	status = vxge_hw_vpath_fw_api(vpath,
+			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
+			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
+			0, &data0, &data1, &steer_ctrl);
+	if (status != VXGE_HW_OK)
+		return status;
+
+	((u64 *)part_number)[0] = be64_to_cpu(data0);
+	((u64 *)part_number)[1] = be64_to_cpu(data1);
+
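+	/* The product description spans four memo items of two 64-bit
+	 * words each.
+	 */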
+	for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
+	     i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
+		data0 = i;
+		data1 = steer_ctrl = 0;
+
+		status = vxge_hw_vpath_fw_api(vpath,
+			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
+			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
+			0, &data0, &data1, &steer_ctrl);
+		if (status != VXGE_HW_OK)
+			return status;
+
+		((u64 *)product_desc)[j++] = be64_to_cpu(data0);
+		((u64 *)product_desc)[j++] = be64_to_cpu(data1);
+	}
+
+	return status;
+}
+
+/*
+ * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
+ * Returns pci function mode
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_pci_func_mode_get(struct __vxge_hw_virtualpath *vpath,
+				  struct vxge_hw_device_hw_info *hw_info)
+{
+	u64 data0, data1 = 0, steer_ctrl = 0;
+	enum vxge_hw_status status;
+
+	data0 = 0;
+
+	status = vxge_hw_vpath_fw_api(vpath,
+			VXGE_HW_FW_API_GET_FUNC_MODE,
+			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
+			0, &data0, &data1, &steer_ctrl);
+	if (status != VXGE_HW_OK)
+		return status;
+
+	hw_info->function_mode = VXGE_HW_GET_FUNC_MODE_VAL(data0);
+	return status;
+}
+
+/*
+ * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
+ *               from MAC address table.
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_addr_get(struct __vxge_hw_virtualpath *vpath,
+			 u8 *macaddr, u8 *macaddr_mask)
+{
+	u64 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
+	    data0 = 0, data1 = 0, steer_ctrl = 0;
+	enum vxge_hw_status status;
+	int i;
+
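+	/* Walk the DA table from the first entry until a valid (unicast,
+	 * non-zero) MAC address is found.
+	 */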
+	do {
+		status = vxge_hw_vpath_fw_api(vpath, action,
+			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
+			0, &data0, &data1, &steer_ctrl);
+		if (status != VXGE_HW_OK)
+			goto exit;
+
+		data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data0);
+		data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
+									data1);
+
+		for (i = ETH_ALEN; i > 0; i--) {
+			macaddr[i - 1] = (u8) (data0 & 0xFF);
+			data0 >>= 8;
+
+			macaddr_mask[i - 1] = (u8) (data1 & 0xFF);
+			data1 >>= 8;
+		}
+
+		action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY;
+		data0 = 0, data1 = 0, steer_ctrl = 0;
+
+	} while (!is_valid_ether_addr(macaddr));
+exit:
+	return status;
+}
+
 /**
  * vxge_hw_device_hw_info_get - Get the hw information
  * Returns the vpath mask that has the bits set for each vpath allocated
@@ -665,9 +1012,9 @@
 	struct vxge_hw_toc_reg __iomem *toc;
 	struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
 	struct vxge_hw_common_reg __iomem *common_reg;
-	struct vxge_hw_vpath_reg __iomem *vpath_reg;
 	struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
 	enum vxge_hw_status status;
+	struct __vxge_hw_virtualpath vpath;
 
 	memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info));
 
@@ -693,7 +1040,6 @@
 	   (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);
 
 	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
 		if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
 			continue;
 
@@ -702,7 +1048,7 @@
 		vpmgmt_reg = (struct vxge_hw_vpmgmt_reg __iomem *)
 				(bar0 + val64);
 
-		hw_info->func_id = __vxge_hw_vpath_func_id_get(i, vpmgmt_reg);
+		hw_info->func_id = __vxge_hw_vpath_func_id_get(vpmgmt_reg);
 		if (__vxge_hw_device_access_rights_get(hw_info->host_type,
 			hw_info->func_id) &
 			VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) {
@@ -718,16 +1064,19 @@
 
 		val64 = readq(&toc->toc_vpath_pointer[i]);
 
-		vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64);
+		vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *)
+			       (bar0 + val64);
+		vpath.vp_open = 0;
 
-		hw_info->function_mode =
-			__vxge_hw_vpath_pci_func_mode_get(i, vpath_reg);
-
-		status = __vxge_hw_vpath_fw_ver_get(i, vpath_reg, hw_info);
+		status = __vxge_hw_vpath_pci_func_mode_get(&vpath, hw_info);
 		if (status != VXGE_HW_OK)
 			goto exit;
 
-		status = __vxge_hw_vpath_card_info_get(i, vpath_reg, hw_info);
+		status = __vxge_hw_vpath_fw_ver_get(&vpath, hw_info);
+		if (status != VXGE_HW_OK)
+			goto exit;
+
+		status = __vxge_hw_vpath_card_info_get(&vpath, hw_info);
 		if (status != VXGE_HW_OK)
 			goto exit;
 
@@ -735,14 +1084,15 @@
 	}
 
 	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
 		if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
 			continue;
 
 		val64 = readq(&toc->toc_vpath_pointer[i]);
-		vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64);
+		vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *)
+			       (bar0 + val64);
+		vpath.vp_open = 0;
 
-		status =  __vxge_hw_vpath_addr_get(i, vpath_reg,
+		status =  __vxge_hw_vpath_addr_get(&vpath,
 				hw_info->mac_addrs[i],
 				hw_info->mac_addr_masks[i]);
 		if (status != VXGE_HW_OK)
@@ -753,6 +1103,218 @@
 }
 
 /*
+ * __vxge_hw_blockpool_destroy - Deallocates the block pool
+ */
+static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
+{
+	struct __vxge_hw_device *hldev;
+	struct list_head *p, *n;
+	u16 ret;
+
+	if (blockpool == NULL) {
+		ret = 1;
+		goto exit;
+	}
+
+	hldev = blockpool->hldev;
+
+	list_for_each_safe(p, n, &blockpool->free_block_list) {
+		pci_unmap_single(hldev->pdev,
+			((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
+			((struct __vxge_hw_blockpool_entry *)p)->length,
+			PCI_DMA_BIDIRECTIONAL);
+
+		vxge_os_dma_free(hldev->pdev,
+			((struct __vxge_hw_blockpool_entry *)p)->memblock,
+			&((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
+
+		list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
+		kfree(p);
+		blockpool->pool_size--;
+	}
+
+	list_for_each_safe(p, n, &blockpool->free_entry_list) {
+		list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
+		kfree((void *)p);
+	}
+	ret = 0;
+exit:
+	return;
+}
+
+/*
+ * __vxge_hw_blockpool_create - Create block pool
+ */
+static enum vxge_hw_status
+__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
+			   struct __vxge_hw_blockpool *blockpool,
+			   u32 pool_size,
+			   u32 pool_max)
+{
+	u32 i;
+	struct __vxge_hw_blockpool_entry *entry = NULL;
+	void *memblock;
+	dma_addr_t dma_addr;
+	struct pci_dev *dma_handle;
+	struct pci_dev *acc_handle;
+	enum vxge_hw_status status = VXGE_HW_OK;
+
+	if (blockpool == NULL) {
+		status = VXGE_HW_FAIL;
+		goto blockpool_create_exit;
+	}
+
+	blockpool->hldev = hldev;
+	blockpool->block_size = VXGE_HW_BLOCK_SIZE;
+	blockpool->pool_size = 0;
+	blockpool->pool_max = pool_max;
+	blockpool->req_out = 0;
+
+	INIT_LIST_HEAD(&blockpool->free_block_list);
+	INIT_LIST_HEAD(&blockpool->free_entry_list);
+
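+	/* Pre-allocate entry descriptors for the maximum pool size so the
+	 * free-entry list can always supply one when a block is added.
+	 */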
+	for (i = 0; i < pool_size + pool_max; i++) {
+		entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
+				GFP_KERNEL);
+		if (entry == NULL) {
+			__vxge_hw_blockpool_destroy(blockpool);
+			status = VXGE_HW_ERR_OUT_OF_MEMORY;
+			goto blockpool_create_exit;
+		}
+		list_add(&entry->item, &blockpool->free_entry_list);
+	}
+
+	for (i = 0; i < pool_size; i++) {
+		memblock = vxge_os_dma_malloc(
+				hldev->pdev,
+				VXGE_HW_BLOCK_SIZE,
+				&dma_handle,
+				&acc_handle);
+		if (memblock == NULL) {
+			__vxge_hw_blockpool_destroy(blockpool);
+			status = VXGE_HW_ERR_OUT_OF_MEMORY;
+			goto blockpool_create_exit;
+		}
+
+		dma_addr = pci_map_single(hldev->pdev, memblock,
+				VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL);
+		if (unlikely(pci_dma_mapping_error(hldev->pdev,
+				dma_addr))) {
+			vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
+			__vxge_hw_blockpool_destroy(blockpool);
+			status = VXGE_HW_ERR_OUT_OF_MEMORY;
+			goto blockpool_create_exit;
+		}
+
+		if (!list_empty(&blockpool->free_entry_list))
+			entry = (struct __vxge_hw_blockpool_entry *)
+				list_first_entry(&blockpool->free_entry_list,
+					struct __vxge_hw_blockpool_entry,
+					item);
+
+		if (entry == NULL)
+			entry =
+			    kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
+					GFP_KERNEL);
+		if (entry != NULL) {
+			list_del(&entry->item);
+			entry->length = VXGE_HW_BLOCK_SIZE;
+			entry->memblock = memblock;
+			entry->dma_addr = dma_addr;
+			entry->acc_handle = acc_handle;
+			entry->dma_handle = dma_handle;
+			list_add(&entry->item,
+					  &blockpool->free_block_list);
+			blockpool->pool_size++;
+		} else {
+			__vxge_hw_blockpool_destroy(blockpool);
+			status = VXGE_HW_ERR_OUT_OF_MEMORY;
+			goto blockpool_create_exit;
+		}
+	}
+
+blockpool_create_exit:
+	return status;
+}
+
+/*
+ * __vxge_hw_device_fifo_config_check - Check fifo configuration.
+ * Check the fifo configuration
+ */
+static enum vxge_hw_status
+__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
+{
+	if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) ||
+	    (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS))
+		return VXGE_HW_BADCFG_FIFO_BLOCKS;
+
+	return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_device_vpath_config_check - Check vpath configuration.
+ * Check the vpath configuration
+ */
+static enum vxge_hw_status
+__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
+{
+	enum vxge_hw_status status;
+
+	if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) ||
+	    (vp_config->min_bandwidth > VXGE_HW_VPATH_BANDWIDTH_MAX))
+		return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH;
+
+	status = __vxge_hw_device_fifo_config_check(&vp_config->fifo);
+	if (status != VXGE_HW_OK)
+		return status;
+
+	if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) &&
+		((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) ||
+		(vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU)))
+		return VXGE_HW_BADCFG_VPATH_MTU;
+
+	if ((vp_config->rpa_strip_vlan_tag !=
+		VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) &&
+		(vp_config->rpa_strip_vlan_tag !=
+		VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) &&
+		(vp_config->rpa_strip_vlan_tag !=
+		VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE))
+		return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG;
+
+	return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_device_config_check - Check device configuration.
+ * Check the device configuration
+ */
+static enum vxge_hw_status
+__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config)
+{
+	u32 i;
+	enum vxge_hw_status status;
+
+	if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
+	    (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
+	    (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
+	    (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF))
+		return VXGE_HW_BADCFG_INTR_MODE;
+
+	if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) &&
+	    (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE))
+		return VXGE_HW_BADCFG_RTS_MAC_EN;
+
+	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
+		status = __vxge_hw_device_vpath_config_check(
+				&new_config->vp_config[i]);
+		if (status != VXGE_HW_OK)
+			return status;
+	}
+
+	return VXGE_HW_OK;
+}
+
+/*
  * vxge_hw_device_initialize - Initialize Titan device.
  * Initialize Titan device. Note that all the arguments of this public API
  * are 'IN', including @hldev. Driver cooperates with
@@ -776,14 +1338,12 @@
 	if (status != VXGE_HW_OK)
 		goto exit;
 
-	hldev = (struct __vxge_hw_device *)
-			vmalloc(sizeof(struct __vxge_hw_device));
+	hldev = vzalloc(sizeof(struct __vxge_hw_device));
 	if (hldev == NULL) {
 		status = VXGE_HW_ERR_OUT_OF_MEMORY;
 		goto exit;
 	}
 
-	memset(hldev, 0, sizeof(struct __vxge_hw_device));
 	hldev->magic = VXGE_HW_DEVICE_MAGIC;
 
 	vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL);
@@ -806,7 +1366,6 @@
 		vfree(hldev);
 		goto exit;
 	}
-	__vxge_hw_device_id_get(hldev);
 
 	__vxge_hw_device_host_info_get(hldev);
 
@@ -814,7 +1373,6 @@
 	nblocks++;
 
 	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
 		if (!(hldev->vpath_assignments & vxge_mBIT(i)))
 			continue;
 
@@ -839,7 +1397,6 @@
 	}
 
 	status = __vxge_hw_device_initialize(hldev);
-
 	if (status != VXGE_HW_OK) {
 		vxge_hw_device_terminate(hldev);
 		goto exit;
@@ -865,6 +1422,242 @@
 }
 
 /*
+ * __vxge_hw_vpath_stats_access - Get the statistics from the given location
+ *                           and offset and perform an operation
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
+			     u32 operation, u32 offset, u64 *stat)
+{
+	u64 val64;
+	enum vxge_hw_status status = VXGE_HW_OK;
+	struct vxge_hw_vpath_reg __iomem *vp_reg;
+
+	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
+		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
+		goto vpath_stats_access_exit;
+	}
+
+	vp_reg = vpath->vp_reg;
+
+	val64 =  VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) |
+		 VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE |
+		 VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset);
+
+	status = __vxge_hw_pio_mem_write64(val64,
+				&vp_reg->xmac_stats_access_cmd,
+				VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE,
+				vpath->hldev->config.device_poll_millis);
+	if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
+		*stat = readq(&vp_reg->xmac_stats_access_data);
+	else
+		*stat = 0;
+
+vpath_stats_access_exit:
+	return status;
+}
+
+/*
+ * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath,
+			struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
+{
+	u64 *val64;
+	int i;
+	u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET;
+	enum vxge_hw_status status = VXGE_HW_OK;
+
+	val64 = (u64 *)vpath_tx_stats;
+
+	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
+		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
+		goto exit;
+	}
+
+	for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) {
+		status = __vxge_hw_vpath_stats_access(vpath,
+					VXGE_HW_STATS_OP_READ,
+					offset, val64);
+		if (status != VXGE_HW_OK)
+			goto exit;
+		offset++;
+		val64++;
+	}
+exit:
+	return status;
+}
+
+/*
+ * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
+			struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
+{
+	u64 *val64;
+	enum vxge_hw_status status = VXGE_HW_OK;
+	int i;
+	u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET;
+	val64 = (u64 *) vpath_rx_stats;
+
+	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
+		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
+		goto exit;
+	}
+	for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) {
+		status = __vxge_hw_vpath_stats_access(vpath,
+					VXGE_HW_STATS_OP_READ,
+					offset >> 3, val64);
+		if (status != VXGE_HW_OK)
+			goto exit;
+
+		offset += 8;
+		val64++;
+	}
+exit:
+	return status;
+}
+
+/*
+ * __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
+			  struct vxge_hw_vpath_stats_hw_info *hw_stats)
+{
+	u64 val64;
+	enum vxge_hw_status status = VXGE_HW_OK;
+	struct vxge_hw_vpath_reg __iomem *vp_reg;
+
+	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
+		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
+		goto exit;
+	}
+	vp_reg = vpath->vp_reg;
+
+	val64 = readq(&vp_reg->vpath_debug_stats0);
+	hw_stats->ini_num_mwr_sent =
+		(u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64);
+
+	val64 = readq(&vp_reg->vpath_debug_stats1);
+	hw_stats->ini_num_mrd_sent =
+		(u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64);
+
+	val64 = readq(&vp_reg->vpath_debug_stats2);
+	hw_stats->ini_num_cpl_rcvd =
+		(u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64);
+
+	val64 = readq(&vp_reg->vpath_debug_stats3);
+	hw_stats->ini_num_mwr_byte_sent =
+		VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64);
+
+	val64 = readq(&vp_reg->vpath_debug_stats4);
+	hw_stats->ini_num_cpl_byte_rcvd =
+		VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64);
+
+	val64 = readq(&vp_reg->vpath_debug_stats5);
+	hw_stats->wrcrdtarb_xoff =
+		(u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64);
+
+	val64 = readq(&vp_reg->vpath_debug_stats6);
+	hw_stats->rdcrdtarb_xoff =
+		(u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64);
+
+	val64 = readq(&vp_reg->vpath_genstats_count01);
+	hw_stats->vpath_genstats_count0 =
+	(u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(
+		val64);
+
+	val64 = readq(&vp_reg->vpath_genstats_count01);
+	hw_stats->vpath_genstats_count1 =
+	(u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(
+		val64);
+
+	val64 = readq(&vp_reg->vpath_genstats_count23);
+	hw_stats->vpath_genstats_count2 =
+	(u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(
+		val64);
+
+	val64 = readq(&vp_reg->vpath_genstats_count23);
+	hw_stats->vpath_genstats_count3 =
+	(u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(
+		val64);
+
+	val64 = readq(&vp_reg->vpath_genstats_count4);
+	hw_stats->vpath_genstats_count4 =
+	(u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(
+		val64);
+
+	val64 = readq(&vp_reg->vpath_genstats_count5);
+	hw_stats->vpath_genstats_count5 =
+	(u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(
+		val64);
+
+	status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats);
+	if (status != VXGE_HW_OK)
+		goto exit;
+
+	status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats);
+	if (status != VXGE_HW_OK)
+		goto exit;
+
+	VXGE_HW_VPATH_STATS_PIO_READ(
+		VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET);
+
+	hw_stats->prog_event_vnum0 =
+			(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64);
+
+	hw_stats->prog_event_vnum1 =
+			(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64);
+
+	VXGE_HW_VPATH_STATS_PIO_READ(
+		VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET);
+
+	hw_stats->prog_event_vnum2 =
+			(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64);
+
+	hw_stats->prog_event_vnum3 =
+			(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64);
+
+	val64 = readq(&vp_reg->rx_multi_cast_stats);
+	hw_stats->rx_multi_cast_frame_discard =
+		(u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64);
+
+	val64 = readq(&vp_reg->rx_frm_transferred);
+	hw_stats->rx_frm_transferred =
+		(u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64);
+
+	val64 = readq(&vp_reg->rxd_returned);
+	hw_stats->rxd_returned =
+		(u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64);
+
+	val64 = readq(&vp_reg->dbg_stats_rx_mpa);
+	hw_stats->rx_mpa_len_fail_frms =
+		(u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64);
+	hw_stats->rx_mpa_mrk_fail_frms =
+		(u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64);
+	hw_stats->rx_mpa_crc_fail_frms =
+		(u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64);
+
+	val64 = readq(&vp_reg->dbg_stats_rx_fau);
+	hw_stats->rx_permitted_frms =
+		(u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64);
+	hw_stats->rx_vp_reset_discarded_frms =
+	(u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64);
+	hw_stats->rx_wol_frms =
+		(u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64);
+
+	val64 = readq(&vp_reg->tx_vp_reset_discarded_frms);
+	hw_stats->tx_vp_reset_discarded_frms =
+	(u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(
+		val64);
+exit:
+	return status;
+}
+
+/*
  * vxge_hw_device_stats_get - Get the device hw statistics.
  * Returns the vpath h/w stats for the device.
  */
@@ -876,7 +1669,6 @@
 	enum vxge_hw_status status = VXGE_HW_OK;
 
 	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
 		if (!(hldev->vpaths_deployed & vxge_mBIT(i)) ||
 			(hldev->virtual_paths[i].vp_open ==
 				VXGE_HW_VP_NOT_OPEN))
@@ -1031,7 +1823,6 @@
 
 	status = vxge_hw_device_xmac_aggr_stats_get(hldev,
 					0, &xmac_stats->aggr_stats[0]);
-
 	if (status != VXGE_HW_OK)
 		goto exit;
 
@@ -1165,7 +1956,6 @@
  * It can be used to set or reset Pause frame generation or reception
  * support of the NIC.
  */
-
 enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev,
 						 u32 port, u32 tx, u32 rx)
 {
@@ -1407,115 +2197,550 @@
 }
 
 /*
- * __vxge_hw_ring_create - Create a Ring
- * This function creates Ring and initializes it.
- *
+ * __vxge_hw_channel_allocate - Allocate memory for channel
+ * This function allocates required memory for the channel and various arrays
+ * in the channel
  */
-static enum vxge_hw_status
-__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
-		      struct vxge_hw_ring_attr *attr)
+static struct __vxge_hw_channel *
+__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
+			   enum __vxge_hw_channel_type type,
+			   u32 length, u32 per_dtr_space,
+			   void *userdata)
 {
-	enum vxge_hw_status status = VXGE_HW_OK;
-	struct __vxge_hw_ring *ring;
-	u32 ring_length;
-	struct vxge_hw_ring_config *config;
+	struct __vxge_hw_channel *channel;
 	struct __vxge_hw_device *hldev;
+	int size = 0;
 	u32 vp_id;
-	struct vxge_hw_mempool_cbs ring_mp_callback;
 
-	if ((vp == NULL) || (attr == NULL)) {
+	hldev = vph->vpath->hldev;
+	vp_id = vph->vpath->vp_id;
+
+	switch (type) {
+	case VXGE_HW_CHANNEL_TYPE_FIFO:
+		size = sizeof(struct __vxge_hw_fifo);
+		break;
+	case VXGE_HW_CHANNEL_TYPE_RING:
+		size = sizeof(struct __vxge_hw_ring);
+		break;
+	default:
+		break;
+	}
+
+	channel = kzalloc(size, GFP_KERNEL);
+	if (channel == NULL)
+		goto exit0;
+	INIT_LIST_HEAD(&channel->item);
+
+	channel->common_reg = hldev->common_reg;
+	channel->first_vp_id = hldev->first_vp_id;
+	channel->type = type;
+	channel->devh = hldev;
+	channel->vph = vph;
+	channel->userdata = userdata;
+	channel->per_dtr_space = per_dtr_space;
+	channel->length = length;
+	channel->vp_id = vp_id;
+
+	channel->work_arr = kzalloc(sizeof(void *) * length, GFP_KERNEL);
+	if (channel->work_arr == NULL)
+		goto exit1;
+
+	channel->free_arr = kzalloc(sizeof(void *) * length, GFP_KERNEL);
+	if (channel->free_arr == NULL)
+		goto exit1;
+	channel->free_ptr = length;
+
+	channel->reserve_arr = kzalloc(sizeof(void *) * length, GFP_KERNEL);
+	if (channel->reserve_arr == NULL)
+		goto exit1;
+	channel->reserve_ptr = length;
+	channel->reserve_top = 0;
+
+	channel->orig_arr = kzalloc(sizeof(void *) * length, GFP_KERNEL);
+	if (channel->orig_arr == NULL)
+		goto exit1;
+
+	return channel;
+exit1:
+	__vxge_hw_channel_free(channel);
+
+exit0:
+	return NULL;
+}
+
+/*
+ * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
+ * Adds a block to block pool
+ */
+static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
+					void *block_addr,
+					u32 length,
+					struct pci_dev *dma_h,
+					struct pci_dev *acc_handle)
+{
+	struct __vxge_hw_blockpool *blockpool;
+	struct __vxge_hw_blockpool_entry *entry = NULL;
+	dma_addr_t dma_addr;
+	enum vxge_hw_status status = VXGE_HW_OK;
+	u32 req_out;
+
+	blockpool = &devh->block_pool;
+
+	if (block_addr == NULL) {
+		blockpool->req_out--;
 		status = VXGE_HW_FAIL;
 		goto exit;
 	}
 
-	hldev = vp->vpath->hldev;
-	vp_id = vp->vpath->vp_id;
+	dma_addr = pci_map_single(devh->pdev, block_addr, length,
+				PCI_DMA_BIDIRECTIONAL);
 
-	config = &hldev->config.vp_config[vp_id].ring;
+	if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) {
+		vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
+		blockpool->req_out--;
+		status = VXGE_HW_FAIL;
+		goto exit;
+	}
 
-	ring_length = config->ring_blocks *
-			vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
+	if (!list_empty(&blockpool->free_entry_list))
+		entry = (struct __vxge_hw_blockpool_entry *)
+			list_first_entry(&blockpool->free_entry_list,
+				struct __vxge_hw_blockpool_entry,
+				item);
 
-	ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp,
-						VXGE_HW_CHANNEL_TYPE_RING,
-						ring_length,
-						attr->per_rxd_space,
-						attr->userdata);
+	if (entry == NULL)
+		entry = vmalloc(sizeof(struct __vxge_hw_blockpool_entry));
+	else
+		list_del(&entry->item);
 
-	if (ring == NULL) {
+	if (entry != NULL) {
+		entry->length = length;
+		entry->memblock = block_addr;
+		entry->dma_addr = dma_addr;
+		entry->acc_handle = acc_handle;
+		entry->dma_handle = dma_h;
+		list_add(&entry->item, &blockpool->free_block_list);
+		blockpool->pool_size++;
+		status = VXGE_HW_OK;
+	} else
+		status = VXGE_HW_ERR_OUT_OF_MEMORY;
+
+	blockpool->req_out--;
+
+	req_out = blockpool->req_out;
+exit:
+	return;
+}
+
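+/*
+ * vxge_os_dma_malloc_async - Allocate a DMA-able block, atomically if
+ * called from interrupt context, and hand it to the block pool via
+ * vxge_hw_blockpool_block_add.
+ */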
+static inline void
+vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh, unsigned long size)
+{
+	gfp_t flags;
+	void *vaddr;
+
+	if (in_interrupt())
+		flags = GFP_ATOMIC | GFP_DMA;
+	else
+		flags = GFP_KERNEL | GFP_DMA;
+
+	vaddr = kmalloc(size, flags);
+
+	vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
+}
+
+/*
+ * __vxge_hw_blockpool_blocks_add - Request additional blocks
+ */
+static
+void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
+{
+	u32 nreq = 0, i;
+
+	if ((blockpool->pool_size + blockpool->req_out) <
+		VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) {
+		nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE;
+		blockpool->req_out += nreq;
+	}
+
+	for (i = 0; i < nreq; i++)
+		vxge_os_dma_malloc_async(
+			((struct __vxge_hw_device *)blockpool->hldev)->pdev,
+			blockpool->hldev, VXGE_HW_BLOCK_SIZE);
+}
+
+/*
+ * __vxge_hw_blockpool_malloc - Allocate a memory block from pool
+ * Allocates a block of memory of given size, either from block pool
+ * or by calling vxge_os_dma_malloc()
+ */
+static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
+					struct vxge_hw_mempool_dma *dma_object)
+{
+	struct __vxge_hw_blockpool_entry *entry = NULL;
+	struct __vxge_hw_blockpool *blockpool;
+	void *memblock = NULL;
+	enum vxge_hw_status status = VXGE_HW_OK;
+
+	blockpool = &devh->block_pool;
+
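+	/* Requests that do not match the pool's block size bypass the pool
+	 * and are satisfied with a fresh DMA allocation.
+	 */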
+	if (size != blockpool->block_size) {
+
+		memblock = vxge_os_dma_malloc(devh->pdev, size,
+						&dma_object->handle,
+						&dma_object->acc_handle);
+
+		if (memblock == NULL) {
+			status = VXGE_HW_ERR_OUT_OF_MEMORY;
+			goto exit;
+		}
+
+		dma_object->addr = pci_map_single(devh->pdev, memblock, size,
+					PCI_DMA_BIDIRECTIONAL);
+
+		if (unlikely(pci_dma_mapping_error(devh->pdev,
+				dma_object->addr))) {
+			vxge_os_dma_free(devh->pdev, memblock,
+				&dma_object->acc_handle);
+			status = VXGE_HW_ERR_OUT_OF_MEMORY;
+			goto exit;
+		}
+
+	} else {
+
+		if (!list_empty(&blockpool->free_block_list))
+			entry = (struct __vxge_hw_blockpool_entry *)
+				list_first_entry(&blockpool->free_block_list,
+					struct __vxge_hw_blockpool_entry,
+					item);
+
+		if (entry != NULL) {
+			list_del(&entry->item);
+			dma_object->addr = entry->dma_addr;
+			dma_object->handle = entry->dma_handle;
+			dma_object->acc_handle = entry->acc_handle;
+			memblock = entry->memblock;
+
+			list_add(&entry->item,
+				&blockpool->free_entry_list);
+			blockpool->pool_size--;
+		}
+
+		if (memblock != NULL)
+			__vxge_hw_blockpool_blocks_add(blockpool);
+	}
+exit:
+	return memblock;
+}
+
+/*
+ * __vxge_hw_blockpool_blocks_remove - Free additional blocks
+ */
+static void
+__vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
+{
+	struct list_head *p, *n;
+
+	list_for_each_safe(p, n, &blockpool->free_block_list) {
+
+		if (blockpool->pool_size < blockpool->pool_max)
+			break;
+
+		pci_unmap_single(
+			((struct __vxge_hw_device *)blockpool->hldev)->pdev,
+			((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
+			((struct __vxge_hw_blockpool_entry *)p)->length,
+			PCI_DMA_BIDIRECTIONAL);
+
+		vxge_os_dma_free(
+			((struct __vxge_hw_device *)blockpool->hldev)->pdev,
+			((struct __vxge_hw_blockpool_entry *)p)->memblock,
+			&((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
+
+		list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
+
+		list_add(p, &blockpool->free_entry_list);
+
+		blockpool->pool_size--;
+
+	}
+}
+
+/*
+ * __vxge_hw_blockpool_free - Frees the memory allocated with
+ *				__vxge_hw_blockpool_malloc
+ */
+static void __vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
+				     void *memblock, u32 size,
+				     struct vxge_hw_mempool_dma *dma_object)
+{
+	struct __vxge_hw_blockpool_entry *entry = NULL;
+	struct __vxge_hw_blockpool *blockpool;
+	enum vxge_hw_status status = VXGE_HW_OK;
+
+	blockpool = &devh->block_pool;
+
+	if (size != blockpool->block_size) {
+		pci_unmap_single(devh->pdev, dma_object->addr, size,
+			PCI_DMA_BIDIRECTIONAL);
+		vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle);
+	} else {
+
+		if (!list_empty(&blockpool->free_entry_list))
+			entry = (struct __vxge_hw_blockpool_entry *)
+				list_first_entry(&blockpool->free_entry_list,
+					struct __vxge_hw_blockpool_entry,
+					item);
+
+		if (entry == NULL)
+			entry = vmalloc(sizeof(
+					struct __vxge_hw_blockpool_entry));
+		else
+			list_del(&entry->item);
+
+		if (entry != NULL) {
+			entry->length = size;
+			entry->memblock = memblock;
+			entry->dma_addr = dma_object->addr;
+			entry->acc_handle = dma_object->acc_handle;
+			entry->dma_handle = dma_object->handle;
+			list_add(&entry->item,
+					&blockpool->free_block_list);
+			blockpool->pool_size++;
+			status = VXGE_HW_OK;
+		} else
+			status = VXGE_HW_ERR_OUT_OF_MEMORY;
+
+		if (status == VXGE_HW_OK)
+			__vxge_hw_blockpool_blocks_remove(blockpool);
+	}
+}
+
+/*
+ * vxge_hw_mempool_destroy
+ */
+static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
+{
+	u32 i, j;
+	struct __vxge_hw_device *devh = mempool->devh;
+
+	for (i = 0; i < mempool->memblocks_allocated; i++) {
+		struct vxge_hw_mempool_dma *dma_object;
+
+		vxge_assert(mempool->memblocks_arr[i]);
+		vxge_assert(mempool->memblocks_dma_arr + i);
+
+		dma_object = mempool->memblocks_dma_arr + i;
+
+		for (j = 0; j < mempool->items_per_memblock; j++) {
+			u32 index = i * mempool->items_per_memblock + j;
+
+			/* to skip the last partially filled (if any) memblock */
+			if (index >= mempool->items_current)
+				break;
+		}
+
+		vfree(mempool->memblocks_priv_arr[i]);
+
+		__vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i],
+				mempool->memblock_size, dma_object);
+	}
+
+	vfree(mempool->items_arr);
+	vfree(mempool->memblocks_dma_arr);
+	vfree(mempool->memblocks_priv_arr);
+	vfree(mempool->memblocks_arr);
+	vfree(mempool);
+}
+
+/*
+ * __vxge_hw_mempool_grow
+ * Will resize mempool up to %num_allocate value.
+ */
+static enum vxge_hw_status
+__vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
+		       u32 *num_allocated)
+{
+	u32 i, first_time = mempool->memblocks_allocated == 0 ? 1 : 0;
+	u32 n_items = mempool->items_per_memblock;
+	u32 start_block_idx = mempool->memblocks_allocated;
+	u32 end_block_idx = mempool->memblocks_allocated + num_allocate;
+	enum vxge_hw_status status = VXGE_HW_OK;
+
+	*num_allocated = 0;
+
+	if (end_block_idx > mempool->memblocks_max) {
 		status = VXGE_HW_ERR_OUT_OF_MEMORY;
 		goto exit;
 	}
 
-	vp->vpath->ringh = ring;
-	ring->vp_id = vp_id;
-	ring->vp_reg = vp->vpath->vp_reg;
-	ring->common_reg = hldev->common_reg;
-	ring->stats = &vp->vpath->sw_stats->ring_stats;
-	ring->config = config;
-	ring->callback = attr->callback;
-	ring->rxd_init = attr->rxd_init;
-	ring->rxd_term = attr->rxd_term;
-	ring->buffer_mode = config->buffer_mode;
-	ring->rxds_limit = config->rxds_limit;
+	for (i = start_block_idx; i < end_block_idx; i++) {
+		u32 j;
+		u32 is_last = ((end_block_idx - 1) == i);
+		struct vxge_hw_mempool_dma *dma_object =
+			mempool->memblocks_dma_arr + i;
+		void *the_memblock;
 
-	ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
-	ring->rxd_priv_size =
-		sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space;
-	ring->per_rxd_space = attr->per_rxd_space;
+		/* Allocate the memblock's private part. Each DMA memblock
+		 * has space set aside for the items' private use, at the
+		 * mempool user's request. Each time the mempool grows, it
+		 * allocates a new memblock and its private part at once,
+		 * which helps to minimize memory usage. */
+		mempool->memblocks_priv_arr[i] =
+				vzalloc(mempool->items_priv_size * n_items);
+		if (mempool->memblocks_priv_arr[i] == NULL) {
+			status = VXGE_HW_ERR_OUT_OF_MEMORY;
+			goto exit;
+		}
 
-	ring->rxd_priv_size =
-		((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) /
-		VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
+		/* allocate DMA-capable memblock */
+		mempool->memblocks_arr[i] =
+			__vxge_hw_blockpool_malloc(mempool->devh,
+				mempool->memblock_size, dma_object);
+		if (mempool->memblocks_arr[i] == NULL) {
+			vfree(mempool->memblocks_priv_arr[i]);
+			status = VXGE_HW_ERR_OUT_OF_MEMORY;
+			goto exit;
+		}
 
-	/* how many RxDs can fit into one block. Depends on configured
-	 * buffer_mode. */
-	ring->rxds_per_block =
-		vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
+		(*num_allocated)++;
+		mempool->memblocks_allocated++;
 
-	/* calculate actual RxD block private size */
-	ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
-	ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc;
-	ring->mempool = __vxge_hw_mempool_create(hldev,
-				VXGE_HW_BLOCK_SIZE,
-				VXGE_HW_BLOCK_SIZE,
-				ring->rxdblock_priv_size,
-				ring->config->ring_blocks,
-				ring->config->ring_blocks,
-				&ring_mp_callback,
-				ring);
+		memset(mempool->memblocks_arr[i], 0, mempool->memblock_size);
 
-	if (ring->mempool == NULL) {
-		__vxge_hw_ring_delete(vp);
-		return VXGE_HW_ERR_OUT_OF_MEMORY;
+		the_memblock = mempool->memblocks_arr[i];
+
+		/* fill the items hash array */
+		for (j = 0; j < n_items; j++) {
+			u32 index = i * n_items + j;
+
+			if (first_time && index >= mempool->items_initial)
+				break;
+
+			mempool->items_arr[index] =
+				((char *)the_memblock + j*mempool->item_size);
+
+			/* let the caller do more work on each item */
+			if (mempool->item_func_alloc != NULL)
+				mempool->item_func_alloc(mempool, i,
+					dma_object, index, is_last);
+
+			mempool->items_current = index + 1;
+		}
+
+		if (first_time && mempool->items_current ==
+					mempool->items_initial)
+			break;
 	}
+exit:
+	return status;
+}
 
-	status = __vxge_hw_channel_initialize(&ring->channel);
-	if (status != VXGE_HW_OK) {
-		__vxge_hw_ring_delete(vp);
+/*
+ * __vxge_hw_mempool_create
+ * This function creates a memory pool object. The pool may grow but will
+ * never shrink. The pool consists of a number of dynamically allocated
+ * blocks, each large enough to hold %items_initial items. Memory is
+ * DMA-able but the client must map/unmap it before interoperating with
+ * the device.
+ */
+static struct vxge_hw_mempool *
+__vxge_hw_mempool_create(struct __vxge_hw_device *devh,
+			 u32 memblock_size,
+			 u32 item_size,
+			 u32 items_priv_size,
+			 u32 items_initial,
+			 u32 items_max,
+			 struct vxge_hw_mempool_cbs *mp_callback,
+			 void *userdata)
+{
+	enum vxge_hw_status status = VXGE_HW_OK;
+	u32 memblocks_to_allocate;
+	struct vxge_hw_mempool *mempool = NULL;
+	u32 allocated;
+
+	if (memblock_size < item_size) {
+		status = VXGE_HW_FAIL;
 		goto exit;
 	}
 
-	/* Note:
-	 * Specifying rxd_init callback means two things:
-	 * 1) rxds need to be initialized by driver at channel-open time;
-	 * 2) rxds need to be posted at channel-open time
-	 *    (that's what the initial_replenish() below does)
-	 * Currently we don't have a case when the 1) is done without the 2).
-	 */
-	if (ring->rxd_init) {
-		status = vxge_hw_ring_replenish(ring);
-		if (status != VXGE_HW_OK) {
-			__vxge_hw_ring_delete(vp);
-			goto exit;
-		}
+	mempool = vzalloc(sizeof(struct vxge_hw_mempool));
+	if (mempool == NULL) {
+		status = VXGE_HW_ERR_OUT_OF_MEMORY;
+		goto exit;
 	}
 
-	/* initial replenish will increment the counter in its post() routine,
-	 * we have to reset it */
-	ring->stats->common_stats.usage_cnt = 0;
+	mempool->devh			= devh;
+	mempool->memblock_size		= memblock_size;
+	mempool->items_max		= items_max;
+	mempool->items_initial		= items_initial;
+	mempool->item_size		= item_size;
+	mempool->items_priv_size	= items_priv_size;
+	mempool->item_func_alloc	= mp_callback->item_func_alloc;
+	mempool->userdata		= userdata;
+
+	mempool->memblocks_allocated = 0;
+
+	mempool->items_per_memblock = memblock_size / item_size;
+
+	mempool->memblocks_max = (items_max + mempool->items_per_memblock - 1) /
+					mempool->items_per_memblock;
+
+	/* allocate array of memblocks */
+	mempool->memblocks_arr =
+		vzalloc(sizeof(void *) * mempool->memblocks_max);
+	if (mempool->memblocks_arr == NULL) {
+		__vxge_hw_mempool_destroy(mempool);
+		status = VXGE_HW_ERR_OUT_OF_MEMORY;
+		mempool = NULL;
+		goto exit;
+	}
+
+	/* allocate array of private parts of items per memblocks */
+	mempool->memblocks_priv_arr =
+		vzalloc(sizeof(void *) * mempool->memblocks_max);
+	if (mempool->memblocks_priv_arr == NULL) {
+		__vxge_hw_mempool_destroy(mempool);
+		status = VXGE_HW_ERR_OUT_OF_MEMORY;
+		mempool = NULL;
+		goto exit;
+	}
+
+	/* allocate array of memblocks DMA objects */
+	mempool->memblocks_dma_arr =
+		vzalloc(sizeof(struct vxge_hw_mempool_dma) *
+			mempool->memblocks_max);
+	if (mempool->memblocks_dma_arr == NULL) {
+		__vxge_hw_mempool_destroy(mempool);
+		status = VXGE_HW_ERR_OUT_OF_MEMORY;
+		mempool = NULL;
+		goto exit;
+	}
+
+	/* allocate hash array of items */
+	mempool->items_arr = vzalloc(sizeof(void *) * mempool->items_max);
+	if (mempool->items_arr == NULL) {
+		__vxge_hw_mempool_destroy(mempool);
+		status = VXGE_HW_ERR_OUT_OF_MEMORY;
+		mempool = NULL;
+		goto exit;
+	}
+
+	/* calculate initial number of memblocks */
+	memblocks_to_allocate = (mempool->items_initial +
+				 mempool->items_per_memblock - 1) /
+						mempool->items_per_memblock;
+
+	/* pre-allocate the mempool */
+	status = __vxge_hw_mempool_grow(mempool, memblocks_to_allocate,
+					&allocated);
+	if (status != VXGE_HW_OK) {
+		__vxge_hw_mempool_destroy(mempool);
+		status = VXGE_HW_ERR_OUT_OF_MEMORY;
+		mempool = NULL;
+		goto exit;
+	}
+
 exit:
-	return status;
+	return mempool;
 }
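For reference, a minimal caller-side sketch of this allocator, modeled on the ring's own use of it later in this diff. The callback name, block count, and empty private area are illustrative assumptions, not part of the patch; the callback signature follows the item_func_alloc invocation in __vxge_hw_mempool_grow above:

	/* hypothetical per-item hook; real callers record the item's
	 * DMA address and private area here */
	static void my_item_alloc(struct vxge_hw_mempool *mempoolh,
				  u32 memblock_index,
				  struct vxge_hw_mempool_dma *dma_object,
				  u32 index, u32 is_last)
	{
	}

	struct vxge_hw_mempool_cbs cbs = { .item_func_alloc = my_item_alloc };
	struct vxge_hw_mempool *pool;

	pool = __vxge_hw_mempool_create(devh,
					VXGE_HW_BLOCK_SIZE,	/* memblock size */
					VXGE_HW_BLOCK_SIZE,	/* item size */
					0,			/* items_priv_size */
					n_blocks,		/* items_initial */
					n_blocks,		/* items_max: no growth */
					&cbs, userdata);
	if (pool == NULL)
		return VXGE_HW_ERR_OUT_OF_MEMORY;
	/* ... use pool->items_arr[] ... */
	__vxge_hw_mempool_destroy(pool);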
 
 /*
@@ -1578,7 +2803,8 @@
  * __vxge_hw_ring_delete - Removes the ring
  * This function frees up the memory pool and removes the ring
  */
-static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
+static enum vxge_hw_status
+__vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
 {
 	struct __vxge_hw_ring *ring = vp->vpath->ringh;
 
@@ -1594,329 +2820,112 @@
 }
 
 /*
- * __vxge_hw_mempool_grow
- * Will resize mempool up to %num_allocate value.
+ * __vxge_hw_ring_create - Create a Ring
+ * This function creates a ring and initializes it.
  */
 static enum vxge_hw_status
-__vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
-		       u32 *num_allocated)
-{
-	u32 i, first_time = mempool->memblocks_allocated == 0 ? 1 : 0;
-	u32 n_items = mempool->items_per_memblock;
-	u32 start_block_idx = mempool->memblocks_allocated;
-	u32 end_block_idx = mempool->memblocks_allocated + num_allocate;
-	enum vxge_hw_status status = VXGE_HW_OK;
-
-	*num_allocated = 0;
-
-	if (end_block_idx > mempool->memblocks_max) {
-		status = VXGE_HW_ERR_OUT_OF_MEMORY;
-		goto exit;
-	}
-
-	for (i = start_block_idx; i < end_block_idx; i++) {
-		u32 j;
-		u32 is_last = ((end_block_idx - 1) == i);
-		struct vxge_hw_mempool_dma *dma_object =
-			mempool->memblocks_dma_arr + i;
-		void *the_memblock;
-
-		/* allocate memblock's private part. Each DMA memblock
-		 * has a space allocated for item's private usage upon
-		 * mempool's user request. Each time mempool grows, it will
-		 * allocate new memblock and its private part at once.
-		 * This helps to minimize memory usage a lot. */
-		mempool->memblocks_priv_arr[i] =
-				vmalloc(mempool->items_priv_size * n_items);
-		if (mempool->memblocks_priv_arr[i] == NULL) {
-			status = VXGE_HW_ERR_OUT_OF_MEMORY;
-			goto exit;
-		}
-
-		memset(mempool->memblocks_priv_arr[i], 0,
-			     mempool->items_priv_size * n_items);
-
-		/* allocate DMA-capable memblock */
-		mempool->memblocks_arr[i] =
-			__vxge_hw_blockpool_malloc(mempool->devh,
-				mempool->memblock_size, dma_object);
-		if (mempool->memblocks_arr[i] == NULL) {
-			vfree(mempool->memblocks_priv_arr[i]);
-			status = VXGE_HW_ERR_OUT_OF_MEMORY;
-			goto exit;
-		}
-
-		(*num_allocated)++;
-		mempool->memblocks_allocated++;
-
-		memset(mempool->memblocks_arr[i], 0, mempool->memblock_size);
-
-		the_memblock = mempool->memblocks_arr[i];
-
-		/* fill the items hash array */
-		for (j = 0; j < n_items; j++) {
-			u32 index = i * n_items + j;
-
-			if (first_time && index >= mempool->items_initial)
-				break;
-
-			mempool->items_arr[index] =
-				((char *)the_memblock + j*mempool->item_size);
-
-			/* let caller to do more job on each item */
-			if (mempool->item_func_alloc != NULL)
-				mempool->item_func_alloc(mempool, i,
-					dma_object, index, is_last);
-
-			mempool->items_current = index + 1;
-		}
-
-		if (first_time && mempool->items_current ==
-					mempool->items_initial)
-			break;
-	}
-exit:
-	return status;
-}
-
-/*
- * vxge_hw_mempool_create
- * This function will create memory pool object. Pool may grow but will
- * never shrink. Pool consists of number of dynamically allocated blocks
- * with size enough to hold %items_initial number of items. Memory is
- * DMA-able but client must map/unmap before interoperating with the device.
- */
-static struct vxge_hw_mempool*
-__vxge_hw_mempool_create(
-	struct __vxge_hw_device *devh,
-	u32 memblock_size,
-	u32 item_size,
-	u32 items_priv_size,
-	u32 items_initial,
-	u32 items_max,
-	struct vxge_hw_mempool_cbs *mp_callback,
-	void *userdata)
+__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
+		      struct vxge_hw_ring_attr *attr)
 {
 	enum vxge_hw_status status = VXGE_HW_OK;
-	u32 memblocks_to_allocate;
-	struct vxge_hw_mempool *mempool = NULL;
-	u32 allocated;
+	struct __vxge_hw_ring *ring;
+	u32 ring_length;
+	struct vxge_hw_ring_config *config;
+	struct __vxge_hw_device *hldev;
+	u32 vp_id;
+	struct vxge_hw_mempool_cbs ring_mp_callback;
 
-	if (memblock_size < item_size) {
+	if ((vp == NULL) || (attr == NULL)) {
 		status = VXGE_HW_FAIL;
 		goto exit;
 	}
 
-	mempool = (struct vxge_hw_mempool *)
-			vmalloc(sizeof(struct vxge_hw_mempool));
-	if (mempool == NULL) {
+	hldev = vp->vpath->hldev;
+	vp_id = vp->vpath->vp_id;
+
+	config = &hldev->config.vp_config[vp_id].ring;
+
+	ring_length = config->ring_blocks *
+			vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
+
+	ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp,
+						VXGE_HW_CHANNEL_TYPE_RING,
+						ring_length,
+						attr->per_rxd_space,
+						attr->userdata);
+	if (ring == NULL) {
 		status = VXGE_HW_ERR_OUT_OF_MEMORY;
 		goto exit;
 	}
-	memset(mempool, 0, sizeof(struct vxge_hw_mempool));
 
-	mempool->devh			= devh;
-	mempool->memblock_size		= memblock_size;
-	mempool->items_max		= items_max;
-	mempool->items_initial		= items_initial;
-	mempool->item_size		= item_size;
-	mempool->items_priv_size	= items_priv_size;
-	mempool->item_func_alloc	= mp_callback->item_func_alloc;
-	mempool->userdata		= userdata;
+	vp->vpath->ringh = ring;
+	ring->vp_id = vp_id;
+	ring->vp_reg = vp->vpath->vp_reg;
+	ring->common_reg = hldev->common_reg;
+	ring->stats = &vp->vpath->sw_stats->ring_stats;
+	ring->config = config;
+	ring->callback = attr->callback;
+	ring->rxd_init = attr->rxd_init;
+	ring->rxd_term = attr->rxd_term;
+	ring->buffer_mode = config->buffer_mode;
+	ring->rxds_limit = config->rxds_limit;
 
-	mempool->memblocks_allocated = 0;
+	ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
+	ring->rxd_priv_size =
+		sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space;
+	ring->per_rxd_space = attr->per_rxd_space;
 
-	mempool->items_per_memblock = memblock_size / item_size;
+	ring->rxd_priv_size =
+		((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) /
+		VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
 
-	mempool->memblocks_max = (items_max + mempool->items_per_memblock - 1) /
-					mempool->items_per_memblock;
+	/* How many RxDs fit into one block depends on the configured
+	 * buffer_mode. */
+	ring->rxds_per_block =
+		vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
 
-	/* allocate array of memblocks */
-	mempool->memblocks_arr =
-		(void **) vmalloc(sizeof(void *) * mempool->memblocks_max);
-	if (mempool->memblocks_arr == NULL) {
-		__vxge_hw_mempool_destroy(mempool);
-		status = VXGE_HW_ERR_OUT_OF_MEMORY;
-		mempool = NULL;
-		goto exit;
+	/* calculate actual RxD block private size */
+	ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
+	ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc;
+	ring->mempool = __vxge_hw_mempool_create(hldev,
+				VXGE_HW_BLOCK_SIZE,
+				VXGE_HW_BLOCK_SIZE,
+				ring->rxdblock_priv_size,
+				ring->config->ring_blocks,
+				ring->config->ring_blocks,
+				&ring_mp_callback,
+				ring);
+	if (ring->mempool == NULL) {
+		__vxge_hw_ring_delete(vp);
+		return VXGE_HW_ERR_OUT_OF_MEMORY;
 	}
-	memset(mempool->memblocks_arr, 0,
-		sizeof(void *) * mempool->memblocks_max);
 
-	/* allocate array of private parts of items per memblocks */
-	mempool->memblocks_priv_arr =
-		(void **) vmalloc(sizeof(void *) * mempool->memblocks_max);
-	if (mempool->memblocks_priv_arr == NULL) {
-		__vxge_hw_mempool_destroy(mempool);
-		status = VXGE_HW_ERR_OUT_OF_MEMORY;
-		mempool = NULL;
-		goto exit;
-	}
-	memset(mempool->memblocks_priv_arr, 0,
-		    sizeof(void *) * mempool->memblocks_max);
-
-	/* allocate array of memblocks DMA objects */
-	mempool->memblocks_dma_arr = (struct vxge_hw_mempool_dma *)
-		vmalloc(sizeof(struct vxge_hw_mempool_dma) *
-			mempool->memblocks_max);
-
-	if (mempool->memblocks_dma_arr == NULL) {
-		__vxge_hw_mempool_destroy(mempool);
-		status = VXGE_HW_ERR_OUT_OF_MEMORY;
-		mempool = NULL;
-		goto exit;
-	}
-	memset(mempool->memblocks_dma_arr, 0,
-			sizeof(struct vxge_hw_mempool_dma) *
-			mempool->memblocks_max);
-
-	/* allocate hash array of items */
-	mempool->items_arr =
-		(void **) vmalloc(sizeof(void *) * mempool->items_max);
-	if (mempool->items_arr == NULL) {
-		__vxge_hw_mempool_destroy(mempool);
-		status = VXGE_HW_ERR_OUT_OF_MEMORY;
-		mempool = NULL;
-		goto exit;
-	}
-	memset(mempool->items_arr, 0, sizeof(void *) * mempool->items_max);
-
-	/* calculate initial number of memblocks */
-	memblocks_to_allocate = (mempool->items_initial +
-				 mempool->items_per_memblock - 1) /
-						mempool->items_per_memblock;
-
-	/* pre-allocate the mempool */
-	status = __vxge_hw_mempool_grow(mempool, memblocks_to_allocate,
-					&allocated);
+	status = __vxge_hw_channel_initialize(&ring->channel);
 	if (status != VXGE_HW_OK) {
-		__vxge_hw_mempool_destroy(mempool);
-		status = VXGE_HW_ERR_OUT_OF_MEMORY;
-		mempool = NULL;
+		__vxge_hw_ring_delete(vp);
 		goto exit;
 	}
 
-exit:
-	return mempool;
-}
-
-/*
- * vxge_hw_mempool_destroy
- */
-static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
-{
-	u32 i, j;
-	struct __vxge_hw_device *devh = mempool->devh;
-
-	for (i = 0; i < mempool->memblocks_allocated; i++) {
-		struct vxge_hw_mempool_dma *dma_object;
-
-		vxge_assert(mempool->memblocks_arr[i]);
-		vxge_assert(mempool->memblocks_dma_arr + i);
-
-		dma_object = mempool->memblocks_dma_arr + i;
-
-		for (j = 0; j < mempool->items_per_memblock; j++) {
-			u32 index = i * mempool->items_per_memblock + j;
-
-			/* to skip last partially filled(if any) memblock */
-			if (index >= mempool->items_current)
-				break;
+	/* Note:
+	 * Specifying an rxd_init callback means two things:
+	 * 1) rxds need to be initialized by the driver at channel-open time;
+	 * 2) rxds need to be posted at channel-open time
+	 *    (that's what the vxge_hw_ring_replenish() call below does).
+	 * Currently there is no case where 1) is done without 2).
+	 */
+	if (ring->rxd_init) {
+		status = vxge_hw_ring_replenish(ring);
+		if (status != VXGE_HW_OK) {
+			__vxge_hw_ring_delete(vp);
+			goto exit;
 		}
-
-		vfree(mempool->memblocks_priv_arr[i]);
-
-		__vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i],
-				mempool->memblock_size, dma_object);
 	}
 
-	vfree(mempool->items_arr);
-
-	vfree(mempool->memblocks_dma_arr);
-
-	vfree(mempool->memblocks_priv_arr);
-
-	vfree(mempool->memblocks_arr);
-
-	vfree(mempool);
-}
-
-/*
- * __vxge_hw_device_fifo_config_check - Check fifo configuration.
- * Check the fifo configuration
- */
-enum vxge_hw_status
-__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
-{
-	if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) ||
-	     (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS))
-		return VXGE_HW_BADCFG_FIFO_BLOCKS;
-
-	return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_device_vpath_config_check - Check vpath configuration.
- * Check the vpath configuration
- */
-static enum vxge_hw_status
-__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
-{
-	enum vxge_hw_status status;
-
-	if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) ||
-		(vp_config->min_bandwidth >
-					VXGE_HW_VPATH_BANDWIDTH_MAX))
-		return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH;
-
-	status = __vxge_hw_device_fifo_config_check(&vp_config->fifo);
-	if (status != VXGE_HW_OK)
-		return status;
-
-	if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) &&
-		((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) ||
-		(vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU)))
-		return VXGE_HW_BADCFG_VPATH_MTU;
-
-	if ((vp_config->rpa_strip_vlan_tag !=
-		VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) &&
-		(vp_config->rpa_strip_vlan_tag !=
-		VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) &&
-		(vp_config->rpa_strip_vlan_tag !=
-		VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE))
-		return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG;
-
-	return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_device_config_check - Check device configuration.
- * Check the device configuration
- */
-enum vxge_hw_status
-__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config)
-{
-	u32 i;
-	enum vxge_hw_status status;
-
-	if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
-	   (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
-	   (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
-	   (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF))
-		return VXGE_HW_BADCFG_INTR_MODE;
-
-	if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) &&
-	   (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE))
-		return VXGE_HW_BADCFG_RTS_MAC_EN;
-
-	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-		status = __vxge_hw_device_vpath_config_check(
-				&new_config->vp_config[i]);
-		if (status != VXGE_HW_OK)
-			return status;
-	}
-
-	return VXGE_HW_OK;
+	/* The initial replenish increments the counter in its post() routine,
+	 * so we have to reset it. */
+	ring->stats->common_stats.usage_cnt = 0;
+exit:
+	return status;
 }
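The attr fields the function consumes are scattered through the body above; gathered into one place, a sketch of a caller (the my_* callbacks and private struct are illustrative assumptions, not names from this patch):

	struct vxge_hw_ring_attr attr = {
		.callback      = my_rx_complete,	/* completion handler */
		.rxd_init      = my_rxd_init,	/* enables the initial replenish */
		.rxd_term      = my_rxd_term,
		.per_rxd_space = sizeof(struct my_rxd_priv),
		.userdata      = my_netdev,
	};

	status = __vxge_hw_ring_create(vp, &attr);
	if (status != VXGE_HW_OK)
		return status;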
 
 /*
@@ -1938,7 +2947,6 @@
 	device_config->rts_mac_en =  VXGE_HW_RTS_MAC_DEFAULT;
 
 	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
-
 		device_config->vp_config[i].vp_id = i;
 
 		device_config->vp_config[i].min_bandwidth =
@@ -2078,61 +3086,6 @@
 }
 
 /*
- * _hw_legacy_swapper_set - Set the swapper bits for the legacy secion.
- * Set the swapper bits appropriately for the lagacy section.
- */
-static enum vxge_hw_status
-__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
-{
-	u64 val64;
-	enum vxge_hw_status status = VXGE_HW_OK;
-
-	val64 = readq(&legacy_reg->toc_swapper_fb);
-
-	wmb();
-
-	switch (val64) {
-
-	case VXGE_HW_SWAPPER_INITIAL_VALUE:
-		return status;
-
-	case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
-		writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
-			&legacy_reg->pifm_rd_swap_en);
-		writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
-			&legacy_reg->pifm_rd_flip_en);
-		writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
-			&legacy_reg->pifm_wr_swap_en);
-		writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
-			&legacy_reg->pifm_wr_flip_en);
-		break;
-
-	case VXGE_HW_SWAPPER_BYTE_SWAPPED:
-		writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
-			&legacy_reg->pifm_rd_swap_en);
-		writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
-			&legacy_reg->pifm_wr_swap_en);
-		break;
-
-	case VXGE_HW_SWAPPER_BIT_FLIPPED:
-		writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
-			&legacy_reg->pifm_rd_flip_en);
-		writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
-			&legacy_reg->pifm_wr_flip_en);
-		break;
-	}
-
-	wmb();
-
-	val64 = readq(&legacy_reg->toc_swapper_fb);
-
-	if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
-		status = VXGE_HW_ERR_SWAPPER_CTRL;
-
-	return status;
-}
-
-/*
  * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
  * Set the swapper bits appropriately for the vpath.
  */
@@ -2156,9 +3109,8 @@
  * Set the swapper bits appropriately for the vpath.
  */
 static enum vxge_hw_status
-__vxge_hw_kdfc_swapper_set(
-	struct vxge_hw_legacy_reg __iomem *legacy_reg,
-	struct vxge_hw_vpath_reg __iomem *vpath_reg)
+__vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg,
+			   struct vxge_hw_vpath_reg __iomem *vpath_reg)
 {
 	u64 val64;
 
@@ -2408,6 +3360,69 @@
 }
 
 /*
+ * __vxge_hw_fifo_abort - Return the outstanding TxDs
+ * This function terminates the TxDs of the fifo
+ */
+static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
+{
+	void *txdlh;
+
+	for (;;) {
+		vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);
+
+		if (txdlh == NULL)
+			break;
+
+		vxge_hw_channel_dtr_complete(&fifo->channel);
+
+		if (fifo->txdl_term) {
+			fifo->txdl_term(txdlh,
+			VXGE_HW_TXDL_STATE_POSTED,
+			fifo->channel.userdata);
+		}
+
+		vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
+	}
+
+	return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_fifo_reset - Resets the fifo
+ * This function resets the fifo during vpath reset operation
+ */
+static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
+{
+	enum vxge_hw_status status = VXGE_HW_OK;
+
+	__vxge_hw_fifo_abort(fifo);
+	status = __vxge_hw_channel_reset(&fifo->channel);
+
+	return status;
+}
+
+/*
+ * __vxge_hw_fifo_delete - Removes the FIFO
+ * This function frees up the memory pool and removes the FIFO
+ */
+static enum vxge_hw_status
+__vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
+{
+	struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;
+
+	__vxge_hw_fifo_abort(fifo);
+
+	if (fifo->mempool)
+		__vxge_hw_mempool_destroy(fifo->mempool);
+
+	vp->vpath->fifoh = NULL;
+
+	__vxge_hw_channel_free(&fifo->channel);
+
+	return VXGE_HW_OK;
+}
+
+/*
  * __vxge_hw_fifo_mempool_item_alloc - Allocate List blocks for TxD
  * list callback
  * This function is the callback passed to __vxge_hw_mempool_create to create memory
@@ -2453,7 +3468,7 @@
  * __vxge_hw_fifo_create - Create a FIFO
  * This function creates FIFO and initializes it.
  */
-enum vxge_hw_status
+static enum vxge_hw_status
 __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
 		      struct vxge_hw_fifo_attr *attr)
 {
@@ -2572,68 +3587,6 @@
 }
 
 /*
- * __vxge_hw_fifo_abort - Returns the TxD
- * This function terminates the TxDs of fifo
- */
-static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
-{
-	void *txdlh;
-
-	for (;;) {
-		vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);
-
-		if (txdlh == NULL)
-			break;
-
-		vxge_hw_channel_dtr_complete(&fifo->channel);
-
-		if (fifo->txdl_term) {
-			fifo->txdl_term(txdlh,
-			VXGE_HW_TXDL_STATE_POSTED,
-			fifo->channel.userdata);
-		}
-
-		vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
-	}
-
-	return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_fifo_reset - Resets the fifo
- * This function resets the fifo during vpath reset operation
- */
-static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
-{
-	enum vxge_hw_status status = VXGE_HW_OK;
-
-	__vxge_hw_fifo_abort(fifo);
-	status = __vxge_hw_channel_reset(&fifo->channel);
-
-	return status;
-}
-
-/*
- * __vxge_hw_fifo_delete - Removes the FIFO
- * This function freeup the memory pool and removes the FIFO
- */
-enum vxge_hw_status __vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
-{
-	struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;
-
-	__vxge_hw_fifo_abort(fifo);
-
-	if (fifo->mempool)
-		__vxge_hw_mempool_destroy(fifo->mempool);
-
-	vp->vpath->fifoh = NULL;
-
-	__vxge_hw_channel_free(&fifo->channel);
-
-	return VXGE_HW_OK;
-}
-
-/*
  * __vxge_hw_vpath_pci_read - Read the content of given address
  *                          in pci config space.
  * Read from the vpath pci config space.
@@ -2675,297 +3628,6 @@
 	return status;
 }
 
-/*
- * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
- * Returns the function number of the vpath.
- */
-static u32
-__vxge_hw_vpath_func_id_get(u32 vp_id,
-	struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
-{
-	u64 val64;
-
-	val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);
-
-	return
-	 (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
-}
-
-/*
- * __vxge_hw_read_rts_ds - Program RTS steering critieria
- */
-static inline void
-__vxge_hw_read_rts_ds(struct vxge_hw_vpath_reg __iomem *vpath_reg,
-		      u64 dta_struct_sel)
-{
-	writeq(0, &vpath_reg->rts_access_steer_ctrl);
-	wmb();
-	writeq(dta_struct_sel, &vpath_reg->rts_access_steer_data0);
-	writeq(0, &vpath_reg->rts_access_steer_data1);
-	wmb();
-}
-
-
-/*
- * __vxge_hw_vpath_card_info_get - Get the serial numbers,
- * part number and product description.
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_card_info_get(
-	u32 vp_id,
-	struct vxge_hw_vpath_reg __iomem *vpath_reg,
-	struct vxge_hw_device_hw_info *hw_info)
-{
-	u32 i, j;
-	u64 val64;
-	u64 data1 = 0ULL;
-	u64 data2 = 0ULL;
-	enum vxge_hw_status status = VXGE_HW_OK;
-	u8 *serial_number = hw_info->serial_number;
-	u8 *part_number = hw_info->part_number;
-	u8 *product_desc = hw_info->product_desc;
-
-	__vxge_hw_read_rts_ds(vpath_reg,
-		VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER);
-
-	val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
-			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
-		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
-			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
-		VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
-		VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
-
-	status = __vxge_hw_pio_mem_write64(val64,
-				&vpath_reg->rts_access_steer_ctrl,
-				VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
-				VXGE_HW_DEF_DEVICE_POLL_MILLIS);
-
-	if (status != VXGE_HW_OK)
-		return status;
-
-	val64 = readq(&vpath_reg->rts_access_steer_ctrl);
-
-	if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
-		data1 = readq(&vpath_reg->rts_access_steer_data0);
-		((u64 *)serial_number)[0] = be64_to_cpu(data1);
-
-		data2 = readq(&vpath_reg->rts_access_steer_data1);
-		((u64 *)serial_number)[1] = be64_to_cpu(data2);
-		status = VXGE_HW_OK;
-	} else
-		*serial_number = 0;
-
-	__vxge_hw_read_rts_ds(vpath_reg,
-			VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER);
-
-	val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
-			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
-		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
-			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
-		VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
-		VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
-
-	status = __vxge_hw_pio_mem_write64(val64,
-				&vpath_reg->rts_access_steer_ctrl,
-				VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
-				VXGE_HW_DEF_DEVICE_POLL_MILLIS);
-
-	if (status != VXGE_HW_OK)
-		return status;
-
-	val64 = readq(&vpath_reg->rts_access_steer_ctrl);
-
-	if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
-
-		data1 = readq(&vpath_reg->rts_access_steer_data0);
-		((u64 *)part_number)[0] = be64_to_cpu(data1);
-
-		data2 = readq(&vpath_reg->rts_access_steer_data1);
-		((u64 *)part_number)[1] = be64_to_cpu(data2);
-
-		status = VXGE_HW_OK;
-
-	} else
-		*part_number = 0;
-
-	j = 0;
-
-	for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
-	     i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
-
-		__vxge_hw_read_rts_ds(vpath_reg, i);
-
-		val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
-			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
-			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
-			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
-			VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
-			VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
-
-		status = __vxge_hw_pio_mem_write64(val64,
-				&vpath_reg->rts_access_steer_ctrl,
-				VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
-				VXGE_HW_DEF_DEVICE_POLL_MILLIS);
-
-		if (status != VXGE_HW_OK)
-			return status;
-
-		val64 = readq(&vpath_reg->rts_access_steer_ctrl);
-
-		if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
-
-			data1 = readq(&vpath_reg->rts_access_steer_data0);
-			((u64 *)product_desc)[j++] = be64_to_cpu(data1);
-
-			data2 = readq(&vpath_reg->rts_access_steer_data1);
-			((u64 *)product_desc)[j++] = be64_to_cpu(data2);
-
-			status = VXGE_HW_OK;
-		} else
-			*product_desc = 0;
-	}
-
-	return status;
-}
-
-/*
- * __vxge_hw_vpath_fw_ver_get - Get the fw version
- * Returns FW Version
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_fw_ver_get(
-	u32 vp_id,
-	struct vxge_hw_vpath_reg __iomem *vpath_reg,
-	struct vxge_hw_device_hw_info *hw_info)
-{
-	u64 val64;
-	u64 data1 = 0ULL;
-	u64 data2 = 0ULL;
-	struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
-	struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
-	struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
-	struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
-	enum vxge_hw_status status = VXGE_HW_OK;
-
-	val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
-		VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY) |
-		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
-		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
-		VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
-		VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
-
-	status = __vxge_hw_pio_mem_write64(val64,
-				&vpath_reg->rts_access_steer_ctrl,
-				VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
-				VXGE_HW_DEF_DEVICE_POLL_MILLIS);
-
-	if (status != VXGE_HW_OK)
-		goto exit;
-
-	val64 = readq(&vpath_reg->rts_access_steer_ctrl);
-
-	if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
-
-		data1 = readq(&vpath_reg->rts_access_steer_data0);
-		data2 = readq(&vpath_reg->rts_access_steer_data1);
-
-		fw_date->day =
-			(u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(
-						data1);
-		fw_date->month =
-			(u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(
-						data1);
-		fw_date->year =
-			(u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(
-						data1);
-
-		snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
-			fw_date->month, fw_date->day, fw_date->year);
-
-		fw_version->major =
-		    (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data1);
-		fw_version->minor =
-		    (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data1);
-		fw_version->build =
-		    (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data1);
-
-		snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
-		    fw_version->major, fw_version->minor, fw_version->build);
-
-		flash_date->day =
-		  (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data2);
-		flash_date->month =
-		 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data2);
-		flash_date->year =
-		 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data2);
-
-		snprintf(flash_date->date, VXGE_HW_FW_STRLEN,
-			"%2.2d/%2.2d/%4.4d",
-			flash_date->month, flash_date->day, flash_date->year);
-
-		flash_version->major =
-		 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data2);
-		flash_version->minor =
-		 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data2);
-		flash_version->build =
-		 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data2);
-
-		snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
-			flash_version->major, flash_version->minor,
-			flash_version->build);
-
-		status = VXGE_HW_OK;
-
-	} else
-		status = VXGE_HW_FAIL;
-exit:
-	return status;
-}
-
-/*
- * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
- * Returns pci function mode
- */
-static u64
-__vxge_hw_vpath_pci_func_mode_get(
-	u32  vp_id,
-	struct vxge_hw_vpath_reg __iomem *vpath_reg)
-{
-	u64 val64;
-	u64 data1 = 0ULL;
-	enum vxge_hw_status status = VXGE_HW_OK;
-
-	__vxge_hw_read_rts_ds(vpath_reg,
-		VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PCI_MODE);
-
-	val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
-			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
-		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
-			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
-		VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
-		VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
-
-	status = __vxge_hw_pio_mem_write64(val64,
-				&vpath_reg->rts_access_steer_ctrl,
-				VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
-				VXGE_HW_DEF_DEVICE_POLL_MILLIS);
-
-	if (status != VXGE_HW_OK)
-		goto exit;
-
-	val64 = readq(&vpath_reg->rts_access_steer_ctrl);
-
-	if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
-		data1 = readq(&vpath_reg->rts_access_steer_data0);
-		status = VXGE_HW_OK;
-	} else {
-		data1 = 0;
-		status = VXGE_HW_FAIL;
-	}
-exit:
-	return data1;
-}
-
 /**
  * vxge_hw_device_flick_link_led - Flick (blink) link LED.
  * @hldev: HW device.
@@ -2974,37 +3636,24 @@
  * Flicker the link LED.
  */
 enum vxge_hw_status
-vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev,
-			       u64 on_off)
+vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev, u64 on_off)
 {
-	u64 val64;
-	enum vxge_hw_status status = VXGE_HW_OK;
-	struct vxge_hw_vpath_reg __iomem *vp_reg;
+	struct __vxge_hw_virtualpath *vpath;
+	u64 data0, data1 = 0, steer_ctrl = 0;
+	enum vxge_hw_status status;
 
 	if (hldev == NULL) {
 		status = VXGE_HW_ERR_INVALID_DEVICE;
 		goto exit;
 	}
 
-	vp_reg = hldev->vpath_reg[hldev->first_vp_id];
+	vpath = &hldev->virtual_paths[hldev->first_vp_id];
 
-	writeq(0, &vp_reg->rts_access_steer_ctrl);
-	wmb();
-	writeq(on_off, &vp_reg->rts_access_steer_data0);
-	writeq(0, &vp_reg->rts_access_steer_data1);
-	wmb();
-
-	val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
-			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL) |
-		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
-			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
-		VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
-		VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
-
-	status = __vxge_hw_pio_mem_write64(val64,
-				&vp_reg->rts_access_steer_ctrl,
-				VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
-				VXGE_HW_DEF_DEVICE_POLL_MILLIS);
+	data0 = on_off;
+	status = vxge_hw_vpath_fw_api(vpath,
+			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL,
+			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
+			0, &data0, &data1, &steer_ctrl);
 exit:
 	return status;
 }
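A sketch of how a blink loop might drive this, e.g. from an ethtool identify handler; the iteration count, delay, and the 1/0 on_off encoding are assumptions, not defined by this patch:

	int i;

	for (i = 0; i < 5; i++) {
		vxge_hw_device_flick_link_led(hldev, 1);	/* assumed: on */
		msleep(500);
		vxge_hw_device_flick_link_led(hldev, 0);	/* assumed: off */
		msleep(500);
	}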
@@ -3013,63 +3662,38 @@
  * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables
  */
 enum vxge_hw_status
-__vxge_hw_vpath_rts_table_get(
-	struct __vxge_hw_vpath_handle *vp,
-	u32 action, u32 rts_table, u32 offset, u64 *data1, u64 *data2)
+__vxge_hw_vpath_rts_table_get(struct __vxge_hw_vpath_handle *vp,
+			      u32 action, u32 rts_table, u32 offset,
+			      u64 *data0, u64 *data1)
 {
-	u64 val64;
-	struct __vxge_hw_virtualpath *vpath;
-	struct vxge_hw_vpath_reg __iomem *vp_reg;
-
-	enum vxge_hw_status status = VXGE_HW_OK;
+	enum vxge_hw_status status;
+	u64 steer_ctrl = 0;
 
 	if (vp == NULL) {
 		status = VXGE_HW_ERR_INVALID_HANDLE;
 		goto exit;
 	}
 
-	vpath = vp->vpath;
-	vp_reg = vpath->vp_reg;
-
-	val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
-		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) |
-		VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
-		VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset);
-
 	if ((rts_table ==
-		VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) ||
+	     VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) ||
 	    (rts_table ==
-		VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) ||
+	     VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) ||
 	    (rts_table ==
-		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) ||
+	     VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) ||
 	    (rts_table ==
-		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) {
-		val64 = val64 |	VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL;
+	     VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) {
+		steer_ctrl = VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL;
 	}
 
-	status = __vxge_hw_pio_mem_write64(val64,
-				&vp_reg->rts_access_steer_ctrl,
-				VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
-				vpath->hldev->config.device_poll_millis);
-
+	status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
+				      data0, data1, &steer_ctrl);
 	if (status != VXGE_HW_OK)
 		goto exit;
 
-	val64 = readq(&vp_reg->rts_access_steer_ctrl);
-
-	if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
-
-		*data1 = readq(&vp_reg->rts_access_steer_data0);
-
-		if ((rts_table ==
-		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
-		(rts_table ==
-		VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) {
-			*data2 = readq(&vp_reg->rts_access_steer_data1);
-		}
-		status = VXGE_HW_OK;
-	} else
-		status = VXGE_HW_FAIL;
+	if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) &&
+	    (rts_table !=
+	     VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
+		*data1 = 0;
 exit:
 	return status;
 }
@@ -3078,107 +3702,27 @@
  * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables
  */
 enum vxge_hw_status
-__vxge_hw_vpath_rts_table_set(
-	struct __vxge_hw_vpath_handle *vp, u32 action, u32 rts_table,
-	u32 offset, u64 data1, u64 data2)
+__vxge_hw_vpath_rts_table_set(struct __vxge_hw_vpath_handle *vp, u32 action,
+			      u32 rts_table, u32 offset, u64 steer_data0,
+			      u64 steer_data1)
 {
-	u64 val64;
-	struct __vxge_hw_virtualpath *vpath;
-	enum vxge_hw_status status = VXGE_HW_OK;
-	struct vxge_hw_vpath_reg __iomem *vp_reg;
+	u64 data0, data1 = 0, steer_ctrl = 0;
+	enum vxge_hw_status status;
 
 	if (vp == NULL) {
 		status = VXGE_HW_ERR_INVALID_HANDLE;
 		goto exit;
 	}
 
-	vpath = vp->vpath;
-	vp_reg = vpath->vp_reg;
-
-	writeq(data1, &vp_reg->rts_access_steer_data0);
-	wmb();
+	data0 = steer_data0;
 
 	if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
 	    (rts_table ==
-		VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) {
-		writeq(data2, &vp_reg->rts_access_steer_data1);
-		wmb();
-	}
+	     VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
+		data1 = steer_data1;
 
-	val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
-		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) |
-		VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
-		VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset);
-
-	status = __vxge_hw_pio_mem_write64(val64,
-				&vp_reg->rts_access_steer_ctrl,
-				VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
-				vpath->hldev->config.device_poll_millis);
-
-	if (status != VXGE_HW_OK)
-		goto exit;
-
-	val64 = readq(&vp_reg->rts_access_steer_ctrl);
-
-	if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS)
-		status = VXGE_HW_OK;
-	else
-		status = VXGE_HW_FAIL;
-exit:
-	return status;
-}
-
-/*
- * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
- *               from MAC address table.
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_addr_get(
-	u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
-	u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN])
-{
-	u32 i;
-	u64 val64;
-	u64 data1 = 0ULL;
-	u64 data2 = 0ULL;
-	enum vxge_hw_status status = VXGE_HW_OK;
-
-	val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
-		VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY) |
-		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
-		VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) |
-		VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
-		VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
-
-	status = __vxge_hw_pio_mem_write64(val64,
-				&vpath_reg->rts_access_steer_ctrl,
-				VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
-				VXGE_HW_DEF_DEVICE_POLL_MILLIS);
-
-	if (status != VXGE_HW_OK)
-		goto exit;
-
-	val64 = readq(&vpath_reg->rts_access_steer_ctrl);
-
-	if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
-
-		data1 = readq(&vpath_reg->rts_access_steer_data0);
-		data2 = readq(&vpath_reg->rts_access_steer_data1);
-
-		data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
-		data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
-							data2);
-
-		for (i = ETH_ALEN; i > 0; i--) {
-			macaddr[i-1] = (u8)(data1 & 0xFF);
-			data1 >>= 8;
-
-			macaddr_mask[i-1] = (u8)(data2 & 0xFF);
-			data2 >>= 8;
-		}
-		status = VXGE_HW_OK;
-	} else
-		status = VXGE_HW_FAIL;
+	status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
+				      &data0, &data1, &steer_ctrl);
 exit:
 	return status;
 }
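Together these two helpers give the read-modify-write pattern the next hunk depends on; a sketch using the RTH_GEN_CFG selector that appears in the surrounding context (the WRITE_ENTRY action name is an assumption):

	u64 data0 = 0, data1 = 0;

	status = __vxge_hw_vpath_rts_table_get(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
			0, &data0, &data1);
	if (status != VXGE_HW_OK)
		return status;

	/* modify data0/data1 bits here, then write the entry back */
	status = __vxge_hw_vpath_rts_table_set(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
			0, data0, data1);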
@@ -3204,6 +3748,8 @@
 		     VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
 		     VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
 			0, &data0, &data1);
+	if (status != VXGE_HW_OK)
+		goto exit;
 
 	data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) |
 			VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3));
@@ -3771,10 +4317,10 @@
 	vp_reg = vpath->vp_reg;
 	config = vpath->vp_config;
 
-	writeq((u64)0, &vp_reg->tim_dest_addr);
-	writeq((u64)0, &vp_reg->tim_vpath_map);
-	writeq((u64)0, &vp_reg->tim_bitmap);
-	writeq((u64)0, &vp_reg->tim_remap);
+	writeq(0, &vp_reg->tim_dest_addr);
+	writeq(0, &vp_reg->tim_vpath_map);
+	writeq(0, &vp_reg->tim_bitmap);
+	writeq(0, &vp_reg->tim_remap);
 
 	if (config->ring.enable == VXGE_HW_RING_ENABLE)
 		writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM(
@@ -3876,8 +4422,7 @@
 
 		if (config->tti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
 			val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
-			val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(
-					config->tti.util_sel);
+			val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id);
 		}
 
 		if (config->tti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
@@ -3981,8 +4526,7 @@
 
 		if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
 			val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
-			val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(
-					config->rti.util_sel);
+			val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id);
 		}
 
 		if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
@@ -4003,11 +4547,15 @@
 	writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]);
 	writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]);
 
+	val64 = VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_PRD(150);
+	val64 |= VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_DIV(0);
+	val64 |= VXGE_HW_TIM_WRKLD_CLC_CNT_RX_TX(3);
+	writeq(val64, &vp_reg->tim_wrkld_clc);
+
 	return status;
 }
 
-void
-vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
+void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
 {
 	struct __vxge_hw_virtualpath *vpath;
 	struct vxge_hw_vpath_reg __iomem *vp_reg;
@@ -4018,17 +4566,15 @@
 	vp_reg = vpath->vp_reg;
 	config = vpath->vp_config;
 
-	if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
+	if (config->fifo.enable == VXGE_HW_FIFO_ENABLE &&
+	    config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
+		config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
 		val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
-
-		if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
-			config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
-			val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
-			writeq(val64,
-			&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
-		}
+		val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
+		writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
 	}
 }
+
 /*
  * __vxge_hw_vpath_initialize
  * This routine is the final phase of init which initializes the
@@ -4052,22 +4598,18 @@
 	vp_reg = vpath->vp_reg;
 
 	status =  __vxge_hw_vpath_swapper_set(vpath->vp_reg);
-
 	if (status != VXGE_HW_OK)
 		goto exit;
 
 	status =  __vxge_hw_vpath_mac_configure(hldev, vp_id);
-
 	if (status != VXGE_HW_OK)
 		goto exit;
 
 	status =  __vxge_hw_vpath_kdfc_configure(hldev, vp_id);
-
 	if (status != VXGE_HW_OK)
 		goto exit;
 
 	status = __vxge_hw_vpath_tim_configure(hldev, vp_id);
-
 	if (status != VXGE_HW_OK)
 		goto exit;
 
@@ -4075,7 +4617,6 @@
 
 	/* Get MRRS value from device control */
 	status  = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32);
-
 	if (status == VXGE_HW_OK) {
 		val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12;
 		val64 &=
@@ -4099,6 +4640,28 @@
 }
 
 /*
+ * __vxge_hw_vp_terminate - Terminate Virtual Path structure
+ * This routine closes all channels it opened and frees up memory
+ */
+static void __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
+{
+	struct __vxge_hw_virtualpath *vpath;
+
+	vpath = &hldev->virtual_paths[vp_id];
+
+	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
+		goto exit;
+
+	VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0,
+		vpath->hldev->tim_int_mask1, vpath->vp_id);
+	hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL;
+
+	memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
+exit:
+	return;
+}
+
+/*
  * __vxge_hw_vp_initialize - Initialize Virtual Path structure
  * This routine is the initial phase of init which resets the vpath and
  * initializes the software support structures.
@@ -4117,6 +4680,7 @@
 
 	vpath = &hldev->virtual_paths[vp_id];
 
+	spin_lock_init(&hldev->virtual_paths[vp_id].lock);
 	vpath->vp_id = vp_id;
 	vpath->vp_open = VXGE_HW_VP_OPEN;
 	vpath->hldev = hldev;
@@ -4127,14 +4691,12 @@
 	__vxge_hw_vpath_reset(hldev, vp_id);
 
 	status = __vxge_hw_vpath_reset_check(vpath);
-
 	if (status != VXGE_HW_OK) {
 		memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
 		goto exit;
 	}
 
 	status = __vxge_hw_vpath_mgmt_read(hldev, vpath);
-
 	if (status != VXGE_HW_OK) {
 		memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
 		goto exit;
@@ -4148,7 +4710,6 @@
 		hldev->tim_int_mask1, vp_id);
 
 	status = __vxge_hw_vpath_initialize(hldev, vp_id);
-
 	if (status != VXGE_HW_OK)
 		__vxge_hw_vp_terminate(hldev, vp_id);
 exit:
@@ -4156,29 +4717,6 @@
 }
 
 /*
- * __vxge_hw_vp_terminate - Terminate Virtual Path structure
- * This routine closes all channels it opened and freeup memory
- */
-static void
-__vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
-{
-	struct __vxge_hw_virtualpath *vpath;
-
-	vpath = &hldev->virtual_paths[vp_id];
-
-	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
-		goto exit;
-
-	VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0,
-		vpath->hldev->tim_int_mask1, vpath->vp_id);
-	hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL;
-
-	memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
-exit:
-	return;
-}
-
-/*
  * vxge_hw_vpath_mtu_set - Set MTU.
  * Set a new MTU value. For example, to use jumbo frames:
  * vxge_hw_vpath_mtu_set(my_device, 9600);
@@ -4215,6 +4753,64 @@
 }
 
 /*
+ * vxge_hw_vpath_stats_enable - Enable vpath h/w statistics.
+ * Enable the DMA vpath statistics. The function is to be called to re-enable
+ * the adapter to update stats into the host memory.
+ */
+static enum vxge_hw_status
+vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
+{
+	enum vxge_hw_status status = VXGE_HW_OK;
+	struct __vxge_hw_virtualpath *vpath;
+
+	vpath = vp->vpath;
+
+	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
+		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
+		goto exit;
+	}
+
+	memcpy(vpath->hw_stats_sav, vpath->hw_stats,
+			sizeof(struct vxge_hw_vpath_stats_hw_info));
+
+	status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats);
+exit:
+	return status;
+}
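Because the previous snapshot is parked in hw_stats_sav before hw_stats is refreshed, a caller can compute per-interval deltas; a sketch (the delta variable is illustrative):

	u32 mwr_delta;

	status = vxge_hw_vpath_stats_enable(vp);
	if (status == VXGE_HW_OK)
		/* e.g. posted writes issued since the previous snapshot */
		mwr_delta = vp->vpath->hw_stats->ini_num_mwr_sent -
			    vp->vpath->hw_stats_sav->ini_num_mwr_sent;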
+
+/*
+ * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool
+ * This function allocates a block from block pool or from the system
+ */
+static struct __vxge_hw_blockpool_entry *
+__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
+{
+	struct __vxge_hw_blockpool_entry *entry = NULL;
+	struct __vxge_hw_blockpool  *blockpool;
+
+	blockpool = &devh->block_pool;
+
+	if (size == blockpool->block_size) {
+
+		if (!list_empty(&blockpool->free_block_list))
+			entry = (struct __vxge_hw_blockpool_entry *)
+				list_first_entry(&blockpool->free_block_list,
+					struct __vxge_hw_blockpool_entry,
+					item);
+
+		if (entry != NULL) {
+			list_del(&entry->item);
+			blockpool->pool_size--;
+		}
+	}
+
+	if (entry != NULL)
+		__vxge_hw_blockpool_blocks_add(blockpool);
+
+	return entry;
+}
+
+/*
  * vxge_hw_vpath_open - Open a virtual path on a given adapter
  * This function is used to open access to a virtual path of an
  * adapter for offload, GRO operations. This function returns
@@ -4238,19 +4834,15 @@
 
 	status = __vxge_hw_vp_initialize(hldev, attr->vp_id,
 			&hldev->config.vp_config[attr->vp_id]);
-
 	if (status != VXGE_HW_OK)
 		goto vpath_open_exit1;
 
-	vp = (struct __vxge_hw_vpath_handle *)
-		vmalloc(sizeof(struct __vxge_hw_vpath_handle));
+	vp = vzalloc(sizeof(struct __vxge_hw_vpath_handle));
 	if (vp == NULL) {
 		status = VXGE_HW_ERR_OUT_OF_MEMORY;
 		goto vpath_open_exit2;
 	}
 
-	memset(vp, 0, sizeof(struct __vxge_hw_vpath_handle));
-
 	vp->vpath = vpath;
 
 	if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
@@ -4273,7 +4865,6 @@
 
 	vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev,
 				VXGE_HW_BLOCK_SIZE);
-
 	if (vpath->stats_block == NULL) {
 		status = VXGE_HW_ERR_OUT_OF_MEMORY;
 		goto vpath_open_exit8;
@@ -4332,19 +4923,20 @@
  * This function initializes the RxD doorbell for a virtual path
  * opened earlier.
  */
-void
-vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
+void vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
 {
-	struct __vxge_hw_virtualpath *vpath = NULL;
+	struct __vxge_hw_virtualpath *vpath = vp->vpath;
+	struct __vxge_hw_ring *ring = vpath->ringh;
+	struct vxgedev *vdev = netdev_priv(vpath->hldev->ndev);
 	u64 new_count, val64, val164;
-	struct __vxge_hw_ring *ring;
 
-	vpath = vp->vpath;
-	ring = vpath->ringh;
+	if (vdev->titan1) {
+		new_count = readq(&vpath->vp_reg->rxdmem_size);
+		new_count &= 0x1fff;
+	} else {
+		new_count = ring->config->ring_blocks * VXGE_HW_BLOCK_SIZE / 8;
+	}
 
-	new_count = readq(&vpath->vp_reg->rxdmem_size);
-	new_count &= 0x1fff;
-	val164 = (VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count));
+	val164 = VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count);
 
 	writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164),
 		&vpath->vp_reg->prc_rxd_doorbell);
@@ -4367,6 +4959,29 @@
 }
 
 /*
+ * __vxge_hw_blockpool_block_free - Frees a block from block pool
+ * @devh: Hal device
+ * @entry: Entry of block to be freed
+ *
+ * This function frees a block from block pool
+ */
+static void
+__vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
+			       struct __vxge_hw_blockpool_entry *entry)
+{
+	struct __vxge_hw_blockpool  *blockpool;
+
+	blockpool = &devh->block_pool;
+
+	if (entry->length == blockpool->block_size) {
+		list_add(&entry->item, &blockpool->free_block_list);
+		blockpool->pool_size++;
+	}
+
+	__vxge_hw_blockpool_blocks_remove(blockpool);
+}
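Allocate and free are symmetric only for pool-sized blocks (any other size falls through untouched); a sketch of the pairing, the stats block at vpath open being the real in-tree caller (the usage comment is illustrative):

	struct __vxge_hw_blockpool_entry *blk;

	blk = __vxge_hw_blockpool_block_allocate(hldev, VXGE_HW_BLOCK_SIZE);
	if (blk == NULL)
		return VXGE_HW_ERR_OUT_OF_MEMORY;
	/* ... use blk->memblock (virtual) and blk->dma_addr (bus) ... */
	__vxge_hw_blockpool_block_free(hldev, blk);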
+
+/*
  * vxge_hw_vpath_close - Close the handle obtained from a previous vpath open
  * This function is used to close access to virtual path opened
  * earlier.
@@ -4414,7 +5029,9 @@
 
 	__vxge_hw_vp_terminate(devh, vp_id);
 
+	spin_lock(&vpath->lock);
 	vpath->vp_open = VXGE_HW_VP_NOT_OPEN;
+	spin_unlock(&vpath->lock);
 
 vpath_close_exit:
 	return status;
@@ -4515,730 +5132,3 @@
 	__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
 		&hldev->common_reg->cmn_rsthdlr_cfg1);
 }
-
-/*
- * vxge_hw_vpath_stats_enable - Enable vpath h/wstatistics.
- * Enable the DMA vpath statistics. The function is to be called to re-enable
- * the adapter to update stats into the host memory
- */
-static enum vxge_hw_status
-vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
-{
-	enum vxge_hw_status status = VXGE_HW_OK;
-	struct __vxge_hw_virtualpath *vpath;
-
-	vpath = vp->vpath;
-
-	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
-		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
-		goto exit;
-	}
-
-	memcpy(vpath->hw_stats_sav, vpath->hw_stats,
-			sizeof(struct vxge_hw_vpath_stats_hw_info));
-
-	status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats);
-exit:
-	return status;
-}
-
-/*
- * __vxge_hw_vpath_stats_access - Get the statistics from the given location
- *                           and offset and perform an operation
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
-			     u32 operation, u32 offset, u64 *stat)
-{
-	u64 val64;
-	enum vxge_hw_status status = VXGE_HW_OK;
-	struct vxge_hw_vpath_reg __iomem *vp_reg;
-
-	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
-		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
-		goto vpath_stats_access_exit;
-	}
-
-	vp_reg = vpath->vp_reg;
-
-	val64 =  VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) |
-		 VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE |
-		 VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset);
-
-	status = __vxge_hw_pio_mem_write64(val64,
-				&vp_reg->xmac_stats_access_cmd,
-				VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE,
-				vpath->hldev->config.device_poll_millis);
-
-	if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
-		*stat = readq(&vp_reg->xmac_stats_access_data);
-	else
-		*stat = 0;
-
-vpath_stats_access_exit:
-	return status;
-}
-
-/*
- * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_xmac_tx_stats_get(
-	struct __vxge_hw_virtualpath *vpath,
-	struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
-{
-	u64 *val64;
-	int i;
-	u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET;
-	enum vxge_hw_status status = VXGE_HW_OK;
-
-	val64 = (u64 *) vpath_tx_stats;
-
-	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
-		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
-		goto exit;
-	}
-
-	for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) {
-		status = __vxge_hw_vpath_stats_access(vpath,
-					VXGE_HW_STATS_OP_READ,
-					offset, val64);
-		if (status != VXGE_HW_OK)
-			goto exit;
-		offset++;
-		val64++;
-	}
-exit:
-	return status;
-}
-
-/*
- * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
-				  struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
-{
-	u64 *val64;
-	enum vxge_hw_status status = VXGE_HW_OK;
-	int i;
-	u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET;
-	val64 = (u64 *) vpath_rx_stats;
-
-	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
-		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
-		goto exit;
-	}
-	for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) {
-		status = __vxge_hw_vpath_stats_access(vpath,
-					VXGE_HW_STATS_OP_READ,
-					offset >> 3, val64);
-		if (status != VXGE_HW_OK)
-			goto exit;
-
-		offset += 8;
-		val64++;
-	}
-exit:
-	return status;
-}
-
-/*
- * __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
-			  struct vxge_hw_vpath_stats_hw_info *hw_stats)
-{
-	u64 val64;
-	enum vxge_hw_status status = VXGE_HW_OK;
-	struct vxge_hw_vpath_reg __iomem *vp_reg;
-
-	if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
-		status = VXGE_HW_ERR_VPATH_NOT_OPEN;
-		goto exit;
-	}
-	vp_reg = vpath->vp_reg;
-
-	val64 = readq(&vp_reg->vpath_debug_stats0);
-	hw_stats->ini_num_mwr_sent =
-		(u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64);
-
-	val64 = readq(&vp_reg->vpath_debug_stats1);
-	hw_stats->ini_num_mrd_sent =
-		(u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64);
-
-	val64 = readq(&vp_reg->vpath_debug_stats2);
-	hw_stats->ini_num_cpl_rcvd =
-		(u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64);
-
-	val64 = readq(&vp_reg->vpath_debug_stats3);
-	hw_stats->ini_num_mwr_byte_sent =
-		VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64);
-
-	val64 = readq(&vp_reg->vpath_debug_stats4);
-	hw_stats->ini_num_cpl_byte_rcvd =
-		VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64);
-
-	val64 = readq(&vp_reg->vpath_debug_stats5);
-	hw_stats->wrcrdtarb_xoff =
-		(u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64);
-
-	val64 = readq(&vp_reg->vpath_debug_stats6);
-	hw_stats->rdcrdtarb_xoff =
-		(u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64);
-
-	val64 = readq(&vp_reg->vpath_genstats_count01);
-	hw_stats->vpath_genstats_count0 =
-	(u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(
-		val64);
-
-	val64 = readq(&vp_reg->vpath_genstats_count01);
-	hw_stats->vpath_genstats_count1 =
-	(u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(
-		val64);
-
-	val64 = readq(&vp_reg->vpath_genstats_count23);
-	hw_stats->vpath_genstats_count2 =
-	(u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(
-		val64);
-
-	val64 = readq(&vp_reg->vpath_genstats_count01);
-	hw_stats->vpath_genstats_count3 =
-	(u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(
-		val64);
-
-	val64 = readq(&vp_reg->vpath_genstats_count4);
-	hw_stats->vpath_genstats_count4 =
-	(u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(
-		val64);
-
-	val64 = readq(&vp_reg->vpath_genstats_count5);
-	hw_stats->vpath_genstats_count5 =
-	(u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(
-		val64);
-
-	status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats);
-	if (status != VXGE_HW_OK)
-		goto exit;
-
-	status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats);
-	if (status != VXGE_HW_OK)
-		goto exit;
-
-	VXGE_HW_VPATH_STATS_PIO_READ(
-		VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET);
-
-	hw_stats->prog_event_vnum0 =
-			(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64);
-
-	hw_stats->prog_event_vnum1 =
-			(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64);
-
-	VXGE_HW_VPATH_STATS_PIO_READ(
-		VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET);
-
-	hw_stats->prog_event_vnum2 =
-			(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64);
-
-	hw_stats->prog_event_vnum3 =
-			(u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64);
-
-	val64 = readq(&vp_reg->rx_multi_cast_stats);
-	hw_stats->rx_multi_cast_frame_discard =
-		(u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64);
-
-	val64 = readq(&vp_reg->rx_frm_transferred);
-	hw_stats->rx_frm_transferred =
-		(u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64);
-
-	val64 = readq(&vp_reg->rxd_returned);
-	hw_stats->rxd_returned =
-		(u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64);
-
-	val64 = readq(&vp_reg->dbg_stats_rx_mpa);
-	hw_stats->rx_mpa_len_fail_frms =
-		(u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64);
-	hw_stats->rx_mpa_mrk_fail_frms =
-		(u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64);
-	hw_stats->rx_mpa_crc_fail_frms =
-		(u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64);
-
-	val64 = readq(&vp_reg->dbg_stats_rx_fau);
-	hw_stats->rx_permitted_frms =
-		(u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64);
-	hw_stats->rx_vp_reset_discarded_frms =
-	(u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64);
-	hw_stats->rx_wol_frms =
-		(u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64);
-
-	val64 = readq(&vp_reg->tx_vp_reset_discarded_frms);
-	hw_stats->tx_vp_reset_discarded_frms =
-	(u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(
-		val64);
-exit:
-	return status;
-}
-
-
-static void vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh,
-					unsigned long size)
-{
-	gfp_t flags;
-	void *vaddr;
-
-	if (in_interrupt())
-		flags = GFP_ATOMIC | GFP_DMA;
-	else
-		flags = GFP_KERNEL | GFP_DMA;
-
-	vaddr = kmalloc((size), flags);
-
-	vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
-}
-
-static void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
-			     struct pci_dev **p_dma_acch)
-{
-	unsigned long misaligned = *(unsigned long *)p_dma_acch;
-	u8 *tmp = (u8 *)vaddr;
-	tmp -= misaligned;
-	kfree((void *)tmp);
-}
-
-/*
- * __vxge_hw_blockpool_create - Create block pool
- */
-
-enum vxge_hw_status
-__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
-			   struct __vxge_hw_blockpool *blockpool,
-			   u32 pool_size,
-			   u32 pool_max)
-{
-	u32 i;
-	struct __vxge_hw_blockpool_entry *entry = NULL;
-	void *memblock;
-	dma_addr_t dma_addr;
-	struct pci_dev *dma_handle;
-	struct pci_dev *acc_handle;
-	enum vxge_hw_status status = VXGE_HW_OK;
-
-	if (blockpool == NULL) {
-		status = VXGE_HW_FAIL;
-		goto blockpool_create_exit;
-	}
-
-	blockpool->hldev = hldev;
-	blockpool->block_size = VXGE_HW_BLOCK_SIZE;
-	blockpool->pool_size = 0;
-	blockpool->pool_max = pool_max;
-	blockpool->req_out = 0;
-
-	INIT_LIST_HEAD(&blockpool->free_block_list);
-	INIT_LIST_HEAD(&blockpool->free_entry_list);
-
-	for (i = 0; i < pool_size + pool_max; i++) {
-		entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
-				GFP_KERNEL);
-		if (entry == NULL) {
-			__vxge_hw_blockpool_destroy(blockpool);
-			status = VXGE_HW_ERR_OUT_OF_MEMORY;
-			goto blockpool_create_exit;
-		}
-		list_add(&entry->item, &blockpool->free_entry_list);
-	}
-
-	for (i = 0; i < pool_size; i++) {
-
-		memblock = vxge_os_dma_malloc(
-				hldev->pdev,
-				VXGE_HW_BLOCK_SIZE,
-				&dma_handle,
-				&acc_handle);
-
-		if (memblock == NULL) {
-			__vxge_hw_blockpool_destroy(blockpool);
-			status = VXGE_HW_ERR_OUT_OF_MEMORY;
-			goto blockpool_create_exit;
-		}
-
-		dma_addr = pci_map_single(hldev->pdev, memblock,
-				VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL);
-
-		if (unlikely(pci_dma_mapping_error(hldev->pdev,
-				dma_addr))) {
-
-			vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
-			__vxge_hw_blockpool_destroy(blockpool);
-			status = VXGE_HW_ERR_OUT_OF_MEMORY;
-			goto blockpool_create_exit;
-		}
-
-		if (!list_empty(&blockpool->free_entry_list))
-			entry = (struct __vxge_hw_blockpool_entry *)
-				list_first_entry(&blockpool->free_entry_list,
-					struct __vxge_hw_blockpool_entry,
-					item);
-
-		if (entry == NULL)
-			entry =
-			    kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
-					GFP_KERNEL);
-		if (entry != NULL) {
-			list_del(&entry->item);
-			entry->length = VXGE_HW_BLOCK_SIZE;
-			entry->memblock = memblock;
-			entry->dma_addr = dma_addr;
-			entry->acc_handle = acc_handle;
-			entry->dma_handle = dma_handle;
-			list_add(&entry->item,
-					  &blockpool->free_block_list);
-			blockpool->pool_size++;
-		} else {
-			__vxge_hw_blockpool_destroy(blockpool);
-			status = VXGE_HW_ERR_OUT_OF_MEMORY;
-			goto blockpool_create_exit;
-		}
-	}
-
-blockpool_create_exit:
-	return status;
-}
-
-/*
- * __vxge_hw_blockpool_destroy - Deallocates the block pool
- */
-
-void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
-{
-
-	struct __vxge_hw_device *hldev;
-	struct list_head *p, *n;
-	u16 ret;
-
-	if (blockpool == NULL) {
-		ret = 1;
-		goto exit;
-	}
-
-	hldev = blockpool->hldev;
-
-	list_for_each_safe(p, n, &blockpool->free_block_list) {
-
-		pci_unmap_single(hldev->pdev,
-			((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
-			((struct __vxge_hw_blockpool_entry *)p)->length,
-			PCI_DMA_BIDIRECTIONAL);
-
-		vxge_os_dma_free(hldev->pdev,
-			((struct __vxge_hw_blockpool_entry *)p)->memblock,
-			&((struct __vxge_hw_blockpool_entry *) p)->acc_handle);
-
-		list_del(
-			&((struct __vxge_hw_blockpool_entry *)p)->item);
-		kfree(p);
-		blockpool->pool_size--;
-	}
-
-	list_for_each_safe(p, n, &blockpool->free_entry_list) {
-		list_del(
-			&((struct __vxge_hw_blockpool_entry *)p)->item);
-		kfree((void *)p);
-	}
-	ret = 0;
-exit:
-	return;
-}
-
-/*
- * __vxge_hw_blockpool_blocks_add - Request additional blocks
- */
-static
-void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
-{
-	u32 nreq = 0, i;
-
-	if ((blockpool->pool_size  +  blockpool->req_out) <
-		VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) {
-		nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE;
-		blockpool->req_out += nreq;
-	}
-
-	for (i = 0; i < nreq; i++)
-		vxge_os_dma_malloc_async(
-			((struct __vxge_hw_device *)blockpool->hldev)->pdev,
-			blockpool->hldev, VXGE_HW_BLOCK_SIZE);
-}
-
-/*
- * __vxge_hw_blockpool_blocks_remove - Free additional blocks
- */
-static
-void __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
-{
-	struct list_head *p, *n;
-
-	list_for_each_safe(p, n, &blockpool->free_block_list) {
-
-		if (blockpool->pool_size < blockpool->pool_max)
-			break;
-
-		pci_unmap_single(
-			((struct __vxge_hw_device *)blockpool->hldev)->pdev,
-			((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
-			((struct __vxge_hw_blockpool_entry *)p)->length,
-			PCI_DMA_BIDIRECTIONAL);
-
-		vxge_os_dma_free(
-			((struct __vxge_hw_device *)blockpool->hldev)->pdev,
-			((struct __vxge_hw_blockpool_entry *)p)->memblock,
-			&((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
-
-		list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
-
-		list_add(p, &blockpool->free_entry_list);
-
-		blockpool->pool_size--;
-
-	}
-}
-
-/*
- * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
- * Adds a block to block pool
- */
-static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
-					void *block_addr,
-					u32 length,
-					struct pci_dev *dma_h,
-					struct pci_dev *acc_handle)
-{
-	struct __vxge_hw_blockpool  *blockpool;
-	struct __vxge_hw_blockpool_entry  *entry = NULL;
-	dma_addr_t dma_addr;
-	enum vxge_hw_status status = VXGE_HW_OK;
-	u32 req_out;
-
-	blockpool = &devh->block_pool;
-
-	if (block_addr == NULL) {
-		blockpool->req_out--;
-		status = VXGE_HW_FAIL;
-		goto exit;
-	}
-
-	dma_addr = pci_map_single(devh->pdev, block_addr, length,
-				PCI_DMA_BIDIRECTIONAL);
-
-	if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) {
-
-		vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
-		blockpool->req_out--;
-		status = VXGE_HW_FAIL;
-		goto exit;
-	}
-
-
-	if (!list_empty(&blockpool->free_entry_list))
-		entry = (struct __vxge_hw_blockpool_entry *)
-			list_first_entry(&blockpool->free_entry_list,
-				struct __vxge_hw_blockpool_entry,
-				item);
-
-	if (entry == NULL)
-		entry = (struct __vxge_hw_blockpool_entry *)
-			vmalloc(sizeof(struct __vxge_hw_blockpool_entry));
-	else
-		list_del(&entry->item);
-
-	if (entry != NULL) {
-		entry->length = length;
-		entry->memblock = block_addr;
-		entry->dma_addr = dma_addr;
-		entry->acc_handle = acc_handle;
-		entry->dma_handle = dma_h;
-		list_add(&entry->item, &blockpool->free_block_list);
-		blockpool->pool_size++;
-		status = VXGE_HW_OK;
-	} else
-		status = VXGE_HW_ERR_OUT_OF_MEMORY;
-
-	blockpool->req_out--;
-
-	req_out = blockpool->req_out;
-exit:
-	return;
-}
-
-/*
- * __vxge_hw_blockpool_malloc - Allocate a memory block from pool
- * Allocates a block of memory of given size, either from block pool
- * or by calling vxge_os_dma_malloc()
- */
-void *
-__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
-				struct vxge_hw_mempool_dma *dma_object)
-{
-	struct __vxge_hw_blockpool_entry *entry = NULL;
-	struct __vxge_hw_blockpool  *blockpool;
-	void *memblock = NULL;
-	enum vxge_hw_status status = VXGE_HW_OK;
-
-	blockpool = &devh->block_pool;
-
-	if (size != blockpool->block_size) {
-
-		memblock = vxge_os_dma_malloc(devh->pdev, size,
-						&dma_object->handle,
-						&dma_object->acc_handle);
-
-		if (memblock == NULL) {
-			status = VXGE_HW_ERR_OUT_OF_MEMORY;
-			goto exit;
-		}
-
-		dma_object->addr = pci_map_single(devh->pdev, memblock, size,
-					PCI_DMA_BIDIRECTIONAL);
-
-		if (unlikely(pci_dma_mapping_error(devh->pdev,
-				dma_object->addr))) {
-			vxge_os_dma_free(devh->pdev, memblock,
-				&dma_object->acc_handle);
-			status = VXGE_HW_ERR_OUT_OF_MEMORY;
-			goto exit;
-		}
-
-	} else {
-
-		if (!list_empty(&blockpool->free_block_list))
-			entry = (struct __vxge_hw_blockpool_entry *)
-				list_first_entry(&blockpool->free_block_list,
-					struct __vxge_hw_blockpool_entry,
-					item);
-
-		if (entry != NULL) {
-			list_del(&entry->item);
-			dma_object->addr = entry->dma_addr;
-			dma_object->handle = entry->dma_handle;
-			dma_object->acc_handle = entry->acc_handle;
-			memblock = entry->memblock;
-
-			list_add(&entry->item,
-				&blockpool->free_entry_list);
-			blockpool->pool_size--;
-		}
-
-		if (memblock != NULL)
-			__vxge_hw_blockpool_blocks_add(blockpool);
-	}
-exit:
-	return memblock;
-}
-
-/*
- * __vxge_hw_blockpool_free - Frees the memory allcoated with
-				__vxge_hw_blockpool_malloc
- */
-void
-__vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
-			void *memblock, u32 size,
-			struct vxge_hw_mempool_dma *dma_object)
-{
-	struct __vxge_hw_blockpool_entry *entry = NULL;
-	struct __vxge_hw_blockpool  *blockpool;
-	enum vxge_hw_status status = VXGE_HW_OK;
-
-	blockpool = &devh->block_pool;
-
-	if (size != blockpool->block_size) {
-		pci_unmap_single(devh->pdev, dma_object->addr, size,
-			PCI_DMA_BIDIRECTIONAL);
-		vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle);
-	} else {
-
-		if (!list_empty(&blockpool->free_entry_list))
-			entry = (struct __vxge_hw_blockpool_entry *)
-				list_first_entry(&blockpool->free_entry_list,
-					struct __vxge_hw_blockpool_entry,
-					item);
-
-		if (entry == NULL)
-			entry = (struct __vxge_hw_blockpool_entry *)
-				vmalloc(sizeof(
-					struct __vxge_hw_blockpool_entry));
-		else
-			list_del(&entry->item);
-
-		if (entry != NULL) {
-			entry->length = size;
-			entry->memblock = memblock;
-			entry->dma_addr = dma_object->addr;
-			entry->acc_handle = dma_object->acc_handle;
-			entry->dma_handle = dma_object->handle;
-			list_add(&entry->item,
-					&blockpool->free_block_list);
-			blockpool->pool_size++;
-			status = VXGE_HW_OK;
-		} else
-			status = VXGE_HW_ERR_OUT_OF_MEMORY;
-
-		if (status == VXGE_HW_OK)
-			__vxge_hw_blockpool_blocks_remove(blockpool);
-	}
-}
-
-/*
- * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool
- * This function allocates a block from block pool or from the system
- */
-struct __vxge_hw_blockpool_entry *
-__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
-{
-	struct __vxge_hw_blockpool_entry *entry = NULL;
-	struct __vxge_hw_blockpool  *blockpool;
-
-	blockpool = &devh->block_pool;
-
-	if (size == blockpool->block_size) {
-
-		if (!list_empty(&blockpool->free_block_list))
-			entry = (struct __vxge_hw_blockpool_entry *)
-				list_first_entry(&blockpool->free_block_list,
-					struct __vxge_hw_blockpool_entry,
-					item);
-
-		if (entry != NULL) {
-			list_del(&entry->item);
-			blockpool->pool_size--;
-		}
-	}
-
-	if (entry != NULL)
-		__vxge_hw_blockpool_blocks_add(blockpool);
-
-	return entry;
-}
-
-/*
- * __vxge_hw_blockpool_block_free - Frees a block from block pool
- * @devh: Hal device
- * @entry: Entry of block to be freed
- *
- * This function frees a block from block pool
- */
-void
-__vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
-			struct __vxge_hw_blockpool_entry *entry)
-{
-	struct __vxge_hw_blockpool  *blockpool;
-
-	blockpool = &devh->block_pool;
-
-	if (entry->length == blockpool->block_size) {
-		list_add(&entry->item, &blockpool->free_block_list);
-		blockpool->pool_size++;
-	}
-
-	__vxge_hw_blockpool_blocks_remove(blockpool);
-}
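
The block pool removed above keeps a free list of pre-mapped DMA blocks so the data path never has to allocate and map memory under pressure. As a reading aid, a minimal sketch of the free-list pattern (illustrative names only, not the driver's API; the real entries also carry the dma_addr and PCI handles so the mapping can be torn down later):

	#include <linux/list.h>
	#include <linux/slab.h>

	/* Sketch only: a free list of preallocated blocks. */
	struct blk_entry {
		struct list_head item;
		void *mem;
	};

	static void *pool_get(struct list_head *free_list)
	{
		struct blk_entry *e;

		if (list_empty(free_list))
			return NULL;	/* caller falls back to a fresh allocation */
		e = list_first_entry(free_list, struct blk_entry, item);
		list_del(&e->item);
		return e->mem;
	}

	static void pool_put(struct list_head *free_list, struct blk_entry *e)
	{
		list_add(&e->item, free_list);	/* O(1), no allocator call */
	}
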
diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
index 5c00861..e249e28 100644
--- a/drivers/net/vxge/vxge-config.h
+++ b/drivers/net/vxge/vxge-config.h
@@ -20,13 +20,6 @@
 #define VXGE_CACHE_LINE_SIZE 128
 #endif
 
-#define vxge_os_vaprintf(level, mask, fmt, ...) { \
-	char buff[255]; \
-		snprintf(buff, 255, fmt, __VA_ARGS__); \
-		printk(buff); \
-		printk("\n"); \
-}
-
 #ifndef VXGE_ALIGN
 #define VXGE_ALIGN(adrs, size) \
 	(((size) - (((u64)adrs) & ((size)-1))) & ((size)-1))
@@ -36,8 +29,16 @@
 #define VXGE_HW_MAX_MTU				9600
 #define VXGE_HW_DEFAULT_MTU			1500
 
-#ifdef VXGE_DEBUG_ASSERT
+#define VXGE_HW_MAX_ROM_IMAGES			8
 
+struct eprom_image {
+	u8 is_valid:1;
+	u8 index;
+	u8 type;
+	u16 version;
+};
+
+#ifdef VXGE_DEBUG_ASSERT
 /**
  * vxge_assert
  * @test: C-condition to check
@@ -48,16 +49,13 @@
  * compilation
  * time.
  */
-#define vxge_assert(test) { \
-	if (!(test)) \
-		vxge_os_bug("bad cond: "#test" at %s:%d\n", \
-				__FILE__, __LINE__); }
+#define vxge_assert(test) BUG_ON(!(test))
 #else
 #define vxge_assert(test)
 #endif /* end of VXGE_DEBUG_ASSERT */
 
 /**
- * enum enum vxge_debug_level
+ * enum vxge_debug_level
  * @VXGE_NONE: debug disabled
  * @VXGE_ERR: all errors going to be logged out
  * @VXGE_TRACE: all errors plus all kind of verbose tracing print outs
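
The rewritten vxge_assert() maps directly onto the stock BUG_ON(); note that BUG_ON() fires when its argument is true, hence the negation:

	vxge_assert(vpath->is_configured);
	/* now expands to: BUG_ON(!(vpath->is_configured)) */
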
@@ -159,6 +157,47 @@
 };
 
 /**
+ * enum vxge_hw_fw_upgrade_code - FW upgrade return codes.
+ * @VXGE_HW_FW_UPGRADE_OK: All OK, send next 16 bytes
+ * @VXGE_HW_FW_UPGRADE_DONE:  upload completed
+ * @VXGE_HW_FW_UPGRADE_ERR:  upload error
+ * @VXGE_FW_UPGRADE_BYTES2SKIP:  skip bytes in the stream
+ *
+ */
+enum vxge_hw_fw_upgrade_code {
+	VXGE_HW_FW_UPGRADE_OK		= 0,
+	VXGE_HW_FW_UPGRADE_DONE		= 1,
+	VXGE_HW_FW_UPGRADE_ERR		= 2,
+	VXGE_FW_UPGRADE_BYTES2SKIP	= 3
+};
+
+/**
+ * enum vxge_hw_fw_upgrade_err_code - FW upgrade error codes.
+ * @VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1: corrupt data
+ * @VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW: buffer overflow
+ * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3: invalid .ncf file
+ * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4: invalid .ncf file
+ * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5: invalid .ncf file
+ * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6: invalid .ncf file
+ * @VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7: corrupt data
+ * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8: invalid .ncf file
+ * @VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN: generic error of unknown type
+ * @VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH: failed to flash the image (image check failed)
+ */
+enum vxge_hw_fw_upgrade_err_code {
+	VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1		= 1,
+	VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW		= 2,
+	VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3		= 3,
+	VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4		= 4,
+	VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5		= 5,
+	VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6		= 6,
+	VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7		= 7,
+	VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8		= 8,
+	VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN	= 9,
+	VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH		= 10
+};
+
+/**
  * struct vxge_hw_device_date - Date Format
  * @day: Day
  * @month: Month
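
The two enums above document a 16-bytes-at-a-time upload handshake. A hedged sketch of the loop a caller would run (send_fw_chunk() is a hypothetical stand-in for the register exchange that vxge_update_fw_image() really performs):

	/* Illustration only: act on the return codes documented above. */
	enum vxge_hw_fw_upgrade_code send_fw_chunk(const u8 *chunk, int *skip); /* hypothetical */

	static int fw_upload(const u8 *buf, int size)
	{
		int off = 0, skip = 0;

		while (off < size) {
			switch (send_fw_chunk(buf + off, &skip)) {
			case VXGE_HW_FW_UPGRADE_OK:
				off += 16;		/* send the next 16 bytes */
				break;
			case VXGE_FW_UPGRADE_BYTES2SKIP:
				off += skip;		/* skip bytes in the stream */
				break;
			case VXGE_HW_FW_UPGRADE_DONE:
				return 0;
			default:			/* VXGE_HW_FW_UPGRADE_ERR */
				return -EIO;
			}
		}
		return 0;
	}
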
@@ -275,9 +314,9 @@
 #define VXGE_HW_RING_DEFAULT					1
 
 	u32				ring_blocks;
-#define VXGE_HW_MIN_RING_BLOCKS				1
-#define VXGE_HW_MAX_RING_BLOCKS				128
-#define VXGE_HW_DEF_RING_BLOCKS				2
+#define VXGE_HW_MIN_RING_BLOCKS					1
+#define VXGE_HW_MAX_RING_BLOCKS					128
+#define VXGE_HW_DEF_RING_BLOCKS					2
 
 	u32				buffer_mode;
 #define VXGE_HW_RING_RXD_BUFFER_MODE_1				1
@@ -465,7 +504,6 @@
  * See also: vxge_hw_driver_initialize().
  */
 struct vxge_hw_uld_cbs {
-
 	void (*link_up)(struct __vxge_hw_device *devh);
 	void (*link_down)(struct __vxge_hw_device *devh);
 	void (*crit_err)(struct __vxge_hw_device *devh,
@@ -652,6 +690,7 @@
 	struct vxge_hw_vpath_stats_hw_info	*hw_stats;
 	struct vxge_hw_vpath_stats_hw_info	*hw_stats_sav;
 	struct vxge_hw_vpath_stats_sw_info	*sw_stats;
+	spinlock_t lock;
 };
 
 /*
@@ -661,7 +700,7 @@
  *
  * This structure is used to store the callback information.
  */
-struct __vxge_hw_vpath_handle{
+struct __vxge_hw_vpath_handle {
 	struct list_head	item;
 	struct __vxge_hw_virtualpath	*vpath;
 };
@@ -674,9 +713,6 @@
 /**
  * struct __vxge_hw_device  - Hal device object
  * @magic: Magic Number
- * @device_id: PCI Device Id of the adapter
- * @major_revision: PCI Device major revision
- * @minor_revision: PCI Device minor revision
  * @bar0: BAR0 virtual address.
  * @pdev: Physical device handle
 * @config: Configuration passed by the LL driver at initialization
@@ -688,9 +724,6 @@
 	u32				magic;
 #define VXGE_HW_DEVICE_MAGIC		0x12345678
 #define VXGE_HW_DEVICE_DEAD		0xDEADDEAD
-	u16				device_id;
-	u8				major_revision;
-	u8				minor_revision;
 	void __iomem			*bar0;
 	struct pci_dev			*pdev;
 	struct net_device		*ndev;
@@ -731,6 +764,7 @@
 	u32				debug_level;
 	u32				level_err;
 	u32				level_trace;
+	u16 eprom_versions[VXGE_HW_MAX_ROM_IMAGES];
 };
 
 #define VXGE_HW_INFO_LEN	64
@@ -781,8 +815,8 @@
 	u8		serial_number[VXGE_HW_INFO_LEN];
 	u8		part_number[VXGE_HW_INFO_LEN];
 	u8		product_desc[VXGE_HW_INFO_LEN];
-	u8 (mac_addrs)[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
-	u8 (mac_addr_masks)[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
+	u8 mac_addrs[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
+	u8 mac_addr_masks[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
 };
 
 /**
@@ -829,20 +863,10 @@
 				loc, \
 				offset, \
 				&val64);			\
-								\
 	if (status != VXGE_HW_OK)				\
 		return status;						\
 }
 
-#define VXGE_HW_VPATH_STATS_PIO_READ(offset) {				\
-	status = __vxge_hw_vpath_stats_access(vpath, \
-			VXGE_HW_STATS_OP_READ, \
-			offset, \
-			&val64);					\
-	if (status != VXGE_HW_OK)					\
-		return status;						\
-}
-
 /*
  * struct __vxge_hw_ring - Ring channel.
  * @channel: Channel "base" of this ring, the common part of all HW
@@ -1114,7 +1138,7 @@
  *             lookup to determine the transmit port.
  *             01: Send on physical Port1.
  *             10: Send on physical Port0.
-	*	       11: Send on both ports.
+ *	       11: Send on both ports.
  *             Bits 18 to 21 - Reserved
  *             Bits 22 to 23 - Gather_Code. This field is set by the host and
  *             is used to describe how individual buffers comprise a frame.
@@ -1413,12 +1437,12 @@
  * See also: vxge_hw_vpath_rts_rth_set(), vxge_hw_vpath_rts_rth_get().
  */
 struct vxge_hw_rth_hash_types {
-	u8 hash_type_tcpipv4_en;
-	u8 hash_type_ipv4_en;
-	u8 hash_type_tcpipv6_en;
-	u8 hash_type_ipv6_en;
-	u8 hash_type_tcpipv6ex_en;
-	u8 hash_type_ipv6ex_en;
+	u8 hash_type_tcpipv4_en:1,
+	   hash_type_ipv4_en:1,
+	   hash_type_tcpipv6_en:1,
+	   hash_type_ipv6_en:1,
+	   hash_type_tcpipv6ex_en:1,
+	   hash_type_ipv6ex_en:1;
 };
 
 void vxge_hw_device_debug_set(
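
Converting the six u8 flags to single-bit fields shrinks struct vxge_hw_rth_hash_types from six bytes to one; existing callers keep working because they only ever assign 0 or 1:

	struct vxge_hw_rth_hash_types ht = { 0 };

	ht.hash_type_tcpipv4_en = 1;	/* sets one bit now, not one byte */
	ht.hash_type_ipv4_en = 1;
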
@@ -1893,6 +1917,15 @@
 	return vaddr;
 }
 
+static inline void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
+			struct pci_dev **p_dma_acch)
+{
+	unsigned long misaligned = *(unsigned long *)p_dma_acch;
+	u8 *tmp = (u8 *)vaddr;
+	tmp -= misaligned;
+	kfree((void *)tmp);
+}
+
 /*
  * __vxge_hw_mempool_item_priv - will return pointer on per item private space
  */
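
The vxge_os_dma_free() helper now inlined here undoes an alignment adjustment made at allocation time: the offset that was added to align the buffer is stashed behind p_dma_acch and subtracted before kfree(). The allocation side looks roughly like this (a sketch of the idea, not the exact vxge_os_dma_malloc() body):

	/* Sketch: over-allocate, align forward, stash the offset so the
	 * free path can walk the pointer back before kfree(). */
	void *vaddr = kmalloc(size + VXGE_CACHE_LINE_SIZE, GFP_KERNEL);
	unsigned long off = VXGE_ALIGN((u64)vaddr, VXGE_CACHE_LINE_SIZE);

	*(unsigned long *)p_dma_acch = off;	/* read back by vxge_os_dma_free() */
	vaddr = (u8 *)vaddr + off;
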
@@ -1962,7 +1995,6 @@
 void
 vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp);
 
-
 #ifndef readq
 static inline u64 readq(void __iomem *addr)
 {
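
For 32-bit builds with no native readq(), the fallback defined here (body elided by the hunk) is conventionally built from two readl()s; the usual shape, hedged because the safe word order depends on how the device latches 64-bit registers:

	static inline u64 readq(void __iomem *addr)
	{
		u64 ret = readl(addr + 4);	/* upper 32 bits first ... */

		ret <<= 32;
		ret |= readl(addr);		/* ... then the lower 32 */
		return ret;
	}
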
@@ -2000,7 +2032,7 @@
 vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
 
 /**
- * vxge_debug
+ * vxge_debug_ll
  * @level: level of debug verbosity.
  * @mask: mask for the debug
  * @buf: Circular buffer for tracing
@@ -2012,26 +2044,13 @@
  * may be compiled out if DEBUG macro was never defined.
  * See also: enum vxge_debug_level{}.
  */
-
-#define vxge_trace_aux(level, mask, fmt, ...) \
-{\
-		vxge_os_vaprintf(level, mask, fmt, __VA_ARGS__);\
-}
-
-#define vxge_debug(module, level, mask, fmt, ...) { \
-if ((level >= VXGE_TRACE && ((module & VXGE_DEBUG_TRACE_MASK) == module)) || \
-	(level >= VXGE_ERR && ((module & VXGE_DEBUG_ERR_MASK) == module))) {\
-	if ((mask & VXGE_DEBUG_MASK) == mask)\
-		vxge_trace_aux(level, mask, fmt, __VA_ARGS__); \
-} \
-}
-
 #if (VXGE_COMPONENT_LL & VXGE_DEBUG_MODULE_MASK)
-#define vxge_debug_ll(level, mask, fmt, ...) \
-{\
-	vxge_debug(VXGE_COMPONENT_LL, level, mask, fmt, __VA_ARGS__);\
-}
-
+#define vxge_debug_ll(level, mask, fmt, ...) do {			       \
+	if ((level >= VXGE_ERR && VXGE_COMPONENT_LL & VXGE_DEBUG_ERR_MASK) ||  \
+	    (level >= VXGE_TRACE && VXGE_COMPONENT_LL & VXGE_DEBUG_TRACE_MASK))\
+		if ((mask & VXGE_DEBUG_MASK) == mask)			       \
+			printk(fmt "\n", __VA_ARGS__);			       \
+} while (0)
 #else
 #define vxge_debug_ll(level, mask, fmt, ...)
 #endif
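
The rewritten vxge_debug_ll() folds the old vxge_debug()/vxge_trace_aux()/vxge_os_vaprintf() chain into a single do/while (0) statement, which is safe in unbraced if/else arms and drops both the 255-byte stack buffer and the printk(buff) call that passed a non-literal format string. Call sites are unchanged, e.g.:

	vxge_debug_ll(VXGE_ERR, VXGE_DEBUG_MASK,
		      "%s: vpath: %d failed to open", dev->name, vp_id);
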
@@ -2051,4 +2070,26 @@
 
 enum vxge_hw_status
 __vxge_hw_device_is_privilaged(u32 host_type, u32 func_id);
+
+#define VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT 5
+#define VXGE_HW_MAX_POLLING_COUNT 100
+
+void
+vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev);
+
+enum vxge_hw_status
+vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
+			     u32 *minor, u32 *build);
+
+enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev);
+
+enum vxge_hw_status
+vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *filebuf,
+		     int size);
+
+enum vxge_hw_status
+vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
+				struct eprom_image *eprom_image_data);
+
+int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id);
 #endif
diff --git a/drivers/net/vxge/vxge-ethtool.c b/drivers/net/vxge/vxge-ethtool.c
index b67746e..1dd3a21 100644
--- a/drivers/net/vxge/vxge-ethtool.c
+++ b/drivers/net/vxge/vxge-ethtool.c
@@ -11,7 +11,7 @@
  *                 Virtualized Server Adapter.
  * Copyright(c) 2002-2010 Exar Corp.
  ******************************************************************************/
-#include<linux/ethtool.h>
+#include <linux/ethtool.h>
 #include <linux/slab.h>
 #include <linux/pci.h>
 #include <linux/etherdevice.h>
@@ -29,7 +29,6 @@
  * Return value:
  * 0 on success.
  */
-
 static int vxge_ethtool_sset(struct net_device *dev, struct ethtool_cmd *info)
 {
 	/* We currently only support 10Gb/FULL */
@@ -79,10 +78,9 @@
 * Returns driver specific information like name, version etc. to ethtool.
  */
 static void vxge_ethtool_gdrvinfo(struct net_device *dev,
-			struct ethtool_drvinfo *info)
+				  struct ethtool_drvinfo *info)
 {
-	struct vxgedev *vdev;
-	vdev = (struct vxgedev *)netdev_priv(dev);
+	struct vxgedev *vdev = netdev_priv(dev);
 	strlcpy(info->driver, VXGE_DRIVER_NAME, sizeof(VXGE_DRIVER_NAME));
 	strlcpy(info->version, DRV_VERSION, sizeof(DRV_VERSION));
 	strlcpy(info->fw_version, vdev->fw_version, VXGE_HW_FW_STRLEN);
@@ -104,15 +102,14 @@
  * buffer area.
  */
 static void vxge_ethtool_gregs(struct net_device *dev,
-			struct ethtool_regs *regs, void *space)
+			       struct ethtool_regs *regs, void *space)
 {
 	int index, offset;
 	enum vxge_hw_status status;
 	u64 reg;
-	u64 *reg_space = (u64 *) space;
-	struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
-	struct __vxge_hw_device  *hldev = (struct __vxge_hw_device *)
-					pci_get_drvdata(vdev->pdev);
+	u64 *reg_space = (u64 *)space;
+	struct vxgedev *vdev = netdev_priv(dev);
+	struct __vxge_hw_device *hldev = vdev->devh;
 
 	regs->len = sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath;
 	regs->version = vdev->pdev->subsystem_device;
@@ -147,9 +144,8 @@
  */
 static int vxge_ethtool_idnic(struct net_device *dev, u32 data)
 {
-	struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
-	struct __vxge_hw_device  *hldev = (struct __vxge_hw_device  *)
-			pci_get_drvdata(vdev->pdev);
+	struct vxgedev *vdev = netdev_priv(dev);
+	struct __vxge_hw_device *hldev = vdev->devh;
 
 	vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_ON);
 	msleep_interruptible(data ? (data * HZ) : VXGE_MAX_FLICKER_TIME);
@@ -168,11 +164,10 @@
  *  void
  */
 static void vxge_ethtool_getpause_data(struct net_device *dev,
-					struct ethtool_pauseparam *ep)
+				       struct ethtool_pauseparam *ep)
 {
-	struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
-	struct __vxge_hw_device  *hldev = (struct __vxge_hw_device  *)
-			pci_get_drvdata(vdev->pdev);
+	struct vxgedev *vdev = netdev_priv(dev);
+	struct __vxge_hw_device *hldev = vdev->devh;
 
 	vxge_hw_device_getpause_data(hldev, 0, &ep->tx_pause, &ep->rx_pause);
 }
@@ -188,11 +183,10 @@
  * int, returns 0 on Success
  */
 static int vxge_ethtool_setpause_data(struct net_device *dev,
-					struct ethtool_pauseparam *ep)
+				      struct ethtool_pauseparam *ep)
 {
-	struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
-	struct __vxge_hw_device  *hldev = (struct __vxge_hw_device  *)
-			pci_get_drvdata(vdev->pdev);
+	struct vxgedev *vdev = netdev_priv(dev);
+	struct __vxge_hw_device *hldev = vdev->devh;
 
 	vxge_hw_device_setpause_data(hldev, 0, ep->tx_pause, ep->rx_pause);
 
@@ -209,9 +203,8 @@
 	enum vxge_hw_status status;
 	enum vxge_hw_status swstatus;
 	struct vxge_vpath *vpath = NULL;
-
-	struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
-	struct __vxge_hw_device  *hldev = vdev->devh;
+	struct vxgedev *vdev = netdev_priv(dev);
+	struct __vxge_hw_device *hldev = vdev->devh;
 	struct vxge_hw_xmac_stats *xmac_stats;
 	struct vxge_hw_device_stats_sw_info *sw_stats;
 	struct vxge_hw_device_stats_hw_info *hw_stats;
@@ -574,12 +567,12 @@
 	kfree(hw_stats);
 }
 
-static void vxge_ethtool_get_strings(struct net_device *dev,
-			      u32 stringset, u8 *data)
+static void vxge_ethtool_get_strings(struct net_device *dev, u32 stringset,
+				     u8 *data)
 {
 	int stat_size = 0;
 	int i, j;
-	struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+	struct vxgedev *vdev = netdev_priv(dev);
 	switch (stringset) {
 	case ETH_SS_STATS:
 		vxge_add_string("VPATH STATISTICS%s\t\t\t",
@@ -1066,21 +1059,21 @@
 
 static int vxge_ethtool_get_regs_len(struct net_device *dev)
 {
-	struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+	struct vxgedev *vdev = netdev_priv(dev);
 
 	return sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath;
 }
 
 static u32 vxge_get_rx_csum(struct net_device *dev)
 {
-	struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+	struct vxgedev *vdev = netdev_priv(dev);
 
 	return vdev->rx_csum;
 }
 
 static int vxge_set_rx_csum(struct net_device *dev, u32 data)
 {
-	struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+	struct vxgedev *vdev = netdev_priv(dev);
 
 	if (data)
 		vdev->rx_csum = 1;
@@ -1102,7 +1095,7 @@
 
 static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset)
 {
-	struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+	struct vxgedev *vdev = netdev_priv(dev);
 
 	switch (sset) {
 	case ETH_SS_STATS:
@@ -1119,6 +1112,59 @@
 	}
 }
 
+static int vxge_set_flags(struct net_device *dev, u32 data)
+{
+	struct vxgedev *vdev = netdev_priv(dev);
+	enum vxge_hw_status status;
+
+	if (data & ~ETH_FLAG_RXHASH)
+		return -EOPNOTSUPP;
+
+	if (!!(data & ETH_FLAG_RXHASH) == vdev->devh->config.rth_en)
+		return 0;
+
+	if (netif_running(dev) || (vdev->config.rth_steering == NO_STEERING))
+		return -EINVAL;
+
+	vdev->devh->config.rth_en = !!(data & ETH_FLAG_RXHASH);
+
+	/* Enabling RTH requires some of the logic in vxge_device_register and a
+	 * vpath reset.  Due to these restrictions, only allow modification
+	 * while the interface is down.
+	 */
+	status = vxge_reset_all_vpaths(vdev);
+	if (status != VXGE_HW_OK) {
+		vdev->devh->config.rth_en = !vdev->devh->config.rth_en;
+		return -EFAULT;
+	}
+
+	if (vdev->devh->config.rth_en)
+		dev->features |= NETIF_F_RXHASH;
+	else
+		dev->features &= ~NETIF_F_RXHASH;
+
+	return 0;
+}
+
+static int vxge_fw_flash(struct net_device *dev, struct ethtool_flash *parms)
+{
+	struct vxgedev *vdev = netdev_priv(dev);
+
+	if (vdev->max_vpath_supported != VXGE_HW_MAX_VIRTUAL_PATHS) {
+		printk(KERN_INFO "Single Function Mode is required to flash the"
+		       " firmware\n");
+		return -EINVAL;
+	}
+
+	if (netif_running(dev)) {
+		printk(KERN_INFO "Interface %s must be down to flash the "
+		       "firmware\n", dev->name);
+		return -EBUSY;
+	}
+
+	return vxge_fw_upgrade(vdev, parms->data, 1);
+}
+
 static const struct ethtool_ops vxge_ethtool_ops = {
 	.get_settings		= vxge_ethtool_gset,
 	.set_settings		= vxge_ethtool_sset,
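
vxge_set_flags() is reached through ethtool's legacy ETHTOOL_SFLAGS ioctl. A hedged userspace sketch of toggling the bit (standard UAPI only, nothing vxge-specific; fd is an ordinary AF_INET socket, and the interface must be down per the checks above):

	#include <string.h>
	#include <sys/ioctl.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	/* Sketch: read-modify-write the ethtool flags to turn on RX hashing. */
	static int set_rxhash(int fd, const char *ifname)
	{
		struct ethtool_value ev = { .cmd = ETHTOOL_GFLAGS };
		struct ifreq ifr;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (char *)&ev;
		if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)	/* read current flags */
			return -1;

		ev.cmd = ETHTOOL_SFLAGS;
		ev.data |= ETH_FLAG_RXHASH;
		return ioctl(fd, SIOCETHTOOL, &ifr);	/* write them back */
	}
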
@@ -1131,7 +1177,7 @@
 	.get_rx_csum		= vxge_get_rx_csum,
 	.set_rx_csum		= vxge_set_rx_csum,
 	.get_tx_csum		= ethtool_op_get_tx_csum,
-	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
+	.set_tx_csum		= ethtool_op_set_tx_ipv6_csum,
 	.get_sg			= ethtool_op_get_sg,
 	.set_sg			= ethtool_op_set_sg,
 	.get_tso		= ethtool_op_get_tso,
@@ -1140,6 +1186,8 @@
 	.phys_id		= vxge_ethtool_idnic,
 	.get_sset_count		= vxge_ethtool_get_sset_count,
 	.get_ethtool_stats	= vxge_get_ethtool_stats,
+	.set_flags		= vxge_set_flags,
+	.flash_device		= vxge_fw_flash,
 };
 
 void vxge_initialize_ethtool_ops(struct net_device *ndev)
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index 813829f..537ad87 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -50,6 +50,8 @@
 #include <net/ip.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
+#include <linux/firmware.h>
+#include <linux/net_tstamp.h>
 #include "vxge-main.h"
 #include "vxge-reg.h"
 
@@ -82,16 +84,6 @@
 
 static struct vxge_drv_config *driver_config;
 
-static enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
-					     struct macInfo *mac);
-static enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
-					     struct macInfo *mac);
-static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac);
-static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac);
-static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath);
-static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath);
-static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
-
 static inline int is_vxge_card_up(struct vxgedev *vdev)
 {
 	return test_bit(__VXGE_STATE_CARD_UP, &vdev->state);
@@ -148,11 +140,10 @@
  * This function is called during interrupt context to notify link up state
  * change.
  */
-static void
-vxge_callback_link_up(struct __vxge_hw_device *hldev)
+static void vxge_callback_link_up(struct __vxge_hw_device *hldev)
 {
 	struct net_device *dev = hldev->ndev;
-	struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+	struct vxgedev *vdev = netdev_priv(dev);
 
 	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
 		vdev->ndev->name, __func__, __LINE__);
@@ -172,11 +163,10 @@
  * This function is called during interrupt context to notify link down state
  * change.
  */
-static void
-vxge_callback_link_down(struct __vxge_hw_device *hldev)
+static void vxge_callback_link_down(struct __vxge_hw_device *hldev)
 {
 	struct net_device *dev = hldev->ndev;
-	struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+	struct vxgedev *vdev = netdev_priv(dev);
 
 	vxge_debug_entryexit(VXGE_TRACE,
 		"%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
@@ -195,7 +185,7 @@
  *
  * Allocate SKB.
  */
-static struct sk_buff*
+static struct sk_buff *
 vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size)
 {
 	struct net_device    *dev;
@@ -369,7 +359,7 @@
 		 u8 t_code, void *userdata)
 {
 	struct vxge_ring *ring = (struct vxge_ring *)userdata;
-	struct  net_device *dev = ring->ndev;
+	struct net_device *dev = ring->ndev;
 	unsigned int dma_sizes;
 	void *first_dtr = NULL;
 	int dtr_cnt = 0;
@@ -413,7 +403,6 @@
 
 		prefetch((char *)skb + L1_CACHE_BYTES);
 		if (unlikely(t_code)) {
-
 			if (vxge_hw_ring_handle_tcode(ringh, dtr, t_code) !=
 				VXGE_HW_OK) {
 
@@ -436,9 +425,7 @@
 		}
 
 		if (pkt_length > VXGE_LL_RX_COPY_THRESHOLD) {
-
 			if (vxge_rx_alloc(dtr, ring, data_size) != NULL) {
-
 				if (!vxge_rx_map(dtr, ring)) {
 					skb_put(skb, pkt_length);
 
@@ -513,6 +500,23 @@
 		else
 			skb_checksum_none_assert(skb);
 
+
+		if (ring->rx_hwts) {
+			struct skb_shared_hwtstamps *skb_hwts;
+			u32 ns = *(u32 *)(skb->head + pkt_length);
+
+			skb_hwts = skb_hwtstamps(skb);
+			skb_hwts->hwtstamp = ns_to_ktime(ns);
+			skb_hwts->syststamp.tv64 = 0;
+		}
+
+		/* rth_hash_type and rth_it_hit are non-zero regardless of
+		 * whether rss is enabled.  Only the rth_value is zero/non-zero
+		 * if rss is disabled/enabled, so key off of that.
+		 */
+		if (ext_info.rth_value)
+			skb->rxhash = ext_info.rth_value;
+
 		vxge_rx_complete(ring, skb, ext_info.vlan,
 			pkt_length, &ext_info);
 
@@ -660,6 +664,65 @@
 	return FALSE;
 }
 
+static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
+{
+	struct vxge_mac_addrs *new_mac_entry;
+	u8 *mac_address = NULL;
+
+	if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT)
+		return TRUE;
+
+	new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC);
+	if (!new_mac_entry) {
+		vxge_debug_mem(VXGE_ERR,
+			"%s: memory allocation failed",
+			VXGE_DRIVER_NAME);
+		return FALSE;
+	}
+
+	list_add(&new_mac_entry->item, &vpath->mac_addr_list);
+
+	/* Copy the new mac address to the list */
+	mac_address = (u8 *)&new_mac_entry->macaddr;
+	memcpy(mac_address, mac->macaddr, ETH_ALEN);
+
+	new_mac_entry->state = mac->state;
+	vpath->mac_addr_cnt++;
+
+	/* Is this a multicast address */
+	if (0x01 & mac->macaddr[0])
+		vpath->mcast_addr_cnt++;
+
+	return TRUE;
+}
+
+/* Add a mac address to DA table */
+static enum vxge_hw_status
+vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
+{
+	enum vxge_hw_status status = VXGE_HW_OK;
+	struct vxge_vpath *vpath;
+	enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode;
+
+	if (0x01 & mac->macaddr[0]) /* multicast address */
+		duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE;
+	else
+		duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE;
+
+	vpath = &vdev->vpaths[mac->vpath_no];
+	status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr,
+						mac->macmask, duplicate_mode);
+	if (status != VXGE_HW_OK) {
+		vxge_debug_init(VXGE_ERR,
+			"DA config add entry failed for vpath:%d",
+			vpath->device_id);
+	} else
+		if (FALSE == vxge_mac_list_add(vpath, mac))
+			status = -EPERM;
+
+	return status;
+}
+
 static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
 {
 	struct macInfo mac_info;
@@ -670,7 +733,7 @@
 	struct vxge_vpath *vpath = NULL;
 	struct __vxge_hw_device *hldev;
 
-	hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
+	hldev = pci_get_drvdata(vdev->pdev);
 
 	mac_address = (u8 *)&mac_addr;
 	memcpy(mac_address, mac_header, ETH_ALEN);
@@ -769,7 +832,7 @@
 		return NETDEV_TX_OK;
 	}
 
-	vdev = (struct vxgedev *)netdev_priv(dev);
+	vdev = netdev_priv(dev);
 
 	if (unlikely(!is_vxge_card_up(vdev))) {
 		vxge_debug_tx(VXGE_ERR,
@@ -1005,6 +1068,50 @@
 		"%s:%d  Exiting...", __func__, __LINE__);
 }
 
+static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
+{
+	struct list_head *entry, *next;
+	u64 del_mac = 0;
+	u8 *mac_address = (u8 *) (&del_mac);
+
+	/* Copy the mac address to delete from the list */
+	memcpy(mac_address, mac->macaddr, ETH_ALEN);
+
+	list_for_each_safe(entry, next, &vpath->mac_addr_list) {
+		if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) {
+			list_del(entry);
+			kfree((struct vxge_mac_addrs *)entry);
+			vpath->mac_addr_cnt--;
+
+			/* Is this a multicast address */
+			if (0x01 & mac->macaddr[0])
+				vpath->mcast_addr_cnt--;
+			return TRUE;
+		}
+	}
+
+	return FALSE;
+}
+
+/* delete a mac address from DA table */
+static enum vxge_hw_status
+vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
+{
+	enum vxge_hw_status status = VXGE_HW_OK;
+	struct vxge_vpath *vpath;
+
+	vpath = &vdev->vpaths[mac->vpath_no];
+	status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr,
+						mac->macmask);
+	if (status != VXGE_HW_OK) {
+		vxge_debug_init(VXGE_ERR,
+			"DA config delete entry failed for vpath:%d",
+			vpath->device_id);
+	} else
+		vxge_mac_list_del(vpath, mac);
+	return status;
+}
+
 /**
  * vxge_set_multicast
  * @dev: pointer to the device structure
@@ -1034,7 +1141,7 @@
 	vxge_debug_entryexit(VXGE_TRACE,
 		"%s:%d", __func__, __LINE__);
 
-	vdev = (struct vxgedev *)netdev_priv(dev);
+	vdev = netdev_priv(dev);
 	hldev = (struct __vxge_hw_device  *)vdev->devh;
 
 	if (unlikely(!is_vxge_card_up(vdev)))
@@ -1094,7 +1201,7 @@
 		/* Delete previous MC's */
 		for (i = 0; i < mcast_cnt; i++) {
 			list_for_each_safe(entry, next, list_head) {
-				mac_entry = (struct vxge_mac_addrs *) entry;
+				mac_entry = (struct vxge_mac_addrs *)entry;
 				/* Copy the mac address to delete */
 				mac_address = (u8 *)&mac_entry->macaddr;
 				memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
@@ -1137,7 +1244,7 @@
 		/* Delete previous MC's */
 		for (i = 0; i < mcast_cnt; i++) {
 			list_for_each_safe(entry, next, list_head) {
-				mac_entry = (struct vxge_mac_addrs *) entry;
+				mac_entry = (struct vxge_mac_addrs *)entry;
 				/* Copy the mac address to delete */
 				mac_address = (u8 *)&mac_entry->macaddr;
 				memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
@@ -1184,14 +1291,14 @@
 {
 	struct sockaddr *addr = p;
 	struct vxgedev *vdev;
-	struct __vxge_hw_device  *hldev;
+	struct __vxge_hw_device *hldev;
 	enum vxge_hw_status status = VXGE_HW_OK;
 	struct macInfo mac_info_new, mac_info_old;
 	int vpath_idx = 0;
 
 	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
 
-	vdev = (struct vxgedev *)netdev_priv(dev);
+	vdev = netdev_priv(dev);
 	hldev = vdev->devh;
 
 	if (!is_valid_ether_addr(addr->sa_data))
@@ -1292,8 +1399,13 @@
 static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
 {
 	struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
+	struct __vxge_hw_device *hldev;
 	int msix_id;
 
+	hldev = pci_get_drvdata(vdev->pdev);
+
+	vxge_hw_vpath_wait_receive_idle(hldev, vpath->device_id);
+
 	vxge_hw_vpath_intr_disable(vpath->handle);
 
 	if (vdev->config.intr_type == INTA)
@@ -1310,6 +1422,95 @@
 	}
 }
 
+/* list all mac addresses from DA table */
+static enum vxge_hw_status
+vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath, struct macInfo *mac)
+{
+	enum vxge_hw_status status = VXGE_HW_OK;
+	unsigned char macmask[ETH_ALEN];
+	unsigned char macaddr[ETH_ALEN];
+
+	status = vxge_hw_vpath_mac_addr_get(vpath->handle,
+				macaddr, macmask);
+	if (status != VXGE_HW_OK) {
+		vxge_debug_init(VXGE_ERR,
+			"DA config list entry failed for vpath:%d",
+			vpath->device_id);
+		return status;
+	}
+
+	while (memcmp(mac->macaddr, macaddr, ETH_ALEN)) {
+		status = vxge_hw_vpath_mac_addr_get_next(vpath->handle,
+				macaddr, macmask);
+		if (status != VXGE_HW_OK)
+			break;
+	}
+
+	return status;
+}
+
+/* Store all mac addresses from the list to the DA table */
+static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
+{
+	enum vxge_hw_status status = VXGE_HW_OK;
+	struct macInfo mac_info;
+	u8 *mac_address = NULL;
+	struct list_head *entry, *next;
+
+	memset(&mac_info, 0, sizeof(struct macInfo));
+
+	if (vpath->is_open) {
+		list_for_each_safe(entry, next, &vpath->mac_addr_list) {
+			mac_address =
+				(u8 *)&
+				((struct vxge_mac_addrs *)entry)->macaddr;
+			memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
+			((struct vxge_mac_addrs *)entry)->state =
+				VXGE_LL_MAC_ADDR_IN_DA_TABLE;
+			/* does this mac address already exist in da table? */
+			status = vxge_search_mac_addr_in_da_table(vpath,
+				&mac_info);
+			if (status != VXGE_HW_OK) {
+				/* Add this mac address to the DA table */
+				status = vxge_hw_vpath_mac_addr_add(
+					vpath->handle, mac_info.macaddr,
+					mac_info.macmask,
+				    VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
+				if (status != VXGE_HW_OK) {
+					vxge_debug_init(VXGE_ERR,
+					    "DA add entry failed for vpath:%d",
+					    vpath->device_id);
+					((struct vxge_mac_addrs *)entry)->state
+						= VXGE_LL_MAC_ADDR_IN_LIST;
+				}
+			}
+		}
+	}
+
+	return status;
+}
+
+/* Store all vlan ids from the list to the vid table */
+static enum vxge_hw_status
+vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
+{
+	enum vxge_hw_status status = VXGE_HW_OK;
+	struct vxgedev *vdev = vpath->vdev;
+	u16 vid;
+
+	if (vdev->vlgrp && vpath->is_open) {
+
+		for (vid = 0; vid < VLAN_N_VID; vid++) {
+			if (!vlan_group_get_device(vdev->vlgrp, vid))
+				continue;
+			/* Add these vlan to the vid table */
+			status = vxge_hw_vpath_vid_add(vpath->handle, vid);
+		}
+	}
+
+	return status;
+}
+
 /*
  * vxge_reset_vpath
  * @vdev: pointer to vdev
@@ -1405,12 +1606,16 @@
 	}
 
 	if (event == VXGE_LL_FULL_RESET) {
+		netif_carrier_off(vdev->ndev);
+
 		/* wait for all the vpath reset to complete */
 		for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
 			while (test_bit(vp_id, &vdev->vp_reset))
 				msleep(50);
 		}
 
+		netif_carrier_on(vdev->ndev);
+
 		/* if execution mode is set to debug, don't reset the adapter */
 		if (unlikely(vdev->exec_mode)) {
 			vxge_debug_init(VXGE_ERR,
@@ -1423,6 +1628,7 @@
 	}
 
 	if (event == VXGE_LL_FULL_RESET) {
+		vxge_hw_device_wait_receive_idle(vdev->devh);
 		vxge_hw_device_intr_disable(vdev->devh);
 
 		switch (vdev->cric_err_event) {
@@ -1563,9 +1769,14 @@
  *
  * driver may reset the chip on events of serr, eccerr, etc
  */
-static int vxge_reset(struct vxgedev *vdev)
+static void vxge_reset(struct work_struct *work)
 {
-	return do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
+	struct vxgedev *vdev = container_of(work, struct vxgedev, reset_task);
+
+	if (!netif_running(vdev->ndev))
+		return;
+
+	do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
 }
 
 /**
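
Turning the reset into a work item moves the heavyweight reset out of the Tx-watchdog path, which runs in atomic context, into process context. The shape of the deferral, both halves of which appear in this patch:

	INIT_WORK(&vdev->reset_task, vxge_reset);	/* once, at device registration */
	schedule_work(&vdev->reset_task);		/* later, from vxge_tx_watchdog() */
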
@@ -1608,8 +1819,7 @@
 	int budget_org = budget;
 	struct vxge_ring *ring;
 
-	struct __vxge_hw_device  *hldev = (struct __vxge_hw_device *)
-		pci_get_drvdata(vdev->pdev);
+	struct __vxge_hw_device *hldev = pci_get_drvdata(vdev->pdev);
 
 	for (i = 0; i < vdev->no_of_vpath; i++) {
 		ring = &vdev->vpaths[i].ring;
@@ -1645,11 +1855,11 @@
  */
 static void vxge_netpoll(struct net_device *dev)
 {
-	struct __vxge_hw_device  *hldev;
+	struct __vxge_hw_device *hldev;
 	struct vxgedev *vdev;
 
-	vdev = (struct vxgedev *)netdev_priv(dev);
-	hldev = (struct __vxge_hw_device  *)pci_get_drvdata(vdev->pdev);
+	vdev = netdev_priv(dev);
+	hldev = pci_get_drvdata(vdev->pdev);
 
 	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
 
@@ -1689,15 +1899,6 @@
 		mtable[index] = index % vdev->no_of_vpath;
 	}
 
-	/* Fill RTH hash types */
-	hash_types.hash_type_tcpipv4_en   = vdev->config.rth_hash_type_tcpipv4;
-	hash_types.hash_type_ipv4_en      = vdev->config.rth_hash_type_ipv4;
-	hash_types.hash_type_tcpipv6_en   = vdev->config.rth_hash_type_tcpipv6;
-	hash_types.hash_type_ipv6_en      = vdev->config.rth_hash_type_ipv6;
-	hash_types.hash_type_tcpipv6ex_en =
-					vdev->config.rth_hash_type_tcpipv6ex;
-	hash_types.hash_type_ipv6ex_en    = vdev->config.rth_hash_type_ipv6ex;
-
 	/* set indirection table, bucket-to-vpath mapping */
 	status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles,
 						vdev->no_of_vpath,
@@ -1710,19 +1911,27 @@
 		return status;
 	}
 
+	/* Fill RTH hash types */
+	hash_types.hash_type_tcpipv4_en   = vdev->config.rth_hash_type_tcpipv4;
+	hash_types.hash_type_ipv4_en      = vdev->config.rth_hash_type_ipv4;
+	hash_types.hash_type_tcpipv6_en   = vdev->config.rth_hash_type_tcpipv6;
+	hash_types.hash_type_ipv6_en      = vdev->config.rth_hash_type_ipv6;
+	hash_types.hash_type_tcpipv6ex_en =
+					vdev->config.rth_hash_type_tcpipv6ex;
+	hash_types.hash_type_ipv6ex_en    = vdev->config.rth_hash_type_ipv6ex;
+
 	/*
-	* Because the itable_set() method uses the active_table field
-	* for the target virtual path the RTH config should be updated
-	* for all VPATHs. The h/w only uses the lowest numbered VPATH
-	* when steering frames.
-	*/
+	 * Because the itable_set() method uses the active_table field
+	 * for the target virtual path the RTH config should be updated
+	 * for all VPATHs. The h/w only uses the lowest numbered VPATH
+	 * when steering frames.
+	 */
 	 for (index = 0; index < vdev->no_of_vpath; index++) {
 		status = vxge_hw_vpath_rts_rth_set(
 				vdev->vpaths[index].handle,
 				vdev->config.rth_algorithm,
 				&hash_types,
 				vdev->config.rth_bkt_sz);
-
 		 if (status != VXGE_HW_OK) {
 			vxge_debug_init(VXGE_ERR,
 				"RTH configuration failed for vpath:%d",
@@ -1734,201 +1943,8 @@
 	return status;
 }
 
-static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
-{
-	struct vxge_mac_addrs *new_mac_entry;
-	u8 *mac_address = NULL;
-
-	if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT)
-		return TRUE;
-
-	new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC);
-	if (!new_mac_entry) {
-		vxge_debug_mem(VXGE_ERR,
-			"%s: memory allocation failed",
-			VXGE_DRIVER_NAME);
-		return FALSE;
-	}
-
-	list_add(&new_mac_entry->item, &vpath->mac_addr_list);
-
-	/* Copy the new mac address to the list */
-	mac_address = (u8 *)&new_mac_entry->macaddr;
-	memcpy(mac_address, mac->macaddr, ETH_ALEN);
-
-	new_mac_entry->state = mac->state;
-	vpath->mac_addr_cnt++;
-
-	/* Is this a multicast address */
-	if (0x01 & mac->macaddr[0])
-		vpath->mcast_addr_cnt++;
-
-	return TRUE;
-}
-
-/* Add a mac address to DA table */
-static enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
-					     struct macInfo *mac)
-{
-	enum vxge_hw_status status = VXGE_HW_OK;
-	struct vxge_vpath *vpath;
-	enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode;
-
-	if (0x01 & mac->macaddr[0]) /* multicast address */
-		duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE;
-	else
-		duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE;
-
-	vpath = &vdev->vpaths[mac->vpath_no];
-	status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr,
-						mac->macmask, duplicate_mode);
-	if (status != VXGE_HW_OK) {
-		vxge_debug_init(VXGE_ERR,
-			"DA config add entry failed for vpath:%d",
-			vpath->device_id);
-	} else
-		if (FALSE == vxge_mac_list_add(vpath, mac))
-			status = -EPERM;
-
-	return status;
-}
-
-static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
-{
-	struct list_head *entry, *next;
-	u64 del_mac = 0;
-	u8 *mac_address = (u8 *) (&del_mac);
-
-	/* Copy the mac address to delete from the list */
-	memcpy(mac_address, mac->macaddr, ETH_ALEN);
-
-	list_for_each_safe(entry, next, &vpath->mac_addr_list) {
-		if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) {
-			list_del(entry);
-			kfree((struct vxge_mac_addrs *)entry);
-			vpath->mac_addr_cnt--;
-
-			/* Is this a multicast address */
-			if (0x01 & mac->macaddr[0])
-				vpath->mcast_addr_cnt--;
-			return TRUE;
-		}
-	}
-
-	return FALSE;
-}
-/* delete a mac address from DA table */
-static enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
-					     struct macInfo *mac)
-{
-	enum vxge_hw_status status = VXGE_HW_OK;
-	struct vxge_vpath *vpath;
-
-	vpath = &vdev->vpaths[mac->vpath_no];
-	status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr,
-						mac->macmask);
-	if (status != VXGE_HW_OK) {
-		vxge_debug_init(VXGE_ERR,
-			"DA config delete entry failed for vpath:%d",
-			vpath->device_id);
-	} else
-		vxge_mac_list_del(vpath, mac);
-	return status;
-}
-
-/* list all mac addresses from DA table */
-enum vxge_hw_status
-static vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath,
-					struct macInfo *mac)
-{
-	enum vxge_hw_status status = VXGE_HW_OK;
-	unsigned char macmask[ETH_ALEN];
-	unsigned char macaddr[ETH_ALEN];
-
-	status = vxge_hw_vpath_mac_addr_get(vpath->handle,
-				macaddr, macmask);
-	if (status != VXGE_HW_OK) {
-		vxge_debug_init(VXGE_ERR,
-			"DA config list entry failed for vpath:%d",
-			vpath->device_id);
-		return status;
-	}
-
-	while (memcmp(mac->macaddr, macaddr, ETH_ALEN)) {
-
-		status = vxge_hw_vpath_mac_addr_get_next(vpath->handle,
-				macaddr, macmask);
-		if (status != VXGE_HW_OK)
-			break;
-	}
-
-	return status;
-}
-
-/* Store all vlan ids from the list to the vid table */
-static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
-{
-	enum vxge_hw_status status = VXGE_HW_OK;
-	struct vxgedev *vdev = vpath->vdev;
-	u16 vid;
-
-	if (vdev->vlgrp && vpath->is_open) {
-
-		for (vid = 0; vid < VLAN_N_VID; vid++) {
-			if (!vlan_group_get_device(vdev->vlgrp, vid))
-				continue;
-			/* Add these vlan to the vid table */
-			status = vxge_hw_vpath_vid_add(vpath->handle, vid);
-		}
-	}
-
-	return status;
-}
-
-/* Store all mac addresses from the list to the DA table */
-static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
-{
-	enum vxge_hw_status status = VXGE_HW_OK;
-	struct macInfo mac_info;
-	u8 *mac_address = NULL;
-	struct list_head *entry, *next;
-
-	memset(&mac_info, 0, sizeof(struct macInfo));
-
-	if (vpath->is_open) {
-
-		list_for_each_safe(entry, next, &vpath->mac_addr_list) {
-			mac_address =
-				(u8 *)&
-				((struct vxge_mac_addrs *)entry)->macaddr;
-			memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
-			((struct vxge_mac_addrs *)entry)->state =
-				VXGE_LL_MAC_ADDR_IN_DA_TABLE;
-			/* does this mac address already exist in da table? */
-			status = vxge_search_mac_addr_in_da_table(vpath,
-				&mac_info);
-			if (status != VXGE_HW_OK) {
-				/* Add this mac address to the DA table */
-				status = vxge_hw_vpath_mac_addr_add(
-					vpath->handle, mac_info.macaddr,
-					mac_info.macmask,
-				    VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
-				if (status != VXGE_HW_OK) {
-					vxge_debug_init(VXGE_ERR,
-					    "DA add entry failed for vpath:%d",
-					    vpath->device_id);
-					((struct vxge_mac_addrs *)entry)->state
-						= VXGE_LL_MAC_ADDR_IN_LIST;
-				}
-			}
-		}
-	}
-
-	return status;
-}
-
 /* reset vpaths */
-static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
+enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
 {
 	enum vxge_hw_status status = VXGE_HW_OK;
 	struct vxge_vpath *vpath;
@@ -1988,8 +2004,23 @@
 
 	for (i = 0; i < vdev->no_of_vpath; i++) {
 		vpath = &vdev->vpaths[i];
-
 		vxge_assert(vpath->is_configured);
+
+		if (!vdev->titan1) {
+			struct vxge_hw_vp_config *vcfg;
+			vcfg = &vdev->devh->config.vp_config[vpath->device_id];
+
+			vcfg->rti.urange_a = RTI_T1A_RX_URANGE_A;
+			vcfg->rti.urange_b = RTI_T1A_RX_URANGE_B;
+			vcfg->rti.urange_c = RTI_T1A_RX_URANGE_C;
+			vcfg->tti.uec_a = TTI_T1A_TX_UFC_A;
+			vcfg->tti.uec_b = TTI_T1A_TX_UFC_B;
+			vcfg->tti.uec_c = TTI_T1A_TX_UFC_C(vdev->mtu);
+			vcfg->tti.uec_d = TTI_T1A_TX_UFC_D(vdev->mtu);
+			vcfg->tti.ltimer_val = VXGE_T1A_TTI_LTIMER_VAL;
+			vcfg->tti.rtimer_val = VXGE_T1A_TTI_RTIMER_VAL;
+		}
+
 		attr.vp_id = vpath->device_id;
 		attr.fifo_attr.callback = vxge_xmit_compl;
 		attr.fifo_attr.txdl_term = vxge_tx_term;
@@ -2004,6 +2035,7 @@
 
 		vpath->ring.ndev = vdev->ndev;
 		vpath->ring.pdev = vdev->pdev;
+
 		status = vxge_hw_vpath_open(vdev->devh, &attr, &vpath->handle);
 		if (status == VXGE_HW_OK) {
 			vpath->fifo.handle =
@@ -2024,6 +2056,7 @@
 				vdev->config.fifo_indicate_max_pkts;
 			vpath->ring.rx_vector_no = 0;
 			vpath->ring.rx_csum = vdev->rx_csum;
+			vpath->ring.rx_hwts = vdev->rx_hwts;
 			vpath->is_open = 1;
 			vdev->vp_handles[i] = vpath->handle;
 			vpath->ring.gro_enable = vdev->config.gro_enable;
@@ -2031,11 +2064,10 @@
 			vdev->stats.vpaths_open++;
 		} else {
 			vdev->stats.vpath_open_fail++;
-			vxge_debug_init(VXGE_ERR,
-				"%s: vpath: %d failed to open "
-				"with status: %d",
-			    vdev->ndev->name, vpath->device_id,
-				status);
+			vxge_debug_init(VXGE_ERR, "%s: vpath: %d failed to "
+					"open with status: %d",
+					vdev->ndev->name, vpath->device_id,
+					status);
 			vxge_close_vpaths(vdev, 0);
 			return -EPERM;
 		}
@@ -2043,6 +2075,7 @@
 		vp_id = vpath->handle->vpath->vp_id;
 		vdev->vpaths_deployed |= vxge_mBIT(vp_id);
 	}
+
 	return VXGE_HW_OK;
 }
 
@@ -2062,21 +2095,20 @@
 	struct __vxge_hw_device *hldev;
 	u64 reason;
 	enum vxge_hw_status status;
-	struct vxgedev *vdev = (struct vxgedev *) dev_id;;
+	struct vxgedev *vdev = (struct vxgedev *)dev_id;
 
 	vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__);
 
 	dev = vdev->ndev;
-	hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev);
+	hldev = pci_get_drvdata(vdev->pdev);
 
 	if (pci_channel_offline(vdev->pdev))
 		return IRQ_NONE;
 
 	if (unlikely(!is_vxge_card_up(vdev)))
-		return IRQ_NONE;
+		return IRQ_HANDLED;
 
-	status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode,
-			&reason);
+	status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode, &reason);
 	if (status == VXGE_HW_OK) {
 		vxge_hw_device_mask_all(hldev);
 
@@ -2301,8 +2333,8 @@
 
 static void vxge_rem_isr(struct vxgedev *vdev)
 {
-	struct __vxge_hw_device  *hldev;
-	hldev = (struct __vxge_hw_device  *) pci_get_drvdata(vdev->pdev);
+	struct __vxge_hw_device *hldev;
+	hldev = pci_get_drvdata(vdev->pdev);
 
 #ifdef CONFIG_PCI_MSI
 	if (vdev->config.intr_type == MSI_X) {
@@ -2529,8 +2561,7 @@
  * Return value: '0' on success and an appropriate (-)ve integer as
  * defined in errno.h file on failure.
  */
-static int
-vxge_open(struct net_device *dev)
+static int vxge_open(struct net_device *dev)
 {
 	enum vxge_hw_status status;
 	struct vxgedev *vdev;
@@ -2539,11 +2570,12 @@
 	int ret = 0;
 	int i;
 	u64 val64, function_mode;
+
 	vxge_debug_entryexit(VXGE_TRACE,
 		"%s: %s:%d", dev->name, __func__, __LINE__);
 
-	vdev = (struct vxgedev *)netdev_priv(dev);
-	hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
+	vdev = netdev_priv(dev);
+	hldev = pci_get_drvdata(vdev->pdev);
 	function_mode = vdev->config.device_hw_info.function_mode;
 
 	/* make sure you have link off by default every time Nic is
@@ -2598,6 +2630,8 @@
 			goto out2;
 		}
 	}
+	printk(KERN_INFO "%s: Receive Hashing Offload %s\n", dev->name,
+	       hldev->config.rth_en ? "enabled" : "disabled");
 
 	for (i = 0; i < vdev->no_of_vpath; i++) {
 		vpath = &vdev->vpaths[i];
@@ -2683,9 +2717,10 @@
 		vxge_os_timer(vdev->vp_reset_timer,
 			vxge_poll_vp_reset, vdev, (HZ/2));
 
-	if (vdev->vp_lockup_timer.function == NULL)
-		vxge_os_timer(vdev->vp_lockup_timer,
-			vxge_poll_vp_lockup, vdev, (HZ/2));
+	/* There is no need to check for RxD leak and RxD lookup on Titan1A */
+	if (vdev->titan1 && vdev->vp_lockup_timer.function == NULL)
+		vxge_os_timer(vdev->vp_lockup_timer, vxge_poll_vp_lockup, vdev,
+			      HZ / 2);
 
 	set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
 
@@ -2767,8 +2802,8 @@
 	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
 		dev->name, __func__, __LINE__);
 
-	vdev = (struct vxgedev *)netdev_priv(dev);
-	hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev);
+	vdev = netdev_priv(dev);
+	hldev = pci_get_drvdata(vdev->pdev);
 
 	if (unlikely(!is_vxge_card_up(vdev)))
 		return 0;
@@ -2778,7 +2813,6 @@
 	while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
 		msleep(50);
 
-	clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
 	if (do_io) {
 		/* Put the vpath back in normal mode */
 		vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id);
@@ -2789,7 +2823,6 @@
 					struct vxge_hw_mrpcim_reg,
 					rts_mgr_cbasin_cfg),
 				&val64);
-
 		if (status == VXGE_HW_OK) {
 			val64 &= ~vpath_vector;
 			status = vxge_hw_mgmt_reg_write(vdev->devh,
@@ -2818,10 +2851,17 @@
 
 		smp_wmb();
 	}
-	del_timer_sync(&vdev->vp_lockup_timer);
+
+	if (vdev->titan1)
+		del_timer_sync(&vdev->vp_lockup_timer);
 
 	del_timer_sync(&vdev->vp_reset_timer);
 
+	if (do_io)
+		vxge_hw_device_wait_receive_idle(hldev);
+
+	clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
+
 	/* Disable napi */
 	if (vdev->config.intr_type != MSI_X)
 		napi_disable(&vdev->napi);
@@ -2838,8 +2878,6 @@
 	if (do_io)
 		vxge_hw_device_intr_disable(vdev->devh);
 
-	mdelay(1000);
-
 	vxge_rem_isr(vdev);
 
 	vxge_napi_del_all(vdev);
@@ -2868,8 +2906,7 @@
  * Return value: '0' on success and an appropriate (-)ve integer as
  * defined in errno.h file on failure.
  */
-static int
-vxge_close(struct net_device *dev)
+static int vxge_close(struct net_device *dev)
 {
 	do_vxge_close(dev, 1);
 	return 0;
@@ -2943,9 +2980,7 @@
 		net_stats->rx_bytes += vdev->vpaths[k].ring.stats.rx_bytes;
 		net_stats->rx_errors += vdev->vpaths[k].ring.stats.rx_errors;
 		net_stats->multicast += vdev->vpaths[k].ring.stats.rx_mcast;
-		net_stats->rx_dropped +=
-			vdev->vpaths[k].ring.stats.rx_dropped;
-
+		net_stats->rx_dropped += vdev->vpaths[k].ring.stats.rx_dropped;
 		net_stats->tx_packets += vdev->vpaths[k].fifo.stats.tx_frms;
 		net_stats->tx_bytes += vdev->vpaths[k].fifo.stats.tx_bytes;
 		net_stats->tx_errors += vdev->vpaths[k].fifo.stats.tx_errors;
@@ -2954,6 +2989,101 @@
 	return net_stats;
 }
 
+static enum vxge_hw_status vxge_timestamp_config(struct vxgedev *vdev,
+						 int enable)
+{
+	enum vxge_hw_status status;
+	u64 val64;
+
+	/* Timestamp is passed to the driver via the FCS, therefore we
+	 * must disable the FCS stripping by the adapter.  Since this is
+	 * required for the driver to load (due to a hardware bug),
+	 * there is no need to do anything special here.
+	 */
+	if (enable)
+		val64 = VXGE_HW_XMAC_TIMESTAMP_EN |
+			VXGE_HW_XMAC_TIMESTAMP_USE_LINK_ID(0) |
+			VXGE_HW_XMAC_TIMESTAMP_INTERVAL(0);
+	else
+		val64 = 0;
+
+	status = vxge_hw_mgmt_reg_write(vdev->devh,
+					vxge_hw_mgmt_reg_type_mrpcim,
+					0,
+					offsetof(struct vxge_hw_mrpcim_reg,
+						 xmac_timestamp),
+					val64);
+	vxge_hw_device_flush_io(vdev->devh);
+	return status;
+}
+
+static int vxge_hwtstamp_ioctl(struct vxgedev *vdev, void __user *data)
+{
+	struct hwtstamp_config config;
+	enum vxge_hw_status status;
+	int i;
+
+	if (copy_from_user(&config, data, sizeof(config)))
+		return -EFAULT;
+
+	/* reserved for future extensions */
+	if (config.flags)
+		return -EINVAL;
+
+	/* Transmit HW Timestamp not supported */
+	switch (config.tx_type) {
+	case HWTSTAMP_TX_OFF:
+		break;
+	case HWTSTAMP_TX_ON:
+	default:
+		return -ERANGE;
+	}
+
+	switch (config.rx_filter) {
+	case HWTSTAMP_FILTER_NONE:
+		status = vxge_timestamp_config(vdev, 0);
+		if (status != VXGE_HW_OK)
+			return -EFAULT;
+
+		vdev->rx_hwts = 0;
+		config.rx_filter = HWTSTAMP_FILTER_NONE;
+		break;
+
+	case HWTSTAMP_FILTER_ALL:
+	case HWTSTAMP_FILTER_SOME:
+	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+	case HWTSTAMP_FILTER_PTP_V2_EVENT:
+	case HWTSTAMP_FILTER_PTP_V2_SYNC:
+	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+		status = vxge_timestamp_config(vdev, 1);
+		if (status != VXGE_HW_OK)
+			return -EFAULT;
+
+		vdev->rx_hwts = 1;
+		config.rx_filter = HWTSTAMP_FILTER_ALL;
+		break;
+
+	default:
+		return -ERANGE;
+	}
+
+	for (i = 0; i < vdev->no_of_vpath; i++)
+		vdev->vpaths[i].ring.rx_hwts = vdev->rx_hwts;
+
+	if (copy_to_user(data, &config, sizeof(config)))
+		return -EFAULT;
+
+	return 0;
+}
+
 /**
  * vxge_ioctl
  * @dev: Device pointer.
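
The new handler implements the stock SIOCSHWTSTAMP negotiation; note that every accepted PTP filter is widened to HWTSTAMP_FILTER_ALL. A minimal userspace caller (standard UAPI, nothing driver-specific; fd is any socket):

	#include <string.h>
	#include <sys/ioctl.h>
	#include <net/if.h>
	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>

	/* Sketch: request RX timestamps; TX timestamping is rejected by
	 * the driver above, so leave it off. */
	static int enable_rx_tstamp(int fd, const char *ifname)
	{
		struct hwtstamp_config cfg = {
			.tx_type   = HWTSTAMP_TX_OFF,
			.rx_filter = HWTSTAMP_FILTER_ALL,
		};
		struct ifreq ifr;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (char *)&cfg;
		return ioctl(fd, SIOCSHWTSTAMP, &ifr);
	}
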
@@ -2966,7 +3096,20 @@
  */
 static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 {
-	return -EOPNOTSUPP;
+	struct vxgedev *vdev = netdev_priv(dev);
+	int ret;
+
+	switch (cmd) {
+	case SIOCSHWTSTAMP:
+		ret = vxge_hwtstamp_ioctl(vdev, rq->ifr_data);
+		if (ret)
+			return ret;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
 }
 
 /**
@@ -2977,18 +3120,17 @@
  * This function is triggered if the Tx Queue is stopped
  * for a pre-defined amount of time when the Interface is still up.
  */
-static void
-vxge_tx_watchdog(struct net_device *dev)
+static void vxge_tx_watchdog(struct net_device *dev)
 {
 	struct vxgedev *vdev;
 
 	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
 
-	vdev = (struct vxgedev *)netdev_priv(dev);
+	vdev = netdev_priv(dev);
 
 	vdev->cric_err_event = VXGE_HW_EVENT_RESET_START;
 
-	vxge_reset(vdev);
+	schedule_work(&vdev->reset_task);
 	vxge_debug_entryexit(VXGE_TRACE,
 		"%s:%d  Exiting...", __func__, __LINE__);
 }
@@ -3012,7 +3154,7 @@
 
 	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
 
-	vdev = (struct vxgedev *)netdev_priv(dev);
+	vdev = netdev_priv(dev);
 
 	vpath = &vdev->vpaths[0];
 	if ((NULL == grp) && (vpath->is_open)) {
@@ -3061,7 +3203,7 @@
 	struct vxge_vpath *vpath;
 	int vp_id;
 
-	vdev = (struct vxgedev *)netdev_priv(dev);
+	vdev = netdev_priv(dev);
 
 	/* Add these vlan to the vid table */
 	for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
@@ -3088,7 +3230,7 @@
 
 	vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
 
-	vdev = (struct vxgedev *)netdev_priv(dev);
+	vdev = netdev_priv(dev);
 
 	vlan_group_set_device(vdev->vlgrp, vid, NULL);
 
@@ -3110,21 +3252,31 @@
 	.ndo_start_xmit         = vxge_xmit,
 	.ndo_validate_addr      = eth_validate_addr,
 	.ndo_set_multicast_list = vxge_set_multicast,
-
 	.ndo_do_ioctl           = vxge_ioctl,
-
 	.ndo_set_mac_address    = vxge_set_mac_addr,
 	.ndo_change_mtu         = vxge_change_mtu,
 	.ndo_vlan_rx_register   = vxge_vlan_rx_register,
 	.ndo_vlan_rx_kill_vid   = vxge_vlan_rx_kill_vid,
 	.ndo_vlan_rx_add_vid	= vxge_vlan_rx_add_vid,
-
 	.ndo_tx_timeout         = vxge_tx_watchdog,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller    = vxge_netpoll,
 #endif
 };
 
+static int __devinit vxge_device_revision(struct vxgedev *vdev)
+{
+	int ret;
+	u8 revision;
+
+	ret = pci_read_config_byte(vdev->pdev, PCI_REVISION_ID, &revision);
+	if (ret)
+		return -EIO;
+
+	vdev->titan1 = (revision == VXGE_HW_TITAN1_PCI_REVISION);
+	return 0;
+}
+
 static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
 					  struct vxge_config *config,
 					  int high_dma, int no_of_vpath,
@@ -3163,6 +3315,11 @@
 	vdev->pdev = hldev->pdev;
 	memcpy(&vdev->config, config, sizeof(struct vxge_config));
 	vdev->rx_csum = 1;	/* Enable Rx CSUM by default. */
+	vdev->rx_hwts = 0;
+
+	ret = vxge_device_revision(vdev);
+	if (ret < 0)
+		goto _out1;
 
 	SET_NETDEV_DEV(ndev, &vdev->pdev->dev);
 
@@ -3175,9 +3332,15 @@
 	ndev->netdev_ops = &vxge_netdev_ops;
 
 	ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT;
+	INIT_WORK(&vdev->reset_task, vxge_reset);
 
 	vxge_initialize_ethtool_ops(ndev);
 
+	if (vdev->config.rth_steering != NO_STEERING) {
+		ndev->features |= NETIF_F_RXHASH;
+		hldev->config.rth_en = VXGE_HW_RTH_ENABLE;
+	}
+
 	/* Allocate memory for vpath */
 	vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) *
 				no_of_vpath, GFP_KERNEL);
@@ -3191,7 +3354,7 @@
 
 	ndev->features |= NETIF_F_SG;
 
-	ndev->features |= NETIF_F_HW_CSUM;
+	ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
 	vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
 		"%s : checksuming enabled", __func__);
 
@@ -3227,6 +3390,7 @@
 		"%s: Ethernet device registered",
 		ndev->name);
 
+	hldev->ndev = ndev;
 	*vdev_out = vdev;
 
 	/* Resetting the Device stats */
@@ -3261,36 +3425,27 @@
  *
  * This function will unregister and free network device
  */
-static void
-vxge_device_unregister(struct __vxge_hw_device *hldev)
+static void vxge_device_unregister(struct __vxge_hw_device *hldev)
 {
 	struct vxgedev *vdev;
 	struct net_device *dev;
 	char buf[IFNAMSIZ];
-#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
-	(VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
-	u32 level_trace;
-#endif
 
 	dev = hldev->ndev;
 	vdev = netdev_priv(dev);
-#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
-	(VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
-	level_trace = vdev->level_trace;
-#endif
-	vxge_debug_entryexit(level_trace,
-		"%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
 
-	memcpy(buf, vdev->ndev->name, IFNAMSIZ);
+	vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d", vdev->ndev->name,
+			     __func__, __LINE__);
+
+	strncpy(buf, dev->name, IFNAMSIZ);
 
 	/* in 2.6 will call stop() if device is up */
 	unregister_netdev(dev);
 
-	flush_scheduled_work();
-
-	vxge_debug_init(level_trace, "%s: ethernet device unregistered", buf);
-	vxge_debug_entryexit(level_trace,
-		"%s: %s:%d  Exiting...", buf, __func__, __LINE__);
+	vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered",
+			buf);
+	vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d  Exiting...", buf,
+			     __func__, __LINE__);
 }
 
 /*
@@ -3304,7 +3459,7 @@
 			enum vxge_hw_event type, u64 vp_id)
 {
 	struct net_device *dev = hldev->ndev;
-	struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
+	struct vxgedev *vdev = netdev_priv(dev);
 	struct vxge_vpath *vpath = NULL;
 	int vpath_idx;
 
@@ -3527,9 +3682,9 @@
 		device_config->vp_config[i].tti.timer_ac_en =
 				VXGE_HW_TIM_TIMER_AC_ENABLE;
 
-		/* For msi-x with napi (each vector
-		has a handler of its own) -
-		Set CI to OFF for all vpaths */
+		/* For msi-x with napi (each vector has a handler of its own) -
+		 * Set CI to OFF for all vpaths
+		 */
 		device_config->vp_config[i].tti.timer_ci_en =
 			VXGE_HW_TIM_TIMER_CI_DISABLE;
 
@@ -3559,10 +3714,13 @@
 
 		device_config->vp_config[i].ring.ring_blocks  =
 						VXGE_HW_DEF_RING_BLOCKS;
+
 		device_config->vp_config[i].ring.buffer_mode =
 			VXGE_HW_RING_RXD_BUFFER_MODE_1;
+
 		device_config->vp_config[i].ring.rxds_limit  =
 				VXGE_HW_DEF_RING_RXDS_LIMIT;
+
 		device_config->vp_config[i].ring.scatter_mode =
 					VXGE_HW_RING_SCATTER_MODE_A;
 
@@ -3642,6 +3800,7 @@
 		device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX;
 		break;
 	}
+
 	/* Timer period between device poll */
 	device_config->device_poll_millis = VXGE_TIMER_DELAY;
 
@@ -3653,16 +3812,10 @@
 
 	vxge_debug_ll_config(VXGE_TRACE, "%s : Device Config Params ",
 			__func__);
-	vxge_debug_ll_config(VXGE_TRACE, "dma_blockpool_initial : %d",
-			device_config->dma_blockpool_initial);
-	vxge_debug_ll_config(VXGE_TRACE, "dma_blockpool_max : %d",
-			device_config->dma_blockpool_max);
 	vxge_debug_ll_config(VXGE_TRACE, "intr_mode : %d",
 			device_config->intr_mode);
 	vxge_debug_ll_config(VXGE_TRACE, "device_poll_millis : %d",
 			device_config->device_poll_millis);
-	vxge_debug_ll_config(VXGE_TRACE, "rts_mac_en : %d",
-			device_config->rts_mac_en);
 	vxge_debug_ll_config(VXGE_TRACE, "rth_en : %d",
 			device_config->rth_en);
 	vxge_debug_ll_config(VXGE_TRACE, "rth_it_type : %d",
@@ -3751,9 +3904,6 @@
 		vxge_debug_init(VXGE_TRACE,
 			"%s: MAC Address learning enabled", vdev->ndev->name);
 
-	vxge_debug_init(VXGE_TRACE,
-		"%s: Rx doorbell mode enabled", vdev->ndev->name);
-
 	for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
 		if (!vxge_bVALn(vpath_mask, i, 1))
 			continue;
@@ -3766,14 +3916,6 @@
 			((struct __vxge_hw_device  *)(vdev->devh))->
 				config.vp_config[i].rpa_strip_vlan_tag
 			? "Enabled" : "Disabled");
-		vxge_debug_init(VXGE_TRACE,
-			"%s: Ring blocks : %d", vdev->ndev->name,
-			((struct __vxge_hw_device  *)(vdev->devh))->
-				config.vp_config[i].ring.ring_blocks);
-		vxge_debug_init(VXGE_TRACE,
-			"%s: Fifo blocks : %d", vdev->ndev->name,
-			((struct __vxge_hw_device  *)(vdev->devh))->
-				config.vp_config[i].fifo.fifo_blocks);
 		vxge_debug_ll_config(VXGE_TRACE,
 			"%s: Max frags : %d", vdev->ndev->name,
 			((struct __vxge_hw_device  *)(vdev->devh))->
@@ -3813,8 +3955,7 @@
 static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
 						pci_channel_state_t state)
 {
-	struct __vxge_hw_device  *hldev =
-		(struct __vxge_hw_device  *) pci_get_drvdata(pdev);
+	struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
 	struct net_device *netdev = hldev->ndev;
 
 	netif_device_detach(netdev);
@@ -3843,8 +3984,7 @@
  */
 static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
 {
-	struct __vxge_hw_device  *hldev =
-		(struct __vxge_hw_device  *) pci_get_drvdata(pdev);
+	struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
 	struct net_device *netdev = hldev->ndev;
 
 	struct vxgedev *vdev = netdev_priv(netdev);
@@ -3855,7 +3995,7 @@
 	}
 
 	pci_set_master(pdev);
-	vxge_reset(vdev);
+	do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
 
 	return PCI_ERS_RESULT_RECOVERED;
 }
@@ -3869,8 +4009,7 @@
  */
 static void vxge_io_resume(struct pci_dev *pdev)
 {
-	struct __vxge_hw_device  *hldev =
-		(struct __vxge_hw_device  *) pci_get_drvdata(pdev);
+	struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
 	struct net_device *netdev = hldev->ndev;
 
 	if (netif_running(netdev)) {
@@ -3914,6 +4053,156 @@
 	return num_functions;
 }
 
+int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override)
+{
+	struct __vxge_hw_device *hldev = vdev->devh;
+	u32 maj, min, bld, cmaj, cmin, cbld;
+	enum vxge_hw_status status;
+	const struct firmware *fw;
+	int ret;
+
+	ret = request_firmware(&fw, fw_name, &vdev->pdev->dev);
+	if (ret) {
+		vxge_debug_init(VXGE_ERR, "%s: Firmware file '%s' not found",
+				VXGE_DRIVER_NAME, fw_name);
+		goto out;
+	}
+
+	/* Load the new firmware onto the adapter */
+	status = vxge_update_fw_image(hldev, fw->data, fw->size);
+	if (status != VXGE_HW_OK) {
+		vxge_debug_init(VXGE_ERR,
+				"%s: FW image download to adapter failed '%s'.",
+				VXGE_DRIVER_NAME, fw_name);
+		ret = -EIO;
+		goto out;
+	}
+
+	/* Read the version of the new firmware */
+	status = vxge_hw_upgrade_read_version(hldev, &maj, &min, &bld);
+	if (status != VXGE_HW_OK) {
+		vxge_debug_init(VXGE_ERR,
+				"%s: Upgrade read version failed '%s'.",
+				VXGE_DRIVER_NAME, fw_name);
+		ret = -EIO;
+		goto out;
+	}
+
+	cmaj = vdev->config.device_hw_info.fw_version.major;
+	cmin = vdev->config.device_hw_info.fw_version.minor;
+	cbld = vdev->config.device_hw_info.fw_version.build;
+	/* It's possible the version in /lib/firmware is not the latest version.
+	 * If so, we could get into a loop of trying to upgrade to the latest
+	 * and flashing the older version.
+	 */
+	if (VXGE_FW_VER(maj, min, bld) == VXGE_FW_VER(cmaj, cmin, cbld) &&
+	    !override) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	printk(KERN_NOTICE "Upgrade to firmware version %d.%d.%d commencing\n",
+	       maj, min, bld);
+
+	/* Flash the adapter with the new firmware */
+	status = vxge_hw_flash_fw(hldev);
+	if (status != VXGE_HW_OK) {
+		vxge_debug_init(VXGE_ERR, "%s: Upgrade commit failed '%s'.",
+				VXGE_DRIVER_NAME, fw_name);
+		ret = -EIO;
+		goto out;
+	}
+
+	printk(KERN_NOTICE "Upgrade of firmware successful!  Adapter must be "
+	       "hard reset before use, which requires a system reboot or a "
+	       "hotplug event.\n");
+
+out:
+	return ret;
+}
+
+static int vxge_probe_fw_update(struct vxgedev *vdev)
+{
+	u32 maj, min, bld;
+	int ret, gpxe = 0;
+	char *fw_name;
+
+	maj = vdev->config.device_hw_info.fw_version.major;
+	min = vdev->config.device_hw_info.fw_version.minor;
+	bld = vdev->config.device_hw_info.fw_version.build;
+
+	if (VXGE_FW_VER(maj, min, bld) == VXGE_CERT_FW_VER)
+		return 0;
+
+	/* Ignore the build number when determining if the current firmware is
+	 * "too new" to load the driver
+	 */
+	if (VXGE_FW_VER(maj, min, 0) > VXGE_CERT_FW_VER) {
+		vxge_debug_init(VXGE_ERR, "%s: Firmware newer than last known "
+				"version, unable to load driver\n",
+				VXGE_DRIVER_NAME);
+		return -EINVAL;
+	}
+
+	/* Firmware 1.4.4 and older cannot be upgraded, and is too ancient to
+	 * work with this driver.
+	 */
+	if (VXGE_FW_VER(maj, min, bld) <= VXGE_FW_DEAD_VER) {
+		vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d cannot be "
+				"upgraded\n", VXGE_DRIVER_NAME, maj, min, bld);
+		return -EINVAL;
+	}
+
+	/* Pick the firmware file: use the gPXE variant if the adapter
+	 * reports any EPROM image
+	 */
+	if (VXGE_FW_VER(maj, min, bld) >= VXGE_EPROM_FW_VER) {
+		int i;
+		for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++)
+			if (vdev->devh->eprom_versions[i]) {
+				gpxe = 1;
+				break;
+			}
+	}
+	if (gpxe)
+		fw_name = "vxge/X3fw-pxe.ncf";
+	else
+		fw_name = "vxge/X3fw.ncf";
+
+	ret = vxge_fw_upgrade(vdev, fw_name, 0);
+	/* -EINVAL and -ENOENT are not fatal errors for flashing firmware on
+	 * probe, so ignore them.  Anything else, including a successful flash
+	 * (after which the adapter needs a hard reset), fails the probe.
+	 */
+	if (ret != -EINVAL && ret != -ENOENT)
+		return -EIO;
+	else
+		ret = 0;
+
+	if (VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, VXGE_CERT_FW_VER_MINOR, 0) >
+	    VXGE_FW_VER(maj, min, 0)) {
+		vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d is too old to"
+				" be used with this driver.\n"
+				"Please get the latest version from "
+				"ftp://ftp.s2io.com/pub/X3100-Drivers/FIRMWARE",
+				VXGE_DRIVER_NAME, maj, min, bld);
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
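Since the firmware file names above are fixed strings, MODULE_FIRMWARE annotations would let initramfs and packaging tools discover the blobs automatically; a hypothetical addition, not part of this patch:

	/* Hypothetical: advertise the firmware this probe path may request. */
	MODULE_FIRMWARE("vxge/X3fw.ncf");
	MODULE_FIRMWARE("vxge/X3fw-pxe.ncf");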
+static int __devinit is_sriov_initialized(struct pci_dev *pdev)
+{
+	int pos;
+	u16 ctrl;
+
+	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+	if (pos) {
+		pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &ctrl);
+		if (ctrl & PCI_SRIOV_CTRL_VFE)
+			return 1;
+	}
+	return 0;
+}
+
 /**
  * vxge_probe
  * @pdev : structure containing the PCI related information of the device.
@@ -3928,7 +4217,7 @@
 static int __devinit
 vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
 {
-	struct __vxge_hw_device  *hldev;
+	struct __vxge_hw_device *hldev;
 	enum vxge_hw_status status;
 	int ret;
 	int high_dma = 0;
@@ -3951,9 +4240,10 @@
 	attr.pdev = pdev;
 
 	/* In SRIOV-17 mode, functions of the same adapter
-	 * can be deployed on different buses */
-	if ((!pdev->is_virtfn) && ((bus != pdev->bus->number) ||
-		(device != PCI_SLOT(pdev->devfn))))
+	 * can be deployed on different buses
+	 */
+	if (((bus != pdev->bus->number) || (device != PCI_SLOT(pdev->devfn))) &&
+	    !pdev->is_virtfn)
 		new_device = 1;
 
 	bus = pdev->bus->number;
@@ -3971,6 +4261,7 @@
 		driver_config->config_dev_cnt = 0;
 		driver_config->total_dev_cnt = 0;
 	}
+
 	/* Now making the CPU based no of vpath calculation
 	 * applicable for individual functions as well.
 	 */
@@ -3993,11 +4284,11 @@
 		goto _exit0;
 	}
 
-	ll_config = kzalloc(sizeof(*ll_config), GFP_KERNEL);
+	ll_config = kzalloc(sizeof(struct vxge_config), GFP_KERNEL);
 	if (!ll_config) {
 		ret = -ENOMEM;
 		vxge_debug_init(VXGE_ERR,
-			"ll_config : malloc failed %s %d",
+			"device_config : malloc failed %s %d",
 			__FILE__, __LINE__);
 		goto _exit0;
 	}
@@ -4041,7 +4332,7 @@
 		goto _exit1;
 	}
 
-	if (pci_request_regions(pdev, VXGE_DRIVER_NAME)) {
+	if (pci_request_region(pdev, 0, VXGE_DRIVER_NAME)) {
 		vxge_debug_init(VXGE_ERR,
 			"%s : request regions failed", __func__);
 		ret = -ENODEV;
@@ -4072,16 +4363,6 @@
 		goto _exit3;
 	}
 
-	if (ll_config->device_hw_info.fw_version.major !=
-		VXGE_DRIVER_FW_VERSION_MAJOR) {
-		vxge_debug_init(VXGE_ERR,
-			"%s: Incorrect firmware version."
-			"Please upgrade the firmware to version 1.x.x",
-			VXGE_DRIVER_NAME);
-		ret = -EINVAL;
-		goto _exit3;
-	}
-
 	vpath_mask = ll_config->device_hw_info.vpath_mask;
 	if (vpath_mask == 0) {
 		vxge_debug_ll_config(VXGE_TRACE,
@@ -4110,14 +4391,13 @@
 		num_vfs = vxge_get_num_vfs(function_mode) - 1;
 
 	/* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */
-	if (is_sriov(function_mode) && (max_config_dev > 1) &&
-		(ll_config->intr_type != INTA) &&
-		(is_privileged == VXGE_HW_OK)) {
-		ret = pci_enable_sriov(pdev, ((max_config_dev - 1) < num_vfs)
-			? (max_config_dev - 1) : num_vfs);
+	if (is_sriov(function_mode) && !is_sriov_initialized(pdev) &&
+	   (ll_config->intr_type != INTA)) {
+		ret = pci_enable_sriov(pdev, num_vfs);
 		if (ret)
 			vxge_debug_ll_config(VXGE_ERR,
 				"Failed in enabling SRIOV mode: %d\n", ret);
+		/* No need to fail out, as an error here is non-fatal */
 	}
 
 	/*
@@ -4145,11 +4425,37 @@
 			goto _exit3;
 	}
 
+	if (VXGE_FW_VER(ll_config->device_hw_info.fw_version.major,
+			ll_config->device_hw_info.fw_version.minor,
+			ll_config->device_hw_info.fw_version.build) >=
+	    VXGE_EPROM_FW_VER) {
+		struct eprom_image img[VXGE_HW_MAX_ROM_IMAGES];
+
+		status = vxge_hw_vpath_eprom_img_ver_get(hldev, img);
+		if (status != VXGE_HW_OK) {
+			vxge_debug_init(VXGE_ERR, "%s: Reading of EPROM failed",
+					VXGE_DRIVER_NAME);
+			/* This is a non-fatal error, continue */
+		}
+
+		for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
+			hldev->eprom_versions[i] = img[i].version;
+			if (!img[i].is_valid)
+				break;
+			vxge_debug_init(VXGE_TRACE, "%s: EPROM %d, version "
+					"%d.%d.%d.%d\n", VXGE_DRIVER_NAME, i,
+					VXGE_EPROM_IMG_MAJOR(img[i].version),
+					VXGE_EPROM_IMG_MINOR(img[i].version),
+					VXGE_EPROM_IMG_FIX(img[i].version),
+					VXGE_EPROM_IMG_BUILD(img[i].version));
+		}
+	}
+
 	/* If FCS stripping is not disabled in the MAC, fail driver load */
-	if (vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask) != VXGE_HW_OK) {
-		vxge_debug_init(VXGE_ERR,
-			"%s: FCS stripping is not disabled in MAC"
-			" failing driver load", VXGE_DRIVER_NAME);
+	status = vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask);
+	if (status != VXGE_HW_OK) {
+		vxge_debug_init(VXGE_ERR, "%s: FCS stripping is enabled in MAC,"
+				" failing driver load", VXGE_DRIVER_NAME);
 		ret = -EINVAL;
 		goto _exit4;
 	}
@@ -4163,28 +4469,32 @@
 	ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
 	ll_config->addr_learn_en = addr_learn_en;
 	ll_config->rth_algorithm = RTH_ALG_JENKINS;
-	ll_config->rth_hash_type_tcpipv4 = VXGE_HW_RING_HASH_TYPE_TCP_IPV4;
-	ll_config->rth_hash_type_ipv4 = VXGE_HW_RING_HASH_TYPE_NONE;
-	ll_config->rth_hash_type_tcpipv6 = VXGE_HW_RING_HASH_TYPE_NONE;
-	ll_config->rth_hash_type_ipv6 = VXGE_HW_RING_HASH_TYPE_NONE;
-	ll_config->rth_hash_type_tcpipv6ex = VXGE_HW_RING_HASH_TYPE_NONE;
-	ll_config->rth_hash_type_ipv6ex = VXGE_HW_RING_HASH_TYPE_NONE;
+	ll_config->rth_hash_type_tcpipv4 = 1;
+	ll_config->rth_hash_type_ipv4 = 0;
+	ll_config->rth_hash_type_tcpipv6 = 0;
+	ll_config->rth_hash_type_ipv6 = 0;
+	ll_config->rth_hash_type_tcpipv6ex = 0;
+	ll_config->rth_hash_type_ipv6ex = 0;
 	ll_config->rth_bkt_sz = RTH_BUCKET_SIZE;
 	ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
 	ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
 
-	if (vxge_device_register(hldev, ll_config, high_dma, no_of_vpath,
-		&vdev)) {
+	ret = vxge_device_register(hldev, ll_config, high_dma, no_of_vpath,
+				   &vdev);
+	if (ret) {
 		ret = -EINVAL;
 		goto _exit4;
 	}
 
+	ret = vxge_probe_fw_update(vdev);
+	if (ret)
+		goto _exit5;
+
 	vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL);
 	VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
 		vxge_hw_device_trace_level_get(hldev));
 
 	/* set private HW device info */
-	hldev->ndev = vdev->ndev;
 	vdev->mtu = VXGE_HW_DEFAULT_MTU;
 	vdev->bar0 = attr.bar0;
 	vdev->max_vpath_supported = max_vpath_supported;
@@ -4278,15 +4588,13 @@
 
 	/* Copy the station mac address to the list */
 	for (i = 0; i < vdev->no_of_vpath; i++) {
-		entry =	(struct vxge_mac_addrs *)
-				kzalloc(sizeof(struct vxge_mac_addrs),
-					GFP_KERNEL);
+		entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_KERNEL);
 		if (NULL == entry) {
 			vxge_debug_init(VXGE_ERR,
 				"%s: mac_addr_list : memory allocation failed",
 				vdev->ndev->name);
 			ret = -EPERM;
-			goto _exit5;
+			goto _exit6;
 		}
 		macaddr = (u8 *)&entry->macaddr;
 		memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN);
@@ -4326,10 +4634,10 @@
 	kfree(ll_config);
 	return 0;
 
-_exit5:
+_exit6:
 	for (i = 0; i < vdev->no_of_vpath; i++)
 		vxge_free_mac_add_list(&vdev->vpaths[i]);
-
+_exit5:
 	vxge_device_unregister(hldev);
 _exit4:
 	pci_disable_sriov(pdev);
@@ -4337,7 +4645,7 @@
 _exit3:
 	iounmap(attr.bar0);
 _exit2:
-	pci_release_regions(pdev);
+	pci_release_region(pdev, 0);
 _exit1:
 	pci_disable_device(pdev);
 _exit0:
@@ -4354,34 +4662,25 @@
  * Description: This function is called by the Pci subsystem to release a
  * PCI device and free up all resource held up by the device.
  */
-static void __devexit
-vxge_remove(struct pci_dev *pdev)
+static void __devexit vxge_remove(struct pci_dev *pdev)
 {
-	struct __vxge_hw_device  *hldev;
+	struct __vxge_hw_device *hldev;
 	struct vxgedev *vdev = NULL;
 	struct net_device *dev;
 	int i = 0;
-#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
-	(VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
-	u32 level_trace;
-#endif
 
-	hldev = (struct __vxge_hw_device  *) pci_get_drvdata(pdev);
+	hldev = pci_get_drvdata(pdev);
 
 	if (hldev == NULL)
 		return;
+
 	dev = hldev->ndev;
 	vdev = netdev_priv(dev);
 
-#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
-	(VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
-	level_trace = vdev->level_trace;
-#endif
-	vxge_debug_entryexit(level_trace,
-		"%s:%d", __func__, __LINE__);
+	vxge_debug_entryexit(vdev->level_trace, "%s:%d", __func__, __LINE__);
 
-	vxge_debug_init(level_trace,
-		"%s : removing PCI device...", __func__);
+	vxge_debug_init(vdev->level_trace, "%s : removing PCI device...",
+			__func__);
 	vxge_device_unregister(hldev);
 
 	for (i = 0; i < vdev->no_of_vpath; i++) {
@@ -4394,21 +4693,19 @@
 
 	iounmap(vdev->bar0);
 
-	pci_disable_sriov(pdev);
-
 	/* we are safe to free it now */
 	free_netdev(dev);
 
-	vxge_debug_init(level_trace,
-		"%s:%d  Device unregistered", __func__, __LINE__);
+	vxge_debug_init(vdev->level_trace, "%s:%d Device unregistered",
+			__func__, __LINE__);
 
 	vxge_hw_device_terminate(hldev);
 
 	pci_disable_device(pdev);
-	pci_release_regions(pdev);
+	pci_release_region(pdev, 0);
 	pci_set_drvdata(pdev, NULL);
-	vxge_debug_entryexit(level_trace,
-		"%s:%d  Exiting...", __func__, __LINE__);
+	vxge_debug_entryexit(vdev->level_trace, "%s:%d  Exiting...", __func__,
+			     __LINE__);
 }
 
 static struct pci_error_handlers vxge_err_handler = {
@@ -4444,6 +4741,10 @@
 		return -ENOMEM;
 
 	ret = pci_register_driver(&vxge_driver);
+	if (ret) {
+		kfree(driver_config);
+		goto err;
+	}
 
 	if (driver_config->config_dev_cnt &&
 	   (driver_config->config_dev_cnt != driver_config->total_dev_cnt))
@@ -4451,10 +4752,7 @@
 			"%s: Configured %d of %d devices",
 			VXGE_DRIVER_NAME, driver_config->config_dev_cnt,
 			driver_config->total_dev_cnt);
-
-	if (ret)
-		kfree(driver_config);
-
+err:
 	return ret;
 }
 
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h
index de64536..5746fed 100644
--- a/drivers/net/vxge/vxge-main.h
+++ b/drivers/net/vxge/vxge-main.h
@@ -29,6 +29,9 @@
 
 #define PCI_DEVICE_ID_TITAN_WIN		0x5733
 #define PCI_DEVICE_ID_TITAN_UNI		0x5833
+#define VXGE_HW_TITAN1_PCI_REVISION	1
+#define VXGE_HW_TITAN1A_PCI_REVISION	2
+
 #define	VXGE_USE_DEFAULT		0xffffffff
 #define VXGE_HW_VPATH_MSIX_ACTIVE	4
 #define VXGE_ALARM_MSIX_ID		2
@@ -53,11 +56,13 @@
 
 #define VXGE_TTI_BTIMER_VAL 250000
 
-#define VXGE_TTI_LTIMER_VAL 1000
-#define VXGE_TTI_RTIMER_VAL 0
-#define VXGE_RTI_BTIMER_VAL 250
-#define VXGE_RTI_LTIMER_VAL 100
-#define VXGE_RTI_RTIMER_VAL 0
+#define VXGE_TTI_LTIMER_VAL	1000
+#define VXGE_T1A_TTI_LTIMER_VAL	80
+#define VXGE_TTI_RTIMER_VAL	0
+#define VXGE_T1A_TTI_RTIMER_VAL	400
+#define VXGE_RTI_BTIMER_VAL	250
+#define VXGE_RTI_LTIMER_VAL	100
+#define VXGE_RTI_RTIMER_VAL	0
 #define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH
 #define VXGE_ISR_POLLING_CNT 	8
 #define VXGE_MAX_CONFIG_DEV	0xFF
@@ -76,14 +81,32 @@
 #define TTI_TX_UFC_B	40
 #define TTI_TX_UFC_C	60
 #define TTI_TX_UFC_D	100
+#define TTI_T1A_TX_UFC_A	30
+#define TTI_T1A_TX_UFC_B	80
+/* The upper frame counts scale linearly with MTU:
+ * slope = (max_mtu - min_mtu) / (max_mtu_ufc - min_mtu_ufc) ~= 93,
+ * i.e. UFC_C is 60 at a 9k MTU and ~140 at a 1.5k MTU.
+ */
+#define TTI_T1A_TX_UFC_C(mtu)	(60 + ((VXGE_HW_MAX_MTU - (mtu)) / 93))
 
-#define RTI_RX_URANGE_A	5
-#define RTI_RX_URANGE_B	15
-#define RTI_RX_URANGE_C	40
-#define RTI_RX_UFC_A	1
-#define RTI_RX_UFC_B	5
-#define RTI_RX_UFC_C	10
-#define RTI_RX_UFC_D	15
+/* Same idea for UFC_D, with a slope of ~37:
+ * 100 at a 9k MTU, ~300 at a 1.5k MTU (worked example below).
+ */
+#define TTI_T1A_TX_UFC_D(mtu)	(100 + ((VXGE_HW_MAX_MTU - (mtu)) / 37))
+
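As a quick numeric check of the two T1A UFC macros above (a sketch assuming the 9k byte MTU ceiling the comments refer to; the real VXGE_HW_MAX_MTU constant lives in the HW headers):

	/* Illustrative only, assuming max MTU 9000 and min MTU 1500,
	 * with C integer division:
	 *   TTI_T1A_TX_UFC_C(9000) = 60  + (9000 - 9000) / 93 = 60
	 *   TTI_T1A_TX_UFC_C(1500) = 60  + (9000 - 1500) / 93 = 140
	 *   TTI_T1A_TX_UFC_D(9000) = 100 + (9000 - 9000) / 37 = 100
	 *   TTI_T1A_TX_UFC_D(1500) = 100 + (9000 - 1500) / 37 = 302
	 */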
+#define RTI_RX_URANGE_A		5
+#define RTI_RX_URANGE_B		15
+#define RTI_RX_URANGE_C		40
+#define RTI_T1A_RX_URANGE_A	1
+#define RTI_T1A_RX_URANGE_B	20
+#define RTI_T1A_RX_URANGE_C	50
+#define RTI_RX_UFC_A		1
+#define RTI_RX_UFC_B		5
+#define RTI_RX_UFC_C		10
+#define RTI_RX_UFC_D		15
+#define RTI_T1A_RX_UFC_B	20
+#define RTI_T1A_RX_UFC_C	50
+#define RTI_T1A_RX_UFC_D	60
+
 
 /* Milli secs timer period */
 #define VXGE_TIMER_DELAY		10000
@@ -145,15 +168,15 @@
 
 	int		addr_learn_en;
 
-	int		rth_steering;
-	int		rth_algorithm;
-	int		rth_hash_type_tcpipv4;
-	int		rth_hash_type_ipv4;
-	int		rth_hash_type_tcpipv6;
-	int		rth_hash_type_ipv6;
-	int		rth_hash_type_tcpipv6ex;
-	int		rth_hash_type_ipv6ex;
-	int		rth_bkt_sz;
+	u32		rth_steering:2,
+			rth_algorithm:2,
+			rth_hash_type_tcpipv4:1,
+			rth_hash_type_ipv4:1,
+			rth_hash_type_tcpipv6:1,
+			rth_hash_type_ipv6:1,
+			rth_hash_type_tcpipv6ex:1,
+			rth_hash_type_ipv6ex:1,
+			rth_bkt_sz:8;
 	int		rth_jhash_golden_ratio;
 	int		tx_steering_type;
 	int 	fifo_indicate_max_pkts;
@@ -248,8 +271,9 @@
 	 */
 	int driver_id;
 
-	 /* copy of the flag indicating whether rx_csum is to be used */
-	u32 rx_csum;
+	/* copy of the flag indicating whether rx_csum is to be used */
+	u32 rx_csum:1,
+	    rx_hwts:1;
 
 	int pkts_processed;
 	int budget;
@@ -281,8 +305,8 @@
 	int is_configured;
 	int is_open;
 	struct vxgedev *vdev;
-	u8 (macaddr)[ETH_ALEN];
-	u8 (macmask)[ETH_ALEN];
+	u8 macaddr[ETH_ALEN];
+	u8 macmask[ETH_ALEN];
 
 #define VXGE_MAX_LEARN_MAC_ADDR_CNT	2048
 	/* mac addresses currently programmed into NIC */
@@ -327,7 +351,9 @@
 	u16		all_multi_flg;
 
 	 /* A flag indicating whether rx_csum is to be used or not. */
-	u32	rx_csum;
+	u32	rx_csum:1,
+		rx_hwts:1,
+		titan1:1;
 
 	struct vxge_msix_entry *vxge_entries;
 	struct msix_entry *entries;
@@ -369,6 +395,7 @@
 	u32 		level_err;
 	u32 		level_trace;
 	char		fw_version[VXGE_HW_FW_STRLEN];
+	struct work_struct reset_task;
 };
 
 struct vxge_rx_priv {
@@ -387,8 +414,6 @@
 	static int p = val; \
 	module_param(p, int, 0)
 
-#define vxge_os_bug(fmt...)		{ printk(fmt); BUG(); }
-
 #define vxge_os_timer(timer, handle, arg, exp) do { \
 		init_timer(&timer); \
 		timer.function = handle; \
@@ -396,7 +421,10 @@
 		mod_timer(&timer, (jiffies + exp)); \
 	} while (0);
 
-extern void vxge_initialize_ethtool_ops(struct net_device *ndev);
+void vxge_initialize_ethtool_ops(struct net_device *ndev);
+enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
+int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override);
+
 /**
  * #define VXGE_DEBUG_INIT: debug for initialization functions
  * #define VXGE_DEBUG_TX	 : debug transmit related functions
diff --git a/drivers/net/vxge/vxge-reg.h b/drivers/net/vxge/vxge-reg.h
index 3dd5c96..3e658b1 100644
--- a/drivers/net/vxge/vxge-reg.h
+++ b/drivers/net/vxge/vxge-reg.h
@@ -49,6 +49,33 @@
 #define VXGE_HW_TITAN_VPMGMT_REG_SPACES			17
 #define VXGE_HW_TITAN_VPATH_REG_SPACES			17
 
+#define VXGE_HW_FW_API_GET_EPROM_REV			31
+
+#define VXGE_EPROM_IMG_MAJOR(val)		(u32) vxge_bVALn(val, 48, 4)
+#define VXGE_EPROM_IMG_MINOR(val)		(u32) vxge_bVALn(val, 52, 4)
+#define VXGE_EPROM_IMG_FIX(val)			(u32) vxge_bVALn(val, 56, 4)
+#define VXGE_EPROM_IMG_BUILD(val)		(u32) vxge_bVALn(val, 60, 4)
+
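The extractors above follow Titan's big-endian bit numbering, where bit 0 is the most significant bit of a 64-bit register word.  A sketch of the assumed vxge_bVALn semantics, for illustration only (the real macro is defined in the driver headers):

	#include <linux/types.h>	/* u64, kernel context assumed */

	/* Assumed behaviour: extract n bits starting at bit offset loc,
	 * counting from the MSB of the 64-bit word. */
	static inline u64 example_bvaln(u64 bits, unsigned int loc, unsigned int n)
	{
		return (bits >> (64 - loc - n)) & ((1ULL << n) - 1);
	}
	/* e.g. example_bvaln(val, 48, 4) picks out the EPROM image major rev. */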
+#define VXGE_HW_GET_EPROM_IMAGE_INDEX(val)		vxge_bVALn(val, 16, 8)
+#define VXGE_HW_GET_EPROM_IMAGE_VALID(val)		vxge_bVALn(val, 31, 1)
+#define VXGE_HW_GET_EPROM_IMAGE_TYPE(val)		vxge_bVALn(val, 40, 8)
+#define VXGE_HW_GET_EPROM_IMAGE_REV(val)		vxge_bVALn(val, 48, 16)
+#define VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(val)	vxge_vBIT(val, 16, 8)
+
+#define VXGE_HW_FW_API_GET_FUNC_MODE			29
+#define VXGE_HW_GET_FUNC_MODE_VAL(val)			((val) & 0xFF)
+
+#define VXGE_HW_FW_UPGRADE_MEMO				13
+#define VXGE_HW_FW_UPGRADE_ACTION			16
+#define VXGE_HW_FW_UPGRADE_OFFSET_START			2
+#define VXGE_HW_FW_UPGRADE_OFFSET_SEND			3
+#define VXGE_HW_FW_UPGRADE_OFFSET_COMMIT		4
+#define VXGE_HW_FW_UPGRADE_OFFSET_READ			5
+
+#define VXGE_HW_FW_UPGRADE_BLK_SIZE			16
+#define VXGE_HW_UPGRADE_GET_RET_ERR_CODE(val)		((val) & 0xff)
+#define VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(val)		(((val) >> 8) & 0xff)
+
 #define VXGE_HW_ASIC_MODE_RESERVED				0
 #define VXGE_HW_ASIC_MODE_NO_IOV				1
 #define VXGE_HW_ASIC_MODE_SR_IOV				2
@@ -165,13 +192,13 @@
 #define	VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_ETYPE		2
 #define	VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_PN		3
 #define	VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG	5
-#define	VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT	6
+#define	VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT		6
 #define	VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_JHASH_CFG	7
 #define	VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK		8
 #define	VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY		9
 #define	VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_QOS		10
 #define	VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DS		11
-#define	VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT	12
+#define	VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT		12
 #define	VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO		13
 
 #define	VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(bits) \
@@ -437,6 +464,7 @@
 #define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(bits) \
 							vxge_bVALn(bits, 48, 16)
 #define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_BUILD vxge_vBIT(val, 48, 16)
+#define VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(bits) vxge_bVALn(bits, 0, 8)
 
 #define	VXGE_HW_SRPCIM_TO_VPATH_ALARM_REG_GET_PPIF_SRPCIM_TO_VPATH_ALARM(bits)\
 							vxge_bVALn(bits, 0, 18)
@@ -3998,6 +4026,7 @@
 #define	VXGE_HW_PRC_CFG6_L4_CPC_TRSFR_CODE_EN	vxge_mBIT(9)
 #define VXGE_HW_PRC_CFG6_RXD_CRXDT(val) vxge_vBIT(val, 23, 9)
 #define VXGE_HW_PRC_CFG6_RXD_SPAT(val) vxge_vBIT(val, 36, 9)
+#define VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val)	vxge_bVALn(val, 36, 9)
 /*0x00a78*/	u64	prc_cfg7;
 #define VXGE_HW_PRC_CFG7_SCATTER_MODE(val) vxge_vBIT(val, 6, 2)
 #define	VXGE_HW_PRC_CFG7_SMART_SCAT_EN	vxge_mBIT(11)
diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c
index 4bdb611..42cc298 100644
--- a/drivers/net/vxge/vxge-traffic.c
+++ b/drivers/net/vxge/vxge-traffic.c
@@ -17,13 +17,6 @@
 #include "vxge-config.h"
 #include "vxge-main.h"
 
-static enum vxge_hw_status
-__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev,
-			      u32 vp_id, enum vxge_hw_event type);
-static enum vxge_hw_status
-__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
-			      u32 skip_alarms);
-
 /*
  * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
  * @vp: Virtual Path handle.
@@ -419,6 +412,384 @@
 }
 
 /**
+ * __vxge_hw_device_handle_error - Handle error
+ * @hldev: HW device
+ * @vp_id: Vpath Id
+ * @type: Error type. Please see enum vxge_hw_event{}
+ *
+ * Handle error.
+ */
+static enum vxge_hw_status
+__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id,
+			      enum vxge_hw_event type)
+{
+	switch (type) {
+	case VXGE_HW_EVENT_UNKNOWN:
+		break;
+	case VXGE_HW_EVENT_RESET_START:
+	case VXGE_HW_EVENT_RESET_COMPLETE:
+	case VXGE_HW_EVENT_LINK_DOWN:
+	case VXGE_HW_EVENT_LINK_UP:
+		goto out;
+	case VXGE_HW_EVENT_ALARM_CLEARED:
+		goto out;
+	case VXGE_HW_EVENT_ECCERR:
+	case VXGE_HW_EVENT_MRPCIM_ECCERR:
+		goto out;
+	case VXGE_HW_EVENT_FIFO_ERR:
+	case VXGE_HW_EVENT_VPATH_ERR:
+	case VXGE_HW_EVENT_CRITICAL_ERR:
+	case VXGE_HW_EVENT_SERR:
+		break;
+	case VXGE_HW_EVENT_SRPCIM_SERR:
+	case VXGE_HW_EVENT_MRPCIM_SERR:
+		goto out;
+	case VXGE_HW_EVENT_SLOT_FREEZE:
+		break;
+	default:
+		vxge_assert(0);
+		goto out;
+	}
+
+	/* notify driver */
+	if (hldev->uld_callbacks.crit_err)
+		hldev->uld_callbacks.crit_err(
+			(struct __vxge_hw_device *)hldev,
+			type, vp_id);
+out:
+
+	return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_device_handle_link_down_ind
+ * @hldev: HW device handle.
+ *
+ * Link down indication handler. The function is invoked by HW when
+ * Titan indicates that the link is down.
+ */
+static enum vxge_hw_status
+__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
+{
+	/*
+	 * If the link is already marked down, there is nothing to do.
+	 */
+	if (hldev->link_state == VXGE_HW_LINK_DOWN)
+		goto exit;
+
+	hldev->link_state = VXGE_HW_LINK_DOWN;
+
+	/* notify driver */
+	if (hldev->uld_callbacks.link_down)
+		hldev->uld_callbacks.link_down(hldev);
+exit:
+	return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_device_handle_link_up_ind
+ * @hldev: HW device handle.
+ *
+ * Link up indication handler. The function is invoked by HW when
+ * Titan indicates that the link is up for a programmable amount of time.
+ */
+static enum vxge_hw_status
+__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
+{
+	/*
+	 * If the link is already marked up, there is nothing to do.
+	 */
+	if (hldev->link_state == VXGE_HW_LINK_UP)
+		goto exit;
+
+	hldev->link_state = VXGE_HW_LINK_UP;
+
+	/* notify driver */
+	if (hldev->uld_callbacks.link_up)
+		hldev->uld_callbacks.link_up(hldev);
+exit:
+	return VXGE_HW_OK;
+}
+
+/*
+ * __vxge_hw_vpath_alarm_process - Process Alarms.
+ * @vpath: Virtual Path.
+ * @skip_alarms: Do not clear the alarms
+ *
+ * Process vpath alarms.
+ *
+ */
+static enum vxge_hw_status
+__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
+			      u32 skip_alarms)
+{
+	u64 val64;
+	u64 alarm_status;
+	u64 pic_status;
+	struct __vxge_hw_device *hldev = NULL;
+	enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
+	u64 mask64;
+	struct vxge_hw_vpath_stats_sw_info *sw_stats;
+	struct vxge_hw_vpath_reg __iomem *vp_reg;
+
+	if (vpath == NULL) {
+		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
+			alarm_event);
+		goto out2;
+	}
+
+	hldev = vpath->hldev;
+	vp_reg = vpath->vp_reg;
+	alarm_status = readq(&vp_reg->vpath_general_int_status);
+
+	if (alarm_status == VXGE_HW_ALL_FOXES) {
+		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
+			alarm_event);
+		goto out;
+	}
+
+	sw_stats = vpath->sw_stats;
+
+	if (alarm_status & ~(
+		VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
+		VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
+		VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
+		VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
+		sw_stats->error_stats.unknown_alarms++;
+
+		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
+			alarm_event);
+		goto out;
+	}
+
+	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
+
+		val64 = readq(&vp_reg->xgmac_vp_int_status);
+
+		if (val64 &
+		VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
+
+			val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
+
+			if (((val64 &
+			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
+			     (!(val64 &
+				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
+			    ((val64 &
+			     VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
+			     (!(val64 &
+				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
+				     ))) {
+				sw_stats->error_stats.network_sustained_fault++;
+
+				writeq(
+				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
+					&vp_reg->asic_ntwk_vp_err_mask);
+
+				__vxge_hw_device_handle_link_down_ind(hldev);
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_LINK_DOWN, alarm_event);
+			}
+
+			if (((val64 &
+			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
+			     (!(val64 &
+				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
+			    ((val64 &
+			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
+			     (!(val64 &
+				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
+				     ))) {
+
+				sw_stats->error_stats.network_sustained_ok++;
+
+				writeq(
+				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
+					&vp_reg->asic_ntwk_vp_err_mask);
+
+				__vxge_hw_device_handle_link_up_ind(hldev);
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_LINK_UP, alarm_event);
+			}
+
+			writeq(VXGE_HW_INTR_MASK_ALL,
+				&vp_reg->asic_ntwk_vp_err_reg);
+
+			alarm_event = VXGE_HW_SET_LEVEL(
+				VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
+
+			if (skip_alarms)
+				return VXGE_HW_OK;
+		}
+	}
+
+	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
+
+		pic_status = readq(&vp_reg->vpath_ppif_int_status);
+
+		if (pic_status &
+		    VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
+
+			val64 = readq(&vp_reg->general_errors_reg);
+			mask64 = readq(&vp_reg->general_errors_mask);
+
+			if ((val64 &
+				VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
+				~mask64) {
+				sw_stats->error_stats.ini_serr_det++;
+
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_SERR, alarm_event);
+			}
+
+			if ((val64 &
+			    VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
+				~mask64) {
+				sw_stats->error_stats.dblgen_fifo0_overflow++;
+
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_FIFO_ERR, alarm_event);
+			}
+
+			if ((val64 &
+			    VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
+				~mask64)
+				sw_stats->error_stats.statsb_pif_chain_error++;
+
+			if ((val64 &
+			   VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
+				~mask64)
+				sw_stats->error_stats.statsb_drop_timeout++;
+
+			if ((val64 &
+				VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
+				~mask64)
+				sw_stats->error_stats.target_illegal_access++;
+
+			if (!skip_alarms) {
+				writeq(VXGE_HW_INTR_MASK_ALL,
+					&vp_reg->general_errors_reg);
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_ALARM_CLEARED,
+					alarm_event);
+			}
+		}
+
+		if (pic_status &
+		    VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
+
+			val64 = readq(&vp_reg->kdfcctl_errors_reg);
+			mask64 = readq(&vp_reg->kdfcctl_errors_mask);
+
+			if ((val64 &
+			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
+				~mask64) {
+				sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
+
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_FIFO_ERR,
+					alarm_event);
+			}
+
+			if ((val64 &
+			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
+				~mask64) {
+				sw_stats->error_stats.kdfcctl_fifo0_poison++;
+
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_FIFO_ERR,
+					alarm_event);
+			}
+
+			if ((val64 &
+			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
+				~mask64) {
+				sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
+
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_FIFO_ERR,
+					alarm_event);
+			}
+
+			if (!skip_alarms) {
+				writeq(VXGE_HW_INTR_MASK_ALL,
+					&vp_reg->kdfcctl_errors_reg);
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_ALARM_CLEARED,
+					alarm_event);
+			}
+		}
+
+	}
+
+	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
+
+		val64 = readq(&vp_reg->wrdma_alarm_status);
+
+		if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
+
+			val64 = readq(&vp_reg->prc_alarm_reg);
+			mask64 = readq(&vp_reg->prc_alarm_mask);
+
+			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP) &
+				~mask64)
+				sw_stats->error_stats.prc_ring_bumps++;
+
+			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
+				~mask64) {
+				sw_stats->error_stats.prc_rxdcm_sc_err++;
+
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_VPATH_ERR,
+					alarm_event);
+			}
+
+			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
+				& ~mask64) {
+				sw_stats->error_stats.prc_rxdcm_sc_abort++;
+
+				alarm_event = VXGE_HW_SET_LEVEL(
+						VXGE_HW_EVENT_VPATH_ERR,
+						alarm_event);
+			}
+
+			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
+				 & ~mask64) {
+				sw_stats->error_stats.prc_quanta_size_err++;
+
+				alarm_event = VXGE_HW_SET_LEVEL(
+					VXGE_HW_EVENT_VPATH_ERR,
+					alarm_event);
+			}
+
+			if (!skip_alarms) {
+				writeq(VXGE_HW_INTR_MASK_ALL,
+					&vp_reg->prc_alarm_reg);
+				alarm_event = VXGE_HW_SET_LEVEL(
+						VXGE_HW_EVENT_ALARM_CLEARED,
+						alarm_event);
+			}
+		}
+	}
+out:
+	hldev->stats.sw_dev_err_stats.vpath_alarms++;
+out2:
+	if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
+		(alarm_event == VXGE_HW_EVENT_UNKNOWN))
+		return VXGE_HW_OK;
+
+	__vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
+
+	if (alarm_event == VXGE_HW_EVENT_SERR)
+		return VXGE_HW_ERR_CRITICAL;
+
+	return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
+		VXGE_HW_ERR_SLOT_FREEZE :
+		(alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
+		VXGE_HW_ERR_VPATH;
+}
+
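Throughout this handler VXGE_HW_SET_LEVEL keeps the most severe event seen so far.  Its real definition lives elsewhere in the driver; the assumed shape, for illustration only:

	/* Assumption: event codes increase with severity, so the running
	 * alarm_event is simply the maximum of the values seen. */
	#define EXAMPLE_SET_LEVEL(a, b)	(((a) > (b)) ? (a) : (b))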
+/**
  * vxge_hw_device_begin_irq - Begin IRQ processing.
  * @hldev: HW device handle.
  * @skip_alarms: Do not clear the alarms
@@ -513,108 +884,6 @@
 	return ret;
 }
 
-/*
- * __vxge_hw_device_handle_link_up_ind
- * @hldev: HW device handle.
- *
- * Link up indication handler. The function is invoked by HW when
- * Titan indicates that the link is up for programmable amount of time.
- */
-static enum vxge_hw_status
-__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
-{
-	/*
-	 * If the previous link state is not down, return.
-	 */
-	if (hldev->link_state == VXGE_HW_LINK_UP)
-		goto exit;
-
-	hldev->link_state = VXGE_HW_LINK_UP;
-
-	/* notify driver */
-	if (hldev->uld_callbacks.link_up)
-		hldev->uld_callbacks.link_up(hldev);
-exit:
-	return VXGE_HW_OK;
-}
-
-/*
- * __vxge_hw_device_handle_link_down_ind
- * @hldev: HW device handle.
- *
- * Link down indication handler. The function is invoked by HW when
- * Titan indicates that the link is down.
- */
-static enum vxge_hw_status
-__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
-{
-	/*
-	 * If the previous link state is not down, return.
-	 */
-	if (hldev->link_state == VXGE_HW_LINK_DOWN)
-		goto exit;
-
-	hldev->link_state = VXGE_HW_LINK_DOWN;
-
-	/* notify driver */
-	if (hldev->uld_callbacks.link_down)
-		hldev->uld_callbacks.link_down(hldev);
-exit:
-	return VXGE_HW_OK;
-}
-
-/**
- * __vxge_hw_device_handle_error - Handle error
- * @hldev: HW device
- * @vp_id: Vpath Id
- * @type: Error type. Please see enum vxge_hw_event{}
- *
- * Handle error.
- */
-static enum vxge_hw_status
-__vxge_hw_device_handle_error(
-		struct __vxge_hw_device *hldev,
-		u32 vp_id,
-		enum vxge_hw_event type)
-{
-	switch (type) {
-	case VXGE_HW_EVENT_UNKNOWN:
-		break;
-	case VXGE_HW_EVENT_RESET_START:
-	case VXGE_HW_EVENT_RESET_COMPLETE:
-	case VXGE_HW_EVENT_LINK_DOWN:
-	case VXGE_HW_EVENT_LINK_UP:
-		goto out;
-	case VXGE_HW_EVENT_ALARM_CLEARED:
-		goto out;
-	case VXGE_HW_EVENT_ECCERR:
-	case VXGE_HW_EVENT_MRPCIM_ECCERR:
-		goto out;
-	case VXGE_HW_EVENT_FIFO_ERR:
-	case VXGE_HW_EVENT_VPATH_ERR:
-	case VXGE_HW_EVENT_CRITICAL_ERR:
-	case VXGE_HW_EVENT_SERR:
-		break;
-	case VXGE_HW_EVENT_SRPCIM_SERR:
-	case VXGE_HW_EVENT_MRPCIM_SERR:
-		goto out;
-	case VXGE_HW_EVENT_SLOT_FREEZE:
-		break;
-	default:
-		vxge_assert(0);
-		goto out;
-	}
-
-	/* notify driver */
-	if (hldev->uld_callbacks.crit_err)
-		hldev->uld_callbacks.crit_err(
-			(struct __vxge_hw_device *)hldev,
-			type, vp_id);
-out:
-
-	return VXGE_HW_OK;
-}
-
 /**
  * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
  * condition that has caused the Tx and RX interrupt.
@@ -699,8 +968,8 @@
  * Posts a dtr to work array.
  *
  */
-static void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel,
-				     void *dtrh)
+static void
+vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
 {
 	vxge_assert(channel->work_arr[channel->post_index] == NULL);
 
@@ -911,10 +1180,6 @@
  */
 void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
 {
-	struct __vxge_hw_channel *channel;
-
-	channel = &ring->channel;
-
 	wmb();
 	vxge_hw_ring_rxd_post_post(ring, rxdh);
 }
@@ -1868,284 +2133,6 @@
 }
 
 /*
- * __vxge_hw_vpath_alarm_process - Process Alarms.
- * @vpath: Virtual Path.
- * @skip_alarms: Do not clear the alarms
- *
- * Process vpath alarms.
- *
- */
-static enum vxge_hw_status
-__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
-			      u32 skip_alarms)
-{
-	u64 val64;
-	u64 alarm_status;
-	u64 pic_status;
-	struct __vxge_hw_device *hldev = NULL;
-	enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
-	u64 mask64;
-	struct vxge_hw_vpath_stats_sw_info *sw_stats;
-	struct vxge_hw_vpath_reg __iomem *vp_reg;
-
-	if (vpath == NULL) {
-		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
-			alarm_event);
-		goto out2;
-	}
-
-	hldev = vpath->hldev;
-	vp_reg = vpath->vp_reg;
-	alarm_status = readq(&vp_reg->vpath_general_int_status);
-
-	if (alarm_status == VXGE_HW_ALL_FOXES) {
-		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
-			alarm_event);
-		goto out;
-	}
-
-	sw_stats = vpath->sw_stats;
-
-	if (alarm_status & ~(
-		VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
-		VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
-		VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
-		VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
-		sw_stats->error_stats.unknown_alarms++;
-
-		alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
-			alarm_event);
-		goto out;
-	}
-
-	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
-
-		val64 = readq(&vp_reg->xgmac_vp_int_status);
-
-		if (val64 &
-		VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
-
-			val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
-
-			if (((val64 &
-			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
-			     (!(val64 &
-				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
-			    ((val64 &
-			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
-			     (!(val64 &
-				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
-				     ))) {
-				sw_stats->error_stats.network_sustained_fault++;
-
-				writeq(
-				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
-					&vp_reg->asic_ntwk_vp_err_mask);
-
-				__vxge_hw_device_handle_link_down_ind(hldev);
-				alarm_event = VXGE_HW_SET_LEVEL(
-					VXGE_HW_EVENT_LINK_DOWN, alarm_event);
-			}
-
-			if (((val64 &
-			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
-			     (!(val64 &
-				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
-			    ((val64 &
-			      VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
-			     (!(val64 &
-				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
-				     ))) {
-
-				sw_stats->error_stats.network_sustained_ok++;
-
-				writeq(
-				VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
-					&vp_reg->asic_ntwk_vp_err_mask);
-
-				__vxge_hw_device_handle_link_up_ind(hldev);
-				alarm_event = VXGE_HW_SET_LEVEL(
-					VXGE_HW_EVENT_LINK_UP, alarm_event);
-			}
-
-			writeq(VXGE_HW_INTR_MASK_ALL,
-				&vp_reg->asic_ntwk_vp_err_reg);
-
-			alarm_event = VXGE_HW_SET_LEVEL(
-				VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
-
-			if (skip_alarms)
-				return VXGE_HW_OK;
-		}
-	}
-
-	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
-
-		pic_status = readq(&vp_reg->vpath_ppif_int_status);
-
-		if (pic_status &
-		    VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
-
-			val64 = readq(&vp_reg->general_errors_reg);
-			mask64 = readq(&vp_reg->general_errors_mask);
-
-			if ((val64 &
-				VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
-				~mask64) {
-				sw_stats->error_stats.ini_serr_det++;
-
-				alarm_event = VXGE_HW_SET_LEVEL(
-					VXGE_HW_EVENT_SERR, alarm_event);
-			}
-
-			if ((val64 &
-			    VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
-				~mask64) {
-				sw_stats->error_stats.dblgen_fifo0_overflow++;
-
-				alarm_event = VXGE_HW_SET_LEVEL(
-					VXGE_HW_EVENT_FIFO_ERR, alarm_event);
-			}
-
-			if ((val64 &
-			    VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
-				~mask64)
-				sw_stats->error_stats.statsb_pif_chain_error++;
-
-			if ((val64 &
-			   VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
-				~mask64)
-				sw_stats->error_stats.statsb_drop_timeout++;
-
-			if ((val64 &
-				VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
-				~mask64)
-				sw_stats->error_stats.target_illegal_access++;
-
-			if (!skip_alarms) {
-				writeq(VXGE_HW_INTR_MASK_ALL,
-					&vp_reg->general_errors_reg);
-				alarm_event = VXGE_HW_SET_LEVEL(
-					VXGE_HW_EVENT_ALARM_CLEARED,
-					alarm_event);
-			}
-		}
-
-		if (pic_status &
-		    VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
-
-			val64 = readq(&vp_reg->kdfcctl_errors_reg);
-			mask64 = readq(&vp_reg->kdfcctl_errors_mask);
-
-			if ((val64 &
-			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
-				~mask64) {
-				sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
-
-				alarm_event = VXGE_HW_SET_LEVEL(
-					VXGE_HW_EVENT_FIFO_ERR,
-					alarm_event);
-			}
-
-			if ((val64 &
-			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
-				~mask64) {
-				sw_stats->error_stats.kdfcctl_fifo0_poison++;
-
-				alarm_event = VXGE_HW_SET_LEVEL(
-					VXGE_HW_EVENT_FIFO_ERR,
-					alarm_event);
-			}
-
-			if ((val64 &
-			    VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
-				~mask64) {
-				sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
-
-				alarm_event = VXGE_HW_SET_LEVEL(
-					VXGE_HW_EVENT_FIFO_ERR,
-					alarm_event);
-			}
-
-			if (!skip_alarms) {
-				writeq(VXGE_HW_INTR_MASK_ALL,
-					&vp_reg->kdfcctl_errors_reg);
-				alarm_event = VXGE_HW_SET_LEVEL(
-					VXGE_HW_EVENT_ALARM_CLEARED,
-					alarm_event);
-			}
-		}
-
-	}
-
-	if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
-
-		val64 = readq(&vp_reg->wrdma_alarm_status);
-
-		if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
-
-			val64 = readq(&vp_reg->prc_alarm_reg);
-			mask64 = readq(&vp_reg->prc_alarm_mask);
-
-			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP)&
-				~mask64)
-				sw_stats->error_stats.prc_ring_bumps++;
-
-			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
-				~mask64) {
-				sw_stats->error_stats.prc_rxdcm_sc_err++;
-
-				alarm_event = VXGE_HW_SET_LEVEL(
-					VXGE_HW_EVENT_VPATH_ERR,
-					alarm_event);
-			}
-
-			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
-				& ~mask64) {
-				sw_stats->error_stats.prc_rxdcm_sc_abort++;
-
-				alarm_event = VXGE_HW_SET_LEVEL(
-						VXGE_HW_EVENT_VPATH_ERR,
-						alarm_event);
-			}
-
-			if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
-				 & ~mask64) {
-				sw_stats->error_stats.prc_quanta_size_err++;
-
-				alarm_event = VXGE_HW_SET_LEVEL(
-					VXGE_HW_EVENT_VPATH_ERR,
-					alarm_event);
-			}
-
-			if (!skip_alarms) {
-				writeq(VXGE_HW_INTR_MASK_ALL,
-					&vp_reg->prc_alarm_reg);
-				alarm_event = VXGE_HW_SET_LEVEL(
-						VXGE_HW_EVENT_ALARM_CLEARED,
-						alarm_event);
-			}
-		}
-	}
-out:
-	hldev->stats.sw_dev_err_stats.vpath_alarms++;
-out2:
-	if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
-		(alarm_event == VXGE_HW_EVENT_UNKNOWN))
-		return VXGE_HW_OK;
-
-	__vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
-
-	if (alarm_event == VXGE_HW_EVENT_SERR)
-		return VXGE_HW_ERR_CRITICAL;
-
-	return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
-		VXGE_HW_ERR_SLOT_FREEZE :
-		(alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
-		VXGE_HW_ERR_VPATH;
-}
-
-/*
  * vxge_hw_vpath_alarm_process - Process Alarms.
  * @vpath: Virtual Path.
  * @skip_alarms: Do not clear the alarms
diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
index 9890d4d..8c3103f 100644
--- a/drivers/net/vxge/vxge-traffic.h
+++ b/drivers/net/vxge/vxge-traffic.h
@@ -1904,34 +1904,6 @@
 	VXGE_HW_RING_T_CODE_MULTI_ERR			= 0xF
 };
 
-/**
- * enum enum vxge_hw_ring_hash_type - RTH hash types
- * @VXGE_HW_RING_HASH_TYPE_NONE: No Hash
- * @VXGE_HW_RING_HASH_TYPE_TCP_IPV4: TCP IPv4
- * @VXGE_HW_RING_HASH_TYPE_UDP_IPV4: UDP IPv4
- * @VXGE_HW_RING_HASH_TYPE_IPV4: IPv4
- * @VXGE_HW_RING_HASH_TYPE_TCP_IPV6: TCP IPv6
- * @VXGE_HW_RING_HASH_TYPE_UDP_IPV6: UDP IPv6
- * @VXGE_HW_RING_HASH_TYPE_IPV6: IPv6
- * @VXGE_HW_RING_HASH_TYPE_TCP_IPV6_EX: TCP IPv6 extension
- * @VXGE_HW_RING_HASH_TYPE_UDP_IPV6_EX: UDP IPv6 extension
- * @VXGE_HW_RING_HASH_TYPE_IPV6_EX: IPv6 extension
- *
- * RTH hash types
- */
-enum vxge_hw_ring_hash_type {
-	VXGE_HW_RING_HASH_TYPE_NONE			= 0x0,
-	VXGE_HW_RING_HASH_TYPE_TCP_IPV4		= 0x1,
-	VXGE_HW_RING_HASH_TYPE_UDP_IPV4		= 0x2,
-	VXGE_HW_RING_HASH_TYPE_IPV4			= 0x3,
-	VXGE_HW_RING_HASH_TYPE_TCP_IPV6		= 0x4,
-	VXGE_HW_RING_HASH_TYPE_UDP_IPV6		= 0x5,
-	VXGE_HW_RING_HASH_TYPE_IPV6			= 0x6,
-	VXGE_HW_RING_HASH_TYPE_TCP_IPV6_EX	= 0x7,
-	VXGE_HW_RING_HASH_TYPE_UDP_IPV6_EX	= 0x8,
-	VXGE_HW_RING_HASH_TYPE_IPV6_EX		= 0x9
-};
-
 enum vxge_hw_status vxge_hw_ring_rxd_reserve(
 	struct __vxge_hw_ring *ring_handle,
 	void **rxdh);
@@ -2109,10 +2081,6 @@
 #endif
 };
 
-/* ========================= FIFO PRIVATE API ============================= */
-
-struct vxge_hw_fifo_attr;
-
 struct vxge_hw_mempool_cbs {
 	void (*item_func_alloc)(
 			struct vxge_hw_mempool *mempoolh,
@@ -2186,27 +2154,27 @@
 enum vxge_hw_status
 vxge_hw_vpath_mac_addr_add(
 	struct __vxge_hw_vpath_handle *vpath_handle,
-	u8 (macaddr)[ETH_ALEN],
-	u8 (macaddr_mask)[ETH_ALEN],
+	u8 *macaddr,
+	u8 *macaddr_mask,
 	enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode);
 
 enum vxge_hw_status
 vxge_hw_vpath_mac_addr_get(
 	struct __vxge_hw_vpath_handle *vpath_handle,
-	u8 (macaddr)[ETH_ALEN],
-	u8 (macaddr_mask)[ETH_ALEN]);
+	u8 *macaddr,
+	u8 *macaddr_mask);
 
 enum vxge_hw_status
 vxge_hw_vpath_mac_addr_get_next(
 	struct __vxge_hw_vpath_handle *vpath_handle,
-	u8 (macaddr)[ETH_ALEN],
-	u8 (macaddr_mask)[ETH_ALEN]);
+	u8 *macaddr,
+	u8 *macaddr_mask);
 
 enum vxge_hw_status
 vxge_hw_vpath_mac_addr_delete(
 	struct __vxge_hw_vpath_handle *vpath_handle,
-	u8 (macaddr)[ETH_ALEN],
-	u8 (macaddr_mask)[ETH_ALEN]);
+	u8 *macaddr,
+	u8 *macaddr_mask);
 
 enum vxge_hw_status
 vxge_hw_vpath_vid_add(
@@ -2313,6 +2281,7 @@
 
 int
 vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel);
+
 void
 vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id);
 
diff --git a/drivers/net/vxge/vxge-version.h b/drivers/net/vxge/vxge-version.h
index 53fefe1..ad2f99b 100644
--- a/drivers/net/vxge/vxge-version.h
+++ b/drivers/net/vxge/vxge-version.h
@@ -15,8 +15,35 @@
 #define VXGE_VERSION_H
 
 #define VXGE_VERSION_MAJOR	"2"
-#define VXGE_VERSION_MINOR	"0"
-#define VXGE_VERSION_FIX	"9"
-#define VXGE_VERSION_BUILD	"20840"
+#define VXGE_VERSION_MINOR	"5"
+#define VXGE_VERSION_FIX	"1"
+#define VXGE_VERSION_BUILD	"22082"
 #define VXGE_VERSION_FOR	"k"
+
+#define VXGE_FW_VER(maj, min, bld) (((maj) << 16) + ((min) << 8) + (bld))
+
+#define VXGE_DEAD_FW_VER_MAJOR	1
+#define VXGE_DEAD_FW_VER_MINOR	4
+#define VXGE_DEAD_FW_VER_BUILD	4
+
+#define VXGE_FW_DEAD_VER VXGE_FW_VER(VXGE_DEAD_FW_VER_MAJOR, \
+				     VXGE_DEAD_FW_VER_MINOR, \
+				     VXGE_DEAD_FW_VER_BUILD)
+
+#define VXGE_EPROM_FW_VER_MAJOR	1
+#define VXGE_EPROM_FW_VER_MINOR	6
+#define VXGE_EPROM_FW_VER_BUILD	1
+
+#define VXGE_EPROM_FW_VER VXGE_FW_VER(VXGE_EPROM_FW_VER_MAJOR, \
+				      VXGE_EPROM_FW_VER_MINOR, \
+				      VXGE_EPROM_FW_VER_BUILD)
+
+#define VXGE_CERT_FW_VER_MAJOR	1
+#define VXGE_CERT_FW_VER_MINOR	8
+#define VXGE_CERT_FW_VER_BUILD	1
+
+#define VXGE_CERT_FW_VER VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, \
+				     VXGE_CERT_FW_VER_MINOR, \
+				     VXGE_CERT_FW_VER_BUILD)
+
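A worked example of the packing, to make the whole-version comparisons above concrete:

	/*   VXGE_FW_VER(1, 8, 1) == (1 << 16) + (8 << 8) + 1 == 0x010801
	 *   VXGE_FW_VER(1, 6, 1) == 0x010601
	 * so plain integer comparisons order firmware versions correctly,
	 * provided minor and build numbers stay below 256. */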
 #endif
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index d81ad83..cf05504 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -498,7 +498,6 @@
 static int x25_asy_close(struct net_device *dev)
 {
 	struct x25_asy *sl = netdev_priv(dev);
-	int err;
 
 	spin_lock(&sl->lock);
 	if (sl->tty)
@@ -507,10 +506,6 @@
 	netif_stop_queue(dev);
 	sl->rcount = 0;
 	sl->xleft  = 0;
-	err = lapb_unregister(dev);
-	if (err != LAPB_OK)
-		printk(KERN_ERR "x25_asy_close: lapb_unregister error -%d\n",
-			err);
 	spin_unlock(&sl->lock);
 	return 0;
 }
@@ -595,6 +590,7 @@
 static void x25_asy_close_tty(struct tty_struct *tty)
 {
 	struct x25_asy *sl = tty->disc_data;
+	int err;
 
 	/* First make sure we're connected. */
 	if (!sl || sl->magic != X25_ASY_MAGIC)
@@ -605,6 +601,11 @@
 		dev_close(sl->dev);
 	rtnl_unlock();
 
+	err = lapb_unregister(sl->dev);
+	if (err != LAPB_OK)
+		printk(KERN_ERR "x25_asy_close: lapb_unregister error -%d\n",
+			err);
+
 	tty->disc_data = NULL;
 	sl->tty = NULL;
 	x25_asy_free(sl);
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index cdedab4..f060332 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -92,54 +92,6 @@
 		 "signal; values are appended to a list--setting one value "
 		 "as zero cleans the existing list and starts a new one.");
 
-static
-struct i2400m_work *__i2400m_work_setup(
-	struct i2400m *i2400m, void (*fn)(struct work_struct *),
-	gfp_t gfp_flags, const void *pl, size_t pl_size)
-{
-	struct i2400m_work *iw;
-
-	iw = kzalloc(sizeof(*iw) + pl_size, gfp_flags);
-	if (iw == NULL)
-		return NULL;
-	iw->i2400m = i2400m_get(i2400m);
-	iw->pl_size = pl_size;
-	memcpy(iw->pl, pl, pl_size);
-	INIT_WORK(&iw->ws, fn);
-	return iw;
-}
-
-
-/*
- * Schedule i2400m's specific work on the system's queue.
- *
- * Used for a few cases where we really need it; otherwise, identical
- * to i2400m_queue_work().
- *
- * Returns < 0 errno code on error, 1 if ok.
- *
- * If it returns zero, something really bad happened, as it means the
- * works struct was already queued, but we have just allocated it, so
- * it should not happen.
- */
-static int i2400m_schedule_work(struct i2400m *i2400m,
-			 void (*fn)(struct work_struct *), gfp_t gfp_flags,
-			 const void *pl, size_t pl_size)
-{
-	int result;
-	struct i2400m_work *iw;
-
-	result = -ENOMEM;
-	iw = __i2400m_work_setup(i2400m, fn, gfp_flags, pl, pl_size);
-	if (iw != NULL) {
-		result = schedule_work(&iw->ws);
-		if (WARN_ON(result == 0))
-			result = -ENXIO;
-	}
-	return result;
-}
-
-
 /*
  * WiMAX stack operation: relay a message from user space
  *
@@ -648,17 +600,11 @@
 static
 void __i2400m_dev_reset_handle(struct work_struct *ws)
 {
-	int result;
-	struct i2400m_work *iw = container_of(ws, struct i2400m_work, ws);
-	const char *reason;
-	struct i2400m *i2400m = iw->i2400m;
+	struct i2400m *i2400m = container_of(ws, struct i2400m, reset_ws);
+	const char *reason = i2400m->reset_reason;
 	struct device *dev = i2400m_dev(i2400m);
 	struct i2400m_reset_ctx *ctx = i2400m->reset_ctx;
-
-	if (WARN_ON(iw->pl_size != sizeof(reason)))
-		reason = "SW BUG: reason n/a";
-	else
-		memcpy(&reason, iw->pl, sizeof(reason));
+	int result;
 
 	d_fnstart(3, dev, "(ws %p i2400m %p reason %s)\n", ws, i2400m, reason);
 
@@ -733,8 +679,6 @@
 		}
 	}
 out:
-	i2400m_put(i2400m);
-	kfree(iw);
 	d_fnend(3, dev, "(ws %p i2400m %p reason %s) = void\n",
 		ws, i2400m, reason);
 }
@@ -754,8 +698,8 @@
  */
 int i2400m_dev_reset_handle(struct i2400m *i2400m, const char *reason)
 {
-	return i2400m_schedule_work(i2400m, __i2400m_dev_reset_handle,
-				    GFP_ATOMIC, &reason, sizeof(reason));
+	i2400m->reset_reason = reason;
+	return schedule_work(&i2400m->reset_ws);
 }
 EXPORT_SYMBOL_GPL(i2400m_dev_reset_handle);
 
@@ -768,14 +712,9 @@
 static
 void __i2400m_error_recovery(struct work_struct *ws)
 {
-	struct i2400m_work *iw = container_of(ws, struct i2400m_work, ws);
-	struct i2400m *i2400m = iw->i2400m;
+	struct i2400m *i2400m = container_of(ws, struct i2400m, recovery_ws);
 
 	i2400m_reset(i2400m, I2400M_RT_BUS);
-
-	i2400m_put(i2400m);
-	kfree(iw);
-	return;
 }
 
 /*
@@ -805,18 +744,10 @@
  */
 void i2400m_error_recovery(struct i2400m *i2400m)
 {
-	struct device *dev = i2400m_dev(i2400m);
-
-	if (atomic_add_return(1, &i2400m->error_recovery) == 1) {
-		if (i2400m_schedule_work(i2400m, __i2400m_error_recovery,
-			GFP_ATOMIC, NULL, 0) < 0) {
-			dev_err(dev, "run out of memory for "
-				"scheduling an error recovery ?\n");
-			atomic_dec(&i2400m->error_recovery);
-		}
-	} else
+	if (atomic_add_return(1, &i2400m->error_recovery) == 1)
+		schedule_work(&i2400m->recovery_ws);
+	else
 		atomic_dec(&i2400m->error_recovery);
-	return;
 }
 EXPORT_SYMBOL_GPL(i2400m_error_recovery);
 
@@ -886,6 +817,10 @@
 
 	mutex_init(&i2400m->init_mutex);
 	/* wake_tx_ws is initialized in i2400m_tx_setup() */
+
+	INIT_WORK(&i2400m->reset_ws, __i2400m_dev_reset_handle);
+	INIT_WORK(&i2400m->recovery_ws, __i2400m_error_recovery);
+
 	atomic_set(&i2400m->bus_reset_retries, 0);
 
 	i2400m->alive = 0;
@@ -1040,6 +975,9 @@
 
 	i2400m_dev_stop(i2400m);
 
+	cancel_work_sync(&i2400m->reset_ws);
+	cancel_work_sync(&i2400m->recovery_ws);
+
 	i2400m_debugfs_rm(i2400m);
 	sysfs_remove_group(&i2400m->wimax_dev.net_dev->dev.kobj,
 			   &i2400m_dev_attr_group);
@@ -1083,8 +1021,6 @@
 static
 void __exit i2400m_driver_exit(void)
 {
-	/* for scheds i2400m_dev_reset_handle() */
-	flush_scheduled_work();
 	i2400m_barker_db_exit();
 }
 module_exit(i2400m_driver_exit);
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
index 59ac770..17ecaa4 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -632,6 +632,11 @@
 	struct work_struct wake_tx_ws;
 	struct sk_buff *wake_tx_skb;
 
+	struct work_struct reset_ws;
+	const char *reset_reason;
+
+	struct work_struct recovery_ws;
+
 	struct dentry *debugfs_dentry;
 	const char *fw_name;		/* name of the current firmware image */
 	unsigned long fw_version;	/* version of the firmware interface */
@@ -896,20 +901,6 @@
 	return i2400m->wimax_dev.net_dev->dev.parent;
 }
 
-/*
- * Helper for scheduling simple work functions
- *
- * This struct can get any kind of payload attached (normally in the
- * form of a struct where you pack the stuff you want to pass to the
- * _work function).
- */
-struct i2400m_work {
-	struct work_struct ws;
-	struct i2400m *i2400m;
-	size_t pl_size;
-	u8 pl[0];
-};
-
 extern int i2400m_msg_check_status(const struct i2400m_l3l4_hdr *,
 				   char *, size_t);
 extern int i2400m_msg_size_check(struct i2400m *,
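
The i2400m hunks above follow the standard pattern for this conversion:
embed a struct work_struct in the long-lived device object and recover
the object in the handler with container_of(), instead of kmalloc'ing a
carrier struct (and taking a refcount) per event. A hedged sketch of the
pattern, using hypothetical mydev names rather than the driver's own:

#include <linux/workqueue.h>
#include <linux/printk.h>

struct mydev {
	struct work_struct reset_ws;
	const char *reset_reason;
};

static void mydev_reset_handler(struct work_struct *ws)
{
	struct mydev *dev = container_of(ws, struct mydev, reset_ws);

	pr_info("resetting: %s\n", dev->reset_reason);
}

/* init:     INIT_WORK(&dev->reset_ws, mydev_reset_handler);
 * trigger:  dev->reset_reason = "bus error"; schedule_work(&dev->reset_ws);
 * teardown: cancel_work_sync(&dev->reset_ws);
 */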
diff --git a/drivers/net/wimax/i2400m/sdio.c b/drivers/net/wimax/i2400m/sdio.c
index 9bfc26e..be428ca 100644
--- a/drivers/net/wimax/i2400m/sdio.c
+++ b/drivers/net/wimax/i2400m/sdio.c
@@ -590,7 +590,6 @@
 static
 void __exit i2400ms_driver_exit(void)
 {
-	flush_scheduled_work();	/* for the stuff we schedule */
 	sdio_unregister_driver(&i2400m_sdio_driver);
 }
 module_exit(i2400ms_driver_exit);
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index d3365ac..10e3ab3 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -780,7 +780,6 @@
 static
 void __exit i2400mu_driver_exit(void)
 {
-	flush_scheduled_work();	/* for the stuff we schedule from sysfs.c */
 	usb_deregister(&i2400mu_driver);
 }
 module_exit(i2400mu_driver_exit);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 1f5aa51..a16b3da 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -62,6 +62,7 @@
 static int ar9003_hw_power_interpolate(int32_t x,
 				       int32_t *px, int32_t *py, u_int16_t np);
 
+
 static const struct ar9300_eeprom ar9300_default = {
 	.eepromVersion = 2,
 	.templateVersion = 2,
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index dbb9869..18d63f5 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -858,7 +858,10 @@
 		return;
 	}
 
+	flush_work_sync(&ap->add_sta_proc_queue);
+
 #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
+	flush_work_sync(&ap->wds_oper_queue);
 	if (ap->crypt)
 		ap->crypt->deinit(ap->crypt_priv);
 	ap->crypt = ap->crypt_priv = NULL;
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index b7cb165..a8bddd8 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -3317,7 +3317,13 @@
 
 	unregister_netdev(local->dev);
 
-	flush_scheduled_work();
+	flush_work_sync(&local->reset_queue);
+	flush_work_sync(&local->set_multicast_list_queue);
+	flush_work_sync(&local->set_tim_queue);
+#ifndef PRISM2_NO_STATION_MODES
+	flush_work_sync(&local->info_queue);
+#endif
+	flush_work_sync(&local->comms_qual_update);
 
 	lib80211_crypt_info_free(&local->crypt_info);
 
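
Both hostap hunks above apply the same conversion: the global
flush_scheduled_work() (deprecated because it waits on unrelated work
items and can deadlock) is replaced by flushing each work item the
driver owns. A minimal sketch of the idiom, with hypothetical field
names:

#include <linux/workqueue.h>

struct local_info {
	struct work_struct reset_queue;
	struct work_struct set_tim_queue;
};

static void local_teardown(struct local_info *local)
{
	/* wait only for our own pending work, nothing system-wide */
	flush_work_sync(&local->reset_queue);
	flush_work_sync(&local->set_tim_queue);
}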
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index ade3025..6f383cd 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -209,9 +209,6 @@
 	boolean
 	default y if (RT2X00_LIB=y && LEDS_CLASS=y) || (RT2X00_LIB=m && LEDS_CLASS!=n)
 
-comment "rt2x00 leds support disabled due to modularized LEDS_CLASS and built-in rt2x00"
-	depends on RT2X00_LIB=y && LEDS_CLASS=m
-
 config RT2X00_LIB_DEBUGFS
 	bool "Ralink debugfs support"
 	depends on RT2X00_LIB && MAC80211_DEBUGFS
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index 30f8d40..6a9b660 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -117,6 +117,7 @@
 
 	/* Allocate a single memory block for values and addresses. */
 	count16 = 2*count;
+	/* zd_addr_t is __nocast, so the kmalloc needs an explicit cast */
 	a16 = (zd_addr_t *) kmalloc(count16 * (sizeof(zd_addr_t) + sizeof(u16)),
 		                   GFP_KERNEL);
 	if (!a16) {
diff --git a/drivers/net/xilinx_emaclite.c b/drivers/net/xilinx_emaclite.c
index 14f0955..de6c308 100644
--- a/drivers/net/xilinx_emaclite.c
+++ b/drivers/net/xilinx_emaclite.c
@@ -515,7 +515,7 @@
  */
 static int xemaclite_set_mac_address(struct net_device *dev, void *address)
 {
-	struct net_local *lp = (struct net_local *) netdev_priv(dev);
+	struct net_local *lp = netdev_priv(dev);
 	struct sockaddr *addr = address;
 
 	if (netif_running(dev))
@@ -534,7 +534,7 @@
  */
 static void xemaclite_tx_timeout(struct net_device *dev)
 {
-	struct net_local *lp = (struct net_local *) netdev_priv(dev);
+	struct net_local *lp = netdev_priv(dev);
 	unsigned long flags;
 
 	dev_err(&lp->ndev->dev, "Exceeded transmit timeout of %lu ms\n",
@@ -578,7 +578,7 @@
  */
 static void xemaclite_tx_handler(struct net_device *dev)
 {
-	struct net_local *lp = (struct net_local *) netdev_priv(dev);
+	struct net_local *lp = netdev_priv(dev);
 
 	dev->stats.tx_packets++;
 	if (lp->deferred_skb) {
@@ -605,7 +605,7 @@
  */
 static void xemaclite_rx_handler(struct net_device *dev)
 {
-	struct net_local *lp = (struct net_local *) netdev_priv(dev);
+	struct net_local *lp = netdev_priv(dev);
 	struct sk_buff *skb;
 	unsigned int align;
 	u32 len;
@@ -661,7 +661,7 @@
 {
 	bool tx_complete = 0;
 	struct net_device *dev = dev_id;
-	struct net_local *lp = (struct net_local *) netdev_priv(dev);
+	struct net_local *lp = netdev_priv(dev);
 	void __iomem *base_addr = lp->base_addr;
 	u32 tx_status;
 
@@ -918,7 +918,7 @@
  */
 static int xemaclite_open(struct net_device *dev)
 {
-	struct net_local *lp = (struct net_local *) netdev_priv(dev);
+	struct net_local *lp = netdev_priv(dev);
 	int retval;
 
 	/* Just to be safe, stop the device first */
@@ -987,7 +987,7 @@
  */
 static int xemaclite_close(struct net_device *dev)
 {
-	struct net_local *lp = (struct net_local *) netdev_priv(dev);
+	struct net_local *lp = netdev_priv(dev);
 
 	netif_stop_queue(dev);
 	xemaclite_disable_interrupts(lp);
@@ -1001,21 +1001,6 @@
 }
 
 /**
- * xemaclite_get_stats - Get the stats for the net_device
- * @dev:	Pointer to the network device
- *
- * This function returns the address of the 'net_device_stats' structure for the
- * given network device. This structure holds usage statistics for the network
- * device.
- *
- * Return:	Pointer to the net_device_stats structure.
- */
-static struct net_device_stats *xemaclite_get_stats(struct net_device *dev)
-{
-	return &dev->stats;
-}
-
-/**
  * xemaclite_send - Transmit a frame
  * @orig_skb:	Pointer to the socket buffer to be transmitted
  * @dev:	Pointer to the network device
@@ -1031,7 +1016,7 @@
  */
 static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
 {
-	struct net_local *lp = (struct net_local *) netdev_priv(dev);
+	struct net_local *lp = netdev_priv(dev);
 	struct sk_buff *new_skb;
 	unsigned int len;
 	unsigned long flags;
@@ -1068,7 +1053,7 @@
 static void xemaclite_remove_ndev(struct net_device *ndev)
 {
 	if (ndev) {
-		struct net_local *lp = (struct net_local *) netdev_priv(ndev);
+		struct net_local *lp = netdev_priv(ndev);
 
 		if (lp->base_addr)
 			iounmap((void __iomem __force *) (lp->base_addr));
@@ -1245,7 +1230,7 @@
 	struct device *dev = &of_dev->dev;
 	struct net_device *ndev = dev_get_drvdata(dev);
 
-	struct net_local *lp = (struct net_local *) netdev_priv(ndev);
+	struct net_local *lp = netdev_priv(ndev);
 
 	/* Un-register the mii_bus, if configured */
 	if (lp->has_mdio) {
@@ -1285,7 +1270,6 @@
 	.ndo_start_xmit		= xemaclite_send,
 	.ndo_set_mac_address	= xemaclite_set_mac_address,
 	.ndo_tx_timeout		= xemaclite_tx_timeout,
-	.ndo_get_stats		= xemaclite_get_stats,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = xemaclite_poll_controller,
 #endif
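
Two mechanical cleanups run through the xilinx_emaclite hunks above:
netdev_priv() returns void *, so the (struct net_local *) casts are
redundant, and an ndo_get_stats hook that merely returns &dev->stats
duplicates what the networking core already does when the hook is
absent. A short sketch (hypothetical struct, not the driver's own):

#include <linux/netdevice.h>

struct net_local {
	void __iomem *base_addr;
};

static void example(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);	/* no cast needed */

	(void)lp->base_addr;
}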
diff --git a/drivers/net/znet.c b/drivers/net/znet.c
index c3a3292..ae07b3d 100644
--- a/drivers/net/znet.c
+++ b/drivers/net/znet.c
@@ -124,7 +124,7 @@
 #define TX_BUF_SIZE 8192
 #define DMA_BUF_SIZE (RX_BUF_SIZE + 16)	/* 8k + 16 bytes for trailers */
 
-#define TX_TIMEOUT	10
+#define TX_TIMEOUT	(HZ/10)
 
 struct znet_private {
 	int rx_dma, tx_dma;
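
The znet change above matters because the netdev watchdog timeout is
measured in jiffies: a bare 10 means 10 timer ticks, which is 100 ms at
HZ=100 but only 10 ms at HZ=1000. Scaling by HZ pins the timeout to
wall-clock time (an illustrative restatement, not new code):

/* 10 jiffies varies with the kernel's tick rate ... */
#define TX_TIMEOUT_TICKS	10		/* 10 ms .. 100 ms   */
/* ... while HZ/10 jiffies is always one tenth of a second */
#define TX_TIMEOUT		(HZ / 10)	/* ~100 ms at any HZ */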
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index a87c498..3a5a6fc 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -13,7 +13,6 @@
 #include <linux/spinlock.h>
 #include <linux/pci.h>
 #include <linux/msi.h>
-#include <xen/xenbus.h>
 #include <xen/interface/io/pciif.h>
 #include <asm/xen/pci.h>
 #include <linux/interrupt.h>
@@ -576,8 +575,9 @@
 
 	pcidev = pci_get_bus_and_slot(bus, devfn);
 	if (!pcidev || !pcidev->driver) {
-		dev_err(&pcidev->dev,
-			"device or driver is NULL\n");
+		dev_err(&pdev->xdev->dev, "device or AER driver is NULL\n");
+		if (pcidev)
+			pci_dev_put(pcidev);
 		return result;
 	}
 	pdrv = pcidev->driver;
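
The xen-pcifront fix above corrects two things at once: dev_err() was
dereferencing pcidev on the very path where pcidev may be NULL, and the
reference taken by pci_get_bus_and_slot() leaked on that early return.
A hedged sketch of the reference rule (hypothetical function name):

#include <linux/pci.h>

static int probe_slot(unsigned int bus, unsigned int devfn)
{
	struct pci_dev *pcidev = pci_get_bus_and_slot(bus, devfn);

	if (!pcidev || !pcidev->driver) {
		if (pcidev)
			pci_dev_put(pcidev);	/* drop the get's reference */
		return -ENODEV;
	}
	/* ... use pcidev ... */
	pci_dev_put(pcidev);
	return 0;
}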
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index 68cf0c9..7b5080c 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -1159,11 +1159,11 @@
 
 	list_for_each_entry(port, &rio_mports, node) {
 		if (!request_mem_region(port->iores.start,
-					port->iores.end - port->iores.start,
+					resource_size(&port->iores),
 					port->name)) {
 			printk(KERN_ERR
 			       "RIO: Error requesting master port region 0x%016llx-0x%016llx\n",
-			       (u64)port->iores.start, (u64)port->iores.end - 1);
+			       (u64)port->iores.start, (u64)port->iores.end);
 			rc = -ENOMEM;
 			goto out;
 		}
diff --git a/drivers/rtc/rtc-ds1302.c b/drivers/rtc/rtc-ds1302.c
index 359d1e0..f0d6389 100644
--- a/drivers/rtc/rtc-ds1302.c
+++ b/drivers/rtc/rtc-ds1302.c
@@ -35,7 +35,7 @@
 
 #ifdef CONFIG_SH_SECUREEDGE5410
 #include <asm/rtc.h>
-#include <mach/snapgear.h>
+#include <mach/secureedge5410.h>
 
 #define	RTC_RESET	0x1000
 #define	RTC_IODATA	0x0800
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index 0f19d54..c9f13b9 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -1188,7 +1188,8 @@
 	spin_lock_irqsave(&card->ipm_lock, flags);
 	list_for_each(l, &card->ipm_list) {
 		ipm = list_entry(l, struct lcs_ipm_list, list);
-		for (im4 = in4_dev->mc_list; im4 != NULL; im4 = im4->next) {
+		for (im4 = rcu_dereference(in4_dev->mc_list);
+		     im4 != NULL; im4 = rcu_dereference(im4->next_rcu)) {
 			lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev);
 			if ( (ipm->ipm.ip_addr == im4->multiaddr) &&
 			     (memcmp(buf, &ipm->ipm.mac_addr,
@@ -1233,7 +1234,8 @@
 	unsigned long flags;
 
 	LCS_DBF_TEXT(4, trace, "setmclst");
-	for (im4 = in4_dev->mc_list; im4; im4 = im4->next) {
+	for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL;
+	     im4 = rcu_dereference(im4->next_rcu)) {
 		lcs_get_mac_for_ipm(im4->multiaddr, buf, card->dev);
 		ipm = lcs_check_addr_entry(card, im4, buf);
 		if (ipm != NULL)
@@ -1269,10 +1271,10 @@
 	in4_dev = in_dev_get(card->dev);
 	if (in4_dev == NULL)
 		goto out;
-	read_lock(&in4_dev->mc_list_lock);
+	rcu_read_lock();
 	lcs_remove_mc_addresses(card,in4_dev);
 	lcs_set_mc_addresses(card, in4_dev);
-	read_unlock(&in4_dev->mc_list_lock);
+	rcu_read_unlock();
 	in_dev_put(in4_dev);
 
 	netif_carrier_off(card->dev);
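
The lcs hunks above (and the matching qeth_l3 ones later in this series)
track a core networking change: the IPv4 multicast list moved from the
mc_list_lock reader/writer lock to RCU protection, so readers bracket
the walk with rcu_read_lock() and fetch each link through
rcu_dereference(). A minimal sketch of the read side:

#include <linux/rcupdate.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/printk.h>

static void walk_mc_list(struct in_device *in4_dev)
{
	struct ip_mc_list *im4;

	rcu_read_lock();
	for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL;
	     im4 = rcu_dereference(im4->next_rcu))
		pr_debug("group %pI4\n", &im4->multiaddr);
	rcu_read_unlock();
}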
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 6be43eb..f47a714 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -440,7 +440,6 @@
 	 * index of buffer to be filled by driver; state EMPTY or PACKING
 	 */
 	int next_buf_to_fill;
-	int sync_iqdio_error;
 	/*
 	 * number of buffers that are currently filled (PRIMED)
 	 * -> these buffers are hardware-owned
@@ -695,14 +694,6 @@
 	int is_vmac;
 };
 
-struct qeth_skb_data {
-	__u32 magic;
-	int count;
-};
-
-#define QETH_SKB_MAGIC 0x71657468
-#define QETH_SIGA_CC2_RETRIES 3
-
 struct qeth_rx {
 	int b_count;
 	int b_index;
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 7642670..b7d9dc0 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -877,8 +877,8 @@
 	return;
 }
 
-static void __qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
-		 struct qeth_qdio_out_buffer *buf, unsigned int qeth_skip_skb)
+static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
+		struct qeth_qdio_out_buffer *buf)
 {
 	int i;
 	struct sk_buff *skb;
@@ -887,13 +887,11 @@
 	if (buf->buffer->element[0].flags & 0x40)
 		atomic_dec(&queue->set_pci_flags_count);
 
-	if (!qeth_skip_skb) {
+	skb = skb_dequeue(&buf->skb_list);
+	while (skb) {
+		atomic_dec(&skb->users);
+		dev_kfree_skb_any(skb);
 		skb = skb_dequeue(&buf->skb_list);
-		while (skb) {
-			atomic_dec(&skb->users);
-			dev_kfree_skb_any(skb);
-			skb = skb_dequeue(&buf->skb_list);
-		}
 	}
 	for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) {
 		if (buf->buffer->element[i].addr && buf->is_header[i])
@@ -909,12 +907,6 @@
 	atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
 }
 
-static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
-		struct qeth_qdio_out_buffer *buf)
-{
-	__qeth_clear_output_buffer(queue, buf, 0);
-}
-
 void qeth_clear_qdio_buffers(struct qeth_card *card)
 {
 	int i, j;
@@ -2833,7 +2825,6 @@
 		}
 	}
 
-	queue->sync_iqdio_error = 0;
 	queue->card->dev->trans_start = jiffies;
 	if (queue->card->options.performance_stats) {
 		queue->card->perf_stats.outbound_do_qdio_cnt++;
@@ -2849,10 +2840,7 @@
 		queue->card->perf_stats.outbound_do_qdio_time +=
 			qeth_get_micros() -
 			queue->card->perf_stats.outbound_do_qdio_start_time;
-	if (rc > 0) {
-		if (!(rc & QDIO_ERROR_SIGA_BUSY))
-			queue->sync_iqdio_error = rc & 3;
-	}
+	atomic_add(count, &queue->used_buffers);
 	if (rc) {
 		queue->card->stats.tx_errors += count;
 		/* ignore temporary SIGA errors without busy condition */
@@ -2866,7 +2854,6 @@
 		qeth_schedule_recovery(queue->card);
 		return;
 	}
-	atomic_add(count, &queue->used_buffers);
 	if (queue->card->options.performance_stats)
 		queue->card->perf_stats.bufs_sent += count;
 }
@@ -2916,7 +2903,7 @@
 {
 	struct qeth_card *card = (struct qeth_card *)card_ptr;
 
-	if (card->dev)
+	if (card->dev && (card->dev->flags & IFF_UP))
 		napi_schedule(&card->napi);
 }
 EXPORT_SYMBOL_GPL(qeth_qdio_start_poll);
@@ -2940,7 +2927,6 @@
 	struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
 	struct qeth_qdio_out_buffer *buffer;
 	int i;
-	unsigned qeth_send_err;
 
 	QETH_CARD_TEXT(card, 6, "qdouhdl");
 	if (qdio_error & QDIO_ERROR_ACTIVATE_CHECK_CONDITION) {
@@ -2956,9 +2942,8 @@
 	}
 	for (i = first_element; i < (first_element + count); ++i) {
 		buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q];
-		qeth_send_err = qeth_handle_send_error(card, buffer, qdio_error);
-		__qeth_clear_output_buffer(queue, buffer,
-			(qeth_send_err == QETH_SEND_ERROR_RETRY) ? 1 : 0);
+		qeth_handle_send_error(card, buffer, qdio_error);
+		qeth_clear_output_buffer(queue, buffer);
 	}
 	atomic_sub(count, &queue->used_buffers);
 	/* check if we need to do something on this outbound queue */
@@ -3183,10 +3168,7 @@
 		int offset, int hd_len)
 {
 	struct qeth_qdio_out_buffer *buffer;
-	struct sk_buff *skb1;
-	struct qeth_skb_data *retry_ctrl;
 	int index;
-	int rc;
 
 	/* spin until we get the queue ... */
 	while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
@@ -3205,25 +3187,6 @@
 	atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
 	qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len);
 	qeth_flush_buffers(queue, index, 1);
-	if (queue->sync_iqdio_error == 2) {
-		skb1 = skb_dequeue(&buffer->skb_list);
-		while (skb1) {
-			atomic_dec(&skb1->users);
-			skb1 = skb_dequeue(&buffer->skb_list);
-		}
-		retry_ctrl = (struct qeth_skb_data *) &skb->cb[16];
-		if (retry_ctrl->magic != QETH_SKB_MAGIC) {
-			retry_ctrl->magic = QETH_SKB_MAGIC;
-			retry_ctrl->count = 0;
-		}
-		if (retry_ctrl->count < QETH_SIGA_CC2_RETRIES) {
-			retry_ctrl->count++;
-			rc = dev_queue_xmit(skb);
-		} else {
-			dev_kfree_skb_any(skb);
-			QETH_CARD_TEXT(card, 2, "qrdrop");
-		}
-	}
 	return 0;
 out:
 	atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index e37dd8c..07d5888 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -333,7 +333,7 @@
 	__u16 request_bits;
 	__u16 reply_bits;
 	__u32 no_entries;
-	char data;
+	char data; /* only for replies */
 } __attribute__((packed));
 
 /* used as parameter for arp_query reply */
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 42fa783..b5e967c 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -372,7 +372,7 @@
 	i = simple_strtoul(buf, &tmp, 16);
 	if ((i == 0) || (i == 1)) {
 		if (i == card->options.performance_stats)
-			goto out;;
+			goto out;
 		card->options.performance_stats = i;
 		if (i == 0)
 			memset(&card->perf_stats, 0,
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 847e879..7a7a1b6 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -849,8 +849,6 @@
 	card->state = CARD_STATE_UP;
 	netif_start_queue(dev);
 
-	if (!card->lan_online && netif_carrier_ok(dev))
-		netif_carrier_off(dev);
 	if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) {
 		napi_enable(&card->napi);
 		napi_schedule(&card->napi);
@@ -1013,13 +1011,14 @@
 			dev_warn(&card->gdev->dev,
 				"The LAN is offline\n");
 			card->lan_online = 0;
-			goto out;
+			goto contin;
 		}
 		rc = -ENODEV;
 		goto out_remove;
 	} else
 		card->lan_online = 1;
 
+contin:
 	if ((card->info.type == QETH_CARD_TYPE_OSD) ||
 	    (card->info.type == QETH_CARD_TYPE_OSX))
 		/* configure isolation level */
@@ -1038,7 +1037,10 @@
 		goto out_remove;
 	}
 	card->state = CARD_STATE_SOFTSETUP;
-	netif_carrier_on(card->dev);
+	if (card->lan_online)
+		netif_carrier_on(card->dev);
+	else
+		netif_carrier_off(card->dev);
 
 	qeth_set_allowed_threads(card, 0xffffffff, 0);
 	if (recover_flag == CARD_STATE_RECOVER) {
@@ -1055,7 +1057,6 @@
 	}
 	/* let user_space know that device is online */
 	kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
-out:
 	mutex_unlock(&card->conf_mutex);
 	mutex_unlock(&card->discipline_mutex);
 	return 0;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 74d1401..e227e46 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -30,6 +30,7 @@
 
 #include "qeth_l3.h"
 
+
 static int qeth_l3_set_offline(struct ccwgroup_device *);
 static int qeth_l3_recover(void *);
 static int qeth_l3_stop(struct net_device *);
@@ -455,8 +456,11 @@
 	QETH_CARD_TEXT(card, 2, "sdiplist");
 	QETH_CARD_HEX(card, 2, &card, sizeof(void *));
 
-	if (card->options.sniffer)
+	if ((card->state != CARD_STATE_UP &&
+	     card->state != CARD_STATE_SOFTSETUP) || card->options.sniffer) {
 		return;
+	}
+
 	spin_lock_irqsave(&card->ip_lock, flags);
 	tbd_list = card->ip_tbd_list;
 	card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC);
@@ -1796,7 +1800,8 @@
 	char buf[MAX_ADDR_LEN];
 
 	QETH_CARD_TEXT(card, 4, "addmc");
-	for (im4 = in4_dev->mc_list; im4; im4 = im4->next) {
+	for (im4 = rcu_dereference(in4_dev->mc_list); im4 != NULL;
+	     im4 = rcu_dereference(im4->next_rcu)) {
 		qeth_l3_get_mac_for_ipm(im4->multiaddr, buf, in4_dev->dev);
 		ipm = qeth_l3_get_addr_buffer(QETH_PROT_IPV4);
 		if (!ipm)
@@ -1828,9 +1833,9 @@
 		in_dev = in_dev_get(netdev);
 		if (!in_dev)
 			continue;
-		read_lock(&in_dev->mc_list_lock);
+		rcu_read_lock();
 		qeth_l3_add_mc(card, in_dev);
-		read_unlock(&in_dev->mc_list_lock);
+		rcu_read_unlock();
 		in_dev_put(in_dev);
 	}
 }
@@ -1843,10 +1848,10 @@
 	in4_dev = in_dev_get(card->dev);
 	if (in4_dev == NULL)
 		return;
-	read_lock(&in4_dev->mc_list_lock);
+	rcu_read_lock();
 	qeth_l3_add_mc(card, in4_dev);
 	qeth_l3_add_vlan_mc(card);
-	read_unlock(&in4_dev->mc_list_lock);
+	rcu_read_unlock();
 	in_dev_put(in4_dev);
 }
 
@@ -2454,22 +2459,46 @@
 	return rc;
 }
 
-static void qeth_l3_copy_arp_entries_stripped(struct qeth_arp_query_info *qinfo,
-		struct qeth_arp_query_data *qdata, int entry_size,
-		int uentry_size)
+static __u32 get_arp_entry_size(struct qeth_card *card,
+			struct qeth_arp_query_data *qdata,
+			struct qeth_arp_entrytype *type, __u8 strip_entries)
 {
-	char *entry_ptr;
-	char *uentry_ptr;
-	int i;
+	__u32 rc;
+	__u8 is_hsi;
 
-	entry_ptr = (char *)&qdata->data;
-	uentry_ptr = (char *)(qinfo->udata + qinfo->udata_offset);
-	for (i = 0; i < qdata->no_entries; ++i) {
-		/* strip off 32 bytes "media specific information" */
-		memcpy(uentry_ptr, (entry_ptr + 32), entry_size - 32);
-		entry_ptr += entry_size;
-		uentry_ptr += uentry_size;
+	is_hsi = qdata->reply_bits == 5;
+	if (type->ip == QETHARP_IP_ADDR_V4) {
+		QETH_CARD_TEXT(card, 4, "arpev4");
+		if (strip_entries) {
+			rc = is_hsi ? sizeof(struct qeth_arp_qi_entry5_short) :
+				sizeof(struct qeth_arp_qi_entry7_short);
+		} else {
+			rc = is_hsi ? sizeof(struct qeth_arp_qi_entry5) :
+				sizeof(struct qeth_arp_qi_entry7);
+		}
+	} else if (type->ip == QETHARP_IP_ADDR_V6) {
+		QETH_CARD_TEXT(card, 4, "arpev6");
+		if (strip_entries) {
+			rc = is_hsi ?
+				sizeof(struct qeth_arp_qi_entry5_short_ipv6) :
+				sizeof(struct qeth_arp_qi_entry7_short_ipv6);
+		} else {
+			rc = is_hsi ?
+				sizeof(struct qeth_arp_qi_entry5_ipv6) :
+				sizeof(struct qeth_arp_qi_entry7_ipv6);
+		}
+	} else {
+		QETH_CARD_TEXT(card, 4, "arpinv");
+		rc = 0;
 	}
+
+	return rc;
+}
+
+static int arpentry_matches_prot(struct qeth_arp_entrytype *type, __u16 prot)
+{
+	return (type->ip == QETHARP_IP_ADDR_V4 && prot == QETH_PROT_IPV4) ||
+		(type->ip == QETHARP_IP_ADDR_V6 && prot == QETH_PROT_IPV6);
 }
 
 static int qeth_l3_arp_query_cb(struct qeth_card *card,
@@ -2478,72 +2507,77 @@
 	struct qeth_ipa_cmd *cmd;
 	struct qeth_arp_query_data *qdata;
 	struct qeth_arp_query_info *qinfo;
-	int entry_size;
-	int uentry_size;
 	int i;
+	int e;
+	int entrybytes_done;
+	int stripped_bytes;
+	__u8 do_strip_entries;
 
-	QETH_CARD_TEXT(card, 4, "arpquecb");
+	QETH_CARD_TEXT(card, 3, "arpquecb");
 
 	qinfo = (struct qeth_arp_query_info *) reply->param;
 	cmd = (struct qeth_ipa_cmd *) data;
+	QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.prot_version);
 	if (cmd->hdr.return_code) {
-		QETH_CARD_TEXT_(card, 4, "qaer1%i", cmd->hdr.return_code);
+		QETH_CARD_TEXT(card, 4, "arpcberr");
+		QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.return_code);
 		return 0;
 	}
 	if (cmd->data.setassparms.hdr.return_code) {
 		cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
-		QETH_CARD_TEXT_(card, 4, "qaer2%i", cmd->hdr.return_code);
+		QETH_CARD_TEXT(card, 4, "setaperr");
+		QETH_CARD_TEXT_(card, 4, "%i", cmd->hdr.return_code);
 		return 0;
 	}
 	qdata = &cmd->data.setassparms.data.query_arp;
-	switch (qdata->reply_bits) {
-	case 5:
-		uentry_size = entry_size = sizeof(struct qeth_arp_qi_entry5);
-		if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
-			uentry_size = sizeof(struct qeth_arp_qi_entry5_short);
-		break;
-	case 7:
-		/* fall through to default */
-	default:
-		/* tr is the same as eth -> entry7 */
-		uentry_size = entry_size = sizeof(struct qeth_arp_qi_entry7);
-		if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
-			uentry_size = sizeof(struct qeth_arp_qi_entry7_short);
-		break;
-	}
-	/* check if there is enough room in userspace */
-	if ((qinfo->udata_len - qinfo->udata_offset) <
-			qdata->no_entries * uentry_size){
-		QETH_CARD_TEXT_(card, 4, "qaer3%i", -ENOMEM);
-		cmd->hdr.return_code = -ENOMEM;
-		goto out_error;
-	}
-	QETH_CARD_TEXT_(card, 4, "anore%i",
-		       cmd->data.setassparms.hdr.number_of_replies);
-	QETH_CARD_TEXT_(card, 4, "aseqn%i", cmd->data.setassparms.hdr.seq_no);
 	QETH_CARD_TEXT_(card, 4, "anoen%i", qdata->no_entries);
 
-	if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) {
-		/* strip off "media specific information" */
-		qeth_l3_copy_arp_entries_stripped(qinfo, qdata, entry_size,
-					       uentry_size);
-	} else
-		/*copy entries to user buffer*/
-		memcpy(qinfo->udata + qinfo->udata_offset,
-		       (char *)&qdata->data, qdata->no_entries*uentry_size);
+	do_strip_entries = (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES) > 0;
+	stripped_bytes = do_strip_entries ? QETH_QARP_MEDIASPECIFIC_BYTES : 0;
+	entrybytes_done = 0;
+	for (e = 0; e < qdata->no_entries; ++e) {
+		char *cur_entry;
+		__u32 esize;
+		struct qeth_arp_entrytype *etype;
 
-	qinfo->no_entries += qdata->no_entries;
-	qinfo->udata_offset += (qdata->no_entries*uentry_size);
+		cur_entry = &qdata->data + entrybytes_done;
+		etype = &((struct qeth_arp_qi_entry5 *) cur_entry)->type;
+		if (!arpentry_matches_prot(etype, cmd->hdr.prot_version)) {
+			QETH_CARD_TEXT(card, 4, "pmis");
+			QETH_CARD_TEXT_(card, 4, "%i", etype->ip);
+			break;
+		}
+		esize = get_arp_entry_size(card, qdata, etype,
+			do_strip_entries);
+		QETH_CARD_TEXT_(card, 5, "esz%i", esize);
+		if (!esize)
+			break;
+
+		if ((qinfo->udata_len - qinfo->udata_offset) < esize) {
+			QETH_CARD_TEXT_(card, 4, "qaer3%i", -ENOMEM);
+			cmd->hdr.return_code = -ENOMEM;
+			goto out_error;
+		}
+
+		memcpy(qinfo->udata + qinfo->udata_offset,
+			&qdata->data + entrybytes_done + stripped_bytes,
+			esize);
+		entrybytes_done += esize + stripped_bytes;
+		qinfo->udata_offset += esize;
+		++qinfo->no_entries;
+	}
 	/* check if all replies received ... */
 	if (cmd->data.setassparms.hdr.seq_no <
 	    cmd->data.setassparms.hdr.number_of_replies)
 		return 1;
+	QETH_CARD_TEXT_(card, 4, "nove%i", qinfo->no_entries);
 	memcpy(qinfo->udata, &qinfo->no_entries, 4);
 	/* keep STRIP_ENTRIES flag so the user program can distinguish
 	 * stripped entries from normal ones */
 	if (qinfo->mask_bits & QETH_QARP_STRIP_ENTRIES)
 		qdata->reply_bits |= QETH_QARP_STRIP_ENTRIES;
 	memcpy(qinfo->udata + QETH_QARP_MASK_OFFSET, &qdata->reply_bits, 2);
+	QETH_CARD_TEXT_(card, 4, "rc%i", 0);
 	return 0;
 out_error:
 	i = 0;
@@ -2566,45 +2600,86 @@
 				      reply_cb, reply_param);
 }
 
-static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata)
+static int qeth_l3_query_arp_cache_info(struct qeth_card *card,
+	enum qeth_prot_versions prot,
+	struct qeth_arp_query_info *qinfo)
 {
 	struct qeth_cmd_buffer *iob;
-	struct qeth_arp_query_info qinfo = {0, };
+	struct qeth_ipa_cmd *cmd;
 	int tmp;
 	int rc;
 
+	QETH_CARD_TEXT_(card, 3, "qarpipv%i", prot);
+
+	iob = qeth_l3_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
+			IPA_CMD_ASS_ARP_QUERY_INFO,
+			sizeof(struct qeth_arp_query_data) - sizeof(char),
+			prot);
+	cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+	cmd->data.setassparms.data.query_arp.request_bits = 0x000F;
+	cmd->data.setassparms.data.query_arp.reply_bits = 0;
+	cmd->data.setassparms.data.query_arp.no_entries = 0;
+	rc = qeth_l3_send_ipa_arp_cmd(card, iob,
+			   QETH_SETASS_BASE_LEN+QETH_ARP_CMD_LEN,
+			   qeth_l3_arp_query_cb, (void *)qinfo);
+	if (rc) {
+		tmp = rc;
+		QETH_DBF_MESSAGE(2,
+			"Error while querying ARP cache on %s: %s "
+			"(0x%x/%d)\n", QETH_CARD_IFNAME(card),
+			qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
+	}
+
+	return rc;
+}
+
+static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata)
+{
+	struct qeth_arp_query_info qinfo = {0, };
+	int rc;
+
 	QETH_CARD_TEXT(card, 3, "arpquery");
 
 	if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/
 			       IPA_ARP_PROCESSING)) {
-		return -EOPNOTSUPP;
+		QETH_CARD_TEXT(card, 3, "arpqnsup");
+		rc = -EOPNOTSUPP;
+		goto out;
 	}
 	/* get size of userspace buffer and mask_bits -> 6 bytes */
-	if (copy_from_user(&qinfo, udata, 6))
-		return -EFAULT;
+	if (copy_from_user(&qinfo, udata, 6)) {
+		rc = -EFAULT;
+		goto out;
+	}
 	qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
-	if (!qinfo.udata)
-		return -ENOMEM;
+	if (!qinfo.udata) {
+		rc = -ENOMEM;
+		goto out;
+	}
 	qinfo.udata_offset = QETH_QARP_ENTRIES_OFFSET;
-	iob = qeth_l3_get_setassparms_cmd(card, IPA_ARP_PROCESSING,
-				       IPA_CMD_ASS_ARP_QUERY_INFO,
-				       sizeof(int), QETH_PROT_IPV4);
-
-	rc = qeth_l3_send_ipa_arp_cmd(card, iob,
-				   QETH_SETASS_BASE_LEN+QETH_ARP_CMD_LEN,
-				   qeth_l3_arp_query_cb, (void *)&qinfo);
+	rc = qeth_l3_query_arp_cache_info(card, QETH_PROT_IPV4, &qinfo);
 	if (rc) {
-		tmp = rc;
-		QETH_DBF_MESSAGE(2, "Error while querying ARP cache on %s: %s "
-			"(0x%x/%d)\n", QETH_CARD_IFNAME(card),
-			qeth_l3_arp_get_error_cause(&rc), tmp, tmp);
 		if (copy_to_user(udata, qinfo.udata, 4))
 			rc = -EFAULT;
+		goto free_and_out;
 	} else {
-		if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
+#ifdef CONFIG_QETH_IPV6
+		if (qinfo.mask_bits & QETH_QARP_WITH_IPV6) {
+			/* fails in case of GuestLAN QDIO mode */
+			qeth_l3_query_arp_cache_info(card, QETH_PROT_IPV6,
+				&qinfo);
+		}
+#endif
+		if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) {
+			QETH_CARD_TEXT(card, 4, "qactf");
 			rc = -EFAULT;
+			goto free_and_out;
+		}
+		QETH_CARD_TEXT(card, 4, "qacts");
 	}
+free_and_out:
 	kfree(qinfo.udata);
+out:
 	return rc;
 }
 
@@ -2938,6 +3013,7 @@
 
 	/*fix header to TSO values ...*/
 	hdr->hdr.hdr.l3.id = QETH_HEADER_TYPE_TSO;
+	hdr->hdr.hdr.l3.length = skb->len - sizeof(struct qeth_hdr_tso);
 	/*set values which are fix for the first approach ...*/
 	hdr->ext.hdr_tot_len = (__u16) sizeof(struct qeth_hdr_ext_tso);
 	hdr->ext.imb_hdr_no  = 1;
@@ -3039,7 +3115,7 @@
 				skb_pull(new_skb, ETH_HLEN);
 		}
 
-		if (ipv == 6 && card->vlangrp &&
+		if (ipv != 4 && card->vlangrp &&
 				vlan_tx_tag_present(new_skb)) {
 			skb_push(new_skb, VLAN_HLEN);
 			skb_copy_to_linear_data(new_skb, new_skb->data + 4, 4);
@@ -3176,8 +3252,6 @@
 	card->state = CARD_STATE_UP;
 	netif_start_queue(dev);
 
-	if (!card->lan_online && netif_carrier_ok(dev))
-		netif_carrier_off(dev);
 	if (qdio_stop_irq(card->data.ccwdev, 0) >= 0) {
 		napi_enable(&card->napi);
 		napi_schedule(&card->napi);
@@ -3449,13 +3523,14 @@
 			dev_warn(&card->gdev->dev,
 				"The LAN is offline\n");
 			card->lan_online = 0;
-			goto out;
+			goto contin;
 		}
 		rc = -ENODEV;
 		goto out_remove;
 	} else
 		card->lan_online = 1;
 
+contin:
 	rc = qeth_l3_setadapter_parms(card);
 	if (rc)
 		QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
@@ -3480,10 +3555,13 @@
 		goto out_remove;
 	}
 	card->state = CARD_STATE_SOFTSETUP;
-	netif_carrier_on(card->dev);
 
 	qeth_set_allowed_threads(card, 0xffffffff, 0);
 	qeth_l3_set_ip_addr_list(card);
+	if (card->lan_online)
+		netif_carrier_on(card->dev);
+	else
+		netif_carrier_off(card->dev);
 	if (recover_flag == CARD_STATE_RECOVER) {
 		if (recovery_mode)
 			qeth_l3_open(card->dev);
@@ -3496,7 +3574,6 @@
 	}
 	/* let user_space know that device is online */
 	kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
-out:
 	mutex_unlock(&card->conf_mutex);
 	mutex_unlock(&card->discipline_mutex);
 	return 0;
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 1de30eb..f3cf924 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -320,19 +320,11 @@
 				    "changed. The Linux SCSI layer does not "
 				    "automatically adjust these parameters.\n");
 
-		if (scmd->request->cmd_flags & REQ_HARDBARRIER)
-			/*
-			 * barrier requests should always retry on UA
-			 * otherwise block will get a spurious error
-			 */
-			return NEEDS_RETRY;
-		else
-			/*
-			 * for normal (non barrier) commands, pass the
-			 * UA upwards for a determination in the
-			 * completion functions
-			 */
-			return SUCCESS;
+		/*
+		 * Pass the UA upwards for a determination in the completion
+		 * functions.
+		 */
+		return SUCCESS;
 
 		/* these three are not supported */
 	case COPY_ABORTED:
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index 4d8e14b..dd5e1ac 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -2343,8 +2343,11 @@
 
 	/*
 	 * CTS flow control flag and modem status interrupts
+	 * Only disable MSI if no threads are waiting in
+	 * serial_core::uart_wait_modem_status
 	 */
-	up->ier &= ~UART_IER_MSI;
+	if (!waitqueue_active(&up->port.state->port.delta_msr_wait))
+		up->ier &= ~UART_IER_MSI;
 	if (!(up->bugs & UART_BUG_NOMSR) &&
 			UART_ENABLE_MS(&up->port, termios->c_cflag))
 		up->ier |= UART_IER_MSI;
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index 53be4d3..842e3b2 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -2285,6 +2285,8 @@
 
 static const struct pci_device_id softmodem_blacklist[] = {
 	{ PCI_VDEVICE(AL, 0x5457), }, /* ALi Corporation M5457 AC'97 Modem */
+	{ PCI_VDEVICE(MOTOROLA, 0x3052), }, /* Motorola Si3052-based modem */
+	{ PCI_DEVICE(0x1543, 0x3052), }, /* Si3052-based modem, default IDs */
 };
 
 /*
@@ -2863,6 +2865,9 @@
 		PCI_SUBVENDOR_ID_SIIG, PCI_SUBDEVICE_ID_SIIG_QUARTET_SERIAL,
 		0, 0,
 		pbn_b0_4_1152000 },
+	{	PCI_VENDOR_ID_OXSEMI, 0x9505,
+		PCI_ANY_ID, PCI_ANY_ID, 0, 0,
+		pbn_b0_bt_2_921600 },
 
 		/*
 		 * The below card is a little controversial since it is the
diff --git a/drivers/serial/bfin_5xx.c b/drivers/serial/bfin_5xx.c
index a9eff2b..19cac9f 100644
--- a/drivers/serial/bfin_5xx.c
+++ b/drivers/serial/bfin_5xx.c
@@ -23,6 +23,7 @@
 #include <linux/tty.h>
 #include <linux/tty_flip.h>
 #include <linux/serial_core.h>
+#include <linux/dma-mapping.h>
 
 #if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
 	defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
@@ -33,12 +34,10 @@
 #include <asm/gpio.h>
 #include <mach/bfin_serial_5xx.h>
 
-#ifdef CONFIG_SERIAL_BFIN_DMA
-#include <linux/dma-mapping.h>
+#include <asm/dma.h>
 #include <asm/io.h>
 #include <asm/irq.h>
 #include <asm/cacheflush.h>
-#endif
 
 #ifdef CONFIG_SERIAL_BFIN_MODULE
 # undef CONFIG_EARLY_PRINTK
@@ -360,7 +359,6 @@
 		UART_PUT_CHAR(uart, xmit->buf[xmit->tail]);
 		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
 		uart->port.icount.tx++;
-		SSYNC();
 	}
 
 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
@@ -688,6 +686,13 @@
 
 # ifdef CONFIG_BF54x
 	{
+		/*
+		 * UART2 and UART3 on the BF548 share interrupt pins and DMA
+		 * controllers with SPORT2 and SPORT3. In PIO mode, UART RX
+		 * and TX interrupts are generated only when the peripheral
+		 * mapping registers are configured properly, which means the
+		 * corresponding DMA channels must be requested in PIO mode
+		 * as well.
+		 */
 		unsigned uart_dma_ch_rx, uart_dma_ch_tx;
 
 		switch (uart->port.irq) {
@@ -734,8 +739,7 @@
 			IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
 			IRQF_DISABLED, "BFIN_UART_CTS", uart)) {
 			uart->cts_pin = -1;
-			pr_info("Unable to attach BlackFin UART CTS interrupt.\
-				 So, disable it.\n");
+			pr_info("Unable to attach BlackFin UART CTS interrupt. So, disable it.\n");
 		}
 	}
 	if (uart->rts_pin >= 0) {
@@ -747,8 +751,7 @@
 	if (request_irq(uart->status_irq,
 		bfin_serial_mctrl_cts_int,
 		IRQF_DISABLED, "BFIN_UART_MODEM_STATUS", uart)) {
-		pr_info("Unable to attach BlackFin UART Modem \
-			Status interrupt.\n");
+		pr_info("Unable to attach BlackFin UART Modem Status interrupt.\n");
 	}
 
 	/* CTS RTS PINs are negative assertive. */
@@ -846,6 +849,8 @@
 	if (termios->c_cflag & CMSPAR)
 		lcr |= STP;
 
+	spin_lock_irqsave(&uart->port.lock, flags);
+
 	port->read_status_mask = OE;
 	if (termios->c_iflag & INPCK)
 		port->read_status_mask |= (FE | PE);
@@ -875,8 +880,6 @@
 	if (termios->c_line != N_IRDA)
 		quot -= ANOMALY_05000230;
 
-	spin_lock_irqsave(&uart->port.lock, flags);
-
 	UART_SET_ANOMALY_THRESHOLD(uart, USEC_PER_SEC / baud * 15);
 
 	/* Disable UART */
@@ -1321,6 +1324,14 @@
 	struct bfin_serial_port *uart;
 	struct ktermios t;
 
+#ifdef CONFIG_SERIAL_BFIN_CONSOLE
+	/*
+	 * If we are using early serial, don't let the normal console rewind
+	 * log buffer, since that causes things to be printed multiple times
+	 */
+	bfin_serial_console.flags &= ~CON_PRINTBUFFER;
+#endif
+
 	if (port == -1 || port >= nr_active_ports)
 		port = 0;
 	bfin_serial_init_ports();
diff --git a/drivers/serial/kgdboc.c b/drivers/serial/kgdboc.c
index d4b711c..3374618 100644
--- a/drivers/serial/kgdboc.c
+++ b/drivers/serial/kgdboc.c
@@ -18,6 +18,7 @@
 #include <linux/tty.h>
 #include <linux/console.h>
 #include <linux/vt_kern.h>
+#include <linux/input.h>
 
 #define MAX_CONFIG_LEN		40
 
@@ -37,6 +38,61 @@
 static int			kgdb_tty_line;
 
 #ifdef CONFIG_KDB_KEYBOARD
+static int kgdboc_reset_connect(struct input_handler *handler,
+				struct input_dev *dev,
+				const struct input_device_id *id)
+{
+	input_reset_device(dev);
+
+	/* Return an error - we do not want to bind, just to reset */
+	return -ENODEV;
+}
+
+static void kgdboc_reset_disconnect(struct input_handle *handle)
+{
+	/* We do not expect anyone to actually bind to us */
+	BUG();
+}
+
+static const struct input_device_id kgdboc_reset_ids[] = {
+	{
+		.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
+		.evbit = { BIT_MASK(EV_KEY) },
+	},
+	{ }
+};
+
+static struct input_handler kgdboc_reset_handler = {
+	.connect	= kgdboc_reset_connect,
+	.disconnect	= kgdboc_reset_disconnect,
+	.name		= "kgdboc_reset",
+	.id_table	= kgdboc_reset_ids,
+};
+
+static DEFINE_MUTEX(kgdboc_reset_mutex);
+
+static void kgdboc_restore_input_helper(struct work_struct *dummy)
+{
+	/*
+	 * We need to take a mutex to prevent several instances of
+	 * this work running on different CPUs from trying to register
+	 * an already registered handler again.
+	 */
+	mutex_lock(&kgdboc_reset_mutex);
+
+	if (input_register_handler(&kgdboc_reset_handler) == 0)
+		input_unregister_handler(&kgdboc_reset_handler);
+
+	mutex_unlock(&kgdboc_reset_mutex);
+}
+
+static DECLARE_WORK(kgdboc_restore_input_work, kgdboc_restore_input_helper);
+
+static void kgdboc_restore_input(void)
+{
+	schedule_work(&kgdboc_restore_input_work);
+}
+
 static int kgdboc_register_kbd(char **cptr)
 {
 	if (strncmp(*cptr, "kbd", 3) == 0) {
@@ -64,10 +120,12 @@
 			i--;
 		}
 	}
+	flush_work_sync(&kgdboc_restore_input_work);
 }
 #else /* ! CONFIG_KDB_KEYBOARD */
 #define kgdboc_register_kbd(x) 0
 #define kgdboc_unregister_kbd()
+#define kgdboc_restore_input()
 #endif /* ! CONFIG_KDB_KEYBOARD */
 
 static int kgdboc_option_setup(char *opt)
@@ -231,6 +289,7 @@
 		dbg_restore_graphics = 0;
 		con_debug_leave();
 	}
+	kgdboc_restore_input();
 }
 
 static struct kgdb_io kgdboc_io_ops = {
diff --git a/drivers/sh/clk/core.c b/drivers/sh/clk/core.c
index fd0d1b9..09615b5 100644
--- a/drivers/sh/clk/core.c
+++ b/drivers/sh/clk/core.c
@@ -90,8 +90,8 @@
 static long clk_rate_round_helper(struct clk_rate_round_data *rounder)
 {
 	unsigned long rate_error, rate_error_prev = ~0UL;
-	unsigned long rate_best_fit = rounder->rate;
 	unsigned long highest, lowest, freq;
+	long rate_best_fit = -ENOENT;
 	int i;
 
 	highest = 0;
@@ -146,7 +146,7 @@
 	};
 
 	if (clk->nr_freqs < 1)
-		return 0;
+		return -ENOSYS;
 
 	return clk_rate_round_helper(&table_round);
 }
@@ -541,6 +541,98 @@
 }
 EXPORT_SYMBOL_GPL(clk_round_rate);
 
+long clk_round_parent(struct clk *clk, unsigned long target,
+		      unsigned long *best_freq, unsigned long *parent_freq,
+		      unsigned int div_min, unsigned int div_max)
+{
+	struct cpufreq_frequency_table *freq, *best = NULL;
+	unsigned long error = ULONG_MAX, freq_high, freq_low, div;
+	struct clk *parent = clk_get_parent(clk);
+
+	if (!parent) {
+		*parent_freq = 0;
+		*best_freq = clk_round_rate(clk, target);
+		return abs(target - *best_freq);
+	}
+
+	for (freq = parent->freq_table; freq->frequency != CPUFREQ_TABLE_END;
+	     freq++) {
+		if (freq->frequency == CPUFREQ_ENTRY_INVALID)
+			continue;
+
+		if (unlikely(freq->frequency / target <= div_min - 1)) {
+			unsigned long freq_max;
+
+			freq_max = (freq->frequency + div_min / 2) / div_min;
+			if (error > target - freq_max) {
+				error = target - freq_max;
+				best = freq;
+				if (best_freq)
+					*best_freq = freq_max;
+			}
+
+			pr_debug("too low freq %lu, error %lu\n", freq->frequency,
+				 target - freq_max);
+
+			if (!error)
+				break;
+
+			continue;
+		}
+
+		if (unlikely(freq->frequency / target >= div_max)) {
+			unsigned long freq_min;
+
+			freq_min = (freq->frequency + div_max / 2) / div_max;
+			if (error > freq_min - target) {
+				error = freq_min - target;
+				best = freq;
+				if (best_freq)
+					*best_freq = freq_min;
+			}
+
+			pr_debug("too high freq %lu, error %lu\n", freq->frequency,
+				 freq_min - target);
+
+			if (!error)
+				break;
+
+			continue;
+		}
+
+		div = freq->frequency / target;
+		freq_high = freq->frequency / div;
+		freq_low = freq->frequency / (div + 1);
+
+		if (freq_high - target < error) {
+			error = freq_high - target;
+			best = freq;
+			if (best_freq)
+				*best_freq = freq_high;
+		}
+
+		if (target - freq_low < error) {
+			error = target - freq_low;
+			best = freq;
+			if (best_freq)
+				*best_freq = freq_low;
+		}
+
+		pr_debug("%u / %lu = %lu, / %lu = %lu, best %lu, parent %u\n",
+			 freq->frequency, div, freq_high, div + 1, freq_low,
+			 best_freq ? *best_freq : 0UL, best->frequency);
+
+		if (!error)
+			break;
+	}
+
+	if (parent_freq)
+		*parent_freq = best->frequency;
+
+	return error;
+}
+EXPORT_SYMBOL_GPL(clk_round_parent);
+
 #ifdef CONFIG_PM
 static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state)
 {
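
clk_round_parent(), added above, scans the parent's cpufreq frequency
table and for each candidate rate tries the two nearest integer
dividers, keeping whichever lands closest to the target. A worked
example with assumed numbers (not from the patch): for a 200 MHz parent
and a 48 MHz target, div = 200/48 = 4, so the candidates are 200/4 =
50 MHz (2 MHz high) and 200/5 = 40 MHz (8 MHz low), and 50 MHz wins.

#include <stdio.h>

int main(void)
{
	unsigned long parent = 200000000UL, target = 48000000UL;
	unsigned long div = parent / target;		/* 4      */
	unsigned long hi  = parent / div;		/* 50 MHz */
	unsigned long lo  = parent / (div + 1);		/* 40 MHz */

	printf("best %lu Hz (div %lu)\n",
	       hi - target < target - lo ? hi : lo,
	       hi - target < target - lo ? div : div + 1);
	return 0;
}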
diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c
index 873a99f..e5e9e67 100644
--- a/drivers/sh/intc/core.c
+++ b/drivers/sh/intc/core.c
@@ -79,7 +79,7 @@
 	 * Register the IRQ position with the global IRQ map, then insert
 	 * it in to the radix tree.
 	 */
-	irq_reserve_irqs(irq, 1);
+	irq_reserve_irq(irq);
 
 	raw_spin_lock_irqsave(&intc_big_lock, flags);
 	radix_tree_insert(&d->tree, enum_id, intc_irq_xlate_get(irq));
diff --git a/drivers/sh/intc/dynamic.c b/drivers/sh/intc/dynamic.c
index 4187cce..a3677c9 100644
--- a/drivers/sh/intc/dynamic.c
+++ b/drivers/sh/intc/dynamic.c
@@ -60,5 +60,5 @@
 	int i;
 
 	for (i = 0; i < nr_vecs; i++)
-		irq_reserve_irqs(evt2irq(vectors[i].vect), 1);
+		irq_reserve_irq(evt2irq(vectors[i].vect));
 }
diff --git a/drivers/staging/ath6kl/Kconfig b/drivers/staging/ath6kl/Kconfig
index ae2cdf4..8a5caa3 100644
--- a/drivers/staging/ath6kl/Kconfig
+++ b/drivers/staging/ath6kl/Kconfig
@@ -102,7 +102,7 @@
 
 config ATH6KL_CFG80211
 	bool "CFG80211 support"
-	depends on ATH6K_LEGACY
+	depends on ATH6K_LEGACY && CFG80211
 	help
 	Enables support for CFG80211 APIs. The default option is to use WEXT. Even with this option enabled, WEXT is not explicitly disabled and the onus of not exercising WEXT lies on the application(s) running in the user space.
 
diff --git a/drivers/staging/ath6kl/hif/sdio/linux_sdio/src/hif_scatter.c b/drivers/staging/ath6kl/hif/sdio/linux_sdio/src/hif_scatter.c
index 22c6c66..ee8b477 100644
--- a/drivers/staging/ath6kl/hif/sdio/linux_sdio/src/hif_scatter.c
+++ b/drivers/staging/ath6kl/hif/sdio/linux_sdio/src/hif_scatter.c
@@ -285,9 +285,9 @@
     do {
         
             /* check if host supports scatter requests and it meets our requirements */
-        if (device->func->card->host->max_hw_segs < MAX_SCATTER_ENTRIES_PER_REQ) {
+        if (device->func->card->host->max_segs < MAX_SCATTER_ENTRIES_PER_REQ) {
             AR_DEBUG_PRINTF(ATH_DEBUG_ERR,("HIF-SCATTER : host only supports scatter of : %d entries, need: %d \n",
-                    device->func->card->host->max_hw_segs, MAX_SCATTER_ENTRIES_PER_REQ));
+                    device->func->card->host->max_segs, MAX_SCATTER_ENTRIES_PER_REQ));
             status = A_ENOTSUP;
             break;    
         }
diff --git a/drivers/staging/ath6kl/os/linux/ar6000_drv.c b/drivers/staging/ath6kl/os/linux/ar6000_drv.c
index c5a6d6c..a659f70 100644
--- a/drivers/staging/ath6kl/os/linux/ar6000_drv.c
+++ b/drivers/staging/ath6kl/os/linux/ar6000_drv.c
@@ -1126,7 +1126,7 @@
         if ((board_ext_address) && (fw_entry->size == (board_data_size + board_ext_data_size))) {
             A_UINT32 param;
 
-            status = BMIWriteMemory(ar->arHifDevice, board_ext_address, (A_UCHAR *)(((A_UINT32)fw_entry->data) + board_data_size), board_ext_data_size);
+            status = BMIWriteMemory(ar->arHifDevice, board_ext_address, (A_UCHAR *)(fw_entry->data + board_data_size), board_ext_data_size);
 
             if (status != A_OK) {
                 AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("BMI operation failed: %d\n", __LINE__));
@@ -3030,7 +3030,8 @@
         A_UINT8 csumDest=0;
         A_UINT8 csum=skb->ip_summed;
         if(csumOffload && (csum==CHECKSUM_PARTIAL)){
-            csumStart=skb->csum_start-(skb->network_header-skb->head)+sizeof(ATH_LLC_SNAP_HDR);
+            csumStart = (skb->head + skb->csum_start - skb_network_header(skb) +
+			 sizeof(ATH_LLC_SNAP_HDR));
             csumDest=skb->csum_offset+csumStart;
         }
 #endif
diff --git a/drivers/staging/ath6kl/os/linux/cfg80211.c b/drivers/staging/ath6kl/os/linux/cfg80211.c
index c94ad29..7269d0a 100644
--- a/drivers/staging/ath6kl/os/linux/cfg80211.c
+++ b/drivers/staging/ath6kl/os/linux/cfg80211.c
@@ -808,7 +808,7 @@
 
 static int
 ar6k_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
-                      A_UINT8 key_index, const A_UINT8 *mac_addr,
+                      A_UINT8 key_index, bool pairwise, const A_UINT8 *mac_addr,
                       struct key_params *params)
 {
     AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(ndev);
@@ -901,7 +901,7 @@
 
 static int
 ar6k_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
-                      A_UINT8 key_index, const A_UINT8 *mac_addr)
+                      A_UINT8 key_index, bool pairwise, const A_UINT8 *mac_addr)
 {
     AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(ndev);
 
@@ -936,7 +936,8 @@
 
 static int
 ar6k_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
-                      A_UINT8 key_index, const A_UINT8 *mac_addr, void *cookie,
+                      A_UINT8 key_index, bool pairwise, const A_UINT8 *mac_addr,
+                      void *cookie,
                       void (*callback)(void *cookie, struct key_params*))
 {
     AR_SOFTC_T *ar = (AR_SOFTC_T *)ar6k_priv(ndev);
diff --git a/drivers/staging/ath6kl/os/linux/include/athendpack_linux.h b/drivers/staging/ath6kl/os/linux/include/athendpack_linux.h
deleted file mode 100644
index e69de29..0000000
--- a/drivers/staging/ath6kl/os/linux/include/athendpack_linux.h
+++ /dev/null
diff --git a/drivers/staging/ath6kl/os/linux/include/athstartpack_linux.h b/drivers/staging/ath6kl/os/linux/include/athstartpack_linux.h
deleted file mode 100644
index e69de29..0000000
--- a/drivers/staging/ath6kl/os/linux/include/athstartpack_linux.h
+++ /dev/null
diff --git a/drivers/staging/batman-adv/hard-interface.c b/drivers/staging/batman-adv/hard-interface.c
index 80cfa86..b68a7e5 100644
--- a/drivers/staging/batman-adv/hard-interface.c
+++ b/drivers/staging/batman-adv/hard-interface.c
@@ -165,7 +165,7 @@
 	       batman_if->net_dev->dev_addr, ETH_ALEN);
 }
 
-static void check_known_mac_addr(uint8_t *addr)
+static void check_known_mac_addr(struct net_device *net_dev)
 {
 	struct batman_if *batman_if;
 
@@ -175,11 +175,16 @@
 		    (batman_if->if_status != IF_TO_BE_ACTIVATED))
 			continue;
 
-		if (!compare_orig(batman_if->net_dev->dev_addr, addr))
+		if (batman_if->net_dev == net_dev)
+			continue;
+
+		if (!compare_orig(batman_if->net_dev->dev_addr,
+				  net_dev->dev_addr))
 			continue;
 
 		pr_warning("The newly added mac address (%pM) already exists "
-			   "on: %s\n", addr, batman_if->net_dev->name);
+			   "on: %s\n", net_dev->dev_addr,
+			   batman_if->net_dev->name);
 		pr_warning("It is strongly recommended to keep mac addresses "
 			   "unique to avoid problems!\n");
 	}
@@ -430,7 +435,7 @@
 	atomic_set(&batman_if->refcnt, 0);
 	hardif_hold(batman_if);
 
-	check_known_mac_addr(batman_if->net_dev->dev_addr);
+	check_known_mac_addr(batman_if->net_dev);
 
 	spin_lock(&if_list_lock);
 	list_add_tail_rcu(&batman_if->list, &if_list);
@@ -515,7 +520,7 @@
 			goto out;
 		}
 
-		check_known_mac_addr(batman_if->net_dev->dev_addr);
+		check_known_mac_addr(batman_if->net_dev);
 		update_mac_addresses(batman_if);
 
 		bat_priv = netdev_priv(batman_if->soft_iface);
diff --git a/drivers/staging/batman-adv/routing.c b/drivers/staging/batman-adv/routing.c
index 9010263..657b69e 100644
--- a/drivers/staging/batman-adv/routing.c
+++ b/drivers/staging/batman-adv/routing.c
@@ -1000,10 +1000,10 @@
 
 /* find a suitable router for this originator, and use
  * bonding if possible. */
-struct neigh_node *find_router(struct orig_node *orig_node,
+struct neigh_node *find_router(struct bat_priv *bat_priv,
+			       struct orig_node *orig_node,
 			       struct batman_if *recv_if)
 {
-	struct bat_priv *bat_priv;
 	struct orig_node *primary_orig_node;
 	struct orig_node *router_orig;
 	struct neigh_node *router, *first_candidate, *best_router;
@@ -1019,13 +1019,9 @@
 	/* without bonding, the first node should
 	 * always choose the default router. */
 
-	if (!recv_if)
-		return orig_node->router;
-
-	bat_priv = netdev_priv(recv_if->soft_iface);
 	bonding_enabled = atomic_read(&bat_priv->bonding_enabled);
 
-	if (!bonding_enabled)
+	if ((!recv_if) && (!bonding_enabled))
 		return orig_node->router;
 
 	router_orig = orig_node->router->orig_node;
@@ -1154,7 +1150,7 @@
 	orig_node = ((struct orig_node *)
 		     hash_find(bat_priv->orig_hash, unicast_packet->dest));
 
-	router = find_router(orig_node, recv_if);
+	router = find_router(bat_priv, orig_node, recv_if);
 
 	if (!router) {
 		spin_unlock_irqrestore(&bat_priv->orig_hash_lock, flags);
diff --git a/drivers/staging/batman-adv/routing.h b/drivers/staging/batman-adv/routing.h
index 06ea99d..92674c8 100644
--- a/drivers/staging/batman-adv/routing.h
+++ b/drivers/staging/batman-adv/routing.h
@@ -38,8 +38,8 @@
 int recv_bcast_packet(struct sk_buff *skb, struct batman_if *recv_if);
 int recv_vis_packet(struct sk_buff *skb, struct batman_if *recv_if);
 int recv_bat_packet(struct sk_buff *skb, struct batman_if *recv_if);
-struct neigh_node *find_router(struct orig_node *orig_node,
-		struct batman_if *recv_if);
+struct neigh_node *find_router(struct bat_priv *bat_priv,
+		struct orig_node *orig_node, struct batman_if *recv_if);
 void update_bonding_candidates(struct bat_priv *bat_priv,
 			       struct orig_node *orig_node);
 
diff --git a/drivers/staging/batman-adv/unicast.c b/drivers/staging/batman-adv/unicast.c
index 0dac50d..0459413 100644
--- a/drivers/staging/batman-adv/unicast.c
+++ b/drivers/staging/batman-adv/unicast.c
@@ -224,7 +224,7 @@
 	if (!orig_node)
 		orig_node = transtable_search(bat_priv, ethhdr->h_dest);
 
-	router = find_router(orig_node, NULL);
+	router = find_router(bat_priv, orig_node, NULL);
 
 	if (!router)
 		goto unlock;
diff --git a/drivers/staging/bcm/Bcmchar.c b/drivers/staging/bcm/Bcmchar.c
index 77fdfe2..fead9c5 100644
--- a/drivers/staging/bcm/Bcmchar.c
+++ b/drivers/staging/bcm/Bcmchar.c
@@ -1001,13 +1001,15 @@
 		}
 #endif
 		case IOCTL_BE_BUCKET_SIZE:
-			Adapter->BEBucketSize = *(PULONG)arg;
-			Status = STATUS_SUCCESS;
+			Status = 0;
+			if (get_user(Adapter->BEBucketSize, (unsigned long __user *)arg))
+				Status = -EFAULT;
 			break;
 
 		case IOCTL_RTPS_BUCKET_SIZE:
-			Adapter->rtPSBucketSize = *(PULONG)arg;
-			Status = STATUS_SUCCESS;
+			Status = 0;
+			if (get_user(Adapter->rtPSBucketSize, (unsigned long __user *)arg))
+				Status = -EFAULT;
 			break;
 		case IOCTL_CHIP_RESET:
 	    {
@@ -1028,11 +1030,15 @@
 		case IOCTL_QOS_THRESHOLD:
 		{
 			USHORT uiLoopIndex;
-			for(uiLoopIndex = 0 ; uiLoopIndex < NO_OF_QUEUES ; uiLoopIndex++)
-			{
-				Adapter->PackInfo[uiLoopIndex].uiThreshold = *(PULONG)arg;
+
+			Status = 0;
+			for (uiLoopIndex = 0; uiLoopIndex < NO_OF_QUEUES; uiLoopIndex++) {
+				if (get_user(Adapter->PackInfo[uiLoopIndex].uiThreshold,
+						(unsigned long __user *)arg)) {
+					Status = -EFAULT;
+					break;
+				}
 			}
-			Status = STATUS_SUCCESS;
 			break;
 		}
 
@@ -1093,7 +1099,8 @@
 		}
 		case IOCTL_BCM_GET_CURRENT_STATUS:
 		{
-			LINK_STATE *plink_state = NULL;
+			LINK_STATE plink_state;
+
 			/* Copy Ioctl Buffer structure */
 			if(copy_from_user(&IoBuffer, argp, sizeof(IOCTL_BUFFER)))
 			{
@@ -1101,13 +1108,19 @@
 				Status = -EFAULT;
 				break;
 			}
-			plink_state = (LINK_STATE*)arg;
-			plink_state->bIdleMode = (UCHAR)Adapter->IdleMode;
-			plink_state->bShutdownMode = Adapter->bShutStatus;
-			plink_state->ucLinkStatus = (UCHAR)Adapter->LinkStatus;
-			if(copy_to_user(IoBuffer.OutputBuffer,
-				(PUCHAR)plink_state, (UINT)IoBuffer.OutputLength))
-			{
+			if (IoBuffer.OutputLength != sizeof(plink_state)) {
+				Status = -EINVAL;
+				break;
+			}
+
+			if (copy_from_user(&plink_state, (void __user *)arg, sizeof(plink_state))) {
+				Status = -EFAULT;
+				break;
+			}
+			plink_state.bIdleMode = (UCHAR)Adapter->IdleMode;
+			plink_state.bShutdownMode = Adapter->bShutStatus;
+			plink_state.ucLinkStatus = (UCHAR)Adapter->LinkStatus;
+			if (copy_to_user(IoBuffer.OutputBuffer, &plink_state, IoBuffer.OutputLength)) {
 				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0, "Copy_to_user Failed..\n");
 				Status = -EFAULT;
 				break;
@@ -1331,7 +1344,9 @@
 						BCM_DEBUG_PRINT(Adapter,DBG_TYPE_PRINTK, 0, 0,"Copy From User space failed. status :%d", Status);
 						return -EFAULT;
 					}
-					uiSectorSize = *((PUINT)(IoBuffer.InputBuffer)); /* FIXME: unchecked __user access */
+					if (get_user(uiSectorSize, (unsigned int __user *)IoBuffer.InputBuffer))
+						return -EFAULT;
+
 					if((uiSectorSize < MIN_SECTOR_SIZE) || (uiSectorSize > MAX_SECTOR_SIZE))
 					{
 
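
The Bcmchar.c hunks above all make the same conversion: the ioctl `arg` value, previously dereferenced directly as a kernel pointer (`*(PULONG)arg`), is now read through get_user(), which validates the user address and fails with -EFAULT instead of oopsing on a bad pointer. A minimal sketch of the pattern, using hypothetical names rather than this driver's:

	#include <linux/uaccess.h>

	/* Read one unsigned long ioctl argument from userspace.
	 * get_user() checks the address and copies safely; the old
	 * raw dereference would oops (or read kernel memory) if
	 * userspace passed a bogus pointer. */
	static int set_bucket_size(unsigned long *field, unsigned long arg)
	{
		if (get_user(*field, (unsigned long __user *)arg))
			return -EFAULT;
		return 0;
	}
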
diff --git a/drivers/staging/brcm80211/README b/drivers/staging/brcm80211/README
index c3ba9bb..c8f1cf1 100644
--- a/drivers/staging/brcm80211/README
+++ b/drivers/staging/brcm80211/README
@@ -90,5 +90,5 @@
 =============
 Brett Rudley	brudley@broadcom.com
 Henry Ptasinski henryp@broadcom.com
-Nohee Ko	noheek@broadcom.com
+Dowan Kim	dowan@broadcom.com
 
diff --git a/drivers/staging/brcm80211/TODO b/drivers/staging/brcm80211/TODO
index 8803d30..dbf9041 100644
--- a/drivers/staging/brcm80211/TODO
+++ b/drivers/staging/brcm80211/TODO
@@ -45,5 +45,5 @@
 =====
 Brett Rudley <brudley@broadcom.com>
 Henry Ptasinski <henryp@broadcom.com>
-Nohee Ko <noheek@broadcom.com>
+Dowan Kim <dowan@broadcom.com>
 
diff --git a/drivers/staging/brcm80211/brcmfmac/dhd_linux.c b/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
index bbbe7c5..9335f02 100644
--- a/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
@@ -2222,8 +2222,6 @@
 	ASSERT(net);
 
 	ASSERT(!net->netdev_ops);
-	net->netdev_ops = &dhd_ops_virt;
-
 	net->netdev_ops = &dhd_ops_pri;
 
 	/*
diff --git a/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c
index 3f29488..ea08252 100644
--- a/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/staging/brcm80211/brcmfmac/wl_cfg80211.c
@@ -95,12 +95,12 @@
 					    struct net_device *dev,
 					    u8 key_idx);
 static s32 wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
-				 u8 key_idx, const u8 *mac_addr,
+				 u8 key_idx, bool pairwise, const u8 *mac_addr,
 				 struct key_params *params);
 static s32 wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev,
-				 u8 key_idx, const u8 *mac_addr);
+				 u8 key_idx, bool pairwise, const u8 *mac_addr);
 static s32 wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev,
-				 u8 key_idx, const u8 *mac_addr,
+				 u8 key_idx, bool pairwise, const u8 *mac_addr,
 				 void *cookie, void (*callback) (void *cookie,
 								 struct
 								 key_params *
@@ -1615,7 +1615,7 @@
 
 static s32
 wl_cfg80211_add_key(struct wiphy *wiphy, struct net_device *dev,
-		    u8 key_idx, const u8 *mac_addr,
+		    u8 key_idx, bool pairwise, const u8 *mac_addr,
 		    struct key_params *params)
 {
 	struct wl_wsec_key key;
@@ -1700,7 +1700,7 @@
 
 static s32
 wl_cfg80211_del_key(struct wiphy *wiphy, struct net_device *dev,
-		    u8 key_idx, const u8 *mac_addr)
+		    u8 key_idx, bool pairwise, const u8 *mac_addr)
 {
 	struct wl_wsec_key key;
 	s32 err = 0;
@@ -1756,7 +1756,7 @@
 
 static s32
 wl_cfg80211_get_key(struct wiphy *wiphy, struct net_device *dev,
-		    u8 key_idx, const u8 *mac_addr, void *cookie,
+		    u8 key_idx, bool pairwise, const u8 *mac_addr, void *cookie,
 		    void (*callback) (void *cookie, struct key_params * params))
 {
 	struct key_params params;
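
The wl_cfg80211.c hunks above track a cfg80211 API change that added a `pairwise` argument to the add/del/get key operations. A driver whose key handling is keyed only on the index and MAC address can accept the flag without acting on it; a sketch under that assumption (example_clear_key is a hypothetical helper, not this driver's):

	static s32 example_del_key(struct wiphy *wiphy, struct net_device *dev,
				   u8 key_idx, bool pairwise, const u8 *mac_addr)
	{
		/* pairwise is accepted only to match the new cfg80211
		 * prototype; the removal logic itself is unchanged. */
		return example_clear_key(dev, key_idx, mac_addr);
	}
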
diff --git a/drivers/staging/cpia/cpia.c b/drivers/staging/cpia/cpia.c
index 933ae4c..0e740b8 100644
--- a/drivers/staging/cpia/cpia.c
+++ b/drivers/staging/cpia/cpia.c
@@ -3184,13 +3184,9 @@
 		goto oops;
 	}
 
-	err = -EINTR;
-	if(signal_pending(current))
-		goto oops;
-
 	/* Set ownership of /proc/cpia/videoX to current user */
 	if(cam->proc_entry)
-		cam->proc_entry->uid = current_uid();
+		cam->proc_entry->uid = current_euid();
 
 	/* set mark for loading first frame uncompressed */
 	cam->first_frame = 1;
diff --git a/drivers/staging/ft1000/ft1000-usb/ft1000_chdev.c b/drivers/staging/ft1000/ft1000-usb/ft1000_chdev.c
index 87a6487..20d5098 100644
--- a/drivers/staging/ft1000/ft1000-usb/ft1000_chdev.c
+++ b/drivers/staging/ft1000/ft1000-usb/ft1000_chdev.c
@@ -286,7 +286,6 @@
     pid = kernel_thread (exec_mknod, (void *)info, 0);
 
     // initialize application information
-    info->appcnt = 0;
 
 //    if (ft1000_flarion_cnt == 0) {
 //
diff --git a/drivers/staging/hv/hv_utils.c b/drivers/staging/hv/hv_utils.c
index 702a478..a99e900 100644
--- a/drivers/staging/hv/hv_utils.c
+++ b/drivers/staging/hv/hv_utils.c
@@ -212,9 +212,6 @@
 			   recvlen, requestid);
 
 		icmsghdrp = (struct icmsg_hdr *)&buf[
-			sizeof(struct vmbuspipe_hdr)];
-
-		icmsghdrp = (struct icmsg_hdr *)&buf[
 				sizeof(struct vmbuspipe_hdr)];
 
 		if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
diff --git a/drivers/staging/intel_sst/intel_sst_app_interface.c b/drivers/staging/intel_sst/intel_sst_app_interface.c
index 463e5cb..9618c79 100644
--- a/drivers/staging/intel_sst/intel_sst_app_interface.c
+++ b/drivers/staging/intel_sst/intel_sst_app_interface.c
@@ -244,12 +244,12 @@
 	int retval, i;
 	struct stream_info *stream;
 	struct snd_sst_mmap_buff_entry *buf_entry;
+	struct snd_sst_mmap_buff_entry *tmp_buf;
 
 	pr_debug("sst:called for str_id %d\n", str_id);
 	retval = sst_validate_strid(str_id);
 	if (retval)
 		return -EINVAL;
-	BUG_ON(!mmap_buf);
 
 	stream = &sst_drv_ctx->streams[str_id];
 	if (stream->mmapped != true)
@@ -262,14 +262,24 @@
 	stream->curr_bytes = 0;
 	stream->cumm_bytes = 0;
 
+	tmp_buf = kcalloc(mmap_buf->entries, sizeof(*tmp_buf), GFP_KERNEL);
+	if (!tmp_buf)
+		return -ENOMEM;
+	if (copy_from_user(tmp_buf, (void __user *)mmap_buf->buff,
+			mmap_buf->entries * sizeof(*tmp_buf))) {
+		retval = -EFAULT;
+		goto out_free;
+	}
+
 	pr_debug("sst:new buffers count %d status %d\n",
 			mmap_buf->entries, stream->status);
-	buf_entry = mmap_buf->buff;
+	buf_entry = tmp_buf;
 	for (i = 0; i < mmap_buf->entries; i++) {
-		BUG_ON(!buf_entry);
 		bufs = kzalloc(sizeof(*bufs), GFP_KERNEL);
-		if (!bufs)
-			return -ENOMEM;
+		if (!bufs) {
+			retval = -ENOMEM;
+			goto out_free;
+		}
 		bufs->size = buf_entry->size;
 		bufs->offset = buf_entry->offset;
 		bufs->addr = sst_drv_ctx->mmap_mem;
@@ -293,13 +303,15 @@
 			if (sst_play_frame(str_id) < 0) {
 				pr_warn("sst: play frames fail\n");
 				mutex_unlock(&stream->lock);
-				return -EIO;
+				retval = -EIO;
+				goto out_free;
 			}
 		} else if (stream->ops == STREAM_OPS_CAPTURE) {
 			if (sst_capture_frame(str_id) < 0) {
 				pr_warn("sst: capture frame fail\n");
 				mutex_unlock(&stream->lock);
-				return -EIO;
+				retval = -EIO;
+				goto out_free;
 			}
 		}
 	}
@@ -314,6 +326,9 @@
 	if (retval >= 0)
 		retval = stream->cumm_bytes;
 	pr_debug("sst:end of play/rec ioctl bytes = %d!!\n", retval);
+
+out_free:
+	kfree(tmp_buf);
 	return retval;
 }
 
@@ -377,7 +392,7 @@
 {
 	struct sst_stream_bufs *stream_bufs;
 	unsigned long index, mmap_len;
-	unsigned char *bufp;
+	unsigned char __user *bufp;
 	unsigned long size, copied_size;
 	int retval = 0, add_to_list = 0;
 	static int sent_offset;
@@ -512,9 +527,7 @@
 			/* copy to user */
 			list_for_each_entry_safe(entry, _entry,
 						copy_to_list, node) {
-				if (copy_to_user((void *)
-					     iovec[entry->iov_index].iov_base +
-					     entry->iov_offset,
+				if (copy_to_user(iovec[entry->iov_index].iov_base + entry->iov_offset,
 					     kbufs->addr + entry->offset,
 					     entry->size)) {
 					/* Clean up the list and return error */
@@ -590,7 +603,7 @@
 			buf, (int) count, (int) stream->status);
 
 	stream->buf_type = SST_BUF_USER_STATIC;
-	iovec.iov_base = (void *)buf;
+	iovec.iov_base = buf;
 	iovec.iov_len  = count;
 	nr_segs = 1;
 
@@ -838,7 +851,7 @@
 		break;
 
 	case _IOC_NR(SNDRV_SST_STREAM_SET_PARAMS): {
-		struct snd_sst_params *str_param = (struct snd_sst_params *)arg;
+		struct snd_sst_params str_param;
 
 		pr_debug("sst: IOCTL_SET_PARAMS recieved!\n");
 		if (minor != STREAM_MODULE) {
@@ -846,17 +859,25 @@
 			break;
 		}
 
+		if (copy_from_user(&str_param, (void __user *)arg,
+				sizeof(str_param))) {
+			retval = -EFAULT;
+			break;
+		}
+
 		if (!str_id) {
 
-			retval = sst_get_stream(str_param);
+			retval = sst_get_stream(&str_param);
 			if (retval > 0) {
 				struct stream_info *str_info;
+				char __user *dest;
+
 				sst_drv_ctx->stream_cnt++;
 				data->str_id = retval;
 				str_info = &sst_drv_ctx->streams[retval];
 				str_info->src = SST_DRV;
-				retval = copy_to_user(&str_param->stream_id,
-						&retval, sizeof(__u32));
+				dest = (char __user *)arg + offsetof(struct snd_sst_params, stream_id);
+				retval = copy_to_user(dest, &retval, sizeof(__u32));
 				if (retval)
 					retval = -EFAULT;
 			} else {
@@ -866,16 +887,14 @@
 		} else {
 			pr_debug("sst: SET_STREAM_PARAMS recieved!\n");
 			/* allocated set params only */
-			retval = sst_set_stream_param(str_id, str_param);
+			retval = sst_set_stream_param(str_id, &str_param);
 			/* Block the call for reply */
 			if (!retval) {
 				int sfreq = 0, word_size = 0, num_channel = 0;
-				sfreq =	str_param->sparams.uc.pcm_params.sfreq;
-				word_size = str_param->sparams.
-						uc.pcm_params.pcm_wd_sz;
-				num_channel = str_param->
-					sparams.uc.pcm_params.num_chan;
-				if (str_param->ops == STREAM_OPS_CAPTURE) {
+				sfreq =	str_param.sparams.uc.pcm_params.sfreq;
+				word_size = str_param.sparams.uc.pcm_params.pcm_wd_sz;
+				num_channel = str_param.sparams.uc.pcm_params.num_chan;
+				if (str_param.ops == STREAM_OPS_CAPTURE) {
 					sst_drv_ctx->scard_ops->\
 					set_pcm_audio_params(sfreq,
 						word_size, num_channel);
@@ -885,41 +904,39 @@
 		break;
 	}
 	case _IOC_NR(SNDRV_SST_SET_VOL): {
-		struct snd_sst_vol *set_vol;
-		struct snd_sst_vol *rec_vol = (struct snd_sst_vol *)arg;
-		pr_debug("sst: SET_VOLUME recieved for %d!\n",
-				rec_vol->stream_id);
-		if (minor == STREAM_MODULE && rec_vol->stream_id == 0) {
-			pr_debug("sst: invalid operation!\n");
-			retval = -EPERM;
-			break;
-		}
-		set_vol = kzalloc(sizeof(*set_vol), GFP_ATOMIC);
-		if (!set_vol) {
-			pr_debug("sst: mem allocation failed\n");
-			retval = -ENOMEM;
-			break;
-		}
-		if (copy_from_user(set_vol, rec_vol, sizeof(*set_vol))) {
+		struct snd_sst_vol set_vol;
+
+		if (copy_from_user(&set_vol, (void __user *)arg,
+				sizeof(set_vol))) {
 			pr_debug("sst: copy failed\n");
 			retval = -EFAULT;
 			break;
 		}
-		retval = sst_set_vol(set_vol);
-		kfree(set_vol);
-		break;
-	}
-	case _IOC_NR(SNDRV_SST_GET_VOL): {
-		struct snd_sst_vol *rec_vol = (struct snd_sst_vol *)arg;
-		struct snd_sst_vol get_vol;
-		pr_debug("sst: IOCTL_GET_VOLUME recieved for stream = %d!\n",
-				rec_vol->stream_id);
-		if (minor == STREAM_MODULE && rec_vol->stream_id == 0) {
+		pr_debug("sst: SET_VOLUME received for %d!\n",
+				set_vol.stream_id);
+		if (minor == STREAM_MODULE && set_vol.stream_id == 0) {
 			pr_debug("sst: invalid operation!\n");
 			retval = -EPERM;
 			break;
 		}
-		get_vol.stream_id = rec_vol->stream_id;
+		retval = sst_set_vol(&set_vol);
+		break;
+	}
+	case _IOC_NR(SNDRV_SST_GET_VOL): {
+		struct snd_sst_vol get_vol;
+
+		if (copy_from_user(&get_vol, (void __user *)arg,
+				sizeof(get_vol))) {
+			retval = -EFAULT;
+			break;
+		}
+		pr_debug("sst: IOCTL_GET_VOLUME received for stream = %d!\n",
+				get_vol.stream_id);
+		if (minor == STREAM_MODULE && get_vol.stream_id == 0) {
+			pr_debug("sst: invalid operation!\n");
+			retval = -EPERM;
+			break;
+		}
 		retval = sst_get_vol(&get_vol);
 		if (retval) {
 			retval = -EIO;
@@ -928,7 +945,7 @@
 		pr_debug("sst: id:%d\n, vol:%d, ramp_dur:%d, ramp_type:%d\n",
 				get_vol.stream_id, get_vol.volume,
 				get_vol.ramp_duration, get_vol.ramp_type);
-		if (copy_to_user((struct snd_sst_vol *)arg,
+		if (copy_to_user((struct snd_sst_vol __user *)arg,
 				&get_vol, sizeof(get_vol))) {
 			retval = -EFAULT;
 			break;
@@ -938,25 +955,20 @@
 	}
 
 	case _IOC_NR(SNDRV_SST_MUTE): {
-		struct snd_sst_mute *set_mute;
-		struct snd_sst_vol *rec_mute = (struct snd_sst_vol *)arg;
-		pr_debug("sst: SNDRV_SST_SET_VOLUME recieved for %d!\n",
-			rec_mute->stream_id);
-		if (minor == STREAM_MODULE && rec_mute->stream_id == 0) {
-			retval = -EPERM;
-			break;
-		}
-		set_mute = kzalloc(sizeof(*set_mute), GFP_ATOMIC);
-		if (!set_mute) {
-			retval = -ENOMEM;
-			break;
-		}
-		if (copy_from_user(set_mute, rec_mute, sizeof(*set_mute))) {
+		struct snd_sst_mute set_mute;
+
+		if (copy_from_user(&set_mute, (void __user *)arg,
+				sizeof(set_mute))) {
 			retval = -EFAULT;
 			break;
 		}
-		retval = sst_set_mute(set_mute);
-		kfree(set_mute);
+		pr_debug("sst: SNDRV_SST_MUTE received for %d!\n",
+			set_mute.stream_id);
+		if (minor == STREAM_MODULE && set_mute.stream_id == 0) {
+			retval = -EPERM;
+			break;
+		}
+		retval = sst_set_mute(&set_mute);
 		break;
 	}
 	case _IOC_NR(SNDRV_SST_STREAM_GET_PARAMS): {
@@ -973,7 +985,7 @@
 			retval = -EIO;
 			break;
 		}
-		if (copy_to_user((struct snd_sst_get_stream_params *)arg,
+		if (copy_to_user((struct snd_sst_get_stream_params __user *)arg,
 					&get_params, sizeof(get_params))) {
 			retval = -EFAULT;
 			break;
@@ -983,16 +995,22 @@
 	}
 
 	case _IOC_NR(SNDRV_SST_MMAP_PLAY):
-	case _IOC_NR(SNDRV_SST_MMAP_CAPTURE):
+	case _IOC_NR(SNDRV_SST_MMAP_CAPTURE): {
+		struct snd_sst_mmap_buffs mmap_buf;
+
 		pr_debug("sst: SNDRV_SST_MMAP_PLAY/CAPTURE recieved!\n");
 		if (minor != STREAM_MODULE) {
 			retval = -EBADRQC;
 			break;
 		}
-		retval = intel_sst_mmap_play_capture(str_id,
-				(struct snd_sst_mmap_buffs *)arg);
+		if (copy_from_user(&mmap_buf, (void __user *)arg,
+				sizeof(mmap_buf))) {
+			retval = -EFAULT;
+			break;
+		}
+		retval = intel_sst_mmap_play_capture(str_id, &mmap_buf);
 		break;
-
+	}
 	case _IOC_NR(SNDRV_SST_STREAM_DROP):
 		pr_debug("sst: SNDRV_SST_IOCTL_DROP recieved!\n");
 		if (minor != STREAM_MODULE) {
@@ -1003,7 +1021,6 @@
 		break;
 
 	case _IOC_NR(SNDRV_SST_STREAM_GET_TSTAMP): {
-		unsigned long long *ms = (unsigned long long *)arg;
 		struct snd_sst_tstamp tstamp = {0};
 		unsigned long long time, freq, mod;
 
@@ -1013,14 +1030,14 @@
 			break;
 		}
 		memcpy_fromio(&tstamp,
-			((void *)(sst_drv_ctx->mailbox + SST_TIME_STAMP)
-			+(str_id * sizeof(tstamp))),
+			sst_drv_ctx->mailbox + SST_TIME_STAMP + str_id * sizeof(tstamp),
 			sizeof(tstamp));
 		time = tstamp.samples_rendered;
 		freq = (unsigned long long) tstamp.sampling_frequency;
 		time = time * 1000; /* converting it to ms */
 		mod = do_div(time, freq);
-		if (copy_to_user(ms, &time, sizeof(*ms)))
+		if (copy_to_user((void __user *)arg, &time,
+				sizeof(unsigned long long)))
 			retval = -EFAULT;
 		break;
 	}
@@ -1065,92 +1082,118 @@
 	}
 
 	case _IOC_NR(SNDRV_SST_SET_TARGET_DEVICE): {
-		struct snd_sst_target_device *target_device;
+		struct snd_sst_target_device target_device;
 
 		pr_debug("sst: SET_TARGET_DEVICE recieved!\n");
-		target_device =	(struct snd_sst_target_device *)arg;
-		BUG_ON(!target_device);
+		if (copy_from_user(&target_device, (void __user *)arg,
+				sizeof(target_device))) {
+			retval = -EFAULT;
+			break;
+		}
 		if (minor != AM_MODULE) {
 			retval = -EBADRQC;
 			break;
 		}
-		retval = sst_target_device_select(target_device);
+		retval = sst_target_device_select(&target_device);
 		break;
 	}
 
 	case _IOC_NR(SNDRV_SST_DRIVER_INFO): {
-		struct snd_sst_driver_info *info =
-			(struct snd_sst_driver_info *)arg;
+		struct snd_sst_driver_info info;
 
 		pr_debug("sst: SNDRV_SST_DRIVER_INFO recived\n");
-		info->version = SST_VERSION_NUM;
+		info.version = SST_VERSION_NUM;
 		/* hard coding, shud get sumhow later */
-		info->active_pcm_streams = sst_drv_ctx->stream_cnt -
+		info.active_pcm_streams = sst_drv_ctx->stream_cnt -
 						sst_drv_ctx->encoded_cnt;
-		info->active_enc_streams = sst_drv_ctx->encoded_cnt;
-		info->max_pcm_streams = MAX_ACTIVE_STREAM - MAX_ENC_STREAM;
-		info->max_enc_streams = MAX_ENC_STREAM;
-		info->buf_per_stream = sst_drv_ctx->mmap_len;
+		info.active_enc_streams = sst_drv_ctx->encoded_cnt;
+		info.max_pcm_streams = MAX_ACTIVE_STREAM - MAX_ENC_STREAM;
+		info.max_enc_streams = MAX_ENC_STREAM;
+		info.buf_per_stream = sst_drv_ctx->mmap_len;
+		if (copy_to_user((void __user *)arg, &info,
+				sizeof(info)))
+			retval = -EFAULT;
 		break;
 	}
 
 	case _IOC_NR(SNDRV_SST_STREAM_DECODE): {
-		struct snd_sst_dbufs *param =
-				(struct snd_sst_dbufs *)arg, dbufs_local;
-		int i;
+		struct snd_sst_dbufs param;
+		struct snd_sst_dbufs dbufs_local;
 		struct snd_sst_buffs ibufs, obufs;
-		struct snd_sst_buff_entry ibuf_temp[param->ibufs->entries],
-				obuf_temp[param->obufs->entries];
+		struct snd_sst_buff_entry *ibuf_tmp, *obuf_tmp;
+		char __user *dest;
 
 		pr_debug("sst: SNDRV_SST_STREAM_DECODE recived\n");
 		if (minor != STREAM_MODULE) {
 			retval = -EBADRQC;
 			break;
 		}
-		if (!param) {
-			retval = -EINVAL;
+		if (copy_from_user(&param, (void __user *)arg,
+				sizeof(param))) {
+			retval = -EFAULT;
 			break;
 		}
 
-		dbufs_local.input_bytes_consumed = param->input_bytes_consumed;
+		dbufs_local.input_bytes_consumed = param.input_bytes_consumed;
 		dbufs_local.output_bytes_produced =
-					param->output_bytes_produced;
-		dbufs_local.ibufs = &ibufs;
-		dbufs_local.obufs = &obufs;
-		dbufs_local.ibufs->entries = param->ibufs->entries;
-		dbufs_local.ibufs->type = param->ibufs->type;
-		dbufs_local.obufs->entries = param->obufs->entries;
-		dbufs_local.obufs->type = param->obufs->type;
+					param.output_bytes_produced;
 
-		dbufs_local.ibufs->buff_entry = ibuf_temp;
-		for (i = 0; i < dbufs_local.ibufs->entries; i++) {
-			ibuf_temp[i].buffer =
-				param->ibufs->buff_entry[i].buffer;
-			ibuf_temp[i].size =
-				param->ibufs->buff_entry[i].size;
+		if (copy_from_user(&ibufs, (void __user *)param.ibufs, sizeof(ibufs))) {
+			retval = -EFAULT;
+			break;
 		}
-		dbufs_local.obufs->buff_entry = obuf_temp;
-		for (i = 0; i < dbufs_local.obufs->entries; i++) {
-			obuf_temp[i].buffer =
-				param->obufs->buff_entry[i].buffer;
-			obuf_temp[i].size =
-				param->obufs->buff_entry[i].size;
+		if (copy_from_user(&obufs, (void __user *)param.obufs, sizeof(obufs))) {
+			retval = -EFAULT;
+			break;
 		}
+
+		ibuf_tmp = kcalloc(ibufs.entries, sizeof(*ibuf_tmp), GFP_KERNEL);
+		obuf_tmp = kcalloc(obufs.entries, sizeof(*obuf_tmp), GFP_KERNEL);
+		if (!ibuf_tmp || !obuf_tmp) {
+			retval = -ENOMEM;
+			goto free_iobufs;
+		}
+
+		if (copy_from_user(ibuf_tmp, (void __user *)ibufs.buff_entry,
+				ibufs.entries * sizeof(*ibuf_tmp))) {
+			retval = -EFAULT;
+			goto free_iobufs;
+		}
+		ibufs.buff_entry = ibuf_tmp;
+		dbufs_local.ibufs = &ibufs;
+
+		if (copy_from_user(obuf_tmp, (void __user *)obufs.buff_entry,
+				obufs.entries * sizeof(*obuf_tmp))) {
+			retval = -EFAULT;
+			goto free_iobufs;
+		}
+		obufs.buff_entry = obuf_tmp;
+		dbufs_local.obufs = &obufs;
+
 		retval = sst_decode(str_id, &dbufs_local);
-		if (retval)
-			retval =  -EAGAIN;
-		if (copy_to_user(&param->input_bytes_consumed,
+		if (retval) {
+			retval = -EAGAIN;
+			goto free_iobufs;
+		}
+
+		dest = (char __user *)arg + offsetof(struct snd_sst_dbufs, input_bytes_consumed);
+		if (copy_to_user(dest,
 				&dbufs_local.input_bytes_consumed,
 				sizeof(unsigned long long))) {
-			retval =  -EFAULT;
-			break;
+			retval = -EFAULT;
+			goto free_iobufs;
 		}
-		if (copy_to_user(&param->output_bytes_produced,
+
+		dest = (char __user *)arg + offsetof(struct snd_sst_dbufs, output_bytes_produced);
+		if (copy_to_user(dest,
 				&dbufs_local.output_bytes_produced,
 				sizeof(unsigned long long))) {
-			retval =  -EFAULT;
-			break;
+			retval = -EFAULT;
+			goto free_iobufs;
 		}
+free_iobufs:
+		kfree(ibuf_tmp);
+		kfree(obuf_tmp);
 		break;
 	}
 
@@ -1164,7 +1207,7 @@
 		break;
 
 	case _IOC_NR(SNDRV_SST_STREAM_BYTES_DECODED): {
-		unsigned long long *bytes = (unsigned long long *)arg;
+		unsigned long long __user *bytes = (unsigned long long __user *)arg;
 		struct snd_sst_tstamp tstamp = {0};
 
 		pr_debug("sst: STREAM_BYTES_DECODED recieved!\n");
@@ -1173,8 +1216,7 @@
 			break;
 		}
 		memcpy_fromio(&tstamp,
-			((void *)(sst_drv_ctx->mailbox + SST_TIME_STAMP)
-			+(str_id * sizeof(tstamp))),
+			sst_drv_ctx->mailbox + SST_TIME_STAMP + str_id * sizeof(tstamp),
 			sizeof(tstamp));
 		if (copy_to_user(bytes, &tstamp.bytes_processed,
 				sizeof(*bytes)))
@@ -1197,7 +1239,7 @@
 			kfree(fw_info);
 			break;
 		}
-		if (copy_to_user((struct snd_sst_dbufs *)arg,
+		if (copy_to_user((void __user *)arg,
 				fw_info, sizeof(*fw_info))) {
 			kfree(fw_info);
 			retval = -EFAULT;
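
A recurring pattern in the intel_sst conversion above: when only one field of a user-supplied struct has to be written back, the destination is computed with offsetof() against the original __user pointer instead of dereferencing `arg` in the kernel. A sketch with an assumed struct layout (user_params and its field are illustrative, not the driver's):

	#include <linux/types.h>
	#include <linux/stddef.h>
	#include <linux/uaccess.h>

	struct user_params {
		u32 stream_id;
		/* ... further fields the kernel does not touch here ... */
	};

	/* Copy the allocated stream id into the caller's struct without
	 * ever treating arg as a dereferenceable kernel pointer. */
	static int report_stream_id(void __user *arg, u32 id)
	{
		void __user *dest = (char __user *)arg +
				    offsetof(struct user_params, stream_id);

		return copy_to_user(dest, &id, sizeof(id)) ? -EFAULT : 0;
	}
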
diff --git a/drivers/staging/intel_sst/intel_sst_common.h b/drivers/staging/intel_sst/intel_sst_common.h
index 73a98c8..bf0ead7 100644
--- a/drivers/staging/intel_sst/intel_sst_common.h
+++ b/drivers/staging/intel_sst/intel_sst_common.h
@@ -231,8 +231,8 @@
 	spinlock_t          pcm_lock;
 	bool			mmapped;
 	unsigned int		sg_index; /*  current buf Index  */
-	unsigned char		*cur_ptr; /*  Current static bufs  */
-	struct snd_sst_buf_entry *buf_entry;
+	unsigned char __user	*cur_ptr; /*  Current static bufs  */
+	struct snd_sst_buf_entry __user *buf_entry;
 	struct sst_block	data_blk; /* stream ops block */
 	struct sst_block	ctrl_blk; /* stream control cmd block */
 	enum snd_sst_buf_type   buf_type;
diff --git a/drivers/staging/keucr/init.c b/drivers/staging/keucr/init.c
index 1934805..978bf87 100644
--- a/drivers/staging/keucr/init.c
+++ b/drivers/staging/keucr/init.c
@@ -22,7 +22,7 @@
 	int	result;
 	BYTE	MiscReg03 = 0;
 
-	printk("--- Initial Nedia ---\n");
+	printk("--- Init Media ---\n");
 	result = ENE_Read_BYTE(us, REG_CARD_STATUS, &MiscReg03);
 	if (result != USB_STOR_XFER_GOOD)
 	{
@@ -64,7 +64,7 @@
 	struct bulk_cb_wrap *bcb = (struct bulk_cb_wrap *) us->iobuf;
 	int result;
 
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength	= 0x01;
 	bcb->Flags			= 0x80;
@@ -92,7 +92,7 @@
 		return USB_STOR_TRANSPORT_ERROR;
 	}
 
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->Flags = 0x80;
 	bcb->CDB[0] = 0xF2;
@@ -112,7 +112,7 @@
 		return USB_STOR_TRANSPORT_ERROR;
 	}
 
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength	= 0x200;
 	bcb->Flags			= 0x80;
@@ -161,7 +161,7 @@
 		return USB_STOR_TRANSPORT_ERROR;
 	}
 
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength	= 0x200;
 	bcb->Flags			= 0x80;
@@ -219,7 +219,7 @@
 		return USB_STOR_TRANSPORT_ERROR;
 	}
 
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength	= 0x200;
 	bcb->Flags			= 0x80;
@@ -341,7 +341,7 @@
 		break;
 	}
 
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = 0x800;
 	bcb->Flags =0x00;
@@ -433,7 +433,7 @@
 
 	//printk("transport --- ENE_Read_Data\n");
 	// set up the command wrapper
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = length;
 	bcb->Flags =0x80;
@@ -470,7 +470,7 @@
 
 	//printk("transport --- ENE_Write_Data\n");
 	// set up the command wrapper
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = length;
 	bcb->Flags =0x00;
diff --git a/drivers/staging/keucr/ms.c b/drivers/staging/keucr/ms.c
index d4340a9..9a3fdb4 100644
--- a/drivers/staging/keucr/ms.c
+++ b/drivers/staging/keucr/ms.c
@@ -15,7 +15,7 @@
 	if (result != USB_STOR_XFER_GOOD)
 		return USB_STOR_TRANSPORT_ERROR;
 
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = 0x200*len;
 	bcb->Flags			= 0x00;
@@ -53,7 +53,7 @@
 		return USB_STOR_TRANSPORT_ERROR;
 
 	// Read Page Data
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = 0x200;
 	bcb->Flags			= 0x80;
@@ -69,7 +69,7 @@
 		return USB_STOR_TRANSPORT_ERROR;
 
 	// Read Extra Data
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = 0x4;
 	bcb->Flags			= 0x80;
@@ -108,7 +108,7 @@
 	if (result != USB_STOR_XFER_GOOD)
 		return USB_STOR_TRANSPORT_ERROR;
 
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = 0x200;
 	bcb->Flags			= 0x80;
@@ -673,7 +673,7 @@
 	//printk("MS_LibReadExtraBlock --- PhyBlock = %x, PageNum = %x, blen = %x\n", PhyBlock, PageNum, blen);
 
 	// Read Extra Data
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = 0x4 * blen;
 	bcb->Flags			= 0x80;
@@ -700,7 +700,7 @@
 	BYTE	ExtBuf[4];
 
 	//printk("MS_LibReadExtra --- PhyBlock = %x, PageNum = %x\n", PhyBlock, PageNum);
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = 0x4;
 	bcb->Flags			= 0x80;
@@ -807,7 +807,7 @@
 	if (result != USB_STOR_XFER_GOOD)
 		return USB_STOR_TRANSPORT_ERROR;
 
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = 0x4;
 	bcb->Flags			= 0x80;
diff --git a/drivers/staging/keucr/msscsi.c b/drivers/staging/keucr/msscsi.c
index ad0c5c6..cb92d25 100644
--- a/drivers/staging/keucr/msscsi.c
+++ b/drivers/staging/keucr/msscsi.c
@@ -145,7 +145,7 @@
 		}
 
 		// set up the command wrapper
-		memset(bcb, 0, sizeof(bcb));
+		memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 		bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 		bcb->DataTransferLength = blenByte;
 		bcb->Flags  = 0x80;
@@ -193,7 +193,7 @@
 			blkno  = phyblk * 0x20 + PageNum;
 
 			// set up the command wrapper
-			memset(bcb, 0, sizeof(bcb));
+			memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 			bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 			bcb->DataTransferLength = 0x200 * len;
 			bcb->Flags  = 0x80;
@@ -250,7 +250,7 @@
 		}
 
 		// set up the command wrapper
-		memset(bcb, 0, sizeof(bcb));
+		memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 		bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 		bcb->DataTransferLength = blenByte;
 		bcb->Flags  = 0x00;
diff --git a/drivers/staging/keucr/sdscsi.c b/drivers/staging/keucr/sdscsi.c
index 6c332f8..d646507 100644
--- a/drivers/staging/keucr/sdscsi.c
+++ b/drivers/staging/keucr/sdscsi.c
@@ -152,7 +152,7 @@
 		bnByte = bn;
 		
 	// set up the command wrapper
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = blenByte;
 	bcb->Flags  = 0x80;
@@ -192,7 +192,7 @@
 		bnByte = bn;
 
 	// set up the command wrapper
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength = blenByte;
 	bcb->Flags  = 0x00;
diff --git a/drivers/staging/keucr/smilsub.c b/drivers/staging/keucr/smilsub.c
index 844b659..1b52535 100644
--- a/drivers/staging/keucr/smilsub.c
+++ b/drivers/staging/keucr/smilsub.c
@@ -266,7 +266,7 @@
 	addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector;
 
 	// Read sect data
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength	= 0x200;
 	bcb->Flags			= 0x80;
@@ -281,7 +281,7 @@
 		return USB_STOR_TRANSPORT_ERROR;
 
 	// Read redundant
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength	= 0x10;
 	bcb->Flags			= 0x80;
@@ -319,7 +319,7 @@
 	addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector;
 
 	// Read sect data
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength	= 0x200*count;
 	bcb->Flags			= 0x80;
@@ -334,7 +334,7 @@
 		return USB_STOR_TRANSPORT_ERROR;
 
 	// Read redundant
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength	= 0x10;
 	bcb->Flags			= 0x80;
@@ -536,7 +536,7 @@
 	WriteAddr = WriteAddr*(WORD)Ssfdc.MaxSectors;
 
 	// Write sect data
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength	= 0x200*count;
 	bcb->Flags			= 0x00;
@@ -754,7 +754,7 @@
 	addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector;
 
 	// Write sect data
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength	= 0x200;
 	bcb->Flags			= 0x00;
@@ -791,7 +791,7 @@
 	addr=(WORD)Media.Zone*Ssfdc.MaxBlocks+Media.PhyBlock;
 	addr=addr*(WORD)Ssfdc.MaxSectors;
 
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength	= 0x200;
 	bcb->Flags			= 0x80;
@@ -827,7 +827,7 @@
 	addr = (WORD)Media.Zone*Ssfdc.MaxBlocks+Media.PhyBlock;
 	addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector;
 
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength	= 0x10;
 	bcb->Flags			= 0x80;
@@ -870,7 +870,7 @@
 	addr = (WORD)Media.Zone*Ssfdc.MaxBlocks+Media.PhyBlock;
 	addr = addr*(WORD)Ssfdc.MaxSectors+Media.Sector;
 
-	memset(bcb, 0, sizeof(bcb));
+	memset(bcb, 0, sizeof(struct bulk_cb_wrap));
 	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
 	bcb->DataTransferLength	= 0x10;
 	bcb->Flags			= 0x80;
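
Every keucr memset hunk above fixes one instance of the same bug: `sizeof(bcb)` is the size of the pointer variable (4 or 8 bytes), so only the first few bytes of the 31-byte command wrapper were zeroed and the rest carried stale data. A standalone illustration of the bug class:

	#include <string.h>

	struct wrapper {		/* stand-in for struct bulk_cb_wrap */
		unsigned char raw[31];
	};

	static void clear_wrapper(struct wrapper *bcb)
	{
		memset(bcb, 0, sizeof(bcb));	/* BUG: zeroes sizeof(pointer) bytes */
		memset(bcb, 0, sizeof(*bcb));	/* correct: zeroes the whole struct */
	}
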
diff --git a/drivers/staging/keucr/transport.c b/drivers/staging/keucr/transport.c
index fd98df6..111160c 100644
--- a/drivers/staging/keucr/transport.c
+++ b/drivers/staging/keucr/transport.c
@@ -40,7 +40,7 @@
 	us->current_urb->error_count = 0;
 	us->current_urb->status = 0;
 
-//	us->current_urb->transfer_flags = URB_NO_SETUP_DMA_MAP;
+	us->current_urb->transfer_flags = 0;
 	if (us->current_urb->transfer_buffer == us->iobuf)
 		us->current_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
 	us->current_urb->transfer_dma = us->iobuf_dma;
diff --git a/drivers/staging/rt2860/common/cmm_aes.c b/drivers/staging/rt2860/common/cmm_aes.c
index 1d159ff..a99879b 100644
--- a/drivers/staging/rt2860/common/cmm_aes.c
+++ b/drivers/staging/rt2860/common/cmm_aes.c
@@ -330,8 +330,6 @@
 	for (i = 8; i < 14; i++)
 		mic_iv[i] = pn_vector[13 - i];	/* mic_iv[8:13] = PN[5:0] */
 #endif
-	i = (payload_length / 256);
-	i = (payload_length % 256);
 	mic_iv[14] = (unsigned char)(payload_length / 256);
 	mic_iv[15] = (unsigned char)(payload_length % 256);
 
diff --git a/drivers/staging/rt2860/usb_main_dev.c b/drivers/staging/rt2860/usb_main_dev.c
index ebf9074..ddacfc6 100644
--- a/drivers/staging/rt2860/usb_main_dev.c
+++ b/drivers/staging/rt2860/usb_main_dev.c
@@ -65,6 +65,7 @@
 	{USB_DEVICE(0x14B2, 0x3C07)},	/* AL */
 	{USB_DEVICE(0x050D, 0x8053)},	/* Belkin */
 	{USB_DEVICE(0x050D, 0x825B)},	/* Belkin */
+	{USB_DEVICE(0x050D, 0x935A)},	/* Belkin F6D4050 v1 */
 	{USB_DEVICE(0x050D, 0x935B)},	/* Belkin F6D4050 v2 */
 	{USB_DEVICE(0x14B2, 0x3C23)},	/* Airlink */
 	{USB_DEVICE(0x14B2, 0x3C27)},	/* Airlink */
diff --git a/drivers/staging/rtl8192e/r8192E_core.c b/drivers/staging/rtl8192e/r8192E_core.c
index a202194..b1786dc 100644
--- a/drivers/staging/rtl8192e/r8192E_core.c
+++ b/drivers/staging/rtl8192e/r8192E_core.c
@@ -5829,6 +5829,9 @@
                     }
                 }
 
+		pci_unmap_single(priv->pdev, *((dma_addr_t *) skb->cb),
+			priv->rxbuffersize, PCI_DMA_FROMDEVICE);
+
                 skb = new_skb;
                 priv->rx_buf[priv->rx_idx] = skb;
                 *((dma_addr_t *) skb->cb) = pci_map_single(priv->pdev, skb_tail_pointer(skb), priv->rxbuffersize, PCI_DMA_FROMDEVICE);
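
The rtl8192e hunk above plugs a streaming-DMA leak: the receive slot's skb was replaced with a fresh one without unmapping the old buffer, so one mapping leaked per received frame. The general shape of the fix, with illustrative names rather than the driver's:

	#include <linux/pci.h>
	#include <linux/skbuff.h>
	#include <linux/netdevice.h>

	/* Unmap the old receive buffer before its skb is handed up the
	 * stack, then map the replacement into the ring slot. */
	static void refill_rx_slot(struct pci_dev *pdev, struct sk_buff *old_skb,
				   dma_addr_t old_dma, struct sk_buff *new_skb,
				   unsigned int bufsize, dma_addr_t *new_dma)
	{
		pci_unmap_single(pdev, old_dma, bufsize, PCI_DMA_FROMDEVICE);
		netif_rx(old_skb);
		*new_dma = pci_map_single(pdev, skb_tail_pointer(new_skb),
					  bufsize, PCI_DMA_FROMDEVICE);
	}
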
diff --git a/drivers/staging/solo6x10/solo6010-v4l2-enc.c b/drivers/staging/solo6x10/solo6010-v4l2-enc.c
index bbf3d9c..097e82b 100644
--- a/drivers/staging/solo6x10/solo6010-v4l2-enc.c
+++ b/drivers/staging/solo6x10/solo6010-v4l2-enc.c
@@ -766,7 +766,7 @@
 				    &solo_enc->lock,
 				    V4L2_BUF_TYPE_VIDEO_CAPTURE,
 				    V4L2_FIELD_INTERLACED,
-				    sizeof(struct videobuf_buffer), fh);
+				    sizeof(struct videobuf_buffer), fh, NULL);
 
 	spin_unlock(&solo_enc->lock);
 
diff --git a/drivers/staging/solo6x10/solo6010-v4l2.c b/drivers/staging/solo6x10/solo6010-v4l2.c
index 9731fa0..6ffd21d 100644
--- a/drivers/staging/solo6x10/solo6010-v4l2.c
+++ b/drivers/staging/solo6x10/solo6010-v4l2.c
@@ -437,7 +437,7 @@
 				    &solo_dev->pdev->dev, &fh->slock,
 				    V4L2_BUF_TYPE_VIDEO_CAPTURE,
 				    SOLO_DISP_PIX_FIELD,
-				    sizeof(struct videobuf_buffer), fh);
+				    sizeof(struct videobuf_buffer), fh, NULL);
 
 	return 0;
 }
diff --git a/drivers/staging/stradis/stradis.c b/drivers/staging/stradis/stradis.c
index a057824..807dd7e 100644
--- a/drivers/staging/stradis/stradis.c
+++ b/drivers/staging/stradis/stradis.c
@@ -1286,6 +1286,7 @@
 	case VIDIOCGCAP:
 		{
 			struct video_capability b;
+			memset(&b, 0, sizeof(b));
 			strcpy(b.name, saa->video_dev.name);
 			b.type = VID_TYPE_CAPTURE | VID_TYPE_OVERLAY |
 				VID_TYPE_CLIPPING | VID_TYPE_FRAMERAM |
@@ -1416,6 +1417,7 @@
 	case VIDIOCGWIN:
 		{
 			struct video_window vw;
+			memset(&vw, 0, sizeof(vw));
 			vw.x = saa->win.x;
 			vw.y = saa->win.y;
 			vw.width = saa->win.width;
@@ -1448,6 +1450,7 @@
 	case VIDIOCGFBUF:
 		{
 			struct video_buffer v;
+			memset(&v, 0, sizeof(v));
 			v.base = (void *)saa->win.vidadr;
 			v.height = saa->win.sheight;
 			v.width = saa->win.swidth;
@@ -1492,6 +1495,7 @@
 	case VIDIOCGAUDIO:
 		{
 			struct video_audio v;
+			memset(&v, 0, sizeof(v));
 			v = saa->audio_dev;
 			v.flags &= ~(VIDEO_AUDIO_MUTE | VIDEO_AUDIO_MUTABLE);
 			v.flags |= VIDEO_AUDIO_MUTABLE | VIDEO_AUDIO_VOLUME;
@@ -1534,6 +1538,7 @@
 	case VIDIOCGUNIT:
 		{
 			struct video_unit vu;
+			memset(&vu, 0, sizeof(vu));
 			vu.video = saa->video_dev.minor;
 			vu.vbi = VIDEO_NO_UNIT;
 			vu.radio = VIDEO_NO_UNIT;
@@ -1888,6 +1893,7 @@
 
 	saa->user++;
 	if (saa->user > 1) {
+		saa->user--;
 		unlock_kernel();
 		return 0;	/* device open already, don't reset */
 	}
@@ -2000,10 +2006,13 @@
 	if (retval < 0) {
 		dev_err(&pdev->dev, "%d: error in registering video device!\n",
 			num);
-		goto errio;
+		goto errirq;
 	}
 
 	return 0;
+
+errirq:
+	free_irq(saa->irq, saa);
 errio:
 	iounmap(saa->saa7146_mem);
 err:
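
The stradis memset() additions above close a kernel-stack information leak: the V4L1 GET ioctls copy whole structs built on the stack out to userspace, and any padding or field the handler never set would otherwise carry old stack contents. The shape of the fix, with a hypothetical reply struct:

	#include <linux/string.h>
	#include <linux/uaccess.h>

	struct caps_reply {		/* illustrative, not the V4L1 struct */
		char name[32];
		int type;
	};

	static long get_caps(void __user *argp)
	{
		struct caps_reply r;

		memset(&r, 0, sizeof(r));	/* no stale stack bytes escape */
		strcpy(r.name, "example");
		r.type = 1;
		return copy_to_user(argp, &r, sizeof(r)) ? -EFAULT : 0;
	}
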
diff --git a/drivers/staging/tidspbridge/Kconfig b/drivers/staging/tidspbridge/Kconfig
index ff64d46..93de4f2 100644
--- a/drivers/staging/tidspbridge/Kconfig
+++ b/drivers/staging/tidspbridge/Kconfig
@@ -6,7 +6,6 @@
 	tristate "DSP Bridge driver"
 	depends on ARCH_OMAP3
 	select OMAP_MBOX_FWK
-	select OMAP_IOMMU
 	help
 	  DSP/BIOS Bridge is designed for platforms that contain a GPP and
 	  one or more attached DSPs.  The GPP is considered the master or
diff --git a/drivers/staging/tidspbridge/Makefile b/drivers/staging/tidspbridge/Makefile
index 50decc2..41c644c 100644
--- a/drivers/staging/tidspbridge/Makefile
+++ b/drivers/staging/tidspbridge/Makefile
@@ -2,18 +2,19 @@
 
 libgen = gen/gb.o gen/gs.o gen/gh.o gen/uuidutil.o
 libcore = core/chnl_sm.o core/msg_sm.o core/io_sm.o core/tiomap3430.o \
-		core/tiomap3430_pwr.o core/tiomap_io.o core/dsp-mmu.o \
+		core/tiomap3430_pwr.o core/tiomap_io.o \
 		core/ue_deh.o core/wdt.o core/dsp-clock.o core/sync.o
 libpmgr = pmgr/chnl.o pmgr/io.o pmgr/msg.o pmgr/cod.o pmgr/dev.o pmgr/dspapi.o \
-		pmgr/cmm.o pmgr/dbll.o
+		pmgr/dmm.o pmgr/cmm.o pmgr/dbll.o
 librmgr = rmgr/dbdcd.o rmgr/disp.o rmgr/drv.o rmgr/mgr.o rmgr/node.o \
 		rmgr/proc.o rmgr/pwr.o rmgr/rmm.o rmgr/strm.o rmgr/dspdrv.o \
 		rmgr/nldr.o rmgr/drv_interface.o
 libdload = dynload/cload.o dynload/getsection.o dynload/reloc.o \
 		 dynload/tramp.o
+libhw = hw/hw_mmu.o
 
 bridgedriver-y := $(libgen) $(libservices) $(libcore) $(libpmgr) $(librmgr) \
-			$(libdload)
+			$(libdload) $(libhw)
 
 #Machine dependent
 ccflags-y += -D_TI_ -D_DB_TIOMAP -DTMS32060 \
diff --git a/drivers/staging/tidspbridge/core/_deh.h b/drivers/staging/tidspbridge/core/_deh.h
index 8ae2633..16723cd 100644
--- a/drivers/staging/tidspbridge/core/_deh.h
+++ b/drivers/staging/tidspbridge/core/_deh.h
@@ -27,8 +27,9 @@
 struct deh_mgr {
 	struct bridge_dev_context *hbridge_context;	/* Bridge context. */
 	struct ntfy_object *ntfy_obj;	/* NTFY object */
-};
 
-int mmu_fault_isr(struct iommu *mmu);
+	/* MMU Fault DPC */
+	struct tasklet_struct dpc_tasklet;
+};
 
 #endif /* _DEH_ */
diff --git a/drivers/staging/tidspbridge/core/_tiomap.h b/drivers/staging/tidspbridge/core/_tiomap.h
index e0a801c..1c1f157 100644
--- a/drivers/staging/tidspbridge/core/_tiomap.h
+++ b/drivers/staging/tidspbridge/core/_tiomap.h
@@ -23,8 +23,8 @@
 #include <plat/clockdomain.h>
 #include <mach-omap2/prm-regbits-34xx.h>
 #include <mach-omap2/cm-regbits-34xx.h>
-#include <dspbridge/dsp-mmu.h>
 #include <dspbridge/devdefs.h>
+#include <hw_defs.h>
 #include <dspbridge/dspioctl.h>	/* for bridge_ioctl_extproc defn */
 #include <dspbridge/sync.h>
 #include <dspbridge/clk.h>
@@ -306,18 +306,6 @@
 
 #define CLEAR_BIT_INDEX(reg, index)   (reg &= ~(1 << (index)))
 
-struct shm_segs {
-	u32 seg0_da;
-	u32 seg0_pa;
-	u32 seg0_va;
-	u32 seg0_size;
-	u32 seg1_da;
-	u32 seg1_pa;
-	u32 seg1_va;
-	u32 seg1_size;
-};
-
-
 /* This Bridge driver's device context: */
 struct bridge_dev_context {
 	struct dev_object *hdev_obj;	/* Handle to Bridge device object. */
@@ -328,6 +316,7 @@
 	 */
 	u32 dw_dsp_ext_base_addr;	/* See the comment above */
 	u32 dw_api_reg_base;	/* API mem map'd registers */
+	void __iomem *dw_dsp_mmu_base;	/* DSP MMU Mapped registers */
 	u32 dw_api_clk_base;	/* CLK Registers */
 	u32 dw_dsp_clk_m2_base;	/* DSP Clock Module m2 */
 	u32 dw_public_rhea;	/* Pub Rhea */
@@ -339,8 +328,7 @@
 	u32 dw_internal_size;	/* Internal memory size */
 
 	struct omap_mbox *mbox;		/* Mail box handle */
-	struct iommu *dsp_mmu;      /* iommu for iva2 handler */
-	struct shm_segs sh_s;
+
 	struct cfg_hostres *resources;	/* Host Resources */
 
 	/*
@@ -353,6 +341,7 @@
 
 	/* TC Settings */
 	bool tc_word_swap_on;	/* Traffic Controller Word Swap */
+	struct pg_table_attrs *pt_attrs;
 	u32 dsp_per_clks;
 };
 
diff --git a/drivers/staging/tidspbridge/core/dsp-mmu.c b/drivers/staging/tidspbridge/core/dsp-mmu.c
deleted file mode 100644
index 983c95a..0000000
--- a/drivers/staging/tidspbridge/core/dsp-mmu.c
+++ /dev/null
@@ -1,317 +0,0 @@
-/*
- * dsp-mmu.c
- *
- * DSP-BIOS Bridge driver support functions for TI OMAP processors.
- *
- * DSP iommu.
- *
- * Copyright (C) 2010 Texas Instruments, Inc.
- *
- * This package is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
- * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
- */
-
-#include <dspbridge/host_os.h>
-#include <plat/dmtimer.h>
-#include <dspbridge/dbdefs.h>
-#include <dspbridge/dev.h>
-#include <dspbridge/io_sm.h>
-#include <dspbridge/dspdeh.h>
-#include "_tiomap.h"
-
-#include <dspbridge/dsp-mmu.h>
-
-#define MMU_CNTL_TWL_EN		(1 << 2)
-
-static struct tasklet_struct mmu_tasklet;
-
-#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
-static void mmu_fault_print_stack(struct bridge_dev_context *dev_context)
-{
-	void *dummy_addr;
-	u32 fa, tmp;
-	struct iotlb_entry e;
-	struct iommu *mmu = dev_context->dsp_mmu;
-	dummy_addr = (void *)__get_free_page(GFP_ATOMIC);
-
-	/*
-	 * Before acking the MMU fault, let's make sure MMU can only
-	 * access entry #0. Then add a new entry so that the DSP OS
-	 * can continue in order to dump the stack.
-	 */
-	tmp = iommu_read_reg(mmu, MMU_CNTL);
-	tmp &= ~MMU_CNTL_TWL_EN;
-	iommu_write_reg(mmu, tmp, MMU_CNTL);
-	fa = iommu_read_reg(mmu, MMU_FAULT_AD);
-	e.da = fa & PAGE_MASK;
-	e.pa = virt_to_phys(dummy_addr);
-	e.valid = 1;
-	e.prsvd = 1;
-	e.pgsz = IOVMF_PGSZ_4K & MMU_CAM_PGSZ_MASK;
-	e.endian = MMU_RAM_ENDIAN_LITTLE;
-	e.elsz = MMU_RAM_ELSZ_32;
-	e.mixed = 0;
-
-	load_iotlb_entry(mmu, &e);
-
-	dsp_clk_enable(DSP_CLK_GPT8);
-
-	dsp_gpt_wait_overflow(DSP_CLK_GPT8, 0xfffffffe);
-
-	/* Clear MMU interrupt */
-	tmp = iommu_read_reg(mmu, MMU_IRQSTATUS);
-	iommu_write_reg(mmu, tmp, MMU_IRQSTATUS);
-
-	dump_dsp_stack(dev_context);
-	dsp_clk_disable(DSP_CLK_GPT8);
-
-	iopgtable_clear_entry(mmu, fa);
-	free_page((unsigned long)dummy_addr);
-}
-#endif
-
-
-static void fault_tasklet(unsigned long data)
-{
-	struct iommu *mmu = (struct iommu *)data;
-	struct bridge_dev_context *dev_ctx;
-	struct deh_mgr *dm;
-	u32 fa;
-	dev_get_deh_mgr(dev_get_first(), &dm);
-	dev_get_bridge_context(dev_get_first(), &dev_ctx);
-
-	if (!dm || !dev_ctx)
-		return;
-
-	fa = iommu_read_reg(mmu, MMU_FAULT_AD);
-
-#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
-	print_dsp_trace_buffer(dev_ctx);
-	dump_dl_modules(dev_ctx);
-	mmu_fault_print_stack(dev_ctx);
-#endif
-
-	bridge_deh_notify(dm, DSP_MMUFAULT, fa);
-}
-
-/*
- *  ======== mmu_fault_isr ========
- *      ISR to be triggered by a DSP MMU fault interrupt.
- */
-static int mmu_fault_callback(struct iommu *mmu)
-{
-	if (!mmu)
-		return -EPERM;
-
-	iommu_write_reg(mmu, 0, MMU_IRQENABLE);
-	tasklet_schedule(&mmu_tasklet);
-	return 0;
-}
-
-/**
- * dsp_mmu_init() - initialize dsp_mmu module and returns a handle
- *
- * This function initialize dsp mmu module and returns a struct iommu
- * handle to use it for dsp maps.
- *
- */
-struct iommu *dsp_mmu_init()
-{
-	struct iommu *mmu;
-
-	mmu = iommu_get("iva2");
-
-	if (!IS_ERR(mmu)) {
-		tasklet_init(&mmu_tasklet, fault_tasklet, (unsigned long)mmu);
-		mmu->isr = mmu_fault_callback;
-	}
-
-	return mmu;
-}
-
-/**
- * dsp_mmu_exit() - destroy dsp mmu module
- * @mmu:	Pointer to iommu handle.
- *
- * This function destroys dsp mmu module.
- *
- */
-void dsp_mmu_exit(struct iommu *mmu)
-{
-	if (mmu)
-		iommu_put(mmu);
-	tasklet_kill(&mmu_tasklet);
-}
-
-/**
- * user_va2_pa() - get physical address from userspace address.
- * @mm:		mm_struct Pointer of the process.
- * @address:	Virtual user space address.
- *
- */
-static u32 user_va2_pa(struct mm_struct *mm, u32 address)
-{
-	pgd_t *pgd;
-	pmd_t *pmd;
-	pte_t *ptep, pte;
-
-	pgd = pgd_offset(mm, address);
-	if (!(pgd_none(*pgd) || pgd_bad(*pgd))) {
-		pmd = pmd_offset(pgd, address);
-		if (!(pmd_none(*pmd) || pmd_bad(*pmd))) {
-			ptep = pte_offset_map(pmd, address);
-			if (ptep) {
-				pte = *ptep;
-				if (pte_present(pte))
-					return pte & PAGE_MASK;
-			}
-		}
-	}
-
-	return 0;
-}
-
-/**
- * get_io_pages() - pin and get pages of io user's buffer.
- * @mm:		mm_struct Pointer of the process.
- * @uva:		Virtual user space address.
- * @pages	Pages to be pined.
- * @usr_pgs	struct page array pointer where the user pages will be stored
- *
- */
-static int get_io_pages(struct mm_struct *mm, u32 uva, unsigned pages,
-						struct page **usr_pgs)
-{
-	u32 pa;
-	int i;
-	struct page *pg;
-
-	for (i = 0; i < pages; i++) {
-		pa = user_va2_pa(mm, uva);
-
-		if (!pfn_valid(__phys_to_pfn(pa)))
-			break;
-
-		pg = phys_to_page(pa);
-		usr_pgs[i] = pg;
-		get_page(pg);
-	}
-	return i;
-}
-
-/**
- * user_to_dsp_map() - maps user to dsp virtual address
- * @mmu:	Pointer to iommu handle.
- * @uva:		Virtual user space address.
- * @da		DSP address
- * @size		Buffer size to map.
- * @usr_pgs	struct page array pointer where the user pages will be stored
- *
- * This function maps a user space buffer into DSP virtual address.
- *
- */
-u32 user_to_dsp_map(struct iommu *mmu, u32 uva, u32 da, u32 size,
-				struct page **usr_pgs)
-{
-	int res, w;
-	unsigned pages;
-	int i;
-	struct vm_area_struct *vma;
-	struct mm_struct *mm = current->mm;
-	struct sg_table *sgt;
-	struct scatterlist *sg;
-
-	if (!size || !usr_pgs)
-		return -EINVAL;
-
-	pages = size / PG_SIZE4K;
-
-	down_read(&mm->mmap_sem);
-	vma = find_vma(mm, uva);
-	while (vma && (uva + size > vma->vm_end))
-		vma = find_vma(mm, vma->vm_end + 1);
-
-	if (!vma) {
-		pr_err("%s: Failed to get VMA region for 0x%x (%d)\n",
-						__func__, uva, size);
-		up_read(&mm->mmap_sem);
-		return -EINVAL;
-	}
-	if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
-		w = 1;
-
-	if (vma->vm_flags & VM_IO)
-		i = get_io_pages(mm, uva, pages, usr_pgs);
-	else
-		i = get_user_pages(current, mm, uva, pages, w, 1,
-							usr_pgs, NULL);
-	up_read(&mm->mmap_sem);
-
-	if (i < 0)
-		return i;
-
-	if (i < pages) {
-		res = -EFAULT;
-		goto err_pages;
-	}
-
-	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
-	if (!sgt) {
-		res = -ENOMEM;
-		goto err_pages;
-	}
-
-	res = sg_alloc_table(sgt, pages, GFP_KERNEL);
-
-	if (res < 0)
-		goto err_sg;
-
-	for_each_sg(sgt->sgl, sg, sgt->nents, i)
-		sg_set_page(sg, usr_pgs[i], PAGE_SIZE, 0);
-
-	da = iommu_vmap(mmu, da, sgt, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
-
-	if (!IS_ERR_VALUE(da))
-		return da;
-	res = (int)da;
-
-	sg_free_table(sgt);
-err_sg:
-	kfree(sgt);
-	i = pages;
-err_pages:
-	while (i--)
-		put_page(usr_pgs[i]);
-	return res;
-}
-
-/**
- * user_to_dsp_unmap() - unmaps DSP virtual buffer.
- * @mmu:	Pointer to iommu handle.
- * @da		DSP address
- *
- * This function unmaps a user space buffer into DSP virtual address.
- *
- */
-int user_to_dsp_unmap(struct iommu *mmu, u32 da)
-{
-	unsigned i;
-	struct sg_table *sgt;
-	struct scatterlist *sg;
-
-	sgt = iommu_vunmap(mmu, da);
-	if (!sgt)
-		return -EFAULT;
-
-	for_each_sg(sgt->sgl, sg, sgt->nents, i)
-		put_page(sg_page(sg));
-	sg_free_table(sgt);
-	kfree(sgt);
-
-	return 0;
-}
diff --git a/drivers/staging/tidspbridge/core/io_sm.c b/drivers/staging/tidspbridge/core/io_sm.c
index 194bada..5718645 100644
--- a/drivers/staging/tidspbridge/core/io_sm.c
+++ b/drivers/staging/tidspbridge/core/io_sm.c
@@ -39,6 +39,10 @@
 #include <dspbridge/ntfy.h>
 #include <dspbridge/sync.h>
 
+/* Hardware Abstraction Layer */
+#include <hw_defs.h>
+#include <hw_mmu.h>
+
 /* Bridge Driver */
 #include <dspbridge/dspdeh.h>
 #include <dspbridge/dspio.h>
@@ -287,7 +291,6 @@
 	struct cod_manager *cod_man;
 	struct chnl_mgr *hchnl_mgr;
 	struct msg_mgr *hmsg_mgr;
-	struct shm_segs *sm_sg;
 	u32 ul_shm_base;
 	u32 ul_shm_base_offset;
 	u32 ul_shm_limit;
@@ -310,9 +313,18 @@
 	struct bridge_ioctl_extproc ae_proc[BRDIOCTL_NUMOFMMUTLB];
 	struct cfg_hostres *host_res;
 	struct bridge_dev_context *pbridge_context;
+	u32 map_attrs;
 	u32 shm0_end;
 	u32 ul_dyn_ext_base;
 	u32 ul_seg1_size = 0;
+	u32 pa_curr = 0;
+	u32 va_curr = 0;
+	u32 gpp_va_curr = 0;
+	u32 num_bytes = 0;
+	u32 all_bits = 0;
+	u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
+		HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
+	};
 
 	status = dev_get_bridge_context(hio_mgr->hdev_obj, &pbridge_context);
 	if (!pbridge_context) {
@@ -325,8 +337,6 @@
 		status = -EFAULT;
 		goto func_end;
 	}
-	sm_sg = &pbridge_context->sh_s;
-
 	status = dev_get_cod_mgr(hio_mgr->hdev_obj, &cod_man);
 	if (!cod_man) {
 		status = -EFAULT;
@@ -461,14 +471,129 @@
 	if (status)
 		goto func_end;
 
-	sm_sg->seg1_pa = ul_gpp_pa;
-	sm_sg->seg1_da = ul_dyn_ext_base;
-	sm_sg->seg1_va = ul_gpp_va;
-	sm_sg->seg1_size = ul_seg1_size;
-	sm_sg->seg0_pa = ul_gpp_pa + ul_pad_size + ul_seg1_size;
-	sm_sg->seg0_da = ul_dsp_va;
-	sm_sg->seg0_va = ul_gpp_va + ul_pad_size + ul_seg1_size;
-	sm_sg->seg0_size = ul_seg_size;
+	pa_curr = ul_gpp_pa;
+	va_curr = ul_dyn_ext_base * hio_mgr->word_size;
+	gpp_va_curr = ul_gpp_va;
+	num_bytes = ul_seg1_size;
+
+	/*
+	 * Try to fit into TLB entries. If not possible, push them to page
+	 * tables. It is quite possible that if sections are not on
+	 * bigger page boundary, we may end up making several small pages.
+	 * So, push them onto page tables, if that is the case.
+	 */
+	map_attrs = 0;
+	map_attrs |= DSP_MAPLITTLEENDIAN;
+	map_attrs |= DSP_MAPPHYSICALADDR;
+	map_attrs |= DSP_MAPELEMSIZE32;
+	map_attrs |= DSP_MAPDONOTLOCK;
+
+	while (num_bytes) {
+		/*
+		 * To find the max. page size with which both PA & VA are
+		 * aligned.
+		 */
+		all_bits = pa_curr | va_curr;
+		dev_dbg(bridge, "all_bits %x, pa_curr %x, va_curr %x, "
+			"num_bytes %x\n", all_bits, pa_curr, va_curr,
+			num_bytes);
+		for (i = 0; i < 4; i++) {
+			if ((num_bytes >= page_size[i]) && ((all_bits &
+							     (page_size[i] -
+							      1)) == 0)) {
+				status =
+				    hio_mgr->intf_fxns->
+				    pfn_brd_mem_map(hio_mgr->hbridge_context,
+						    pa_curr, va_curr,
+						    page_size[i], map_attrs,
+						    NULL);
+				if (status)
+					goto func_end;
+				pa_curr += page_size[i];
+				va_curr += page_size[i];
+				gpp_va_curr += page_size[i];
+				num_bytes -= page_size[i];
+				/*
+				 * Don't try smaller sizes. Hopefully we have
+				 * reached an address aligned to a bigger page
+				 * size.
+				 */
+				break;
+			}
+		}
+	}
+	pa_curr += ul_pad_size;
+	va_curr += ul_pad_size;
+	gpp_va_curr += ul_pad_size;
+
+	/* Configure the TLB entries for the next cacheable segment */
+	num_bytes = ul_seg_size;
+	va_curr = ul_dsp_va * hio_mgr->word_size;
+	while (num_bytes) {
+		/*
+		 * To find the max. page size with which both PA & VA are
+		 * aligned.
+		 */
+		all_bits = pa_curr | va_curr;
+		dev_dbg(bridge, "all_bits for Seg1 %x, pa_curr %x, "
+			"va_curr %x, num_bytes %x\n", all_bits, pa_curr,
+			va_curr, num_bytes);
+		for (i = 0; i < 4; i++) {
+			if (!(num_bytes >= page_size[i]) ||
+			    !((all_bits & (page_size[i] - 1)) == 0))
+				continue;
+			if (ndx < MAX_LOCK_TLB_ENTRIES) {
+				/*
+				 * This is the physical address written to
+				 * DSP MMU.
+				 */
+				ae_proc[ndx].ul_gpp_pa = pa_curr;
+				/*
+				 * This is the virtual uncached ioremapped
+				 * address!!!
+				 */
+				ae_proc[ndx].ul_gpp_va = gpp_va_curr;
+				ae_proc[ndx].ul_dsp_va =
+				    va_curr / hio_mgr->word_size;
+				ae_proc[ndx].ul_size = page_size[i];
+				ae_proc[ndx].endianism = HW_LITTLE_ENDIAN;
+				ae_proc[ndx].elem_size = HW_ELEM_SIZE16BIT;
+				ae_proc[ndx].mixed_mode = HW_MMU_CPUES;
+				dev_dbg(bridge, "shm MMU TLB entry PA %x"
+					" VA %x DSP_VA %x Size %x\n",
+					ae_proc[ndx].ul_gpp_pa,
+					ae_proc[ndx].ul_gpp_va,
+					ae_proc[ndx].ul_dsp_va *
+					hio_mgr->word_size, page_size[i]);
+				ndx++;
+			} else {
+				status =
+				    hio_mgr->intf_fxns->
+				    pfn_brd_mem_map(hio_mgr->hbridge_context,
+						    pa_curr, va_curr,
+						    page_size[i], map_attrs,
+						    NULL);
+				dev_dbg(bridge,
+					"shm MMU PTE entry PA %x"
+					" VA %x DSP_VA %x Size %x\n",
+					pa_curr,
+					gpp_va_curr,
+					va_curr,
+					page_size[i]);
+				if (status)
+					goto func_end;
+			}
+			pa_curr += page_size[i];
+			va_curr += page_size[i];
+			gpp_va_curr += page_size[i];
+			num_bytes -= page_size[i];
+			/*
+			 * Don't try smaller sizes. Hopefully we have reached
+			 * an address aligned to a bigger page size.
+			 */
+			break;
+		}
+	}
 
 	/*
 	 * Copy remaining entries from CDB. All entries are 1 MB and
@@ -509,12 +634,38 @@
 					"DSP_VA 0x%x\n", ae_proc[ndx].ul_gpp_pa,
 					ae_proc[ndx].ul_dsp_va);
 				ndx++;
+			} else {
+				status = hio_mgr->intf_fxns->pfn_brd_mem_map
+				    (hio_mgr->hbridge_context,
+				     hio_mgr->ext_proc_info.ty_tlb[i].
+				     ul_gpp_phys,
+				     hio_mgr->ext_proc_info.ty_tlb[i].
+				     ul_dsp_virt, 0x100000, map_attrs,
+				     NULL);
 			}
 		}
 		if (status)
 			goto func_end;
 	}
 
+	map_attrs = 0;
+	map_attrs |= DSP_MAPLITTLEENDIAN;
+	map_attrs |= DSP_MAPPHYSICALADDR;
+	map_attrs |= DSP_MAPELEMSIZE32;
+	map_attrs |= DSP_MAPDONOTLOCK;
+
+	/* Map the L4 peripherals */
+	i = 0;
+	while (l4_peripheral_table[i].phys_addr) {
+		status = hio_mgr->intf_fxns->pfn_brd_mem_map
+		    (hio_mgr->hbridge_context, l4_peripheral_table[i].phys_addr,
+		     l4_peripheral_table[i].dsp_virt_addr, HW_PAGE_SIZE4KB,
+		     map_attrs, NULL);
+		if (status)
+			goto func_end;
+		i++;
+	}
+
 	for (i = ndx; i < BRDIOCTL_NUMOFMMUTLB; i++) {
 		ae_proc[i].ul_dsp_va = 0;
 		ae_proc[i].ul_gpp_pa = 0;
@@ -537,12 +688,12 @@
 		status = -EFAULT;
 		goto func_end;
 	} else {
-		if (sm_sg->seg0_da > ul_shm_base) {
+		if (ae_proc[0].ul_dsp_va > ul_shm_base) {
 			status = -EPERM;
 			goto func_end;
 		}
 		/* ul_shm_base may not be at ul_dsp_va address */
-		ul_shm_base_offset = (ul_shm_base - sm_sg->seg0_da) *
+		ul_shm_base_offset = (ul_shm_base - ae_proc[0].ul_dsp_va) *
 		    hio_mgr->word_size;
 		/*
 		 * bridge_dev_ctrl() will set dev context dsp-mmu info. In
@@ -566,7 +717,8 @@
 			goto func_end;
 		}
 		/* Register SM */
-		status = register_shm_segs(hio_mgr, cod_man, sm_sg->seg0_pa);
+		status =
+		    register_shm_segs(hio_mgr, cod_man, ae_proc[0].ul_gpp_pa);
 	}
 
 	hio_mgr->shared_mem = (struct shm *)ul_shm_base;
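
Both mapping loops added to io_sm.c above use the same trick to choose the largest usable MMU page: OR-ing the physical and virtual addresses exposes the smallest alignment of either, so a page of size s fits exactly when ((pa | va) & (s - 1)) == 0 and at least s bytes remain. A self-contained sketch of that selection (sizes mirror the HW_PAGE_SIZE* table):

	#include <stdint.h>

	static uint32_t pick_page_size(uint32_t pa, uint32_t va, uint32_t remaining)
	{
		static const uint32_t sizes[] = {
			0x1000000,	/* 16 MB section */
			0x100000,	/*  1 MB section */
			0x10000,	/* 64 KB large page */
			0x1000,		/*  4 KB small page */
		};
		uint32_t all_bits = pa | va;
		unsigned int i;

		for (i = 0; i < 4; i++)
			if (remaining >= sizes[i] &&
			    (all_bits & (sizes[i] - 1)) == 0)
				return sizes[i];
		return 0;	/* not reached if addresses are 4 KB aligned */
	}
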
diff --git a/drivers/staging/tidspbridge/core/tiomap3430.c b/drivers/staging/tidspbridge/core/tiomap3430.c
index f22bc12..1be081f 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430.c
@@ -23,7 +23,6 @@
 #include <dspbridge/host_os.h>
 #include <linux/mm.h>
 #include <linux/mmzone.h>
-#include <plat/control.h>
 
 /*  ----------------------------------- DSP/BIOS Bridge */
 #include <dspbridge/dbdefs.h>
@@ -35,6 +34,10 @@
 #include <dspbridge/drv.h>
 #include <dspbridge/sync.h>
 
+/* ------------------------------------ Hardware Abstraction Layer */
+#include <hw_defs.h>
+#include <hw_mmu.h>
+
 /*  ----------------------------------- Link Driver */
 #include <dspbridge/dspdefs.h>
 #include <dspbridge/dspchnl.h>
@@ -47,6 +50,7 @@
 /*  ----------------------------------- Platform Manager */
 #include <dspbridge/dev.h>
 #include <dspbridge/dspapi.h>
+#include <dspbridge/dmm.h>
 #include <dspbridge/wdt.h>
 
 /*  ----------------------------------- Local */
@@ -67,6 +71,20 @@
 #define MMU_SMALL_PAGE_MASK      0xFFFFF000
 #define OMAP3_IVA2_BOOTADDR_MASK 0xFFFFFC00
 #define PAGES_II_LVL_TABLE   512
+#define PHYS_TO_PAGE(phys)      pfn_to_page((phys) >> PAGE_SHIFT)
+
+/*
+ * This is a totally ugly layer violation, but needed until
+ * omap_ctrl_set_dsp_boot*() are provided.
+ */
+#define OMAP3_IVA2_BOOTMOD_IDLE 1
+#define OMAP2_CONTROL_GENERAL 0x270
+#define OMAP343X_CONTROL_IVA2_BOOTADDR (OMAP2_CONTROL_GENERAL + 0x0190)
+#define OMAP343X_CONTROL_IVA2_BOOTMOD (OMAP2_CONTROL_GENERAL + 0x0194)
+
+#define OMAP343X_CTRL_REGADDR(reg) \
+	OMAP2_L4_IO_ADDRESS(OMAP343X_CTRL_BASE + (reg))
+
 
 /* Forward Declarations: */
 static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt);
@@ -91,6 +109,12 @@
 static int bridge_brd_mem_write(struct bridge_dev_context *dev_ctxt,
 				    u8 *host_buff, u32 dsp_addr,
 				    u32 ul_num_bytes, u32 mem_type);
+static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt,
+				  u32 ul_mpu_addr, u32 virt_addr,
+				  u32 ul_num_bytes, u32 ul_map_attr,
+				  struct page **mapped_pages);
+static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
+				     u32 virt_addr, u32 ul_num_bytes);
 static int bridge_dev_create(struct bridge_dev_context
 					**dev_cntxt,
 					struct dev_object *hdev_obj,
@@ -98,8 +122,57 @@
 static int bridge_dev_ctrl(struct bridge_dev_context *dev_context,
 				  u32 dw_cmd, void *pargs);
 static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt);
+static u32 user_va2_pa(struct mm_struct *mm, u32 address);
+static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa,
+			     u32 va, u32 size,
+			     struct hw_mmu_map_attrs_t *map_attrs);
+static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va,
+			  u32 size, struct hw_mmu_map_attrs_t *attrs);
+static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
+				  u32 ul_mpu_addr, u32 virt_addr,
+				  u32 ul_num_bytes,
+				  struct hw_mmu_map_attrs_t *hw_attrs);
+
 bool wait_for_start(struct bridge_dev_context *dev_context, u32 dw_sync_addr);
 
+/*  ----------------------------------- Globals */
+
+/* Attributes of L2 page tables for DSP MMU */
+struct page_info {
+	u32 num_entries;	/* Number of valid PTEs in the L2 PT */
+};
+
+/* Attributes used to manage the DSP MMU page tables */
+struct pg_table_attrs {
+	spinlock_t pg_lock;	/* Critical section object handle */
+
+	u32 l1_base_pa;		/* Physical address of the L1 PT */
+	u32 l1_base_va;		/* Virtual  address of the L1 PT */
+	u32 l1_size;		/* Size of the L1 PT */
+	u32 l1_tbl_alloc_pa;
+	/* Physical address of Allocated mem for L1 table. May not be aligned */
+	u32 l1_tbl_alloc_va;
+	/* Virtual address of Allocated mem for L1 table. May not be aligned */
+	u32 l1_tbl_alloc_sz;
+	/* Size of consistent memory allocated for L1 table.
+	 * May not be aligned */
+
+	u32 l2_base_pa;		/* Physical address of the L2 PT */
+	u32 l2_base_va;		/* Virtual  address of the L2 PT */
+	u32 l2_size;		/* Size of the L2 PT */
+	u32 l2_tbl_alloc_pa;
+	/* Physical address of Allocated mem for L2 table. May not be aligned */
+	u32 l2_tbl_alloc_va;
+	/* Virtual address of Allocated mem for L2 table. May not be aligned */
+	u32 l2_tbl_alloc_sz;
+	/* Size of consistent memory allocated for L2 table.
+	 * May not be aligned */
+
+	u32 l2_num_pages;	/* Number of allocated L2 PT */
+	/* Array [l2_num_pages] of L2 PT info structs */
+	struct page_info *pg_info;
+};
+
 /*
  *  This Bridge driver's function interface table.
  */
@@ -119,6 +192,8 @@
 	bridge_brd_set_state,
 	bridge_brd_mem_copy,
 	bridge_brd_mem_write,
+	bridge_brd_mem_map,
+	bridge_brd_mem_un_map,
 	/* The following CHNL functions are provided by chnl_io.lib: */
 	bridge_chnl_create,
 	bridge_chnl_destroy,
@@ -148,6 +223,27 @@
 	bridge_msg_set_queue_id,
 };
 
+static inline void flush_all(struct bridge_dev_context *dev_context)
+{
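+	/*
+	 * The DSP MMU registers are not accessible while the subsystem is in
+	 * hibernation, so wake the DSP up before flushing the TLB.
+	 */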
+	if (dev_context->dw_brd_state == BRD_DSP_HIBERNATION ||
+	    dev_context->dw_brd_state == BRD_HIBERNATION)
+		wake_dsp(dev_context, NULL);
+
+	hw_mmu_tlb_flush_all(dev_context->dw_dsp_mmu_base);
+}
+
+static void bad_page_dump(u32 pa, struct page *pg)
+{
+	pr_emerg("DSPBRIDGE: MAP function: COUNT 0 FOR PA 0x%x\n", pa);
+	pr_emerg("Bad page state in process '%s'\n"
+		 "page:%p flags:0x%0*lx mapping:%p mapcount:%d count:%d\n"
+		 "Backtrace:\n",
+		 current->comm, pg, (int)(2 * sizeof(unsigned long)),
+		 (unsigned long)pg->flags, pg->mapping,
+		 page_mapcount(pg), page_count(pg));
+	dump_stack();
+}
+
 /*
  *  ======== bridge_drv_entry ========
  *  purpose:
@@ -203,7 +299,8 @@
 		(*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_DISABLE_AUTO,
 					OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
 	}
-
+	(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
+					OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
 	dsp_clk_enable(DSP_CLK_IVA2);
 
 	/* set the device state to IDLE */
@@ -274,17 +371,14 @@
 {
 	int status = 0;
 	struct bridge_dev_context *dev_context = dev_ctxt;
-	struct iommu *mmu = NULL;
-	struct shm_segs *sm_sg;
-	int l4_i = 0, tlb_i = 0;
-	u32 sg0_da = 0, sg1_da = 0;
-	struct bridge_ioctl_extproc *tlb = dev_context->atlb_entry;
 	u32 dw_sync_addr = 0;
 	u32 ul_shm_base;	/* Gpp Phys SM base addr(byte) */
 	u32 ul_shm_base_virt;	/* Dsp Virt SM base addr */
 	u32 ul_tlb_base_virt;	/* Base of MMU TLB entry */
 	/* Offset of shm_base_virt from tlb_base_virt */
 	u32 ul_shm_offset_virt;
+	s32 entry_ndx;
+	s32 itmp_entry_ndx = 0;	/* Next DSP-MMU TLB entry to program */
 	struct cfg_hostres *resources = NULL;
 	u32 temp;
 	u32 ul_dsp_clk_rate;
@@ -305,12 +399,12 @@
 	ul_shm_base_virt *= DSPWORDSIZE;
 	DBC_ASSERT(ul_shm_base_virt != 0);
 	/* DSP Virtual address */
-	ul_tlb_base_virt = dev_context->sh_s.seg0_da;
+	ul_tlb_base_virt = dev_context->atlb_entry[0].ul_dsp_va;
 	DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
 	ul_shm_offset_virt =
 	    ul_shm_base_virt - (ul_tlb_base_virt * DSPWORDSIZE);
 	/* Kernel logical address */
-	ul_shm_base = dev_context->sh_s.seg0_va + ul_shm_offset_virt;
+	ul_shm_base = dev_context->atlb_entry[0].ul_gpp_va + ul_shm_offset_virt;
 
 	DBC_ASSERT(ul_shm_base != 0);
 	/* 2nd wd is used as sync field */
@@ -345,83 +439,78 @@
 					OMAP343X_CONTROL_IVA2_BOOTMOD));
 		}
 	}
-
 	if (!status) {
+		/* Assert and release RST2 so that BOOTADDR is copied to
+		 * the IVA2 SYSC register */
+		(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK,
+			OMAP3430_RST2_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
+		udelay(100);
 		(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK, 0,
 					OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
-		mmu = dev_context->dsp_mmu;
-		if (mmu)
-			dsp_mmu_exit(mmu);
-		mmu = dsp_mmu_init();
-		if (IS_ERR(mmu)) {
-			dev_err(bridge, "dsp_mmu_init failed!\n");
-			dev_context->dsp_mmu = NULL;
-			status = (int)mmu;
-		}
-	}
-	if (!status) {
-		dev_context->dsp_mmu = mmu;
-		sm_sg = &dev_context->sh_s;
-		sg0_da = iommu_kmap(mmu, sm_sg->seg0_da, sm_sg->seg0_pa,
-			sm_sg->seg0_size, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
-		if (IS_ERR_VALUE(sg0_da)) {
-			status = (int)sg0_da;
-			sg0_da = 0;
-		}
-	}
-	if (!status) {
-		sg1_da = iommu_kmap(mmu, sm_sg->seg1_da, sm_sg->seg1_pa,
-			sm_sg->seg1_size, IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
-		if (IS_ERR_VALUE(sg1_da)) {
-			status = (int)sg1_da;
-			sg1_da = 0;
-		}
-	}
-	if (!status) {
-		u32 da;
-		for (tlb_i = 0; tlb_i < BRDIOCTL_NUMOFMMUTLB; tlb_i++) {
-			if (!tlb[tlb_i].ul_gpp_pa)
+		udelay(100);
+
+		/* Disable the DSP MMU */
+		hw_mmu_disable(resources->dw_dmmu_base);
+		/* Disable TWL */
+		hw_mmu_twl_disable(resources->dw_dmmu_base);
+
+		/* Only make TLB entry if both addresses are non-zero */
+		for (entry_ndx = 0; entry_ndx < BRDIOCTL_NUMOFMMUTLB;
+		     entry_ndx++) {
+			struct bridge_ioctl_extproc *e = &dev_context->atlb_entry[entry_ndx];
+			struct hw_mmu_map_attrs_t map_attrs = {
+				.endianism = e->endianism,
+				.element_size = e->elem_size,
+				.mixed_size = e->mixed_mode,
+			};
+
+			if (!e->ul_gpp_pa || !e->ul_dsp_va)
 				continue;
 
-			dev_dbg(bridge, "IOMMU %d GppPa: 0x%x DspVa 0x%x Size"
-				" 0x%x\n", tlb_i, tlb[tlb_i].ul_gpp_pa,
-				tlb[tlb_i].ul_dsp_va, tlb[tlb_i].ul_size);
+			dev_dbg(bridge,
+					"MMU %d, pa: 0x%x, va: 0x%x, size: 0x%x",
+					itmp_entry_ndx,
+					e->ul_gpp_pa,
+					e->ul_dsp_va,
+					e->ul_size);
 
-			da = iommu_kmap(mmu, tlb[tlb_i].ul_dsp_va,
-				tlb[tlb_i].ul_gpp_pa, PAGE_SIZE,
-				IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
-			if (IS_ERR_VALUE(da)) {
-				status = (int)da;
-				break;
-			}
-		}
-	}
-	if (!status) {
-		u32 da;
-		l4_i = 0;
-		while (l4_peripheral_table[l4_i].phys_addr) {
-			da = iommu_kmap(mmu, l4_peripheral_table[l4_i].
-				dsp_virt_addr, l4_peripheral_table[l4_i].
-				phys_addr, PAGE_SIZE,
-				IOVMF_ENDIAN_LITTLE | IOVMF_ELSZ_32);
-			if (IS_ERR_VALUE(da)) {
-				status = (int)da;
-				break;
-			}
-			l4_i++;
+			hw_mmu_tlb_add(dev_context->dw_dsp_mmu_base,
+					e->ul_gpp_pa,
+					e->ul_dsp_va,
+					e->ul_size,
+					itmp_entry_ndx,
+					&map_attrs, 1, 1);
+
+			itmp_entry_ndx++;
 		}
 	}
 
 	/* Lock the above TLB entries and get the BIOS and load monitor timer
 	 * information */
 	if (!status) {
+		hw_mmu_num_locked_set(resources->dw_dmmu_base, itmp_entry_ndx);
+		hw_mmu_victim_num_set(resources->dw_dmmu_base, itmp_entry_ndx);
+		hw_mmu_ttb_set(resources->dw_dmmu_base,
+			       dev_context->pt_attrs->l1_base_pa);
+		hw_mmu_twl_enable(resources->dw_dmmu_base);
+		/* Enable the SmartIdle and AutoIdle bit for MMU_SYSCONFIG */
+
+		temp = __raw_readl((resources->dw_dmmu_base) + 0x10);
+		temp = (temp & 0xFFFFFFEF) | 0x11;
+		__raw_writel(temp, (resources->dw_dmmu_base) + 0x10);
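+		/* (Offset 0x10 from the MMU base is MMU_SYSCONFIG: bit 0 is
+		 * AUTOIDLE, bits 4:3 are SIDLEMODE, so the read-modify-write
+		 * above selects smart-idle with auto-idle enabled.) */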
+
+		/* Let the DSP MMU run */
+		hw_mmu_enable(resources->dw_dmmu_base);
+
 		/* Enable the BIOS clock */
 		(void)dev_get_symbol(dev_context->hdev_obj,
 				     BRIDGEINIT_BIOSGPTIMER, &ul_bios_gp_timer);
 		(void)dev_get_symbol(dev_context->hdev_obj,
 				     BRIDGEINIT_LOADMON_GPTIMER,
 				     &ul_load_monitor_timer);
+	}
 
+	if (!status) {
 		if (ul_load_monitor_timer != 0xFFFF) {
 			clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
 			    ul_load_monitor_timer;
@@ -430,7 +519,9 @@
 			dev_dbg(bridge, "Not able to get the symbol for Load "
 				"Monitor Timer\n");
 		}
+	}
 
+	if (!status) {
 		if (ul_bios_gp_timer != 0xFFFF) {
 			clk_cmd = (BPWR_ENABLE_CLOCK << MBX_PM_CLK_CMDSHIFT) |
 			    ul_bios_gp_timer;
@@ -439,7 +530,9 @@
 			dev_dbg(bridge,
 				"Not able to get the symbol for BIOS Timer\n");
 		}
+	}
 
+	if (!status) {
 		/* Set the DSP clock rate */
 		(void)dev_get_symbol(dev_context->hdev_obj,
 				     "_BRIDGEINIT_DSP_FREQ", &ul_dsp_clk_addr);
@@ -492,6 +585,9 @@
 
 		/* Let DSP go */
 		dev_dbg(bridge, "%s Unreset\n", __func__);
+		/* Enable DSP MMU Interrupts */
+		hw_mmu_event_enable(resources->dw_dmmu_base,
+				    HW_MMU_ALL_INTERRUPTS);
 		/* release the RST1, DSP starts executing now .. */
 		(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK, 0,
 					OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
@@ -521,23 +617,11 @@
 
 			/* update board state */
 			dev_context->dw_brd_state = BRD_RUNNING;
-			return 0;
+			/* (void)chnlsm_enable_interrupt(dev_context); */
 		} else {
 			dev_context->dw_brd_state = BRD_UNKNOWN;
 		}
 	}
-
-	while (tlb_i--) {
-		if (!tlb[tlb_i].ul_gpp_pa)
-			continue;
-		iommu_kunmap(mmu, tlb[tlb_i].ul_gpp_va);
-	}
-	while (l4_i--)
-		iommu_kunmap(mmu, l4_peripheral_table[l4_i].dsp_virt_addr);
-	if (sg0_da)
-		iommu_kunmap(mmu, sg0_da);
-	if (sg1_da)
-		iommu_kunmap(mmu, sg1_da);
 	return status;
 }
 
@@ -553,9 +637,8 @@
 {
 	int status = 0;
 	struct bridge_dev_context *dev_context = dev_ctxt;
+	struct pg_table_attrs *pt_attrs;
 	u32 dsp_pwr_state;
-	int i;
-	struct bridge_ioctl_extproc *tlb = dev_context->atlb_entry;
 	struct omap_dsp_platform_data *pdata =
 		omap_dspbridge_dev->dev.platform_data;
 
@@ -591,37 +674,23 @@
 
 	dsp_wdt_enable(false);
 
-	/* Reset DSP */
-	(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST1_IVA2_MASK,
-		OMAP3430_RST1_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
-
+	/* This is a good place to clear the MMU page tables as well */
+	if (dev_context->pt_attrs) {
+		pt_attrs = dev_context->pt_attrs;
+		memset((u8 *) pt_attrs->l1_base_va, 0x00, pt_attrs->l1_size);
+		memset((u8 *) pt_attrs->l2_base_va, 0x00, pt_attrs->l2_size);
+		memset((u8 *) pt_attrs->pg_info, 0x00,
+		       (pt_attrs->l2_num_pages * sizeof(struct page_info)));
+	}
 	/* Disable the mailbox interrupts */
 	if (dev_context->mbox) {
 		omap_mbox_disable_irq(dev_context->mbox, IRQ_RX);
 		omap_mbox_put(dev_context->mbox);
 		dev_context->mbox = NULL;
 	}
-	if (dev_context->dsp_mmu) {
-		pr_err("Proc stop mmu if statement\n");
-		for (i = 0; i < BRDIOCTL_NUMOFMMUTLB; i++) {
-			if (!tlb[i].ul_gpp_pa)
-				continue;
-			iommu_kunmap(dev_context->dsp_mmu, tlb[i].ul_gpp_va);
-		}
-		i = 0;
-		while (l4_peripheral_table[i].phys_addr) {
-			iommu_kunmap(dev_context->dsp_mmu,
-				l4_peripheral_table[i].dsp_virt_addr);
-			i++;
-		}
-		iommu_kunmap(dev_context->dsp_mmu, dev_context->sh_s.seg0_da);
-		iommu_kunmap(dev_context->dsp_mmu, dev_context->sh_s.seg1_da);
-		dsp_mmu_exit(dev_context->dsp_mmu);
-		dev_context->dsp_mmu = NULL;
-	}
-	/* Reset IVA IOMMU*/
-	(*pdata->dsp_prm_rmw_bits)(OMAP3430_RST2_IVA2_MASK,
-		OMAP3430_RST2_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
+	/* Reset IVA2 clocks */
+	(*pdata->dsp_prm_write)(OMAP3430_RST1_IVA2_MASK | OMAP3430_RST2_IVA2_MASK |
+			OMAP3430_RST3_IVA2_MASK, OMAP3430_IVA2_MOD, OMAP2_RM_RSTCTRL);
 
 	dsp_clock_disable_all(dev_context->dsp_per_clks);
 	dsp_clk_disable(DSP_CLK_IVA2);
@@ -681,6 +750,10 @@
 	struct bridge_dev_context *dev_context = NULL;
 	s32 entry_ndx;
 	struct cfg_hostres *resources = config_param;
+	struct pg_table_attrs *pt_attrs;
+	u32 pg_tbl_pa;
+	u32 pg_tbl_va;
+	u32 align_size;
 	struct drv_data *drv_datap = dev_get_drvdata(bridge);
 
 	/* Allocate and initialize a data structure to contain the bridge driver
@@ -711,8 +784,97 @@
 	if (!dev_context->dw_dsp_base_addr)
 		status = -EPERM;
 
+	pt_attrs = kzalloc(sizeof(struct pg_table_attrs), GFP_KERNEL);
+	if (pt_attrs != NULL) {
+		/* Assuming that we use only the DSP's memory map
+		 * up to 0x4000:0000, we need only 1024
+		 * L1 entries, i.e. an L1 size of 4K */
+		pt_attrs->l1_size = 0x1000;
+		align_size = pt_attrs->l1_size;
+		/* Align sizes are expected to be power of 2 */
+		/* we like to get aligned on L1 table size */
+		pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l1_size,
+						     align_size, &pg_tbl_pa);
+
+		/* Check if the PA is aligned for us */
+		if ((pg_tbl_pa) & (align_size - 1)) {
+			/* PA not aligned to page table size;
+			 * allocate more and align manually */
+			mem_free_phys_mem((void *)pg_tbl_va, pg_tbl_pa,
+					  pt_attrs->l1_size);
+			/* we like to get aligned on L1 table size */
+			pg_tbl_va =
+			    (u32) mem_alloc_phys_mem((pt_attrs->l1_size) * 2,
+						     align_size, &pg_tbl_pa);
+			/* We should be able to get aligned table now */
+			pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
+			pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
+			pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size * 2;
+			/* Align the PA to the next 'align' boundary */
+			pt_attrs->l1_base_pa =
+			    ((pg_tbl_pa) +
+			     (align_size - 1)) & (~(align_size - 1));
+			pt_attrs->l1_base_va =
+			    pg_tbl_va + (pt_attrs->l1_base_pa - pg_tbl_pa);
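+			/* e.g. a PA of 0x80000800 from the 8 KB allocation
+			 * rounds up to an aligned L1 base of 0x80001000 */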
+		} else {
+			/* We got aligned PA, cool */
+			pt_attrs->l1_tbl_alloc_pa = pg_tbl_pa;
+			pt_attrs->l1_tbl_alloc_va = pg_tbl_va;
+			pt_attrs->l1_tbl_alloc_sz = pt_attrs->l1_size;
+			pt_attrs->l1_base_pa = pg_tbl_pa;
+			pt_attrs->l1_base_va = pg_tbl_va;
+		}
+		if (pt_attrs->l1_base_va)
+			memset((u8 *) pt_attrs->l1_base_va, 0x00,
+			       pt_attrs->l1_size);
+
+		/* number of L2 page tables = DMM pool used + SHMMEM + EXTMEM +
+		 * L4 pages */
+		pt_attrs->l2_num_pages = ((DMMPOOLSIZE >> 20) + 6);
+		pt_attrs->l2_size = HW_MMU_COARSE_PAGE_SIZE *
+		    pt_attrs->l2_num_pages;
+		align_size = 4;	/* Make it u32 aligned */
+		/* u32 alignment is sufficient for the L2 tables */
+		pg_tbl_va = (u32) mem_alloc_phys_mem(pt_attrs->l2_size,
+						     align_size, &pg_tbl_pa);
+		pt_attrs->l2_tbl_alloc_pa = pg_tbl_pa;
+		pt_attrs->l2_tbl_alloc_va = pg_tbl_va;
+		pt_attrs->l2_tbl_alloc_sz = pt_attrs->l2_size;
+		pt_attrs->l2_base_pa = pg_tbl_pa;
+		pt_attrs->l2_base_va = pg_tbl_va;
+
+		if (pt_attrs->l2_base_va)
+			memset((u8 *) pt_attrs->l2_base_va, 0x00,
+			       pt_attrs->l2_size);
+
+		pt_attrs->pg_info = kzalloc(pt_attrs->l2_num_pages *
+					sizeof(struct page_info), GFP_KERNEL);
+		dev_dbg(bridge,
+			"L1 pa %x, va %x, size %x\n L2 pa %x, va "
+			"%x, size %x\n", pt_attrs->l1_base_pa,
+			pt_attrs->l1_base_va, pt_attrs->l1_size,
+			pt_attrs->l2_base_pa, pt_attrs->l2_base_va,
+			pt_attrs->l2_size);
+		dev_dbg(bridge, "pt_attrs %p L2 NumPages %x pg_info %p\n",
+			pt_attrs, pt_attrs->l2_num_pages, pt_attrs->pg_info);
+	}
+	if ((pt_attrs != NULL) && (pt_attrs->l1_base_va != 0) &&
+	    (pt_attrs->l2_base_va != 0) && (pt_attrs->pg_info != NULL))
+		dev_context->pt_attrs = pt_attrs;
+	else
+		status = -ENOMEM;
+
 	if (!status) {
+		spin_lock_init(&pt_attrs->pg_lock);
 		dev_context->tc_word_swap_on = drv_datap->tc_wordswapon;
+
+		/* Set the Clock Divisor for the DSP module */
+		udelay(5);
+		/* MMU address is obtained from the host
+		 * resources struct */
+		dev_context->dw_dsp_mmu_base = resources->dw_dmmu_base;
+	}
+	if (!status) {
 		dev_context->hdev_obj = hdev_obj;
 		/* Store current board state. */
 		dev_context->dw_brd_state = BRD_UNKNOWN;
@@ -722,6 +884,23 @@
 		/* Return ptr to our device state to the DSP API for storage */
 		*dev_cntxt = dev_context;
 	} else {
+		if (pt_attrs != NULL) {
+			kfree(pt_attrs->pg_info);
+
+			if (pt_attrs->l2_tbl_alloc_va) {
+				mem_free_phys_mem((void *)
+						  pt_attrs->l2_tbl_alloc_va,
+						  pt_attrs->l2_tbl_alloc_pa,
+						  pt_attrs->l2_tbl_alloc_sz);
+			}
+			if (pt_attrs->l1_tbl_alloc_va) {
+				mem_free_phys_mem((void *)
+						  pt_attrs->l1_tbl_alloc_va,
+						  pt_attrs->l1_tbl_alloc_pa,
+						  pt_attrs->l1_tbl_alloc_sz);
+			}
+		}
+		kfree(pt_attrs);
 		kfree(dev_context);
 	}
 func_end:
@@ -789,6 +968,7 @@
  */
 static int bridge_dev_destroy(struct bridge_dev_context *dev_ctxt)
 {
+	struct pg_table_attrs *pt_attrs;
 	int status = 0;
 	struct bridge_dev_context *dev_context = (struct bridge_dev_context *)
 	    dev_ctxt;
@@ -802,6 +982,23 @@
 
 	/* first put the device to stop state */
 	bridge_brd_stop(dev_context);
+	if (dev_context->pt_attrs) {
+		pt_attrs = dev_context->pt_attrs;
+		kfree(pt_attrs->pg_info);
+
+		if (pt_attrs->l2_tbl_alloc_va) {
+			mem_free_phys_mem((void *)pt_attrs->l2_tbl_alloc_va,
+					  pt_attrs->l2_tbl_alloc_pa,
+					  pt_attrs->l2_tbl_alloc_sz);
+		}
+		if (pt_attrs->l1_tbl_alloc_va) {
+			mem_free_phys_mem((void *)pt_attrs->l1_tbl_alloc_va,
+					  pt_attrs->l1_tbl_alloc_pa,
+					  pt_attrs->l1_tbl_alloc_sz);
+		}
+		kfree(pt_attrs);
+
+	}
 
 	if (dev_context->resources) {
 		host_res = dev_context->resources;
@@ -832,6 +1029,8 @@
 			iounmap((void *)host_res->dw_mem_base[3]);
 		if (host_res->dw_mem_base[4])
 			iounmap((void *)host_res->dw_mem_base[4]);
+		if (host_res->dw_dmmu_base)
+			iounmap(host_res->dw_dmmu_base);
 		if (host_res->dw_per_base)
 			iounmap(host_res->dw_per_base);
 		if (host_res->dw_per_pm_base)
@@ -845,6 +1044,7 @@
 		host_res->dw_mem_base[2] = (u32) NULL;
 		host_res->dw_mem_base[3] = (u32) NULL;
 		host_res->dw_mem_base[4] = (u32) NULL;
+		host_res->dw_dmmu_base = NULL;
 		host_res->dw_sys_ctrl_base = NULL;
 
 		kfree(host_res);
@@ -928,6 +1128,673 @@
 }
 
 /*
+ *  ======== bridge_brd_mem_map ========
+ *      This function maps an MPU buffer into the DSP address space. It
+ *  performs linear-to-physical address translation if required, translating
+ *  each page since linear addresses can be physically non-contiguous.
+ *  All address and size arguments are assumed to be page aligned (in proc.c).
+ *
+ *  TODO: Disable MMU while updating the page tables (but that'll stall DSP)
+ */
+static int bridge_brd_mem_map(struct bridge_dev_context *dev_ctxt,
+				  u32 ul_mpu_addr, u32 virt_addr,
+				  u32 ul_num_bytes, u32 ul_map_attr,
+				  struct page **mapped_pages)
+{
+	u32 attrs;
+	int status = 0;
+	struct bridge_dev_context *dev_context = dev_ctxt;
+	struct hw_mmu_map_attrs_t hw_attrs;
+	struct vm_area_struct *vma;
+	struct mm_struct *mm = current->mm;
+	u32 write = 0;
+	u32 num_usr_pgs = 0;
+	struct page *mapped_page, *pg;
+	s32 pg_num;
+	u32 va = virt_addr;
+	struct task_struct *curr_task = current;
+	u32 pg_i = 0;
+	u32 mpu_addr, pa;
+
+	dev_dbg(bridge,
+		"%s hDevCtxt %p, pa %x, va %x, size %x, ul_map_attr %x\n",
+		__func__, dev_ctxt, ul_mpu_addr, virt_addr, ul_num_bytes,
+		ul_map_attr);
+	if (ul_num_bytes == 0)
+		return -EINVAL;
+
+	if (ul_map_attr & DSP_MAP_DIR_MASK) {
+		attrs = ul_map_attr;
+	} else {
+		/* Assign default attributes */
+		attrs = ul_map_attr | (DSP_MAPVIRTUALADDR | DSP_MAPELEMSIZE16);
+	}
+	/* Take mapping properties */
+	if (attrs & DSP_MAPBIGENDIAN)
+		hw_attrs.endianism = HW_BIG_ENDIAN;
+	else
+		hw_attrs.endianism = HW_LITTLE_ENDIAN;
+
+	hw_attrs.mixed_size = (enum hw_mmu_mixed_size_t)
+	    ((attrs & DSP_MAPMIXEDELEMSIZE) >> 2);
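+	/* DSP_MAPMIXEDELEMSIZE occupies bit 2 of the attribute word, so the
+	 * shift above yields the 0/1 value the hw_mmu layer expects */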
+	/* Ignore element_size if mixed_size is enabled */
+	if (hw_attrs.mixed_size == 0) {
+		if (attrs & DSP_MAPELEMSIZE8) {
+			/* Size is 8 bit */
+			hw_attrs.element_size = HW_ELEM_SIZE8BIT;
+		} else if (attrs & DSP_MAPELEMSIZE16) {
+			/* Size is 16 bit */
+			hw_attrs.element_size = HW_ELEM_SIZE16BIT;
+		} else if (attrs & DSP_MAPELEMSIZE32) {
+			/* Size is 32 bit */
+			hw_attrs.element_size = HW_ELEM_SIZE32BIT;
+		} else if (attrs & DSP_MAPELEMSIZE64) {
+			/* Size is 64 bit */
+			hw_attrs.element_size = HW_ELEM_SIZE64BIT;
+		} else {
+			/*
+			 * Mixedsize isn't enabled, so size can't be
+			 * zero here
+			 */
+			return -EINVAL;
+		}
+	}
+	if (attrs & DSP_MAPDONOTLOCK)
+		hw_attrs.donotlockmpupage = 1;
+	else
+		hw_attrs.donotlockmpupage = 0;
+
+	if (attrs & DSP_MAPVMALLOCADDR) {
+		return mem_map_vmalloc(dev_ctxt, ul_mpu_addr, virt_addr,
+				       ul_num_bytes, &hw_attrs);
+	}
+	/*
+	 * Do OS-specific user-va to pa translation.
+	 * Combine physically contiguous regions to reduce TLBs.
+	 * Pass the translated pa to pte_update.
+	 */
+	if ((attrs & DSP_MAPPHYSICALADDR)) {
+		status = pte_update(dev_context, ul_mpu_addr, virt_addr,
+				    ul_num_bytes, &hw_attrs);
+		goto func_cont;
+	}
+
+	/*
+	 * Important Note: ul_mpu_addr is mapped from user application process
+	 * to current process - it must lie completely within the current
+	 * virtual memory address space in order to be of use to us here!
+	 */
+	down_read(&mm->mmap_sem);
+	vma = find_vma(mm, ul_mpu_addr);
+	if (vma)
+		dev_dbg(bridge,
+			"VMAfor UserBuf: ul_mpu_addr=%x, ul_num_bytes=%x, "
+			"vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
+			ul_num_bytes, vma->vm_start, vma->vm_end,
+			vma->vm_flags);
+
+	/*
+	 * It is observed that under some circumstances the user buffer is
+	 * spread across several VMAs, so loop through and check that the
+	 * entire user buffer is covered
+	 */
+	while ((vma) && (ul_mpu_addr + ul_num_bytes > vma->vm_end)) {
+		/* jump to the next VMA region */
+		vma = find_vma(mm, vma->vm_end + 1);
+		dev_dbg(bridge,
+			"VMA for UserBuf ul_mpu_addr=%x ul_num_bytes=%x, "
+			"vm_start=%lx, vm_end=%lx, vm_flags=%lx\n", ul_mpu_addr,
+			ul_num_bytes, vma->vm_start, vma->vm_end,
+			vma->vm_flags);
+	}
+	if (!vma) {
+		pr_err("%s: Failed to get VMA region for 0x%x (%d)\n",
+		       __func__, ul_mpu_addr, ul_num_bytes);
+		status = -EINVAL;
+		up_read(&mm->mmap_sem);
+		goto func_cont;
+	}
+
+	if (vma->vm_flags & VM_IO) {
+		num_usr_pgs = ul_num_bytes / PG_SIZE4K;
+		mpu_addr = ul_mpu_addr;
+
+		/* Get the physical addresses for user buffer */
+		for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
+			pa = user_va2_pa(mm, mpu_addr);
+			if (!pa) {
+				status = -EPERM;
+				pr_err("DSPBRIDGE: VM_IO mapping physical"
+				       "address is invalid\n");
+				break;
+			}
+			if (pfn_valid(__phys_to_pfn(pa))) {
+				pg = PHYS_TO_PAGE(pa);
+				get_page(pg);
+				if (page_count(pg) < 1) {
+					pr_err("Bad page in VM_IO buffer\n");
+					bad_page_dump(pa, pg);
+				}
+			}
+			status = pte_set(dev_context->pt_attrs, pa,
+					 va, HW_PAGE_SIZE4KB, &hw_attrs);
+			if (status)
+				break;
+
+			va += HW_PAGE_SIZE4KB;
+			mpu_addr += HW_PAGE_SIZE4KB;
+			pa += HW_PAGE_SIZE4KB;
+		}
+	} else {
+		num_usr_pgs = ul_num_bytes / PG_SIZE4K;
+		if (vma->vm_flags & (VM_WRITE | VM_MAYWRITE))
+			write = 1;
+
+		for (pg_i = 0; pg_i < num_usr_pgs; pg_i++) {
+			pg_num = get_user_pages(curr_task, mm, ul_mpu_addr, 1,
+						write, 1, &mapped_page, NULL);
+			if (pg_num > 0) {
+				if (page_count(mapped_page) < 1) {
+					pr_err("Bad page count after doing"
+					       "get_user_pages on"
+					       "user buffer\n");
+					bad_page_dump(page_to_phys(mapped_page),
+						      mapped_page);
+				}
+				status = pte_set(dev_context->pt_attrs,
+						 page_to_phys(mapped_page), va,
+						 HW_PAGE_SIZE4KB, &hw_attrs);
+				if (status)
+					break;
+
+				if (mapped_pages)
+					mapped_pages[pg_i] = mapped_page;
+
+				va += HW_PAGE_SIZE4KB;
+				ul_mpu_addr += HW_PAGE_SIZE4KB;
+			} else {
+				pr_err("DSPBRIDGE: get_user_pages FAILED,"
+				       "MPU addr = 0x%x,"
+				       "vma->vm_flags = 0x%lx,"
+				       "get_user_pages Err"
+				       "Value = %d, Buffer"
+				       "size=0x%x\n", ul_mpu_addr,
+				       vma->vm_flags, pg_num, ul_num_bytes);
+				status = -EPERM;
+				break;
+			}
+		}
+	}
+	up_read(&mm->mmap_sem);
+func_cont:
+	if (status) {
+		/*
+		 * Roll back the pages mapped so far in case the mapping
+		 * failed midway
+		 */
+		if (pg_i) {
+			bridge_brd_mem_un_map(dev_context, virt_addr,
+					   (pg_i * PG_SIZE4K));
+		}
+		status = -EPERM;
+	}
+	/*
+	 * In any case, flush the TLB
+	 * This is called from here instead of from pte_update to avoid unnecessary
+	 * repetition while mapping non-contiguous physical regions of a virtual
+	 * region
+	 */
+	flush_all(dev_context);
+	dev_dbg(bridge, "%s status %x\n", __func__, status);
+	return status;
+}
+
+/*
+ *  ======== bridge_brd_mem_un_map ========
+ *      Invalidate the PTEs for the DSP VA block to be unmapped.
+ *
+ *      PTEs of a mapped memory block are contiguous in any page table,
+ *      so instead of looking up the PTE address for every 4K block,
+ *      we clear consecutive PTEs until we unmap all the bytes.
+ */
+static int bridge_brd_mem_un_map(struct bridge_dev_context *dev_ctxt,
+				     u32 virt_addr, u32 ul_num_bytes)
+{
+	u32 l1_base_va;
+	u32 l2_base_va;
+	u32 l2_base_pa;
+	u32 l2_page_num;
+	u32 pte_val;
+	u32 pte_size;
+	u32 pte_count;
+	u32 pte_addr_l1;
+	u32 pte_addr_l2 = 0;
+	u32 rem_bytes;
+	u32 rem_bytes_l2;
+	u32 va_curr;
+	struct page *pg = NULL;
+	int status = 0;
+	struct bridge_dev_context *dev_context = dev_ctxt;
+	struct pg_table_attrs *pt = dev_context->pt_attrs;
+	u32 temp;
+	u32 paddr;
+	u32 numof4k_pages = 0;
+
+	va_curr = virt_addr;
+	rem_bytes = ul_num_bytes;
+	rem_bytes_l2 = 0;
+	l1_base_va = pt->l1_base_va;
+	pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
+	dev_dbg(bridge, "%s dev_ctxt %p, va %x, NumBytes %x l1_base_va %x, "
+		"pte_addr_l1 %x\n", __func__, dev_ctxt, virt_addr,
+		ul_num_bytes, l1_base_va, pte_addr_l1);
+
+	while (rem_bytes && !status) {
+		u32 va_curr_orig = va_curr;
+		/* Find whether the L1 PTE points to a valid L2 PT */
+		pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va_curr);
+		pte_val = *(u32 *) pte_addr_l1;
+		pte_size = hw_mmu_pte_size_l1(pte_val);
+
+		if (pte_size != HW_MMU_COARSE_PAGE_SIZE)
+			goto skip_coarse_page;
+
+		/*
+		 * Get the L2 PA from the L1 PTE, and find
+		 * corresponding L2 VA
+		 */
+		l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
+		l2_base_va = l2_base_pa - pt->l2_base_pa + pt->l2_base_va;
+		l2_page_num =
+		    (l2_base_pa - pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE;
+		/*
+		 * Find the L2 PTE address from which we will start
+		 * clearing, the number of PTEs to be cleared on this
+		 * page, and the size of VA space that needs to be
+		 * cleared on this L2 page
+		 */
+		pte_addr_l2 = hw_mmu_pte_addr_l2(l2_base_va, va_curr);
+		pte_count = pte_addr_l2 & (HW_MMU_COARSE_PAGE_SIZE - 1);
+		pte_count = (HW_MMU_COARSE_PAGE_SIZE - pte_count) / sizeof(u32);
+		if (rem_bytes < (pte_count * PG_SIZE4K))
+			pte_count = rem_bytes / PG_SIZE4K;
+		rem_bytes_l2 = pte_count * PG_SIZE4K;
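+		/* pte_count above is first the byte offset of the PTE within
+		 * its coarse page, then the number of u32 PTEs remaining on
+		 * that page; each PTE maps 4 KB of DSP VA */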
+
+		/*
+		 * Unmap the VA space on this L2 PT. A quicker way
+		 * would be to clear pte_count entries starting from
+		 * pte_addr_l2. However, the code below checks that we don't
+		 * clear invalid entries or less than 64KB for a 64KB
+		 * entry. Similar checking is done for L1 PTEs too
+		 * below
+		 */
+		while (rem_bytes_l2 && !status) {
+			pte_val = *(u32 *) pte_addr_l2;
+			pte_size = hw_mmu_pte_size_l2(pte_val);
+			/* va_curr aligned to pte_size? */
+			if (pte_size == 0 || rem_bytes_l2 < pte_size ||
+			    va_curr & (pte_size - 1)) {
+				status = -EPERM;
+				break;
+			}
+
+			/* Collect Physical addresses from VA */
+			paddr = (pte_val & ~(pte_size - 1));
+			if (pte_size == HW_PAGE_SIZE64KB)
+				numof4k_pages = 16;
+			else
+				numof4k_pages = 1;
+			temp = 0;
+			while (temp++ < numof4k_pages) {
+				if (!pfn_valid(__phys_to_pfn(paddr))) {
+					paddr += HW_PAGE_SIZE4KB;
+					continue;
+				}
+				pg = PHYS_TO_PAGE(paddr);
+				if (page_count(pg) < 1) {
+					pr_info("DSPBRIDGE: UNMAP function: "
+						"COUNT 0 FOR PA 0x%x, size = "
+						"0x%x\n", paddr, ul_num_bytes);
+					bad_page_dump(paddr, pg);
+				} else {
+					set_page_dirty(pg);
+					page_cache_release(pg);
+				}
+				paddr += HW_PAGE_SIZE4KB;
+			}
+			if (hw_mmu_pte_clear(pte_addr_l2, va_curr, pte_size)) {
+				status = -EPERM;
+				goto EXIT_LOOP;
+			}
+
+			status = 0;
+			rem_bytes_l2 -= pte_size;
+			va_curr += pte_size;
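+			/* A 64 KB entry is replicated in 16 consecutive u32
+			 * PTEs of the coarse table, hence the
+			 * (pte_size >> 12) step */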
+			pte_addr_l2 += (pte_size >> 12) * sizeof(u32);
+		}
+		spin_lock(&pt->pg_lock);
+		if (rem_bytes_l2 == 0) {
+			pt->pg_info[l2_page_num].num_entries -= pte_count;
+			if (pt->pg_info[l2_page_num].num_entries == 0) {
+				/*
+				 * Clear the L1 PTE pointing to the L2 PT
+				 */
+				if (!hw_mmu_pte_clear(l1_base_va, va_curr_orig,
+						     HW_MMU_COARSE_PAGE_SIZE))
+					status = 0;
+				else {
+					status = -EPERM;
+					spin_unlock(&pt->pg_lock);
+					goto EXIT_LOOP;
+				}
+			}
+			rem_bytes -= pte_count * PG_SIZE4K;
+		} else
+			status = -EPERM;
+
+		spin_unlock(&pt->pg_lock);
+		continue;
+skip_coarse_page:
+		/* va_curr aligned to pte_size? */
+		/* pte_size = 1 MB or 16 MB */
+		if (pte_size == 0 || rem_bytes < pte_size ||
+		    va_curr & (pte_size - 1)) {
+			status = -EPERM;
+			break;
+		}
+
+		if (pte_size == HW_PAGE_SIZE1MB)
+			numof4k_pages = 256;
+		else
+			numof4k_pages = 4096;
+		temp = 0;
+		/* Collect Physical addresses from VA */
+		paddr = (pte_val & ~(pte_size - 1));
+		while (temp++ < numof4k_pages) {
+			if (pfn_valid(__phys_to_pfn(paddr))) {
+				pg = PHYS_TO_PAGE(paddr);
+				if (page_count(pg) < 1) {
+					pr_info("DSPBRIDGE: UNMAP function: "
+						"COUNT 0 FOR PA 0x%x, size = "
+						"0x%x\n", paddr, ul_num_bytes);
+					bad_page_dump(paddr, pg);
+				} else {
+					set_page_dirty(pg);
+					page_cache_release(pg);
+				}
+			}
+			paddr += HW_PAGE_SIZE4KB;
+		}
+		if (!hw_mmu_pte_clear(l1_base_va, va_curr, pte_size)) {
+			status = 0;
+			rem_bytes -= pte_size;
+			va_curr += pte_size;
+		} else {
+			status = -EPERM;
+			goto EXIT_LOOP;
+		}
+	}
+	/*
+	 * It is better to flush the TLB here, so that any stale entries
+	 * get flushed
+	 */
+EXIT_LOOP:
+	flush_all(dev_context);
+	dev_dbg(bridge,
+		"%s: va_curr %x, pte_addr_l1 %x pte_addr_l2 %x rem_bytes %x,"
+		" rem_bytes_l2 %x status %x\n", __func__, va_curr, pte_addr_l1,
+		pte_addr_l2, rem_bytes, rem_bytes_l2, status);
+	return status;
+}
+
+/*
+ *  ======== user_va2_pa ========
+ *  Purpose:
+ *      This function walks through the page tables to convert a userland
+ *      virtual address to a physical address
+ */
+static u32 user_va2_pa(struct mm_struct *mm, u32 address)
+{
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pte_t *ptep, pte;
+
+	pgd = pgd_offset(mm, address);
+	if (!(pgd_none(*pgd) || pgd_bad(*pgd))) {
+		pmd = pmd_offset(pgd, address);
+		if (!(pmd_none(*pmd) || pmd_bad(*pmd))) {
+			ptep = pte_offset_map(pmd, address);
+			if (ptep) {
+				pte = *ptep;
+				if (pte_present(pte))
+					return pte & PAGE_MASK;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ *  ======== pte_update ========
+ *      This function calculates the optimum page-aligned addresses and sizes.
+ *      Caller must pass page-aligned values.
+ */
+static int pte_update(struct bridge_dev_context *dev_ctxt, u32 pa,
+			     u32 va, u32 size,
+			     struct hw_mmu_map_attrs_t *map_attrs)
+{
+	u32 i;
+	u32 all_bits;
+	u32 pa_curr = pa;
+	u32 va_curr = va;
+	u32 num_bytes = size;
+	struct bridge_dev_context *dev_context = dev_ctxt;
+	int status = 0;
+	u32 page_size[] = { HW_PAGE_SIZE16MB, HW_PAGE_SIZE1MB,
+		HW_PAGE_SIZE64KB, HW_PAGE_SIZE4KB
+	};
+
+	while (num_bytes && !status) {
+		/* To find the max. page size with which both PA & VA are
+		 * aligned */
+		all_bits = pa_curr | va_curr;
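+		/* e.g. with pa = 0x80300000, va = 0x20300000 and 1 MB + 4 KB
+		 * left to map, the first pass programs a 1 MB entry and the
+		 * second a 4 KB entry */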
+
+		for (i = 0; i < 4; i++) {
+			if ((num_bytes >= page_size[i]) && ((all_bits &
+							     (page_size[i] -
+							      1)) == 0)) {
+				status =
+				    pte_set(dev_context->pt_attrs, pa_curr,
+					    va_curr, page_size[i], map_attrs);
+				pa_curr += page_size[i];
+				va_curr += page_size[i];
+				num_bytes -= page_size[i];
+				/* Don't try smaller sizes. Hopefully we have
+				 * reached an address aligned to a bigger page
+				 * size */
+				break;
+			}
+		}
+	}
+
+	return status;
+}
+
+/*
+ *  ======== pte_set ========
+ *      This function calculates the PTE address (MPU virtual) to be updated.
+ *      It also manages the L2 page tables.
+ */
+static int pte_set(struct pg_table_attrs *pt, u32 pa, u32 va,
+			  u32 size, struct hw_mmu_map_attrs_t *attrs)
+{
+	u32 i;
+	u32 pte_val;
+	u32 pte_addr_l1;
+	u32 pte_size;
+	/* Base address of the PT that will be updated */
+	u32 pg_tbl_va;
+	u32 l1_base_va;
+	/* The compiler warns that the next three variables might be used
+	 * uninitialized in this function. That doesn't seem to be the case,
+	 * but initialize them anyway to silence the warning. */
+	u32 l2_base_va = 0;
+	u32 l2_base_pa = 0;
+	u32 l2_page_num = 0;
+	int status = 0;
+
+	l1_base_va = pt->l1_base_va;
+	pg_tbl_va = l1_base_va;
+	if ((size == HW_PAGE_SIZE64KB) || (size == HW_PAGE_SIZE4KB)) {
+		/* Find whether the L1 PTE points to a valid L2 PT */
+		pte_addr_l1 = hw_mmu_pte_addr_l1(l1_base_va, va);
+		if (pte_addr_l1 <= (pt->l1_base_va + pt->l1_size)) {
+			pte_val = *(u32 *) pte_addr_l1;
+			pte_size = hw_mmu_pte_size_l1(pte_val);
+		} else {
+			return -EPERM;
+		}
+		spin_lock(&pt->pg_lock);
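+		/* pg_lock serialises the num_entries accounting in pg_info[]
+		 * against bridge_brd_mem_un_map() */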
+		if (pte_size == HW_MMU_COARSE_PAGE_SIZE) {
+			/* Get the L2 PA from the L1 PTE, and find
+			 * corresponding L2 VA */
+			l2_base_pa = hw_mmu_pte_coarse_l1(pte_val);
+			l2_base_va =
+			    l2_base_pa - pt->l2_base_pa + pt->l2_base_va;
+			l2_page_num =
+			    (l2_base_pa -
+			     pt->l2_base_pa) / HW_MMU_COARSE_PAGE_SIZE;
+		} else if (pte_size == 0) {
+			/* L1 PTE is invalid. Allocate a L2 PT and
+			 * point the L1 PTE to it */
+			/* Find a free L2 PT. */
+			for (i = 0; (i < pt->l2_num_pages) &&
+			     (pt->pg_info[i].num_entries != 0); i++)
+				;
+			if (i < pt->l2_num_pages) {
+				l2_page_num = i;
+				l2_base_pa = pt->l2_base_pa + (l2_page_num *
+						HW_MMU_COARSE_PAGE_SIZE);
+				l2_base_va = pt->l2_base_va + (l2_page_num *
+						HW_MMU_COARSE_PAGE_SIZE);
+				/* Endianness attributes are ignored for
+				 * HW_MMU_COARSE_PAGE_SIZE */
+				status =
+				    hw_mmu_pte_set(l1_base_va, l2_base_pa, va,
+						   HW_MMU_COARSE_PAGE_SIZE,
+						   attrs);
+			} else {
+				status = -ENOMEM;
+			}
+		} else {
+			/* Found valid L1 PTE of another size.
+			 * Should not overwrite it. */
+			status = -EPERM;
+		}
+		if (!status) {
+			pg_tbl_va = l2_base_va;
+			if (size == HW_PAGE_SIZE64KB)
+				pt->pg_info[l2_page_num].num_entries += 16;
+			else
+				pt->pg_info[l2_page_num].num_entries++;
+			dev_dbg(bridge, "PTE: L2 BaseVa %x, BasePa %x, PageNum "
+				"%x, num_entries %x\n", l2_base_va,
+				l2_base_pa, l2_page_num,
+				pt->pg_info[l2_page_num].num_entries);
+		}
+		spin_unlock(&pt->pg_lock);
+	}
+	if (!status) {
+		dev_dbg(bridge, "PTE: pg_tbl_va %x, pa %x, va %x, size %x\n",
+			pg_tbl_va, pa, va, size);
+		dev_dbg(bridge, "PTE: endianism %x, element_size %x, "
+			"mixed_size %x\n", attrs->endianism,
+			attrs->element_size, attrs->mixed_size);
+		status = hw_mmu_pte_set(pg_tbl_va, pa, va, size, attrs);
+	}
+
+	return status;
+}
+
+/* Memory map kernel VA -- memory allocated with vmalloc */
+static int mem_map_vmalloc(struct bridge_dev_context *dev_context,
+				  u32 ul_mpu_addr, u32 virt_addr,
+				  u32 ul_num_bytes,
+				  struct hw_mmu_map_attrs_t *hw_attrs)
+{
+	int status = 0;
+	struct page *page[1];
+	u32 i;
+	u32 pa_curr;
+	u32 pa_next;
+	u32 va_curr;
+	u32 size_curr;
+	u32 num_pages;
+	u32 pa;
+	u32 num_of4k_pages;
+	u32 temp = 0;
+
+	/*
+	 * Do Kernel va to pa translation.
+	 * Combine physically contiguous regions to reduce TLBs.
+	 * Pass the translated pa to pte_update.
+	 */
+	num_pages = ul_num_bytes / PAGE_SIZE;	/* PAGE_SIZE = OS page size */
+	i = 0;
+	va_curr = ul_mpu_addr;
+	page[0] = vmalloc_to_page((void *)va_curr);
+	pa_next = page_to_phys(page[0]);
+	while (!status && (i < num_pages)) {
+		/*
+		 * Reuse pa_next from the previous iteration to avoid
+		 * an extra va2pa call
+		 */
+		pa_curr = pa_next;
+		size_curr = PAGE_SIZE;
+		/*
+		 * If the next page is physically contiguous,
+		 * map it with the current one by increasing
+		 * the size of the region to be mapped
+		 */
+		while (++i < num_pages) {
+			page[0] =
+			    vmalloc_to_page((void *)(va_curr + size_curr));
+			pa_next = page_to_phys(page[0]);
+
+			if (pa_next == (pa_curr + size_curr))
+				size_curr += PAGE_SIZE;
+			else
+				break;
+
+		}
+		if (pa_next == 0) {
+			status = -ENOMEM;
+			break;
+		}
+		pa = pa_curr;
+		num_of4k_pages = size_curr / HW_PAGE_SIZE4KB;
+		temp = 0;
+		while (temp++ < num_of4k_pages) {
+			get_page(PHYS_TO_PAGE(pa));
+			pa += HW_PAGE_SIZE4KB;
+		}
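+		/* The reference taken on each 4 KB page above is dropped by
+		 * page_cache_release() in bridge_brd_mem_un_map() */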
+		status = pte_update(dev_context, pa_curr, virt_addr +
+				    (va_curr - ul_mpu_addr), size_curr,
+				    hw_attrs);
+		va_curr += size_curr;
+	}
+	/*
+	 * In any case, flush the TLB
+	 * This is called from here instead of from pte_update to avoid unnecessary
+	 * repetition while mapping non-contiguous physical regions of a virtual
+	 * region
+	 */
+	flush_all(dev_context);
+	dev_dbg(bridge, "%s status %x\n", __func__, status);
+	return status;
+}
+
+/*
  *  ======== wait_for_start ========
 *      Wait for the signal from DSP that it has started, or time out.
  */
diff --git a/drivers/staging/tidspbridge/core/tiomap3430_pwr.c b/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
index b57a9fd..fb9026e 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430_pwr.c
@@ -31,6 +31,10 @@
 #include <dspbridge/dev.h>
 #include <dspbridge/iodefs.h>
 
+/* ------------------------------------ Hardware Abstraction Layer */
+#include <hw_defs.h>
+#include <hw_mmu.h>
+
 #include <dspbridge/pwr_sh.h>
 
 /*  ----------------------------------- Bridge Driver */
diff --git a/drivers/staging/tidspbridge/core/tiomap_io.c b/drivers/staging/tidspbridge/core/tiomap_io.c
index 66dbf02..ba29610 100644
--- a/drivers/staging/tidspbridge/core/tiomap_io.c
+++ b/drivers/staging/tidspbridge/core/tiomap_io.c
@@ -134,16 +134,17 @@
 
 		if (!status) {
 			ul_tlb_base_virt =
-			    dev_context->sh_s.seg0_da * DSPWORDSIZE;
+			    dev_context->atlb_entry[0].ul_dsp_va * DSPWORDSIZE;
 			DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
-			dw_ext_prog_virt_mem = dev_context->sh_s.seg0_va;
+			dw_ext_prog_virt_mem =
+			    dev_context->atlb_entry[0].ul_gpp_va;
 
 			if (!trace_read) {
 				ul_shm_offset_virt =
 				    ul_shm_base_virt - ul_tlb_base_virt;
 				ul_shm_offset_virt +=
 				    PG_ALIGN_HIGH(ul_ext_end - ul_dyn_ext_base +
-						  1, PAGE_SIZE * 16);
+						  1, HW_PAGE_SIZE64KB);
 				dw_ext_prog_virt_mem -= ul_shm_offset_virt;
 				dw_ext_prog_virt_mem +=
 				    (ul_ext_base - ul_dyn_ext_base);
@@ -317,9 +318,8 @@
 			ret = -EPERM;
 
 		if (!ret) {
-			ul_tlb_base_virt = dev_context->sh_s.seg0_da *
-								DSPWORDSIZE;
-
+			ul_tlb_base_virt =
+			    dev_context->atlb_entry[0].ul_dsp_va * DSPWORDSIZE;
 			DBC_ASSERT(ul_tlb_base_virt <= ul_shm_base_virt);
 
 			if (symbols_reloaded) {
@@ -337,7 +337,7 @@
 			    ul_shm_base_virt - ul_tlb_base_virt;
 			if (trace_load) {
 				dw_ext_prog_virt_mem =
-					dev_context->sh_s.seg0_va;
+				    dev_context->atlb_entry[0].ul_gpp_va;
 			} else {
 				dw_ext_prog_virt_mem = host_res->dw_mem_base[1];
 				dw_ext_prog_virt_mem +=
@@ -393,6 +393,7 @@
 		omap_dspbridge_dev->dev.platform_data;
 	struct cfg_hostres *resources = dev_context->resources;
 	int status = 0;
+	u32 temp;
 
 	if (!dev_context->mbox)
 		return 0;
@@ -436,7 +437,7 @@
 		omap_mbox_restore_ctx(dev_context->mbox);
 
 		/* Access MMU SYS CONFIG register to generate a short wakeup */
-		iommu_read_reg(dev_context->dsp_mmu, MMU_SYSCONFIG);
+		temp = readl(resources->dw_dmmu_base + 0x10);
 
 		dev_context->dw_brd_state = BRD_RUNNING;
 	} else if (dev_context->dw_brd_state == BRD_RETENTION) {
diff --git a/drivers/staging/tidspbridge/core/ue_deh.c b/drivers/staging/tidspbridge/core/ue_deh.c
index e24ea0c..3430418 100644
--- a/drivers/staging/tidspbridge/core/ue_deh.c
+++ b/drivers/staging/tidspbridge/core/ue_deh.c
@@ -31,6 +31,57 @@
 #include <dspbridge/drv.h>
 #include <dspbridge/wdt.h>
 
+static u32 fault_addr;
+
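+/*
+ * mmu_fault_dpc() - deferred half of the DSP MMU fault handling: runs as a
+ * tasklet and notifies registered clients of the fault latched in fault_addr.
+ */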
+static void mmu_fault_dpc(unsigned long data)
+{
+	struct deh_mgr *deh = (void *)data;
+
+	if (!deh)
+		return;
+
+	bridge_deh_notify(deh, DSP_MMUFAULT, 0);
+}
+
+static irqreturn_t mmu_fault_isr(int irq, void *data)
+{
+	struct deh_mgr *deh = data;
+	struct cfg_hostres *resources;
+	u32 event;
+
+	if (!deh)
+		return IRQ_HANDLED;
+
+	resources = deh->hbridge_context->resources;
+	if (!resources) {
+		dev_dbg(bridge, "%s: Failed to get Host Resources\n",
+				__func__);
+		return IRQ_HANDLED;
+	}
+
+	hw_mmu_event_status(resources->dw_dmmu_base, &event);
+	if (event == HW_MMU_TRANSLATION_FAULT) {
+		hw_mmu_fault_addr_read(resources->dw_dmmu_base, &fault_addr);
+		dev_dbg(bridge, "%s: event=0x%x, fault_addr=0x%x\n", __func__,
+				event, fault_addr);
+		/*
+		 * Schedule a DPC directly. In the future, it may be
+		 * necessary to check if DSP MMU fault is intended for
+		 * Bridge.
+		 */
+		tasklet_schedule(&deh->dpc_tasklet);
+
+		/* Disable the MMU events; otherwise, once the fault is
+		 * cleared, it will start raising interrupts again */
+		hw_mmu_event_disable(resources->dw_dmmu_base,
+				HW_MMU_TRANSLATION_FAULT);
+	} else {
+		hw_mmu_event_disable(resources->dw_dmmu_base,
+				HW_MMU_ALL_INTERRUPTS);
+	}
+	return IRQ_HANDLED;
+}
+
 int bridge_deh_create(struct deh_mgr **ret_deh,
 		struct dev_object *hdev_obj)
 {
@@ -58,9 +109,18 @@
 	}
 	ntfy_init(deh->ntfy_obj);
 
+	/* Create an MMU fault DPC */
+	tasklet_init(&deh->dpc_tasklet, mmu_fault_dpc, (u32) deh);
+
 	/* Fill in context structure */
 	deh->hbridge_context = hbridge_context;
 
+	/* Install ISR function for DSP MMU fault */
+	status = request_irq(INT_DSP_MMU_IRQ, mmu_fault_isr, 0,
+			"DspBridge\tiommu fault", deh);
+	if (status < 0)
+		goto err;
+
 	*ret_deh = deh;
 	return 0;
 
@@ -80,6 +140,11 @@
 		ntfy_delete(deh->ntfy_obj);
 		kfree(deh->ntfy_obj);
 	}
+	/* Disable DSP MMU fault */
+	free_irq(INT_DSP_MMU_IRQ, deh);
+
+	/* Free DPC object */
+	tasklet_kill(&deh->dpc_tasklet);
 
 	/* Deallocate the DEH manager object */
 	kfree(deh);
@@ -101,6 +166,48 @@
 		return ntfy_unregister(deh->ntfy_obj, hnotification);
 }
 
+#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
+static void mmu_fault_print_stack(struct bridge_dev_context *dev_context)
+{
+	struct cfg_hostres *resources;
+	struct hw_mmu_map_attrs_t map_attrs = {
+		.endianism = HW_LITTLE_ENDIAN,
+		.element_size = HW_ELEM_SIZE16BIT,
+		.mixed_size = HW_MMU_CPUES,
+	};
+	void *dummy_va_addr;
+
+	resources = dev_context->resources;
+	dummy_va_addr = (void *)__get_free_page(GFP_ATOMIC);
+
+	/*
+	 * Before acking the MMU fault, let's make sure MMU can only
+	 * access entry #0. Then add a new entry so that the DSP OS
+	 * can continue in order to dump the stack.
+	 */
+	hw_mmu_twl_disable(resources->dw_dmmu_base);
+	hw_mmu_tlb_flush_all(resources->dw_dmmu_base);
+
+	hw_mmu_tlb_add(resources->dw_dmmu_base,
+			virt_to_phys(dummy_va_addr), fault_addr,
+			HW_PAGE_SIZE4KB, 1,
+			&map_attrs, HW_SET, HW_SET);
+
+	dsp_clk_enable(DSP_CLK_GPT8);
+
+	dsp_gpt_wait_overflow(DSP_CLK_GPT8, 0xfffffffe);
+
+	/* Clear MMU interrupt */
+	hw_mmu_event_ack(resources->dw_dmmu_base,
+			HW_MMU_TRANSLATION_FAULT);
+	dump_dsp_stack(dev_context);
+	dsp_clk_disable(DSP_CLK_GPT8);
+
+	hw_mmu_disable(resources->dw_dmmu_base);
+	free_page((unsigned long)dummy_va_addr);
+}
+#endif
+
 static inline const char *event_to_string(int event)
 {
 	switch (event) {
@@ -133,7 +240,13 @@
 #endif
 		break;
 	case DSP_MMUFAULT:
-		dev_err(bridge, "%s: %s, addr=0x%x", __func__, str, info);
+		dev_err(bridge, "%s: %s, addr=0x%x", __func__,
+				str, fault_addr);
+#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
+		print_dsp_trace_buffer(dev_context);
+		dump_dl_modules(dev_context);
+		mmu_fault_print_stack(dev_context);
+#endif
 		break;
 	default:
 		dev_err(bridge, "%s: %s", __func__, str);
diff --git a/drivers/staging/tidspbridge/hw/EasiGlobal.h b/drivers/staging/tidspbridge/hw/EasiGlobal.h
new file mode 100644
index 0000000..e48d7f6
--- /dev/null
+++ b/drivers/staging/tidspbridge/hw/EasiGlobal.h
@@ -0,0 +1,41 @@
+/*
+ * EasiGlobal.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2007 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _EASIGLOBAL_H
+#define _EASIGLOBAL_H
+#include <linux/types.h>
+
+/*
+ * DEFINE:        READ_ONLY, WRITE_ONLY &  READ_WRITE
+ *
+ * DESCRIPTION: Defines used to describe register types for EASI-checker tests.
+ */
+
+#define READ_ONLY    1
+#define WRITE_ONLY   2
+#define READ_WRITE   3
+
+/*
+ * MACRO:        _DEBUG_LEVEL1_EASI
+ *
+ * DESCRIPTION:  A MACRO which can be used to indicate that a particular
+ *               register access function was called.
+ *
+ * NOTE:         We currently don't use this functionality.
+ */
+#define _DEBUG_LEVEL1_EASI(easi_num)     ((void)0)
+
+#endif /* _EASIGLOBAL_H */
diff --git a/drivers/staging/tidspbridge/hw/MMUAccInt.h b/drivers/staging/tidspbridge/hw/MMUAccInt.h
new file mode 100644
index 0000000..1cefca3
--- /dev/null
+++ b/drivers/staging/tidspbridge/hw/MMUAccInt.h
@@ -0,0 +1,76 @@
+/*
+ * MMUAccInt.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2007 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _MMU_ACC_INT_H
+#define _MMU_ACC_INT_H
+
+/* Mappings of level 1 EASI function numbers to function names */
+
+#define EASIL1_MMUMMU_SYSCONFIG_READ_REGISTER32 (MMU_BASE_EASIL1 + 3)
+#define EASIL1_MMUMMU_SYSCONFIG_IDLE_MODE_WRITE32  (MMU_BASE_EASIL1 + 17)
+#define EASIL1_MMUMMU_SYSCONFIG_AUTO_IDLE_WRITE32    (MMU_BASE_EASIL1 + 39)
+#define EASIL1_MMUMMU_IRQSTATUS_WRITE_REGISTER32   (MMU_BASE_EASIL1 + 51)
+#define EASIL1_MMUMMU_IRQENABLE_READ_REGISTER32 (MMU_BASE_EASIL1 + 102)
+#define EASIL1_MMUMMU_IRQENABLE_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 103)
+#define EASIL1_MMUMMU_WALKING_STTWL_RUNNING_READ32 (MMU_BASE_EASIL1 + 156)
+#define EASIL1_MMUMMU_CNTLTWL_ENABLE_READ32 (MMU_BASE_EASIL1 + 174)
+#define EASIL1_MMUMMU_CNTLTWL_ENABLE_WRITE32   (MMU_BASE_EASIL1 + 180)
+#define EASIL1_MMUMMU_CNTLMMU_ENABLE_WRITE32     (MMU_BASE_EASIL1 + 190)
+#define EASIL1_MMUMMU_FAULT_AD_READ_REGISTER32   (MMU_BASE_EASIL1 + 194)
+#define EASIL1_MMUMMU_TTB_WRITE_REGISTER32  (MMU_BASE_EASIL1 + 198)
+#define EASIL1_MMUMMU_LOCK_READ_REGISTER32   (MMU_BASE_EASIL1 + 203)
+#define EASIL1_MMUMMU_LOCK_WRITE_REGISTER32  (MMU_BASE_EASIL1 + 204)
+#define EASIL1_MMUMMU_LOCK_BASE_VALUE_READ32  (MMU_BASE_EASIL1 + 205)
+#define EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_READ32 (MMU_BASE_EASIL1 + 209)
+#define EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_WRITE32 (MMU_BASE_EASIL1 + 211)
+#define EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_SET32  (MMU_BASE_EASIL1 + 212)
+#define EASIL1_MMUMMU_LD_TLB_READ_REGISTER32    (MMU_BASE_EASIL1 + 213)
+#define EASIL1_MMUMMU_LD_TLB_WRITE_REGISTER32   (MMU_BASE_EASIL1 + 214)
+#define EASIL1_MMUMMU_CAM_WRITE_REGISTER32   (MMU_BASE_EASIL1 + 226)
+#define EASIL1_MMUMMU_RAM_WRITE_REGISTER32 (MMU_BASE_EASIL1 + 268)
+#define EASIL1_MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32  (MMU_BASE_EASIL1 + 322)
+
+/* Register offset address definitions */
+#define MMU_MMU_SYSCONFIG_OFFSET   0x10
+#define MMU_MMU_IRQSTATUS_OFFSET  0x18
+#define MMU_MMU_IRQENABLE_OFFSET    0x1c
+#define MMU_MMU_WALKING_ST_OFFSET 0x40
+#define MMU_MMU_CNTL_OFFSET   0x44
+#define MMU_MMU_FAULT_AD_OFFSET  0x48
+#define MMU_MMU_TTB_OFFSET  0x4c
+#define MMU_MMU_LOCK_OFFSET   0x50
+#define MMU_MMU_LD_TLB_OFFSET  0x54
+#define MMU_MMU_CAM_OFFSET   0x58
+#define MMU_MMU_RAM_OFFSET   0x5c
+#define MMU_MMU_GFLUSH_OFFSET  0x60
+#define MMU_MMU_FLUSH_ENTRY_OFFSET  0x64
+/* Bitfield mask and offset declarations */
+#define MMU_MMU_SYSCONFIG_IDLE_MODE_MASK  0x18
+#define MMU_MMU_SYSCONFIG_IDLE_MODE_OFFSET  3
+#define MMU_MMU_SYSCONFIG_AUTO_IDLE_MASK  0x1
+#define MMU_MMU_SYSCONFIG_AUTO_IDLE_OFFSET   0
+#define MMU_MMU_WALKING_ST_TWL_RUNNING_MASK 0x1
+#define MMU_MMU_WALKING_ST_TWL_RUNNING_OFFSET  0
+#define MMU_MMU_CNTL_TWL_ENABLE_MASK 0x4
+#define MMU_MMU_CNTL_TWL_ENABLE_OFFSET 2
+#define MMU_MMU_CNTL_MMU_ENABLE_MASK    0x2
+#define MMU_MMU_CNTL_MMU_ENABLE_OFFSET   1
+#define MMU_MMU_LOCK_BASE_VALUE_MASK 0xfc00
+#define MMU_MMU_LOCK_BASE_VALUE_OFFSET   10
+#define MMU_MMU_LOCK_CURRENT_VICTIM_MASK   0x3f0
+#define MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET    4
+
+#endif /* _MMU_ACC_INT_H */
diff --git a/drivers/staging/tidspbridge/hw/MMURegAcM.h b/drivers/staging/tidspbridge/hw/MMURegAcM.h
new file mode 100644
index 0000000..ab1a16d
--- /dev/null
+++ b/drivers/staging/tidspbridge/hw/MMURegAcM.h
@@ -0,0 +1,225 @@
+/*
+ * MMURegAcM.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Copyright (C) 2007 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _MMU_REG_ACM_H
+#define _MMU_REG_ACM_H
+
+#include <linux/io.h>
+#include <EasiGlobal.h>
+
+#include "MMUAccInt.h"
+
+#if defined(USE_LEVEL_1_MACROS)
+
+#define MMUMMU_SYSCONFIG_READ_REGISTER32(base_address)\
+    (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_SYSCONFIG_READ_REGISTER32),\
+      __raw_readl((base_address)+MMU_MMU_SYSCONFIG_OFFSET))
+
+#define MMUMMU_SYSCONFIG_IDLE_MODE_WRITE32(base_address, value)\
+{\
+    const u32 offset = MMU_MMU_SYSCONFIG_OFFSET;\
+    register u32 data = __raw_readl((base_address)+offset);\
+    register u32 new_value = (value);\
+    _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_SYSCONFIG_IDLE_MODE_WRITE32);\
+    data &= ~(MMU_MMU_SYSCONFIG_IDLE_MODE_MASK);\
+    new_value <<= MMU_MMU_SYSCONFIG_IDLE_MODE_OFFSET;\
+    new_value &= MMU_MMU_SYSCONFIG_IDLE_MODE_MASK;\
+    new_value |= data;\
+    __raw_writel(new_value, base_address+offset);\
+}
+
+#define MMUMMU_SYSCONFIG_AUTO_IDLE_WRITE32(base_address, value)\
+{\
+    const u32 offset = MMU_MMU_SYSCONFIG_OFFSET;\
+    register u32 data = __raw_readl((base_address)+offset);\
+    register u32 new_value = (value);\
+    _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_SYSCONFIG_AUTO_IDLE_WRITE32);\
+    data &= ~(MMU_MMU_SYSCONFIG_AUTO_IDLE_MASK);\
+    new_value <<= MMU_MMU_SYSCONFIG_AUTO_IDLE_OFFSET;\
+    new_value &= MMU_MMU_SYSCONFIG_AUTO_IDLE_MASK;\
+    new_value |= data;\
+    __raw_writel(new_value, base_address+offset);\
+}
+
+#define MMUMMU_IRQSTATUS_READ_REGISTER32(base_address)\
+    (_DEBUG_LEVEL1_EASI(easil1_mmummu_irqstatus_read_register32),\
+      __raw_readl((base_address)+MMU_MMU_IRQSTATUS_OFFSET))
+
+#define MMUMMU_IRQSTATUS_WRITE_REGISTER32(base_address, value)\
+{\
+    const u32 offset = MMU_MMU_IRQSTATUS_OFFSET;\
+    register u32 new_value = (value);\
+    _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQSTATUS_WRITE_REGISTER32);\
+    __raw_writel(new_value, (base_address)+offset);\
+}
+
+#define MMUMMU_IRQENABLE_READ_REGISTER32(base_address)\
+    (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQENABLE_READ_REGISTER32),\
+      __raw_readl((base_address)+MMU_MMU_IRQENABLE_OFFSET))
+
+#define MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, value)\
+{\
+    const u32 offset = MMU_MMU_IRQENABLE_OFFSET;\
+    register u32 new_value = (value);\
+    _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_IRQENABLE_WRITE_REGISTER32);\
+    __raw_writel(new_value, (base_address)+offset);\
+}
+
+#define MMUMMU_WALKING_STTWL_RUNNING_READ32(base_address)\
+    (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_WALKING_STTWL_RUNNING_READ32),\
+      (((__raw_readl(((base_address)+(MMU_MMU_WALKING_ST_OFFSET))))\
+      & MMU_MMU_WALKING_ST_TWL_RUNNING_MASK) >>\
+      MMU_MMU_WALKING_ST_TWL_RUNNING_OFFSET))
+
+#define MMUMMU_CNTLTWL_ENABLE_READ32(base_address)\
+    (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CNTLTWL_ENABLE_READ32),\
+      (((__raw_readl(((base_address)+(MMU_MMU_CNTL_OFFSET)))) &\
+      MMU_MMU_CNTL_TWL_ENABLE_MASK) >>\
+      MMU_MMU_CNTL_TWL_ENABLE_OFFSET))
+
+#define MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, value)\
+{\
+    const u32 offset = MMU_MMU_CNTL_OFFSET;\
+    register u32 data = __raw_readl((base_address)+offset);\
+    register u32 new_value = (value);\
+    _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CNTLTWL_ENABLE_WRITE32);\
+    data &= ~(MMU_MMU_CNTL_TWL_ENABLE_MASK);\
+    new_value <<= MMU_MMU_CNTL_TWL_ENABLE_OFFSET;\
+    new_value &= MMU_MMU_CNTL_TWL_ENABLE_MASK;\
+    new_value |= data;\
+    __raw_writel(new_value, base_address+offset);\
+}
+
+#define MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, value)\
+{\
+    const u32 offset = MMU_MMU_CNTL_OFFSET;\
+    register u32 data = __raw_readl((base_address)+offset);\
+    register u32 new_value = (value);\
+    _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CNTLMMU_ENABLE_WRITE32);\
+    data &= ~(MMU_MMU_CNTL_MMU_ENABLE_MASK);\
+    new_value <<= MMU_MMU_CNTL_MMU_ENABLE_OFFSET;\
+    new_value &= MMU_MMU_CNTL_MMU_ENABLE_MASK;\
+    new_value |= data;\
+    __raw_writel(new_value, base_address+offset);\
+}
+
+#define MMUMMU_FAULT_AD_READ_REGISTER32(base_address)\
+    (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_FAULT_AD_READ_REGISTER32),\
+      __raw_readl((base_address)+MMU_MMU_FAULT_AD_OFFSET))
+
+#define MMUMMU_TTB_WRITE_REGISTER32(base_address, value)\
+{\
+    const u32 offset = MMU_MMU_TTB_OFFSET;\
+    register u32 new_value = (value);\
+    _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_TTB_WRITE_REGISTER32);\
+    __raw_writel(new_value, (base_address)+offset);\
+}
+
+#define MMUMMU_LOCK_READ_REGISTER32(base_address)\
+    (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_READ_REGISTER32),\
+      __raw_readl((base_address)+MMU_MMU_LOCK_OFFSET))
+
+#define MMUMMU_LOCK_WRITE_REGISTER32(base_address, value)\
+{\
+    const u32 offset = MMU_MMU_LOCK_OFFSET;\
+    register u32 new_value = (value);\
+    _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_WRITE_REGISTER32);\
+    __raw_writel(new_value, (base_address)+offset);\
+}
+
+#define MMUMMU_LOCK_BASE_VALUE_READ32(base_address)\
+    (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_BASE_VALUE_READ32),\
+      (((__raw_readl(((base_address)+(MMU_MMU_LOCK_OFFSET)))) &\
+      MMU_MMU_LOCK_BASE_VALUE_MASK) >>\
+      MMU_MMU_LOCK_BASE_VALUE_OFFSET))
+
+#define MMUMMU_LOCK_BASE_VALUE_WRITE32(base_address, value)\
+{\
+    const u32 offset = MMU_MMU_LOCK_OFFSET;\
+    register u32 data = __raw_readl((base_address)+offset);\
+    register u32 new_value = (value);\
+    _DEBUG_LEVEL1_EASI(easil1_mmummu_lock_base_value_write32);\
+    data &= ~(MMU_MMU_LOCK_BASE_VALUE_MASK);\
+    new_value <<= MMU_MMU_LOCK_BASE_VALUE_OFFSET;\
+    new_value &= MMU_MMU_LOCK_BASE_VALUE_MASK;\
+    new_value |= data;\
+    __raw_writel(new_value, base_address+offset);\
+}
+
+#define MMUMMU_LOCK_CURRENT_VICTIM_READ32(base_address)\
+    (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_READ32),\
+      (((__raw_readl(((base_address)+(MMU_MMU_LOCK_OFFSET)))) &\
+      MMU_MMU_LOCK_CURRENT_VICTIM_MASK) >>\
+      MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET))
+
+#define MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, value)\
+{\
+    const u32 offset = MMU_MMU_LOCK_OFFSET;\
+    register u32 data = __raw_readl((base_address)+offset);\
+    register u32 new_value = (value);\
+    _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_WRITE32);\
+    data &= ~(MMU_MMU_LOCK_CURRENT_VICTIM_MASK);\
+    new_value <<= MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET;\
+    new_value &= MMU_MMU_LOCK_CURRENT_VICTIM_MASK;\
+    new_value |= data;\
+    __raw_writel(new_value, base_address+offset);\
+}
+
+#define MMUMMU_LOCK_CURRENT_VICTIM_SET32(var, value)\
+    (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LOCK_CURRENT_VICTIM_SET32),\
+      (((var) & ~(MMU_MMU_LOCK_CURRENT_VICTIM_MASK)) |\
+      (((value) << MMU_MMU_LOCK_CURRENT_VICTIM_OFFSET) &\
+      MMU_MMU_LOCK_CURRENT_VICTIM_MASK)))
+
+#define MMUMMU_LD_TLB_READ_REGISTER32(base_address)\
+    (_DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LD_TLB_READ_REGISTER32),\
+      __raw_readl((base_address)+MMU_MMU_LD_TLB_OFFSET))
+
+#define MMUMMU_LD_TLB_WRITE_REGISTER32(base_address, value)\
+{\
+    const u32 offset = MMU_MMU_LD_TLB_OFFSET;\
+    register u32 new_value = (value);\
+    _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_LD_TLB_WRITE_REGISTER32);\
+    __raw_writel(new_value, (base_address)+offset);\
+}
+
+#define MMUMMU_CAM_WRITE_REGISTER32(base_address, value)\
+{\
+    const u32 offset = MMU_MMU_CAM_OFFSET;\
+    register u32 new_value = (value);\
+    _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_CAM_WRITE_REGISTER32);\
+    __raw_writel(new_value, (base_address)+offset);\
+}
+
+#define MMUMMU_RAM_WRITE_REGISTER32(base_address, value)\
+{\
+    const u32 offset = MMU_MMU_RAM_OFFSET;\
+    register u32 new_value = (value);\
+    _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_RAM_WRITE_REGISTER32);\
+    __raw_writel(new_value, (base_address)+offset);\
+}
+
+#define MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32(base_address, value)\
+{\
+    const u32 offset = MMU_MMU_FLUSH_ENTRY_OFFSET;\
+    register u32 new_value = (value);\
+    _DEBUG_LEVEL1_EASI(EASIL1_MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32);\
+    __raw_writel(new_value, (base_address)+offset);\
+}
+
+#endif /* USE_LEVEL_1_MACROS */
+
+#endif /* _MMU_REG_ACM_H */
diff --git a/drivers/staging/tidspbridge/hw/hw_defs.h b/drivers/staging/tidspbridge/hw/hw_defs.h
new file mode 100644
index 0000000..d5266d4
--- /dev/null
+++ b/drivers/staging/tidspbridge/hw/hw_defs.h
@@ -0,0 +1,58 @@
+/*
+ * hw_defs.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * Global HW definitions
+ *
+ * Copyright (C) 2007 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _HW_DEFS_H
+#define _HW_DEFS_H
+
+/* Page size */
+#define HW_PAGE_SIZE4KB   0x1000
+#define HW_PAGE_SIZE64KB  0x10000
+#define HW_PAGE_SIZE1MB   0x100000
+#define HW_PAGE_SIZE16MB  0x1000000
+
+/* hw_status:  return type for HW API */
+typedef long hw_status;
+
+/*  Macro used to set and clear any bit */
+#define HW_CLEAR	0
+#define HW_SET		1
+
+/* hw_endianism_t:  Enumerated Type used to specify the endianism
+ *		Do NOT change these values. They are used as bit fields. */
+enum hw_endianism_t {
+	HW_LITTLE_ENDIAN,
+	HW_BIG_ENDIAN
+};
+
+/* hw_element_size_t:  Enumerated Type used to specify the element size
+ *		Do NOT change these values. They are used as bit fields. */
+enum hw_element_size_t {
+	HW_ELEM_SIZE8BIT,
+	HW_ELEM_SIZE16BIT,
+	HW_ELEM_SIZE32BIT,
+	HW_ELEM_SIZE64BIT
+};
+
+/* hw_idle_mode_t:  Enumerated Type used to specify Idle modes */
+enum hw_idle_mode_t {
+	HW_FORCE_IDLE,
+	HW_NO_IDLE,
+	HW_SMART_IDLE
+};
+
+#endif /* _HW_DEFS_H */
diff --git a/drivers/staging/tidspbridge/hw/hw_mmu.c b/drivers/staging/tidspbridge/hw/hw_mmu.c
new file mode 100644
index 0000000..014f5d5
--- /dev/null
+++ b/drivers/staging/tidspbridge/hw/hw_mmu.c
@@ -0,0 +1,562 @@
+/*
+ * hw_mmu.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * API definitions to setup MMU TLB and PTE
+ *
+ * Copyright (C) 2007 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/io.h>
+#include "MMURegAcM.h"
+#include <hw_defs.h>
+#include <hw_mmu.h>
+#include <linux/types.h>
+#include <linux/err.h>
+
+#define MMU_BASE_VAL_MASK	0xFC00
+#define MMU_PAGE_MAX	     3
+#define MMU_ELEMENTSIZE_MAX      3
+#define MMU_ADDR_MASK	    0xFFFFF000
+#define MMU_TTB_MASK	     0xFFFFC000
+#define MMU_SECTION_ADDR_MASK    0xFFF00000
+#define MMU_SSECTION_ADDR_MASK   0xFF000000
+#define MMU_PAGE_TABLE_MASK      0xFFFFFC00
+#define MMU_LARGE_PAGE_MASK      0xFFFF0000
+#define MMU_SMALL_PAGE_MASK      0xFFFFF000
+
+#define MMU_LOAD_TLB	0x00000001
+#define MMU_GFLUSH	0x60
+
+/*
+ * hw_mmu_page_size_t: Enumerated Type used to specify the MMU Page Size (SLSS)
+ */
+enum hw_mmu_page_size_t {
+	HW_MMU_SECTION,
+	HW_MMU_LARGE_PAGE,
+	HW_MMU_SMALL_PAGE,
+	HW_MMU_SUPERSECTION
+};
+
+/*
+ * FUNCTION	      : mmu_flush_entry
+ *
+ * INPUTS:
+ *
+ *       Identifier      : base_address
+ *       Type		: const void __iomem *
+ *       Description     : Base Address of instance of MMU module
+ *
+ * RETURNS:
+ *
+ *       Type		: hw_status
+ *       Description     : 0		 -- No errors occurred
+ *			 RET_BAD_NULL_PARAM     -- A pointer
+ *						parameter was set to NULL
+ *
+ * PURPOSE:	      : Flush the TLB entry pointed by the
+ *			lock counter register
+ *			even if this entry is set protected
+ *
+ * METHOD:	       : Check the Input parameter and Flush a
+ *			 single entry in the TLB.
+ */
+static hw_status mmu_flush_entry(const void __iomem *base_address);
+
+/*
+ * FUNCTION	      : mmu_set_cam_entry
+ *
+ * INPUTS:
+ *
+ *       Identifier      : base_address
+ *       Type		: const void __iomem *
+ *       Description     : Base Address of instance of MMU module
+ *
+ *       Identifier      : page_sz
+ *       Type		: const u32
+ *       Description     : It indicates the page size
+ *
+ *       Identifier      : preserved_bit
+ *       Type		: const u32
+ *       Description     : Indicates whether the TLB entry is preserved
+ *
+ *       Identifier      : valid_bit
+ *       Type		: const u32
+ *       Description     : Indicates whether the TLB entry is valid
+ *
+ *
+ *       Identifier      : virtual_addr_tag
+ *       Type	    	: const u32
+ *       Description     : virtual Address
+ *
+ * RETURNS:
+ *
+ *       Type	    	: hw_status
+ *       Description     : 0		 -- No errors occurred
+ *			 RET_BAD_NULL_PARAM     -- A pointer parameter
+ *						   was set to NULL
+ *			 RET_PARAM_OUT_OF_RANGE -- Input Parameter out
+ *						   of Range
+ *
+ * PURPOSE:	      	: Set MMU_CAM reg
+ *
+ * METHOD:	       	: Check the Input parameters and set the CAM entry.
+ */
+static hw_status mmu_set_cam_entry(const void __iomem *base_address,
+				   const u32 page_sz,
+				   const u32 preserved_bit,
+				   const u32 valid_bit,
+				   const u32 virtual_addr_tag);
+
+/*
+ * FUNCTION	      : mmu_set_ram_entry
+ *
+ * INPUTS:
+ *
+ *       Identifier      : base_address
+ *       Type	    	: const void __iomem *
+ *       Description     : Base Address of instance of MMU module
+ *
+ *       Identifier      : physical_addr
+ *       Type	    	: const u32
+ *       Description     : Physical Address to which the corresponding
+ *			 virtual address should point
+ *
+ *       Identifier      : endianism
+ *       Type	    	: hw_endianism_t
+ *       Description     : endianism for the given page
+ *
+ *       Identifier      : element_size
+ *       Type	    	: hw_element_size_t
+ *       Description     : The element size (8, 16, 32 or 64 bit)
+ *
+ *       Identifier      : mixed_size
+ *       Type	    	: hw_mmu_mixed_size_t
+ *       Description     : Element Size to follow CPU or TLB
+ *
+ * RETURNS:
+ *
+ *       Type	    	: hw_status
+ *       Description     : 0		 -- No errors occurred
+ *			 RET_BAD_NULL_PARAM     -- A pointer parameter
+ *							was set to NULL
+ *			 RET_PARAM_OUT_OF_RANGE -- Input Parameter
+ *							out of Range
+ *
+ * PURPOSE:	      : Set MMU_RAM reg
+ *
+ * METHOD:	       : Check the Input parameters and set the RAM entry.
+ */
+static hw_status mmu_set_ram_entry(const void __iomem *base_address,
+				   const u32 physical_addr,
+				   enum hw_endianism_t endianism,
+				   enum hw_element_size_t element_size,
+				   enum hw_mmu_mixed_size_t mixed_size);
+
+/* HW FUNCTIONS */
+
+hw_status hw_mmu_enable(const void __iomem *base_address)
+{
+	hw_status status = 0;
+
+	MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, HW_SET);
+
+	return status;
+}
+
+hw_status hw_mmu_disable(const void __iomem *base_address)
+{
+	hw_status status = 0;
+
+	MMUMMU_CNTLMMU_ENABLE_WRITE32(base_address, HW_CLEAR);
+
+	return status;
+}
+
+hw_status hw_mmu_num_locked_set(const void __iomem *base_address,
+				u32 num_locked_entries)
+{
+	hw_status status = 0;
+
+	MMUMMU_LOCK_BASE_VALUE_WRITE32(base_address, num_locked_entries);
+
+	return status;
+}
+
+hw_status hw_mmu_victim_num_set(const void __iomem *base_address,
+				u32 victim_entry_num)
+{
+	hw_status status = 0;
+
+	MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, victim_entry_num);
+
+	return status;
+}
+
+hw_status hw_mmu_event_ack(const void __iomem *base_address, u32 irq_mask)
+{
+	hw_status status = 0;
+
+	MMUMMU_IRQSTATUS_WRITE_REGISTER32(base_address, irq_mask);
+
+	return status;
+}
+
+hw_status hw_mmu_event_disable(const void __iomem *base_address, u32 irq_mask)
+{
+	hw_status status = 0;
+	u32 irq_reg;
+
+	irq_reg = MMUMMU_IRQENABLE_READ_REGISTER32(base_address);
+
+	MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, irq_reg & ~irq_mask);
+
+	return status;
+}
+
+hw_status hw_mmu_event_enable(const void __iomem *base_address, u32 irq_mask)
+{
+	hw_status status = 0;
+	u32 irq_reg;
+
+	irq_reg = MMUMMU_IRQENABLE_READ_REGISTER32(base_address);
+
+	MMUMMU_IRQENABLE_WRITE_REGISTER32(base_address, irq_reg | irq_mask);
+
+	return status;
+}
+
+hw_status hw_mmu_event_status(const void __iomem *base_address, u32 *irq_mask)
+{
+	hw_status status = 0;
+
+	*irq_mask = MMUMMU_IRQSTATUS_READ_REGISTER32(base_address);
+
+	return status;
+}
+
+hw_status hw_mmu_fault_addr_read(const void __iomem *base_address, u32 *addr)
+{
+	hw_status status = 0;
+
+	/* read values from register */
+	*addr = MMUMMU_FAULT_AD_READ_REGISTER32(base_address);
+
+	return status;
+}
+
+hw_status hw_mmu_ttb_set(const void __iomem *base_address, u32 ttb_phys_addr)
+{
+	hw_status status = 0;
+	u32 load_ttb;
+
+	load_ttb = ttb_phys_addr & ~0x7FUL;
+	/* write values to register */
+	MMUMMU_TTB_WRITE_REGISTER32(base_address, load_ttb);
+
+	return status;
+}
+
+hw_status hw_mmu_twl_enable(const void __iomem *base_address)
+{
+	hw_status status = 0;
+
+	MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, HW_SET);
+
+	return status;
+}
+
+hw_status hw_mmu_twl_disable(const void __iomem *base_address)
+{
+	hw_status status = 0;
+
+	MMUMMU_CNTLTWL_ENABLE_WRITE32(base_address, HW_CLEAR);
+
+	return status;
+}
+
+hw_status hw_mmu_tlb_flush(const void __iomem *base_address, u32 virtual_addr,
+			   u32 page_sz)
+{
+	hw_status status = 0;
+	u32 virtual_addr_tag;
+	enum hw_mmu_page_size_t pg_size_bits;
+
+	switch (page_sz) {
+	case HW_PAGE_SIZE4KB:
+		pg_size_bits = HW_MMU_SMALL_PAGE;
+		break;
+
+	case HW_PAGE_SIZE64KB:
+		pg_size_bits = HW_MMU_LARGE_PAGE;
+		break;
+
+	case HW_PAGE_SIZE1MB:
+		pg_size_bits = HW_MMU_SECTION;
+		break;
+
+	case HW_PAGE_SIZE16MB:
+		pg_size_bits = HW_MMU_SUPERSECTION;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	/* Generate the 20-bit tag from virtual address */
+	virtual_addr_tag = ((virtual_addr & MMU_ADDR_MASK) >> 12);
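+	/*
+	 * Worked example (hypothetical address): virtual_addr 0x40802000
+	 * & MMU_ADDR_MASK (0xFFFFF000) = 0x40802000, >> 12 -> tag 0x40802
+	 */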
+
+	mmu_set_cam_entry(base_address, pg_size_bits, 0, 0, virtual_addr_tag);
+
+	mmu_flush_entry(base_address);
+
+	return status;
+}
+
+hw_status hw_mmu_tlb_add(const void __iomem *base_address,
+			 u32 physical_addr,
+			 u32 virtual_addr,
+			 u32 page_sz,
+			 u32 entry_num,
+			 struct hw_mmu_map_attrs_t *map_attrs,
+			 s8 preserved_bit, s8 valid_bit)
+{
+	hw_status status = 0;
+	u32 lock_reg;
+	u32 virtual_addr_tag;
+	enum hw_mmu_page_size_t mmu_pg_size;
+
+	/*Check the input Parameters */
+	switch (page_sz) {
+	case HW_PAGE_SIZE4KB:
+		mmu_pg_size = HW_MMU_SMALL_PAGE;
+		break;
+
+	case HW_PAGE_SIZE64KB:
+		mmu_pg_size = HW_MMU_LARGE_PAGE;
+		break;
+
+	case HW_PAGE_SIZE1MB:
+		mmu_pg_size = HW_MMU_SECTION;
+		break;
+
+	case HW_PAGE_SIZE16MB:
+		mmu_pg_size = HW_MMU_SUPERSECTION;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	lock_reg = MMUMMU_LOCK_READ_REGISTER32(base_address);
+
+	/* Generate the 20-bit tag from virtual address */
+	virtual_addr_tag = ((virtual_addr & MMU_ADDR_MASK) >> 12);
+
+	/* Write the fields in the CAM Entry Register */
+	mmu_set_cam_entry(base_address, mmu_pg_size, preserved_bit, valid_bit,
+			  virtual_addr_tag);
+
+	/* Write the different fields of the RAM Entry Register */
+	/* endianism of the page, element size of the page (8, 16, 32, 64 bit) */
+	mmu_set_ram_entry(base_address, physical_addr, map_attrs->endianism,
+			  map_attrs->element_size, map_attrs->mixed_size);
+
+	/* Update the MMU Lock Register */
+	/* currentVictim between lockedBaseValue and (MMU_Entries_Number - 1) */
+	MMUMMU_LOCK_CURRENT_VICTIM_WRITE32(base_address, entry_num);
+
+	/* Enable loading of an entry in TLB by writing 1
+	   into LD_TLB_REG register */
+	MMUMMU_LD_TLB_WRITE_REGISTER32(base_address, MMU_LOAD_TLB);
+
+	MMUMMU_LOCK_WRITE_REGISTER32(base_address, lock_reg);
+
+	return status;
+}
+
+hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
+			 u32 physical_addr,
+			 u32 virtual_addr,
+			 u32 page_sz, struct hw_mmu_map_attrs_t *map_attrs)
+{
+	hw_status status = 0;
+	u32 pte_addr, pte_val;
+	s32 num_entries = 1;
+
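+	/*
+	 * Each case below assembles the PTE in the layout this driver
+	 * assumes: the low bits select the descriptor type (2 = small
+	 * page/section, 1 = large page/coarse table) and the endianism,
+	 * element-size and mixed-size attributes are OR-ed into the spare
+	 * bits.  Worked example (hypothetical values): a little-endian,
+	 * 32-bit, non-mixed 4 KB page at PA 0x80001000 gives
+	 * 0x80001000 | (0 << 9) | (2 << 4) | (0 << 11) | 2 = 0x80001022.
+	 */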
+	switch (page_sz) {
+	case HW_PAGE_SIZE4KB:
+		pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
+					      virtual_addr &
+					      MMU_SMALL_PAGE_MASK);
+		pte_val =
+		    ((physical_addr & MMU_SMALL_PAGE_MASK) |
+		     (map_attrs->endianism << 9) | (map_attrs->
+						    element_size << 4) |
+		     (map_attrs->mixed_size << 11) | 2);
+		break;
+
+	case HW_PAGE_SIZE64KB:
+		num_entries = 16;
+		pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
+					      virtual_addr &
+					      MMU_LARGE_PAGE_MASK);
+		pte_val =
+		    ((physical_addr & MMU_LARGE_PAGE_MASK) |
+		     (map_attrs->endianism << 9) | (map_attrs->
+						    element_size << 4) |
+		     (map_attrs->mixed_size << 11) | 1);
+		break;
+
+	case HW_PAGE_SIZE1MB:
+		pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
+					      virtual_addr &
+					      MMU_SECTION_ADDR_MASK);
+		pte_val =
+		    ((((physical_addr & MMU_SECTION_ADDR_MASK) |
+		       (map_attrs->endianism << 15) | (map_attrs->
+						       element_size << 10) |
+		       (map_attrs->mixed_size << 17)) & ~0x40000) | 0x2);
+		break;
+
+	case HW_PAGE_SIZE16MB:
+		num_entries = 16;
+		pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
+					      virtual_addr &
+					      MMU_SSECTION_ADDR_MASK);
+		pte_val =
+		    (((physical_addr & MMU_SSECTION_ADDR_MASK) |
+		      (map_attrs->endianism << 15) | (map_attrs->
+						      element_size << 10) |
+		      (map_attrs->mixed_size << 17)
+		     ) | 0x40000 | 0x2);
+		break;
+
+	case HW_MMU_COARSE_PAGE_SIZE:
+		pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
+					      virtual_addr &
+					      MMU_SECTION_ADDR_MASK);
+		pte_val = (physical_addr & MMU_PAGE_TABLE_MASK) | 1;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	while (--num_entries >= 0)
+		((u32 *) pte_addr)[num_entries] = pte_val;
+
+	return status;
+}
+
+hw_status hw_mmu_pte_clear(const u32 pg_tbl_va, u32 virtual_addr, u32 page_size)
+{
+	hw_status status = 0;
+	u32 pte_addr;
+	s32 num_entries = 1;
+
+	switch (page_size) {
+	case HW_PAGE_SIZE4KB:
+		pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
+					      virtual_addr &
+					      MMU_SMALL_PAGE_MASK);
+		break;
+
+	case HW_PAGE_SIZE64KB:
+		num_entries = 16;
+		pte_addr = hw_mmu_pte_addr_l2(pg_tbl_va,
+					      virtual_addr &
+					      MMU_LARGE_PAGE_MASK);
+		break;
+
+	case HW_PAGE_SIZE1MB:
+	case HW_MMU_COARSE_PAGE_SIZE:
+		pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
+					      virtual_addr &
+					      MMU_SECTION_ADDR_MASK);
+		break;
+
+	case HW_PAGE_SIZE16MB:
+		num_entries = 16;
+		pte_addr = hw_mmu_pte_addr_l1(pg_tbl_va,
+					      virtual_addr &
+					      MMU_SSECTION_ADDR_MASK);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	while (--num_entries >= 0)
+		((u32 *) pte_addr)[num_entries] = 0;
+
+	return status;
+}
+
+/* mmu_flush_entry */
+static hw_status mmu_flush_entry(const void __iomem *base_address)
+{
+	hw_status status = 0;
+	u32 flush_entry_data = 0x1;
+
+	/* write values to register */
+	MMUMMU_FLUSH_ENTRY_WRITE_REGISTER32(base_address, flush_entry_data);
+
+	return status;
+}
+
+/* mmu_set_cam_entry */
+static hw_status mmu_set_cam_entry(const void __iomem *base_address,
+				   const u32 page_sz,
+				   const u32 preserved_bit,
+				   const u32 valid_bit,
+				   const u32 virtual_addr_tag)
+{
+	hw_status status = 0;
+	u32 mmu_cam_reg;
+
+	mmu_cam_reg = (virtual_addr_tag << 12);
+	mmu_cam_reg = (mmu_cam_reg) | (page_sz) | (valid_bit << 2) |
+	    (preserved_bit << 3);
+
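+	/*
+	 * Worked example (hypothetical values): tag 0x40802, 4 KB page
+	 * (HW_MMU_SMALL_PAGE = 2), valid, not preserved:
+	 * (0x40802 << 12) | 2 | (1 << 2) | (0 << 3) = 0x40802006
+	 */
+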
+	/* write values to register */
+	MMUMMU_CAM_WRITE_REGISTER32(base_address, mmu_cam_reg);
+
+	return status;
+}
+
+/* mmu_set_ram_entry */
+static hw_status mmu_set_ram_entry(const void __iomem *base_address,
+				   const u32 physical_addr,
+				   enum hw_endianism_t endianism,
+				   enum hw_element_size_t element_size,
+				   enum hw_mmu_mixed_size_t mixed_size)
+{
+	hw_status status = 0;
+	u32 mmu_ram_reg;
+
+	mmu_ram_reg = (physical_addr & MMU_ADDR_MASK);
+	mmu_ram_reg = (mmu_ram_reg) | ((endianism << 9) | (element_size << 7) |
+				       (mixed_size << 6));
+
+	/* write values to register */
+	MMUMMU_RAM_WRITE_REGISTER32(base_address, mmu_ram_reg);
+
+	return status;
+
+}
+
+void hw_mmu_tlb_flush_all(const void __iomem *base)
+{
+	__raw_writeb(1, base + MMU_GFLUSH);
+}
diff --git a/drivers/staging/tidspbridge/hw/hw_mmu.h b/drivers/staging/tidspbridge/hw/hw_mmu.h
new file mode 100644
index 0000000..1458a2c
--- /dev/null
+++ b/drivers/staging/tidspbridge/hw/hw_mmu.h
@@ -0,0 +1,163 @@
+/*
+ * hw_mmu.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * MMU types and API declarations
+ *
+ * Copyright (C) 2007 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef _HW_MMU_H
+#define _HW_MMU_H
+
+#include <linux/types.h>
+
+/* Bitmasks for interrupt sources */
+#define HW_MMU_TRANSLATION_FAULT   0x2
+#define HW_MMU_ALL_INTERRUPTS      0x1F
+
+#define HW_MMU_COARSE_PAGE_SIZE 0x400
+
+/* hw_mmu_mixed_size_t:  Enumerated Type used to specify whether to follow
+			CPU/TLB Element size */
+enum hw_mmu_mixed_size_t {
+	HW_MMU_TLBES,
+	HW_MMU_CPUES
+};
+
+/* hw_mmu_map_attrs_t:  Struct containing MMU mapping attributes */
+struct hw_mmu_map_attrs_t {
+	enum hw_endianism_t endianism;
+	enum hw_element_size_t element_size;
+	enum hw_mmu_mixed_size_t mixed_size;
+	bool donotlockmpupage;
+};
+
+extern hw_status hw_mmu_enable(const void __iomem *base_address);
+
+extern hw_status hw_mmu_disable(const void __iomem *base_address);
+
+extern hw_status hw_mmu_num_locked_set(const void __iomem *base_address,
+				       u32 num_locked_entries);
+
+extern hw_status hw_mmu_victim_num_set(const void __iomem *base_address,
+				       u32 victim_entry_num);
+
+/* For MMU faults */
+extern hw_status hw_mmu_event_ack(const void __iomem *base_address,
+				  u32 irq_mask);
+
+extern hw_status hw_mmu_event_disable(const void __iomem *base_address,
+				      u32 irq_mask);
+
+extern hw_status hw_mmu_event_enable(const void __iomem *base_address,
+				     u32 irq_mask);
+
+extern hw_status hw_mmu_event_status(const void __iomem *base_address,
+				     u32 *irq_mask);
+
+extern hw_status hw_mmu_fault_addr_read(const void __iomem *base_address,
+					u32 *addr);
+
+/* Set the TT base address */
+extern hw_status hw_mmu_ttb_set(const void __iomem *base_address,
+				u32 ttb_phys_addr);
+
+extern hw_status hw_mmu_twl_enable(const void __iomem *base_address);
+
+extern hw_status hw_mmu_twl_disable(const void __iomem *base_address);
+
+extern hw_status hw_mmu_tlb_flush(const void __iomem *base_address,
+				  u32 virtual_addr, u32 page_sz);
+
+extern hw_status hw_mmu_tlb_add(const void __iomem *base_address,
+				u32 physical_addr,
+				u32 virtual_addr,
+				u32 page_sz,
+				u32 entry_num,
+				struct hw_mmu_map_attrs_t *map_attrs,
+				s8 preserved_bit, s8 valid_bit);
+
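+/*
+ * Typical call sequence (a sketch; every name below other than the
+ * declarations in this header is hypothetical):
+ *
+ *	struct hw_mmu_map_attrs_t attrs = {
+ *		.endianism = HW_LITTLE_ENDIAN,
+ *		.element_size = HW_ELEM_SIZE32BIT,
+ *		.mixed_size = HW_MMU_TLBES,
+ *	};
+ *	hw_mmu_tlb_add(dsp_mmu_base, pa, va, HW_PAGE_SIZE4KB, entry_num,
+ *		       &attrs, 1, 1);
+ *	hw_mmu_enable(dsp_mmu_base);
+ */
+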
+/* For PTEs */
+extern hw_status hw_mmu_pte_set(const u32 pg_tbl_va,
+				u32 physical_addr,
+				u32 virtual_addr,
+				u32 page_sz,
+				struct hw_mmu_map_attrs_t *map_attrs);
+
+extern hw_status hw_mmu_pte_clear(const u32 pg_tbl_va,
+				  u32 virtual_addr, u32 page_size);
+
+void hw_mmu_tlb_flush_all(const void __iomem *base);
+
+static inline u32 hw_mmu_pte_addr_l1(u32 l1_base, u32 va)
+{
+	u32 pte_addr;
+	u32 va31_to20;
+
+	va31_to20 = va >> (20 - 2);	/* index (va >> 20) pre-scaled by 4 */
+	va31_to20 &= 0xFFFFFFFCUL;
+	pte_addr = l1_base + va31_to20;
+
+	return pte_addr;
+}
+
+static inline u32 hw_mmu_pte_addr_l2(u32 l2_base, u32 va)
+{
+	u32 pte_addr;
+
+	pte_addr = (l2_base & 0xFFFFFC00) | ((va >> 10) & 0x3FC);
+
+	return pte_addr;
+}
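+
+/*
+ * Worked example (hypothetical addresses): for va 0x40802000,
+ * hw_mmu_pte_addr_l1() yields l1_base + ((0x40802000 >> 18) & ~3UL)
+ * = l1_base + 0x1020 (L1 index 0x408 * 4 bytes per entry), and
+ * hw_mmu_pte_addr_l2() yields (l2_base & 0xFFFFFC00) + 0x8
+ * (L2 index 0x2 * 4 bytes per entry).
+ */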
+
+static inline u32 hw_mmu_pte_coarse_l1(u32 pte_val)
+{
+	u32 pte_coarse;
+
+	pte_coarse = pte_val & 0xFFFFFC00;
+
+	return pte_coarse;
+}
+
+static inline u32 hw_mmu_pte_size_l1(u32 pte_val)
+{
+	u32 pte_size = 0;
+
+	if ((pte_val & 0x3) == 0x1) {
+		/* Points to L2 PT */
+		pte_size = HW_MMU_COARSE_PAGE_SIZE;
+	}
+
+	if ((pte_val & 0x3) == 0x2) {
+		if (pte_val & (1 << 18))
+			pte_size = HW_PAGE_SIZE16MB;
+		else
+			pte_size = HW_PAGE_SIZE1MB;
+	}
+
+	return pte_size;
+}
+
+static inline u32 hw_mmu_pte_size_l2(u32 pte_val)
+{
+	u32 pte_size = 0;
+
+	if (pte_val & 0x2)
+		pte_size = HW_PAGE_SIZE4KB;
+	else if (pte_val & 0x1)
+		pte_size = HW_PAGE_SIZE64KB;
+
+	return pte_size;
+}
+
+#endif /* _HW_MMU_H */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h b/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h
index dfb55cc..38122db 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/cfgdefs.h
@@ -68,6 +68,7 @@
 	void __iomem *dw_per_base;
 	u32 dw_per_pm_base;
 	u32 dw_core_pm_base;
+	void __iomem *dw_dmmu_base;
 	void __iomem *dw_sys_ctrl_base;
 };
 
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dev.h b/drivers/staging/tidspbridge/include/dspbridge/dev.h
index 9bdd48f..357458f 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/dev.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/dev.h
@@ -27,6 +27,7 @@
 #include <dspbridge/nodedefs.h>
 #include <dspbridge/dispdefs.h>
 #include <dspbridge/dspdefs.h>
+#include <dspbridge/dmm.h>
 #include <dspbridge/host_os.h>
 
 /*  ----------------------------------- This */
@@ -233,6 +234,29 @@
 				  struct cmm_object **mgr);
 
 /*
+ *  ======== dev_get_dmm_mgr ========
+ *  Purpose:
+ *      Retrieve the handle to the dynamic memory manager created for this
+ *      device.
+ *  Parameters:
+ *      hdev_obj:     Handle to device object created with
+ *                      dev_create_device().
+ *      *mgr:           Ptr to location to store handle.
+ *  Returns:
+ *      0:        Success.
+ *      -EFAULT:    Invalid hdev_obj.
+ *  Requires:
+ *      mgr != NULL.
+ *      DEV Initialized.
+ *  Ensures:
+ *      0:        *mgr contains a handle to a DMM manager object,
+ *                      or NULL.
+ *      else:           *mgr is NULL.
+ */
+extern int dev_get_dmm_mgr(struct dev_object *hdev_obj,
+				  struct dmm_object **mgr);
+
+/*
  *  ======== dev_get_cod_mgr ========
  *  Purpose:
 *      Retrieve the COD manager created for this device.
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dmm.h b/drivers/staging/tidspbridge/include/dspbridge/dmm.h
new file mode 100644
index 0000000..6c58335
--- /dev/null
+++ b/drivers/staging/tidspbridge/include/dspbridge/dmm.h
@@ -0,0 +1,75 @@
+/*
+ * dmm.h
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * The Dynamic Memory Mapping (DMM) module manages the DSP virtual address
+ * space that can be directly mapped to any MPU buffer or memory region.
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#ifndef DMM_
+#define DMM_
+
+#include <dspbridge/dbdefs.h>
+
+struct dmm_object;
+
+/* DMM attributes used in dmm_create() */
+struct dmm_mgrattrs {
+	u32 reserved;
+};
+
+#define DMMPOOLSIZE      0x4000000
+
+/*
+ *  ======== dmm_get_handle ========
+ *  Purpose:
+ *      Return the dynamic memory manager object for this device.
+ *      This is typically called from the client process.
+ */
+
+extern int dmm_get_handle(void *hprocessor,
+				 struct dmm_object **dmm_manager);
+
+extern int dmm_reserve_memory(struct dmm_object *dmm_mgr,
+				     u32 size, u32 *prsv_addr);
+
+extern int dmm_un_reserve_memory(struct dmm_object *dmm_mgr,
+					u32 rsv_addr);
+
+extern int dmm_map_memory(struct dmm_object *dmm_mgr, u32 addr,
+				 u32 size);
+
+extern int dmm_un_map_memory(struct dmm_object *dmm_mgr,
+				    u32 addr, u32 *psize);
+
+extern int dmm_destroy(struct dmm_object *dmm_mgr);
+
+extern int dmm_delete_tables(struct dmm_object *dmm_mgr);
+
+extern int dmm_create(struct dmm_object **dmm_manager,
+			     struct dev_object *hdev_obj,
+			     const struct dmm_mgrattrs *mgr_attrts);
+
+extern bool dmm_init(void);
+
+extern void dmm_exit(void);
+
+extern int dmm_create_tables(struct dmm_object *dmm_mgr,
+				    u32 addr, u32 size);
+
+#ifdef DSP_DMM_DEBUG
+u32 dmm_mem_map_dump(struct dmm_object *dmm_mgr);
+#endif
+
+#endif /* DMM_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/drv.h b/drivers/staging/tidspbridge/include/dspbridge/drv.h
index 75a2c9b..c1f363e 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/drv.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/drv.h
@@ -108,6 +108,12 @@
 	struct bridge_dma_map_info dma_info;
 };
 
+/* Used for DMM reserved memory accounting */
+struct dmm_rsv_object {
+	struct list_head link;
+	u32 dsp_reserved_addr;
+};
+
 /* New structure (member of process context) abstracts DMM resource info */
 struct dspheap_res_object {
 	s32 heap_allocated;	/* DMM status */
@@ -159,6 +165,10 @@
 	struct list_head dmm_map_list;
 	spinlock_t dmm_map_lock;
 
+	/* DMM reserved memory resources */
+	struct list_head dmm_rsv_list;
+	spinlock_t dmm_rsv_lock;
+
 	/* DSP Heap resources */
 	struct dspheap_res_object *pdspheap_list;
 
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dsp-mmu.h b/drivers/staging/tidspbridge/include/dspbridge/dsp-mmu.h
deleted file mode 100644
index cb38d4c..0000000
--- a/drivers/staging/tidspbridge/include/dspbridge/dsp-mmu.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * dsp-mmu.h
- *
- * DSP-BIOS Bridge driver support functions for TI OMAP processors.
- *
- * DSP iommu.
- *
- * Copyright (C) 2005-2010 Texas Instruments, Inc.
- *
- * This package is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
- * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
- */
-
-#ifndef _DSP_MMU_
-#define _DSP_MMU_
-
-#include <plat/iommu.h>
-#include <plat/iovmm.h>
-
-/**
- * dsp_mmu_init() - initialize dsp_mmu module and returns a handle
- *
- * This function initialize dsp mmu module and returns a struct iommu
- * handle to use it for dsp maps.
- *
- */
-struct iommu *dsp_mmu_init(void);
-
-/**
- * dsp_mmu_exit() - destroy dsp mmu module
- * @mmu:	Pointer to iommu handle.
- *
- * This function destroys dsp mmu module.
- *
- */
-void dsp_mmu_exit(struct iommu *mmu);
-
-/**
- * user_to_dsp_map() - maps user to dsp virtual address
- * @mmu:	Pointer to iommu handle.
- * @uva:		Virtual user space address.
- * @da		DSP address
- * @size		Buffer size to map.
- * @usr_pgs	struct page array pointer where the user pages will be stored
- *
- * This function maps a user space buffer into DSP virtual address.
- *
- */
-u32 user_to_dsp_map(struct iommu *mmu, u32 uva, u32 da, u32 size,
-						struct page **usr_pgs);
-
-/**
- * user_to_dsp_unmap() - unmaps DSP virtual buffer.
- * @mmu:	Pointer to iommu handle.
- * @da		DSP address
- *
- * This function unmaps a user space buffer into DSP virtual address.
- *
- */
-int user_to_dsp_unmap(struct iommu *mmu, u32 da);
-
-#endif
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h b/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h
index 6153634..0ae7d16 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/dspdefs.h
@@ -162,6 +162,48 @@
 				       u32 mem_type);
 
 /*
+ *  ======== bridge_brd_mem_map ========
+ *  Purpose:
+ *      Map an MPU memory region to a DSP/IVA memory space
+ *  Parameters:
+ *      dev_ctxt:    Handle to Bridge driver defined device info.
+ *      ul_mpu_addr:      MPU memory region start address.
+ *      virt_addr:      DSP/IVA memory region u8 address.
+ *      ul_num_bytes:     Number of bytes to map.
+ *      map_attr:        Mapping attributes (e.g. endianness).
+ *      mapped_pages:    Array in which to return the mapped pages.
+ *  Returns:
+ *      0:        Success.
+ *      -EPERM:      Other, unspecified error.
+ *  Requires:
+ *      dev_ctxt != NULL;
+ *  Ensures:
+ */
+typedef int(*fxn_brd_memmap) (struct bridge_dev_context
+				     * dev_ctxt, u32 ul_mpu_addr,
+				     u32 virt_addr, u32 ul_num_bytes,
+				     u32 map_attr,
+				     struct page **mapped_pages);
+
+/*
+ *  ======== bridge_brd_mem_un_map ========
+ *  Purpose:
+ *      Unmap an MPU memory region from DSP/IVA memory space
+ *  Parameters:
+ *      dev_ctxt:    Handle to Bridge driver defined device info.
+ *      virt_addr:      DSP/IVA memory region u8 address.
+ *      ul_num_bytes:     Number of bytes to unmap.
+ *  Returns:
+ *      0:        Success.
+ *      -EPERM:      Other, unspecified error.
+ *  Requires:
+ *      dev_ctxt != NULL;
+ *  Ensures:
+ */
+typedef int(*fxn_brd_memunmap) (struct bridge_dev_context
+				       * dev_ctxt,
+				       u32 virt_addr, u32 ul_num_bytes);
+
+/*
  *  ======== bridge_brd_stop ========
  *  Purpose:
  *      Bring board to the BRD_STOPPED state.
@@ -951,6 +993,8 @@
 	fxn_brd_setstate pfn_brd_set_state;	/* Sets the Board State */
 	fxn_brd_memcopy pfn_brd_mem_copy;	/* Copies DSP Memory */
 	fxn_brd_memwrite pfn_brd_mem_write;	/* Write DSP Memory w/o halt */
+	fxn_brd_memmap pfn_brd_mem_map;	/* Maps MPU mem to DSP mem */
+	fxn_brd_memunmap pfn_brd_mem_un_map;	/* Unmaps MPU mem from DSP mem */
 	fxn_chnl_create pfn_chnl_create;	/* Create channel manager. */
 	fxn_chnl_destroy pfn_chnl_destroy;	/* Destroy channel manager. */
 	fxn_chnl_open pfn_chnl_open;	/* Create a new channel. */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/dspioctl.h b/drivers/staging/tidspbridge/include/dspbridge/dspioctl.h
index bad1801..41e0594 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/dspioctl.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/dspioctl.h
@@ -19,6 +19,10 @@
 #ifndef DSPIOCTL_
 #define DSPIOCTL_
 
+/* ------------------------------------ Hardware Abstraction Layer */
+#include <hw_defs.h>
+#include <hw_mmu.h>
+
 /*
  * Any IOCTLS at or above this value are reserved for standard Bridge driver
  * interfaces.
@@ -61,6 +65,9 @@
 	/* GPP virtual address. __va does not work for ioremapped addresses */
 	u32 ul_gpp_va;
 	u32 ul_size;		/* Size of the mapped memory in bytes */
+	enum hw_endianism_t endianism;
+	enum hw_mmu_mixed_size_t mixed_mode;
+	enum hw_element_size_t elem_size;
 };
 
 #endif /* DSPIOCTL_ */
diff --git a/drivers/staging/tidspbridge/include/dspbridge/proc.h b/drivers/staging/tidspbridge/include/dspbridge/proc.h
index 2d12aab..5e09fd1 100644
--- a/drivers/staging/tidspbridge/include/dspbridge/proc.h
+++ b/drivers/staging/tidspbridge/include/dspbridge/proc.h
@@ -551,6 +551,29 @@
 			   struct process_context *pr_ctxt);
 
 /*
+ *  ======== proc_reserve_memory ========
+ *  Purpose:
+ *      Reserve a virtually contiguous region of DSP address space.
+ *  Parameters:
+ *      hprocessor      :   The processor handle.
+ *      ul_size	  :   Size of the address space to reserve.
+ *      pp_rsv_addr       :   Ptr to the DSP-side reserved byte address.
+ *  Returns:
+ *      0	 :   Success.
+ *      -EFAULT     :   Invalid processor handle.
+ *      -EPERM       :   General failure.
+ *      -ENOMEM     :   Cannot reserve chunk of this size.
+ *  Requires:
+ *      pp_rsv_addr is not NULL
+ *      PROC Initialized.
+ *  Ensures:
+ *  Details:
+ */
+extern int proc_reserve_memory(void *hprocessor,
+				      u32 ul_size, void **pp_rsv_addr,
+				      struct process_context *pr_ctxt);
+
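+/*
+ * Usage sketch (hypothetical handles and sizes): reserve a page-multiple
+ * chunk of DSP address space, use it (e.g. as the virtual address hint
+ * for proc_map()), then release it in reverse order:
+ *
+ *	void *rsv;
+ *	if (!proc_reserve_memory(hprocessor, 0x2000, &rsv, pr_ctxt)) {
+ *		...
+ *		proc_un_reserve_memory(hprocessor, rsv, pr_ctxt);
+ *	}
+ */
+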
+/*
  *  ======== proc_un_map ========
  *  Purpose:
  *      Removes a MPU buffer mapping from the DSP address space.
@@ -572,4 +595,27 @@
 extern int proc_un_map(void *hprocessor, void *map_addr,
 			      struct process_context *pr_ctxt);
 
+/*
+ *  ======== proc_un_reserve_memory ========
+ *  Purpose:
+ *      Frees a previously reserved region of DSP address space.
+ *  Parameters:
+ *      hprocessor      :   The processor handle.
+ *      prsv_addr	:   Ptr to the DSP-side reserved byte address.
+ *  Returns:
+ *      0	 :   Success.
+ *      -EFAULT     :   Invalid processor handle.
+ *      -EPERM       :   General failure.
+ *      -ENOENT     :   Cannot find a reserved region starting at this
+ *			address.
+ *  Requires:
+ *      prsv_addr is not NULL
+ *      PROC Initialized.
+ *  Ensures:
+ *  Details:
+ */
+extern int proc_un_reserve_memory(void *hprocessor,
+					 void *prsv_addr,
+					 struct process_context *pr_ctxt);
+
 #endif /* PROC_ */
diff --git a/drivers/staging/tidspbridge/pmgr/dev.c b/drivers/staging/tidspbridge/pmgr/dev.c
index 7b30267..132e960 100644
--- a/drivers/staging/tidspbridge/pmgr/dev.c
+++ b/drivers/staging/tidspbridge/pmgr/dev.c
@@ -34,6 +34,7 @@
 #include <dspbridge/cod.h>
 #include <dspbridge/drv.h>
 #include <dspbridge/proc.h>
+#include <dspbridge/dmm.h>
 
 /*  ----------------------------------- Resource Manager */
 #include <dspbridge/mgr.h>
@@ -74,6 +75,7 @@
 	struct msg_mgr *hmsg_mgr;	/* Message manager. */
 	struct io_mgr *hio_mgr;	/* IO manager (CHNL, msg_ctrl) */
 	struct cmm_object *hcmm_mgr;	/* SM memory manager. */
+	struct dmm_object *dmm_mgr;	/* Dynamic memory manager. */
 	struct ldr_module *module_obj;	/* Bridge Module handle. */
 	u32 word_size;		/* DSP word size: quick access. */
 	struct drv_object *hdrv_obj;	/* Driver Object */
@@ -248,6 +250,9 @@
 			/* Instantiate the DEH module */
 			status = bridge_deh_create(&dev_obj->hdeh_mgr, dev_obj);
 		}
+		/* Create DMM mgr . */
+		status = dmm_create(&dev_obj->dmm_mgr,
+				    (struct dev_object *)dev_obj, NULL);
 	}
 	/* Add the new DEV_Object to the global list: */
 	if (!status) {
@@ -273,6 +278,8 @@
 			kfree(dev_obj->proc_list);
 			if (dev_obj->cod_mgr)
 				cod_delete(dev_obj->cod_mgr);
+			if (dev_obj->dmm_mgr)
+				dmm_destroy(dev_obj->dmm_mgr);
 			kfree(dev_obj);
 		}
 
@@ -382,6 +389,11 @@
 			dev_obj->hcmm_mgr = NULL;
 		}
 
+		if (dev_obj->dmm_mgr) {
+			dmm_destroy(dev_obj->dmm_mgr);
+			dev_obj->dmm_mgr = NULL;
+		}
+
 		/* Call the driver's bridge_dev_destroy() function: */
 		/* Require of DevDestroy */
 		if (dev_obj->hbridge_context) {
@@ -462,6 +474,32 @@
 }
 
 /*
+ *  ======== dev_get_dmm_mgr ========
+ *  Purpose:
+ *      Retrieve the handle to the dynamic memory manager created for this
+ *      device.
+ */
+int dev_get_dmm_mgr(struct dev_object *hdev_obj,
+			   struct dmm_object **mgr)
+{
+	int status = 0;
+	struct dev_object *dev_obj = hdev_obj;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(mgr != NULL);
+
+	if (hdev_obj) {
+		*mgr = dev_obj->dmm_mgr;
+	} else {
+		*mgr = NULL;
+		status = -EFAULT;
+	}
+
+	DBC_ENSURE(!status || (mgr != NULL && *mgr == NULL));
+	return status;
+}
+
+/*
  *  ======== dev_get_cod_mgr ========
  *  Purpose:
 *      Retrieve the COD manager created for this device.
@@ -713,8 +751,10 @@
 
 	refs--;
 
-	if (refs == 0)
+	if (refs == 0) {
 		cmm_exit();
+		dmm_exit();
+	}
 
 	DBC_ENSURE(refs >= 0);
 }
@@ -726,12 +766,25 @@
  */
 bool dev_init(void)
 {
-	bool ret = true;
+	bool cmm_ret, dmm_ret, ret = true;
 
 	DBC_REQUIRE(refs >= 0);
 
-	if (refs == 0)
-		ret = cmm_init();
+	if (refs == 0) {
+		cmm_ret = cmm_init();
+		dmm_ret = dmm_init();
+
+		ret = cmm_ret && dmm_ret;
+
+		if (!ret) {
+			if (cmm_ret)
+				cmm_exit();
+
+			if (dmm_ret)
+				dmm_exit();
+
+		}
+	}
 
 	if (ret)
 		refs++;
@@ -1065,6 +1118,8 @@
 		STORE_FXN(fxn_brd_setstate, pfn_brd_set_state);
 		STORE_FXN(fxn_brd_memcopy, pfn_brd_mem_copy);
 		STORE_FXN(fxn_brd_memwrite, pfn_brd_mem_write);
+		STORE_FXN(fxn_brd_memmap, pfn_brd_mem_map);
+		STORE_FXN(fxn_brd_memunmap, pfn_brd_mem_un_map);
 		STORE_FXN(fxn_chnl_create, pfn_chnl_create);
 		STORE_FXN(fxn_chnl_destroy, pfn_chnl_destroy);
 		STORE_FXN(fxn_chnl_open, pfn_chnl_open);
diff --git a/drivers/staging/tidspbridge/pmgr/dmm.c b/drivers/staging/tidspbridge/pmgr/dmm.c
new file mode 100644
index 0000000..8685233
--- /dev/null
+++ b/drivers/staging/tidspbridge/pmgr/dmm.c
@@ -0,0 +1,533 @@
+/*
+ * dmm.c
+ *
+ * DSP-BIOS Bridge driver support functions for TI OMAP processors.
+ *
+ * The Dynamic Memory Manager (DMM) module manages the DSP Virtual address
+ * space that can be directly mapped to any MPU buffer or memory region
+ *
+ * Notes:
+ *   Region: Generic memory entity having a start address and a size
+ *   Chunk:  Reserved region
+ *
+ * Copyright (C) 2005-2006 Texas Instruments, Inc.
+ *
+ * This package is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
+ * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+ */
+#include <linux/types.h>
+
+/*  ----------------------------------- Host OS */
+#include <dspbridge/host_os.h>
+
+/*  ----------------------------------- DSP/BIOS Bridge */
+#include <dspbridge/dbdefs.h>
+
+/*  ----------------------------------- Trace & Debug */
+#include <dspbridge/dbc.h>
+
+/*  ----------------------------------- OS Adaptation Layer */
+#include <dspbridge/sync.h>
+
+/*  ----------------------------------- Platform Manager */
+#include <dspbridge/dev.h>
+#include <dspbridge/proc.h>
+
+/*  ----------------------------------- This */
+#include <dspbridge/dmm.h>
+
+/*  ----------------------------------- Defines, Data Structures, Typedefs */
+#define DMM_ADDR_VIRTUAL(a) \
+	(((struct map_page *)(a) - virtual_mapping_table) * PG_SIZE4K +\
+	dyn_mem_map_beg)
+#define DMM_ADDR_TO_INDEX(a) (((a) - dyn_mem_map_beg) / PG_SIZE4K)
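+
+/*
+ * Worked example (hypothetical values): with dyn_mem_map_beg =
+ * 0x20000000, DMM_ADDR_TO_INDEX(0x20003000) = 0x3000 / PG_SIZE4K = 3,
+ * and DMM_ADDR_VIRTUAL(&virtual_mapping_table[3]) maps back to
+ * 0x20003000.
+ */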
+
+/* DMM Mgr */
+struct dmm_object {
+	/* Dmm Lock is used to serialize access mem manager for
+	 * multi-threads. */
+	spinlock_t dmm_lock;	/* Lock to access dmm mgr */
+};
+
+/*  ----------------------------------- Globals */
+static u32 refs;		/* module reference count */
+struct map_page {
+	u32 region_size:15;
+	u32 mapped_size:15;
+	u32 reserved:1;
+	u32 mapped:1;
+};
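+
+/*
+ * Encoding sketch: the table has one entry per 4 KB page; only the head
+ * entry of a region carries meaningful region_size/mapped_size counts
+ * (in pages).  E.g. (hypothetical) a reserved 64 KB chunk spans 16
+ * entries, with entry 0 holding region_size = 16 and reserved = 1.
+ */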
+
+/*  Create the free list */
+static struct map_page *virtual_mapping_table;
+static u32 free_region;		/* The index of free region */
+static u32 free_size;
+static u32 dyn_mem_map_beg;	/* The Beginning of dynamic memory mapping */
+static u32 table_size;		/* The size of virt and phys pages tables */
+
+/*  ----------------------------------- Function Prototypes */
+static struct map_page *get_region(u32 addr);
+static struct map_page *get_free_region(u32 len);
+static struct map_page *get_mapped_region(u32 addrs);
+
+/*  ======== dmm_create_tables ========
+ *  Purpose:
+ *      Create table to hold the information of physical address
+ *      the buffer pages that is passed by the user, and the table
+ *      to hold the information of the virtual memory that is reserved
+ *      for DSP.
+ */
+int dmm_create_tables(struct dmm_object *dmm_mgr, u32 addr, u32 size)
+{
+	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
+	int status = 0;
+
+	status = dmm_delete_tables(dmm_obj);
+	if (!status) {
+		dyn_mem_map_beg = addr;
+		table_size = PG_ALIGN_HIGH(size, PG_SIZE4K) / PG_SIZE4K;
+		/*  Create the free list */
+		virtual_mapping_table = __vmalloc(table_size *
+				sizeof(struct map_page), GFP_KERNEL |
+				__GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
+		if (virtual_mapping_table == NULL)
+			status = -ENOMEM;
+		else {
+			/* On successful allocation,
+			 * all entries are zero ('free') */
+			free_region = 0;
+			free_size = table_size * PG_SIZE4K;
+			virtual_mapping_table[0].region_size = table_size;
+		}
+	}
+
+	if (status)
+		pr_err("%s: failure, status 0x%x\n", __func__, status);
+
+	return status;
+}
+
+/*
+ *  ======== dmm_create ========
+ *  Purpose:
+ *      Create a dynamic memory manager object.
+ */
+int dmm_create(struct dmm_object **dmm_manager,
+		      struct dev_object *hdev_obj,
+		      const struct dmm_mgrattrs *mgr_attrts)
+{
+	struct dmm_object *dmm_obj = NULL;
+	int status = 0;
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(dmm_manager != NULL);
+
+	*dmm_manager = NULL;
+	/* create, zero, and tag a dmm mgr object */
+	dmm_obj = kzalloc(sizeof(struct dmm_object), GFP_KERNEL);
+	if (dmm_obj != NULL) {
+		spin_lock_init(&dmm_obj->dmm_lock);
+		*dmm_manager = dmm_obj;
+	} else {
+		status = -ENOMEM;
+	}
+
+	return status;
+}
+
+/*
+ *  ======== dmm_destroy ========
+ *  Purpose:
+ *      Release the dynamic memory manager resources.
+ */
+int dmm_destroy(struct dmm_object *dmm_mgr)
+{
+	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
+	int status = 0;
+
+	DBC_REQUIRE(refs > 0);
+	if (dmm_mgr) {
+		status = dmm_delete_tables(dmm_obj);
+		if (!status)
+			kfree(dmm_obj);
+	} else
+		status = -EFAULT;
+
+	return status;
+}
+
+/*
+ *  ======== dmm_delete_tables ========
+ *  Purpose:
+ *      Delete DMM Tables.
+ */
+int dmm_delete_tables(struct dmm_object *dmm_mgr)
+{
+	int status = 0;
+
+	DBC_REQUIRE(refs > 0);
+	/* Delete all DMM tables */
+	if (dmm_mgr)
+		vfree(virtual_mapping_table);
+	else
+		status = -EFAULT;
+	return status;
+}
+
+/*
+ *  ======== dmm_exit ========
+ *  Purpose:
+ *      Discontinue usage of module; free resources when reference count
+ *      reaches 0.
+ */
+void dmm_exit(void)
+{
+	DBC_REQUIRE(refs > 0);
+
+	refs--;
+}
+
+/*
+ *  ======== dmm_get_handle ========
+ *  Purpose:
+ *      Return the dynamic memory manager object for this device.
+ *      This is typically called from the client process.
+ */
+int dmm_get_handle(void *hprocessor, struct dmm_object **dmm_manager)
+{
+	int status = 0;
+	struct dev_object *hdev_obj;
+
+	DBC_REQUIRE(refs > 0);
+	DBC_REQUIRE(dmm_manager != NULL);
+	if (hprocessor != NULL)
+		status = proc_get_dev_object(hprocessor, &hdev_obj);
+	else
+		hdev_obj = dev_get_first();	/* default */
+
+	if (!status)
+		status = dev_get_dmm_mgr(hdev_obj, dmm_manager);
+
+	return status;
+}
+
+/*
+ *  ======== dmm_init ========
+ *  Purpose:
+ *      Initializes private state of DMM module.
+ */
+bool dmm_init(void)
+{
+	bool ret = true;
+
+	DBC_REQUIRE(refs >= 0);
+
+	if (ret)
+		refs++;
+
+	DBC_ENSURE((ret && (refs > 0)) || (!ret && (refs >= 0)));
+
+	virtual_mapping_table = NULL;
+	table_size = 0;
+
+	return ret;
+}
+
+/*
+ *  ======== dmm_map_memory ========
+ *  Purpose:
+ *      Add a mapping block to the reserved chunk. DMM assumes that this block
+ *  will be mapped in the DSP/IVA's address space. DMM returns an error if a
+ *  mapping overlaps another one. This function stores the info that will be
+ *  required later while unmapping the block.
+ */
+int dmm_map_memory(struct dmm_object *dmm_mgr, u32 addr, u32 size)
+{
+	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
+	struct map_page *chunk;
+	int status = 0;
+
+	spin_lock(&dmm_obj->dmm_lock);
+	/* Find the Reserved memory chunk containing the DSP block to
+	 * be mapped */
+	chunk = (struct map_page *)get_region(addr);
+	if (chunk != NULL) {
+		/* Mark the region 'mapped', leave the 'reserved' info as-is */
+		chunk->mapped = true;
+		chunk->mapped_size = (size / PG_SIZE4K);
+	} else
+		status = -ENOENT;
+	spin_unlock(&dmm_obj->dmm_lock);
+
+	dev_dbg(bridge, "%s dmm_mgr %p, addr %x, size %x\n\tstatus %x, "
+		"chunk %p\n", __func__, dmm_mgr, addr, size, status, chunk);
+
+	return status;
+}
+
+/*
+ *  ======== dmm_reserve_memory ========
+ *  Purpose:
+ *      Reserve a chunk of virtually contiguous DSP/IVA address space.
+ */
+int dmm_reserve_memory(struct dmm_object *dmm_mgr, u32 size,
+			      u32 *prsv_addr)
+{
+	int status = 0;
+	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
+	struct map_page *node;
+	u32 rsv_addr = 0;
+	u32 rsv_size = 0;
+
+	spin_lock(&dmm_obj->dmm_lock);
+
+	/* Try to get a DSP chunk from the free list */
+	node = get_free_region(size);
+	if (node != NULL) {
+		/*  DSP chunk of given size is available. */
+		rsv_addr = DMM_ADDR_VIRTUAL(node);
+		/* Calculate the number of entries to use */
+		rsv_size = size / PG_SIZE4K;
+		if (rsv_size < node->region_size) {
+			/* Mark remainder of free region */
+			node[rsv_size].mapped = false;
+			node[rsv_size].reserved = false;
+			node[rsv_size].region_size =
+			    node->region_size - rsv_size;
+			node[rsv_size].mapped_size = 0;
+		}
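+		/*
+		 * e.g. (hypothetical): reserving 0x2000 from a 16-page free
+		 * region makes node[2] the head of a new 14-page free region
+		 */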
+		/* get_free_region() returns a first-fit chunk, but only the
+		   requested amount is used */
+		node->mapped = false;
+		node->reserved = true;
+		node->region_size = rsv_size;
+		node->mapped_size = 0;
+		/* Return the chunk's starting address */
+		*prsv_addr = rsv_addr;
+	} else
+		/* DSP chunk of the given size is not available */
+		status = -ENOMEM;
+
+	spin_unlock(&dmm_obj->dmm_lock);
+
+	dev_dbg(bridge, "%s dmm_mgr %p, size %x, prsv_addr %p\n\tstatus %x, "
+		"rsv_addr %x, rsv_size %x\n", __func__, dmm_mgr, size,
+		prsv_addr, status, rsv_addr, rsv_size);
+
+	return status;
+}
+
+/*
+ *  ======== dmm_un_map_memory ========
+ *  Purpose:
+ *      Remove the mapped block from the reserved chunk.
+ */
+int dmm_un_map_memory(struct dmm_object *dmm_mgr, u32 addr, u32 *psize)
+{
+	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
+	struct map_page *chunk;
+	int status = 0;
+
+	spin_lock(&dmm_obj->dmm_lock);
+	chunk = get_mapped_region(addr);
+	if (chunk == NULL)
+		status = -ENOENT;
+
+	if (!status) {
+		/* Unmap the region */
+		*psize = chunk->mapped_size * PG_SIZE4K;
+		chunk->mapped = false;
+		chunk->mapped_size = 0;
+	}
+	spin_unlock(&dmm_obj->dmm_lock);
+
+	dev_dbg(bridge, "%s: dmm_mgr %p, addr %x, psize %p\n\tstatus %x, "
+		"chunk %p\n", __func__, dmm_mgr, addr, psize, status, chunk);
+
+	return status;
+}
+
+/*
+ *  ======== dmm_un_reserve_memory ========
+ *  Purpose:
+ *      Free a chunk of reserved DSP/IVA address space.
+ */
+int dmm_un_reserve_memory(struct dmm_object *dmm_mgr, u32 rsv_addr)
+{
+	struct dmm_object *dmm_obj = (struct dmm_object *)dmm_mgr;
+	struct map_page *chunk;
+	u32 i;
+	int status = 0;
+	u32 chunk_size;
+
+	spin_lock(&dmm_obj->dmm_lock);
+
+	/* Find the chunk containing the reserved address */
+	chunk = get_mapped_region(rsv_addr);
+	if (chunk == NULL)
+		status = -ENOENT;
+
+	if (!status) {
+		/* Free all the mapped pages for this reserved region */
+		i = 0;
+		while (i < chunk->region_size) {
+			if (chunk[i].mapped) {
+				/* Remove mapping from the page tables. */
+				chunk_size = chunk[i].mapped_size;
+				/* Clear the mapping flags */
+				chunk[i].mapped = false;
+				chunk[i].mapped_size = 0;
+				i += chunk_size;
+			} else
+				i++;
+		}
+		/* Clear the flags (mark the region 'free') */
+		chunk->reserved = false;
+		/* NOTE: free regions are NOT coalesced here; that happens
+		 * in get_free_region() as it traverses the whole mapping
+		 * table
+		 */
+	}
+	spin_unlock(&dmm_obj->dmm_lock);
+
+	dev_dbg(bridge, "%s: dmm_mgr %p, rsv_addr %x\n\tstatus %x chunk %p\n",
+		__func__, dmm_mgr, rsv_addr, status, chunk);
+
+	return status;
+}
+
+/*
+ *  ======== get_region ========
+ *  Purpose:
+ *      Returns the map_page region containing the specified address
+ */
+static struct map_page *get_region(u32 addr)
+{
+	struct map_page *curr_region = NULL;
+	u32 i = 0;
+
+	if (virtual_mapping_table != NULL) {
+		/* find page mapped by this address */
+		i = DMM_ADDR_TO_INDEX(addr);
+		if (i < table_size)
+			curr_region = virtual_mapping_table + i;
+	}
+
+	dev_dbg(bridge, "%s: curr_region %p, free_region %d, free_size %d\n",
+		__func__, curr_region, free_region, free_size);
+	return curr_region;
+}
+
+/*
+ *  ======== get_free_region ========
+ *  Purpose:
+ *  Returns the requested free region
+ */
+static struct map_page *get_free_region(u32 len)
+{
+	struct map_page *curr_region = NULL;
+	u32 i = 0;
+	u32 region_size = 0;
+	u32 next_i = 0;
+
+	if (virtual_mapping_table == NULL)
+		return curr_region;
+	if (len > free_size) {
+		/* Find the largest free region
+		 * (coalesce during the traversal) */
+		while (i < table_size) {
+			region_size = virtual_mapping_table[i].region_size;
+			next_i = i + region_size;
+			if (virtual_mapping_table[i].reserved == false) {
+				/* Coalesce, if possible */
+				if (next_i < table_size &&
+				    virtual_mapping_table[next_i].reserved
+				    == false) {
+					virtual_mapping_table[i].region_size +=
+					    virtual_mapping_table
+					    [next_i].region_size;
+					continue;
+				}
+				region_size *= PG_SIZE4K;
+				if (region_size > free_size) {
+					free_region = i;
+					free_size = region_size;
+				}
+			}
+			i = next_i;
+		}
+	}
+	if (len <= free_size) {
+		curr_region = virtual_mapping_table + free_region;
+		free_region += (len / PG_SIZE4K);
+		free_size -= len;
+	}
+	return curr_region;
+}
+
+/*
+ *  ======== get_mapped_region ========
+ *  Purpose:
+ *  Returns the requested mapped region
+ */
+static struct map_page *get_mapped_region(u32 addrs)
+{
+	u32 i = 0;
+	struct map_page *curr_region = NULL;
+
+	if (virtual_mapping_table == NULL)
+		return curr_region;
+
+	i = DMM_ADDR_TO_INDEX(addrs);
+	if (i < table_size && (virtual_mapping_table[i].mapped ||
+			       virtual_mapping_table[i].reserved))
+		curr_region = virtual_mapping_table + i;
+	return curr_region;
+}
+
+#ifdef DSP_DMM_DEBUG
+u32 dmm_mem_map_dump(struct dmm_object *dmm_mgr)
+{
+	struct map_page *curr_node = NULL;
+	u32 i;
+	u32 freemem = 0;
+	u32 bigsize = 0;
+
+	spin_lock(&dmm_mgr->dmm_lock);
+
+	if (virtual_mapping_table != NULL) {
+		for (i = 0; i < table_size; i +=
+		     virtual_mapping_table[i].region_size) {
+			curr_node = virtual_mapping_table + i;
+			if (curr_node->reserved) {
+				/*printk("RESERVED size = 0x%x, "
+				   "Map size = 0x%x\n",
+				   (curr_node->region_size * PG_SIZE4K),
+				   (curr_node->mapped == false) ? 0 :
+				   (curr_node->mapped_size * PG_SIZE4K));
+				 */
+			} else {
+/*				printk("UNRESERVED size = 0x%x\n",
+					(curr_node->region_size * PG_SIZE4K));
+ */
+				freemem += (curr_node->region_size * PG_SIZE4K);
+				if (curr_node->region_size > bigsize)
+					bigsize = curr_node->region_size;
+			}
+		}
+	}
+	spin_unlock(&dmm_mgr->dmm_lock);
+	printk(KERN_INFO "Total DSP VA FREE memory = %d Mbytes\n",
+	       freemem / (1024 * 1024));
+	printk(KERN_INFO "Total DSP VA USED memory = %d Mbytes\n",
+	       (((table_size * PG_SIZE4K) - freemem)) / (1024 * 1024));
+	printk(KERN_INFO "DSP VA - Biggest FREE block = %d Mbytes\n\n",
+	       (bigsize * PG_SIZE4K / (1024 * 1024)));
+
+	return 0;
+}
+#endif
diff --git a/drivers/staging/tidspbridge/pmgr/dspapi.c b/drivers/staging/tidspbridge/pmgr/dspapi.c
index 981551c..86ca785 100644
--- a/drivers/staging/tidspbridge/pmgr/dspapi.c
+++ b/drivers/staging/tidspbridge/pmgr/dspapi.c
@@ -993,10 +993,27 @@
 /*
  * ======== procwrap_reserve_memory ========
  */
-u32 __deprecated procwrap_reserve_memory(union trapped_args *args,
-							void *pr_ctxt)
+u32 procwrap_reserve_memory(union trapped_args *args, void *pr_ctxt)
 {
-	return 0;
+	int status;
+	void *prsv_addr;
+	void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor;
+
+	if ((args->args_proc_rsvmem.ul_size <= 0) ||
+	    (args->args_proc_rsvmem.ul_size & (PG_SIZE4K - 1)) != 0)
+		return -EINVAL;
+
+	status = proc_reserve_memory(hprocessor,
+				     args->args_proc_rsvmem.ul_size, &prsv_addr,
+				     pr_ctxt);
+	if (!status) {
+		if (put_user(prsv_addr, args->args_proc_rsvmem.pp_rsv_addr)) {
+			status = -EINVAL;
+			proc_un_reserve_memory(args->args_proc_rsvmem.
+					       hprocessor, prsv_addr, pr_ctxt);
+		}
+	}
+	return status;
 }
 
 /*
@@ -1025,10 +1042,15 @@
 /*
  * ======== procwrap_un_reserve_memory ========
  */
-u32 __deprecated procwrap_un_reserve_memory(union trapped_args *args,
-							void *pr_ctxt)
+u32 procwrap_un_reserve_memory(union trapped_args *args, void *pr_ctxt)
 {
-	return 0;
+	int status;
+	void *hprocessor = ((struct process_context *)pr_ctxt)->hprocessor;
+
+	status = proc_un_reserve_memory(hprocessor,
+					args->args_proc_unrsvmem.prsv_addr,
+					pr_ctxt);
+	return status;
 }
 
 /*
diff --git a/drivers/staging/tidspbridge/rmgr/drv.c b/drivers/staging/tidspbridge/rmgr/drv.c
index 91cc1685..81b1b90 100644
--- a/drivers/staging/tidspbridge/rmgr/drv.c
+++ b/drivers/staging/tidspbridge/rmgr/drv.c
@@ -146,6 +146,7 @@
 	struct process_context *ctxt = (struct process_context *)process_ctxt;
 	int status = 0;
 	struct dmm_map_object *temp_map, *map_obj;
+	struct dmm_rsv_object *temp_rsv, *rsv_obj;
 
 	/* Free DMM mapped memory resources */
 	list_for_each_entry_safe(map_obj, temp_map, &ctxt->dmm_map_list, link) {
@@ -155,6 +156,16 @@
 			pr_err("%s: proc_un_map failed!"
 			       " status = 0x%xn", __func__, status);
 	}
+
+	/* Free DMM reserved memory resources */
+	list_for_each_entry_safe(rsv_obj, temp_rsv, &ctxt->dmm_rsv_list, link) {
+		status = proc_un_reserve_memory(ctxt->hprocessor, (void *)
+						rsv_obj->dsp_reserved_addr,
+						ctxt);
+		if (status)
+			pr_err("%s: proc_un_reserve_memory failed!"
+			       " status = 0x%x\n", __func__, status);
+	}
 	return status;
 }
 
@@ -732,6 +743,7 @@
 	host_res->dw_sys_ctrl_base = ioremap(OMAP_SYSC_BASE, OMAP_SYSC_SIZE);
 	dev_dbg(bridge, "dw_mem_base[0] 0x%x\n", host_res->dw_mem_base[0]);
 	dev_dbg(bridge, "dw_mem_base[3] 0x%x\n", host_res->dw_mem_base[3]);
+	dev_dbg(bridge, "dw_dmmu_base %p\n", host_res->dw_dmmu_base);
 
 	/* for 24xx base port is not mapping the memory for DSP
 	 * internal memory TODO Do a ioremap here */
@@ -785,6 +797,8 @@
 							 OMAP_PER_PRM_SIZE);
 		host_res->dw_core_pm_base = (u32) ioremap(OMAP_CORE_PRM_BASE,
 							  OMAP_CORE_PRM_SIZE);
+		host_res->dw_dmmu_base = ioremap(OMAP_DMMU_BASE,
+						 OMAP_DMMU_SIZE);
 
 		dev_dbg(bridge, "dw_mem_base[0] 0x%x\n",
 			host_res->dw_mem_base[0]);
@@ -796,6 +810,7 @@
 			host_res->dw_mem_base[3]);
 		dev_dbg(bridge, "dw_mem_base[4] 0x%x\n",
 			host_res->dw_mem_base[4]);
+		dev_dbg(bridge, "dw_dmmu_base %p\n", host_res->dw_dmmu_base);
 
 		shm_size = drv_datap->shm_size;
 		if (shm_size >= 0x10000) {
diff --git a/drivers/staging/tidspbridge/rmgr/drv_interface.c b/drivers/staging/tidspbridge/rmgr/drv_interface.c
index 34be43f..324fcdf 100644
--- a/drivers/staging/tidspbridge/rmgr/drv_interface.c
+++ b/drivers/staging/tidspbridge/rmgr/drv_interface.c
@@ -509,6 +509,8 @@
 		pr_ctxt->res_state = PROC_RES_ALLOCATED;
 		spin_lock_init(&pr_ctxt->dmm_map_lock);
 		INIT_LIST_HEAD(&pr_ctxt->dmm_map_list);
+		spin_lock_init(&pr_ctxt->dmm_rsv_lock);
+		INIT_LIST_HEAD(&pr_ctxt->dmm_rsv_list);
 
 		pr_ctxt->node_id = kzalloc(sizeof(struct idr), GFP_KERNEL);
 		if (pr_ctxt->node_id) {
diff --git a/drivers/staging/tidspbridge/rmgr/node.c b/drivers/staging/tidspbridge/rmgr/node.c
index a660247..1562f3c 100644
--- a/drivers/staging/tidspbridge/rmgr/node.c
+++ b/drivers/staging/tidspbridge/rmgr/node.c
@@ -56,6 +56,7 @@
 /*  ----------------------------------- This */
 #include <dspbridge/nodepriv.h>
 #include <dspbridge/node.h>
+#include <dspbridge/dmm.h>
 
 /* Static/Dynamic Loader includes */
 #include <dspbridge/dbll.h>
@@ -316,6 +317,10 @@
 	u32 mapped_addr = 0;
 	u32 map_attrs = 0x0;
 	struct dsp_processorstate proc_state;
+#ifdef DSP_DMM_DEBUG
+	struct dmm_object *dmm_mgr;
+	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
+#endif
 
 	void *node_res;
 
@@ -425,12 +430,34 @@
 	if (status)
 		goto func_cont;
 
+	status = proc_reserve_memory(hprocessor,
+				     pnode->create_args.asa.task_arg_obj.
+				     heap_size + PAGE_SIZE,
+				     (void **)&(pnode->create_args.asa.
+					task_arg_obj.udsp_heap_res_addr),
+				     pr_ctxt);
+	if (status) {
+		pr_err("%s: Failed to reserve memory for heap: 0x%x\n",
+		       __func__, status);
+		goto func_cont;
+	}
+#ifdef DSP_DMM_DEBUG
+	status = dmm_get_handle(p_proc_object, &dmm_mgr);
+	if (!dmm_mgr) {
+		status = DSP_EHANDLE;
+		goto func_cont;
+	}
+
+	dmm_mem_map_dump(dmm_mgr);
+#endif
+
 	map_attrs |= DSP_MAPLITTLEENDIAN;
 	map_attrs |= DSP_MAPELEMSIZE32;
 	map_attrs |= DSP_MAPVIRTUALADDR;
 	status = proc_map(hprocessor, (void *)attr_in->pgpp_virt_addr,
 			  pnode->create_args.asa.task_arg_obj.heap_size,
-			  NULL, (void **)&mapped_addr, map_attrs,
+			  (void *)pnode->create_args.asa.task_arg_obj.
+			  udsp_heap_res_addr, (void **)&mapped_addr, map_attrs,
 			  pr_ctxt);
 	if (status)
 		pr_err("%s: Failed to map memory for Heap: 0x%x\n",
@@ -2484,7 +2511,11 @@
 	struct stream_chnl stream;
 	struct node_msgargs node_msg_args;
 	struct node_taskargs task_arg_obj;
-
+#ifdef DSP_DMM_DEBUG
+	struct dmm_object *dmm_mgr;
+	struct proc_object *p_proc_object =
+	    (struct proc_object *)hnode->hprocessor;
+#endif
 	int status;
 	if (!hnode)
 		goto func_end;
@@ -2545,6 +2576,19 @@
 			status = proc_un_map(hnode->hprocessor, (void *)
 					     task_arg_obj.udsp_heap_addr,
 					     pr_ctxt);
+
+			status = proc_un_reserve_memory(hnode->hprocessor,
+							(void *)
+							task_arg_obj.
+							udsp_heap_res_addr,
+							pr_ctxt);
+#ifdef DSP_DMM_DEBUG
+			status = dmm_get_handle(p_proc_object, &dmm_mgr);
+			if (dmm_mgr)
+				dmm_mem_map_dump(dmm_mgr);
+			else
+				status = DSP_EHANDLE;
+#endif
 		}
 	}
 	if (node_type != NODE_MESSAGE) {
diff --git a/drivers/staging/tidspbridge/rmgr/proc.c b/drivers/staging/tidspbridge/rmgr/proc.c
index 7a15a02..b47d7aa 100644
--- a/drivers/staging/tidspbridge/rmgr/proc.c
+++ b/drivers/staging/tidspbridge/rmgr/proc.c
@@ -39,6 +39,7 @@
 #include <dspbridge/cod.h>
 #include <dspbridge/dev.h>
 #include <dspbridge/procpriv.h>
+#include <dspbridge/dmm.h>
 
 /*  ----------------------------------- Resource Manager */
 #include <dspbridge/mgr.h>
@@ -51,7 +52,6 @@
 #include <dspbridge/msg.h>
 #include <dspbridge/dspioctl.h>
 #include <dspbridge/drv.h>
-#include <_tiomap.h>
 
 /*  ----------------------------------- This */
 #include <dspbridge/proc.h>
@@ -151,21 +151,34 @@
 	return map_obj;
 }
 
+static int match_exact_map_obj(struct dmm_map_object *map_obj,
+					u32 dsp_addr, u32 size)
+{
+	if (map_obj->dsp_addr == dsp_addr && map_obj->size != size)
+		pr_err("%s: addr match (0x%x), sizes don't (0x%x != 0x%x)\n",
+				__func__, dsp_addr, map_obj->size, size);
+
+	return map_obj->dsp_addr == dsp_addr &&
+		map_obj->size == size;
+}
+
 static void remove_mapping_information(struct process_context *pr_ctxt,
-						u32 dsp_addr)
+						u32 dsp_addr, u32 size)
 {
 	struct dmm_map_object *map_obj;
 
-	pr_debug("%s: looking for virt 0x%x\n", __func__, dsp_addr);
+	pr_debug("%s: looking for virt 0x%x size 0x%x\n", __func__,
+							dsp_addr, size);
 
 	spin_lock(&pr_ctxt->dmm_map_lock);
 	list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) {
-		pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x\n",
+		pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x size 0x%x\n",
 							__func__,
 							map_obj->mpu_addr,
-							map_obj->dsp_addr);
+							map_obj->dsp_addr,
+							map_obj->size);
 
-		if (map_obj->dsp_addr == dsp_addr) {
+		if (match_exact_map_obj(map_obj, dsp_addr, size)) {
 			pr_debug("%s: match, deleting map info\n", __func__);
 			list_del(&map_obj->link);
 			kfree(map_obj->dma_info.sg);
@@ -1077,6 +1090,7 @@
 	s32 cnew_envp;		/* "  " in new_envp[] */
 	s32 nproc_id = 0;	/* Anticipate MP version. */
 	struct dcd_manager *hdcd_handle;
+	struct dmm_object *dmm_mgr;
 	u32 dw_ext_end;
 	u32 proc_id;
 	int brd_state;
@@ -1267,6 +1281,25 @@
 			if (!status)
 				status = cod_get_sym_value(cod_mgr, EXTEND,
 							   &dw_ext_end);
+
+			/* Reset DMM structs and add an initial free chunk */
+			if (!status) {
+				status =
+				    dev_get_dmm_mgr(p_proc_object->hdev_obj,
+						    &dmm_mgr);
+				if (dmm_mgr) {
+					/* Set dw_ext_end to the DMM start
+					 * byte address */
+					dw_ext_end =
+					    (dw_ext_end + 1) * DSPWORDSIZE;
+					/* DMM memory is from EXT_END */
+					status = dmm_create_tables(dmm_mgr,
+								   dw_ext_end,
+								   DMMPOOLSIZE);
+				} else {
+					status = -EFAULT;
+				}
+			}
 		}
 	}
 	/* Restore the original argv[0] */
@@ -1319,10 +1352,12 @@
 {
 	u32 va_align;
 	u32 pa_align;
+	struct dmm_object *dmm_mgr;
 	u32 size_align;
 	int status = 0;
 	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
 	struct dmm_map_object *map_obj;
+	u32 tmp_addr = 0;
 
 #ifdef CONFIG_TIDSPBRIDGE_CACHE_LINE_CHECK
 	if ((ul_map_attr & BUFMODE_MASK) != RBUF) {
@@ -1347,30 +1382,33 @@
 	}
 	/* Critical section */
 	mutex_lock(&proc_lock);
+	dmm_get_handle(p_proc_object, &dmm_mgr);
+	if (dmm_mgr)
+		status = dmm_map_memory(dmm_mgr, va_align, size_align);
+	else
+		status = -EFAULT;
 
 	/* Add mapping to the page tables. */
 	if (!status) {
+
+		/* Mapped address = MSB of VA | LSB of PA */
+		tmp_addr = (va_align | ((u32) pmpu_addr & (PG_SIZE4K - 1)));
 		/* mapped memory resource tracking */
-		map_obj = add_mapping_info(pr_ctxt, pa_align, va_align,
+		map_obj = add_mapping_info(pr_ctxt, pa_align, tmp_addr,
 						size_align);
-		if (!map_obj) {
+		if (!map_obj)
 			status = -ENOMEM;
-		} else {
-			va_align = user_to_dsp_map(
-				p_proc_object->hbridge_context->dsp_mmu,
-				pa_align, va_align, size_align,
-				map_obj->pages);
-			if (IS_ERR_VALUE(va_align))
-				status = (int)va_align;
-		}
+		else
+			status = (*p_proc_object->intf_fxns->pfn_brd_mem_map)
+			    (p_proc_object->hbridge_context, pa_align, va_align,
+			     size_align, ul_map_attr, map_obj->pages);
 	}
 	if (!status) {
 		/* Mapped address = MSB of VA | LSB of PA */
-		map_obj->dsp_addr = (va_align |
-					((u32)pmpu_addr & (PG_SIZE4K - 1)));
-		*pp_map_addr = (void *)map_obj->dsp_addr;
+		*pp_map_addr = (void *) tmp_addr;
 	} else {
-		remove_mapping_information(pr_ctxt, va_align);
+		remove_mapping_information(pr_ctxt, tmp_addr, size_align);
+		dmm_un_map_memory(dmm_mgr, va_align, &size_align);
 	}
 	mutex_unlock(&proc_lock);
 
@@ -1463,6 +1501,55 @@
 }
 
 /*
+ *  ======== proc_reserve_memory ========
+ *  Purpose:
+ *      Reserve a virtually contiguous region of DSP address space.
+ */
+int proc_reserve_memory(void *hprocessor, u32 ul_size,
+			       void **pp_rsv_addr,
+			       struct process_context *pr_ctxt)
+{
+	struct dmm_object *dmm_mgr;
+	int status = 0;
+	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
+	struct dmm_rsv_object *rsv_obj;
+
+	if (!p_proc_object) {
+		status = -EFAULT;
+		goto func_end;
+	}
+
+	status = dmm_get_handle(p_proc_object, &dmm_mgr);
+	if (!dmm_mgr) {
+		status = -EFAULT;
+		goto func_end;
+	}
+
+	status = dmm_reserve_memory(dmm_mgr, ul_size, (u32 *) pp_rsv_addr);
+	if (status != 0)
+		goto func_end;
+
+	/*
+	 * A successful reserve should be followed by insertion of rsv_obj
+	 * into dmm_rsv_list, so that reserved memory resource tracking
+	 * remains up to date
+	 */
+	rsv_obj = kmalloc(sizeof(struct dmm_rsv_object), GFP_KERNEL);
+	if (rsv_obj) {
+		rsv_obj->dsp_reserved_addr = (u32) *pp_rsv_addr;
+		spin_lock(&pr_ctxt->dmm_rsv_lock);
+		list_add(&rsv_obj->link, &pr_ctxt->dmm_rsv_list);
+		spin_unlock(&pr_ctxt->dmm_rsv_lock);
+	}
+
+func_end:
+	dev_dbg(bridge, "%s: hprocessor: 0x%p ul_size: 0x%x pp_rsv_addr: 0x%p "
+		"status 0x%x\n", __func__, hprocessor,
+		ul_size, pp_rsv_addr, status);
+	return status;
+}
+
+/*
  *  ======== proc_start ========
  *  Purpose:
  *      Start a processor running.
@@ -1610,7 +1697,9 @@
 {
 	int status = 0;
 	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
+	struct dmm_object *dmm_mgr;
 	u32 va_align;
+	u32 size_align;
 
 	va_align = PG_ALIGN_LOW((u32) map_addr, PG_SIZE4K);
 	if (!p_proc_object) {
@@ -1618,11 +1707,24 @@
 		goto func_end;
 	}
 
+	status = dmm_get_handle(hprocessor, &dmm_mgr);
+	if (!dmm_mgr) {
+		status = -EFAULT;
+		goto func_end;
+	}
+
 	/* Critical section */
 	mutex_lock(&proc_lock);
+	/*
+	 * Update DMM structures. Get the size to unmap.
+	 * This function returns error if the VA is not mapped
+	 */
+	status = dmm_un_map_memory(dmm_mgr, (u32) va_align, &size_align);
 	/* Remove mapping from the page tables. */
-	status = user_to_dsp_unmap(p_proc_object->hbridge_context->dsp_mmu,
-								va_align);
+	if (!status) {
+		status = (*p_proc_object->intf_fxns->pfn_brd_mem_un_map)
+		    (p_proc_object->hbridge_context, va_align, size_align);
+	}
 
 	mutex_unlock(&proc_lock);
 	if (status)
@@ -1633,7 +1735,7 @@
 	 * from dmm_map_list, so that mapped memory resource tracking
 	 * remains up to date
 	 */
-	remove_mapping_information(pr_ctxt, (u32) map_addr);
+	remove_mapping_information(pr_ctxt, (u32) map_addr, size_align);
 
 func_end:
 	dev_dbg(bridge, "%s: hprocessor: 0x%p map_addr: 0x%p status: 0x%x\n",
@@ -1642,6 +1744,55 @@
 }
 
 /*
+ *  ======== proc_un_reserve_memory ========
+ *  Purpose:
+ *      Frees a previously reserved region of DSP address space.
+ */
+int proc_un_reserve_memory(void *hprocessor, void *prsv_addr,
+				  struct process_context *pr_ctxt)
+{
+	struct dmm_object *dmm_mgr;
+	int status = 0;
+	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
+	struct dmm_rsv_object *rsv_obj;
+
+	if (!p_proc_object) {
+		status = -EFAULT;
+		goto func_end;
+	}
+
+	status = dmm_get_handle(p_proc_object, &dmm_mgr);
+	if (!dmm_mgr) {
+		status = -EFAULT;
+		goto func_end;
+	}
+
+	status = dmm_un_reserve_memory(dmm_mgr, (u32) prsv_addr);
+	if (status != 0)
+		goto func_end;
+
+	/*
+	 * A successful unreserve should be followed by removal of rsv_obj
+	 * remains up to date
+	 * remains uptodate
+	 */
+	spin_lock(&pr_ctxt->dmm_rsv_lock);
+	list_for_each_entry(rsv_obj, &pr_ctxt->dmm_rsv_list, link) {
+		if (rsv_obj->dsp_reserved_addr == (u32) prsv_addr) {
+			list_del(&rsv_obj->link);
+			kfree(rsv_obj);
+			break;
+		}
+	}
+	spin_unlock(&pr_ctxt->dmm_rsv_lock);
+
+func_end:
+	dev_dbg(bridge, "%s: hprocessor: 0x%p prsv_addr: 0x%p status: 0x%x\n",
+		__func__, hprocessor, prsv_addr, status);
+	return status;
+}
+
+/*
  *  ======== proc_monitor ========
  *  Purpose:
  *      Place the Processor in Monitor State. This is an internal
diff --git a/drivers/staging/udlfb/udlfb.c b/drivers/staging/udlfb/udlfb.c
index 5969e84..fed2510 100644
--- a/drivers/staging/udlfb/udlfb.c
+++ b/drivers/staging/udlfb/udlfb.c
@@ -887,7 +887,7 @@
 
 		struct fb_deferred_io *fbdefio;
 
-		fbdefio = kmalloc(GFP_KERNEL, sizeof(struct fb_deferred_io));
+		fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
 
 		if (fbdefio) {
 			fbdefio->delay = DL_DEFIO_WRITE_DELAY;
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index e992d5d..7cc3d24 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -1675,13 +1675,14 @@
 
 		{
 			char essid[IW_ESSID_MAX_SIZE+1];
-			if (wrq->u.essid.pointer)
+			if (wrq->u.essid.pointer) {
 				rc = iwctl_giwessid(dev, NULL,
 						    &(wrq->u.essid), essid);
 				if (copy_to_user(wrq->u.essid.pointer,
 						         essid,
 						         wrq->u.essid.length) )
 					rc = -EFAULT;
+			}
 		}
 		break;
 
diff --git a/drivers/staging/westbridge/astoria/api/src/cyasusb.c b/drivers/staging/westbridge/astoria/api/src/cyasusb.c
index 5a21970..7777d9a 100644
--- a/drivers/staging/westbridge/astoria/api/src/cyasusb.c
+++ b/drivers/staging/westbridge/astoria/api/src/cyasusb.c
@@ -1417,7 +1417,6 @@
 	 */
 	bus_mask   = 0;
 	media_mask = 0;
-	media_mask = 0;
 	for (bus = 0; bus < CY_AS_MAX_BUSES; bus++) {
 		for (device = 0; device < CY_AS_MAX_STORAGE_DEVICES; device++) {
 			if (config_p->devices_to_enumerate[bus][device] ==
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index 4af83d5..6a71f52 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -139,7 +139,7 @@
 }
 
 int prism2_add_key(struct wiphy *wiphy, struct net_device *dev,
-		   u8 key_index, const u8 *mac_addr,
+		   u8 key_index, bool pairwise, const u8 *mac_addr,
 		   struct key_params *params)
 {
 	wlandevice_t *wlandev = dev->ml_priv;
@@ -198,7 +198,7 @@
 }
 
 int prism2_get_key(struct wiphy *wiphy, struct net_device *dev,
-		   u8 key_index, const u8 *mac_addr, void *cookie,
+		   u8 key_index, bool pairwise, const u8 *mac_addr, void *cookie,
 		   void (*callback)(void *cookie, struct key_params*))
 {
 	wlandevice_t *wlandev = dev->ml_priv;
@@ -227,7 +227,7 @@
 }
 
 int prism2_del_key(struct wiphy *wiphy, struct net_device *dev,
-		   u8 key_index, const u8 *mac_addr)
+		   u8 key_index, bool pairwise, const u8 *mac_addr)
 {
 	wlandevice_t *wlandev = dev->ml_priv;
 	u32 did;
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c
index aa1792c8..b7b4a73 100644
--- a/drivers/staging/wlan-ng/p80211netdev.c
+++ b/drivers/staging/wlan-ng/p80211netdev.c
@@ -522,8 +522,8 @@
 		if (copy_to_user(useraddr, &edata, sizeof(edata)))
 			return -EFAULT;
 		return 0;
-	}
 #endif
+	}
 
 	return -EOPNOTSUPP;
 }
diff --git a/drivers/tty/Makefile b/drivers/tty/Makefile
new file mode 100644
index 0000000..c43ef48
--- /dev/null
+++ b/drivers/tty/Makefile
@@ -0,0 +1,11 @@
+obj-y				+= tty_io.o n_tty.o tty_ioctl.o tty_ldisc.o \
+				   tty_buffer.o tty_port.o tty_mutex.o
+obj-$(CONFIG_LEGACY_PTYS)	+= pty.o
+obj-$(CONFIG_UNIX98_PTYS)	+= pty.o
+obj-$(CONFIG_AUDIT)		+= tty_audit.o
+obj-$(CONFIG_MAGIC_SYSRQ)	+= sysrq.o
+obj-$(CONFIG_N_HDLC)		+= n_hdlc.o
+obj-$(CONFIG_N_GSM)		+= n_gsm.o
+obj-$(CONFIG_R3964)		+= n_r3964.o
+
+obj-y				+= vt/
diff --git a/drivers/char/n_gsm.c b/drivers/tty/n_gsm.c
similarity index 99%
rename from drivers/char/n_gsm.c
rename to drivers/tty/n_gsm.c
index 04ef3ef..81b4658 100644
--- a/drivers/char/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -716,8 +716,8 @@
 		if (msg->len < 128)
 			*--dp = (msg->len << 1) | EA;
 		else {
-			*--dp = (msg->len >> 6) | EA;
-			*--dp = (msg->len & 127) << 1;
+			*--dp = ((msg->len & 127) << 1) | EA;
+			*--dp = (msg->len >> 6) & 0xfe;
 		}
 	}
 
@@ -2375,6 +2375,7 @@
 	gsm->mru = c->mru;
 	gsm->encoding = c->encapsulation;
 	gsm->adaption = c->adaption;
+	gsm->n2 = c->n2;
 
 	if (c->i == 1)
 		gsm->ftype = UIH;
diff --git a/drivers/char/n_hdlc.c b/drivers/tty/n_hdlc.c
similarity index 100%
rename from drivers/char/n_hdlc.c
rename to drivers/tty/n_hdlc.c
diff --git a/drivers/char/n_r3964.c b/drivers/tty/n_r3964.c
similarity index 100%
rename from drivers/char/n_r3964.c
rename to drivers/tty/n_r3964.c
diff --git a/drivers/char/n_tty.c b/drivers/tty/n_tty.c
similarity index 100%
rename from drivers/char/n_tty.c
rename to drivers/tty/n_tty.c
diff --git a/drivers/char/pty.c b/drivers/tty/pty.c
similarity index 100%
rename from drivers/char/pty.c
rename to drivers/tty/pty.c
diff --git a/drivers/char/sysrq.c b/drivers/tty/sysrq.c
similarity index 100%
rename from drivers/char/sysrq.c
rename to drivers/tty/sysrq.c
diff --git a/drivers/char/tty_audit.c b/drivers/tty/tty_audit.c
similarity index 100%
rename from drivers/char/tty_audit.c
rename to drivers/tty/tty_audit.c
diff --git a/drivers/char/tty_buffer.c b/drivers/tty/tty_buffer.c
similarity index 96%
rename from drivers/char/tty_buffer.c
rename to drivers/tty/tty_buffer.c
index cc1e985..d8210ca 100644
--- a/drivers/char/tty_buffer.c
+++ b/drivers/tty/tty_buffer.c
@@ -413,7 +413,8 @@
 	spin_lock_irqsave(&tty->buf.lock, flags);
 
 	if (!test_and_set_bit(TTY_FLUSHING, &tty->flags)) {
-		struct tty_buffer *head;
+		struct tty_buffer *head, *tail = tty->buf.tail;
+		int seen_tail = 0;
 		while ((head = tty->buf.head) != NULL) {
 			int count;
 			char *char_buf;
@@ -423,6 +424,15 @@
 			if (!count) {
 				if (head->next == NULL)
 					break;
+				/*
+				 * There's a possibility the tty might get a
+				 * new buffer added during the unlock window
+				 * below. We could end up spinning in here
+				 * forever, hogging the CPU completely. To
+				 * avoid this, take a rest each time we have
+				 * processed the tail buffer.
+				 */
+				if (tail == head)
+					seen_tail = 1;
 				tty->buf.head = head->next;
 				tty_buffer_free(tty, head);
 				continue;
@@ -432,7 +442,7 @@
 			   line discipline as we want to empty the queue */
 			if (test_bit(TTY_FLUSHPENDING, &tty->flags))
 				break;
-			if (!tty->receive_room) {
+			if (!tty->receive_room || seen_tail) {
 				schedule_delayed_work(&tty->buf.work, 1);
 				break;
 			}
diff --git a/drivers/char/tty_io.c b/drivers/tty/tty_io.c
similarity index 100%
rename from drivers/char/tty_io.c
rename to drivers/tty/tty_io.c
diff --git a/drivers/char/tty_ioctl.c b/drivers/tty/tty_ioctl.c
similarity index 100%
rename from drivers/char/tty_ioctl.c
rename to drivers/tty/tty_ioctl.c
diff --git a/drivers/char/tty_ldisc.c b/drivers/tty/tty_ldisc.c
similarity index 95%
rename from drivers/char/tty_ldisc.c
rename to drivers/tty/tty_ldisc.c
index 412f977..d8e96b0 100644
--- a/drivers/char/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -47,6 +47,7 @@
 
 static DEFINE_SPINLOCK(tty_ldisc_lock);
 static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_wait);
+static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_idle);
 /* Line disc dispatch table */
 static struct tty_ldisc_ops *tty_ldiscs[NR_LDISCS];
 
@@ -83,6 +84,7 @@
 		return;
 	}
 	local_irq_restore(flags);
+	wake_up(&tty_ldisc_idle);
 }
 
 /**
@@ -531,6 +533,23 @@
 }
 
 /**
+ *	tty_ldisc_wait_idle	-	wait for the ldisc to become idle
+ *	@tty: tty to wait for
+ *
+ *	Wait for the line discipline to become idle. The discipline must
+ *	have been halted for this to guarantee it remains idle.
+ */
+static int tty_ldisc_wait_idle(struct tty_struct *tty)
+{
+	int ret;
+	ret = wait_event_interruptible_timeout(tty_ldisc_idle,
+			atomic_read(&tty->ldisc->users) == 1, 5 * HZ);
+	if (ret < 0)
+		return ret;
+	return ret > 0 ? 0 : -EBUSY;
+}
+
+/**
  *	tty_set_ldisc		-	set line discipline
  *	@tty: the terminal to set
  *	@ldisc: the line discipline
@@ -634,8 +653,17 @@
 
 	flush_scheduled_work();
 
+	retval = tty_ldisc_wait_idle(tty);
+
 	tty_lock();
 	mutex_lock(&tty->ldisc_mutex);
+
+	/* handle wait idle failure locked */
+	if (retval) {
+		tty_ldisc_put(new_ldisc);
+		goto enable;
+	}
+
 	if (test_bit(TTY_HUPPED, &tty->flags)) {
 		/* We were raced by the hangup method. It will have stomped
 		   the ldisc data and closed the ldisc down */
@@ -669,6 +697,7 @@
 
 	tty_ldisc_put(o_ldisc);
 
+enable:
 	/*
 	 *	Allow ldisc referencing to occur again
 	 */
@@ -714,9 +743,12 @@
  *	state closed
  */
 
-static void tty_ldisc_reinit(struct tty_struct *tty, int ldisc)
+static int tty_ldisc_reinit(struct tty_struct *tty, int ldisc)
 {
-	struct tty_ldisc *ld;
+	struct tty_ldisc *ld = tty_ldisc_get(ldisc);
+
+	if (IS_ERR(ld))
+		return -1;
 
 	tty_ldisc_close(tty, tty->ldisc);
 	tty_ldisc_put(tty->ldisc);
@@ -724,10 +756,10 @@
 	/*
 	 *	Switch the line discipline back
 	 */
-	ld = tty_ldisc_get(ldisc);
-	BUG_ON(IS_ERR(ld));
 	tty_ldisc_assign(tty, ld);
 	tty_set_termios_ldisc(tty, ldisc);
+
+	return 0;
 }
 
 /**
@@ -802,13 +834,16 @@
 	   a FIXME */
 	if (tty->ldisc) {	/* Not yet closed */
 		if (reset == 0) {
-			tty_ldisc_reinit(tty, tty->termios->c_line);
-			err = tty_ldisc_open(tty, tty->ldisc);
+
+			if (!tty_ldisc_reinit(tty, tty->termios->c_line))
+				err = tty_ldisc_open(tty, tty->ldisc);
+			else
+				err = 1;
 		}
 		/* If the re-open fails or we reset then go to N_TTY. The
 		   N_TTY open cannot fail */
 		if (reset || err) {
-			tty_ldisc_reinit(tty, N_TTY);
+			BUG_ON(tty_ldisc_reinit(tty, N_TTY));
 			WARN_ON(tty_ldisc_open(tty, tty->ldisc));
 		}
 		tty_ldisc_enable(tty);
diff --git a/drivers/char/tty_mutex.c b/drivers/tty/tty_mutex.c
similarity index 100%
rename from drivers/char/tty_mutex.c
rename to drivers/tty/tty_mutex.c
diff --git a/drivers/char/tty_port.c b/drivers/tty/tty_port.c
similarity index 100%
rename from drivers/char/tty_port.c
rename to drivers/tty/tty_port.c
diff --git a/drivers/char/.gitignore b/drivers/tty/vt/.gitignore
similarity index 100%
rename from drivers/char/.gitignore
rename to drivers/tty/vt/.gitignore
diff --git a/drivers/tty/vt/Makefile b/drivers/tty/vt/Makefile
new file mode 100644
index 0000000..14a51c9
--- /dev/null
+++ b/drivers/tty/vt/Makefile
@@ -0,0 +1,34 @@
+#
+# This file contains the font map for the default (hardware) font
+#
+FONTMAPFILE = cp437.uni
+
+obj-$(CONFIG_VT)			+= vt_ioctl.o vc_screen.o \
+					   selection.o keyboard.o
+obj-$(CONFIG_CONSOLE_TRANSLATIONS)	+= consolemap.o consolemap_deftbl.o
+obj-$(CONFIG_HW_CONSOLE)		+= vt.o defkeymap.o
+
+# Files generated that shall be removed upon make clean
+clean-files := consolemap_deftbl.c defkeymap.c
+
+quiet_cmd_conmk = CONMK   $@
+      cmd_conmk = scripts/conmakehash $< > $@
+
+$(obj)/consolemap_deftbl.c: $(src)/$(FONTMAPFILE)
+	$(call cmd,conmk)
+
+$(obj)/defkeymap.o:  $(obj)/defkeymap.c
+
+# Uncomment if you're changing the keymap and have an appropriate
+# loadkeys version for the map. By default, we'll use the shipped
+# versions.
+# GENERATE_KEYMAP := 1
+
+ifdef GENERATE_KEYMAP
+
+$(obj)/defkeymap.c: $(obj)/%.c: $(src)/%.map
+	loadkeys --mktable $< > $@.tmp
+	sed -e 's/^static *//' $@.tmp > $@
+	rm $@.tmp
+
+endif
diff --git a/drivers/char/consolemap.c b/drivers/tty/vt/consolemap.c
similarity index 100%
rename from drivers/char/consolemap.c
rename to drivers/tty/vt/consolemap.c
diff --git a/drivers/char/cp437.uni b/drivers/tty/vt/cp437.uni
similarity index 100%
rename from drivers/char/cp437.uni
rename to drivers/tty/vt/cp437.uni
diff --git a/drivers/char/defkeymap.c_shipped b/drivers/tty/vt/defkeymap.c_shipped
similarity index 100%
rename from drivers/char/defkeymap.c_shipped
rename to drivers/tty/vt/defkeymap.c_shipped
diff --git a/drivers/char/defkeymap.map b/drivers/tty/vt/defkeymap.map
similarity index 100%
rename from drivers/char/defkeymap.map
rename to drivers/tty/vt/defkeymap.map
diff --git a/drivers/char/keyboard.c b/drivers/tty/vt/keyboard.c
similarity index 100%
rename from drivers/char/keyboard.c
rename to drivers/tty/vt/keyboard.c
diff --git a/drivers/char/selection.c b/drivers/tty/vt/selection.c
similarity index 100%
rename from drivers/char/selection.c
rename to drivers/tty/vt/selection.c
diff --git a/drivers/char/vc_screen.c b/drivers/tty/vt/vc_screen.c
similarity index 98%
rename from drivers/char/vc_screen.c
rename to drivers/tty/vt/vc_screen.c
index 273ab44..eab3a1f 100644
--- a/drivers/char/vc_screen.c
+++ b/drivers/tty/vt/vc_screen.c
@@ -553,12 +553,12 @@
 vcs_poll(struct file *file, poll_table *wait)
 {
 	struct vcs_poll_data *poll = vcs_poll_data_get(file);
-	int ret = 0;
+	int ret = DEFAULT_POLLMASK|POLLERR|POLLPRI;
 
 	if (poll) {
 		poll_wait(file, &poll->waitq, wait);
-		if (!poll->seen_last_update)
-			ret = POLLIN | POLLRDNORM;
+		if (poll->seen_last_update)
+			ret = DEFAULT_POLLMASK;
 	}
 	return ret;
 }
diff --git a/drivers/char/vt.c b/drivers/tty/vt/vt.c
similarity index 100%
rename from drivers/char/vt.c
rename to drivers/tty/vt/vt.c
diff --git a/drivers/char/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
similarity index 100%
rename from drivers/char/vt_ioctl.c
rename to drivers/tty/vt/vt_ioctl.c
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index f1aaff6..045bb4b 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -965,10 +965,11 @@
 
 static int proc_connectinfo(struct dev_state *ps, void __user *arg)
 {
-	struct usbdevfs_connectinfo ci;
+	struct usbdevfs_connectinfo ci = {
+		.devnum = ps->dev->devnum,
+		.slow = ps->dev->speed == USB_SPEED_LOW
+	};
 
-	ci.devnum = ps->dev->devnum;
-	ci.slow = ps->dev->speed == USB_SPEED_LOW;
 	if (copy_to_user(arg, &ci, sizeof(ci)))
 		return -EFAULT;
 	return 0;
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index b739ca8..607d0db 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -158,7 +158,7 @@
 	boolean "Freescale Highspeed USB DR Peripheral Controller"
 	depends on FSL_SOC || ARCH_MXC
 	select USB_GADGET_DUALSPEED
-	select USB_FSL_MPH_DR_OF
+	select USB_FSL_MPH_DR_OF if OF
 	help
 	   Some of Freescale PowerPC processors have a High Speed
 	   Dual-Role(DR) USB controller, which supports device mode.
diff --git a/drivers/usb/gadget/goku_udc.h b/drivers/usb/gadget/goku_udc.h
index 566cb23..e7e0c69 100644
--- a/drivers/usb/gadget/goku_udc.h
+++ b/drivers/usb/gadget/goku_udc.h
@@ -251,7 +251,8 @@
 					got_region:1,
 					req_config:1,
 					configured:1,
-					enabled:1;
+					enabled:1,
+					registered:1;
 
 	/* pci state used to access those endpoints */
 	struct pci_dev			*pdev;
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index cb23355..fbe86ca 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -811,7 +811,6 @@
 		INFO(dev, "MAC %pM\n", net->dev_addr);
 		INFO(dev, "HOST MAC %pM\n", dev->host_mac);
 
-		netif_stop_queue(net);
 		the_dev = dev;
 	}
 
diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
index 01e5354..40f7716 100644
--- a/drivers/usb/gadget/u_serial.c
+++ b/drivers/usb/gadget/u_serial.c
@@ -105,11 +105,15 @@
 	wait_queue_head_t	close_wait;	/* wait for last close */
 
 	struct list_head	read_pool;
+	int read_started;
+	int read_allocated;
 	struct list_head	read_queue;
 	unsigned		n_read;
 	struct tasklet_struct	push;
 
 	struct list_head	write_pool;
+	int write_started;
+	int write_allocated;
 	struct gs_buf		port_write_buf;
 	wait_queue_head_t	drain_wait;	/* wait while writes drain */
 
@@ -363,6 +367,9 @@
 		struct usb_request	*req;
 		int			len;
 
+		if (port->write_started >= QUEUE_SIZE)
+			break;
+
 		req = list_entry(pool->next, struct usb_request, list);
 		len = gs_send_packet(port, req->buf, in->maxpacket);
 		if (len == 0) {
@@ -397,6 +404,8 @@
 			break;
 		}
 
+		port->write_started++;
+
 		/* abort immediately after disconnect */
 		if (!port->port_usb)
 			break;
@@ -418,7 +427,6 @@
 {
 	struct list_head	*pool = &port->read_pool;
 	struct usb_ep		*out = port->port_usb->out;
-	unsigned		started = 0;
 
 	while (!list_empty(pool)) {
 		struct usb_request	*req;
@@ -430,6 +438,9 @@
 		if (!tty)
 			break;
 
+		if (port->read_started >= QUEUE_SIZE)
+			break;
+
 		req = list_entry(pool->next, struct usb_request, list);
 		list_del(&req->list);
 		req->length = out->maxpacket;
@@ -447,13 +458,13 @@
 			list_add(&req->list, pool);
 			break;
 		}
-		started++;
+		port->read_started++;
 
 		/* abort immediately after disconnect */
 		if (!port->port_usb)
 			break;
 	}
-	return started;
+	return port->read_started;
 }
 
 /*
@@ -535,6 +546,7 @@
 		}
 recycle:
 		list_move(&req->list, &port->read_pool);
+		port->read_started--;
 	}
 
 	/* Push from tty to ldisc; without low_latency set this is handled by
@@ -587,6 +599,7 @@
 
 	spin_lock(&port->port_lock);
 	list_add(&req->list, &port->write_pool);
+	port->write_started--;
 
 	switch (req->status) {
 	default:
@@ -608,7 +621,8 @@
 	spin_unlock(&port->port_lock);
 }
 
-static void gs_free_requests(struct usb_ep *ep, struct list_head *head)
+static void gs_free_requests(struct usb_ep *ep, struct list_head *head,
+							 int *allocated)
 {
 	struct usb_request	*req;
 
@@ -616,25 +630,31 @@
 		req = list_entry(head->next, struct usb_request, list);
 		list_del(&req->list);
 		gs_free_req(ep, req);
+		if (allocated)
+			(*allocated)--;
 	}
 }
 
 static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head,
-		void (*fn)(struct usb_ep *, struct usb_request *))
+		void (*fn)(struct usb_ep *, struct usb_request *),
+		int *allocated)
 {
 	int			i;
 	struct usb_request	*req;
+	int n = allocated ? QUEUE_SIZE - *allocated : QUEUE_SIZE;
 
 	/* Pre-allocate up to QUEUE_SIZE transfers, but if we can't
 	 * do quite that many this time, don't fail ... we just won't
 	 * be as speedy as we might otherwise be.
 	 */
-	for (i = 0; i < QUEUE_SIZE; i++) {
+	for (i = 0; i < n; i++) {
 		req = gs_alloc_req(ep, ep->maxpacket, GFP_ATOMIC);
 		if (!req)
 			return list_empty(head) ? -ENOMEM : 0;
 		req->complete = fn;
 		list_add_tail(&req->list, head);
+		if (allocated)
+			(*allocated)++;
 	}
 	return 0;
 }
@@ -661,14 +681,15 @@
 	 * configurations may use different endpoints with a given port;
 	 * and high speed vs full speed changes packet sizes too.
 	 */
-	status = gs_alloc_requests(ep, head, gs_read_complete);
+	status = gs_alloc_requests(ep, head, gs_read_complete,
+		&port->read_allocated);
 	if (status)
 		return status;
 
 	status = gs_alloc_requests(port->port_usb->in, &port->write_pool,
-			gs_write_complete);
+			gs_write_complete, &port->write_allocated);
 	if (status) {
-		gs_free_requests(ep, head);
+		gs_free_requests(ep, head, &port->read_allocated);
 		return status;
 	}
 
@@ -680,8 +701,9 @@
 	if (started) {
 		tty_wakeup(port->port_tty);
 	} else {
-		gs_free_requests(ep, head);
-		gs_free_requests(port->port_usb->in, &port->write_pool);
+		gs_free_requests(ep, head, &port->read_allocated);
+		gs_free_requests(port->port_usb->in, &port->write_pool,
+			&port->write_allocated);
 		status = -EIO;
 	}
 
@@ -1315,8 +1337,12 @@
 	spin_lock_irqsave(&port->port_lock, flags);
 	if (port->open_count == 0 && !port->openclose)
 		gs_buf_free(&port->port_write_buf);
-	gs_free_requests(gser->out, &port->read_pool);
-	gs_free_requests(gser->out, &port->read_queue);
-	gs_free_requests(gser->in, &port->write_pool);
+	gs_free_requests(gser->out, &port->read_pool, NULL);
+	gs_free_requests(gser->out, &port->read_queue, NULL);
+	gs_free_requests(gser->in, &port->write_pool, NULL);
+
+	port->read_allocated = port->read_started =
+		port->write_allocated = port->write_started = 0;
+
 	spin_unlock_irqrestore(&port->port_lock, flags);
 }
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 2391c39..6f4f8e6 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -122,7 +122,7 @@
 	bool "Support for Freescale on-chip EHCI USB controller"
 	depends on USB_EHCI_HCD && FSL_SOC
 	select USB_EHCI_ROOT_HUB_TT
-	select USB_FSL_MPH_DR_OF
+	select USB_FSL_MPH_DR_OF if OF
 	---help---
 	  Variation of ARC USB block used in some Freescale chips.
 
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
index ac9c4d7..bce8505 100644
--- a/drivers/usb/host/ehci-mxc.c
+++ b/drivers/usb/host/ehci-mxc.c
@@ -36,6 +36,8 @@
 static int ehci_mxc_setup(struct usb_hcd *hcd)
 {
 	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+	struct device *dev = hcd->self.controller;
+	struct mxc_usbh_platform_data *pdata = dev_get_platdata(dev);
 	int retval;
 
 	/* EHCI registers start at offset 0x100 */
@@ -63,6 +65,12 @@
 
 	ehci_reset(ehci);
 
+	/* set up the PORTSCx register */
+	ehci_writel(ehci, pdata->portsc, &ehci->regs->port_status[0]);
+
+	/* is this really needed? */
+	msleep(10);
+
 	ehci_port_power(ehci, 0);
 	return 0;
 }
@@ -114,7 +122,7 @@
 	struct mxc_usbh_platform_data *pdata = pdev->dev.platform_data;
 	struct usb_hcd *hcd;
 	struct resource *res;
-	int irq, ret, temp;
+	int irq, ret;
 	struct ehci_mxc_priv *priv;
 	struct device *dev = &pdev->dev;
 
@@ -188,10 +196,6 @@
 		clk_enable(priv->ahbclk);
 	}
 
-	/* set up the PORTSCx register */
-	ehci_writel(ehci, pdata->portsc, &ehci->regs->port_status[0]);
-	mdelay(10);
-
 	/* setup specific usb hw */
 	ret = mxc_initialize_usb_hw(pdev->id, pdata->flags);
 	if (ret < 0)
diff --git a/drivers/usb/host/ohci-jz4740.c b/drivers/usb/host/ohci-jz4740.c
index 10e1872..931d588 100644
--- a/drivers/usb/host/ohci-jz4740.c
+++ b/drivers/usb/host/ohci-jz4740.c
@@ -273,4 +273,4 @@
 	},
 };
 
-MODULE_ALIAS("platfrom:jz4740-ohci");
+MODULE_ALIAS("platform:jz4740-ohci");
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index 3756641..c9078e4 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -553,6 +553,7 @@
 			/* needed for power consumption */
 			struct usb_config_descriptor *cfg_descriptor = &dev->udev->actconfig->desc;
 
+			memset(&info, 0, sizeof(info));
 			/* directly from the descriptor */
 			info.vendor = le16_to_cpu(dev->udev->descriptor.idVendor);
 			info.product = dev->product_id;
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index 70d00e9..dd573ab 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -3008,6 +3008,7 @@
 #else
 			x.sisusb_conactive  = 0;
 #endif
+			memset(x.sisusb_reserved, 0, sizeof(x.sisusb_reserved));
 
 			if (copy_to_user((void __user *)arg, &x, sizeof(x)))
 				retval = -EFAULT;
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index 611a9d2..fcb5206 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -171,8 +171,9 @@
 	}
 
 	/* Start sampling ID pin, when plug is removed from MUSB */
-	if (is_otg_enabled(musb) && (musb->xceiv->state == OTG_STATE_B_IDLE
-		|| musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) {
+	if ((is_otg_enabled(musb) && (musb->xceiv->state == OTG_STATE_B_IDLE
+		|| musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) ||
+		(musb->int_usb & MUSB_INTR_DISCONNECT && is_host_active(musb))) {
 		mod_timer(&musb_conn_timer, jiffies + TIMER_DELAY);
 		musb->a_wait_bcon = TIMER_DELAY;
 	}
@@ -323,30 +324,8 @@
 	return -EIO;
 }
 
-int __init musb_platform_init(struct musb *musb, void *board_data)
+static void musb_platform_reg_init(struct musb *musb)
 {
-
-	/*
-	 * Rev 1.0 BF549 EZ-KITs require PE7 to be high for both DEVICE
-	 * and OTG HOST modes, while rev 1.1 and greater require PE7 to
-	 * be low for DEVICE mode and high for HOST mode. We set it high
-	 * here because we are in host mode
-	 */
-
-	if (gpio_request(musb->config->gpio_vrsel, "USB_VRSEL")) {
-		printk(KERN_ERR "Failed ro request USB_VRSEL GPIO_%d \n",
-			musb->config->gpio_vrsel);
-		return -ENODEV;
-	}
-	gpio_direction_output(musb->config->gpio_vrsel, 0);
-
-	usb_nop_xceiv_register();
-	musb->xceiv = otg_get_transceiver();
-	if (!musb->xceiv) {
-		gpio_free(musb->config->gpio_vrsel);
-		return -ENODEV;
-	}
-
 	if (ANOMALY_05000346) {
 		bfin_write_USB_APHY_CALIB(ANOMALY_05000346_value);
 		SSYNC();
@@ -358,7 +337,8 @@
 	}
 
 	/* Configure PLL oscillator register */
-	bfin_write_USB_PLLOSC_CTRL(0x30a8);
+	bfin_write_USB_PLLOSC_CTRL(0x3080 |
+			((480/musb->config->clkin) << 1));
 	SSYNC();
 
 	bfin_write_USB_SRP_CLKDIV((get_sclk()/1000) / 32 - 1);
@@ -380,6 +360,33 @@
 				EP2_RX_ENA | EP3_RX_ENA | EP4_RX_ENA |
 				EP5_RX_ENA | EP6_RX_ENA | EP7_RX_ENA);
 	SSYNC();
+}
+
+int __init musb_platform_init(struct musb *musb, void *board_data)
+{
+
+	/*
+	 * Rev 1.0 BF549 EZ-KITs require PE7 to be high for both DEVICE
+	 * and OTG HOST modes, while rev 1.1 and greater require PE7 to
+	 * be low for DEVICE mode and high for HOST mode. We set it high
+	 * here because we are in host mode
+	 */
+
+	if (gpio_request(musb->config->gpio_vrsel, "USB_VRSEL")) {
+		printk(KERN_ERR "Failed to request USB_VRSEL GPIO_%d\n",
+			musb->config->gpio_vrsel);
+		return -ENODEV;
+	}
+	gpio_direction_output(musb->config->gpio_vrsel, 0);
+
+	usb_nop_xceiv_register();
+	musb->xceiv = otg_get_transceiver();
+	if (!musb->xceiv) {
+		gpio_free(musb->config->gpio_vrsel);
+		return -ENODEV;
+	}
+
+	musb_platform_reg_init(musb);
 
 	if (is_host_enabled(musb)) {
 		musb->board_set_vbus = bfin_set_vbus;
@@ -394,6 +401,27 @@
 	return 0;
 }
 
+#ifdef CONFIG_PM
+void musb_platform_save_context(struct musb *musb,
+			struct musb_context_registers *musb_context)
+{
+	if (is_host_active(musb))
+		/*
+		 * During hibernate gpio_vrsel will change from high to low
+		 * During hibernate, gpio_vrsel will change from high to
+		 * low, which will generate a wakeup event and resume the
+		 * system immediately.  Set it to 0 before hibernate to
+		 * avoid this wakeup event.
+		gpio_set_value(musb->config->gpio_vrsel, 0);
+}
+
+void musb_platform_restore_context(struct musb *musb,
+			struct musb_context_registers *musb_context)
+{
+	musb_platform_reg_init(musb);
+}
+#endif
+
 int musb_platform_exit(struct musb *musb)
 {
 	gpio_free(musb->config->gpio_vrsel);
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index c9f9024..e6669fc 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -552,7 +552,8 @@
 	if (int_usb & MUSB_INTR_SESSREQ) {
 		void __iomem *mbase = musb->mregs;
 
-		if (devctl & MUSB_DEVCTL_BDEVICE) {
+		if ((devctl & MUSB_DEVCTL_VBUS) == MUSB_DEVCTL_VBUS
+				&& (devctl & MUSB_DEVCTL_BDEVICE)) {
 			DBG(3, "SessReq while on B state\n");
 			return IRQ_HANDLED;
 		}
@@ -1052,6 +1053,11 @@
 		clk_put(musb->clock);
 	spin_unlock_irqrestore(&musb->lock, flags);
 
+	if (!is_otg_enabled(musb) && is_host_enabled(musb))
+		usb_remove_hcd(musb_to_hcd(musb));
+	musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
+	musb_platform_exit(musb);
+
 	/* FIXME power down */
 }
 
@@ -2244,13 +2250,6 @@
 	 */
 	musb_exit_debugfs(musb);
 	musb_shutdown(pdev);
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
-	if (musb->board_mode == MUSB_HOST)
-		usb_remove_hcd(musb_to_hcd(musb));
-#endif
-	musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
-	musb_platform_exit(musb);
-	musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
 
 	musb_free(musb);
 	iounmap(ctrl_base);
@@ -2411,9 +2410,6 @@
 	unsigned long	flags;
 	struct musb	*musb = dev_to_musb(&pdev->dev);
 
-	if (!musb->clock)
-		return 0;
-
 	spin_lock_irqsave(&musb->lock, flags);
 
 	if (is_peripheral_active(musb)) {
@@ -2428,10 +2424,12 @@
 
 	musb_save_context(musb);
 
-	if (musb->set_clock)
-		musb->set_clock(musb->clock, 0);
-	else
-		clk_disable(musb->clock);
+	if (musb->clock) {
+		if (musb->set_clock)
+			musb->set_clock(musb->clock, 0);
+		else
+			clk_disable(musb->clock);
+	}
 	spin_unlock_irqrestore(&musb->lock, flags);
 	return 0;
 }
@@ -2441,13 +2439,12 @@
 	struct platform_device *pdev = to_platform_device(dev);
 	struct musb	*musb = dev_to_musb(&pdev->dev);
 
-	if (!musb->clock)
-		return 0;
-
-	if (musb->set_clock)
-		musb->set_clock(musb->clock, 1);
-	else
-		clk_enable(musb->clock);
+	if (musb->clock) {
+		if (musb->set_clock)
+			musb->set_clock(musb->clock, 1);
+		else
+			clk_enable(musb->clock);
+	}
 
 	musb_restore_context(musb);
 
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 69797e5..febaabc 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -487,7 +487,7 @@
 };
 
 #if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \
-    defined(CONFIG_ARCH_OMAP4)
+    defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_BLACKFIN)
 extern void musb_platform_save_context(struct musb *musb,
 		struct musb_context_registers *musb_context);
 extern void musb_platform_restore_context(struct musb *musb,
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index 5d81504..36cfd06 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -644,10 +644,8 @@
 	 */
 
 				csr |= MUSB_RXCSR_DMAENAB;
-				if (!musb_ep->hb_mult &&
-					musb_ep->hw_ep->rx_double_buffered)
-					csr |= MUSB_RXCSR_AUTOCLEAR;
 #ifdef USE_MODE1
+				csr |= MUSB_RXCSR_AUTOCLEAR;
 				/* csr |= MUSB_RXCSR_DMAMODE; */
 
 				/* this special sequence (enabling and then
@@ -656,6 +654,10 @@
 				 */
 				musb_writew(epio, MUSB_RXCSR,
 					csr | MUSB_RXCSR_DMAMODE);
+#else
+				if (!musb_ep->hb_mult &&
+					musb_ep->hw_ep->rx_double_buffered)
+					csr |= MUSB_RXCSR_AUTOCLEAR;
 #endif
 				musb_writew(epio, MUSB_RXCSR, csr);
 
@@ -807,7 +809,7 @@
 
 #if defined(CONFIG_USB_INVENTRA_DMA) || defined(CONFIG_USB_TUSB_OMAP_DMA)
 		/* Autoclear doesn't clear RxPktRdy for short packets */
-		if ((dma->desired_mode == 0)
+		if ((dma->desired_mode == 0 && !hw_ep->rx_double_buffered)
 				|| (dma->actual_len
 					& (musb_ep->packet_sz - 1))) {
 			/* ack the read! */
@@ -818,8 +820,16 @@
 		/* incomplete, and not short? wait for next IN packet */
 		if ((request->actual < request->length)
 				&& (musb_ep->dma->actual_len
-					== musb_ep->packet_sz))
+					== musb_ep->packet_sz)) {
+			/* In the double buffer case, continue to unload
+			 * the FIFO if there is an Rx packet in it.
+			 */
+			csr = musb_readw(epio, MUSB_RXCSR);
+			if ((csr & MUSB_RXCSR_RXPKTRDY) &&
+				hw_ep->rx_double_buffered)
+				goto exit;
 			return;
+		}
 #endif
 		musb_g_giveback(musb_ep, request, 0);
 
@@ -827,7 +837,7 @@
 		if (!request)
 			return;
 	}
-
+exit:
 	/* Analyze request */
 	rxstate(musb, to_musb_request(request));
 }
@@ -916,13 +926,9 @@
 		 * likewise high bandwidth periodic tx
 		 */
 		/* Set TXMAXP with the FIFO size of the endpoint
-		 * to disable double buffering mode. Currently, It seems that double
-		 * buffering has problem if musb RTL revision number < 2.0.
+		 * to disable double buffering mode.
 		 */
-		if (musb->hwvers < MUSB_HWVERS_2000)
-			musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx);
-		else
-			musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11));
+		musb_writew(regs, MUSB_TXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11));
 
 		csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
 		if (musb_readw(regs, MUSB_TXCSR)
@@ -958,10 +964,7 @@
 		/* Set RXMAXP with the FIFO size of the endpoint
 		 * to disable double buffering mode.
 		 */
-		if (musb->hwvers < MUSB_HWVERS_2000)
-			musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_rx);
-		else
-			musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11));
+		musb_writew(regs, MUSB_RXMAXP, musb_ep->packet_sz | (musb_ep->hb_mult << 11));
 
 		/* force shared fifo to OUT-only mode */
 		if (hw_ep->is_shared_fifo) {
@@ -1166,8 +1169,6 @@
 						: DMA_FROM_DEVICE);
 			request->mapped = 0;
 		}
-	} else if (!req->buf) {
-		return -ENODATA;
 	} else
 		request->mapped = 0;
 
@@ -1695,8 +1696,10 @@
 	musb_platform_try_idle(musb, 0);
 
 	status = device_register(&musb->g.dev);
-	if (status != 0)
+	if (status != 0) {
+		put_device(&musb->g.dev);
 		the_gadget = NULL;
+	}
 	return status;
 }
 
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h
index 2442675..5a727c5 100644
--- a/drivers/usb/musb/musb_regs.h
+++ b/drivers/usb/musb/musb_regs.h
@@ -633,8 +633,9 @@
 	return 0;
 }
 
-static inline void  musb_read_txhubport(void __iomem *mbase, u8 epnum)
+static inline u8 musb_read_txhubport(void __iomem *mbase, u8 epnum)
 {
+	return 0;
 }
 
 #endif /* CONFIG_BLACKFIN */
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index 6f771af..563114d 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -158,6 +158,8 @@
 				dma_addr_t dma_addr, u32 len)
 {
 	struct musb_dma_channel *musb_channel = channel->private_data;
+	struct musb_dma_controller *controller = musb_channel->controller;
+	struct musb *musb = controller->private_data;
 
 	DBG(2, "ep%d-%s pkt_sz %d, dma_addr 0x%x length %d, mode %d\n",
 		musb_channel->epnum,
@@ -167,6 +169,18 @@
 	BUG_ON(channel->status == MUSB_DMA_STATUS_UNKNOWN ||
 		channel->status == MUSB_DMA_STATUS_BUSY);
 
+	/*
+	 * The DMA engine in RTL1.8 and above cannot handle
+	 * DMA addresses that are not aligned to a 4 byte boundary.
+	 * It ends up masking the last two bits of the address
+	 * programmed in DMA_ADDR.
+	 *
+	 * Fail such DMA transfers, so that the backup PIO mode
+	 * can carry out the transfer
+	 */
+	if ((musb->hwvers >= MUSB_HWVERS_1800) && (dma_addr % 4))
+		return false;
+
 	channel->actual_len = 0;
 	musb_channel->start_addr = dma_addr;
 	musb_channel->len = len;
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 89a9a58..76f8b35 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -794,6 +794,8 @@
 	{ USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LOGBOOKML_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_LS_LOGBOOK_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_SCIENCESCOPE_HS_LOGBOOK_PID) },
+	{ USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID),
+		.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
 	{ },					/* Optional parameter entry */
 	{ }					/* Terminating entry */
 };
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 7dfe02f1..263f625 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -1100,3 +1100,10 @@
 #define FTDI_SCIENCESCOPE_LOGBOOKML_PID		0xFF18
 #define FTDI_SCIENCESCOPE_LS_LOGBOOK_PID	0xFF1C
 #define FTDI_SCIENCESCOPE_HS_LOGBOOK_PID	0xFF1D
+
+/*
+ * Milkymist One JTAG/Serial
+ */
+#define QIHARDWARE_VID			0x20B7
+#define MILKYMISTONE_JTAGSERIAL_PID	0x0713
+
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 2297fb1..ef2977d 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -518,7 +518,7 @@
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) },
-	{ USB_DEVICE(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
 	{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) },
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 2054b1e..d126819 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -331,10 +331,7 @@
 
 	iu->iu_id = IU_ID_COMMAND;
 	iu->tag = cpu_to_be16(stream_id);
-	if (sdev->ordered_tags && (cmnd->request->cmd_flags & REQ_HARDBARRIER))
-		iu->prio_attr = UAS_ORDERED_TAG;
-	else
-		iu->prio_attr = UAS_SIMPLE_TAG;
+	iu->prio_attr = UAS_SIMPLE_TAG;
 	iu->len = len;
 	int_to_scsilun(sdev->lun, &iu->lun);
 	memcpy(iu->cdb, cmnd->cmnd, cmnd->cmd_len);
diff --git a/drivers/uwb/allocator.c b/drivers/uwb/allocator.c
index 436e4f7..e45e673 100644
--- a/drivers/uwb/allocator.c
+++ b/drivers/uwb/allocator.c
@@ -326,7 +326,8 @@
 	int bit_index;
 
 	ai = kzalloc(sizeof(struct uwb_rsv_alloc_info), GFP_KERNEL);
-	
+	if (!ai)
+		return UWB_RSV_ALLOC_NOT_FOUND;
 	ai->min_mas = rsv->min_mas;
 	ai->max_mas = rsv->max_mas;
 	ai->max_interval = rsv->max_interval;
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 4b4da5b..f442668 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -129,8 +129,9 @@
 	size_t hdr_size;
 	struct socket *sock;
 
-	sock = rcu_dereference_check(vq->private_data,
-				     lockdep_is_held(&vq->mutex));
+	/* TODO: check that we are running from vhost_worker?
+	 * Not sure it's worth it, it's straightforward enough. */
+	sock = rcu_dereference_check(vq->private_data, 1);
 	if (!sock)
 		return;
 
diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c
index 3ec2460..734c650 100644
--- a/drivers/video/backlight/adp8860_bl.c
+++ b/drivers/video/backlight/adp8860_bl.c
@@ -502,8 +502,10 @@
 		struct device_attribute *attr, const char *buf, size_t count)
 {
 	struct adp8860_bl *data = dev_get_drvdata(dev);
+	int ret = strict_strtoul(buf, 10, &data->cached_daylight_max);
+	if (ret)
+		return ret;
 
-	strict_strtoul(buf, 10, &data->cached_daylight_max);
 	return adp8860_store(dev, buf, count, ADP8860_BLMX1);
 }
 static DEVICE_ATTR(l1_daylight_max, 0664, adp8860_bl_l1_daylight_max_show,
@@ -614,7 +616,7 @@
 	if (val == 0) {
 		/* Enable automatic ambient light sensing */
 		adp8860_set_bits(data->client, ADP8860_MDCR, CMP_AUTOEN);
-	} else if ((val > 0) && (val < 6)) {
+	} else if ((val > 0) && (val <= 3)) {
 		/* Disable automatic ambient light sensing */
 		adp8860_clr_bits(data->client, ADP8860_MDCR, CMP_AUTOEN);
 
@@ -622,7 +624,7 @@
 		mutex_lock(&data->lock);
 		adp8860_read(data->client, ADP8860_CFGR, &reg_val);
 		reg_val &= ~(CFGR_BLV_MASK << CFGR_BLV_SHIFT);
-		reg_val |= val << CFGR_BLV_SHIFT;
+		reg_val |= (val - 1) << CFGR_BLV_SHIFT;
 		adp8860_write(data->client, ADP8860_CFGR, reg_val);
 		mutex_unlock(&data->lock);
 	}
diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c
index 9093ef0..c67801e 100644
--- a/drivers/video/backlight/l4f00242t03.c
+++ b/drivers/video/backlight/l4f00242t03.c
@@ -78,7 +78,7 @@
 	const u16 slpin = 0x10;
 	const u16 disoff = 0x28;
 
-	if (power) {
+	if (power <= FB_BLANK_NORMAL) {
 		if (priv->lcd_on)
 			return 0;
 
diff --git a/drivers/video/backlight/lms283gf05.c b/drivers/video/backlight/lms283gf05.c
index abc43a0..5d3cf33 100644
--- a/drivers/video/backlight/lms283gf05.c
+++ b/drivers/video/backlight/lms283gf05.c
@@ -129,7 +129,7 @@
 	struct spi_device *spi = st->spi;
 	struct lms283gf05_pdata *pdata = spi->dev.platform_data;
 
-	if (power) {
+	if (power <= FB_BLANK_NORMAL) {
 		if (pdata)
 			lms283gf05_reset(pdata->reset_gpio,
 					pdata->reset_inverted);
diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c
index 9fb533f..1485f73 100644
--- a/drivers/video/backlight/mbp_nvidia_bl.c
+++ b/drivers/video/backlight/mbp_nvidia_bl.c
@@ -335,6 +335,24 @@
 		},
 		.driver_data	= (void *)&nvidia_chipset_data,
 	},
+	{
+		.callback	= mbp_dmi_match,
+		.ident		= "MacBookAir 3,1",
+		.matches	= {
+			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir3,1"),
+		},
+		.driver_data	= (void *)&nvidia_chipset_data,
+	},
+	{
+		.callback	= mbp_dmi_match,
+		.ident		= "MacBookAir 3,2",
+		.matches	= {
+			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir3,2"),
+		},
+		.driver_data	= (void *)&nvidia_chipset_data,
+	},
 	{ }
 };
 
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index 5504435..21866ec 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -25,6 +25,7 @@
 	struct pwm_device	*pwm;
 	struct device		*dev;
 	unsigned int		period;
+	unsigned int		lth_brightness;
 	int			(*notify)(struct device *,
 					  int brightness);
 };
@@ -48,7 +49,9 @@
 		pwm_config(pb->pwm, 0, pb->period);
 		pwm_disable(pb->pwm);
 	} else {
-		pwm_config(pb->pwm, brightness * pb->period / max, pb->period);
+		brightness = pb->lth_brightness +
+			(brightness * (pb->period - pb->lth_brightness) / max);
+		pwm_config(pb->pwm, brightness, pb->period);
 		pwm_enable(pb->pwm);
 	}
 	return 0;
@@ -92,6 +95,8 @@
 
 	pb->period = data->pwm_period_ns;
 	pb->notify = data->notify;
+	pb->lth_brightness = data->lth_brightness *
+		(data->pwm_period_ns / data->max_brightness);
 	pb->dev = &pdev->dev;
 
 	pb->pwm = pwm_request(data->pwm_id, "backlight");
diff --git a/drivers/video/backlight/s6e63m0.c b/drivers/video/backlight/s6e63m0.c
index a3128c9..5927db0 100644
--- a/drivers/video/backlight/s6e63m0.c
+++ b/drivers/video/backlight/s6e63m0.c
@@ -729,10 +729,10 @@
 
 	return strlen(buf);
 }
-static DEVICE_ATTR(gamma_table, 0644,
+static DEVICE_ATTR(gamma_table, 0444,
 		s6e63m0_sysfs_show_gamma_table, NULL);
 
-static int __init s6e63m0_probe(struct spi_device *spi)
+static int __devinit s6e63m0_probe(struct spi_device *spi)
 {
 	int ret = 0;
 	struct s6e63m0 *lcd = NULL;
@@ -829,6 +829,9 @@
 	struct s6e63m0 *lcd = dev_get_drvdata(&spi->dev);
 
 	s6e63m0_power(lcd, FB_BLANK_POWERDOWN);
+	device_remove_file(&spi->dev, &dev_attr_gamma_table);
+	device_remove_file(&spi->dev, &dev_attr_gamma_mode);
+	backlight_device_unregister(lcd->bd);
 	lcd_device_unregister(lcd->ld);
 	kfree(lcd);
 
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 97612f5..321a0c8 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -1299,9 +1299,6 @@
 		evtchn_to_irq[evtchn] = irq;
 		irq_info[irq] = mk_virq_info(evtchn, virq);
 		bind_evtchn_to_cpu(evtchn, cpu);
-
-		/* Ready for use. */
-		unmask_evtchn(evtchn);
 	}
 }
 
@@ -1327,10 +1324,6 @@
 		evtchn_to_irq[evtchn] = irq;
 		irq_info[irq] = mk_ipi_info(evtchn, ipi);
 		bind_evtchn_to_cpu(evtchn, cpu);
-
-		/* Ready for use. */
-		unmask_evtchn(evtchn);
-
 	}
 }
 
@@ -1390,6 +1383,7 @@
 void xen_irq_resume(void)
 {
 	unsigned int cpu, irq, evtchn;
+	struct irq_desc *desc;
 
 	init_evtchn_cpu_bindings();
 
@@ -1408,6 +1402,23 @@
 		restore_cpu_virqs(cpu);
 		restore_cpu_ipis(cpu);
 	}
+
+	/*
+	 * Unmask any IRQF_NO_SUSPEND IRQs which are enabled. These
+	 * are not handled by the IRQ core.
+	 */
+	for_each_irq_desc(irq, desc) {
+		if (!desc->action || !(desc->action->flags & IRQF_NO_SUSPEND))
+			continue;
+		if (desc->status & IRQ_DISABLED)
+			continue;
+
+		evtchn = evtchn_from_irq(irq);
+		if (evtchn == -1)
+			continue;
+
+		unmask_evtchn(evtchn);
+	}
 }
 
 static struct irq_chip xen_dynamic_chip __read_mostly = {
diff --git a/fs/bio.c b/fs/bio.c
index 8abb2df..4bd454f 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -370,6 +370,9 @@
 {
 	struct bio *bio;
 
+	if (nr_iovecs > UIO_MAXIOV)
+		return NULL;
+
 	bio = kmalloc(sizeof(struct bio) + nr_iovecs * sizeof(struct bio_vec),
 		      gfp_mask);
 	if (unlikely(!bio))
@@ -697,8 +700,12 @@
 static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count,
 					       gfp_t gfp_mask)
 {
-	struct bio_map_data *bmd = kmalloc(sizeof(*bmd), gfp_mask);
+	struct bio_map_data *bmd;
 
+	if (iov_count > UIO_MAXIOV)
+		return NULL;
+
+	bmd = kmalloc(sizeof(*bmd), gfp_mask);
 	if (!bmd)
 		return NULL;
 
@@ -827,6 +834,12 @@
 		end = (uaddr + iov[i].iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
 		start = uaddr >> PAGE_SHIFT;
 
+		/*
+		 * Overflow, abort
+		 */
+		if (end < start)
+			return ERR_PTR(-EINVAL);
+
 		nr_pages += end - start;
 		len += iov[i].iov_len;
 	}
@@ -955,6 +968,12 @@
 		unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
 		unsigned long start = uaddr >> PAGE_SHIFT;
 
+		/*
+		 * Overflow, abort
+		 */
+		if (end < start)
+			return ERR_PTR(-EINVAL);
+
 		nr_pages += end - start;
 		/*
 		 * buffer must be aligned to at least hardsector size for now
@@ -982,7 +1001,7 @@
 		unsigned long start = uaddr >> PAGE_SHIFT;
 		const int local_nr_pages = end - start;
 		const int page_limit = cur_page + local_nr_pages;
-		
+
 		ret = get_user_pages_fast(uaddr, local_nr_pages,
 				write_to_vm, &pages[cur_page]);
 		if (ret < local_nr_pages) {
diff --git a/fs/cifs/TODO b/fs/cifs/TODO
index 5aff46c..355abcd 100644
--- a/fs/cifs/TODO
+++ b/fs/cifs/TODO
@@ -81,7 +81,7 @@
 
 v) mount check for unmatched uids
 
-w) Add support for new vfs entry points for setlease and fallocate 
+w) Add support for new vfs entry point for fallocate
 
 x) Fix Samba 3 server to handle Linux kernel aio so dbench with lots of 
 processes can proceed better in parallel (on the server)
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index 525ba59..e9a393c 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -15,7 +15,7 @@
  *   the GNU Lesser General Public License for more details.
  *
  */
-#include <linux/radix-tree.h>
+#include <linux/rbtree.h>
 
 #ifndef _CIFS_FS_SB_H
 #define _CIFS_FS_SB_H
@@ -42,9 +42,9 @@
 #define CIFS_MOUNT_MULTIUSER	0x20000 /* multiuser mount */
 
 struct cifs_sb_info {
-	struct radix_tree_root tlink_tree;
-#define CIFS_TLINK_MASTER_TAG		0	/* is "master" (mount) tcon */
+	struct rb_root tlink_tree;
 	spinlock_t tlink_tree_lock;
+	struct tcon_link *master_tlink;
 	struct nls_table *local_nls;
 	unsigned int rsize;
 	unsigned int wsize;
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 75c4eaa..9c37897 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -116,7 +116,7 @@
 		return -ENOMEM;
 
 	spin_lock_init(&cifs_sb->tlink_tree_lock);
-	INIT_RADIX_TREE(&cifs_sb->tlink_tree, GFP_KERNEL);
+	cifs_sb->tlink_tree = RB_ROOT;
 
 	rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs", BDI_CAP_MAP_COPY);
 	if (rc) {
@@ -321,8 +321,7 @@
 	/* Until the file is open and we have gotten oplock
 	info back from the server, can not assume caching of
 	file data or metadata */
-	cifs_inode->clientCanCacheRead = false;
-	cifs_inode->clientCanCacheAll = false;
+	cifs_set_oplock_level(cifs_inode, 0);
 	cifs_inode->delete_pending = false;
 	cifs_inode->invalid_mapping = false;
 	cifs_inode->vfs_inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index f259e4d..b577bf0 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -336,7 +336,8 @@
  * "get" on the container.
  */
 struct tcon_link {
-	unsigned long		tl_index;
+	struct rb_node		tl_rbnode;
+	uid_t			tl_uid;
 	unsigned long		tl_flags;
 #define TCON_LINK_MASTER	0
 #define TCON_LINK_PENDING	1
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index edb6d90..7ed69b6 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -104,6 +104,7 @@
 extern u64 cifs_UnixTimeToNT(struct timespec);
 extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time,
 				      int offset);
+extern void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock);
 
 extern struct cifsFileInfo *cifs_new_fileinfo(__u16 fileHandle,
 				struct file *file, struct tcon_link *tlink,
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 9eb327d..251a17c 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -116,6 +116,7 @@
 
 static int ipv4_connect(struct TCP_Server_Info *server);
 static int ipv6_connect(struct TCP_Server_Info *server);
+static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink);
 static void cifs_prune_tlinks(struct work_struct *work);
 
 /*
@@ -2900,24 +2901,16 @@
 		goto mount_fail_check;
 	}
 
-	tlink->tl_index = pSesInfo->linux_uid;
+	tlink->tl_uid = pSesInfo->linux_uid;
 	tlink->tl_tcon = tcon;
 	tlink->tl_time = jiffies;
 	set_bit(TCON_LINK_MASTER, &tlink->tl_flags);
 	set_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
 
-	rc = radix_tree_preload(GFP_KERNEL);
-	if (rc == -ENOMEM) {
-		kfree(tlink);
-		goto mount_fail_check;
-	}
-
+	cifs_sb->master_tlink = tlink;
 	spin_lock(&cifs_sb->tlink_tree_lock);
-	radix_tree_insert(&cifs_sb->tlink_tree, pSesInfo->linux_uid, tlink);
-	radix_tree_tag_set(&cifs_sb->tlink_tree, pSesInfo->linux_uid,
-			   CIFS_TLINK_MASTER_TAG);
+	tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
 	spin_unlock(&cifs_sb->tlink_tree_lock);
-	radix_tree_preload_end();
 
 	queue_delayed_work(system_nrt_wq, &cifs_sb->prune_tlinks,
 				TLINK_IDLE_EXPIRE);
@@ -3107,32 +3100,25 @@
 int
 cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb)
 {
-	int i, ret;
+	struct rb_root *root = &cifs_sb->tlink_tree;
+	struct rb_node *node;
+	struct tcon_link *tlink;
 	char *tmp;
-	struct tcon_link *tlink[8];
-	unsigned long index = 0;
 
 	cancel_delayed_work_sync(&cifs_sb->prune_tlinks);
 
-	do {
-		spin_lock(&cifs_sb->tlink_tree_lock);
-		ret = radix_tree_gang_lookup(&cifs_sb->tlink_tree,
-					     (void **)tlink, index,
-					     ARRAY_SIZE(tlink));
-		/* increment index for next pass */
-		if (ret > 0)
-			index = tlink[ret - 1]->tl_index + 1;
-		for (i = 0; i < ret; i++) {
-			cifs_get_tlink(tlink[i]);
-			clear_bit(TCON_LINK_IN_TREE, &tlink[i]->tl_flags);
-			radix_tree_delete(&cifs_sb->tlink_tree,
-							tlink[i]->tl_index);
-		}
-		spin_unlock(&cifs_sb->tlink_tree_lock);
+	spin_lock(&cifs_sb->tlink_tree_lock);
+	while ((node = rb_first(root))) {
+		tlink = rb_entry(node, struct tcon_link, tl_rbnode);
+		cifs_get_tlink(tlink);
+		clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
+		rb_erase(node, root);
 
-		for (i = 0; i < ret; i++)
-			cifs_put_tlink(tlink[i]);
-	} while (ret != 0);
+		spin_unlock(&cifs_sb->tlink_tree_lock);
+		cifs_put_tlink(tlink);
+		spin_lock(&cifs_sb->tlink_tree_lock);
+	}
+	spin_unlock(&cifs_sb->tlink_tree_lock);
 
 	tmp = cifs_sb->prepath;
 	cifs_sb->prepathlen = 0;
@@ -3271,22 +3257,10 @@
 	return tcon;
 }
 
-static struct tcon_link *
+static inline struct tcon_link *
 cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb)
 {
-	struct tcon_link *tlink;
-	unsigned int ret;
-
-	spin_lock(&cifs_sb->tlink_tree_lock);
-	ret = radix_tree_gang_lookup_tag(&cifs_sb->tlink_tree, (void **)&tlink,
-					0, 1, CIFS_TLINK_MASTER_TAG);
-	spin_unlock(&cifs_sb->tlink_tree_lock);
-
-	/* the master tcon should always be present */
-	if (ret == 0)
-		BUG();
-
-	return tlink;
+	return cifs_sb->master_tlink;
 }
 
 struct cifsTconInfo *
@@ -3302,6 +3276,47 @@
 	return signal_pending(current) ? -ERESTARTSYS : 0;
 }
 
+/* find and return a tlink with the given uid */
+static struct tcon_link *
+tlink_rb_search(struct rb_root *root, uid_t uid)
+{
+	struct rb_node *node = root->rb_node;
+	struct tcon_link *tlink;
+
+	while (node) {
+		tlink = rb_entry(node, struct tcon_link, tl_rbnode);
+
+		if (tlink->tl_uid > uid)
+			node = node->rb_left;
+		else if (tlink->tl_uid < uid)
+			node = node->rb_right;
+		else
+			return tlink;
+	}
+	return NULL;
+}
+
+/* insert a tcon_link into the tree */
+static void
+tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink)
+{
+	struct rb_node **new = &(root->rb_node), *parent = NULL;
+	struct tcon_link *tlink;
+
+	while (*new) {
+		tlink = rb_entry(*new, struct tcon_link, tl_rbnode);
+		parent = *new;
+
+		if (tlink->tl_uid > new_tlink->tl_uid)
+			new = &((*new)->rb_left);
+		else
+			new = &((*new)->rb_right);
+	}
+
+	rb_link_node(&new_tlink->tl_rbnode, parent, new);
+	rb_insert_color(&new_tlink->tl_rbnode, root);
+}
+
 /*
  * Find or construct an appropriate tcon given a cifs_sb and the fsuid of the
  * current task.
@@ -3309,7 +3324,7 @@
  * If the superblock doesn't refer to a multiuser mount, then just return
  * the master tcon for the mount.
  *
- * First, search the radix tree for an existing tcon for this fsuid. If one
+ * First, search the rbtree for an existing tcon for this fsuid. If one
  * exists, then check to see if it's pending construction. If it is then wait
  * for construction to complete. Once it's no longer pending, check to see if
  * it failed and either return an error or retry construction, depending on
@@ -3322,14 +3337,14 @@
 cifs_sb_tlink(struct cifs_sb_info *cifs_sb)
 {
 	int ret;
-	unsigned long fsuid = (unsigned long) current_fsuid();
+	uid_t fsuid = current_fsuid();
 	struct tcon_link *tlink, *newtlink;
 
 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
 		return cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
 
 	spin_lock(&cifs_sb->tlink_tree_lock);
-	tlink = radix_tree_lookup(&cifs_sb->tlink_tree, fsuid);
+	tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
 	if (tlink)
 		cifs_get_tlink(tlink);
 	spin_unlock(&cifs_sb->tlink_tree_lock);
@@ -3338,36 +3353,24 @@
 		newtlink = kzalloc(sizeof(*tlink), GFP_KERNEL);
 		if (newtlink == NULL)
 			return ERR_PTR(-ENOMEM);
-		newtlink->tl_index = fsuid;
+		newtlink->tl_uid = fsuid;
 		newtlink->tl_tcon = ERR_PTR(-EACCES);
 		set_bit(TCON_LINK_PENDING, &newtlink->tl_flags);
 		set_bit(TCON_LINK_IN_TREE, &newtlink->tl_flags);
 		cifs_get_tlink(newtlink);
 
-		ret = radix_tree_preload(GFP_KERNEL);
-		if (ret != 0) {
-			kfree(newtlink);
-			return ERR_PTR(ret);
-		}
-
 		spin_lock(&cifs_sb->tlink_tree_lock);
 		/* was one inserted after previous search? */
-		tlink = radix_tree_lookup(&cifs_sb->tlink_tree, fsuid);
+		tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
 		if (tlink) {
 			cifs_get_tlink(tlink);
 			spin_unlock(&cifs_sb->tlink_tree_lock);
-			radix_tree_preload_end();
 			kfree(newtlink);
 			goto wait_for_construction;
 		}
-		ret = radix_tree_insert(&cifs_sb->tlink_tree, fsuid, newtlink);
-		spin_unlock(&cifs_sb->tlink_tree_lock);
-		radix_tree_preload_end();
-		if (ret) {
-			kfree(newtlink);
-			return ERR_PTR(ret);
-		}
 		tlink = newtlink;
+		tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
+		spin_unlock(&cifs_sb->tlink_tree_lock);
 	} else {
 wait_for_construction:
 		ret = wait_on_bit(&tlink->tl_flags, TCON_LINK_PENDING,
@@ -3413,39 +3416,39 @@
 {
 	struct cifs_sb_info *cifs_sb = container_of(work, struct cifs_sb_info,
 						    prune_tlinks.work);
-	struct tcon_link *tlink[8];
-	unsigned long now = jiffies;
-	unsigned long index = 0;
-	int i, ret;
+	struct rb_root *root = &cifs_sb->tlink_tree;
+	struct rb_node *node;
+	struct rb_node *tmp;
+	struct tcon_link *tlink;
 
-	do {
-		spin_lock(&cifs_sb->tlink_tree_lock);
-		ret = radix_tree_gang_lookup(&cifs_sb->tlink_tree,
-					     (void **)tlink, index,
-					     ARRAY_SIZE(tlink));
-		/* increment index for next pass */
-		if (ret > 0)
-			index = tlink[ret - 1]->tl_index + 1;
-		for (i = 0; i < ret; i++) {
-			if (test_bit(TCON_LINK_MASTER, &tlink[i]->tl_flags) ||
-			    atomic_read(&tlink[i]->tl_count) != 0 ||
-			    time_after(tlink[i]->tl_time + TLINK_IDLE_EXPIRE,
-				       now)) {
-				tlink[i] = NULL;
-				continue;
-			}
-			cifs_get_tlink(tlink[i]);
-			clear_bit(TCON_LINK_IN_TREE, &tlink[i]->tl_flags);
-			radix_tree_delete(&cifs_sb->tlink_tree,
-					  tlink[i]->tl_index);
-		}
+	/*
+	 * Because we drop the spinlock in the loop in order to put the tlink,
+	 * the walk is not guarded against removal of links from the tree. The
+	 * only places that remove entries from the tree are this function and
+	 * umounts. Because this function is non-reentrant and is canceled
+	 * before umount can proceed, this is safe.
+	 */
+	spin_lock(&cifs_sb->tlink_tree_lock);
+	node = rb_first(root);
+	while (node != NULL) {
+		tmp = node;
+		node = rb_next(tmp);
+		tlink = rb_entry(tmp, struct tcon_link, tl_rbnode);
+
+		if (test_bit(TCON_LINK_MASTER, &tlink->tl_flags) ||
+		    atomic_read(&tlink->tl_count) != 0 ||
+		    time_after(tlink->tl_time + TLINK_IDLE_EXPIRE, jiffies))
+			continue;
+
+		cifs_get_tlink(tlink);
+		clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
+		rb_erase(tmp, root);
+
 		spin_unlock(&cifs_sb->tlink_tree_lock);
-
-		for (i = 0; i < ret; i++) {
-			if (tlink[i] != NULL)
-				cifs_put_tlink(tlink[i]);
-		}
-	} while (ret != 0);
+		cifs_put_tlink(tlink);
+		spin_lock(&cifs_sb->tlink_tree_lock);
+	}
+	spin_unlock(&cifs_sb->tlink_tree_lock);
 
 	queue_delayed_work(system_nrt_wq, &cifs_sb->prune_tlinks,
 				TLINK_IDLE_EXPIRE);
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index ae82159..06c3e83 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -146,12 +146,7 @@
 		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
 					 xid, NULL);
 
-	if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
-		pCifsInode->clientCanCacheAll = true;
-		pCifsInode->clientCanCacheRead = true;
-		cFYI(1, "Exclusive Oplock granted on inode %p", inode);
-	} else if ((oplock & 0xF) == OPLOCK_READ)
-		pCifsInode->clientCanCacheRead = true;
+	cifs_set_oplock_level(pCifsInode, oplock);
 
 	return rc;
 }
@@ -253,12 +248,7 @@
 		list_add_tail(&pCifsFile->flist, &pCifsInode->openFileList);
 	spin_unlock(&cifs_file_list_lock);
 
-	if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
-		pCifsInode->clientCanCacheAll = true;
-		pCifsInode->clientCanCacheRead = true;
-		cFYI(1, "Exclusive Oplock inode %p", inode);
-	} else if ((oplock & 0xF) == OPLOCK_READ)
-		pCifsInode->clientCanCacheRead = true;
+	cifs_set_oplock_level(pCifsInode, oplock);
 
 	file->private_data = pCifsFile;
 	return pCifsFile;
@@ -271,8 +261,9 @@
  */
 void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
 {
+	struct inode *inode = cifs_file->dentry->d_inode;
 	struct cifsTconInfo *tcon = tlink_tcon(cifs_file->tlink);
-	struct cifsInodeInfo *cifsi = CIFS_I(cifs_file->dentry->d_inode);
+	struct cifsInodeInfo *cifsi = CIFS_I(inode);
 	struct cifsLockInfo *li, *tmp;
 
 	spin_lock(&cifs_file_list_lock);
@@ -288,8 +279,7 @@
 	if (list_empty(&cifsi->openFileList)) {
 		cFYI(1, "closing last open instance for inode %p",
 			cifs_file->dentry->d_inode);
-		cifsi->clientCanCacheRead = false;
-		cifsi->clientCanCacheAll  = false;
+		cifs_set_oplock_level(cifsi, 0);
 	}
 	spin_unlock(&cifs_file_list_lock);
 
@@ -607,8 +597,6 @@
 		rc = filemap_write_and_wait(inode->i_mapping);
 		mapping_set_error(inode->i_mapping, rc);
 
-		pCifsInode->clientCanCacheAll = false;
-		pCifsInode->clientCanCacheRead = false;
 		if (tcon->unix_ext)
 			rc = cifs_get_inode_info_unix(&inode,
 				full_path, inode->i_sb, xid);
@@ -622,18 +610,9 @@
 	     invalidate the current end of file on the server,
 	     we cannot go to the server to get the new inode
 	     info */
-	if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
-		pCifsInode->clientCanCacheAll = true;
-		pCifsInode->clientCanCacheRead = true;
-		cFYI(1, "Exclusive Oplock granted on inode %p",
-			 pCifsFile->dentry->d_inode);
-	} else if ((oplock & 0xF) == OPLOCK_READ) {
-		pCifsInode->clientCanCacheRead = true;
-		pCifsInode->clientCanCacheAll = false;
-	} else {
-		pCifsInode->clientCanCacheRead = false;
-		pCifsInode->clientCanCacheAll = false;
-	}
+
+	cifs_set_oplock_level(pCifsInode, oplock);
+
 	cifs_relock_file(pCifsFile);
 
 reopen_error_exit:
@@ -775,12 +754,6 @@
 
 	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
 	tcon = tlink_tcon(((struct cifsFileInfo *)file->private_data)->tlink);
-
-	if (file->private_data == NULL) {
-		rc = -EBADF;
-		FreeXid(xid);
-		return rc;
-	}
 	netfid = ((struct cifsFileInfo *)file->private_data)->netfid;
 
 	if ((tcon->ses->capabilities & CAP_UNIX) &&
@@ -956,6 +929,7 @@
 ssize_t cifs_user_write(struct file *file, const char __user *write_data,
 	size_t write_size, loff_t *poffset)
 {
+	struct inode *inode = file->f_path.dentry->d_inode;
 	int rc = 0;
 	unsigned int bytes_written = 0;
 	unsigned int total_written;
@@ -963,7 +937,7 @@
 	struct cifsTconInfo *pTcon;
 	int xid, long_op;
 	struct cifsFileInfo *open_file;
-	struct cifsInodeInfo *cifsi = CIFS_I(file->f_path.dentry->d_inode);
+	struct cifsInodeInfo *cifsi = CIFS_I(inode);
 
 	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
 
@@ -1029,21 +1003,17 @@
 
 	cifs_stats_bytes_written(pTcon, total_written);
 
-	/* since the write may have blocked check these pointers again */
-	if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
-		struct inode *inode = file->f_path.dentry->d_inode;
 /* Do not update local mtime - server will set its actual value on write
- *		inode->i_ctime = inode->i_mtime =
- * 			current_fs_time(inode->i_sb);*/
-		if (total_written > 0) {
-			spin_lock(&inode->i_lock);
-			if (*poffset > file->f_path.dentry->d_inode->i_size)
-				i_size_write(file->f_path.dentry->d_inode,
-					*poffset);
-			spin_unlock(&inode->i_lock);
-		}
-		mark_inode_dirty_sync(file->f_path.dentry->d_inode);
+ *	inode->i_ctime = inode->i_mtime =
+ * 		current_fs_time(inode->i_sb);*/
+	if (total_written > 0) {
+		spin_lock(&inode->i_lock);
+		if (*poffset > inode->i_size)
+			i_size_write(inode, *poffset);
+		spin_unlock(&inode->i_lock);
 	}
+	mark_inode_dirty_sync(inode);
+
 	FreeXid(xid);
 	return total_written;
 }
@@ -1178,7 +1148,7 @@
 					bool fsuid_only)
 {
 	struct cifsFileInfo *open_file;
-	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
+	struct cifs_sb_info *cifs_sb;
 	bool any_available = false;
 	int rc;
 
@@ -1192,6 +1162,8 @@
 		return NULL;
 	}
 
+	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
+
 	/* only filter by fsuid on multiuser mounts */
 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
 		fsuid_only = false;
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 39869c3..ef3a55b 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -2177,7 +2177,6 @@
 
 	setattr_copy(inode, attrs);
 	mark_inode_dirty(inode);
-	return 0;
 
 cifs_setattr_exit:
 	kfree(full_path);
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 077bf75..0c98672 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -38,10 +38,10 @@
 	struct cifs_sb_info *cifs_sb;
 #ifdef CONFIG_CIFS_POSIX
 	struct cifsFileInfo *pSMBFile = filep->private_data;
-	struct cifsTconInfo *tcon = tlink_tcon(pSMBFile->tlink);
+	struct cifsTconInfo *tcon;
 	__u64	ExtAttrBits = 0;
 	__u64	ExtAttrMask = 0;
-	__u64   caps = le64_to_cpu(tcon->fsUnixInfo.Capability);
+	__u64   caps;
 #endif /* CONFIG_CIFS_POSIX */
 
 	xid = GetXid();
@@ -62,9 +62,11 @@
 			break;
 #ifdef CONFIG_CIFS_POSIX
 		case FS_IOC_GETFLAGS:
+			if (pSMBFile == NULL)
+				break;
+			tcon = tlink_tcon(pSMBFile->tlink);
+			caps = le64_to_cpu(tcon->fsUnixInfo.Capability);
 			if (CIFS_UNIX_EXTATTR_CAP & caps) {
-				if (pSMBFile == NULL)
-					break;
 				rc = CIFSGetExtAttr(xid, tcon, pSMBFile->netfid,
 					&ExtAttrBits, &ExtAttrMask);
 				if (rc == 0)
@@ -75,13 +77,15 @@
 			break;
 
 		case FS_IOC_SETFLAGS:
+			if (pSMBFile == NULL)
+				break;
+			tcon = tlink_tcon(pSMBFile->tlink);
+			caps = le64_to_cpu(tcon->fsUnixInfo.Capability);
 			if (CIFS_UNIX_EXTATTR_CAP & caps) {
 				if (get_user(ExtAttrBits, (int __user *)arg)) {
 					rc = -EFAULT;
 					break;
 				}
-				if (pSMBFile == NULL)
-					break;
 				/* rc= CIFSGetExtAttr(xid,tcon,pSMBFile->netfid,
 					extAttrBits, &ExtAttrMask);*/
 			}
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index c4e296f..43f1028 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -569,10 +569,9 @@
 
 				cFYI(1, "file id match, oplock break");
 				pCifsInode = CIFS_I(netfile->dentry->d_inode);
-				pCifsInode->clientCanCacheAll = false;
-				if (pSMB->OplockLevel == 0)
-					pCifsInode->clientCanCacheRead = false;
 
+				cifs_set_oplock_level(pCifsInode,
+						      pSMB->OplockLevel);
 				/*
 				 * cifs_oplock_break_put() can't be called
 				 * from here.  Get reference after queueing
@@ -722,3 +721,23 @@
 			   cifs_sb_master_tcon(cifs_sb)->treeName);
 	}
 }
+
+void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
+{
+	oplock &= 0xF;
+
+	if (oplock == OPLOCK_EXCLUSIVE) {
+		cinode->clientCanCacheAll = true;
+		cinode->clientCanCacheRead = true;
+		cFYI(1, "Exclusive Oplock granted on inode %p",
+		     &cinode->vfs_inode);
+	} else if (oplock == OPLOCK_READ) {
+		cinode->clientCanCacheAll = false;
+		cinode->clientCanCacheRead = true;
+		cFYI(1, "Level II Oplock granted on inode %p",
+		     &cinode->vfs_inode);
+	} else {
+		cinode->clientCanCacheAll = false;
+		cinode->clientCanCacheRead = false;
+	}
+}
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 8b5dd63..6a5edea 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -177,7 +177,7 @@
 
 struct ext4_io_page {
 	struct page	*p_page;
-	int		p_count;
+	atomic_t	p_count;
 };
 
 #define MAX_IO_PAGES 128
@@ -858,6 +858,7 @@
 	spinlock_t i_completed_io_lock;
 	/* current io_end structure for async DIO write*/
 	ext4_io_end_t *cur_aio_dio;
+	atomic_t i_ioend_count;	/* Number of outstanding io_end structs */
 
 	/*
 	 * Transactions that contain inode's metadata needed to complete
@@ -2060,6 +2061,7 @@
 /* page-io.c */
 extern int __init ext4_init_pageio(void);
 extern void ext4_exit_pageio(void);
+extern void ext4_ioend_wait(struct inode *);
 extern void ext4_free_io_end(ext4_io_end_t *io);
 extern ext4_io_end_t *ext4_init_io_end(struct inode *inode, gfp_t flags);
 extern int ext4_end_io_nolock(ext4_io_end_t *io);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 1916164..bdbe699 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -53,6 +53,7 @@
 static inline int ext4_begin_ordered_truncate(struct inode *inode,
 					      loff_t new_size)
 {
+	trace_ext4_begin_ordered_truncate(inode, new_size);
 	return jbd2_journal_begin_ordered_truncate(
 					EXT4_SB(inode->i_sb)->s_journal,
 					&EXT4_I(inode)->jinode,
@@ -178,6 +179,7 @@
 	handle_t *handle;
 	int err;
 
+	trace_ext4_evict_inode(inode);
 	if (inode->i_nlink) {
 		truncate_inode_pages(&inode->i_data, 0);
 		goto no_delete;
@@ -5410,9 +5412,7 @@
 	 * will return the blocks that include the delayed allocation
 	 * blocks for this file.
 	 */
-	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
 	delalloc_blocks = EXT4_I(inode)->i_reserved_data_blocks;
-	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
 
 	stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9;
 	return 0;
@@ -5649,6 +5649,7 @@
 	int err, ret;
 
 	might_sleep();
+	trace_ext4_mark_inode_dirty(inode, _RET_IP_);
 	err = ext4_reserve_inode_write(handle, inode, &iloc);
 	if (ext4_handle_valid(handle) &&
 	    EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index c58eba3..5b4d4e3 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -4640,8 +4640,6 @@
 		 * with group lock held. generate_buddy look at
 		 * them with group lock_held
 		 */
-		if (test_opt(sb, DISCARD))
-			ext4_issue_discard(sb, block_group, bit, count);
 		ext4_lock_group(sb, block_group);
 		mb_clear_bits(bitmap_bh->b_data, bit, count);
 		mb_free_blocks(inode, &e4b, bit, count);
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 46a7d6a..7f5451c 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -32,8 +32,14 @@
 
 static struct kmem_cache *io_page_cachep, *io_end_cachep;
 
+#define WQ_HASH_SZ		37
+#define to_ioend_wq(v)	(&ioend_wq[((unsigned long)v) % WQ_HASH_SZ])
+static wait_queue_head_t ioend_wq[WQ_HASH_SZ];
+
 int __init ext4_init_pageio(void)
 {
+	int i;
+
 	io_page_cachep = KMEM_CACHE(ext4_io_page, SLAB_RECLAIM_ACCOUNT);
 	if (io_page_cachep == NULL)
 		return -ENOMEM;
@@ -42,6 +48,8 @@
 		kmem_cache_destroy(io_page_cachep);
 		return -ENOMEM;
 	}
+	for (i = 0; i < WQ_HASH_SZ; i++)
+		init_waitqueue_head(&ioend_wq[i]);
 
 	return 0;
 }
@@ -52,24 +60,37 @@
 	kmem_cache_destroy(io_page_cachep);
 }
 
+void ext4_ioend_wait(struct inode *inode)
+{
+	wait_queue_head_t *wq = to_ioend_wq(inode);
+
+	wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_ioend_count) == 0));
+}
+
+static void put_io_page(struct ext4_io_page *io_page)
+{
+	if (atomic_dec_and_test(&io_page->p_count)) {
+		end_page_writeback(io_page->p_page);
+		put_page(io_page->p_page);
+		kmem_cache_free(io_page_cachep, io_page);
+	}
+}
+
 void ext4_free_io_end(ext4_io_end_t *io)
 {
 	int i;
+	wait_queue_head_t *wq;
 
 	BUG_ON(!io);
 	if (io->page)
 		put_page(io->page);
-	for (i = 0; i < io->num_io_pages; i++) {
-		if (--io->pages[i]->p_count == 0) {
-			struct page *page = io->pages[i]->p_page;
-
-			end_page_writeback(page);
-			put_page(page);
-			kmem_cache_free(io_page_cachep, io->pages[i]);
-		}
-	}
+	for (i = 0; i < io->num_io_pages; i++)
+		put_io_page(io->pages[i]);
 	io->num_io_pages = 0;
-	iput(io->inode);
+	wq = to_ioend_wq(io->inode);
+	if (atomic_dec_and_test(&EXT4_I(io->inode)->i_ioend_count) &&
+	    waitqueue_active(wq))
+		wake_up_all(wq);
 	kmem_cache_free(io_end_cachep, io);
 }
 
@@ -142,8 +163,8 @@
 	io = kmem_cache_alloc(io_end_cachep, flags);
 	if (io) {
 		memset(io, 0, sizeof(*io));
-		io->inode = igrab(inode);
-		BUG_ON(!io->inode);
+		atomic_inc(&EXT4_I(inode)->i_ioend_count);
+		io->inode = inode;
 		INIT_WORK(&io->work, ext4_end_io_work);
 		INIT_LIST_HEAD(&io->list);
 	}
@@ -171,35 +192,15 @@
 	struct workqueue_struct *wq;
 	struct inode *inode;
 	unsigned long flags;
-	ext4_fsblk_t err_block;
 	int i;
 
 	BUG_ON(!io_end);
-	inode = io_end->inode;
 	bio->bi_private = NULL;
 	bio->bi_end_io = NULL;
 	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
 		error = 0;
-	err_block = bio->bi_sector >> (inode->i_blkbits - 9);
 	bio_put(bio);
 
-	if (!(inode->i_sb->s_flags & MS_ACTIVE)) {
-		pr_err("sb umounted, discard end_io request for inode %lu\n",
-			io_end->inode->i_ino);
-		ext4_free_io_end(io_end);
-		return;
-	}
-
-	if (error) {
-		io_end->flag |= EXT4_IO_END_ERROR;
-		ext4_warning(inode->i_sb, "I/O error writing to inode %lu "
-			     "(offset %llu size %ld starting block %llu)",
-			     inode->i_ino,
-			     (unsigned long long) io_end->offset,
-			     (long) io_end->size,
-			     (unsigned long long) err_block);
-	}
-
 	for (i = 0; i < io_end->num_io_pages; i++) {
 		struct page *page = io_end->pages[i]->p_page;
 		struct buffer_head *bh, *head;
@@ -236,13 +237,7 @@
 			} while (bh != head);
 		}
 
-		if (--io_end->pages[i]->p_count == 0) {
-			struct page *page = io_end->pages[i]->p_page;
-
-			end_page_writeback(page);
-			put_page(page);
-			kmem_cache_free(io_page_cachep, io_end->pages[i]);
-		}
+		put_io_page(io_end->pages[i]);
 
 		/*
 		 * If this is a partial write which happened to make
@@ -254,8 +249,19 @@
 		if (!partial_write)
 			SetPageUptodate(page);
 	}
-
 	io_end->num_io_pages = 0;
+	inode = io_end->inode;
+
+	if (error) {
+		io_end->flag |= EXT4_IO_END_ERROR;
+		ext4_warning(inode->i_sb, "I/O error writing to inode %lu "
+			     "(offset %llu size %ld starting block %llu)",
+			     inode->i_ino,
+			     (unsigned long long) io_end->offset,
+			     (long) io_end->size,
+			     (unsigned long long)
+			     bio->bi_sector >> (inode->i_blkbits - 9));
+	}
 
 	/* Add the io_end to per-inode completed io list*/
 	spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
@@ -305,7 +311,6 @@
 	bio->bi_private = io->io_end = io_end;
 	bio->bi_end_io = ext4_end_bio;
 
-	io_end->inode = inode;
 	io_end->offset = (page->index << PAGE_CACHE_SHIFT) + bh_offset(bh);
 
 	io->io_bio = bio;
@@ -360,7 +365,7 @@
 	if ((io_end->num_io_pages == 0) ||
 	    (io_end->pages[io_end->num_io_pages-1] != io_page)) {
 		io_end->pages[io_end->num_io_pages++] = io_page;
-		io_page->p_count++;
+		atomic_inc(&io_page->p_count);
 	}
 	return 0;
 }
@@ -389,7 +394,7 @@
 		return -ENOMEM;
 	}
 	io_page->p_page = page;
-	io_page->p_count = 0;
+	atomic_set(&io_page->p_count, 1);
 	get_page(page);
 
 	for (bh = head = page_buffers(page), block_start = 0;
@@ -421,10 +426,6 @@
 	 * PageWriteback bit from the page to prevent the system from
 	 * wedging later on.
 	 */
-	if (io_page->p_count == 0) {
-		put_page(page);
-		end_page_writeback(page);
-		kmem_cache_free(io_page_cachep, io_page);
-	}
+	put_io_page(io_page);
 	return ret;
 }
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 40131b7..61182fe 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -828,12 +828,22 @@
 	ei->cur_aio_dio = NULL;
 	ei->i_sync_tid = 0;
 	ei->i_datasync_tid = 0;
+	atomic_set(&ei->i_ioend_count, 0);
 
 	return &ei->vfs_inode;
 }
 
+static int ext4_drop_inode(struct inode *inode)
+{
+	int drop = generic_drop_inode(inode);
+
+	trace_ext4_drop_inode(inode, drop);
+	return drop;
+}
+
 static void ext4_destroy_inode(struct inode *inode)
 {
+	ext4_ioend_wait(inode);
 	if (!list_empty(&(EXT4_I(inode)->i_orphan))) {
 		ext4_msg(inode->i_sb, KERN_ERR,
 			 "Inode %lu (%p): orphan list check failed!",
@@ -1173,6 +1183,7 @@
 	.destroy_inode	= ext4_destroy_inode,
 	.write_inode	= ext4_write_inode,
 	.dirty_inode	= ext4_dirty_inode,
+	.drop_inode	= ext4_drop_inode,
 	.evict_inode	= ext4_evict_inode,
 	.put_super	= ext4_put_super,
 	.sync_fs	= ext4_sync_fs,
@@ -1194,6 +1205,7 @@
 	.destroy_inode	= ext4_destroy_inode,
 	.write_inode	= ext4_write_inode,
 	.dirty_inode	= ext4_dirty_inode,
+	.drop_inode	= ext4_drop_inode,
 	.evict_inode	= ext4_evict_inode,
 	.write_super	= ext4_write_super,
 	.put_super	= ext4_put_super,
@@ -2699,7 +2711,6 @@
 	struct ext4_li_request *elr;
 	unsigned long next_wakeup;
 	DEFINE_WAIT(wait);
-	int ret;
 
 	BUG_ON(NULL == eli);
 
@@ -2723,13 +2734,12 @@
 			elr = list_entry(pos, struct ext4_li_request,
 					 lr_request);
 
-			if (time_after_eq(jiffies, elr->lr_next_sched))
-				ret = ext4_run_li_request(elr);
-
-			if (ret) {
-				ret = 0;
-				ext4_remove_li_request(elr);
-				continue;
+			if (time_after_eq(jiffies, elr->lr_next_sched)) {
+				if (ext4_run_li_request(elr) != 0) {
+					/* error, remove the lazy_init job */
+					ext4_remove_li_request(elr);
+					continue;
+				}
 			}
 
 			if (time_before(elr->lr_next_sched, next_wakeup))
@@ -2740,7 +2750,8 @@
 		if (freezing(current))
 			refrigerator();
 
-		if (time_after_eq(jiffies, next_wakeup)) {
+		if ((time_after_eq(jiffies, next_wakeup)) ||
+		    (MAX_JIFFY_OFFSET == next_wakeup)) {
 			cond_resched();
 			continue;
 		}
@@ -3348,6 +3359,24 @@
 	get_random_bytes(&sbi->s_next_generation, sizeof(u32));
 	spin_lock_init(&sbi->s_next_gen_lock);
 
+	err = percpu_counter_init(&sbi->s_freeblocks_counter,
+			ext4_count_free_blocks(sb));
+	if (!err) {
+		err = percpu_counter_init(&sbi->s_freeinodes_counter,
+				ext4_count_free_inodes(sb));
+	}
+	if (!err) {
+		err = percpu_counter_init(&sbi->s_dirs_counter,
+				ext4_count_dirs(sb));
+	}
+	if (!err) {
+		err = percpu_counter_init(&sbi->s_dirtyblocks_counter, 0);
+	}
+	if (err) {
+		ext4_msg(sb, KERN_ERR, "insufficient memory");
+		goto failed_mount3;
+	}
+
 	sbi->s_stripe = ext4_get_stripe_size(sbi);
 	sbi->s_max_writeback_mb_bump = 128;
 
@@ -3446,22 +3475,19 @@
 	}
 	set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
 
-no_journal:
-	err = percpu_counter_init(&sbi->s_freeblocks_counter,
-				  ext4_count_free_blocks(sb));
-	if (!err)
-		err = percpu_counter_init(&sbi->s_freeinodes_counter,
-					  ext4_count_free_inodes(sb));
-	if (!err)
-		err = percpu_counter_init(&sbi->s_dirs_counter,
-					  ext4_count_dirs(sb));
-	if (!err)
-		err = percpu_counter_init(&sbi->s_dirtyblocks_counter, 0);
-	if (err) {
-		ext4_msg(sb, KERN_ERR, "insufficient memory");
-		goto failed_mount_wq;
-	}
+	/*
+	 * The journal may have updated the bg summary counts, so we
+	 * need to update the global counters.
+	 */
+	percpu_counter_set(&sbi->s_freeblocks_counter,
+			   ext4_count_free_blocks(sb));
+	percpu_counter_set(&sbi->s_freeinodes_counter,
+			   ext4_count_free_inodes(sb));
+	percpu_counter_set(&sbi->s_dirs_counter,
+			   ext4_count_dirs(sb));
+	percpu_counter_set(&sbi->s_dirtyblocks_counter, 0);
 
+no_journal:
 	EXT4_SB(sb)->dio_unwritten_wq = create_workqueue("ext4-dio-unwritten");
 	if (!EXT4_SB(sb)->dio_unwritten_wq) {
 		printk(KERN_ERR "EXT4-fs: failed to create DIO workqueue\n");
@@ -3611,10 +3637,6 @@
 		jbd2_journal_destroy(sbi->s_journal);
 		sbi->s_journal = NULL;
 	}
-	percpu_counter_destroy(&sbi->s_freeblocks_counter);
-	percpu_counter_destroy(&sbi->s_freeinodes_counter);
-	percpu_counter_destroy(&sbi->s_dirs_counter);
-	percpu_counter_destroy(&sbi->s_dirtyblocks_counter);
 failed_mount3:
 	if (sbi->s_flex_groups) {
 		if (is_vmalloc_addr(sbi->s_flex_groups))
@@ -3622,6 +3644,10 @@
 		else
 			kfree(sbi->s_flex_groups);
 	}
+	percpu_counter_destroy(&sbi->s_freeblocks_counter);
+	percpu_counter_destroy(&sbi->s_freeinodes_counter);
+	percpu_counter_destroy(&sbi->s_dirs_counter);
+	percpu_counter_destroy(&sbi->s_dirtyblocks_counter);
 failed_mount2:
 	for (i = 0; i < db_count; i++)
 		brelse(sbi->s_group_desc[i]);
@@ -3949,13 +3975,11 @@
 	else
 		es->s_kbytes_written =
 			cpu_to_le64(EXT4_SB(sb)->s_kbytes_written);
-	if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeblocks_counter))
-		ext4_free_blocks_count_set(es, percpu_counter_sum_positive(
-					&EXT4_SB(sb)->s_freeblocks_counter));
-	if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeinodes_counter))
-		es->s_free_inodes_count =
-			cpu_to_le32(percpu_counter_sum_positive(
-					&EXT4_SB(sb)->s_freeinodes_counter));
+	ext4_free_blocks_count_set(es, percpu_counter_sum_positive(
+					   &EXT4_SB(sb)->s_freeblocks_counter));
+	es->s_free_inodes_count =
+		cpu_to_le32(percpu_counter_sum_positive(
+				&EXT4_SB(sb)->s_freeinodes_counter));
 	sb->s_dirt = 0;
 	BUFFER_TRACE(sbh, "marking dirty");
 	mark_buffer_dirty(sbh);
@@ -4556,12 +4580,10 @@
 
 static int ext4_quota_off(struct super_block *sb, int type)
 {
-	/* Force all delayed allocation blocks to be allocated */
-	if (test_opt(sb, DELALLOC)) {
-		down_read(&sb->s_umount);
+	/* Force all delayed allocation blocks to be allocated.
+	 * Caller already holds the s_umount sem */
+	if (test_opt(sb, DELALLOC))
 		sync_filesystem(sb);
-		up_read(&sb->s_umount);
-	}
 
 	return dquot_quota_off(sb, type);
 }
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index d6cfac1..a5fe681 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -932,8 +932,7 @@
 	if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
 		*user = current_user();
 		if (user_shm_lock(size, *user)) {
-			WARN_ONCE(1,
-			  "Using mlock ulimits for SHM_HUGETLB deprecated\n");
+			printk_once(KERN_WARNING "Using mlock ulimits for SHM_HUGETLB is deprecated\n");
 		} else {
 			*user = NULL;
 			return ERR_PTR(-EPERM);
diff --git a/fs/ioprio.c b/fs/ioprio.c
index 748cfb9..2f7d05c 100644
--- a/fs/ioprio.c
+++ b/fs/ioprio.c
@@ -111,12 +111,14 @@
 	read_lock(&tasklist_lock);
 	switch (which) {
 		case IOPRIO_WHO_PROCESS:
+			rcu_read_lock();
 			if (!who)
 				p = current;
 			else
 				p = find_task_by_vpid(who);
 			if (p)
 				ret = set_task_ioprio(p, ioprio);
+			rcu_read_unlock();
 			break;
 		case IOPRIO_WHO_PGRP:
 			if (!who)
@@ -139,7 +141,12 @@
 				break;
 
 			do_each_thread(g, p) {
-				if (__task_cred(p)->uid != who)
+				int match;
+
+				rcu_read_lock();
+				match = __task_cred(p)->uid == who;
+				rcu_read_unlock();
+				if (!match)
 					continue;
 				ret = set_task_ioprio(p, ioprio);
 				if (ret)
@@ -200,12 +207,14 @@
 	read_lock(&tasklist_lock);
 	switch (which) {
 		case IOPRIO_WHO_PROCESS:
+			rcu_read_lock();
 			if (!who)
 				p = current;
 			else
 				p = find_task_by_vpid(who);
 			if (p)
 				ret = get_task_ioprio(p);
+			rcu_read_unlock();
 			break;
 		case IOPRIO_WHO_PGRP:
 			if (!who)
@@ -232,7 +241,12 @@
 				break;
 
 			do_each_thread(g, p) {
-				if (__task_cred(p)->uid != user->uid)
+				int match;
+
+				rcu_read_lock();
+				match = __task_cred(p)->uid == user->uid;
+				rcu_read_unlock();
+				if (!match)
 					continue;
 				tmpio = get_task_ioprio(p);
 				if (tmpio < 0)
diff --git a/fs/locks.c b/fs/locks.c
index 65765cb..0e62dd3 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1504,9 +1504,8 @@
 
 static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
 {
-	struct file_lock *fl;
+	struct file_lock *fl, *ret;
 	struct fasync_struct *new;
-	struct inode *inode = filp->f_path.dentry->d_inode;
 	int error;
 
 	fl = lease_alloc(filp, arg);
@@ -1518,13 +1517,16 @@
 		locks_free_lock(fl);
 		return -ENOMEM;
 	}
+	ret = fl;
 	lock_flocks();
-	error = __vfs_setlease(filp, arg, &fl);
+	error = __vfs_setlease(filp, arg, &ret);
 	if (error) {
 		unlock_flocks();
 		locks_free_lock(fl);
 		goto out_free_fasync;
 	}
+	if (ret != fl)
+		locks_free_lock(fl);
 
 	/*
 	 * fasync_insert_entry() returns the old entry if any.
@@ -1532,17 +1534,10 @@
 	 * inserted it into the fasync list. Clear new so that
 	 * we don't release it here.
 	 */
-	if (!fasync_insert_entry(fd, filp, &fl->fl_fasync, new))
+	if (!fasync_insert_entry(fd, filp, &ret->fl_fasync, new))
 		new = NULL;
 
-	if (error < 0) {
-		/* remove lease just inserted by setlease */
-		fl->fl_type = F_UNLCK | F_INPROGRESS;
-		fl->fl_break_time = jiffies - 10;
-		time_out_leases(inode);
-	} else {
-		error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
-	}
+	error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
 	unlock_flocks();
 
 out_free_fasync:
diff --git a/fs/logfs/logfs.h b/fs/logfs/logfs.h
index cd51a36..57afd4a 100644
--- a/fs/logfs/logfs.h
+++ b/fs/logfs/logfs.h
@@ -486,7 +486,7 @@
 
 /* dev_mtd.c */
 #ifdef CONFIG_MTD
-int logfs_get_sb_mtd(struct logfs_super *s, int mtdnr)
+int logfs_get_sb_mtd(struct logfs_super *s, int mtdnr);
 #else
 static inline int logfs_get_sb_mtd(struct logfs_super *s, int mtdnr)
 {
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index f1e5ec6..ad2bfa6 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -673,16 +673,17 @@
 	spin_unlock(&clp->cl_lock);
 }
 
-static void nfsd4_register_conn(struct nfsd4_conn *conn)
+static int nfsd4_register_conn(struct nfsd4_conn *conn)
 {
 	conn->cn_xpt_user.callback = nfsd4_conn_lost;
-	register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
+	return register_xpt_user(conn->cn_xprt, &conn->cn_xpt_user);
 }
 
 static __be32 nfsd4_new_conn(struct svc_rqst *rqstp, struct nfsd4_session *ses)
 {
 	struct nfsd4_conn *conn;
 	u32 flags = NFS4_CDFC4_FORE;
+	int ret;
 
 	if (ses->se_flags & SESSION4_BACK_CHAN)
 		flags |= NFS4_CDFC4_BACK;
@@ -690,7 +691,10 @@
 	if (!conn)
 		return nfserr_jukebox;
 	nfsd4_hash_conn(conn, ses);
-	nfsd4_register_conn(conn);
+	ret = nfsd4_register_conn(conn);
+	if (ret)
+		/* oops; xprt is already down: */
+		nfsd4_conn_lost(&conn->cn_xpt_user);
 	return nfs_ok;
 }
 
@@ -1644,6 +1648,7 @@
 {
 	struct nfs4_client *clp = ses->se_client;
 	struct nfsd4_conn *c;
+	int ret;
 
 	spin_lock(&clp->cl_lock);
 	c = __nfsd4_find_conn(new->cn_xprt, ses);
@@ -1654,7 +1659,10 @@
 	}
 	__nfsd4_hash_conn(new, ses);
 	spin_unlock(&clp->cl_lock);
-	nfsd4_register_conn(new);
+	ret = nfsd4_register_conn(new);
+	if (ret)
+		/* oops; xprt is already down: */
+		nfsd4_conn_lost(&new->cn_xpt_user);
 	return;
 }
 
diff --git a/fs/openpromfs/inode.c b/fs/openpromfs/inode.c
index ddb1f41..911e61f 100644
--- a/fs/openpromfs/inode.c
+++ b/fs/openpromfs/inode.c
@@ -418,7 +418,7 @@
 static struct dentry *openprom_mount(struct file_system_type *fs_type,
 	int flags, const char *dev_name, void *data)
 {
-	return mount_single(fs_type, flags, data, openprom_fill_super)
+	return mount_single(fs_type, flags, data, openprom_fill_super);
 }
 
 static struct file_system_type openprom_fs_type = {
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index c9af48f..7d287af 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -1111,11 +1111,12 @@
 			uptodate = 0;
 
 		/*
-		 * A hole may still be marked uptodate because discard_buffer
-		 * leaves the flag set.
+		 * set_page_dirty dirties all buffers in a page, independent
+		 * of their state.  The dirty state, however, is entirely
+		 * meaningless for holes (!mapped && uptodate), so skip
+		 * buffers covering holes here.
 		 */
 		if (!buffer_mapped(bh) && buffer_uptodate(bh)) {
-			ASSERT(!buffer_dirty(bh));
 			imap_valid = 0;
 			continue;
 		}
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 63fd2c0..aa1d353 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -1781,7 +1781,6 @@
 	INIT_LIST_HEAD(list);
 	spin_lock(dwlk);
 	list_for_each_entry_safe(bp, n, dwq, b_list) {
-		trace_xfs_buf_delwri_split(bp, _RET_IP_);
 		ASSERT(bp->b_flags & XBF_DELWRI);
 
 		if (!XFS_BUF_ISPINNED(bp) && !xfs_buf_cond_lock(bp)) {
@@ -1795,6 +1794,7 @@
 					 _XBF_RUN_QUEUES);
 			bp->b_flags |= XBF_WRITE;
 			list_move_tail(&bp->b_list, list);
+			trace_xfs_buf_delwri_split(bp, _RET_IP_);
 		} else
 			skipped++;
 	}
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index 2ea238f6..ad442d9 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -416,7 +416,7 @@
 	if (IS_ERR(dentry))
 		return PTR_ERR(dentry);
 
-	kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
+	kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
 	if (!kbuf)
 		goto out_dput;
 
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index 96107ef..94d5fd6 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -762,7 +762,8 @@
 	inode->i_state = I_NEW;
 
 	inode_sb_list_add(inode);
-	insert_inode_hash(inode);
+	/* make the inode look hashed for the writeback code */
+	hlist_add_fake(&inode->i_hash);
 
 	inode->i_mode	= ip->i_d.di_mode;
 	inode->i_nlink	= ip->i_d.di_nlink;
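For reference, hlist_add_fake() amounts to the self-pointing sketch below: with pprev aimed at the node's own next field, hlist_unhashed() reports false, so the writeback code treats the inode as hashed even though it sits on no hash chain.

static inline void hlist_add_fake(struct hlist_node *n)
{
	/* node appears hashed without being on any chain */
	n->pprev = &n->next;
}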
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 9f3a78f..064f964 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -353,9 +353,6 @@
 			mp->m_qflags &= ~XFS_OQUOTA_ENFD;
 		} else if (!strcmp(this_char, MNTOPT_DELAYLOG)) {
 			mp->m_flags |= XFS_MOUNT_DELAYLOG;
-			cmn_err(CE_WARN,
-				"Enabling EXPERIMENTAL delayed logging feature "
-				"- use at your own risk.\n");
 		} else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) {
 			mp->m_flags &= ~XFS_MOUNT_DELAYLOG;
 		} else if (!strcmp(this_char, "ihashsize")) {
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index 37d3325..afb0d7c 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -853,6 +853,7 @@
 		if (trylock) {
 			if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
 				skipped++;
+				xfs_perag_put(pag);
 				continue;
 			}
 			first_index = pag->pag_ici_reclaim_cursor;
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c
index 9b715dc..9124425 100644
--- a/fs/xfs/xfs_filestream.c
+++ b/fs/xfs/xfs_filestream.c
@@ -744,9 +744,15 @@
 	 * If the file's parent directory is known, take its iolock in exclusive
 	 * mode to prevent two sibling files from racing each other to migrate
 	 * themselves and their parent to different AGs.
+	 *
+	 * Note that we lock the parent directory iolock inside the child
+	 * iolock here.  That's fine as we never hold both parent and child
+	 * iolock in any other place.  This is different from the ilock,
+	 * which requires locking of the child after the parent for namespace
+	 * operations.
 	 */
 	if (pip)
-		xfs_ilock(pip, XFS_IOLOCK_EXCL);
+		xfs_ilock(pip, XFS_IOLOCK_EXCL | XFS_IOLOCK_PARENT);
 
 	/*
 	 * A new AG needs to be found for the file.  If the file's parent
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index b1498ab..19e9dfa1 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -275,6 +275,7 @@
 		pag = radix_tree_delete(&mp->m_perag_tree, agno);
 		spin_unlock(&mp->m_perag_lock);
 		ASSERT(pag);
+		ASSERT(atomic_read(&pag->pag_ref) == 0);
 		call_rcu(&pag->rcu_head, __xfs_free_perag);
 	}
 }
diff --git a/fs/xfs/xfs_quota.h b/fs/xfs/xfs_quota.h
index e0e64b1..9bb6eda 100644
--- a/fs/xfs/xfs_quota.h
+++ b/fs/xfs/xfs_quota.h
@@ -346,8 +346,17 @@
 #define xfs_trans_mod_dquot_byino(tp, ip, fields, delta)
 #define xfs_trans_apply_dquot_deltas(tp)
 #define xfs_trans_unreserve_and_mod_dquots(tp)
-#define xfs_trans_reserve_quota_nblks(tp, ip, nblks, ninos, flags)	(0)
-#define xfs_trans_reserve_quota_bydquots(tp, mp, u, g, nb, ni, fl)	(0)
+static inline int xfs_trans_reserve_quota_nblks(struct xfs_trans *tp,
+		struct xfs_inode *ip, long nblks, long ninos, uint flags)
+{
+	return 0;
+}
+static inline int xfs_trans_reserve_quota_bydquots(struct xfs_trans *tp,
+		struct xfs_mount *mp, struct xfs_dquot *udqp,
+		struct xfs_dquot *gdqp, long nblks, long nions, uint flags)
+{
+	return 0;
+}
 #define xfs_qm_vop_create_dqattach(tp, ip, u, g)
 #define xfs_qm_vop_rename_dqattach(it)					(0)
 #define xfs_qm_vop_chown(tp, ip, old, new)				(NULL)
@@ -357,11 +366,14 @@
 #define xfs_qm_dqdetach(ip)
 #define xfs_qm_dqrele(d)
 #define xfs_qm_statvfs(ip, s)
-#define xfs_qm_sync(mp, fl)						(0)
+static inline int xfs_qm_sync(struct xfs_mount *mp, int flags)
+{
+	return 0;
+}
 #define xfs_qm_newmount(mp, a, b)					(0)
 #define xfs_qm_mount_quotas(mp)
 #define xfs_qm_unmount(mp)
-#define xfs_qm_unmount_quotas(mp)					(0)
+#define xfs_qm_unmount_quotas(mp)
 #endif /* CONFIG_XFS_QUOTA */
 
 #define xfs_trans_unreserve_quota_nblks(tp, ip, nblks, ninos, flags) \
diff --git a/include/asm-generic/stat.h b/include/asm-generic/stat.h
index 47e6417..bd8cad2 100644
--- a/include/asm-generic/stat.h
+++ b/include/asm-generic/stat.h
@@ -33,18 +33,18 @@
 	int		st_blksize;	/* Optimal block size for I/O.  */
 	int		__pad2;
 	long		st_blocks;	/* Number 512-byte blocks allocated. */
-	int		st_atime;	/* Time of last access.  */
-	unsigned int	st_atime_nsec;
-	int		st_mtime;	/* Time of last modification.  */
-	unsigned int	st_mtime_nsec;
-	int		st_ctime;	/* Time of last status change.  */
-	unsigned int	st_ctime_nsec;
+	long		st_atime;	/* Time of last access.  */
+	unsigned long	st_atime_nsec;
+	long		st_mtime;	/* Time of last modification.  */
+	unsigned long	st_mtime_nsec;
+	long		st_ctime;	/* Time of last status change.  */
+	unsigned long	st_ctime_nsec;
 	unsigned int	__unused4;
 	unsigned int	__unused5;
 };
 
-#if __BITS_PER_LONG != 64
 /* This matches struct stat64 in glibc2.1. Only used for 32 bit. */
+#if __BITS_PER_LONG != 64 || defined(__ARCH_WANT_STAT64)
 struct stat64 {
 	unsigned long long st_dev;	/* Device.  */
 	unsigned long long st_ino;	/* File serial number.  */
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 5afa5b5..beafc15 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -432,6 +432,10 @@
  * together with the @destroy function,
  * enables driver-specific objects derived from a ttm_buffer_object.
  * On successful return, the object kref and list_kref are set to 1.
+ * If a failure occurs, the function will call the @destroy function, or
+ * kfree() if @destroy is NULL. Thus, after a failure, dereferencing @bo is
+ * illegal and will likely cause memory corruption.
+ *
  * Returns
  * -ENOMEM: Out of memory.
  * -EINVAL: Invalid placement flags.
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index d01b4dd..8e0c848 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -206,14 +206,84 @@
 struct ttm_mem_type_manager;
 
 struct ttm_mem_type_manager_func {
+	/**
+	 * struct ttm_mem_type_manager member init
+	 *
+	 * @man: Pointer to a memory type manager.
+	 * @p_size: Implementation dependent, but typically the size of the
+	 * range to be managed in pages.
+	 *
+	 * Called to initialize a private range manager. The function is
+	 * expected to initialize the man::priv member.
+	 * Returns 0 on success, negative error code on failure.
+	 */
 	int  (*init)(struct ttm_mem_type_manager *man, unsigned long p_size);
+
+	/**
+	 * struct ttm_mem_type_manager member takedown
+	 *
+	 * @man: Pointer to a memory type manager.
+	 *
+	 * Called to undo the setup done in init. All allocated resources
+	 * should be freed.
+	 */
 	int  (*takedown)(struct ttm_mem_type_manager *man);
+
+	/**
+	 * struct ttm_mem_type_manager member get_node
+	 *
+	 * @man: Pointer to a memory type manager.
+	 * @bo: Pointer to the buffer object we're allocating space for.
+	 * @placement: Placement details.
+	 * @mem: Pointer to a struct ttm_mem_reg to be filled in.
+	 *
+	 * This function should allocate space in the memory type managed
+	 * by @man. Placement details, if applicable, are given by
+	 * @placement. If successful, @mem::mm_node should be set to a
+	 * non-null value, @mem::start should be set to a value identifying
+	 * the beginning of the range allocated, and the function should
+	 * return zero.
+	 * If the memory region couldn't accommodate the buffer object,
+	 * @mem::mm_node should be set to NULL, and the function should return 0.
+	 * If a system error occurred, preventing the request from being
+	 * fulfilled, the function should return a negative error code.
+	 *
+	 * Note that @mem::mm_node will only be dereferenced by
+	 * struct ttm_mem_type_manager functions and optionally by the driver,
+	 * which has knowledge of the underlying type.
+	 *
+	 * This function may not be called from within atomic context, so
+	 * an implementation can and must use either a mutex or a spinlock to
+	 * protect any data structures managing the space.
+	 */
 	int  (*get_node)(struct ttm_mem_type_manager *man,
 			 struct ttm_buffer_object *bo,
 			 struct ttm_placement *placement,
 			 struct ttm_mem_reg *mem);
+
+	/**
+	 * struct ttm_mem_type_manager member put_node
+	 *
+	 * @man: Pointer to a memory type manager.
+	 * @mem: Pointer to a struct ttm_mem_reg describing the space to be freed.
+	 *
+	 * This function frees memory type resources previously allocated
+	 * and that are identified by @mem::mm_node and @mem::start. May not
+	 * be called from within atomic context.
+	 */
 	void (*put_node)(struct ttm_mem_type_manager *man,
 			 struct ttm_mem_reg *mem);
+
+	/**
+	 * struct ttm_mem_type_manager member debug
+	 *
+	 * @man: Pointer to a memory type manager.
+	 * @prefix: Prefix to be used in printout to identify the caller.
+	 *
+	 * This function is called to print out the state of the memory
+	 * type manager to aid debugging of out-of-memory conditions.
+	 * It may not be called from within atomic context.
+	 */
 	void (*debug)(struct ttm_mem_type_manager *man, const char *prefix);
 };
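To make the get_node contract above concrete, here is a minimal sketch of a conforming implementation. The toy_manager bump allocator and all toy_* names are illustrative assumptions, not part of the TTM API:

#include <drm/ttm/ttm_bo_driver.h>

struct toy_manager {			/* hypothetical man::priv state */
	spinlock_t	lock;
	unsigned long	next;		/* first free page offset */
	unsigned long	size;		/* managed range, in pages */
};

static int toy_get_node(struct ttm_mem_type_manager *man,
			struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			struct ttm_mem_reg *mem)
{
	struct toy_manager *tm = man->priv;

	spin_lock(&tm->lock);
	if (tm->next + mem->num_pages > tm->size) {
		/* out of space is not an error: report mm_node == NULL */
		spin_unlock(&tm->lock);
		mem->mm_node = NULL;
		return 0;
	}
	mem->start = tm->next;
	mem->mm_node = tm;	/* any non-NULL cookie the driver understands */
	tm->next += mem->num_pages;
	spin_unlock(&tm->lock);
	return 0;
}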
 
@@ -231,14 +301,13 @@
 	uint64_t size;
 	uint32_t available_caching;
 	uint32_t default_caching;
-
-	/*
-	 * Protected by the bdev->lru_lock.
-	 * TODO: Consider one lru_lock per ttm_mem_type_manager.
-	 * Plays ill with list removal, though.
-	 */
 	const struct ttm_mem_type_manager_func *func;
 	void *priv;
+
+	/*
+	 * Protected by the global->lru_lock.
+	 */
+
 	struct list_head lru;
 };
 
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
new file mode 100644
index 0000000..96c038e
--- /dev/null
+++ b/include/linux/atomic.h
@@ -0,0 +1,37 @@
+#ifndef _LINUX_ATOMIC_H
+#define _LINUX_ATOMIC_H
+#include <asm/atomic.h>
+
+/**
+ * atomic_inc_not_zero_hint - increment if not zero
+ * @v: pointer of type atomic_t
+ * @hint: probable value of the atomic before the increment
+ *
+ * This version of atomic_inc_not_zero() gives a hint of the probable
+ * value of the atomic. This helps the processor avoid reading the
+ * memory before doing the atomic read/modify/write cycle, lowering the
+ * number of bus transactions on some arches.
+ *
+ * Returns: 0 if increment was not done, 1 otherwise.
+ */
+#ifndef atomic_inc_not_zero_hint
+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
+{
+	int val, c = hint;
+
+	/* sanity test, should be removed by compiler if hint is a constant */
+	if (!hint)
+		return atomic_inc_not_zero(v);
+
+	do {
+		val = atomic_cmpxchg(v, c, c + 1);
+		if (val == c)
+			return 1;
+		c = val;
+	} while (c);
+
+	return 0;
+}
+#endif
+
+#endif /* _LINUX_ATOMIC_H */
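A usage sketch under assumed names (struct obj, obj_lookup): in an RCU-protected lookup where live objects normally sit at a refcount of one, hinting 1 lets the first cmpxchg usually succeed without a prior read.

#include <linux/atomic.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>

struct obj {
	atomic_t	refcnt;		/* live objects usually sit at 1 */
};

static struct obj *obj_lookup(struct radix_tree_root *tree, unsigned long key)
{
	struct obj *obj;

	rcu_read_lock();
	obj = radix_tree_lookup(tree, key);
	/* hint 1: the cmpxchg usually succeeds without a prior read */
	if (obj && !atomic_inc_not_zero_hint(&obj->refcnt, 1))
		obj = NULL;	/* object already dying, refcount hit zero */
	rcu_read_unlock();
	return obj;
}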
diff --git a/include/linux/bio.h b/include/linux/bio.h
index ba67999..35dcdb3 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -66,10 +66,6 @@
 #define bio_offset(bio)		bio_iovec((bio))->bv_offset
 #define bio_segments(bio)	((bio)->bi_vcnt - (bio)->bi_idx)
 #define bio_sectors(bio)	((bio)->bi_size >> 9)
-#define bio_empty_barrier(bio) \
-	((bio->bi_rw & REQ_HARDBARRIER) && \
-	 !bio_has_data(bio) && \
-	 !(bio->bi_rw & REQ_DISCARD))
 
 static inline unsigned int bio_cur_bytes(struct bio *bio)
 {
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 0437ab6..46ad519 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -122,7 +122,6 @@
 	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
 	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
 
-	__REQ_HARDBARRIER,	/* may not be passed by drive either */
 	__REQ_SYNC,		/* request is sync (sync write or read) */
 	__REQ_META,		/* metadata io request */
 	__REQ_DISCARD,		/* request to discard sectors */
@@ -159,7 +158,6 @@
 #define REQ_FAILFAST_DEV	(1 << __REQ_FAILFAST_DEV)
 #define REQ_FAILFAST_TRANSPORT	(1 << __REQ_FAILFAST_TRANSPORT)
 #define REQ_FAILFAST_DRIVER	(1 << __REQ_FAILFAST_DRIVER)
-#define REQ_HARDBARRIER		(1 << __REQ_HARDBARRIER)
 #define REQ_SYNC		(1 << __REQ_SYNC)
 #define REQ_META		(1 << __REQ_META)
 #define REQ_DISCARD		(1 << __REQ_DISCARD)
@@ -168,8 +166,8 @@
 #define REQ_FAILFAST_MASK \
 	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
 #define REQ_COMMON_MASK \
-	(REQ_WRITE | REQ_FAILFAST_MASK | REQ_HARDBARRIER | REQ_SYNC | \
-	 REQ_META | REQ_DISCARD | REQ_NOIDLE | REQ_FLUSH | REQ_FUA)
+	(REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_DISCARD | \
+	 REQ_NOIDLE | REQ_FLUSH | REQ_FUA)
 #define REQ_CLONE_MASK		REQ_COMMON_MASK
 
 #define REQ_UNPLUG		(1 << __REQ_UNPLUG)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 5027a59..aae86fd 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -552,8 +552,7 @@
  * it already be started by driver.
  */
 #define RQ_NOMERGE_FLAGS	\
-	(REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER | \
-	 REQ_FLUSH | REQ_FUA)
+	(REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA)
 #define rq_mergeable(rq)	\
 	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
 	 (((rq)->cmd_flags & REQ_DISCARD) || \
diff --git a/include/linux/dccp.h b/include/linux/dccp.h
index 749f01c..010e2d8 100644
--- a/include/linux/dccp.h
+++ b/include/linux/dccp.h
@@ -197,6 +197,21 @@
 	DCCPF_MAX_CCID_SPECIFIC = 255,
 };
 
+/* DCCP socket control message types for cmsg */
+enum dccp_cmsg_type {
+	DCCP_SCM_PRIORITY = 1,
+	DCCP_SCM_QPOLICY_MAX = 0xFFFF,
+	/* ^-- Up to here reserved exclusively for qpolicy parameters */
+	DCCP_SCM_MAX
+};
+
+/* DCCP priorities for outgoing/queued packets */
+enum dccp_packet_dequeueing_policy {
+	DCCPQ_POLICY_SIMPLE,
+	DCCPQ_POLICY_PRIO,
+	DCCPQ_POLICY_MAX
+};
+
 /* DCCP socket options */
 #define DCCP_SOCKOPT_PACKET_SIZE	1 /* XXX deprecated, without effect */
 #define DCCP_SOCKOPT_SERVICE		2
@@ -210,6 +225,8 @@
 #define DCCP_SOCKOPT_CCID		13
 #define DCCP_SOCKOPT_TX_CCID		14
 #define DCCP_SOCKOPT_RX_CCID		15
+#define DCCP_SOCKOPT_QPOLICY_ID		16
+#define DCCP_SOCKOPT_QPOLICY_TXQLEN	17
 #define DCCP_SOCKOPT_CCID_RX_INFO	128
 #define DCCP_SOCKOPT_CCID_TX_INFO	192
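A hedged userspace sketch of how these knobs are meant to be used together, based only on the definitions above: select the priority dequeueing policy, bound the TX queue, then tag one outgoing packet with a priority via a DCCP_SCM_PRIORITY control message. The fd/buf/len parameters are assumptions and error handling is omitted.

#include <string.h>
#include <sys/socket.h>
#include <linux/dccp.h>

static void dccp_send_prio(int fd, void *buf, size_t len, __u32 prio)
{
	int policy = DCCPQ_POLICY_PRIO, qlen = 16;
	char cbuf[CMSG_SPACE(sizeof(prio))];
	struct iovec iov = { .iov_base = buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
		.msg_control	= cbuf,
		.msg_controllen	= sizeof(cbuf),
	};
	struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);

	/* one-time setup, shown inline for brevity */
	setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_QPOLICY_ID,
		   &policy, sizeof(policy));
	setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_QPOLICY_TXQLEN,
		   &qlen, sizeof(qlen));

	cmsg->cmsg_level = SOL_DCCP;
	cmsg->cmsg_type  = DCCP_SCM_PRIORITY;
	cmsg->cmsg_len   = CMSG_LEN(sizeof(prio));
	memcpy(CMSG_DATA(cmsg), &prio, sizeof(prio));
	sendmsg(fd, &msg, 0);
}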
 
@@ -458,10 +475,13 @@
  * @dccps_hc_rx_ccid - CCID used for the receiver (or receiving half-connection)
  * @dccps_hc_tx_ccid - CCID used for the sender (or sending half-connection)
  * @dccps_options_received - parsed set of retrieved options
+ * @dccps_qpolicy - TX dequeueing policy, one of %dccp_packet_dequeueing_policy
+ * @dccps_tx_qlen - maximum length of the TX queue
  * @dccps_role - role of this sock, one of %dccp_role
  * @dccps_hc_rx_insert_options - receiver wants to add options when acking
  * @dccps_hc_tx_insert_options - sender wants to add options when sending
  * @dccps_server_timewait - server holds timewait state on close (RFC 4340, 8.3)
+ * @dccps_sync_scheduled - flag which signals "send out-of-band message soon"
  * @dccps_xmitlet - tasklet scheduled by the TX CCID to dequeue data packets
  * @dccps_xmit_timer - used by the TX CCID to delay sending (rate-based pacing)
  * @dccps_syn_rtt - RTT sample from Request/Response exchange (in usecs)
@@ -499,10 +519,13 @@
 	struct ccid			*dccps_hc_rx_ccid;
 	struct ccid			*dccps_hc_tx_ccid;
 	struct dccp_options_received	dccps_options_received;
+	__u8				dccps_qpolicy;
+	__u32				dccps_tx_qlen;
 	enum dccp_role			dccps_role:2;
 	__u8				dccps_hc_rx_insert_options:1;
 	__u8				dccps_hc_tx_insert_options:1;
 	__u8				dccps_server_timewait:1;
+	__u8				dccps_sync_scheduled:1;
 	struct tasklet_struct		dccps_xmitlet;
 	struct timer_list		dccps_xmit_timer;
 };
diff --git a/include/linux/drbd.h b/include/linux/drbd.h
index 9b2a015..ef44c7a 100644
--- a/include/linux/drbd.h
+++ b/include/linux/drbd.h
@@ -53,7 +53,7 @@
 
 
 extern const char *drbd_buildtag(void);
-#define REL_VERSION "8.3.9rc2"
+#define REL_VERSION "8.3.9"
 #define API_VERSION 88
 #define PRO_VERSION_MIN 86
 #define PRO_VERSION_MAX 95
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 6628a50..1908929 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -691,7 +691,9 @@
 #define ETHTOOL_GMSGLVL		0x00000007 /* Get driver message level */
 #define ETHTOOL_SMSGLVL		0x00000008 /* Set driver msg level. */
 #define ETHTOOL_NWAY_RST	0x00000009 /* Restart autonegotiation. */
-#define ETHTOOL_GLINK		0x0000000a /* Get link status (ethtool_value) */
+/* Get link status for host, i.e. whether the interface *and* the
+ * physical port (if there is one) are up (ethtool_value). */
+#define ETHTOOL_GLINK		0x0000000a
 #define ETHTOOL_GEEPROM		0x0000000b /* Get EEPROM data */
 #define ETHTOOL_SEEPROM		0x0000000c /* Set EEPROM data. */
 #define ETHTOOL_GCOALESCE	0x0000000e /* Get coalesce config */
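With the clarified semantics, a minimal userspace query of ETHTOOL_GLINK looks like the sketch below, using the standard SIOCETHTOOL plumbing; the open socket fd and the device name are assumptions:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static void print_link(int fd, const char *dev)
{
	struct ethtool_value ev = { .cmd = ETHTOOL_GLINK };
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, dev, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&ev;
	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("%s: link %s\n", dev, ev.data ? "up" : "down");
}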
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 69b43db..45266b7 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -91,54 +91,6 @@
 #define         BPF_TAX         0x00
 #define         BPF_TXA         0x80
 
-enum {
-	BPF_S_RET_K = 0,
-	BPF_S_RET_A,
-	BPF_S_ALU_ADD_K,
-	BPF_S_ALU_ADD_X,
-	BPF_S_ALU_SUB_K,
-	BPF_S_ALU_SUB_X,
-	BPF_S_ALU_MUL_K,
-	BPF_S_ALU_MUL_X,
-	BPF_S_ALU_DIV_X,
-	BPF_S_ALU_AND_K,
-	BPF_S_ALU_AND_X,
-	BPF_S_ALU_OR_K,
-	BPF_S_ALU_OR_X,
-	BPF_S_ALU_LSH_K,
-	BPF_S_ALU_LSH_X,
-	BPF_S_ALU_RSH_K,
-	BPF_S_ALU_RSH_X,
-	BPF_S_ALU_NEG,
-	BPF_S_LD_W_ABS,
-	BPF_S_LD_H_ABS,
-	BPF_S_LD_B_ABS,
-	BPF_S_LD_W_LEN,
-	BPF_S_LD_W_IND,
-	BPF_S_LD_H_IND,
-	BPF_S_LD_B_IND,
-	BPF_S_LD_IMM,
-	BPF_S_LDX_W_LEN,
-	BPF_S_LDX_B_MSH,
-	BPF_S_LDX_IMM,
-	BPF_S_MISC_TAX,
-	BPF_S_MISC_TXA,
-	BPF_S_ALU_DIV_K,
-	BPF_S_LD_MEM,
-	BPF_S_LDX_MEM,
-	BPF_S_ST,
-	BPF_S_STX,
-	BPF_S_JMP_JA,
-	BPF_S_JMP_JEQ_K,
-	BPF_S_JMP_JEQ_X,
-	BPF_S_JMP_JGE_K,
-	BPF_S_JMP_JGE_X,
-	BPF_S_JMP_JGT_K,
-	BPF_S_JMP_JGT_X,
-	BPF_S_JMP_JSET_K,
-	BPF_S_JMP_JSET_X,
-};
-
 #ifndef BPF_MAXINSNS
 #define BPF_MAXINSNS 4096
 #endif
@@ -172,7 +124,9 @@
 #define SKF_AD_MARK 	20
 #define SKF_AD_QUEUE	24
 #define SKF_AD_HATYPE	28
-#define SKF_AD_MAX	32
+#define SKF_AD_RXHASH	32
+#define SKF_AD_CPU	36
+#define SKF_AD_MAX	40
 #define SKF_NET_OFF   (-0x100000)
 #define SKF_LL_OFF    (-0x200000)
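To illustrate the new ancillary offsets, a classic BPF program that accepts a packet only if it arrived on CPU 0 might look like this sketch; BPF_STMT/BPF_JUMP and SKF_AD_OFF are the existing linux/filter.h helpers:

#include <sys/socket.h>
#include <linux/filter.h>

static int attach_cpu0_filter(int fd)
{
	struct sock_filter cpu0_only[] = {
		/* A <- CPU the packet was received on (ancillary load) */
		BPF_STMT(BPF_LD | BPF_W | BPF_ABS, SKF_AD_OFF + SKF_AD_CPU),
		/* accept if A == 0, otherwise fall through and drop */
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, 0, 0, 1),
		BPF_STMT(BPF_RET | BPF_K, 0xffff),	/* accept up to 64KB */
		BPF_STMT(BPF_RET | BPF_K, 0),		/* drop */
	};
	struct sock_fprog prog = { .len = 4, .filter = cpu0_only };

	return setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
			  &prog, sizeof(prog));
}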
 
@@ -194,8 +148,8 @@
 struct sock;
 
 extern int sk_filter(struct sock *sk, struct sk_buff *skb);
-extern unsigned int sk_run_filter(struct sk_buff *skb,
-				  struct sock_filter *filter, int flen);
+extern unsigned int sk_run_filter(const struct sk_buff *skb,
+				  const struct sock_filter *filter);
 extern int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
 extern int sk_detach_filter(struct sock *sk);
 extern int sk_chk_filter(struct sock_filter *filter, int flen);
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 8a389b6..41cb31f 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -96,11 +96,15 @@
  */
 #define in_nmi()	(preempt_count() & NMI_MASK)
 
-#if defined(CONFIG_PREEMPT)
+#if defined(CONFIG_PREEMPT) && defined(CONFIG_BKL)
 # define PREEMPT_INATOMIC_BASE kernel_locked()
-# define PREEMPT_CHECK_OFFSET 1
 #else
 # define PREEMPT_INATOMIC_BASE 0
+#endif
+
+#if defined(CONFIG_PREEMPT)
+# define PREEMPT_CHECK_OFFSET 1
+#else
 # define PREEMPT_CHECK_OFFSET 0
 #endif
 
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index e913819..b676c58 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -5,6 +5,7 @@
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/uaccess.h>
+#include <linux/hardirq.h>
 
 #include <asm/cacheflush.h>
 
diff --git a/include/linux/i2c/adp5588.h b/include/linux/i2c/adp5588.h
index 3c5d6b6..cec17cf 100644
--- a/include/linux/i2c/adp5588.h
+++ b/include/linux/i2c/adp5588.h
@@ -1,7 +1,7 @@
 /*
  * Analog Devices ADP5588 I/O Expander and QWERTY Keypad Controller
  *
- * Copyright 2009 Analog Devices Inc.
+ * Copyright 2009-2010 Analog Devices Inc.
  *
  * Licensed under the GPL-2 or later.
  */
@@ -77,13 +77,26 @@
  /* Configuration Register1 */
 #define ADP5588_AUTO_INC	(1 << 7)
 #define ADP5588_GPIEM_CFG	(1 << 6)
+#define ADP5588_OVR_FLOW_M	(1 << 5)
 #define ADP5588_INT_CFG		(1 << 4)
+#define ADP5588_OVR_FLOW_IEN	(1 << 3)
+#define ADP5588_K_LCK_IM	(1 << 2)
 #define ADP5588_GPI_IEN		(1 << 1)
+#define ADP5588_KE_IEN		(1 << 0)
 
 /* Interrupt Status Register */
+#define ADP5588_CMP2_INT	(1 << 5)
+#define ADP5588_CMP1_INT	(1 << 4)
+#define ADP5588_OVR_FLOW_INT	(1 << 3)
+#define ADP5588_K_LCK_INT	(1 << 2)
 #define ADP5588_GPI_INT		(1 << 1)
 #define ADP5588_KE_INT		(1 << 0)
 
+/* Key Lock and Event Counter Register */
+#define ADP5588_K_LCK_EN	(1 << 6)
+#define ADP5588_LCK21		0x30
+#define ADP5588_KEC		0xF
+
 #define ADP5588_MAXGPIO		18
 #define ADP5588_BANK(offs)	((offs) >> 3)
 #define ADP5588_BIT(offs)	(1u << ((offs) & 0x7))
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index 0d241a5..f7e73c3 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -102,7 +102,9 @@
 #include <linux/netdevice.h>
 
 extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __user *));
-extern int (*br_should_route_hook)(struct sk_buff *skb);
+
+typedef int (*br_should_route_hook_t)(struct sk_buff *skb);
+extern br_should_route_hook_t __rcu *br_should_route_hook;
 
 #endif
 
diff --git a/include/linux/if_link.h b/include/linux/if_link.h
index 2fc66dd..6485d2a 100644
--- a/include/linux/if_link.h
+++ b/include/linux/if_link.h
@@ -80,6 +80,24 @@
 	__u8	port;
 };
 
+/*
+ * IFLA_AF_SPEC
+ *   Contains nested attributes for address family specific attributes.
+ *   Each address family may create an attribute with the address family
+ *   number as its type and nest its own attribute structure inside it.
+ *
+ *   Example:
+ *   [IFLA_AF_SPEC] = {
+ *       [AF_INET] = {
+ *           [IFLA_INET_CONF] = ...,
+ *       },
+ *       [AF_INET6] = {
+ *           [IFLA_INET6_FLAGS] = ...,
+ *           [IFLA_INET6_CONF] = ...,
+ *       }
+ *   }
+ */
+
 enum {
 	IFLA_UNSPEC,
 	IFLA_ADDRESS,
@@ -116,6 +134,7 @@
 	IFLA_STATS64,
 	IFLA_VF_PORTS,
 	IFLA_PORT_SELF,
+	IFLA_AF_SPEC,
 	__IFLA_MAX
 };
 
@@ -128,6 +147,14 @@
 #define IFLA_PAYLOAD(n) NLMSG_PAYLOAD(n,sizeof(struct ifinfomsg))
 #endif
 
+enum {
+	IFLA_INET_UNSPEC,
+	IFLA_INET_CONF,
+	__IFLA_INET_MAX,
+};
+
+#define IFLA_INET_MAX (__IFLA_INET_MAX - 1)
+
 /* ifi_flags.
 
    IFF_* flags.
@@ -232,6 +259,7 @@
 	MACVLAN_MODE_PRIVATE = 1, /* don't talk to other macvlans */
 	MACVLAN_MODE_VEPA    = 2, /* talk to other ports through ext bridge */
 	MACVLAN_MODE_BRIDGE  = 4, /* talk to bridge ports directly */
+	MACVLAN_MODE_PASSTHRU = 8,/* take over the underlying device */
 };
 
 /* SR-IOV virtual function management section */
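For illustration only (not part of this patch), a receiver could walk the new
IFLA_AF_SPEC nest roughly as below; walk_af_spec() is a hypothetical helper and
error handling is omitted:

#include <net/netlink.h>

/* Sketch: each nested attribute's type is an address family number. */
static void walk_af_spec(const struct nlattr *af_spec)
{
	struct nlattr *af;
	int rem;

	nla_for_each_nested(af, af_spec, rem) {
		switch (nla_type(af)) {
		case AF_INET:
			/* parse the nested IFLA_INET_* attributes here */
			break;
		case AF_INET6:
			/* parse the nested IFLA_INET6_* attributes here */
			break;
		}
	}
}
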
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h
index 8a2fd66..e28b2e4 100644
--- a/include/linux/if_macvlan.h
+++ b/include/linux/if_macvlan.h
@@ -25,19 +25,25 @@
 struct macvtap_queue;
 
 /**
- *	struct macvlan_rx_stats - MACVLAN percpu rx stats
+ *	struct macvlan_pcpu_stats - MACVLAN percpu stats
  *	@rx_packets: number of received packets
  *	@rx_bytes: number of received bytes
  *	@rx_multicast: number of received multicast packets
+ *	@tx_packets: number of transmitted packets
+ *	@tx_bytes: number of transmitted bytes
  *	@syncp: synchronization point for 64bit counters
- *	@rx_errors: number of errors
+ *	@rx_errors: number of rx errors
+ *	@tx_dropped: number of tx dropped packets
  */
-struct macvlan_rx_stats {
+struct macvlan_pcpu_stats {
 	u64			rx_packets;
 	u64			rx_bytes;
 	u64			rx_multicast;
+	u64			tx_packets;
+	u64			tx_bytes;
 	struct u64_stats_sync	syncp;
-	unsigned long		rx_errors;
+	u32			rx_errors;
+	u32			tx_dropped;
 };
 
 /*
@@ -52,7 +58,7 @@
 	struct hlist_node	hlist;
 	struct macvlan_port	*port;
 	struct net_device	*lowerdev;
-	struct macvlan_rx_stats __percpu *rx_stats;
+	struct macvlan_pcpu_stats __percpu *pcpu_stats;
 	enum macvlan_mode	mode;
 	int (*receive)(struct sk_buff *skb);
 	int (*forward)(struct net_device *dev, struct sk_buff *skb);
@@ -64,18 +70,18 @@
 				    unsigned int len, bool success,
 				    bool multicast)
 {
-	struct macvlan_rx_stats *rx_stats;
-
-	rx_stats = this_cpu_ptr(vlan->rx_stats);
 	if (likely(success)) {
-		u64_stats_update_begin(&rx_stats->syncp);
-		rx_stats->rx_packets++;;
-		rx_stats->rx_bytes += len;
+		struct macvlan_pcpu_stats *pcpu_stats;
+
+		pcpu_stats = this_cpu_ptr(vlan->pcpu_stats);
+		u64_stats_update_begin(&pcpu_stats->syncp);
+		pcpu_stats->rx_packets++;
+		pcpu_stats->rx_bytes += len;
 		if (multicast)
-			rx_stats->rx_multicast++;
-		u64_stats_update_end(&rx_stats->syncp);
+			pcpu_stats->rx_multicast++;
+		u64_stats_update_end(&pcpu_stats->syncp);
 	} else {
-		rx_stats->rx_errors++;
+		this_cpu_inc(vlan->pcpu_stats->rx_errors);
 	}
 }
 
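As a hedged sketch (not in this patch) of the read side that pairs with the
update helper above, an aggregator would use the u64_stats fetch/retry helpers
for each CPU's counters; macvlan_read_cpu_stats() is a hypothetical name:

#include <linux/u64_stats_sync.h>

static void macvlan_read_cpu_stats(const struct macvlan_pcpu_stats *p,
				   u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&p->syncp);
		*packets = p->rx_packets;
		*bytes   = p->rx_bytes;
	} while (u64_stats_fetch_retry(&p->syncp, start));
	/* rx_errors and tx_dropped are plain u32s and need no syncp */
}
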
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index c2f3a72..635e1fa 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -339,6 +339,31 @@
 	}
 }
 
+/**
+ * vlan_get_protocol - get protocol EtherType.
+ * @skb: skbuff to query
+ *
+ * Returns the EtherType of the packet, regardless of whether it is
+ * vlan encapsulated (normal or hardware accelerated) or not.
+ */
+static inline __be16 vlan_get_protocol(const struct sk_buff *skb)
+{
+	__be16 protocol = 0;
+
+	if (vlan_tx_tag_present(skb) ||
+	     skb->protocol != cpu_to_be16(ETH_P_8021Q))
+		protocol = skb->protocol;
+	else {
+		__be16 proto, *protop;
+		protop = skb_header_pointer(skb, offsetof(struct vlan_ethhdr,
+						h_vlan_encapsulated_proto),
+						sizeof(proto), &proto);
+		if (likely(protop))
+			protocol = *protop;
+	}
+
+	return protocol;
+}
 #endif /* __KERNEL__ */
 
 /* VLAN IOCTLs are found in sockios.h */
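A minimal usage sketch (not part of this patch): callers that previously
open-coded the 802.1Q check can ask for the encapsulated EtherType directly;
frame_is_ipv4() is a hypothetical helper.

#include <linux/if_vlan.h>
#include <linux/if_ether.h>

static bool frame_is_ipv4(const struct sk_buff *skb)
{
	/* true for plain and for VLAN-tagged IPv4 frames alike */
	return vlan_get_protocol(skb) == htons(ETH_P_IP);
}
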
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index 93fc244..c4987f2 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -167,10 +167,10 @@
  */
 
 struct ip_mc_socklist {
-	struct ip_mc_socklist	*next;
+	struct ip_mc_socklist __rcu *next_rcu;
 	struct ip_mreqn		multi;
 	unsigned int		sfmode;		/* MCAST_{INCLUDE,EXCLUDE} */
-	struct ip_sf_socklist	*sflist;
+	struct ip_sf_socklist __rcu	*sflist;
 	struct rcu_head		rcu;
 };
 
@@ -186,11 +186,14 @@
 struct ip_mc_list {
 	struct in_device	*interface;
 	__be32			multiaddr;
+	unsigned int		sfmode;
 	struct ip_sf_list	*sources;
 	struct ip_sf_list	*tomb;
-	unsigned int		sfmode;
 	unsigned long		sfcount[2];
-	struct ip_mc_list	*next;
+	union {
+		struct ip_mc_list *next;
+		struct ip_mc_list __rcu *next_rcu;
+	};
 	struct timer_list	timer;
 	int			users;
 	atomic_t		refcnt;
@@ -201,6 +204,7 @@
 	char			loaded;
 	unsigned char		gsquery;	/* check source marks? */
 	unsigned char		crcount;
+	struct rcu_head		rcu;
 };
 
 /* V3 exponential field decoding */
@@ -234,7 +238,7 @@
 extern void ip_mc_remap(struct in_device *);
 extern void ip_mc_dec_group(struct in_device *in_dev, __be32 addr);
 extern void ip_mc_inc_group(struct in_device *in_dev, __be32 addr);
-extern void ip_mc_rejoin_group(struct ip_mc_list *im);
+extern void ip_mc_rejoin_groups(struct in_device *in_dev);
 
 #endif
 #endif
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index ccd5b07..ae8fdc5 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -41,10 +41,12 @@
 	__IPV4_DEVCONF_MAX
 };
 
+#define IPV4_DEVCONF_MAX (__IPV4_DEVCONF_MAX - 1)
+
 struct ipv4_devconf {
 	void	*sysctl;
-	int	data[__IPV4_DEVCONF_MAX - 1];
-	DECLARE_BITMAP(state, __IPV4_DEVCONF_MAX - 1);
+	int	data[IPV4_DEVCONF_MAX];
+	DECLARE_BITMAP(state, IPV4_DEVCONF_MAX);
 };
 
 struct in_device {
@@ -52,9 +54,8 @@
 	atomic_t		refcnt;
 	int			dead;
 	struct in_ifaddr	*ifa_list;	/* IP ifaddr chain		*/
-	rwlock_t		mc_list_lock;
-	struct ip_mc_list	*mc_list;	/* IP multicast filter chain    */
-	int			mc_count;	          /* Number of installed mcasts	*/
+	struct ip_mc_list __rcu	*mc_list;	/* IP multicast filter chain    */
+	int			mc_count;	/* Number of installed mcasts	*/
 	spinlock_t		mc_tomb_lock;
 	struct ip_mc_list	*mc_tomb;
 	unsigned long		mr_v1_seen;
@@ -91,7 +92,7 @@
 
 static inline void ipv4_devconf_setall(struct in_device *in_dev)
 {
-	bitmap_fill(in_dev->cnf.state, __IPV4_DEVCONF_MAX - 1);
+	bitmap_fill(in_dev->cnf.state, IPV4_DEVCONF_MAX);
 }
 
 #define IN_DEV_CONF_GET(in_dev, attr) \
@@ -221,7 +222,7 @@
 
 static inline struct in_device *__in_dev_get_rtnl(const struct net_device *dev)
 {
-	return rcu_dereference_check(dev->ip_ptr, lockdep_rtnl_is_held());
+	return rtnl_dereference(dev->ip_ptr);
 }
 
 extern void in_dev_finish_destroy(struct in_device *idev);
diff --git a/include/linux/input.h b/include/linux/input.h
index 51af441..6ef4446 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -1406,6 +1406,8 @@
 int __must_check input_register_device(struct input_dev *);
 void input_unregister_device(struct input_dev *);
 
+void input_reset_device(struct input_dev *);
+
 int __must_check input_register_handler(struct input_handler *);
 void input_unregister_handler(struct input_handler *);
 
@@ -1421,7 +1423,7 @@
 int input_open_device(struct input_handle *);
 void input_close_device(struct input_handle *);
 
-int input_flush_device(struct input_handle* handle, struct file* file);
+int input_flush_device(struct input_handle *handle, struct file *file);
 
 void input_event(struct input_dev *dev, unsigned int type, unsigned int code, int value);
 void input_inject_event(struct input_handle *handle, unsigned int type, unsigned int code, int value);
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index 3e70b21..b2eee89 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -76,7 +76,6 @@
 void exit_io_context(struct task_struct *task);
 struct io_context *get_io_context(gfp_t gfp_flags, int node);
 struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
-void copy_io_context(struct io_context **pdst, struct io_context **psrc);
 #else
 static inline void exit_io_context(struct task_struct *task)
 {
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 8e429d0..0c99776 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -364,7 +364,7 @@
 
 	__u32			dst_cookie;
 
-	struct ipv6_mc_socklist	*ipv6_mc_list;
+	struct ipv6_mc_socklist	__rcu *ipv6_mc_list;
 	struct ipv6_ac_socklist	*ipv6_ac_list;
 	struct ipv6_fl_socklist *ipv6_fl_list;
 
diff --git a/include/linux/jhash.h b/include/linux/jhash.h
index ced1159..47cb09e 100644
--- a/include/linux/jhash.h
+++ b/include/linux/jhash.h
@@ -3,129 +3,156 @@
 
 /* jhash.h: Jenkins hash support.
  *
- * Copyright (C) 1996 Bob Jenkins (bob_jenkins@burtleburtle.net)
+ * Copyright (C) 2006. Bob Jenkins (bob_jenkins@burtleburtle.net)
  *
  * http://burtleburtle.net/bob/hash/
  *
  * These are the credits from Bob's sources:
  *
- * lookup2.c, by Bob Jenkins, December 1996, Public Domain.
- * hash(), hash2(), hash3, and mix() are externally useful functions.
- * Routines to test the hash are included if SELF_TEST is defined.
- * You can use this free for any purpose.  It has no warranty.
+ * lookup3.c, by Bob Jenkins, May 2006, Public Domain.
  *
- * Copyright (C) 2003 David S. Miller (davem@redhat.com)
+ * These are functions for producing 32-bit hashes for hash table lookup.
+ * hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final()
+ * are externally useful functions.  Routines to test the hash are included
+ * if SELF_TEST is defined.  You can use this free for any purpose.  It's in
+ * the public domain.  It has no warranty.
+ *
+ * Copyright (C) 2009-2010 Jozsef Kadlecsik (kadlec@blackhole.kfki.hu)
  *
  * I've modified Bob's hash to be useful in the Linux kernel, and
- * any bugs present are surely my fault.  -DaveM
+ * any bugs present are my fault.
+ * Jozsef
  */
+#include <linux/bitops.h>
+#include <linux/unaligned/packed_struct.h>
 
-/* NOTE: Arguments are modified. */
-#define __jhash_mix(a, b, c) \
-{ \
-  a -= b; a -= c; a ^= (c>>13); \
-  b -= c; b -= a; b ^= (a<<8); \
-  c -= a; c -= b; c ^= (b>>13); \
-  a -= b; a -= c; a ^= (c>>12);  \
-  b -= c; b -= a; b ^= (a<<16); \
-  c -= a; c -= b; c ^= (b>>5); \
-  a -= b; a -= c; a ^= (c>>3);  \
-  b -= c; b -= a; b ^= (a<<10); \
-  c -= a; c -= b; c ^= (b>>15); \
+/* The best hash table sizes are powers of two */
+#define jhash_size(n)   ((u32)1<<(n))
+/* Mask the hash value, i.e. (value & jhash_mask(n)) instead of (value % n) */
+#define jhash_mask(n)   (jhash_size(n)-1)
+
+/* __jhash_mix -- mix 3 32-bit values reversibly. */
+#define __jhash_mix(a, b, c)			\
+{						\
+	a -= c;  a ^= rol32(c, 4);  c += b;	\
+	b -= a;  b ^= rol32(a, 6);  a += c;	\
+	c -= b;  c ^= rol32(b, 8);  b += a;	\
+	a -= c;  a ^= rol32(c, 16); c += b;	\
+	b -= a;  b ^= rol32(a, 19); a += c;	\
+	c -= b;  c ^= rol32(b, 4);  b += a;	\
 }
 
-/* The golden ration: an arbitrary value */
-#define JHASH_GOLDEN_RATIO	0x9e3779b9
+/* __jhash_final - final mixing of 3 32-bit values (a,b,c) into c */
+#define __jhash_final(a, b, c)			\
+{						\
+	c ^= b; c -= rol32(b, 14);		\
+	a ^= c; a -= rol32(c, 11);		\
+	b ^= a; b -= rol32(a, 25);		\
+	c ^= b; c -= rol32(b, 16);		\
+	a ^= c; a -= rol32(c, 4);		\
+	b ^= a; b -= rol32(a, 14);		\
+	c ^= b; c -= rol32(b, 24);		\
+}
 
-/* The most generic version, hashes an arbitrary sequence
- * of bytes.  No alignment or length assumptions are made about
- * the input key.
+/* An arbitrary initial parameter */
+#define JHASH_INITVAL		0xdeadbeef
+
+/* jhash - hash an arbitrary key
+ * @k: sequence of bytes as key
+ * @length: the length of the key
+ * @initval: the previous hash, or an arbitrary value
+ *
+ * The generic version; it hashes an arbitrary sequence of bytes.
+ * No alignment or length assumptions are made about the input key.
+ *
+ * Returns the hash value of the key. The result depends on endianness.
  */
 static inline u32 jhash(const void *key, u32 length, u32 initval)
 {
-	u32 a, b, c, len;
+	u32 a, b, c;
 	const u8 *k = key;
 
-	len = length;
-	a = b = JHASH_GOLDEN_RATIO;
-	c = initval;
+	/* Set up the internal state */
+	a = b = c = JHASH_INITVAL + length + initval;
 
-	while (len >= 12) {
-		a += (k[0] +((u32)k[1]<<8) +((u32)k[2]<<16) +((u32)k[3]<<24));
-		b += (k[4] +((u32)k[5]<<8) +((u32)k[6]<<16) +((u32)k[7]<<24));
-		c += (k[8] +((u32)k[9]<<8) +((u32)k[10]<<16)+((u32)k[11]<<24));
-
-		__jhash_mix(a,b,c);
-
+	/* All but the last block: affect some 32 bits of (a,b,c) */
+	while (length > 12) {
+		a += __get_unaligned_cpu32(k);
+		b += __get_unaligned_cpu32(k + 4);
+		c += __get_unaligned_cpu32(k + 8);
+		__jhash_mix(a, b, c);
+		length -= 12;
 		k += 12;
-		len -= 12;
 	}
-
-	c += length;
-	switch (len) {
-	case 11: c += ((u32)k[10]<<24);
-	case 10: c += ((u32)k[9]<<16);
-	case 9 : c += ((u32)k[8]<<8);
-	case 8 : b += ((u32)k[7]<<24);
-	case 7 : b += ((u32)k[6]<<16);
-	case 6 : b += ((u32)k[5]<<8);
-	case 5 : b += k[4];
-	case 4 : a += ((u32)k[3]<<24);
-	case 3 : a += ((u32)k[2]<<16);
-	case 2 : a += ((u32)k[1]<<8);
-	case 1 : a += k[0];
-	};
-
-	__jhash_mix(a,b,c);
+	/* Last block: affect all 32 bits of (c) */
+	/* All the case statements fall through */
+	switch (length) {
+	case 12: c += (u32)k[11]<<24;
+	case 11: c += (u32)k[10]<<16;
+	case 10: c += (u32)k[9]<<8;
+	case 9:  c += k[8];
+	case 8:  b += (u32)k[7]<<24;
+	case 7:  b += (u32)k[6]<<16;
+	case 6:  b += (u32)k[5]<<8;
+	case 5:  b += k[4];
+	case 4:  a += (u32)k[3]<<24;
+	case 3:  a += (u32)k[2]<<16;
+	case 2:  a += (u32)k[1]<<8;
+	case 1:  a += k[0];
+		 __jhash_final(a, b, c);
+	case 0: /* Nothing left to add */
+		break;
+	}
 
 	return c;
 }
 
-/* A special optimized version that handles 1 or more of u32s.
- * The length parameter here is the number of u32s in the key.
+/* jhash2 - hash an array of u32's
+ * @k: the key which must be an array of u32's
+ * @length: the number of u32's in the key
+ * @initval: the previous hash, or an arbitrary value
+ *
+ * Returns the hash value of the key.
  */
 static inline u32 jhash2(const u32 *k, u32 length, u32 initval)
 {
-	u32 a, b, c, len;
+	u32 a, b, c;
 
-	a = b = JHASH_GOLDEN_RATIO;
-	c = initval;
-	len = length;
+	/* Set up the internal state */
+	a = b = c = JHASH_INITVAL + (length<<2) + initval;
 
-	while (len >= 3) {
+	/* Handle most of the key */
+	while (length > 3) {
 		a += k[0];
 		b += k[1];
 		c += k[2];
 		__jhash_mix(a, b, c);
-		k += 3; len -= 3;
+		length -= 3;
+		k += 3;
 	}
 
-	c += length * 4;
-
-	switch (len) {
-	case 2 : b += k[1];
-	case 1 : a += k[0];
-	};
-
-	__jhash_mix(a,b,c);
+	/* Handle the last 3 u32's: all the case statements fall through */
+	switch (length) {
+	case 3: c += k[2];
+	case 2: b += k[1];
+	case 1: a += k[0];
+		__jhash_final(a, b, c);
+	case 0:	/* Nothing left to add */
+		break;
+	}
 
 	return c;
 }
 
 
-/* A special ultra-optimized versions that knows they are hashing exactly
- * 3, 2 or 1 word(s).
- *
- * NOTE: In particular the "c += length; __jhash_mix(a,b,c);" normally
- *       done at the end is not done here.
- */
+/* jhash_3words - hash exactly 3, 2 or 1 word(s) */
 static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval)
 {
-	a += JHASH_GOLDEN_RATIO;
-	b += JHASH_GOLDEN_RATIO;
+	a += JHASH_INITVAL;
+	b += JHASH_INITVAL;
 	c += initval;
 
-	__jhash_mix(a, b, c);
+	__jhash_final(a, b, c);
 
 	return c;
 }
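A short usage sketch under the new interface (names here are hypothetical):
hash tables sized to a power of two can mask the result with jhash_mask()
instead of taking a modulo.

#include <linux/jhash.h>

#define MY_HASH_BITS	8	/* hypothetical: 256 buckets */

static u32 my_bucket(const void *key, u32 len, u32 seed)
{
	return jhash(key, len, seed) & jhash_mask(MY_HASH_BITS);
}
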
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 450092c..fc3da9e 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -60,7 +60,7 @@
 #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
 #define roundup(x, y) (					\
 {							\
-	typeof(y) __y = y;				\
+	const typeof(y) __y = y;			\
 	(((x) + (__y - 1)) / __y) * __y;		\
 }							\
 )
@@ -293,6 +293,7 @@
 				   unsigned int interval_msec);
 
 extern int printk_delay_msec;
+extern int dmesg_restrict;
 
 /*
  * Print a one-time message (analogous to WARN_ONCE() et al):
diff --git a/include/linux/leds-lp5521.h b/include/linux/leds-lp5521.h
new file mode 100644
index 0000000..38368d7
--- /dev/null
+++ b/include/linux/leds-lp5521.h
@@ -0,0 +1,47 @@
+/*
+ * LP5521 LED chip driver.
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ *
+ * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef __LINUX_LP5521_H
+#define __LINUX_LP5521_H
+
+/* See Documentation/leds/leds-lp5521.txt */
+
+struct lp5521_led_config {
+	u8		chan_nr;
+	u8		led_current; /* mA x10, 0 if led is not connected */
+	u8		max_current;
+};
+
+#define LP5521_CLOCK_AUTO	0
+#define LP5521_CLOCK_INT	1
+#define LP5521_CLOCK_EXT	2
+
+struct lp5521_platform_data {
+	struct lp5521_led_config *led_config;
+	u8	num_channels;
+	u8	clock_mode;
+	int	(*setup_resources)(void);
+	void	(*release_resources)(void);
+	void	(*enable)(bool state);
+};
+
+#endif /* __LINUX_LP5521_H */
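A hypothetical board-file sketch (values are illustrative only, assuming
led_current is in units of 0.1 mA as the header comment suggests):

static struct lp5521_led_config board_lp5521_leds[] = {
	{
		.chan_nr	= 0,
		.led_current	= 50,	/* 5.0 mA */
		.max_current	= 130,	/* 13.0 mA */
	},
};

static struct lp5521_platform_data board_lp5521_pdata = {
	.led_config	= board_lp5521_leds,
	.num_channels	= ARRAY_SIZE(board_lp5521_leds),
	.clock_mode	= LP5521_CLOCK_AUTO,
};
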
diff --git a/include/linux/leds-lp5523.h b/include/linux/leds-lp5523.h
new file mode 100644
index 0000000..7967476
--- /dev/null
+++ b/include/linux/leds-lp5523.h
@@ -0,0 +1,47 @@
+/*
+ * LP5523 LED Driver
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ *
+ * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#ifndef __LINUX_LP5523_H
+#define __LINUX_LP5523_H
+
+/* See Documentation/leds/leds-lp5523.txt */
+
+struct lp5523_led_config {
+	u8		chan_nr;
+	u8		led_current; /* mA x10, 0 if led is not connected */
+	u8		max_current;
+};
+
+#define LP5523_CLOCK_AUTO	0
+#define LP5523_CLOCK_INT	1
+#define LP5523_CLOCK_EXT	2
+
+struct lp5523_platform_data {
+	struct lp5523_led_config *led_config;
+	u8	num_channels;
+	u8	clock_mode;
+	int	(*setup_resources)(void);
+	void	(*release_resources)(void);
+	void	(*enable)(bool state);
+};
+
+#endif /* __LINUX_LP5523_H */
diff --git a/include/linux/leds.h b/include/linux/leds.h
index ba6986a..0f19df9 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -15,6 +15,7 @@
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/rwsem.h>
+#include <linux/timer.h>
 
 struct device;
 /*
@@ -45,10 +46,14 @@
 	/* Get LED brightness level */
 	enum led_brightness (*brightness_get)(struct led_classdev *led_cdev);
 
-	/* Activate hardware accelerated blink, delays are in
-	 * miliseconds and if none is provided then a sensible default
-	 * should be chosen. The call can adjust the timings if it can't
-	 * match the values specified exactly. */
+	/*
+	 * Activate hardware accelerated blink; delays are in milliseconds.
+	 * If both are zero, a sensible default should be chosen.  The call
+	 * should adjust the timings in that case, and also when it cannot
+	 * match the values specified exactly.
+	 * Deactivate blinking again when the brightness is set to a fixed
+	 * value via the brightness_set() callback.
+	 */
 	int		(*blink_set)(struct led_classdev *led_cdev,
 				     unsigned long *delay_on,
 				     unsigned long *delay_off);
@@ -57,6 +62,10 @@
 	struct list_head	 node;			/* LED Device list */
 	const char		*default_trigger;	/* Trigger to use */
 
+	unsigned long		 blink_delay_on, blink_delay_off;
+	struct timer_list	 blink_timer;
+	int			 blink_brightness;
+
 #ifdef CONFIG_LEDS_TRIGGERS
 	/* Protects the trigger data below */
 	struct rw_semaphore	 trigger_lock;
@@ -73,6 +82,36 @@
 extern void led_classdev_suspend(struct led_classdev *led_cdev);
 extern void led_classdev_resume(struct led_classdev *led_cdev);
 
+/**
+ * led_blink_set - set blinking with software fallback
+ * @led_cdev: the LED to start blinking
+ * @delay_on: the time it should be on (in ms)
+ * @delay_off: the time it should be off (in ms)
+ *
+ * This function makes the LED blink, attempting to use the
+ * hardware acceleration if possible, but falling back to
+ * software blinking if there is no hardware blinking or if
+ * the LED refuses the passed values.
+ *
+ * Note that if software blinking is active, simply calling
+ * led_cdev->brightness_set() will not stop the blinking,
+ * use led_brightness_set() instead.
+ */
+extern void led_blink_set(struct led_classdev *led_cdev,
+			  unsigned long *delay_on,
+			  unsigned long *delay_off);
+/**
+ * led_brightness_set - set LED brightness
+ * @led_cdev: the LED to set
+ * @brightness: the brightness to set it to
+ *
+ * Set an LED's brightness, and, if necessary, cancel the
+ * software blink timer that implements blinking when the
+ * hardware doesn't.
+ */
+extern void led_brightness_set(struct led_classdev *led_cdev,
+			       enum led_brightness brightness);
+
 /*
  * LED Triggers
  */
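A brief usage sketch of the two new helpers (led_cdev stands for any
registered LED class device):

	unsigned long delay_on = 500, delay_off = 500;

	/* Blink at roughly 1 Hz; the core may adjust the delays. */
	led_blink_set(led_cdev, &delay_on, &delay_off);

	/* Setting a fixed brightness through the helper also cancels
	 * any software blink timer that may be running. */
	led_brightness_set(led_cdev, LED_OFF);
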
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h
index 1ff81b5..dd3c34e 100644
--- a/include/linux/marvell_phy.h
+++ b/include/linux/marvell_phy.h
@@ -11,6 +11,7 @@
 #define MARVELL_PHY_ID_88E1118		0x01410e10
 #define MARVELL_PHY_ID_88E1121R		0x01410cb0
 #define MARVELL_PHY_ID_88E1145		0x01410cd0
+#define MARVELL_PHY_ID_88E1149R		0x01410e50
 #define MARVELL_PHY_ID_88E1240		0x01410e30
 #define MARVELL_PHY_ID_88E1318S		0x01410e90
 
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index c779b49..b1494ac 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -55,6 +55,7 @@
 #define MDIO_PCS_10GBRT_STAT2	33	/* 10GBASE-R/-T PCS status 2 */
 #define MDIO_AN_10GBT_CTRL	32	/* 10GBASE-T auto-negotiation control */
 #define MDIO_AN_10GBT_STAT	33	/* 10GBASE-T auto-negotiation status */
+#define MDIO_AN_EEE_ADV		60	/* EEE advertisement */
 
 /* LASI (Link Alarm Status Interrupt) registers, defined by XENPAK MSA. */
 #define MDIO_PMA_LASI_RXCTRL	0x9000	/* RX_ALARM control */
@@ -235,6 +236,10 @@
 #define MDIO_AN_10GBT_STAT_MS		0x4000	/* Master/slave config */
 #define MDIO_AN_10GBT_STAT_MSFLT	0x8000	/* Master/slave config fault */
 
+/* AN EEE Advertisement register. */
+#define MDIO_AN_EEE_ADV_100TX		0x0002	/* Advertise 100TX EEE cap */
+#define MDIO_AN_EEE_ADV_1000T		0x0004	/* Advertise 1000T EEE cap */
+
 /* LASI RX_ALARM control/status registers. */
 #define MDIO_PMA_LASI_RX_PHYXSLFLT	0x0001	/* PHY XS RX local fault */
 #define MDIO_PMA_LASI_RX_PCSLFLT	0x0008	/* PCS RX local fault */
diff --git a/include/linux/mmc/sh_mmcif.h b/include/linux/mmc/sh_mmcif.h
index d19e211..5c99da1 100644
--- a/include/linux/mmc/sh_mmcif.h
+++ b/include/linux/mmc/sh_mmcif.h
@@ -59,19 +59,19 @@
 #define MMCIF_CE_HOST_STS2	0x0000004C
 #define MMCIF_CE_VERSION	0x0000007C
 
-extern inline u32 sh_mmcif_readl(void __iomem *addr, int reg)
+static inline u32 sh_mmcif_readl(void __iomem *addr, int reg)
 {
 	return readl(addr + reg);
 }
 
-extern inline void sh_mmcif_writel(void __iomem *addr, int reg, u32 val)
+static inline void sh_mmcif_writel(void __iomem *addr, int reg, u32 val)
 {
 	writel(val, addr + reg);
 }
 
 #define SH_MMCIF_BBS 512 /* boot block size */
 
-extern inline void sh_mmcif_boot_cmd_send(void __iomem *base,
+static inline void sh_mmcif_boot_cmd_send(void __iomem *base,
 					  unsigned long cmd, unsigned long arg)
 {
 	sh_mmcif_writel(base, MMCIF_CE_INT, 0);
@@ -79,7 +79,7 @@
 	sh_mmcif_writel(base, MMCIF_CE_CMD_SET, cmd);
 }
 
-extern inline int sh_mmcif_boot_cmd_poll(void __iomem *base, unsigned long mask)
+static inline int sh_mmcif_boot_cmd_poll(void __iomem *base, unsigned long mask)
 {
 	unsigned long tmp;
 	int cnt;
@@ -95,14 +95,14 @@
 	return -1;
 }
 
-extern inline int sh_mmcif_boot_cmd(void __iomem *base,
+static inline int sh_mmcif_boot_cmd(void __iomem *base,
 				    unsigned long cmd, unsigned long arg)
 {
 	sh_mmcif_boot_cmd_send(base, cmd, arg);
 	return sh_mmcif_boot_cmd_poll(base, 0x00010000);
 }
 
-extern inline int sh_mmcif_boot_do_read_single(void __iomem *base,
+static inline int sh_mmcif_boot_do_read_single(void __iomem *base,
 					       unsigned int block_nr,
 					       unsigned long *buf)
 {
@@ -125,7 +125,7 @@
 	return 0;
 }
 
-extern inline int sh_mmcif_boot_do_read(void __iomem *base,
+static inline int sh_mmcif_boot_do_read(void __iomem *base,
 					unsigned long first_block,
 					unsigned long nr_blocks,
 					void *buf)
@@ -143,7 +143,7 @@
 	return ret;
 }
 
-extern inline void sh_mmcif_boot_init(void __iomem *base)
+static inline void sh_mmcif_boot_init(void __iomem *base)
 {
 	unsigned long tmp;
 
@@ -177,7 +177,7 @@
 	sh_mmcif_boot_cmd(base, 0x03400040, 0x00010000);
 }
 
-extern inline void sh_mmcif_boot_slurp(void __iomem *base,
+static inline void sh_mmcif_boot_slurp(void __iomem *base,
 				       unsigned char *buf,
 				       unsigned long no_bytes)
 {
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 072652d..d31bc3c 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -493,6 +493,8 @@
 enum netdev_queue_state_t {
 	__QUEUE_STATE_XOFF,
 	__QUEUE_STATE_FROZEN,
+#define QUEUE_STATE_XOFF_OR_FROZEN ((1 << __QUEUE_STATE_XOFF)		| \
+				    (1 << __QUEUE_STATE_FROZEN))
 };
 
 struct netdev_queue {
@@ -503,6 +505,12 @@
 	struct Qdisc		*qdisc;
 	unsigned long		state;
 	struct Qdisc		*qdisc_sleeping;
+#ifdef CONFIG_RPS
+	struct kobject		kobj;
+#endif
+#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
+	int			numa_node;
+#endif
 /*
  * write mostly part
  */
@@ -517,6 +525,22 @@
 	u64			tx_dropped;
 } ____cacheline_aligned_in_smp;
 
+static inline int netdev_queue_numa_node_read(const struct netdev_queue *q)
+{
+#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
+	return q->numa_node;
+#else
+	return -1;
+#endif
+}
+
+static inline void netdev_queue_numa_node_write(struct netdev_queue *q, int node)
+{
+#if defined(CONFIG_XPS) && defined(CONFIG_NUMA)
+	q->numa_node = node;
+#endif
+}
+
 #ifdef CONFIG_RPS
 /*
  * This structure holds an RPS map which can be of variable length.  The
@@ -592,11 +616,36 @@
 	struct rps_map __rcu		*rps_map;
 	struct rps_dev_flow_table __rcu	*rps_flow_table;
 	struct kobject			kobj;
-	struct netdev_rx_queue		*first;
-	atomic_t			count;
+	struct net_device		*dev;
 } ____cacheline_aligned_in_smp;
 #endif /* CONFIG_RPS */
 
+#ifdef CONFIG_XPS
+/*
+ * This structure holds an XPS map which can be of variable length.  The
+ * map is an array of queues.
+ */
+struct xps_map {
+	unsigned int len;
+	unsigned int alloc_len;
+	struct rcu_head rcu;
+	u16 queues[0];
+};
+#define XPS_MAP_SIZE(_num) (sizeof(struct xps_map) + (_num * sizeof(u16)))
+#define XPS_MIN_MAP_ALLOC ((L1_CACHE_BYTES - sizeof(struct xps_map))	\
+    / sizeof(u16))
+
+/*
+ * This structure holds all XPS maps for device.  Maps are indexed by CPU.
+ */
+struct xps_dev_maps {
+	struct rcu_head rcu;
+	struct xps_map __rcu *cpu_map[0];
+};
+#define XPS_DEV_MAPS_SIZE (sizeof(struct xps_dev_maps) +		\
+    (nr_cpu_ids * sizeof(struct xps_map *)))
+#endif /* CONFIG_XPS */
+
 /*
  * This structure defines the management hooks for network devices.
  * The following hooks can be defined; unless noted otherwise, they are
@@ -951,7 +1000,7 @@
 #endif
 	void 			*atalk_ptr;	/* AppleTalk link 	*/
 	struct in_device __rcu	*ip_ptr;	/* IPv4 specific data	*/
-	void                    *dn_ptr;        /* DECnet specific data */
+	struct dn_dev __rcu     *dn_ptr;        /* DECnet specific data */
 	struct inet6_dev __rcu	*ip6_ptr;       /* IPv6 specific data */
 	void			*ec_ptr;	/* Econet specific data	*/
 	void			*ax25_ptr;	/* AX.25 specific data */
@@ -995,8 +1044,8 @@
 	unsigned int		real_num_rx_queues;
 #endif
 
-	rx_handler_func_t	*rx_handler;
-	void			*rx_handler_data;
+	rx_handler_func_t __rcu	*rx_handler;
+	void __rcu		*rx_handler_data;
 
 	struct netdev_queue __rcu *ingress_queue;
 
@@ -1017,6 +1066,10 @@
 	unsigned long		tx_queue_len;	/* Max frames per queue allowed */
 	spinlock_t		tx_global_lock;
 
+#ifdef CONFIG_XPS
+	struct xps_dev_maps __rcu *xps_maps;
+#endif
+
 	/* These may be needed for future network-power-down code. */
 
 	/*
@@ -1307,7 +1360,8 @@
 
 extern int 			netdev_boot_setup_check(struct net_device *dev);
 extern unsigned long		netdev_boot_base(const char *prefix, int unit);
-extern struct net_device    *dev_getbyhwaddr(struct net *net, unsigned short type, char *hwaddr);
+extern struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
+					      const char *hwaddr);
 extern struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type);
 extern struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type);
 extern void		dev_add_pack(struct packet_type *pt);
@@ -1554,6 +1608,11 @@
 
 static inline void netif_tx_stop_queue(struct netdev_queue *dev_queue)
 {
+	if (WARN_ON(!dev_queue)) {
+		printk(KERN_INFO "netif_stop_queue() cannot be called before "
+		       "register_netdev()\n");
+		return;
+	}
 	set_bit(__QUEUE_STATE_XOFF, &dev_queue->state);
 }
 
@@ -1595,9 +1654,9 @@
 	return netif_tx_queue_stopped(netdev_get_tx_queue(dev, 0));
 }
 
-static inline int netif_tx_queue_frozen(const struct netdev_queue *dev_queue)
+static inline int netif_tx_queue_frozen_or_stopped(const struct netdev_queue *dev_queue)
 {
-	return test_bit(__QUEUE_STATE_FROZEN, &dev_queue->state);
+	return dev_queue->state & QUEUE_STATE_XOFF_OR_FROZEN;
 }
 
 /**
@@ -2234,6 +2293,8 @@
 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
 					struct net_device *dev);
 
+int netif_get_vlan_features(struct sk_buff *skb, struct net_device *dev);
+
 static inline int net_gso_ok(int features, int gso_type)
 {
 	int feature = gso_type << NETIF_F_GSO_SHIFT;
@@ -2249,10 +2310,7 @@
 static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
 {
 	if (skb_is_gso(skb)) {
-		int features = dev->features;
-
-		if (skb->protocol == htons(ETH_P_8021Q) || skb->vlan_tci)
-			features &= dev->vlan_features;
+		int features = netif_get_vlan_features(skb, dev);
 
 		return (!skb_gso_ok(skb, features) ||
 			unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
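A hedged sketch of how a transmit path might consult the per-CPU XPS map
introduced above (queue selection policy and error handling omitted;
xps_pick_queue() is a hypothetical name):

static u16 xps_pick_queue(struct net_device *dev, int cpu)
{
	struct xps_dev_maps *dev_maps;
	struct xps_map *map;
	u16 queue = 0;

	rcu_read_lock();
	dev_maps = rcu_dereference(dev->xps_maps);
	if (dev_maps) {
		map = rcu_dereference(dev_maps->cpu_map[cpu]);
		if (map && map->len)
			queue = map->queues[0];	/* real code spreads flows */
	}
	rcu_read_unlock();
	return queue;
}
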
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 89341c3..1893837 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -33,6 +33,8 @@
 
 #define NF_QUEUE_NR(x) ((((x) << NF_VERDICT_BITS) & NF_VERDICT_QMASK) | NF_QUEUE)
 
+#define NF_DROP_ERR(x) (((-x) << NF_VERDICT_BITS) | NF_DROP)
+
 /* only for userspace compatibility */
 #ifndef __KERNEL__
 /* Generic cache responses from hook functions.
@@ -215,7 +217,7 @@
 	int ret;
 
 	if (!cond ||
-	    (ret = nf_hook_thresh(pf, hook, skb, in, out, okfn, INT_MIN) == 1))
+	    ((ret = nf_hook_thresh(pf, hook, skb, in, out, okfn, INT_MIN)) == 1))
 		ret = okfn(skb);
 	return ret;
 }
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 057bf22..40150f3 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -747,6 +747,16 @@
 	u64				tstamp_running;
 	u64				tstamp_stopped;
 
+	/*
+	 * timestamp shadows the actual context timing but it can
+	 * be safely used in NMI interrupt context. It reflects the
+	 * context time as it was when the event was last scheduled in.
+	 *
+	 * ctx_time already accounts for ctx->timestamp. Therefore to
+	 * compute ctx_time for a sample, simply add perf_clock().
+	 */
+	u64				shadow_ctx_time;
+
 	struct perf_event_attr		attr;
 	struct hw_perf_event		hw;
 
diff --git a/include/linux/pwm_backlight.h b/include/linux/pwm_backlight.h
index 01b3d75..e031e1a 100644
--- a/include/linux/pwm_backlight.h
+++ b/include/linux/pwm_backlight.h
@@ -8,6 +8,7 @@
 	int pwm_id;
 	unsigned int max_brightness;
 	unsigned int dft_brightness;
+	unsigned int lth_brightness;
 	unsigned int pwm_period_ns;
 	int (*init)(struct device *dev);
 	int (*notify)(struct device *dev, int brightness);
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index a39cbed..ab2baa5 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -34,19 +34,13 @@
  * needed for RCU lookups (because root->height is unreliable). The only
  * time callers need worry about this is when doing a lookup_slot under
  * RCU.
+ *
+ * The indirect pointer is also used to tag the last pointer of a node
+ * when it is shrunk, before we RCU-free the node.  See the shrink code
+ * for details.
  */
 #define RADIX_TREE_INDIRECT_PTR	1
-#define RADIX_TREE_RETRY ((void *)-1UL)
 
-static inline void *radix_tree_ptr_to_indirect(void *ptr)
-{
-	return (void *)((unsigned long)ptr | RADIX_TREE_INDIRECT_PTR);
-}
-
-static inline void *radix_tree_indirect_to_ptr(void *ptr)
-{
-	return (void *)((unsigned long)ptr & ~RADIX_TREE_INDIRECT_PTR);
-}
 #define radix_tree_indirect_to_ptr(ptr) \
 	radix_tree_indirect_to_ptr((void __force *)(ptr))
 
@@ -140,16 +134,29 @@
  *		removed.
  *
  * For use with radix_tree_lookup_slot().  Caller must hold tree at least read
- * locked across slot lookup and dereference.  More likely, will be used with
- * radix_tree_replace_slot(), as well, so caller will hold tree write locked.
+ * locked across slot lookup and dereference. Not required if write lock is
+ * held (ie. items cannot be concurrently inserted).
+ *
+ * radix_tree_deref_retry must be used to confirm validity of the pointer if
+ * only the read lock is held.
  */
 static inline void *radix_tree_deref_slot(void **pslot)
 {
-	void *ret = rcu_dereference(*pslot);
-	if (unlikely(radix_tree_is_indirect_ptr(ret)))
-		ret = RADIX_TREE_RETRY;
-	return ret;
+	return rcu_dereference(*pslot);
 }
+
+/**
+ * radix_tree_deref_retry	- check radix_tree_deref_slot
+ * @arg:	pointer returned by radix_tree_deref_slot
+ * Returns:	0 if no retry is required, non-zero if the lookup must be retried
+ *
+ * radix_tree_deref_retry must be used with radix_tree_deref_slot.
+ */
+static inline int radix_tree_deref_retry(void *arg)
+{
+	return unlikely((unsigned long)arg & RADIX_TREE_INDIRECT_PTR);
+}
+
 /**
  * radix_tree_replace_slot	- replace item in a slot
  * @pslot:	pointer to slot, returned by radix_tree_lookup_slot
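A minimal lookup sketch under the new protocol (my_tree and index are
hypothetical): callers holding only rcu_read_lock() must check
radix_tree_deref_retry() and restart the lookup when it fires.

	void **slot;
	void *item;

	rcu_read_lock();
retry:
	slot = radix_tree_lookup_slot(&my_tree, index);
	item = slot ? radix_tree_deref_slot(slot) : NULL;
	if (radix_tree_deref_retry(item))
		goto retry;	/* the tree shrank under us; look up again */
	/* ... use item ... */
	rcu_read_unlock();
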
diff --git a/include/linux/resource.h b/include/linux/resource.h
index 88d36f9..d01c96c 100644
--- a/include/linux/resource.h
+++ b/include/linux/resource.h
@@ -2,6 +2,7 @@
 #define _LINUX_RESOURCE_H
 
 #include <linux/time.h>
+#include <linux/types.h>
 
 /*
  * Resource control/accounting header file for linux
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index d42f2744..bbad657 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -6,7 +6,6 @@
 #include <linux/if_link.h>
 #include <linux/if_addr.h>
 #include <linux/neighbour.h>
-#include <linux/netdevice.h>
 
 /* rtnetlink families. Values up to 127 are reserved for real address
  * families, values above 128 may be used arbitrarily.
@@ -606,6 +605,7 @@
 #ifdef __KERNEL__
 
 #include <linux/mutex.h>
+#include <linux/netdevice.h>
 
 static __inline__ int rtattr_strcmp(const struct rtattr *rta, const char *str)
 {
diff --git a/include/linux/sh_clk.h b/include/linux/sh_clk.h
index 4dca992..cea0c38 100644
--- a/include/linux/sh_clk.h
+++ b/include/linux/sh_clk.h
@@ -122,6 +122,10 @@
 long clk_rate_div_range_round(struct clk *clk, unsigned int div_min,
 			      unsigned int div_max, unsigned long rate);
 
+long clk_round_parent(struct clk *clk, unsigned long target,
+		      unsigned long *best_freq, unsigned long *parent_freq,
+		      unsigned int div_min, unsigned int div_max);
+
 #define SH_CLK_MSTP32(_parent, _enable_reg, _enable_bit, _flags)	\
 {									\
 	.parent		= _parent,					\
diff --git a/include/linux/sh_timer.h b/include/linux/sh_timer.h
index 864bd56..4d9dcd1 100644
--- a/include/linux/sh_timer.h
+++ b/include/linux/sh_timer.h
@@ -5,7 +5,6 @@
 	char *name;
 	long channel_offset;
 	int timer_bit;
-	char *clk;
 	unsigned long clockevent_rating;
 	unsigned long clocksource_rating;
 };
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index e6ba898..19f37a6 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -386,9 +386,10 @@
 #else
 	__u8			deliver_no_wcard:1;
 #endif
+	__u8			ooo_okay:1;
 	kmemcheck_bitfield_end(flags2);
 
-	/* 0/14 bit hole */
+	/* 0/13 bit hole */
 
 #ifdef CONFIG_NET_DMA
 	dma_cookie_t		dma_cookie;
diff --git a/include/linux/snmp.h b/include/linux/snmp.h
index ebb0c80..12b2b18 100644
--- a/include/linux/snmp.h
+++ b/include/linux/snmp.h
@@ -230,6 +230,7 @@
 	LINUX_MIB_TCPMINTTLDROP, /* RFC 5082 */
 	LINUX_MIB_TCPDEFERACCEPTDROP,
 	LINUX_MIB_IPRPFILTER, /* IP Reverse Path Filter (rp_filter) */
+	LINUX_MIB_TCPTIMEWAITOVERFLOW,		/* TCPTimeWaitOverflow */
 	__LINUX_MIB_MAX
 };
 
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index d66c617..e103529 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -40,9 +40,9 @@
 	int pmt;
 	void (*fix_mac_speed)(void *priv, unsigned int speed);
 	void (*bus_setup)(void __iomem *ioaddr);
-#ifdef CONFIG_STM_DRIVERS
-	struct stm_pad_config *pad_config;
-#endif
+	int (*init)(struct platform_device *pdev);
+	void (*exit)(struct platform_device *pdev);
+	void *custom_cfg;
 	void *bsp_priv;
 };
 
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index bbdb680..aea0d438 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -82,13 +82,6 @@
 	struct net		*xpt_net;
 };
 
-static inline void register_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u)
-{
-	spin_lock(&xpt->xpt_lock);
-	list_add(&u->list, &xpt->xpt_users);
-	spin_unlock(&xpt->xpt_lock);
-}
-
 static inline void unregister_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u)
 {
 	spin_lock(&xpt->xpt_lock);
@@ -96,6 +89,23 @@
 	spin_unlock(&xpt->xpt_lock);
 }
 
+static inline int register_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u)
+{
+	spin_lock(&xpt->xpt_lock);
+	if (test_bit(XPT_CLOSE, &xpt->xpt_flags)) {
+		/*
+		 * The connection is about to be deleted soon (or,
+		 * worse, may already be deleted--in which case we've
+		 * already notified the xpt_users).
+		 */
+		spin_unlock(&xpt->xpt_lock);
+		return -ENOTCONN;
+	}
+	list_add(&u->list, &xpt->xpt_users);
+	spin_unlock(&xpt->xpt_lock);
+	return 0;
+}
+
 int	svc_reg_xprt_class(struct svc_xprt_class *);
 void	svc_unreg_xprt_class(struct svc_xprt_class *);
 void	svc_xprt_init(struct svc_xprt_class *, struct svc_xprt *,
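Since register_xpt_user() can now fail, a caller sketch (my_user is
hypothetical) looks roughly like:

	if (register_xpt_user(xprt, &my_user) < 0) {
		/* XPT_CLOSE was already set: the callback will never run,
		 * so perform the cleanup it would have done. */
	}
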
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 2a75474..c7ea9bc 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -50,7 +50,7 @@
 #define N_V253		19	/* Codec control over voice modem */
 #define N_CAIF		20      /* CAIF protocol for talking to modems */
 #define N_GSM0710	21	/* GSM 0710 Mux */
-#define N_TI_WL	22	/* for TI's WL BT, FM, GPS combo chips */
+#define N_TI_WL		22	/* for TI's WL BT, FM, GPS combo chips */
 
 /*
  * This character is the same as _POSIX_VDISABLE: it cannot be used as
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 35fe6ab..24300d8 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -797,7 +797,7 @@
  * @disconnect: Called when the interface is no longer accessible, usually
  *	because its device has been (or is being) disconnected or the
  *	driver module is being unloaded.
- * @ioctl: Used for drivers that want to talk to userspace through
+ * @unlocked_ioctl: Used for drivers that want to talk to userspace through
  *	the "usbfs" filesystem.  This lets devices provide ways to
  *	expose information to user space regardless of where they
  *	do (or don't) show up otherwise in the filesystem.
diff --git a/include/linux/usb/musb.h b/include/linux/usb/musb.h
index ee2dd1d..2387f9f 100644
--- a/include/linux/usb/musb.h
+++ b/include/linux/usb/musb.h
@@ -89,6 +89,8 @@
 	/* A GPIO controlling VRSEL in Blackfin */
 	unsigned int	gpio_vrsel;
 	unsigned int	gpio_vrsel_active;
+	/* musb CLKIN on Blackfin, in MHz */
+	unsigned char   clkin;
 #endif
 
 };
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 7ae27a4..44842c8 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -97,6 +97,12 @@
 
 #define FLAG_LINK_INTR	0x0800		/* updates link (carrier) status */
 
+/*
+ * Indicates to usbnet that the USB driver accumulates multiple IP packets.
+ * Affects statistics (counters) and short packet handling.
+ */
+#define FLAG_MULTI_PACKET	0x1000
+
 	/* init device ... can sleep, or cause probe() failure */
 	int	(*bind)(struct usbnet *, struct usb_interface *);
 
diff --git a/include/linux/xfrm.h b/include/linux/xfrm.h
index b971e38..930fdd2 100644
--- a/include/linux/xfrm.h
+++ b/include/linux/xfrm.h
@@ -283,6 +283,7 @@
 	XFRMA_KMADDRESS,        /* struct xfrm_user_kmaddress */
 	XFRMA_ALG_AUTH_TRUNC,	/* struct xfrm_algo_auth */
 	XFRMA_MARK,		/* struct xfrm_mark */
+	XFRMA_TFCPAD,		/* __u32 */
 	__XFRMA_MAX
 
 #define XFRMA_MAX (__XFRMA_MAX - 1)
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index a944124..23710aa 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -1,8 +1,6 @@
 #ifndef _ADDRCONF_H
 #define _ADDRCONF_H
 
-#define RETRANS_TIMER	HZ
-
 #define MAX_RTR_SOLICITATIONS		3
 #define RTR_SOLICITATION_INTERVAL	(4*HZ)
 
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 90c9e28..18e5c3f 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -10,6 +10,7 @@
 extern void unix_notinflight(struct file *fp);
 extern void unix_gc(void);
 extern void wait_for_unix_gc(void);
+extern struct sock *unix_get_socket(struct file *filp);
 
 #define UNIX_HASH_SIZE	256
 
@@ -56,6 +57,7 @@
 	spinlock_t		lock;
 	unsigned int		gc_candidate : 1;
 	unsigned int		gc_maybe_cycle : 1;
+	unsigned char		recursion_level;
 	struct socket_wq	peer_wq;
 };
 #define unix_sk(__sk) ((struct unix_sock *)__sk)
diff --git a/include/net/caif/caif_dev.h b/include/net/caif/caif_dev.h
index 6da573c..8eff83b 100644
--- a/include/net/caif/caif_dev.h
+++ b/include/net/caif/caif_dev.h
@@ -28,7 +28,7 @@
  * @sockaddr:		Socket address to connect.
  * @priority:		Priority of the connection.
  * @link_selector:	Link selector (high bandwidth or low latency)
- * @link_name:		Name of the CAIF Link Layer to use.
+ * @ifindex:		kernel index of the interface.
  * @param:		Connect Request parameters (CAIF_SO_REQ_PARAM).
  *
  * This struct is used when connecting a CAIF channel.
@@ -39,7 +39,7 @@
 	struct sockaddr_caif sockaddr;
 	enum caif_channel_priority priority;
 	enum caif_link_selector link_selector;
-	char link_name[16];
+	int ifindex;
 	struct caif_param param;
 };
 
diff --git a/include/net/caif/caif_spi.h b/include/net/caif/caif_spi.h
index ce4570d..87c3d11 100644
--- a/include/net/caif/caif_spi.h
+++ b/include/net/caif/caif_spi.h
@@ -121,6 +121,8 @@
 	wait_queue_head_t wait;
 	spinlock_t lock;
 	bool flow_stop;
+	bool slave;
+	bool slave_talked;
 #ifdef CONFIG_DEBUG_FS
 	enum cfspi_state dbg_state;
 	u16 pcmd;
diff --git a/include/net/caif/cfcnfg.h b/include/net/caif/cfcnfg.h
index bd646fa..f688478 100644
--- a/include/net/caif/cfcnfg.h
+++ b/include/net/caif/cfcnfg.h
@@ -139,10 +139,10 @@
 		     enum cfcnfg_phy_preference phy_pref);
 
 /**
- * cfcnfg_get_named() - Get the Physical Identifier of CAIF Link Layer
+ * cfcnfg_get_id_from_ifi() - Get the CAIF Physical Identifier for an ifindex;
+ * 			it maps the kernel interface index to the CAIF physical id.
  * @cnfg:	Configuration object
- * @name:	Name of the Physical Layer (Caif Link Layer)
+ * @ifi:	ifindex obtained from socket.c bindtodevice.
  */
-int cfcnfg_get_named(struct cfcnfg *cnfg, char *name);
-
+int cfcnfg_get_id_from_ifi(struct cfcnfg *cnfg, int ifi);
 #endif				/* CFCNFG_H_ */
diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
index 9402543..e54f639 100644
--- a/include/net/caif/cfctrl.h
+++ b/include/net/caif/cfctrl.h
@@ -51,7 +51,7 @@
 	void (*restart_rsp)(void);
 	void (*radioset_rsp)(void);
 	void (*reject_rsp)(struct cflayer *layer, u8 linkid,
-				struct cflayer *client_layer);;
+				struct cflayer *client_layer);
 };
 
 /* Link Setup Parameters for CAIF-Links. */
diff --git a/include/net/dn.h b/include/net/dn.h
index e5469f7..a514a3c 100644
--- a/include/net/dn.h
+++ b/include/net/dn.h
@@ -225,7 +225,7 @@
 extern int decnet_dr_count;
 extern int decnet_no_fc_max_cwnd;
 
-extern int sysctl_decnet_mem[3];
+extern long sysctl_decnet_mem[3];
 extern int sysctl_decnet_wmem[3];
 extern int sysctl_decnet_rmem[3];
 
diff --git a/include/net/dn_dev.h b/include/net/dn_dev.h
index 0916bbf..b9e32db 100644
--- a/include/net/dn_dev.h
+++ b/include/net/dn_dev.h
@@ -5,13 +5,14 @@
 struct dn_dev;
 
 struct dn_ifaddr {
-	struct dn_ifaddr *ifa_next;
+	struct dn_ifaddr __rcu *ifa_next;
 	struct dn_dev    *ifa_dev;
 	__le16            ifa_local;
 	__le16            ifa_address;
 	__u8              ifa_flags;
 	__u8              ifa_scope;
 	char              ifa_label[IFNAMSIZ];
+	struct rcu_head   rcu;
 };
 
 #define DN_DEV_S_RU  0 /* Run - working normally   */
@@ -83,7 +84,7 @@
 
 
 struct dn_dev {
-	struct dn_ifaddr *ifa_list;
+	struct dn_ifaddr __rcu *ifa_list;
 	struct net_device *dev;
 	struct dn_dev_parms parms;
 	char use_long;
@@ -171,19 +172,27 @@
 
 static inline int dn_dev_islocal(struct net_device *dev, __le16 addr)
 {
-	struct dn_dev *dn_db = dev->dn_ptr;
+	struct dn_dev *dn_db;
 	struct dn_ifaddr *ifa;
+	int res = 0;
 
+	rcu_read_lock();
+	dn_db = rcu_dereference(dev->dn_ptr);
 	if (dn_db == NULL) {
 		printk(KERN_DEBUG "dn_dev_islocal: Called for non DECnet device\n");
-		return 0;
+		goto out;
 	}
 
-	for(ifa = dn_db->ifa_list; ifa; ifa = ifa->ifa_next)
-		if ((addr ^ ifa->ifa_local) == 0)
-			return 1;
-
-	return 0;
+	for (ifa = rcu_dereference(dn_db->ifa_list);
+	     ifa != NULL;
+	     ifa = rcu_dereference(ifa->ifa_next))
+		if ((addr ^ ifa->ifa_local) == 0) {
+			res = 1;
+			break;
+		}
+out:
+	rcu_read_unlock();
+	return res;
 }
 
 #endif /* _NET_DN_DEV_H */
diff --git a/include/net/dn_route.h b/include/net/dn_route.h
index ccadab3..9b185df 100644
--- a/include/net/dn_route.h
+++ b/include/net/dn_route.h
@@ -80,6 +80,16 @@
 	unsigned rt_type;
 };
 
+static inline bool dn_is_input_route(struct dn_route *rt)
+{
+	return rt->fl.iif != 0;
+}
+
+static inline bool dn_is_output_route(struct dn_route *rt)
+{
+	return rt->fl.iif == 0;
+}
+
 extern void dn_route_init(void);
 extern void dn_route_cleanup(void);
 
diff --git a/include/net/dst.h b/include/net/dst.h
index ffe9cb7..755ac6c 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -70,7 +70,7 @@
 
 	struct  dst_ops	        *ops;
 
-	u32			metrics[RTAX_MAX];
+	u32			_metrics[RTAX_MAX];
 
 #ifdef CONFIG_NET_CLS_ROUTE
 	__u32			tclassid;
@@ -94,19 +94,46 @@
 	int			__use;
 	unsigned long		lastuse;
 	union {
-		struct dst_entry *next;
-		struct rtable __rcu *rt_next;
-		struct rt6_info   *rt6_next;
-		struct dn_route  *dn_next;
+		struct dst_entry	*next;
+		struct rtable __rcu	*rt_next;
+		struct rt6_info		*rt6_next;
+		struct dn_route __rcu	*dn_next;
 	};
 };
 
 #ifdef __KERNEL__
 
 static inline u32
-dst_metric(const struct dst_entry *dst, int metric)
+dst_metric_raw(const struct dst_entry *dst, const int metric)
 {
-	return dst->metrics[metric-1];
+	return dst->_metrics[metric-1];
+}
+
+static inline u32
+dst_metric(const struct dst_entry *dst, const int metric)
+{
+	WARN_ON_ONCE(metric == RTAX_HOPLIMIT);
+	return dst_metric_raw(dst, metric);
+}
+
+static inline void dst_metric_set(struct dst_entry *dst, int metric, u32 val)
+{
+	dst->_metrics[metric-1] = val;
+}
+
+static inline void dst_import_metrics(struct dst_entry *dst, const u32 *src_metrics)
+{
+	memcpy(dst->_metrics, src_metrics, RTAX_MAX * sizeof(u32));
+}
+
+static inline void dst_copy_metrics(struct dst_entry *dest, const struct dst_entry *src)
+{
+	dst_import_metrics(dest, src->_metrics);
+}
+
+static inline u32 *dst_metrics_ptr(struct dst_entry *dst)
+{
+	return dst->_metrics;
 }
 
 static inline u32
@@ -134,7 +161,7 @@
 static inline void set_dst_metric_rtt(struct dst_entry *dst, int metric,
 				      unsigned long rtt)
 {
-	dst->metrics[metric-1] = jiffies_to_msecs(rtt);
+	dst_metric_set(dst, metric, jiffies_to_msecs(rtt));
 }
 
 static inline u32
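For out-of-tree code, the conversion implied by the new accessors is roughly
as follows (dst, mtu, new_dst and old_dst are hypothetical):

	/* before: dst->metrics[RTAX_MTU - 1] = mtu; */
	dst_metric_set(dst, RTAX_MTU, mtu);

	/* and wholesale copies go through the helper: */
	dst_copy_metrics(new_dst, old_dst);
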
diff --git a/include/net/flow.h b/include/net/flow.h
index 0ac3fb5..7196e68 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -67,6 +67,7 @@
 		} dnports;
 
 		__be32		spi;
+		__be32		gre_key;
 
 		struct {
 			__u8	type;
@@ -78,6 +79,7 @@
 #define fl_icmp_code	uli_u.icmpt.code
 #define fl_ipsec_spi	uli_u.spi
 #define fl_mh_type	uli_u.mht.type
+#define fl_gre_key	uli_u.gre_key
 	__u32           secid;	/* used by xfrm; see secid.txt */
 } __attribute__((__aligned__(BITS_PER_LONG/8)));
 
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index f95ff8d..04977ee 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -89,10 +89,11 @@
 struct ipv6_mc_socklist {
 	struct in6_addr		addr;
 	int			ifindex;
-	struct ipv6_mc_socklist *next;
+	struct ipv6_mc_socklist __rcu *next;
 	rwlock_t		sflock;
 	unsigned int		sfmode;		/* MCAST_{INCLUDE,EXCLUDE} */
 	struct ip6_sf_socklist	*sflist;
+	struct rcu_head		rcu;
 };
 
 struct ip6_sf_list {
diff --git a/include/net/inet6_connection_sock.h b/include/net/inet6_connection_sock.h
index aae08f6..ff01350 100644
--- a/include/net/inet6_connection_sock.h
+++ b/include/net/inet6_connection_sock.h
@@ -25,6 +25,9 @@
 extern int inet6_csk_bind_conflict(const struct sock *sk,
 				   const struct inet_bind_bucket *tb);
 
+extern struct dst_entry* inet6_csk_route_req(struct sock *sk,
+					     const struct request_sock *req);
+
 extern struct request_sock *inet6_csk_search_req(const struct sock *sk,
 						 struct request_sock ***prevp,
 						 const __be16 rport,
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index e4f494b..6c93a56 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -43,7 +43,7 @@
 	struct sock *(*syn_recv_sock)(struct sock *sk, struct sk_buff *skb,
 				      struct request_sock *req,
 				      struct dst_entry *dst);
-	int	    (*remember_stamp)(struct sock *sk);
+	struct inet_peer *(*get_peer)(struct sock *sk, bool *release_it);
 	u16	    net_header_len;
 	u16	    sockaddr_len;
 	int	    (*setsockopt)(struct sock *sk, int level, int optname, 
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 1989cfd..8181498 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -116,8 +116,9 @@
 	struct ipv6_pinfo	*pinet6;
 #endif
 	/* Socket demultiplex comparisons on incoming packets. */
-	__be32			inet_daddr;
-	__be32			inet_rcv_saddr;
+#define inet_daddr		sk.__sk_common.skc_daddr
+#define inet_rcv_saddr		sk.__sk_common.skc_rcv_saddr
+
 	__be16			inet_dport;
 	__u16			inet_num;
 	__be32			inet_saddr;
@@ -141,7 +142,7 @@
 				nodefrag:1;
 	int			mc_index;
 	__be32			mc_addr;
-	struct ip_mc_socklist	*mc_list;
+	struct ip_mc_socklist __rcu	*mc_list;
 	struct {
 		unsigned int		flags;
 		unsigned int		fragsize;
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index a066fdd..17404b5 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -88,12 +88,6 @@
 extern void inet_twdr_twkill_work(struct work_struct *work);
 extern void inet_twdr_twcal_tick(unsigned long data);
 
-#if (BITS_PER_LONG == 64)
-#define INET_TIMEWAIT_ADDRCMP_ALIGN_BYTES 8
-#else
-#define INET_TIMEWAIT_ADDRCMP_ALIGN_BYTES 4
-#endif
-
 struct inet_bind_bucket;
 
 /*
@@ -117,15 +111,15 @@
 #define tw_hash			__tw_common.skc_hash
 #define tw_prot			__tw_common.skc_prot
 #define tw_net			__tw_common.skc_net
+#define tw_daddr        	__tw_common.skc_daddr
+#define tw_rcv_saddr    	__tw_common.skc_rcv_saddr
 	int			tw_timeout;
 	volatile unsigned char	tw_substate;
-	/* 3 bits hole, try to pack */
 	unsigned char		tw_rcv_wscale;
+
 	/* Socket demultiplex comparisons on incoming packets. */
-	/* these five are in inet_sock */
+	/* these three are in inet_sock */
 	__be16			tw_sport;
-	__be32			tw_daddr __attribute__((aligned(INET_TIMEWAIT_ADDRCMP_ALIGN_BYTES)));
-	__be32			tw_rcv_saddr;
 	__be16			tw_dport;
 	__u16			tw_num;
 	kmemcheck_bitfield_begin(flags);
@@ -191,10 +185,10 @@
 	return (struct inet_timewait_sock *)sk;
 }
 
-static inline __be32 inet_rcv_saddr(const struct sock *sk)
+static inline __be32 sk_rcv_saddr(const struct sock *sk)
 {
-	return likely(sk->sk_state != TCP_TIME_WAIT) ?
-		inet_sk(sk)->inet_rcv_saddr : inet_twsk(sk)->tw_rcv_saddr;
+/* both inet_sk() and inet_twsk() store rcv_saddr in skc_rcv_saddr */
+	return sk->__sk_common.skc_rcv_saddr;
 }
 
 extern void inet_twsk_put(struct inet_timewait_sock *tw);
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index fe239bf..599d96e 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -11,12 +11,21 @@
 #include <linux/init.h>
 #include <linux/jiffies.h>
 #include <linux/spinlock.h>
+#include <net/ipv6.h>
 #include <asm/atomic.h>
 
+struct inetpeer_addr {
+	union {
+		__be32		a4;
+		__be32		a6[4];
+	};
+	__u16	family;
+};
+
 struct inet_peer {
 	/* group together avl_left,avl_right,v4daddr to speedup lookups */
 	struct inet_peer __rcu	*avl_left, *avl_right;
-	__be32			v4daddr;	/* peer's address */
+	struct inetpeer_addr	daddr;
 	__u32			avl_height;
 	struct list_head	unused;
 	__u32			dtime;		/* the time of last use of not
@@ -26,7 +35,6 @@
 	 * Once inet_peer is queued for deletion (refcnt == -1), following fields
 	 * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp
 	 * We can share memory with rcu_head to keep inet_peer small
-	 * (less then 64 bytes)
 	 */
 	union {
 		struct {
@@ -42,7 +50,25 @@
 void			inet_initpeers(void) __init;
 
 /* can be called with or without local BH being disabled */
-struct inet_peer	*inet_getpeer(__be32 daddr, int create);
+struct inet_peer	*inet_getpeer(struct inetpeer_addr *daddr, int create);
+
+static inline struct inet_peer *inet_getpeer_v4(__be32 v4daddr, int create)
+{
+	struct inetpeer_addr daddr;
+
+	daddr.a4 = v4daddr;
+	daddr.family = AF_INET;
+	return inet_getpeer(&daddr, create);
+}
+
+static inline struct inet_peer *inet_getpeer_v6(struct in6_addr *v6daddr, int create)
+{
+	struct inetpeer_addr daddr;
+
+	ipv6_addr_copy((struct in6_addr *)daddr.a6, v6daddr);
+	daddr.family = AF_INET6;
+	return inet_getpeer(&daddr, create);
+}
 
 /* can be called from BH context or outside */
 extern void inet_putpeer(struct inet_peer *p);
diff --git a/include/net/ip.h b/include/net/ip.h
index 86e2b18..67fac78 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -201,7 +201,6 @@
 	return test_bit(port, sysctl_local_reserved_ports);
 }
 
-extern int sysctl_ip_default_ttl;
 extern int sysctl_ip_nonlocal_bind;
 
 extern struct ctl_path net_core_path[];
@@ -428,15 +427,6 @@
 extern void	ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
 			       u32 info);
 
-/* sysctl helpers - any sysctl which holds a value that ends up being
- * fed into the routing cache should use these handlers.
- */
-int ipv4_doint_and_flush(ctl_table *ctl, int write,
-			 void __user *buffer,
-			 size_t *lenp, loff_t *ppos);
-int ipv4_doint_and_flush_strategy(ctl_table *table,
-				  void __user *oldval, size_t __user *oldlenp,
-				  void __user *newval, size_t newlen);
 #ifdef CONFIG_PROC_FS
 extern int ip_misc_proc_init(void);
 #endif
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 062a823..708ff7c 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -21,6 +21,7 @@
 #include <net/dst.h>
 #include <net/flow.h>
 #include <net/netlink.h>
+#include <net/inetpeer.h>
 
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
 #define FIB6_TABLE_HASHSZ 256
@@ -109,6 +110,7 @@
 	u32				rt6i_metric;
 
 	struct inet6_dev		*rt6i_idev;
+	struct inet_peer		*rt6i_peer;
 
 #ifdef CONFIG_XFRM
 	u32				rt6i_flow_cache_genid;
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 278312c..e06e0ca 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -3,7 +3,6 @@
 
 #define IP6_RT_PRIO_USER	1024
 #define IP6_RT_PRIO_ADDRCONF	256
-#define IP6_RT_PRIO_KERN	512
 
 struct route_info {
 	__u8			type;
@@ -56,6 +55,18 @@
 	return (flags >> 3) & 7;
 }
 
+extern void			rt6_bind_peer(struct rt6_info *rt,
+					      int create);
+
+static inline struct inet_peer *rt6_get_peer(struct rt6_info *rt)
+{
+	if (rt->rt6i_peer)
+		return rt->rt6i_peer;
+
+	rt6_bind_peer(rt, 0);
+	return rt->rt6i_peer;
+}
+
 extern void			ip6_route_input(struct sk_buff *skb);
 
 extern struct dst_entry *	ip6_route_output(struct net *net,
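
rt6_get_peer() is a bind-on-first-use accessor: the common case is a plain
pointer read, and rt6_bind_peer(rt, 0) presumably performs a non-creating
lookup only when rt6i_peer is still NULL. A caller sketch; use_peer_state()
is a hypothetical consumer:

	struct inet_peer *peer = rt6_get_peer(rt);

	if (peer)
		use_peer_state(peer);	/* e.g. per-peer fragment IDs */
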
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index 895997b..e0e594f 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -42,9 +42,6 @@
 #define ND_REACHABLE_TIME		(30*HZ)
 #define ND_RETRANS_TIMER		HZ
 
-#define ND_MIN_RANDOM_FACTOR		(1/2)
-#define ND_MAX_RANDOM_FACTOR		(3/2)
-
 #ifdef __KERNEL__
 
 #include <linux/compiler.h>
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 55590ab..4014b62 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -96,16 +96,16 @@
 	struct neigh_parms	*parms;
 	unsigned long		confirmed;
 	unsigned long		updated;
-	__u8			flags;
-	__u8			nud_state;
-	__u8			type;
-	__u8			dead;
+	rwlock_t		lock;
 	atomic_t		refcnt;
 	struct sk_buff_head	arp_queue;
 	struct timer_list	timer;
 	unsigned long		used;
 	atomic_t		probes;
-	rwlock_t		lock;
+	__u8			flags;
+	__u8			nud_state;
+	__u8			type;
+	__u8			dead;
 	seqlock_t		ha_lock;
 	unsigned char		ha[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))];
 	struct hh_cache		*hh;
@@ -303,7 +303,7 @@
 
 static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
 {
-	unsigned long now = ACCESS_ONCE(jiffies);
+	unsigned long now = jiffies;
 	
 	if (neigh->used != now)
 		neigh->used = now;
diff --git a/include/net/netlink.h b/include/net/netlink.h
index f3b201d..373f1a9 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -225,13 +225,15 @@
 				     u32 pid, unsigned int group, int report,
 				     gfp_t flags);
 
-extern int		nla_validate(struct nlattr *head, int len, int maxtype,
+extern int		nla_validate(const struct nlattr *head,
+				     int len, int maxtype,
 				     const struct nla_policy *policy);
-extern int		nla_parse(struct nlattr *tb[], int maxtype,
-				  struct nlattr *head, int len,
+extern int		nla_parse(struct nlattr **tb, int maxtype,
+				  const struct nlattr *head, int len,
 				  const struct nla_policy *policy);
 extern int		nla_policy_len(const struct nla_policy *, int);
-extern struct nlattr *	nla_find(struct nlattr *head, int len, int attrtype);
+extern struct nlattr *	nla_find(const struct nlattr *head,
+				 int len, int attrtype);
 extern size_t		nla_strlcpy(char *dst, const struct nlattr *nla,
 				    size_t dstsize);
 extern int		nla_memcpy(void *dest, const struct nlattr *src, int count);
@@ -346,7 +348,8 @@
  * Returns the next netlink message in the message stream and
  * decrements remaining by the size of the current message.
  */
-static inline struct nlmsghdr *nlmsg_next(struct nlmsghdr *nlh, int *remaining)
+static inline struct nlmsghdr *
+nlmsg_next(const struct nlmsghdr *nlh, int *remaining)
 {
 	int totlen = NLMSG_ALIGN(nlh->nlmsg_len);
 
@@ -384,7 +387,7 @@
  *
  * Returns the first attribute which matches the specified type.
  */
-static inline struct nlattr *nlmsg_find_attr(struct nlmsghdr *nlh,
+static inline struct nlattr *nlmsg_find_attr(const struct nlmsghdr *nlh,
 					     int hdrlen, int attrtype)
 {
 	return nla_find(nlmsg_attrdata(nlh, hdrlen),
@@ -398,7 +401,8 @@
  * @maxtype: maximum attribute type to be expected
  * @policy: validation policy
  */
-static inline int nlmsg_validate(struct nlmsghdr *nlh, int hdrlen, int maxtype,
+static inline int nlmsg_validate(const struct nlmsghdr *nlh,
+				 int hdrlen, int maxtype,
 				 const struct nla_policy *policy)
 {
 	if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
@@ -727,7 +731,8 @@
  *
  * Returns the first attribute which matches the specified type.
  */
-static inline struct nlattr *nla_find_nested(struct nlattr *nla, int attrtype)
+static inline struct nlattr *
+nla_find_nested(const struct nlattr *nla, int attrtype)
 {
 	return nla_find(nla_data(nla), nla_len(nla), attrtype);
 }
@@ -1032,7 +1037,7 @@
  *
  * Returns 0 on success or a negative error code.
  */
-static inline int nla_validate_nested(struct nlattr *start, int maxtype,
+static inline int nla_validate_nested(const struct nlattr *start, int maxtype,
 				      const struct nla_policy *policy)
 {
 	return nla_validate(nla_data(start), nla_len(start), maxtype, policy);
diff --git a/include/net/netns/generic.h b/include/net/netns/generic.h
index 81a31c0..3419bf5 100644
--- a/include/net/netns/generic.h
+++ b/include/net/netns/generic.h
@@ -30,7 +30,7 @@
 	void *ptr[0];
 };
 
-static inline void *net_generic(struct net *net, int id)
+static inline void *net_generic(const struct net *net, int id)
 {
 	struct net_generic *ng;
 	void *ptr;
diff --git a/include/net/route.h b/include/net/route.h
index 7e5e73b..2700236 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -55,8 +55,6 @@
 	/* Cache lookup keys */
 	struct flowi		fl;
 
-	struct in_device	*idev;
-	
 	int			rt_genid;
 	unsigned		rt_flags;
 	__u16			rt_type;
@@ -73,6 +71,16 @@
 	struct inet_peer	*peer; /* long-living peer info */
 };
 
+static inline bool rt_is_input_route(struct rtable *rt)
+{
+	return rt->fl.iif != 0;
+}
+
+static inline bool rt_is_output_route(struct rtable *rt)
+{
+	return rt->fl.iif == 0;
+}
+
 struct ip_rt_acct {
 	__u32 	o_bytes;
 	__u32 	o_packets;
@@ -161,14 +169,12 @@
 {
 	struct flowi fl = { .oif = oif,
 			    .mark = sk->sk_mark,
-			    .nl_u = { .ip4_u = { .daddr = dst,
-						 .saddr = src,
-						 .tos   = tos } },
+			    .fl4_dst = dst,
+			    .fl4_src = src,
+			    .fl4_tos = tos,
 			    .proto = protocol,
-			    .uli_u = { .ports =
-				       { .sport = sport,
-					 .dport = dport } } };
-
+			    .fl_ip_sport = sport,
+			    .fl_ip_dport = dport };
 	int err;
 	struct net *net = sock_net(sk);
 
@@ -225,4 +231,15 @@
 	return skb_rtable(skb)->rt_iif;
 }
 
+extern int sysctl_ip_default_ttl;
+
+static inline int ip4_dst_hoplimit(const struct dst_entry *dst)
+{
+	int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT);
+
+	if (hoplimit == 0)
+		hoplimit = sysctl_ip_default_ttl;
+	return hoplimit;
+}
+
 #endif	/* _ROUTE_H */
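
ip4_dst_hoplimit() centralizes the TTL choice: a per-route RTAX_HOPLIMIT
metric wins, and 0 falls back to sysctl_ip_default_ttl (which is why that
declaration moved here from net/ip.h earlier in this merge). A one-line
caller sketch, assuming iph points at the IPv4 header being built and
skb_dst(skb) is valid:

	iph->ttl = ip4_dst_hoplimit(skb_dst(skb));

The rt_is_input_route()/rt_is_output_route() helpers likewise replace
open-coded rt->fl.iif tests at call sites.
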
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index e013c68..4093ca7 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -83,6 +83,41 @@
 extern int	rtnl_link_register(struct rtnl_link_ops *ops);
 extern void	rtnl_link_unregister(struct rtnl_link_ops *ops);
 
+/**
+ * 	struct rtnl_af_ops - rtnetlink address family operations
+ *
+ *	@list: Used internally
+ * 	@family: Address family
+ * 	@fill_link_af: Function to fill IFLA_AF_SPEC with address family
+ * 		       specific netlink attributes.
+ * 	@get_link_af_size: Function to calculate size of address family specific
+ * 			   netlink attributes exclusive of the container attribute.
+ *	@validate_link_af: Validate an IFLA_AF_SPEC attribute; must check attr
+ *			   for invalid configuration settings.
+ * 	@set_link_af: Function to parse an IFLA_AF_SPEC attribute and modify
+ *		      net_device accordingly.
+ */
+struct rtnl_af_ops {
+	struct list_head	list;
+	int			family;
+
+	int			(*fill_link_af)(struct sk_buff *skb,
+						const struct net_device *dev);
+	size_t			(*get_link_af_size)(const struct net_device *dev);
+
+	int			(*validate_link_af)(const struct net_device *dev,
+						    const struct nlattr *attr);
+	int			(*set_link_af)(struct net_device *dev,
+					       const struct nlattr *attr);
+};
+
+extern int	__rtnl_af_register(struct rtnl_af_ops *ops);
+extern void	__rtnl_af_unregister(struct rtnl_af_ops *ops);
+
+extern int	rtnl_af_register(struct rtnl_af_ops *ops);
+extern void	rtnl_af_unregister(struct rtnl_af_ops *ops);
+
+
 extern struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]);
 extern struct net_device *rtnl_create_link(struct net *src_net, struct net *net,
 	char *ifname, const struct rtnl_link_ops *ops, struct nlattr *tb[]);
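
A minimal registration sketch for the new per-family hooks; the hypo_* names,
the attribute type, and the payload are illustrative, not from the patch:

	static size_t hypo_get_link_af_size(const struct net_device *dev)
	{
		return nla_total_size(4);	/* one u32 attribute */
	}

	static int hypo_fill_link_af(struct sk_buff *skb,
				     const struct net_device *dev)
	{
		return nla_put_u32(skb, 1, 0);	/* type 1, value 0 */
	}

	static struct rtnl_af_ops hypo_af_ops = {
		.family		  = AF_INET,
		.fill_link_af	  = hypo_fill_link_af,
		.get_link_af_size = hypo_get_link_af_size,
	};

	/* from module init, e.g.: */
	rtnl_af_register(&hypo_af_ops);
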
diff --git a/include/net/scm.h b/include/net/scm.h
index 3165650..745460f 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -10,11 +10,12 @@
 /* Well, we should have at least one descriptor open
  * to accept passed FDs 8)
  */
-#define SCM_MAX_FD	255
+#define SCM_MAX_FD	253
 
 struct scm_fp_list {
 	struct list_head	list;
-	int			count;
+	short			count;
+	short			max;
 	struct file		*fp[SCM_MAX_FD];
 };
 
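
Lowering SCM_MAX_FD from 255 to 253 (together with shrinking count to a short
and adding max) presumably makes struct scm_fp_list fit a power-of-two kmalloc
bucket on 64-bit. A back-of-envelope check, assuming 8-byte pointers and a
16-byte list_head:

	sizeof(struct list_head)          16
	two shorts (count, max)            4
	padding to 8-byte alignment        4
	253 * sizeof(struct file *)     2024
	total                           2048 bytes

With 255 entries and an int count the same layout came to 2064 bytes,
spilling into the next (4096-byte) bucket.
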
diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h
index 2c55a7e..c01dc99 100644
--- a/include/net/sctp/command.h
+++ b/include/net/sctp/command.h
@@ -111,9 +111,6 @@
 	SCTP_CMD_LAST
 } sctp_verb_t;
 
-#define SCTP_CMD_MAX		(SCTP_CMD_LAST - 1)
-#define SCTP_CMD_NUM_VERBS	(SCTP_CMD_MAX + 1)
-
 /* How many commands can you put in an sctp_cmd_seq_t?
  * This is a rather arbitrary number, ideally derived from a careful
  * analysis of the state functions, but in reality just taken from
diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
index 6390884..c70d8cc 100644
--- a/include/net/sctp/constants.h
+++ b/include/net/sctp/constants.h
@@ -61,7 +61,6 @@
  * symbols.  CIDs are dense through SCTP_CID_BASE_MAX.
  */
 #define SCTP_CID_BASE_MAX		SCTP_CID_SHUTDOWN_COMPLETE
-#define SCTP_CID_MAX			SCTP_CID_ASCONF_ACK
 
 #define SCTP_NUM_BASE_CHUNK_TYPES	(SCTP_CID_BASE_MAX + 1)
 
@@ -86,9 +85,6 @@
 
 } sctp_event_t;
 
-#define SCTP_EVENT_T_MAX SCTP_EVENT_T_PRIMITIVE
-#define SCTP_EVENT_T_NUM (SCTP_EVENT_T_MAX + 1)
-
 /* As a convenience for the state machine, we append SCTP_EVENT_* and
  * SCTP_ULP_* to the list of possible chunks.
  */
@@ -162,9 +158,6 @@
 		       		- (unsigned long)(c->chunk_hdr)\
 				- sizeof(sctp_data_chunk_t)))
 
-#define SCTP_MAX_ERROR_CAUSE  SCTP_ERROR_NONEXIST_IP
-#define SCTP_NUM_ERROR_CAUSE  10
-
 /* Internal error codes */
 typedef enum {
 
@@ -266,7 +259,6 @@
 #define SCTP_TSN_MAP_INITIAL BITS_PER_LONG
 #define SCTP_TSN_MAP_INCREMENT SCTP_TSN_MAP_INITIAL
 #define SCTP_TSN_MAP_SIZE 4096
-#define SCTP_TSN_MAX_GAP  65535
 
 /* We will not record more than this many duplicate TSNs between two
  * SACKs.  The minimum PMTU is 576.  Remove all the headers and there
@@ -301,9 +293,6 @@
 
 #define SCTP_CLOCK_GRANULARITY	1	/* 1 jiffy */
 
-#define SCTP_DEF_MAX_INIT 6
-#define SCTP_DEF_MAX_SEND 10
-
 #define SCTP_DEFAULT_COOKIE_LIFE	(60 * 1000) /* 60 seconds */
 
 #define SCTP_DEFAULT_MINWINDOW	1500	/* default minimum rwnd size */
@@ -317,9 +306,6 @@
 					 */
 #define SCTP_DEFAULT_MINSEGMENT 512	/* MTU size ... if no mtu disc */
 #define SCTP_HOW_MANY_SECRETS 2		/* How many secrets I keep */
-#define SCTP_HOW_LONG_COOKIE_LIVE 3600	/* How many seconds the current
-					 * secret will live?
-					 */
 #define SCTP_SECRET_SIZE 32		/* Number of octets in a 256 bits. */
 
 #define SCTP_SIGNATURE_SIZE 20	        /* size of a SLA-1 signature */
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 69fef4f..cc9185c 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -261,8 +261,6 @@
 #define sctp_assoc_hashsize		(sctp_globals.assoc_hashsize)
 #define sctp_assoc_hashtable		(sctp_globals.assoc_hashtable)
 #define sctp_port_hashsize		(sctp_globals.port_hashsize)
-#define sctp_port_rover			(sctp_globals.port_rover)
-#define sctp_port_alloc_lock		(sctp_globals.port_alloc_lock)
 #define sctp_port_hashtable		(sctp_globals.port_hashtable)
 #define sctp_local_addr_list		(sctp_globals.local_addr_list)
 #define sctp_local_addr_lock		(sctp_globals.addr_list_lock)
diff --git a/include/net/snmp.h b/include/net/snmp.h
index a0e6180..762e2ab 100644
--- a/include/net/snmp.h
+++ b/include/net/snmp.h
@@ -60,9 +60,7 @@
 };
 
 /* ICMP */
-#define ICMP_MIB_DUMMY	__ICMP_MIB_MAX
-#define ICMP_MIB_MAX	(__ICMP_MIB_MAX + 1)
-
+#define ICMP_MIB_MAX	__ICMP_MIB_MAX
 struct icmp_mib {
 	unsigned long	mibs[ICMP_MIB_MAX];
 };
diff --git a/include/net/sock.h b/include/net/sock.h
index c7a7362..82e8603 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -57,7 +57,7 @@
 #include <linux/rculist_nulls.h>
 #include <linux/poll.h>
 
-#include <asm/atomic.h>
+#include <linux/atomic.h>
 #include <net/dst.h>
 #include <net/checksum.h>
 
@@ -105,10 +105,8 @@
 
 /**
  *	struct sock_common - minimal network layer representation of sockets
- *	@skc_node: main hash linkage for various protocol lookup tables
- *	@skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol
- *	@skc_refcnt: reference count
- *	@skc_tx_queue_mapping: tx queue number for this connection
+ *	@skc_daddr: Foreign IPv4 addr
+ *	@skc_rcv_saddr: Bound local IPv4 addr
  *	@skc_hash: hash value used with various protocol lookup tables
  *	@skc_u16hashes: two u16 hash values used by UDP lookup tables
  *	@skc_family: network address family
@@ -119,20 +117,20 @@
  *	@skc_portaddr_node: second hash linkage for UDP/UDP-Lite protocol
  *	@skc_prot: protocol handlers inside a network family
  *	@skc_net: reference to the network namespace of this socket
+ *	@skc_node: main hash linkage for various protocol lookup tables
+ *	@skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol
+ *	@skc_tx_queue_mapping: tx queue number for this connection
+ *	@skc_refcnt: reference count
  *
  *	This is the minimal network layer representation of sockets, the header
  *	for struct sock and struct inet_timewait_sock.
  */
 struct sock_common {
-	/*
-	 * first fields are not copied in sock_copy()
+	/* skc_daddr and skc_rcv_saddr must be grouped:
+	 * cf. INET_MATCH() and INET_TW_MATCH()
 	 */
-	union {
-		struct hlist_node	skc_node;
-		struct hlist_nulls_node skc_nulls_node;
-	};
-	atomic_t		skc_refcnt;
-	int			skc_tx_queue_mapping;
+	__be32			skc_daddr;
+	__be32			skc_rcv_saddr;
 
 	union  {
 		unsigned int	skc_hash;
@@ -150,6 +148,18 @@
 #ifdef CONFIG_NET_NS
 	struct net	 	*skc_net;
 #endif
+	/*
+	 * fields between dontcopy_begin/dontcopy_end
+	 * are not copied in sock_copy()
+	 */
+	int			skc_dontcopy_begin[0];
+	union {
+		struct hlist_node	skc_node;
+		struct hlist_nulls_node skc_nulls_node;
+	};
+	int			skc_tx_queue_mapping;
+	atomic_t		skc_refcnt;
+	int			skc_dontcopy_end[0];
 };
 
 /**
@@ -232,7 +242,8 @@
 #define sk_refcnt		__sk_common.skc_refcnt
 #define sk_tx_queue_mapping	__sk_common.skc_tx_queue_mapping
 
-#define sk_copy_start		__sk_common.skc_hash
+#define sk_dontcopy_begin	__sk_common.skc_dontcopy_begin
+#define sk_dontcopy_end		__sk_common.skc_dontcopy_end
 #define sk_hash			__sk_common.skc_hash
 #define sk_family		__sk_common.skc_family
 #define sk_state		__sk_common.skc_state
@@ -241,6 +252,47 @@
 #define sk_bind_node		__sk_common.skc_bind_node
 #define sk_prot			__sk_common.skc_prot
 #define sk_net			__sk_common.skc_net
+	socket_lock_t		sk_lock;
+	struct sk_buff_head	sk_receive_queue;
+	/*
+	 * The backlog queue is special, it is always used with
+	 * the per-socket spinlock held and requires low latency
+	 * access. Therefore we special case its implementation.
+	 * Note: rmem_alloc is in this structure to fill a hole
+	 * on 64bit arches, not because it's logically part of
+	 * backlog.
+	 */
+	struct {
+		atomic_t	rmem_alloc;
+		int		len;
+		struct sk_buff	*head;
+		struct sk_buff	*tail;
+	} sk_backlog;
+#define sk_rmem_alloc sk_backlog.rmem_alloc
+	int			sk_forward_alloc;
+#ifdef CONFIG_RPS
+	__u32			sk_rxhash;
+#endif
+	atomic_t		sk_drops;
+	int			sk_rcvbuf;
+
+	struct sk_filter __rcu	*sk_filter;
+	struct socket_wq	*sk_wq;
+
+#ifdef CONFIG_NET_DMA
+	struct sk_buff_head	sk_async_wait_queue;
+#endif
+
+#ifdef CONFIG_XFRM
+	struct xfrm_policy	*sk_policy[2];
+#endif
+	unsigned long 		sk_flags;
+	struct dst_entry	*sk_dst_cache;
+	spinlock_t		sk_dst_lock;
+	atomic_t		sk_wmem_alloc;
+	atomic_t		sk_omem_alloc;
+	int			sk_sndbuf;
+	struct sk_buff_head	sk_write_queue;
 	kmemcheck_bitfield_begin(flags);
 	unsigned int		sk_shutdown  : 2,
 				sk_no_check  : 2,
@@ -248,52 +300,19 @@
 				sk_protocol  : 8,
 				sk_type      : 16;
 	kmemcheck_bitfield_end(flags);
-	int			sk_rcvbuf;
-	socket_lock_t		sk_lock;
-	/*
-	 * The backlog queue is special, it is always used with
-	 * the per-socket spinlock held and requires low latency
-	 * access. Therefore we special case it's implementation.
-	 */
-	struct {
-		struct sk_buff *head;
-		struct sk_buff *tail;
-		int len;
-	} sk_backlog;
-	struct socket_wq	*sk_wq;
-	struct dst_entry	*sk_dst_cache;
-#ifdef CONFIG_XFRM
-	struct xfrm_policy	*sk_policy[2];
-#endif
-	spinlock_t		sk_dst_lock;
-	atomic_t		sk_rmem_alloc;
-	atomic_t		sk_wmem_alloc;
-	atomic_t		sk_omem_alloc;
-	int			sk_sndbuf;
-	struct sk_buff_head	sk_receive_queue;
-	struct sk_buff_head	sk_write_queue;
-#ifdef CONFIG_NET_DMA
-	struct sk_buff_head	sk_async_wait_queue;
-#endif
 	int			sk_wmem_queued;
-	int			sk_forward_alloc;
 	gfp_t			sk_allocation;
 	int			sk_route_caps;
 	int			sk_route_nocaps;
 	int			sk_gso_type;
 	unsigned int		sk_gso_max_size;
 	int			sk_rcvlowat;
-#ifdef CONFIG_RPS
-	__u32			sk_rxhash;
-#endif
-	unsigned long 		sk_flags;
 	unsigned long	        sk_lingertime;
 	struct sk_buff_head	sk_error_queue;
 	struct proto		*sk_prot_creator;
 	rwlock_t		sk_callback_lock;
 	int			sk_err,
 				sk_err_soft;
-	atomic_t		sk_drops;
 	unsigned short		sk_ack_backlog;
 	unsigned short		sk_max_ack_backlog;
 	__u32			sk_priority;
@@ -301,7 +320,6 @@
 	const struct cred	*sk_peer_cred;
 	long			sk_rcvtimeo;
 	long			sk_sndtimeo;
-	struct sk_filter __rcu	*sk_filter;
 	void			*sk_protinfo;
 	struct timer_list	sk_timer;
 	ktime_t			sk_stamp;
@@ -509,9 +527,6 @@
 #define sk_nulls_for_each_from(__sk, node) \
 	if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \
 		hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node)
-#define sk_for_each_continue(__sk, node) \
-	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
-		hlist_for_each_entry_continue(__sk, node, sk_node)
 #define sk_for_each_safe(__sk, node, tmp, list) \
 	hlist_for_each_entry_safe(__sk, node, tmp, list, sk_node)
 #define sk_for_each_bound(__sk, node, list) \
@@ -762,7 +777,7 @@
 
 	/* Memory pressure */
 	void			(*enter_memory_pressure)(struct sock *sk);
-	atomic_t		*memory_allocated;	/* Current allocated memory. */
+	atomic_long_t		*memory_allocated;	/* Current allocated memory. */
 	struct percpu_counter	*sockets_allocated;	/* Current number of sockets. */
 	/*
 	 * Pressure flag: try to collapse.
@@ -771,7 +786,7 @@
 	 * is strict, actions are advisory and have some latency.
 	 */
 	int			*memory_pressure;
-	int			*sysctl_mem;
+	long			*sysctl_mem;
 	int			*sysctl_wmem;
 	int			*sysctl_rmem;
 	int			max_header;
@@ -1155,6 +1170,8 @@
 /* Initialise core socket variables */
 extern void sock_init_data(struct socket *sock, struct sock *sk);
 
+extern void sk_filter_release_rcu(struct rcu_head *rcu);
+
 /**
  *	sk_filter_release - release a socket filter
  *	@fp: filter to remove
@@ -1165,7 +1182,7 @@
 static inline void sk_filter_release(struct sk_filter *fp)
 {
 	if (atomic_dec_and_test(&fp->refcnt))
-		kfree(fp);
+		call_rcu_bh(&fp->rcu, sk_filter_release_rcu);
 }
 
 static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
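
The zero-length skc_dontcopy_begin/skc_dontcopy_end markers let the copy in
sock_copy() be expressed as offsets rather than a hard-coded first field
(the old sk_copy_start). A hedged sketch of the idea; the real sock_copy()
lives in net/core/sock.c and may differ in detail:

	static void sock_copy_sketch(struct sock *nsk, const struct sock *osk)
	{
		/* everything before the dontcopy window ... */
		memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));
		/* ... then everything after it, sized by the protocol */
		memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
		       osk->sk_prot->obj_size -
		       offsetof(struct sock, sk_dontcopy_end));
	}
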
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 4fee042..3f227ba 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -100,12 +100,6 @@
 #define TCP_SYNACK_RETRIES 5	/* number of times to retry passive opening a
 				 * connection: ~180sec is RFC minimum	*/
 
-
-#define TCP_ORPHAN_RETRIES 7	/* number of times to retry on an orphaned
-				 * socket. 7 is ~50sec-16min.
-				 */
-
-
 #define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
 				  * state, about 60 seconds	*/
 #define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
@@ -224,7 +218,7 @@
 extern int sysctl_tcp_reordering;
 extern int sysctl_tcp_ecn;
 extern int sysctl_tcp_dsack;
-extern int sysctl_tcp_mem[3];
+extern long sysctl_tcp_mem[3];
 extern int sysctl_tcp_wmem[3];
 extern int sysctl_tcp_rmem[3];
 extern int sysctl_tcp_app_win;
@@ -247,7 +241,7 @@
 extern int sysctl_tcp_thin_linear_timeouts;
 extern int sysctl_tcp_thin_dupack;
 
-extern atomic_t tcp_memory_allocated;
+extern atomic_long_t tcp_memory_allocated;
 extern struct percpu_counter tcp_sockets_allocated;
 extern int tcp_memory_pressure;
 
@@ -280,7 +274,7 @@
 	}
 
 	if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
-	    atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])
+	    atomic_long_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])
 		return true;
 	return false;
 }
@@ -312,7 +306,8 @@
 
 extern int tcp_v4_rcv(struct sk_buff *skb);
 
-extern int tcp_v4_remember_stamp(struct sock *sk);
+extern struct inet_peer *tcp_v4_get_peer(struct sock *sk, bool *release_it);
+extern void *tcp_v4_tw_get_peer(struct sock *sk);
 extern int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
 extern int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 		       size_t size);
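
The atomic_t to atomic_long_t switch for tcp_memory_allocated (mirrored for
UDP further down) guards against counter overflow: this accounting is kept in
pages, and with 4 KB pages a signed 32-bit counter tops out at

	2^31 pages * 4096 bytes/page = 8 TB

which large machines can now reach. sysctl_tcp_mem widens to long in step so
the comparison in the hunk above stays consistent.
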
diff --git a/include/net/timewait_sock.h b/include/net/timewait_sock.h
index 97c3b14..053b3cf 100644
--- a/include/net/timewait_sock.h
+++ b/include/net/timewait_sock.h
@@ -21,6 +21,7 @@
 	int		(*twsk_unique)(struct sock *sk,
 				       struct sock *sktw, void *twp);
 	void		(*twsk_destructor)(struct sock *sk);
+	void		*(*twsk_getpeer)(struct sock *sk);
 };
 
 static inline int twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
@@ -39,4 +40,11 @@
 		sk->sk_prot->twsk_prot->twsk_destructor(sk);
 }
 
+static inline void *twsk_getpeer(struct sock *sk)
+{
+	if (sk->sk_prot->twsk_prot->twsk_getpeer)
+		return sk->sk_prot->twsk_prot->twsk_getpeer(sk);
+	return NULL;
+}
+
 #endif /* _TIMEWAIT_SOCK_H */
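
twsk_getpeer() is the usual optional-op dispatch: a protocol that does not
provide the hook simply yields NULL. A caller-side sketch, assuming tw is a
TIME-WAIT socket whose peer timestamps are being updated and that the result
is an inet_peer as in the tcp.h hunk above:

	struct inet_peer *peer = twsk_getpeer((struct sock *)tw);

	if (peer) {
		/* record tcp_ts/tcp_ts_stamp on the peer ... */
		inet_putpeer(peer);
	}
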
diff --git a/include/net/tipc/tipc.h b/include/net/tipc/tipc.h
deleted file mode 100644
index 1e0645e..0000000
--- a/include/net/tipc/tipc.h
+++ /dev/null
@@ -1,186 +0,0 @@
-/*
- * include/net/tipc/tipc.h: Main include file for TIPC users
- * 
- * Copyright (c) 2003-2006, Ericsson AB
- * Copyright (c) 2005,2010 Wind River Systems
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- *    contributors may be used to endorse or promote products derived from
- *    this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _NET_TIPC_H_
-#define _NET_TIPC_H_
-
-#ifdef __KERNEL__
-
-#include <linux/tipc.h>
-#include <linux/skbuff.h>
-
-/* 
- * Native API
- */
-
-/*
- * TIPC operating mode routines
- */
-
-#define TIPC_NOT_RUNNING  0
-#define TIPC_NODE_MODE    1
-#define TIPC_NET_MODE     2
-
-typedef void (*tipc_mode_event)(void *usr_handle, int mode, u32 addr);
-
-int tipc_attach(unsigned int *userref, tipc_mode_event, void *usr_handle);
-
-void tipc_detach(unsigned int userref);
-
-/*
- * TIPC port manipulation routines
- */
-
-typedef void (*tipc_msg_err_event) (void *usr_handle,
-				    u32 portref,
-				    struct sk_buff **buf,
-				    unsigned char const *data,
-				    unsigned int size,
-				    int reason, 
-				    struct tipc_portid const *attmpt_destid);
-
-typedef void (*tipc_named_msg_err_event) (void *usr_handle,
-					  u32 portref,
-					  struct sk_buff **buf,
-					  unsigned char const *data,
-					  unsigned int size,
-					  int reason, 
-					  struct tipc_name_seq const *attmpt_dest);
-
-typedef void (*tipc_conn_shutdown_event) (void *usr_handle,
-					  u32 portref,
-					  struct sk_buff **buf,
-					  unsigned char const *data,
-					  unsigned int size,
-					  int reason);
-
-typedef void (*tipc_msg_event) (void *usr_handle,
-				u32 portref,
-				struct sk_buff **buf,
-				unsigned char const *data,
-				unsigned int size,
-				unsigned int importance, 
-				struct tipc_portid const *origin);
-
-typedef void (*tipc_named_msg_event) (void *usr_handle,
-				      u32 portref,
-				      struct sk_buff **buf,
-				      unsigned char const *data,
-				      unsigned int size,
-				      unsigned int importance, 
-				      struct tipc_portid const *orig,
-				      struct tipc_name_seq const *dest);
-
-typedef void (*tipc_conn_msg_event) (void *usr_handle,
-				     u32 portref,
-				     struct sk_buff **buf,
-				     unsigned char const *data,
-				     unsigned int size);
-
-typedef void (*tipc_continue_event) (void *usr_handle, 
-				     u32 portref);
-
-int tipc_createport(unsigned int tipc_user, 
-		    void *usr_handle, 
-		    unsigned int importance, 
-		    tipc_msg_err_event error_cb, 
-		    tipc_named_msg_err_event named_error_cb, 
-		    tipc_conn_shutdown_event conn_error_cb, 
-		    tipc_msg_event message_cb, 
-		    tipc_named_msg_event named_message_cb, 
-		    tipc_conn_msg_event conn_message_cb, 
-		    tipc_continue_event continue_event_cb,
-		    u32 *portref);
-
-int tipc_deleteport(u32 portref);
-
-int tipc_ownidentity(u32 portref, struct tipc_portid *port);
-
-int tipc_portimportance(u32 portref, unsigned int *importance);
-int tipc_set_portimportance(u32 portref, unsigned int importance);
-
-int tipc_portunreliable(u32 portref, unsigned int *isunreliable);
-int tipc_set_portunreliable(u32 portref, unsigned int isunreliable);
-
-int tipc_portunreturnable(u32 portref, unsigned int *isunreturnable);
-int tipc_set_portunreturnable(u32 portref, unsigned int isunreturnable);
-
-int tipc_publish(u32 portref, unsigned int scope, 
-		 struct tipc_name_seq const *name_seq);
-int tipc_withdraw(u32 portref, unsigned int scope,
-		  struct tipc_name_seq const *name_seq);
-
-int tipc_connect2port(u32 portref, struct tipc_portid const *port);
-
-int tipc_disconnect(u32 portref);
-
-int tipc_shutdown(u32 ref);
-
-/*
- * TIPC messaging routines
- */
-
-#define TIPC_PORT_IMPORTANCE 100	/* send using current port setting */
-
-
-int tipc_send(u32 portref,
-	      unsigned int num_sect,
-	      struct iovec const *msg_sect);
-
-int tipc_send2name(u32 portref, 
-		   struct tipc_name const *name, 
-		   u32 domain,
-		   unsigned int num_sect,
-		   struct iovec const *msg_sect);
-
-int tipc_send2port(u32 portref,
-		   struct tipc_portid const *dest,
-		   unsigned int num_sect,
-		   struct iovec const *msg_sect);
-
-int tipc_send_buf2port(u32 portref,
-		       struct tipc_portid const *dest,
-		       struct sk_buff *buf,
-		       unsigned int dsz);
-
-int tipc_multicast(u32 portref, 
-		   struct tipc_name_seq const *seq, 
-		   u32 domain,	/* currently unused */
-		   unsigned int section_count,
-		   struct iovec const *msg);
-#endif
-
-#endif
diff --git a/include/net/tipc/tipc_bearer.h b/include/net/tipc/tipc_bearer.h
deleted file mode 100644
index ee2f304..0000000
--- a/include/net/tipc/tipc_bearer.h
+++ /dev/null
@@ -1,138 +0,0 @@
-/*
- * include/net/tipc/tipc_bearer.h: Include file for privileged access to TIPC bearers
- * 
- * Copyright (c) 2003-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- *    contributors may be used to endorse or promote products derived from
- *    this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _NET_TIPC_BEARER_H_
-#define _NET_TIPC_BEARER_H_
-
-#ifdef __KERNEL__
-
-#include <linux/tipc_config.h>
-#include <linux/skbuff.h>
-#include <linux/spinlock.h>
-
-/*
- * Identifiers of supported TIPC media types
- */
-
-#define TIPC_MEDIA_TYPE_ETH	1
-
-/* 
- * Destination address structure used by TIPC bearers when sending messages
- * 
- * IMPORTANT: The fields of this structure MUST be stored using the specified
- * byte order indicated below, as the structure is exchanged between nodes
- * as part of a link setup process.
- */
-
-struct tipc_media_addr {
-	__be32  type;			/* bearer type (network byte order) */
-	union {
-		__u8   eth_addr[6];	/* 48 bit Ethernet addr (byte array) */ 
-#if 0
-		/* Prototypes for other possible bearer types */
-
-		struct {
-			__u16 sin_family;
-			__u16 sin_port;
-			struct {
-				__u32 s_addr;
-			} sin_addr;
-			char pad[4];
-		} addr_in;		/* IP-based bearer */
-		__u16  sock_descr;	/* generic socket bearer */
-#endif
-	} dev_addr;
-};
-
-/**
- * struct tipc_bearer - TIPC bearer info available to privileged users
- * @usr_handle: pointer to additional user-defined information about bearer
- * @mtu: max packet size bearer can support
- * @blocked: non-zero if bearer is blocked
- * @lock: spinlock for controlling access to bearer
- * @addr: media-specific address associated with bearer
- * @name: bearer name (format = media:interface)
- * 
- * Note: TIPC initializes "name" and "lock" fields; user is responsible for
- * initialization all other fields when a bearer is enabled.
- */
-
-struct tipc_bearer {
-	void *usr_handle;
-	u32 mtu;
-	int blocked;
-	spinlock_t lock;
-	struct tipc_media_addr addr;
-	char name[TIPC_MAX_BEARER_NAME];
-};
-
-/*
- * TIPC routines available to supported media types
- */
-
-int  tipc_register_media(u32 media_type,
-			 char *media_name, 
-			 int (*enable)(struct tipc_bearer *), 
-			 void (*disable)(struct tipc_bearer *), 
-			 int (*send_msg)(struct sk_buff *, 
-					 struct tipc_bearer *,
-					 struct tipc_media_addr *), 
-			 char *(*addr2str)(struct tipc_media_addr *a,
-					   char *str_buf,
-					   int str_size),
-			 struct tipc_media_addr *bcast_addr,
-			 const u32 bearer_priority,
-			 const u32 link_tolerance,  /* [ms] */
-			 const u32 send_window_limit); 
-
-void tipc_recv_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr);
-
-int  tipc_block_bearer(const char *name);
-void tipc_continue(struct tipc_bearer *tb_ptr); 
-
-int tipc_enable_bearer(const char *bearer_name, u32 bcast_scope, u32 priority);
-int tipc_disable_bearer(const char *name);
-
-/*
- * Routines made available to TIPC by supported media types
- */
-
-int  tipc_eth_media_start(void);
-void tipc_eth_media_stop(void);
-
-#endif
-
-#endif
diff --git a/include/net/tipc/tipc_msg.h b/include/net/tipc/tipc_msg.h
deleted file mode 100644
index ffe50b4..0000000
--- a/include/net/tipc/tipc_msg.h
+++ /dev/null
@@ -1,207 +0,0 @@
-/*
- * include/net/tipc/tipc_msg.h: Include file for privileged access to TIPC message headers
- * 
- * Copyright (c) 2003-2006, Ericsson AB
- * Copyright (c) 2005, Wind River Systems
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- *    contributors may be used to endorse or promote products derived from
- *    this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _NET_TIPC_MSG_H_
-#define _NET_TIPC_MSG_H_
-
-#ifdef __KERNEL__
-
-struct tipc_msg {
-	__be32 hdr[15];
-};
-
-
-/*
-		TIPC user data message header format, version 2:
-
-
-       1 0 9 8 7 6 5 4|3 2 1 0 9 8 7 6|5 4 3 2 1 0 9 8|7 6 5 4 3 2 1 0 
-      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-   w0:|vers | user  |hdr sz |n|d|s|-|          message size           |
-      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-   w1:|mstyp| error |rer cnt|lsc|opt p|      broadcast ack no         |
-      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-   w2:|        link level ack no      |   broadcast/link level seq no |
-      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-   w3:|                       previous node                           |
-      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-   w4:|                      originating port                         |
-      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-   w5:|                      destination port                         |
-      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+    
-   w6:|                      originating node                         |
-      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-   w7:|                      destination node                         |
-      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-   w8:|            name type / transport sequence number              |
-      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-   w9:|              name instance/multicast lower bound              |
-      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+    
-   wA:|                    multicast upper bound                      |
-      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+    
-      /                                                               /
-      \                           options                             \
-      /                                                               /
-      +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
-
-*/
-
-#define TIPC_CONN_MSG	0
-#define TIPC_MCAST_MSG	1
-#define TIPC_NAMED_MSG	2
-#define TIPC_DIRECT_MSG	3
-
-
-static inline u32 msg_word(struct tipc_msg *m, u32 pos)
-{
-	return ntohl(m->hdr[pos]);
-}
-
-static inline u32 msg_bits(struct tipc_msg *m, u32 w, u32 pos, u32 mask)
-{
-	return (msg_word(m, w) >> pos) & mask;
-}
-
-static inline u32 msg_importance(struct tipc_msg *m)
-{
-	return msg_bits(m, 0, 25, 0xf);
-}
-
-static inline u32 msg_hdr_sz(struct tipc_msg *m)
-{
-	return msg_bits(m, 0, 21, 0xf) << 2;
-}
-
-static inline int msg_short(struct tipc_msg *m)
-{
-	return msg_hdr_sz(m) == 24;
-}
-
-static inline u32 msg_size(struct tipc_msg *m)
-{
-	return msg_bits(m, 0, 0, 0x1ffff);
-}
-
-static inline u32 msg_data_sz(struct tipc_msg *m)
-{
-	return msg_size(m) - msg_hdr_sz(m);
-}
-
-static inline unchar *msg_data(struct tipc_msg *m)
-{
-	return ((unchar *)m) + msg_hdr_sz(m);
-}
-
-static inline u32 msg_type(struct tipc_msg *m)
-{
-	return msg_bits(m, 1, 29, 0x7);
-}
-
-static inline u32 msg_named(struct tipc_msg *m)
-{
-	return msg_type(m) == TIPC_NAMED_MSG;
-}
-
-static inline u32 msg_mcast(struct tipc_msg *m)
-{
-	return msg_type(m) == TIPC_MCAST_MSG;
-}
-
-static inline u32 msg_connected(struct tipc_msg *m)
-{
-	return msg_type(m) == TIPC_CONN_MSG;
-}
-
-static inline u32 msg_errcode(struct tipc_msg *m)
-{
-	return msg_bits(m, 1, 25, 0xf);
-}
-
-static inline u32 msg_prevnode(struct tipc_msg *m)
-{
-	return msg_word(m, 3);
-}
-
-static inline u32 msg_origport(struct tipc_msg *m)
-{
-	return msg_word(m, 4);
-}
-
-static inline u32 msg_destport(struct tipc_msg *m)
-{
-	return msg_word(m, 5);
-}
-
-static inline u32 msg_mc_netid(struct tipc_msg *m)
-{
-	return msg_word(m, 5);
-}
-
-static inline u32 msg_orignode(struct tipc_msg *m)
-{
-	if (likely(msg_short(m)))
-		return msg_prevnode(m);
-	return msg_word(m, 6);
-}
-
-static inline u32 msg_destnode(struct tipc_msg *m)
-{
-	return msg_word(m, 7);
-}
-
-static inline u32 msg_nametype(struct tipc_msg *m)
-{
-	return msg_word(m, 8);
-}
-
-static inline u32 msg_nameinst(struct tipc_msg *m)
-{
-	return msg_word(m, 9);
-}
-
-static inline u32 msg_namelower(struct tipc_msg *m)
-{
-	return msg_nameinst(m);
-}
-
-static inline u32 msg_nameupper(struct tipc_msg *m)
-{
-	return msg_word(m, 10);
-}
-
-#endif
-
-#endif
diff --git a/include/net/tipc/tipc_port.h b/include/net/tipc/tipc_port.h
deleted file mode 100644
index 1893aaf..0000000
--- a/include/net/tipc/tipc_port.h
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * include/net/tipc/tipc_port.h: Include file for privileged access to TIPC ports
- * 
- * Copyright (c) 1994-2007, Ericsson AB
- * Copyright (c) 2005-2008, Wind River Systems
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- * 3. Neither the names of the copyright holders nor the names of its
- *    contributors may be used to endorse or promote products derived from
- *    this software without specific prior written permission.
- *
- * Alternatively, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") version 2 as published by the Free
- * Software Foundation.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _NET_TIPC_PORT_H_
-#define _NET_TIPC_PORT_H_
-
-#ifdef __KERNEL__
-
-#include <linux/tipc.h>
-#include <linux/skbuff.h>
-#include <net/tipc/tipc_msg.h>
-
-#define TIPC_FLOW_CONTROL_WIN 512
-
-/**
- * struct tipc_port - native TIPC port info available to privileged users
- * @usr_handle: pointer to additional user-defined information about port
- * @lock: pointer to spinlock for controlling access to port
- * @connected: non-zero if port is currently connected to a peer port
- * @conn_type: TIPC type used when connection was established
- * @conn_instance: TIPC instance used when connection was established
- * @conn_unacked: number of unacknowledged messages received from peer port
- * @published: non-zero if port has one or more associated names
- * @congested: non-zero if cannot send because of link or port congestion
- * @max_pkt: maximum packet size "hint" used when building messages sent by port
- * @ref: unique reference to port in TIPC object registry
- * @phdr: preformatted message header used when sending messages
- */
-
-struct tipc_port {
-        void *usr_handle;
-        spinlock_t *lock;
-	int connected;
-        u32 conn_type;
-        u32 conn_instance;
-	u32 conn_unacked;
-	int published;
-	u32 congested;
-	u32 max_pkt;
-	u32 ref;
-	struct tipc_msg phdr;
-};
-
-
-struct tipc_port *tipc_createport_raw(void *usr_handle,
-			u32 (*dispatcher)(struct tipc_port *, struct sk_buff *),
-			void (*wakeup)(struct tipc_port *),
-			const u32 importance);
-
-int tipc_reject_msg(struct sk_buff *buf, u32 err);
-
-int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode);
-
-void tipc_acknowledge(u32 port_ref,u32 ack);
-
-struct tipc_port *tipc_get_port(const u32 ref);
-
-/*
- * The following routines require that the port be locked on entry
- */
-
-int tipc_disconnect_port(struct tipc_port *tp_ptr);
-
-
-#endif
-
-#endif
-
diff --git a/include/net/udp.h b/include/net/udp.h
index 200b828..bb967dd 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -105,10 +105,10 @@
 
 extern struct proto udp_prot;
 
-extern atomic_t udp_memory_allocated;
+extern atomic_long_t udp_memory_allocated;
 
 /* sysctl variables for udp */
-extern int sysctl_udp_mem[3];
+extern long sysctl_udp_mem[3];
 extern int sysctl_udp_rmem_min;
 extern int sysctl_udp_wmem_min;
 
diff --git a/include/net/x25.h b/include/net/x25.h
index 1479cb4..a06119a 100644
--- a/include/net/x25.h
+++ b/include/net/x25.h
@@ -315,6 +315,8 @@
 extern rwlock_t x25_route_list_lock;
 extern struct list_head x25_forward_list;
 extern rwlock_t x25_forward_list_lock;
+extern struct list_head x25_neigh_list;
+extern rwlock_t x25_neigh_list_lock;
 
 extern int x25_proc_init(void);
 extern void x25_proc_exit(void);
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index bcfb6b2..b9f385d 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -143,6 +143,7 @@
 	struct xfrm_id		id;
 	struct xfrm_selector	sel;
 	struct xfrm_mark	mark;
+	u32			tfcpad;
 
 	u32			genid;
 
@@ -805,6 +806,9 @@
 	case IPPROTO_MH:
 		port = htons(fl->fl_mh_type);
 		break;
+	case IPPROTO_GRE:
+		port = htons(ntohl(fl->fl_gre_key) >> 16);
+		break;
 	default:
 		port = 0;	/*XXX*/
 	}
@@ -826,6 +830,9 @@
 	case IPPROTO_ICMPV6:
 		port = htons(fl->fl_icmp_code);
 		break;
+	case IPPROTO_GRE:
+		port = htons(ntohl(fl->fl_gre_key) & 0xffff);
+		break;
 	default:
 		port = 0;	/*XXX*/
 	}
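
For GRE there are no transport ports, so the flow key reuses the 32-bit GRE
key: the upper halfword stands in for the source port and the lower halfword
for the destination port. Worked example, assuming a key of 0x12345678 stored
network-endian in fl_gre_key:

	__be32 key = htonl(0x12345678);

	htons(ntohl(key) >> 16);	/* "sport" slot: 0x1234 */
	htons(ntohl(key) & 0xffff);	/* "dport" slot: 0x5678 */
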
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index 289010d..e5e345f 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -98,6 +98,103 @@
 		  (unsigned long) __entry->dir, __entry->mode)
 );
 
+TRACE_EVENT(ext4_evict_inode,
+	TP_PROTO(struct inode *inode),
+
+	TP_ARGS(inode),
+
+	TP_STRUCT__entry(
+		__field(	int,   dev_major                )
+		__field(	int,   dev_minor                )
+		__field(	ino_t,	ino			)
+		__field(	int,	nlink			)
+	),
+
+	TP_fast_assign(
+		__entry->dev_major = MAJOR(inode->i_sb->s_dev);
+		__entry->dev_minor = MINOR(inode->i_sb->s_dev);
+		__entry->ino	= inode->i_ino;
+		__entry->nlink	= inode->i_nlink;
+	),
+
+	TP_printk("dev %d,%d ino %lu nlink %d",
+		  __entry->dev_major, __entry->dev_minor,
+		  (unsigned long) __entry->ino, __entry->nlink)
+);
+
+TRACE_EVENT(ext4_drop_inode,
+	TP_PROTO(struct inode *inode, int drop),
+
+	TP_ARGS(inode, drop),
+
+	TP_STRUCT__entry(
+		__field(	int,	dev_major		)
+		__field(	int,	dev_minor		)
+		__field(	ino_t,	ino			)
+		__field(	int,	drop			)
+	),
+
+	TP_fast_assign(
+		__entry->dev_major = MAJOR(inode->i_sb->s_dev);
+		__entry->dev_minor = MINOR(inode->i_sb->s_dev);
+		__entry->ino	= inode->i_ino;
+		__entry->drop	= drop;
+	),
+
+	TP_printk("dev %d,%d ino %lu drop %d",
+		  __entry->dev_major, __entry->dev_minor,
+		  (unsigned long) __entry->ino, __entry->drop)
+);
+
+TRACE_EVENT(ext4_mark_inode_dirty,
+	TP_PROTO(struct inode *inode, unsigned long IP),
+
+	TP_ARGS(inode, IP),
+
+	TP_STRUCT__entry(
+		__field(	int,	dev_major		)
+		__field(	int,	dev_minor		)
+		__field(	ino_t,	ino			)
+		__field(unsigned long,	ip			)
+	),
+
+	TP_fast_assign(
+		__entry->dev_major = MAJOR(inode->i_sb->s_dev);
+		__entry->dev_minor = MINOR(inode->i_sb->s_dev);
+		__entry->ino	= inode->i_ino;
+		__entry->ip	= IP;
+	),
+
+	TP_printk("dev %d,%d ino %lu caller %pF",
+		  __entry->dev_major, __entry->dev_minor,
+		  (unsigned long) __entry->ino, (void *)__entry->ip)
+);
+
+TRACE_EVENT(ext4_begin_ordered_truncate,
+	TP_PROTO(struct inode *inode, loff_t new_size),
+
+	TP_ARGS(inode, new_size),
+
+	TP_STRUCT__entry(
+		__field(	int,	dev_major		)
+		__field(	int,	dev_minor		)
+		__field(	ino_t,	ino			)
+		__field(	loff_t,	new_size		)
+	),
+
+	TP_fast_assign(
+		__entry->dev_major	= MAJOR(inode->i_sb->s_dev);
+		__entry->dev_minor	= MINOR(inode->i_sb->s_dev);
+		__entry->ino		= inode->i_ino;
+		__entry->new_size	= new_size;
+	),
+
+	TP_printk("dev %d,%d ino %lu new_size %lld",
+		  __entry->dev_major, __entry->dev_minor,
+		  (unsigned long) __entry->ino,
+		  (long long) __entry->new_size)
+);
+
 DECLARE_EVENT_CLASS(ext4__write_begin,
 
 	TP_PROTO(struct inode *inode, loff_t pos, unsigned int len,
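
Each TRACE_EVENT() above generates a trace_<name>() hook that ext4 calls at
the matching point; shown only as a usage sketch (the real call sites live in
fs/ext4):

	/* in the evict_inode path: */
	trace_ext4_evict_inode(inode);

	/* in the drop_inode path: */
	trace_ext4_drop_inode(inode, drop);
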
diff --git a/kernel/exit.c b/kernel/exit.c
index b194feb..21aa7b3 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -96,6 +96,14 @@
 		sig->tty = NULL;
 	} else {
 		/*
+		 * This can only happen if the caller is de_thread().
+		 * FIXME: this is the temporary hack, we should teach
+		 * posix-cpu-timers to handle this case correctly.
+		 */
+		if (unlikely(has_group_leader_pid(tsk)))
+			posix_cpu_timers_exit_group(tsk);
+
+		/*
 		 * If there is any task waiting for the group exit
 		 * then notify it:
 		 */
diff --git a/kernel/latencytop.c b/kernel/latencytop.c
index 877fb30..17110a4 100644
--- a/kernel/latencytop.c
+++ b/kernel/latencytop.c
@@ -194,14 +194,7 @@
 
 	account_global_scheduler_latency(tsk, &lat);
 
-	/*
-	 * short term hack; if we're > 32 we stop; future we recycle:
-	 */
-	tsk->latency_record_count++;
-	if (tsk->latency_record_count >= LT_SAVECOUNT)
-		goto out_unlock;
-
-	for (i = 0; i < LT_SAVECOUNT; i++) {
+	for (i = 0; i < tsk->latency_record_count; i++) {
 		struct latency_record *mylat;
 		int same = 1;
 
@@ -227,8 +220,14 @@
 		}
 	}
 
+	/*
+	 * short term hack; if we're > 32 we stop; in future we recycle:
+	 */
+	if (tsk->latency_record_count >= LT_SAVECOUNT)
+		goto out_unlock;
+
 	/* Allocated a new one: */
-	i = tsk->latency_record_count;
+	i = tsk->latency_record_count++;
 	memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record));
 
 out_unlock:
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 517d827..cb6c0d2 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -674,6 +674,8 @@
 
 	event->tstamp_running += ctx->time - event->tstamp_stopped;
 
+	event->shadow_ctx_time = ctx->time - ctx->timestamp;
+
 	if (!is_software_event(event))
 		cpuctx->active_oncpu++;
 	ctx->nr_active++;
@@ -3396,7 +3398,8 @@
 }
 
 static void perf_output_read_one(struct perf_output_handle *handle,
-				 struct perf_event *event)
+				 struct perf_event *event,
+				 u64 enabled, u64 running)
 {
 	u64 read_format = event->attr.read_format;
 	u64 values[4];
@@ -3404,11 +3407,11 @@
 
 	values[n++] = perf_event_count(event);
 	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
-		values[n++] = event->total_time_enabled +
+		values[n++] = enabled +
 			atomic64_read(&event->child_total_time_enabled);
 	}
 	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
-		values[n++] = event->total_time_running +
+		values[n++] = running +
 			atomic64_read(&event->child_total_time_running);
 	}
 	if (read_format & PERF_FORMAT_ID)
@@ -3421,7 +3424,8 @@
  * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
  */
 static void perf_output_read_group(struct perf_output_handle *handle,
-			    struct perf_event *event)
+			    struct perf_event *event,
+			    u64 enabled, u64 running)
 {
 	struct perf_event *leader = event->group_leader, *sub;
 	u64 read_format = event->attr.read_format;
@@ -3431,10 +3435,10 @@
 	values[n++] = 1 + leader->nr_siblings;
 
 	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
-		values[n++] = leader->total_time_enabled;
+		values[n++] = enabled;
 
 	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
-		values[n++] = leader->total_time_running;
+		values[n++] = running;
 
 	if (leader != event)
 		leader->pmu->read(leader);
@@ -3459,13 +3463,35 @@
 	}
 }
 
+#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
+				 PERF_FORMAT_TOTAL_TIME_RUNNING)
+
 static void perf_output_read(struct perf_output_handle *handle,
 			     struct perf_event *event)
 {
+	u64 enabled = 0, running = 0, now, ctx_time;
+	u64 read_format = event->attr.read_format;
+
+	/*
+	 * compute total_time_enabled, total_time_running
+	 * based on snapshot values taken when the event
+	 * was last scheduled in.
+	 *
+	 * we cannot simply call update_context_time()
+	 * because of locking issues, as we are called
+	 * in NMI context
+	 */
+	if (read_format & PERF_FORMAT_TOTAL_TIMES) {
+		now = perf_clock();
+		ctx_time = event->shadow_ctx_time + now;
+		enabled = ctx_time - event->tstamp_enabled;
+		running = ctx_time - event->tstamp_running;
+	}
+
 	if (event->attr.read_format & PERF_FORMAT_GROUP)
-		perf_output_read_group(handle, event);
+		perf_output_read_group(handle, event, enabled, running);
 	else
-		perf_output_read_one(handle, event);
+		perf_output_read_one(handle, event, enabled, running);
 }
 
 void perf_output_sample(struct perf_output_handle *handle,
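
shadow_ctx_time exists so that perf_output_read() can derive enabled/running
times from NMI context, where update_context_time() cannot be called because
of locking. The derivation, written out:

	/* at schedule-in (first hunk above): */
	event->shadow_ctx_time = ctx->time - ctx->timestamp;

	/* later, from NMI context: */
	ctx_time = event->shadow_ctx_time + perf_clock();
	/* = ctx->time + (now - ctx->timestamp), i.e. exactly what
	 * update_context_time() would compute, without the locking */
	enabled = ctx_time - event->tstamp_enabled;
	running = ctx_time - event->tstamp_running;
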
diff --git a/kernel/printk.c b/kernel/printk.c
index b2ebaee..38e7d58 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -261,6 +261,12 @@
 }
 #endif
 
+#ifdef CONFIG_SECURITY_DMESG_RESTRICT
+int dmesg_restrict = 1;
+#else
+int dmesg_restrict;
+#endif
+
 int do_syslog(int type, char __user *buf, int len, bool from_file)
 {
 	unsigned i, j, limit, count;
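
dmesg_restrict defaults to 1 only under CONFIG_SECURITY_DMESG_RESTRICT. The
enforcement site is not in this hunk; presumably do_syslog()'s permission
check gates on it roughly like:

	/* sketch, assuming CAP_SYS_ADMIN is the governing capability */
	if (dmesg_restrict && !capable(CAP_SYS_ADMIN))
		return -EPERM;

The matching sysctl knob appears in the kernel/sysctl.c hunk below.
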
diff --git a/kernel/range.c b/kernel/range.c
index 471b66a..37fa9b9 100644
--- a/kernel/range.c
+++ b/kernel/range.c
@@ -119,7 +119,7 @@
 
 int clean_sort_range(struct range *range, int az)
 {
-	int i, j, k = az - 1, nr_range = 0;
+	int i, j, k = az - 1, nr_range = az;
 
 	for (i = 0; i < k; i++) {
 		if (range[i].end)
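
The clean_sort_range() change fixes the full-array case: the function reports
the number of non-empty entries by locating the first empty slot, so when no
slot is empty the old initializer left nr_range at 0 even though all az
entries were valid. A hedged sketch of the presumed tail of the function (not
shown in this hunk):

	for (i = 0; i < az; i++) {
		if (!range[i].end) {
			nr_range = i;	/* list ends at first empty slot */
			break;
		}
	}
	return nr_range;	/* now az, not 0, when the array is full */
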
diff --git a/kernel/relay.c b/kernel/relay.c
index c7cf397..859ea5a 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -70,17 +70,10 @@
  */
 static struct page **relay_alloc_page_array(unsigned int n_pages)
 {
-	struct page **array;
-	size_t pa_size = n_pages * sizeof(struct page *);
-
-	if (pa_size > PAGE_SIZE) {
-		array = vmalloc(pa_size);
-		if (array)
-			memset(array, 0, pa_size);
-	} else {
-		array = kzalloc(pa_size, GFP_KERNEL);
-	}
-	return array;
+	const size_t pa_size = n_pages * sizeof(struct page *);
+	if (pa_size > PAGE_SIZE)
+		return vzalloc(pa_size);
+	return kzalloc(pa_size, GFP_KERNEL);
 }
 
 /*
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index c33a1ed..b65bf63 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -704,6 +704,15 @@
 	},
 #endif
 	{
+		.procname	= "dmesg_restrict",
+		.data		= &dmesg_restrict,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
+		.extra2		= &one,
+	},
+	{
 		.procname	= "ngroups_max",
 		.data		= &ngroups_max,
 		.maxlen		= sizeof (int),
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index bc251ed..7b8ec02 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -168,7 +168,6 @@
 static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
 				 BLK_TC_ACT(BLK_TC_WRITE) };
 
-#define BLK_TC_HARDBARRIER	BLK_TC_BARRIER
 #define BLK_TC_RAHEAD		BLK_TC_AHEAD
 
 /* The ilog2() calls fall out because they're constant */
@@ -196,7 +195,6 @@
 		return;
 
 	what |= ddir_act[rw & WRITE];
-	what |= MASK_TC_BIT(rw, HARDBARRIER);
 	what |= MASK_TC_BIT(rw, SYNC);
 	what |= MASK_TC_BIT(rw, RAHEAD);
 	what |= MASK_TC_BIT(rw, META);
@@ -1807,8 +1805,6 @@
 
 	if (rw & REQ_RAHEAD)
 		rwbs[i++] = 'A';
-	if (rw & REQ_HARDBARRIER)
-		rwbs[i++] = 'B';
 	if (rw & REQ_SYNC)
 		rwbs[i++] = 'S';
 	if (rw & REQ_META)
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index bafba68..6e3c41a 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -43,7 +43,7 @@
 static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
 #endif
 
-static int __initdata no_watchdog;
+static int no_watchdog;
 
 
 /* boot commands */
diff --git a/lib/nlattr.c b/lib/nlattr.c
index c4706eb..00e8a02 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -15,7 +15,7 @@
 #include <linux/types.h>
 #include <net/netlink.h>
 
-static u16 nla_attr_minlen[NLA_TYPE_MAX+1] __read_mostly = {
+static const u16 nla_attr_minlen[NLA_TYPE_MAX+1] = {
 	[NLA_U8]	= sizeof(u8),
 	[NLA_U16]	= sizeof(u16),
 	[NLA_U32]	= sizeof(u32),
@@ -23,7 +23,7 @@
 	[NLA_NESTED]	= NLA_HDRLEN,
 };
 
-static int validate_nla(struct nlattr *nla, int maxtype,
+static int validate_nla(const struct nlattr *nla, int maxtype,
 			const struct nla_policy *policy)
 {
 	const struct nla_policy *pt;
@@ -115,10 +115,10 @@
  *
  * Returns 0 on success or a negative error code.
  */
-int nla_validate(struct nlattr *head, int len, int maxtype,
+int nla_validate(const struct nlattr *head, int len, int maxtype,
 		 const struct nla_policy *policy)
 {
-	struct nlattr *nla;
+	const struct nlattr *nla;
 	int rem, err;
 
 	nla_for_each_attr(nla, head, len, rem) {
@@ -173,10 +173,10 @@
  *
  * Returns 0 on success or a negative error code.
  */
-int nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head, int len,
-	      const struct nla_policy *policy)
+int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
+	      int len, const struct nla_policy *policy)
 {
-	struct nlattr *nla;
+	const struct nlattr *nla;
 	int rem, err;
 
 	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
@@ -191,7 +191,7 @@
 					goto errout;
 			}
 
-			tb[type] = nla;
+			tb[type] = (struct nlattr *)nla;
 		}
 	}
 
@@ -212,14 +212,14 @@
  *
  * Returns the first attribute in the stream matching the specified type.
  */
-struct nlattr *nla_find(struct nlattr *head, int len, int attrtype)
+struct nlattr *nla_find(const struct nlattr *head, int len, int attrtype)
 {
-	struct nlattr *nla;
+	const struct nlattr *nla;
 	int rem;
 
 	nla_for_each_attr(nla, head, len, rem)
 		if (nla_type(nla) == attrtype)
-			return nla;
+			return (struct nlattr *)nla;
 
 	return NULL;
 }
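
The constification keeps the parse/find entry points read-only over the attribute stream and casts const away only where a result is handed back through a caller-visible non-const slot. A self-contained sketch of that shape, with a hypothetical attr record standing in for struct nlattr:

#include <stdio.h>

struct attr { int type; int val; };

/* Walk a read-only stream, but return non-const so existing callers
 * can keep mutating what they looked up -- the nla_find() pattern. */
static struct attr *attr_find(const struct attr *head, int n, int type)
{
	int i;

	for (i = 0; i < n; i++)
		if (head[i].type == type)
			return (struct attr *)&head[i];
	return NULL;
}

int main(void)
{
	struct attr attrs[] = { {1, 10}, {2, 20} };
	struct attr *hit = attr_find(attrs, 2, 2);

	if (hit)
		printf("type 2 -> %d\n", hit->val);
	return 0;
}
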
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 6f412ab4..5086bb9 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -82,6 +82,16 @@
 };
 static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
 
+static inline void *ptr_to_indirect(void *ptr)
+{
+	return (void *)((unsigned long)ptr | RADIX_TREE_INDIRECT_PTR);
+}
+
+static inline void *indirect_to_ptr(void *ptr)
+{
+	return (void *)((unsigned long)ptr & ~RADIX_TREE_INDIRECT_PTR);
+}
+
 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
 {
 	return root->gfp_mask & __GFP_BITS_MASK;
@@ -265,7 +275,7 @@
 			return -ENOMEM;
 
 		/* Increase the height.  */
-		node->slots[0] = radix_tree_indirect_to_ptr(root->rnode);
+		node->slots[0] = indirect_to_ptr(root->rnode);
 
 		/* Propagate the aggregated tag info into the new root */
 		for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
@@ -276,7 +286,7 @@
 		newheight = root->height+1;
 		node->height = newheight;
 		node->count = 1;
-		node = radix_tree_ptr_to_indirect(node);
+		node = ptr_to_indirect(node);
 		rcu_assign_pointer(root->rnode, node);
 		root->height = newheight;
 	} while (height > root->height);
@@ -309,7 +319,7 @@
 			return error;
 	}
 
-	slot = radix_tree_indirect_to_ptr(root->rnode);
+	slot = indirect_to_ptr(root->rnode);
 
 	height = root->height;
 	shift = (height-1) * RADIX_TREE_MAP_SHIFT;
@@ -325,8 +335,7 @@
 				rcu_assign_pointer(node->slots[offset], slot);
 				node->count++;
 			} else
-				rcu_assign_pointer(root->rnode,
-					radix_tree_ptr_to_indirect(slot));
+				rcu_assign_pointer(root->rnode, ptr_to_indirect(slot));
 		}
 
 		/* Go a level down */
@@ -374,7 +383,7 @@
 			return NULL;
 		return is_slot ? (void *)&root->rnode : node;
 	}
-	node = radix_tree_indirect_to_ptr(node);
+	node = indirect_to_ptr(node);
 
 	height = node->height;
 	if (index > radix_tree_maxindex(height))
@@ -393,7 +402,7 @@
 		height--;
 	} while (height > 0);
 
-	return is_slot ? (void *)slot:node;
+	return is_slot ? (void *)slot : indirect_to_ptr(node);
 }
 
 /**
@@ -455,7 +464,7 @@
 	height = root->height;
 	BUG_ON(index > radix_tree_maxindex(height));
 
-	slot = radix_tree_indirect_to_ptr(root->rnode);
+	slot = indirect_to_ptr(root->rnode);
 	shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
 
 	while (height > 0) {
@@ -509,7 +518,7 @@
 
 	shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
 	pathp->node = NULL;
-	slot = radix_tree_indirect_to_ptr(root->rnode);
+	slot = indirect_to_ptr(root->rnode);
 
 	while (height > 0) {
 		int offset;
@@ -579,7 +588,7 @@
 
 	if (!radix_tree_is_indirect_ptr(node))
 		return (index == 0);
-	node = radix_tree_indirect_to_ptr(node);
+	node = indirect_to_ptr(node);
 
 	height = node->height;
 	if (index > radix_tree_maxindex(height))
@@ -666,7 +675,7 @@
 	}
 
 	shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
-	slot = radix_tree_indirect_to_ptr(root->rnode);
+	slot = indirect_to_ptr(root->rnode);
 
 	/*
 	 * we fill the path from (root->height - 2) to 0, leaving the index at
@@ -897,7 +906,7 @@
 		results[0] = node;
 		return 1;
 	}
-	node = radix_tree_indirect_to_ptr(node);
+	node = indirect_to_ptr(node);
 
 	max_index = radix_tree_maxindex(node->height);
 
@@ -916,7 +925,8 @@
 			slot = *(((void ***)results)[ret + i]);
 			if (!slot)
 				continue;
-			results[ret + nr_found] = rcu_dereference_raw(slot);
+			results[ret + nr_found] =
+				indirect_to_ptr(rcu_dereference_raw(slot));
 			nr_found++;
 		}
 		ret += nr_found;
@@ -965,7 +975,7 @@
 		results[0] = (void **)&root->rnode;
 		return 1;
 	}
-	node = radix_tree_indirect_to_ptr(node);
+	node = indirect_to_ptr(node);
 
 	max_index = radix_tree_maxindex(node->height);
 
@@ -1090,7 +1100,7 @@
 		results[0] = node;
 		return 1;
 	}
-	node = radix_tree_indirect_to_ptr(node);
+	node = indirect_to_ptr(node);
 
 	max_index = radix_tree_maxindex(node->height);
 
@@ -1109,7 +1119,8 @@
 			slot = *(((void ***)results)[ret + i]);
 			if (!slot)
 				continue;
-			results[ret + nr_found] = rcu_dereference_raw(slot);
+			results[ret + nr_found] =
+				indirect_to_ptr(rcu_dereference_raw(slot));
 			nr_found++;
 		}
 		ret += nr_found;
@@ -1159,7 +1170,7 @@
 		results[0] = (void **)&root->rnode;
 		return 1;
 	}
-	node = radix_tree_indirect_to_ptr(node);
+	node = indirect_to_ptr(node);
 
 	max_index = radix_tree_maxindex(node->height);
 
@@ -1195,7 +1206,7 @@
 		void *newptr;
 
 		BUG_ON(!radix_tree_is_indirect_ptr(to_free));
-		to_free = radix_tree_indirect_to_ptr(to_free);
+		to_free = indirect_to_ptr(to_free);
 
 		/*
 		 * The candidate node has more than one child, or its child
@@ -1208,16 +1219,39 @@
 
 		/*
 		 * We don't need rcu_assign_pointer(), since we are simply
-		 * moving the node from one part of the tree to another. If
-		 * it was safe to dereference the old pointer to it
+		 * moving the node from one part of the tree to another: if it
+		 * was safe to dereference the old pointer to it
 		 * (to_free->slots[0]), it will be safe to dereference the new
-		 * one (root->rnode).
+		 * one (root->rnode) as far as dependent read barriers go.
 		 */
 		newptr = to_free->slots[0];
 		if (root->height > 1)
-			newptr = radix_tree_ptr_to_indirect(newptr);
+			newptr = ptr_to_indirect(newptr);
 		root->rnode = newptr;
 		root->height--;
+
+		/*
+		 * We have a dilemma here. The node's slot[0] must not be
+		 * NULLed in case there are concurrent lookups expecting to
+		 * find the item. However if this was a bottom-level node,
+		 * then it may be subject to the slot pointer being visible
+		 * to callers dereferencing it. If item corresponding to
+		 * slot[0] is subsequently deleted, these callers would expect
+		 * their slot to become empty sooner or later.
+		 *
+		 * For example, lockless pagecache will look up a slot, deref
+		 * the page pointer, and if the page is 0 refcount it means it
+		 * was concurrently deleted from pagecache so try the deref
+		 * again. Fortunately there is already a requirement for logic
+		 * to retry the entire slot lookup -- the indirect pointer
+		 * problem (replacing direct root node with an indirect pointer
+		 * also results in a stale slot). So tag the slot as indirect
+		 * to force callers to retry.
+		 */
+		if (root->height == 0)
+			*((unsigned long *)&to_free->slots[0]) |=
+						RADIX_TREE_INDIRECT_PTR;
+
 		radix_tree_node_free(to_free);
 	}
 }
@@ -1254,7 +1288,7 @@
 		root->rnode = NULL;
 		goto out;
 	}
-	slot = radix_tree_indirect_to_ptr(slot);
+	slot = indirect_to_ptr(slot);
 
 	shift = (height - 1) * RADIX_TREE_MAP_SHIFT;
 	pathp->node = NULL;
@@ -1296,8 +1330,7 @@
 			radix_tree_node_free(to_free);
 
 		if (pathp->node->count) {
-			if (pathp->node ==
-					radix_tree_indirect_to_ptr(root->rnode))
+			if (pathp->node == indirect_to_ptr(root->rnode))
 				radix_tree_shrink(root);
 			goto out;
 		}
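
Both helpers, and the new stale-slot tagging in radix_tree_shrink(), rely on node pointers being aligned so the low bit is free (RADIX_TREE_INDIRECT_PTR is bit 0 in this era's tree); a slot can thus be marked "retry the walk" without growing it. A userspace rendition of the tag/untag/test trio:

#include <stdio.h>

#define INDIRECT_BIT 1UL	/* models RADIX_TREE_INDIRECT_PTR */

static void *ptr_to_indirect(void *ptr)
{
	return (void *)((unsigned long)ptr | INDIRECT_BIT);
}

static void *indirect_to_ptr(void *ptr)
{
	return (void *)((unsigned long)ptr & ~INDIRECT_BIT);
}

static int deref_retry(void *ptr)
{
	return (unsigned long)ptr & INDIRECT_BIT;
}

int main(void)
{
	int item = 42;	/* any aligned object: bit 0 of its address is free */
	void *slot = ptr_to_indirect(&item);

	if (deref_retry(slot))
		printf("slot is tagged: caller must retry the lookup\n");
	printf("untagged value: %d\n", *(int *)indirect_to_ptr(slot));
	return 0;
}
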
diff --git a/mm/filemap.c b/mm/filemap.c
index 75572b5..ea89840 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -644,7 +644,9 @@
 	pagep = radix_tree_lookup_slot(&mapping->page_tree, offset);
 	if (pagep) {
 		page = radix_tree_deref_slot(pagep);
-		if (unlikely(!page || page == RADIX_TREE_RETRY))
+		if (unlikely(!page))
+			goto out;
+		if (radix_tree_deref_retry(page))
 			goto repeat;
 
 		if (!page_cache_get_speculative(page))
@@ -660,6 +662,7 @@
 			goto repeat;
 		}
 	}
+out:
 	rcu_read_unlock();
 
 	return page;
@@ -777,12 +780,11 @@
 		page = radix_tree_deref_slot((void **)pages[i]);
 		if (unlikely(!page))
 			continue;
-		/*
-		 * this can only trigger if nr_found == 1, making livelock
-		 * a non issue.
-		 */
-		if (unlikely(page == RADIX_TREE_RETRY))
+		if (radix_tree_deref_retry(page)) {
+			if (ret)
+				start = pages[ret-1]->index;
 			goto restart;
+		}
 
 		if (!page_cache_get_speculative(page))
 			goto repeat;
@@ -830,11 +832,7 @@
 		page = radix_tree_deref_slot((void **)pages[i]);
 		if (unlikely(!page))
 			continue;
-		/*
-		 * this can only trigger if nr_found == 1, making livelock
-		 * a non issue.
-		 */
-		if (unlikely(page == RADIX_TREE_RETRY))
+		if (radix_tree_deref_retry(page))
 			goto restart;
 
 		if (page->mapping == NULL || page->index != index)
@@ -887,11 +885,7 @@
 		page = radix_tree_deref_slot((void **)pages[i]);
 		if (unlikely(!page))
 			continue;
-		/*
-		 * this can only trigger if nr_found == 1, making livelock
-		 * a non issue.
-		 */
-		if (unlikely(page == RADIX_TREE_RETRY))
+		if (radix_tree_deref_retry(page))
 			goto restart;
 
 		if (!page_cache_get_speculative(page))
@@ -1029,6 +1023,9 @@
 				goto page_not_up_to_date;
 			if (!trylock_page(page))
 				goto page_not_up_to_date;
+			/* Did it get truncated before we got the lock? */
+			if (!page->mapping)
+				goto page_not_up_to_date_locked;
 			if (!mapping->a_ops->is_partially_uptodate(page,
 								desc, offset))
 				goto page_not_up_to_date_locked;
@@ -1563,8 +1560,10 @@
 			goto no_cached_page;
 	}
 
-	if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags))
+	if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
+		page_cache_release(page);
 		return ret | VM_FAULT_RETRY;
+	}
 
 	/* Did it get truncated? */
 	if (unlikely(page->mapping != mapping)) {
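
Each gang-lookup loop above now asks radix_tree_deref_retry() instead of comparing against a magic RADIX_TREE_RETRY pointer, and find_get_pages() restarts from the index of the last page already delivered so no progress is lost. The per-slot decision, modelled standalone (in the kernel a retry means re-walking the tree, not re-reading the same slot):

#include <stdio.h>

#define INDIRECT_BIT 1UL	/* models RADIX_TREE_INDIRECT_PTR */

static int deref_retry(const void *p)
{
	return (unsigned long)p & INDIRECT_BIT;
}

/* Returns the entry, or NULL; *restart asks the caller to redo the
 * whole lookup because the tree changed shape underneath it. */
static void *check_slot(void *p, int *restart)
{
	*restart = 0;
	if (!p)
		return NULL;		/* genuinely empty: skip, no retry */
	if (deref_retry(p)) {
		*restart = 1;
		return NULL;
	}
	return p;
}

int main(void)
{
	int page = 7, restart;
	void *stale = (void *)((unsigned long)&page | INDIRECT_BIT);

	check_slot(stale, &restart);
	printf("stale slot -> restart=%d\n", restart);	/* 1 */
	printf("live slot  -> %d\n", *(int *)check_slot(&page, &restart));
	return 0;
}
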
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 9a99cfa..2efa8ea 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4208,15 +4208,17 @@
 
 	memset(mem, 0, size);
 	mem->stat = alloc_percpu(struct mem_cgroup_stat_cpu);
-	if (!mem->stat) {
-		if (size < PAGE_SIZE)
-			kfree(mem);
-		else
-			vfree(mem);
-		mem = NULL;
-	}
+	if (!mem->stat)
+		goto out_free;
 	spin_lock_init(&mem->pcp_counter_lock);
 	return mem;
+
+out_free:
+	if (size < PAGE_SIZE)
+		kfree(mem);
+	else
+		vfree(mem);
+	return NULL;
 }
 
 /*
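
The restructure is pure control flow: one labelled exit performs the cleanup (upstream, kfree() or vfree() depending on how large the object was), keeping the success path straight-line. A compact model with calloc()/free() standing in for the kernel allocators:

#include <stdio.h>
#include <stdlib.h>

struct cgroup_model {
	void *stat;	/* stands in for the percpu counter block */
};

static struct cgroup_model *cgroup_alloc_model(void)
{
	struct cgroup_model *mem = calloc(1, sizeof(*mem));

	if (!mem)
		return NULL;
	mem->stat = calloc(64, sizeof(long));
	if (!mem->stat)
		goto out_free;
	return mem;		/* success path stays linear */

out_free:
	free(mem);		/* kfree() or vfree(), chosen by size, upstream */
	return NULL;
}

int main(void)
{
	struct cgroup_model *m = cgroup_alloc_model();

	printf("alloc %s\n", m ? "ok" : "failed");
	if (m) {
		free(m->stat);
		free(m);
	}
	return 0;
}
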
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 2d1bf7c..4c51338 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -211,6 +211,7 @@
 	mmu_notifier_invalidate_range_end(mm, start, end);
 	vm_stat_account(mm, oldflags, vma->vm_file, -nrpages);
 	vm_stat_account(mm, newflags, vma->vm_file, nrpages);
+	perf_event_mmap(vma);
 	return 0;
 
 fail:
@@ -299,7 +300,6 @@
 		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
 		if (error)
 			goto out;
-		perf_event_mmap(vma);
 		nstart = tmp;
 
 		if (nstart < prev->vm_end)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b8a6fdc..d31d7ce 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -913,7 +913,7 @@
 	 * back off and wait for congestion to clear because further reclaim
 	 * will encounter the same problem
 	 */
-	if (nr_dirty == nr_congested)
+	if (nr_dirty == nr_congested && nr_dirty != 0)
 		zone_set_flag(zone, ZONE_CONGESTED);
 
 	free_page_list(&free_pages);
diff --git a/mm/vmstat.c b/mm/vmstat.c
index cd2e42b..42eac4d 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -949,7 +949,7 @@
 	v[PGPGIN] /= 2;		/* sectors -> kbytes */
 	v[PGPGOUT] /= 2;
 #endif
-	return m->private + *pos;
+	return (unsigned long *)m->private + *pos;
 }
 
 static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
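
The cast matters because m->private points at an array of unsigned long counters: arithmetic on void * (a GCC extension the kernel builds with) advances one byte per step, so the unadorned *pos offset indexed bytes rather than elements. A short demonstration:

#include <stdio.h>

int main(void)
{
	unsigned long v[4] = { 10, 20, 30, 40 };
	void *priv = v;
	long pos = 2;

	/* element-sized step: lands on v[2] */
	printf("cast:    %lu\n", *((unsigned long *)priv + pos));
	/* byte-sized step: lands two bytes into v[0] */
	printf("no cast: %p vs %p\n",
	       (void *)((unsigned long *)priv + pos),
	       priv + pos);	/* void * + long: GCC extension */
	return 0;
}
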
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 52077ca..6e64f7c 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -272,13 +272,11 @@
 		snprintf(name, IFNAMSIZ, "vlan%.4i", vlan_id);
 	}
 
-	new_dev = alloc_netdev_mq(sizeof(struct vlan_dev_info), name,
-				  vlan_setup, real_dev->num_tx_queues);
+	new_dev = alloc_netdev(sizeof(struct vlan_dev_info), name, vlan_setup);
 
 	if (new_dev == NULL)
 		return -ENOBUFS;
 
-	netif_copy_real_num_queues(new_dev, real_dev);
 	dev_net_set(new_dev, net);
 	/* need 4 bytes for extra VLAN header info,
 	 * hope the underlying device can handle it.
@@ -334,12 +332,15 @@
 	vlandev->features &= ~dev->vlan_features;
 	vlandev->features |= dev->features & dev->vlan_features;
 	vlandev->gso_max_size = dev->gso_max_size;
+
+	if (dev->features & NETIF_F_HW_VLAN_TX)
+		vlandev->hard_header_len = dev->hard_header_len;
+	else
+		vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN;
+
 #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
 	vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid;
 #endif
-	vlandev->real_num_tx_queues = dev->real_num_tx_queues;
-	BUG_ON(vlandev->real_num_tx_queues > vlandev->num_tx_queues);
-
 	if (old_features != vlandev->features)
 		netdev_features_change(vlandev);
 }
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index db01b31..5687c9b 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -19,19 +19,25 @@
 
 
 /**
- *	struct vlan_rx_stats - VLAN percpu rx stats
+ *	struct vlan_pcpu_stats - VLAN percpu rx/tx stats
  *	@rx_packets: number of received packets
  *	@rx_bytes: number of received bytes
  *	@rx_multicast: number of received multicast packets
+ *	@tx_packets: number of transmitted packets
+ *	@tx_bytes: number of transmitted bytes
  *	@syncp: synchronization point for 64bit counters
- *	@rx_errors: number of errors
+ *	@rx_errors: number of rx errors
+ *	@tx_dropped: number of tx drops
  */
-struct vlan_rx_stats {
+struct vlan_pcpu_stats {
 	u64			rx_packets;
 	u64			rx_bytes;
 	u64			rx_multicast;
+	u64			tx_packets;
+	u64			tx_bytes;
 	struct u64_stats_sync	syncp;
-	unsigned long		rx_errors;
+	u32			rx_errors;
+	u32			tx_dropped;
 };
 
 /**
@@ -45,9 +51,7 @@
  *	@real_dev: underlying netdevice
  *	@real_dev_addr: address of underlying netdevice
  *	@dent: proc dir entry
- *	@cnt_inc_headroom_on_tx: statistic - number of skb expansions on TX
- *	@cnt_encap_on_xmit: statistic - number of skb encapsulations on TX
- *	@vlan_rx_stats: ptr to percpu rx stats
+ *	@vlan_pcpu_stats: ptr to percpu rx/tx stats
  */
 struct vlan_dev_info {
 	unsigned int				nr_ingress_mappings;
@@ -62,9 +66,7 @@
 	unsigned char				real_dev_addr[ETH_ALEN];
 
 	struct proc_dir_entry			*dent;
-	unsigned long				cnt_inc_headroom_on_tx;
-	unsigned long				cnt_encap_on_xmit;
-	struct vlan_rx_stats __percpu		*vlan_rx_stats;
+	struct vlan_pcpu_stats __percpu		*vlan_pcpu_stats;
 };
 
 static inline struct vlan_dev_info *vlan_dev_info(const struct net_device *dev)
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 69b2f79..ce8e3ab 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -9,7 +9,7 @@
 	struct sk_buff *skb = *skbp;
 	u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
 	struct net_device *vlan_dev;
-	struct vlan_rx_stats *rx_stats;
+	struct vlan_pcpu_stats *rx_stats;
 
 	vlan_dev = vlan_find_dev(skb->dev, vlan_id);
 	if (!vlan_dev) {
@@ -26,7 +26,7 @@
 	skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
 	skb->vlan_tci = 0;
 
-	rx_stats = this_cpu_ptr(vlan_dev_info(vlan_dev)->vlan_rx_stats);
+	rx_stats = this_cpu_ptr(vlan_dev_info(vlan_dev)->vlan_pcpu_stats);
 
 	u64_stats_update_begin(&rx_stats->syncp);
 	rx_stats->rx_packets++;
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 14e3d1f..be73753 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -141,7 +141,7 @@
 		  struct packet_type *ptype, struct net_device *orig_dev)
 {
 	struct vlan_hdr *vhdr;
-	struct vlan_rx_stats *rx_stats;
+	struct vlan_pcpu_stats *rx_stats;
 	struct net_device *vlan_dev;
 	u16 vlan_id;
 	u16 vlan_tci;
@@ -177,7 +177,7 @@
 	} else {
 		skb->dev = vlan_dev;
 
-		rx_stats = this_cpu_ptr(vlan_dev_info(skb->dev)->vlan_rx_stats);
+		rx_stats = this_cpu_ptr(vlan_dev_info(skb->dev)->vlan_pcpu_stats);
 
 		u64_stats_update_begin(&rx_stats->syncp);
 		rx_stats->rx_packets++;
@@ -274,9 +274,6 @@
 	u16 vlan_tci = 0;
 	int rc;
 
-	if (WARN_ON(skb_headroom(skb) < dev->hard_header_len))
-		return -ENOSPC;
-
 	if (!(vlan_dev_info(dev)->flags & VLAN_FLAG_REORDER_HDR)) {
 		vhdr = (struct vlan_hdr *) skb_push(skb, VLAN_HLEN);
 
@@ -313,8 +310,6 @@
 static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
 					    struct net_device *dev)
 {
-	int i = skb_get_queue_mapping(skb);
-	struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
 	struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
 	unsigned int len;
 	int ret;
@@ -326,71 +321,31 @@
 	 */
 	if (veth->h_vlan_proto != htons(ETH_P_8021Q) ||
 	    vlan_dev_info(dev)->flags & VLAN_FLAG_REORDER_HDR) {
-		unsigned int orig_headroom = skb_headroom(skb);
 		u16 vlan_tci;
-
-		vlan_dev_info(dev)->cnt_encap_on_xmit++;
-
 		vlan_tci = vlan_dev_info(dev)->vlan_id;
 		vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
-		skb = __vlan_put_tag(skb, vlan_tci);
-		if (!skb) {
-			txq->tx_dropped++;
-			return NETDEV_TX_OK;
-		}
-
-		if (orig_headroom < VLAN_HLEN)
-			vlan_dev_info(dev)->cnt_inc_headroom_on_tx++;
+		skb = __vlan_hwaccel_put_tag(skb, vlan_tci);
 	}
 
-
 	skb_set_dev(skb, vlan_dev_info(dev)->real_dev);
 	len = skb->len;
 	ret = dev_queue_xmit(skb);
 
 	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
-		txq->tx_packets++;
-		txq->tx_bytes += len;
-	} else
-		txq->tx_dropped++;
+		struct vlan_pcpu_stats *stats;
+
+		stats = this_cpu_ptr(vlan_dev_info(dev)->vlan_pcpu_stats);
+		u64_stats_update_begin(&stats->syncp);
+		stats->tx_packets++;
+		stats->tx_bytes += len;
+		u64_stats_update_end(&stats->syncp);
+	} else {
+		this_cpu_inc(vlan_dev_info(dev)->vlan_pcpu_stats->tx_dropped);
+	}
 
 	return ret;
 }
 
-static netdev_tx_t vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb,
-						    struct net_device *dev)
-{
-	int i = skb_get_queue_mapping(skb);
-	struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
-	u16 vlan_tci;
-	unsigned int len;
-	int ret;
-
-	vlan_tci = vlan_dev_info(dev)->vlan_id;
-	vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb);
-	skb = __vlan_hwaccel_put_tag(skb, vlan_tci);
-
-	skb->dev = vlan_dev_info(dev)->real_dev;
-	len = skb->len;
-	ret = dev_queue_xmit(skb);
-
-	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
-		txq->tx_packets++;
-		txq->tx_bytes += len;
-	} else
-		txq->tx_dropped++;
-
-	return ret;
-}
-
-static u16 vlan_dev_select_queue(struct net_device *dev, struct sk_buff *skb)
-{
-	struct net_device *rdev = vlan_dev_info(dev)->real_dev;
-	const struct net_device_ops *ops = rdev->netdev_ops;
-
-	return ops->ndo_select_queue(rdev, skb);
-}
-
 static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
 {
 	/* TODO: gotta make sure the underlying layer can handle it,
@@ -719,8 +674,7 @@
 	.parse	 = eth_header_parse,
 };
 
-static const struct net_device_ops vlan_netdev_ops, vlan_netdev_accel_ops,
-		    vlan_netdev_ops_sq, vlan_netdev_accel_ops_sq;
+static const struct net_device_ops vlan_netdev_ops;
 
 static int vlan_dev_init(struct net_device *dev)
 {
@@ -738,6 +692,7 @@
 		      (1<<__LINK_STATE_PRESENT);
 
 	dev->features |= real_dev->features & real_dev->vlan_features;
+	dev->features |= NETIF_F_LLTX;
 	dev->gso_max_size = real_dev->gso_max_size;
 
 	/* ipv6 shared card related stuff */
@@ -755,26 +710,20 @@
 	if (real_dev->features & NETIF_F_HW_VLAN_TX) {
 		dev->header_ops      = real_dev->header_ops;
 		dev->hard_header_len = real_dev->hard_header_len;
-		if (real_dev->netdev_ops->ndo_select_queue)
-			dev->netdev_ops = &vlan_netdev_accel_ops_sq;
-		else
-			dev->netdev_ops = &vlan_netdev_accel_ops;
 	} else {
 		dev->header_ops      = &vlan_header_ops;
 		dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN;
-		if (real_dev->netdev_ops->ndo_select_queue)
-			dev->netdev_ops = &vlan_netdev_ops_sq;
-		else
-			dev->netdev_ops = &vlan_netdev_ops;
 	}
 
+	dev->netdev_ops = &vlan_netdev_ops;
+
 	if (is_vlan_dev(real_dev))
 		subclass = 1;
 
 	vlan_dev_set_lockdep_class(dev, subclass);
 
-	vlan_dev_info(dev)->vlan_rx_stats = alloc_percpu(struct vlan_rx_stats);
-	if (!vlan_dev_info(dev)->vlan_rx_stats)
+	vlan_dev_info(dev)->vlan_pcpu_stats = alloc_percpu(struct vlan_pcpu_stats);
+	if (!vlan_dev_info(dev)->vlan_pcpu_stats)
 		return -ENOMEM;
 
 	return 0;
@@ -786,8 +735,8 @@
 	struct vlan_dev_info *vlan = vlan_dev_info(dev);
 	int i;
 
-	free_percpu(vlan->vlan_rx_stats);
-	vlan->vlan_rx_stats = NULL;
+	free_percpu(vlan->vlan_pcpu_stats);
+	vlan->vlan_pcpu_stats = NULL;
 	for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) {
 		while ((pm = vlan->egress_priority_map[i]) != NULL) {
 			vlan->egress_priority_map[i] = pm->next;
@@ -825,33 +774,37 @@
 
 static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
-	dev_txq_stats_fold(dev, stats);
 
-	if (vlan_dev_info(dev)->vlan_rx_stats) {
-		struct vlan_rx_stats *p, accum = {0};
+	if (vlan_dev_info(dev)->vlan_pcpu_stats) {
+		struct vlan_pcpu_stats *p;
+		u32 rx_errors = 0, tx_dropped = 0;
 		int i;
 
 		for_each_possible_cpu(i) {
-			u64 rxpackets, rxbytes, rxmulticast;
+			u64 rxpackets, rxbytes, rxmulticast, txpackets, txbytes;
 			unsigned int start;
 
-			p = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats, i);
+			p = per_cpu_ptr(vlan_dev_info(dev)->vlan_pcpu_stats, i);
 			do {
 				start = u64_stats_fetch_begin_bh(&p->syncp);
 				rxpackets	= p->rx_packets;
 				rxbytes		= p->rx_bytes;
 				rxmulticast	= p->rx_multicast;
+				txpackets	= p->tx_packets;
+				txbytes		= p->tx_bytes;
 			} while (u64_stats_fetch_retry_bh(&p->syncp, start));
-			accum.rx_packets += rxpackets;
-			accum.rx_bytes   += rxbytes;
-			accum.rx_multicast += rxmulticast;
-			/* rx_errors is ulong, not protected by syncp */
-			accum.rx_errors  += p->rx_errors;
+
+			stats->rx_packets	+= rxpackets;
+			stats->rx_bytes		+= rxbytes;
+			stats->multicast	+= rxmulticast;
+			stats->tx_packets	+= txpackets;
+			stats->tx_bytes		+= txbytes;
+			/* rx_errors & tx_dropped are u32 */
+			rx_errors	+= p->rx_errors;
+			tx_dropped	+= p->tx_dropped;
 		}
-		stats->rx_packets = accum.rx_packets;
-		stats->rx_bytes   = accum.rx_bytes;
-		stats->rx_errors  = accum.rx_errors;
-		stats->multicast  = accum.rx_multicast;
+		stats->rx_errors  = rx_errors;
+		stats->tx_dropped = tx_dropped;
 	}
 	return stats;
 }
@@ -908,80 +861,6 @@
 #endif
 };
 
-static const struct net_device_ops vlan_netdev_accel_ops = {
-	.ndo_change_mtu		= vlan_dev_change_mtu,
-	.ndo_init		= vlan_dev_init,
-	.ndo_uninit		= vlan_dev_uninit,
-	.ndo_open		= vlan_dev_open,
-	.ndo_stop		= vlan_dev_stop,
-	.ndo_start_xmit =  vlan_dev_hwaccel_hard_start_xmit,
-	.ndo_validate_addr	= eth_validate_addr,
-	.ndo_set_mac_address	= vlan_dev_set_mac_address,
-	.ndo_set_rx_mode	= vlan_dev_set_rx_mode,
-	.ndo_set_multicast_list	= vlan_dev_set_rx_mode,
-	.ndo_change_rx_flags	= vlan_dev_change_rx_flags,
-	.ndo_do_ioctl		= vlan_dev_ioctl,
-	.ndo_neigh_setup	= vlan_dev_neigh_setup,
-	.ndo_get_stats64	= vlan_dev_get_stats64,
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
-	.ndo_fcoe_ddp_setup	= vlan_dev_fcoe_ddp_setup,
-	.ndo_fcoe_ddp_done	= vlan_dev_fcoe_ddp_done,
-	.ndo_fcoe_enable	= vlan_dev_fcoe_enable,
-	.ndo_fcoe_disable	= vlan_dev_fcoe_disable,
-	.ndo_fcoe_get_wwn	= vlan_dev_fcoe_get_wwn,
-#endif
-};
-
-static const struct net_device_ops vlan_netdev_ops_sq = {
-	.ndo_select_queue	= vlan_dev_select_queue,
-	.ndo_change_mtu		= vlan_dev_change_mtu,
-	.ndo_init		= vlan_dev_init,
-	.ndo_uninit		= vlan_dev_uninit,
-	.ndo_open		= vlan_dev_open,
-	.ndo_stop		= vlan_dev_stop,
-	.ndo_start_xmit =  vlan_dev_hard_start_xmit,
-	.ndo_validate_addr	= eth_validate_addr,
-	.ndo_set_mac_address	= vlan_dev_set_mac_address,
-	.ndo_set_rx_mode	= vlan_dev_set_rx_mode,
-	.ndo_set_multicast_list	= vlan_dev_set_rx_mode,
-	.ndo_change_rx_flags	= vlan_dev_change_rx_flags,
-	.ndo_do_ioctl		= vlan_dev_ioctl,
-	.ndo_neigh_setup	= vlan_dev_neigh_setup,
-	.ndo_get_stats64	= vlan_dev_get_stats64,
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
-	.ndo_fcoe_ddp_setup	= vlan_dev_fcoe_ddp_setup,
-	.ndo_fcoe_ddp_done	= vlan_dev_fcoe_ddp_done,
-	.ndo_fcoe_enable	= vlan_dev_fcoe_enable,
-	.ndo_fcoe_disable	= vlan_dev_fcoe_disable,
-	.ndo_fcoe_get_wwn	= vlan_dev_fcoe_get_wwn,
-#endif
-};
-
-static const struct net_device_ops vlan_netdev_accel_ops_sq = {
-	.ndo_select_queue	= vlan_dev_select_queue,
-	.ndo_change_mtu		= vlan_dev_change_mtu,
-	.ndo_init		= vlan_dev_init,
-	.ndo_uninit		= vlan_dev_uninit,
-	.ndo_open		= vlan_dev_open,
-	.ndo_stop		= vlan_dev_stop,
-	.ndo_start_xmit =  vlan_dev_hwaccel_hard_start_xmit,
-	.ndo_validate_addr	= eth_validate_addr,
-	.ndo_set_mac_address	= vlan_dev_set_mac_address,
-	.ndo_set_rx_mode	= vlan_dev_set_rx_mode,
-	.ndo_set_multicast_list	= vlan_dev_set_rx_mode,
-	.ndo_change_rx_flags	= vlan_dev_change_rx_flags,
-	.ndo_do_ioctl		= vlan_dev_ioctl,
-	.ndo_neigh_setup	= vlan_dev_neigh_setup,
-	.ndo_get_stats64	= vlan_dev_get_stats64,
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
-	.ndo_fcoe_ddp_setup	= vlan_dev_fcoe_ddp_setup,
-	.ndo_fcoe_ddp_done	= vlan_dev_fcoe_ddp_done,
-	.ndo_fcoe_enable	= vlan_dev_fcoe_enable,
-	.ndo_fcoe_disable	= vlan_dev_fcoe_disable,
-	.ndo_fcoe_get_wwn	= vlan_dev_fcoe_get_wwn,
-#endif
-};
-
 void vlan_setup(struct net_device *dev)
 {
 	ether_setup(dev);
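
The transmit path now brackets its 64-bit counter updates with u64_stats_update_begin()/u64_stats_update_end(), so a 32-bit reader folding the per-cpu stats can detect a torn read and retry rather than taking a lock. A userspace model of that sequence-counter discipline (single-threaded, so the memory-ordering details are elided):

#include <stdio.h>

struct pcpu_stats {
	unsigned int seq;
	unsigned long long tx_packets, tx_bytes;
};

static void stats_update_begin(struct pcpu_stats *s) { s->seq++; }
static void stats_update_end(struct pcpu_stats *s)   { s->seq++; }

static unsigned int stats_fetch_begin(const struct pcpu_stats *s)
{
	return s->seq;
}

static int stats_fetch_retry(const struct pcpu_stats *s, unsigned int start)
{
	/* odd start: writer mid-update; changed seq: torn read */
	return (start & 1) || s->seq != start;
}

int main(void)
{
	struct pcpu_stats s = { 0, 0, 0 };
	unsigned long long pkts, bytes;
	unsigned int start;

	stats_update_begin(&s);
	s.tx_packets++;
	s.tx_bytes += 1500;
	stats_update_end(&s);

	do {
		start = stats_fetch_begin(&s);
		pkts  = s.tx_packets;
		bytes = s.tx_bytes;
	} while (stats_fetch_retry(&s, start));

	printf("tx: %llu packets, %llu bytes\n", pkts, bytes);
	return 0;
}
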
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
index ddc1057..be9a5c1 100644
--- a/net/8021q/vlan_netlink.c
+++ b/net/8021q/vlan_netlink.c
@@ -101,25 +101,6 @@
 	return 0;
 }
 
-static int vlan_get_tx_queues(struct net *net,
-			      struct nlattr *tb[],
-			      unsigned int *num_tx_queues,
-			      unsigned int *real_num_tx_queues)
-{
-	struct net_device *real_dev;
-
-	if (!tb[IFLA_LINK])
-		return -EINVAL;
-
-	real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
-	if (!real_dev)
-		return -ENODEV;
-
-	*num_tx_queues      = real_dev->num_tx_queues;
-	*real_num_tx_queues = real_dev->real_num_tx_queues;
-	return 0;
-}
-
 static int vlan_newlink(struct net *src_net, struct net_device *dev,
 			struct nlattr *tb[], struct nlattr *data[])
 {
@@ -237,7 +218,6 @@
 	.maxtype	= IFLA_VLAN_MAX,
 	.policy		= vlan_policy,
 	.priv_size	= sizeof(struct vlan_dev_info),
-	.get_tx_queues  = vlan_get_tx_queues,
 	.setup		= vlan_setup,
 	.validate	= vlan_validate,
 	.newlink	= vlan_newlink,
diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c
index 80e280f..d1314cf 100644
--- a/net/8021q/vlanproc.c
+++ b/net/8021q/vlanproc.c
@@ -280,7 +280,6 @@
 	const struct vlan_dev_info *dev_info = vlan_dev_info(vlandev);
 	struct rtnl_link_stats64 temp;
 	const struct rtnl_link_stats64 *stats;
-	static const char fmt[] = "%30s %12lu\n";
 	static const char fmt64[] = "%30s %12llu\n";
 	int i;
 
@@ -299,10 +298,6 @@
 	seq_puts(seq, "\n");
 	seq_printf(seq, fmt64, "total frames transmitted", stats->tx_packets);
 	seq_printf(seq, fmt64, "total bytes transmitted", stats->tx_bytes);
-	seq_printf(seq, fmt, "total headroom inc",
-		   dev_info->cnt_inc_headroom_on_tx);
-	seq_printf(seq, fmt, "total encap on xmit",
-		   dev_info->cnt_encap_on_xmit);
 	seq_printf(seq, "Device: %s", dev_info->real_dev->name);
 	/* now show all PRIORITY mappings relating to this VLAN */
 	seq_printf(seq, "\nINGRESS priority mappings: "
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index 45c15f4..798beac 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -27,31 +27,16 @@
 
 #include <linux/module.h>
 #include <linux/errno.h>
+#include <linux/kernel.h>
 #include <linux/uaccess.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include <linux/stddef.h>
 #include <linux/types.h>
 #include <net/9p/9p.h>
 #include <net/9p/client.h>
 #include "protocol.h"
 
-#ifndef MIN
-#define MIN(a, b) (((a) < (b)) ? (a) : (b))
-#endif
-
-#ifndef MAX
-#define MAX(a, b) (((a) > (b)) ? (a) : (b))
-#endif
-
-#ifndef offset_of
-#define offset_of(type, memb) \
-	((unsigned long)(&((type *)0)->memb))
-#endif
-#ifndef container_of
-#define container_of(obj, type, memb) \
-	((type *)(((char *)obj) - offset_of(type, memb)))
-#endif
-
 static int
 p9pdu_writef(struct p9_fcall *pdu, int proto_version, const char *fmt, ...);
 
@@ -104,7 +89,7 @@
 
 static size_t pdu_read(struct p9_fcall *pdu, void *data, size_t size)
 {
-	size_t len = MIN(pdu->size - pdu->offset, size);
+	size_t len = min(pdu->size - pdu->offset, size);
 	memcpy(data, &pdu->sdata[pdu->offset], len);
 	pdu->offset += len;
 	return size - len;
@@ -112,7 +97,7 @@
 
 static size_t pdu_write(struct p9_fcall *pdu, const void *data, size_t size)
 {
-	size_t len = MIN(pdu->capacity - pdu->size, size);
+	size_t len = min(pdu->capacity - pdu->size, size);
 	memcpy(&pdu->sdata[pdu->size], data, len);
 	pdu->size += len;
 	return size - len;
@@ -121,7 +106,7 @@
 static size_t
 pdu_write_u(struct p9_fcall *pdu, const char __user *udata, size_t size)
 {
-	size_t len = MIN(pdu->capacity - pdu->size, size);
+	size_t len = min(pdu->capacity - pdu->size, size);
 	if (copy_from_user(&pdu->sdata[pdu->size], udata, len))
 		len = 0;
 
@@ -201,7 +186,7 @@
 				if (errcode)
 					break;
 
-				size = MAX(len, 0);
+				size = max_t(int16_t, len, 0);
 
 				*sptr = kmalloc(size + 1, GFP_KERNEL);
 				if (*sptr == NULL) {
@@ -256,8 +241,8 @@
 				    p9pdu_readf(pdu, proto_version, "d", count);
 				if (!errcode) {
 					*count =
-					    MIN(*count,
-						pdu->size - pdu->offset);
+					    min_t(int32_t, *count,
+						  pdu->size - pdu->offset);
 					*data = &pdu->sdata[pdu->offset];
 				}
 			}
@@ -421,7 +406,7 @@
 				const char *sptr = va_arg(ap, const char *);
 				int16_t len = 0;
 				if (sptr)
-					len = MIN(strlen(sptr), USHRT_MAX);
+					len = min_t(int16_t, strlen(sptr), USHRT_MAX);
 
 				errcode = p9pdu_writef(pdu, proto_version,
 								"w", len);
diff --git a/net/Kconfig b/net/Kconfig
index 55fd82e..126c2af 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -220,6 +220,11 @@
 	depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
 	default y
 
+config XPS
+	boolean
+	depends on SMP && SYSFS && USE_GENERIC_SMP_HELPERS
+	default y
+
 menu "Network testing"
 
 config NET_PKTGEN
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index ad2b232..fce2eae 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -97,7 +97,7 @@
 
 static inline struct br2684_dev *BRPRIV(const struct net_device *net_dev)
 {
-	return (struct br2684_dev *)netdev_priv(net_dev);
+	return netdev_priv(net_dev);
 }
 
 static inline struct net_device *list_entry_brdev(const struct list_head *le)
diff --git a/net/atm/clip.c b/net/atm/clip.c
index ff956d1..d257da5 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -502,7 +502,8 @@
 	struct atmarp_entry *entry;
 	int error;
 	struct clip_vcc *clip_vcc;
-	struct flowi fl = { .nl_u = { .ip4_u = { .daddr = ip, .tos = 1}} };
+	struct flowi fl = { .fl4_dst = ip,
+			    .fl4_tos = 1 };
 	struct rtable *rt;
 
 	if (vcc->push != clip_push) {
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 181d70c..179e04b 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -816,8 +816,7 @@
 	if (arg < 0 || arg >= MAX_LEC_ITF || !dev_lec[arg])
 		return -EINVAL;
 	vcc->proto_data = dev_lec[arg];
-	return lec_mcast_make((struct lec_priv *)netdev_priv(dev_lec[arg]),
-				vcc);
+	return lec_mcast_make(netdev_priv(dev_lec[arg]), vcc);
 }
 
 /* Initialize device. */
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index 26eaebf..bb86d29 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1392,6 +1392,7 @@
 	ax25_cb *ax25;
 	int err = 0;
 
+	memset(fsa, 0, sizeof(*fsa));
 	lock_sock(sk);
 	ax25 = ax25_sk(sk);
 
@@ -1403,7 +1404,6 @@
 
 		fsa->fsa_ax25.sax25_family = AF_AX25;
 		fsa->fsa_ax25.sax25_call   = ax25->dest_addr;
-		fsa->fsa_ax25.sax25_ndigis = 0;
 
 		if (ax25->digipeat != NULL) {
 			ndigi = ax25->digipeat->ndigi;
diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile
index d1e433f..7ca1f46 100644
--- a/net/bluetooth/Makefile
+++ b/net/bluetooth/Makefile
@@ -10,4 +10,4 @@
 obj-$(CONFIG_BT_CMTP)	+= cmtp/
 obj-$(CONFIG_BT_HIDP)	+= hidp/
 
-bluetooth-objs := af_bluetooth.o hci_core.o hci_conn.o hci_event.o hci_sock.o hci_sysfs.o lib.o
+bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o hci_sock.o hci_sysfs.o lib.o
diff --git a/net/bridge/br.c b/net/bridge/br.c
index c8436fa..84bbb82 100644
--- a/net/bridge/br.c
+++ b/net/bridge/br.c
@@ -22,8 +22,6 @@
 
 #include "br_private.h"
 
-int (*br_should_route_hook)(struct sk_buff *skb);
-
 static const struct stp_proto br_stp_proto = {
 	.rcv	= br_stp_rcv,
 };
@@ -102,8 +100,6 @@
 	br_fdb_fini();
 }
 
-EXPORT_SYMBOL(br_should_route_hook);
-
 module_init(br_init)
 module_exit(br_deinit)
 MODULE_LICENSE("GPL");
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 17cb0b6..5564435 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -141,7 +141,7 @@
 
 #ifdef CONFIG_BRIDGE_NETFILTER
 	/* remember the MTU in the rtable for PMTU */
-	br->fake_rtable.dst.metrics[RTAX_MTU - 1] = new_mtu;
+	dst_metric_set(&br->fake_rtable.dst, RTAX_MTU, new_mtu);
 #endif
 
 	return 0;
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 90512cc..2872393 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -238,15 +238,18 @@
 int br_fdb_test_addr(struct net_device *dev, unsigned char *addr)
 {
 	struct net_bridge_fdb_entry *fdb;
+	struct net_bridge_port *port;
 	int ret;
 
-	if (!br_port_exists(dev))
-		return 0;
-
 	rcu_read_lock();
-	fdb = __br_fdb_get(br_port_get_rcu(dev)->br, addr);
-	ret = fdb && fdb->dst->dev != dev &&
-		fdb->dst->state == BR_STATE_FORWARDING;
+	port = br_port_get_rcu(dev);
+	if (!port)
+		ret = 0;
+	else {
+		fdb = __br_fdb_get(port->br, addr);
+		ret = fdb && fdb->dst->dev != dev &&
+			fdb->dst->state == BR_STATE_FORWARDING;
+	}
 	rcu_read_unlock();
 
 	return ret;
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index cbfe87f..2bd11ec 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -223,7 +223,7 @@
 	struct net_bridge_port_group *p;
 	struct hlist_node *rp;
 
-	rp = rcu_dereference(br->router_list.first);
+	rp = rcu_dereference(hlist_first_rcu(&br->router_list));
 	p = mdst ? rcu_dereference(mdst->ports) : NULL;
 	while (p || rp) {
 		struct net_bridge_port *port, *lport, *rport;
@@ -242,7 +242,7 @@
 		if ((unsigned long)lport >= (unsigned long)port)
 			p = rcu_dereference(p->next);
 		if ((unsigned long)rport >= (unsigned long)port)
-			rp = rcu_dereference(rp->next);
+			rp = rcu_dereference(hlist_next_rcu(rp));
 	}
 
 	if (!prev)
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index 89ad25a..d9d1e2b 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -475,11 +475,8 @@
 {
 	struct net_bridge_port *p;
 
-	if (!br_port_exists(dev))
-		return -EINVAL;
-
-	p = br_port_get(dev);
-	if (p->br != br)
+	p = br_port_get_rtnl(dev);
+	if (!p || p->br != br)
 		return -EINVAL;
 
 	del_nbp(p);
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 25207a1..6f6d8e1 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -21,6 +21,10 @@
 /* Bridge group multicast address 802.1d (pg 51). */
 const u8 br_group_address[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 };
 
+/* Hook for brouter */
+br_should_route_hook_t __rcu *br_should_route_hook __read_mostly;
+EXPORT_SYMBOL(br_should_route_hook);
+
 static int br_pass_frame_up(struct sk_buff *skb)
 {
 	struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
@@ -139,7 +143,7 @@
 {
 	struct net_bridge_port *p;
 	const unsigned char *dest = eth_hdr(skb)->h_dest;
-	int (*rhook)(struct sk_buff *skb);
+	br_should_route_hook_t *rhook;
 
 	if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
 		return skb;
@@ -173,8 +177,8 @@
 	switch (p->state) {
 	case BR_STATE_FORWARDING:
 		rhook = rcu_dereference(br_should_route_hook);
-		if (rhook != NULL) {
-			if (rhook(skb))
+		if (rhook) {
+			if ((*rhook)(skb))
 				return skb;
 			dest = eth_hdr(skb)->h_dest;
 		}
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index eb5b256..85a0398 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -33,6 +33,9 @@
 
 #include "br_private.h"
 
+#define mlock_dereference(X, br) \
+	rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock))
+
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 static inline int ipv6_is_local_multicast(const struct in6_addr *addr)
 {
@@ -135,7 +138,7 @@
 struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
 					struct sk_buff *skb)
 {
-	struct net_bridge_mdb_htable *mdb = br->mdb;
+	struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb);
 	struct br_ip ip;
 
 	if (br->multicast_disabled)
@@ -235,7 +238,8 @@
 	if (mp->ports)
 		goto out;
 
-	mdb = br->mdb;
+	mdb = mlock_dereference(br->mdb, br);
+
 	hlist_del_rcu(&mp->hlist[mdb->ver]);
 	mdb->size--;
 
@@ -249,16 +253,20 @@
 static void br_multicast_del_pg(struct net_bridge *br,
 				struct net_bridge_port_group *pg)
 {
-	struct net_bridge_mdb_htable *mdb = br->mdb;
+	struct net_bridge_mdb_htable *mdb;
 	struct net_bridge_mdb_entry *mp;
 	struct net_bridge_port_group *p;
-	struct net_bridge_port_group **pp;
+	struct net_bridge_port_group __rcu **pp;
+
+	mdb = mlock_dereference(br->mdb, br);
 
 	mp = br_mdb_ip_get(mdb, &pg->addr);
 	if (WARN_ON(!mp))
 		return;
 
-	for (pp = &mp->ports; (p = *pp); pp = &p->next) {
+	for (pp = &mp->ports;
+	     (p = mlock_dereference(*pp, br)) != NULL;
+	     pp = &p->next) {
 		if (p != pg)
 			continue;
 
@@ -294,10 +302,10 @@
 	spin_unlock(&br->multicast_lock);
 }
 
-static int br_mdb_rehash(struct net_bridge_mdb_htable **mdbp, int max,
+static int br_mdb_rehash(struct net_bridge_mdb_htable __rcu **mdbp, int max,
 			 int elasticity)
 {
-	struct net_bridge_mdb_htable *old = *mdbp;
+	struct net_bridge_mdb_htable *old = rcu_dereference_protected(*mdbp, 1);
 	struct net_bridge_mdb_htable *mdb;
 	int err;
 
@@ -569,7 +577,7 @@
 	struct net_bridge *br, struct net_bridge_port *port,
 	struct br_ip *group, int hash)
 {
-	struct net_bridge_mdb_htable *mdb = br->mdb;
+	struct net_bridge_mdb_htable *mdb;
 	struct net_bridge_mdb_entry *mp;
 	struct hlist_node *p;
 	unsigned count = 0;
@@ -577,6 +585,7 @@
 	int elasticity;
 	int err;
 
+	mdb = rcu_dereference_protected(br->mdb, 1);
 	hlist_for_each_entry(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) {
 		count++;
 		if (unlikely(br_ip_equal(group, &mp->addr)))
@@ -642,13 +651,16 @@
 	struct net_bridge *br, struct net_bridge_port *port,
 	struct br_ip *group)
 {
-	struct net_bridge_mdb_htable *mdb = br->mdb;
+	struct net_bridge_mdb_htable *mdb;
 	struct net_bridge_mdb_entry *mp;
 	int hash;
+	int err;
 
+	mdb = rcu_dereference_protected(br->mdb, 1);
 	if (!mdb) {
-		if (br_mdb_rehash(&br->mdb, BR_HASH_SIZE, 0))
-			return NULL;
+		err = br_mdb_rehash(&br->mdb, BR_HASH_SIZE, 0);
+		if (err)
+			return ERR_PTR(err);
 		goto rehash;
 	}
 
@@ -660,7 +672,7 @@
 
 	case -EAGAIN:
 rehash:
-		mdb = br->mdb;
+		mdb = rcu_dereference_protected(br->mdb, 1);
 		hash = br_ip_hash(mdb, group);
 		break;
 
@@ -670,7 +682,7 @@
 
 	mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
 	if (unlikely(!mp))
-		goto out;
+		return ERR_PTR(-ENOMEM);
 
 	mp->br = br;
 	mp->addr = *group;
@@ -692,7 +704,7 @@
 {
 	struct net_bridge_mdb_entry *mp;
 	struct net_bridge_port_group *p;
-	struct net_bridge_port_group **pp;
+	struct net_bridge_port_group __rcu **pp;
 	unsigned long now = jiffies;
 	int err;
 
@@ -703,7 +715,7 @@
 
 	mp = br_multicast_new_group(br, port, group);
 	err = PTR_ERR(mp);
-	if (unlikely(IS_ERR(mp) || !mp))
+	if (IS_ERR(mp))
 		goto err;
 
 	if (!port) {
@@ -712,7 +724,9 @@
 		goto out;
 	}
 
-	for (pp = &mp->ports; (p = *pp); pp = &p->next) {
+	for (pp = &mp->ports;
+	     (p = mlock_dereference(*pp, br)) != NULL;
+	     pp = &p->next) {
 		if (p->port == port)
 			goto found;
 		if ((unsigned long)p->port < (unsigned long)port)
@@ -1106,7 +1120,7 @@
 	struct net_bridge_mdb_entry *mp;
 	struct igmpv3_query *ih3;
 	struct net_bridge_port_group *p;
-	struct net_bridge_port_group **pp;
+	struct net_bridge_port_group __rcu **pp;
 	unsigned long max_delay;
 	unsigned long now = jiffies;
 	__be32 group;
@@ -1145,7 +1159,7 @@
 	if (!group)
 		goto out;
 
-	mp = br_mdb_ip4_get(br->mdb, group);
+	mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group);
 	if (!mp)
 		goto out;
 
@@ -1157,7 +1171,9 @@
 	     try_to_del_timer_sync(&mp->timer) >= 0))
 		mod_timer(&mp->timer, now + max_delay);
 
-	for (pp = &mp->ports; (p = *pp); pp = &p->next) {
+	for (pp = &mp->ports;
+	     (p = mlock_dereference(*pp, br)) != NULL;
+	     pp = &p->next) {
 		if (timer_pending(&p->timer) ?
 		    time_after(p->timer.expires, now + max_delay) :
 		    try_to_del_timer_sync(&p->timer) >= 0)
@@ -1178,7 +1194,8 @@
 	struct mld_msg *mld = (struct mld_msg *) icmp6_hdr(skb);
 	struct net_bridge_mdb_entry *mp;
 	struct mld2_query *mld2q;
-	struct net_bridge_port_group *p, **pp;
+	struct net_bridge_port_group *p;
+	struct net_bridge_port_group __rcu **pp;
 	unsigned long max_delay;
 	unsigned long now = jiffies;
 	struct in6_addr *group = NULL;
@@ -1214,7 +1231,7 @@
 	if (!group)
 		goto out;
 
-	mp = br_mdb_ip6_get(br->mdb, group);
+	mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group);
 	if (!mp)
 		goto out;
 
@@ -1225,7 +1242,9 @@
 	     try_to_del_timer_sync(&mp->timer) >= 0))
 		mod_timer(&mp->timer, now + max_delay);
 
-	for (pp = &mp->ports; (p = *pp); pp = &p->next) {
+	for (pp = &mp->ports;
+	     (p = mlock_dereference(*pp, br)) != NULL;
+	     pp = &p->next) {
 		if (timer_pending(&p->timer) ?
 		    time_after(p->timer.expires, now + max_delay) :
 		    try_to_del_timer_sync(&p->timer) >= 0)
@@ -1254,7 +1273,7 @@
 	    timer_pending(&br->multicast_querier_timer))
 		goto out;
 
-	mdb = br->mdb;
+	mdb = mlock_dereference(br->mdb, br);
 	mp = br_mdb_ip_get(mdb, group);
 	if (!mp)
 		goto out;
@@ -1277,7 +1296,9 @@
 		goto out;
 	}
 
-	for (p = mp->ports; p; p = p->next) {
+	for (p = mlock_dereference(mp->ports, br);
+	     p != NULL;
+	     p = mlock_dereference(p->next, br)) {
 		if (p->port != port)
 			continue;
 
@@ -1625,7 +1646,7 @@
 	del_timer_sync(&br->multicast_query_timer);
 
 	spin_lock_bh(&br->multicast_lock);
-	mdb = br->mdb;
+	mdb = mlock_dereference(br->mdb, br);
 	if (!mdb)
 		goto out;
 
@@ -1729,6 +1750,7 @@
 {
 	struct net_bridge_port *port;
 	int err = 0;
+	struct net_bridge_mdb_htable *mdb;
 
 	spin_lock(&br->multicast_lock);
 	if (br->multicast_disabled == !val)
@@ -1741,15 +1763,16 @@
 	if (!netif_running(br->dev))
 		goto unlock;
 
-	if (br->mdb) {
-		if (br->mdb->old) {
+	mdb = mlock_dereference(br->mdb, br);
+	if (mdb) {
+		if (mdb->old) {
 			err = -EEXIST;
 rollback:
 			br->multicast_disabled = !!val;
 			goto unlock;
 		}
 
-		err = br_mdb_rehash(&br->mdb, br->mdb->max,
+		err = br_mdb_rehash(&br->mdb, mdb->max,
 				    br->hash_elasticity);
 		if (err)
 			goto rollback;
@@ -1774,6 +1797,7 @@
 {
 	int err = -ENOENT;
 	u32 old;
+	struct net_bridge_mdb_htable *mdb;
 
 	spin_lock(&br->multicast_lock);
 	if (!netif_running(br->dev))
@@ -1782,7 +1806,9 @@
 	err = -EINVAL;
 	if (!is_power_of_2(val))
 		goto unlock;
-	if (br->mdb && val < br->mdb->size)
+
+	mdb = mlock_dereference(br->mdb, br);
+	if (mdb && val < mdb->size)
 		goto unlock;
 
 	err = 0;
@@ -1790,8 +1816,8 @@
 	old = br->hash_max;
 	br->hash_max = val;
 
-	if (br->mdb) {
-		if (br->mdb->old) {
+	if (mdb) {
+		if (mdb->old) {
 			err = -EEXIST;
 rollback:
 			br->hash_max = old;
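
mlock_dereference() is rcu_dereference_protected() with the bridge's multicast_lock named as the protection: an update-side holder of that lock may load the pointer plainly, since writers exclude one another, and lockdep can check the stated condition. A pthread sketch of the same contract (no lockdep here, so the comment has to carry the rule):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t multicast_lock = PTHREAD_MUTEX_INITIALIZER;
static int *mdb;	/* only ever written under multicast_lock */

/* Caller must hold multicast_lock; a plain load is then race-free. */
static int *mlock_deref(void)
{
	return mdb;
}

int main(void)
{
	static int table = 123;

	pthread_mutex_lock(&multicast_lock);
	mdb = &table;			/* update side */
	printf("mdb -> %d\n", *mlock_deref());
	pthread_mutex_unlock(&multicast_lock);
	return 0;
}
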
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 865fd76..4b5b66d 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -124,24 +124,25 @@
 	atomic_set(&rt->dst.__refcnt, 1);
 	rt->dst.dev = br->dev;
 	rt->dst.path = &rt->dst;
-	rt->dst.metrics[RTAX_MTU - 1] = 1500;
+	dst_metric_set(&rt->dst, RTAX_MTU, 1500);
 	rt->dst.flags	= DST_NOXFRM;
 	rt->dst.ops = &fake_dst_ops;
 }
 
 static inline struct rtable *bridge_parent_rtable(const struct net_device *dev)
 {
-	if (!br_port_exists(dev))
-		return NULL;
-	return &br_port_get_rcu(dev)->br->fake_rtable;
+	struct net_bridge_port *port;
+
+	port = br_port_get_rcu(dev);
+	return port ? &port->br->fake_rtable : NULL;
 }
 
 static inline struct net_device *bridge_parent(const struct net_device *dev)
 {
-	if (!br_port_exists(dev))
-		return NULL;
+	struct net_bridge_port *port;
 
-	return br_port_get_rcu(dev)->br->dev;
+	port = br_port_get_rcu(dev);
+	return port ? port->br->dev : NULL;
 }
 
 static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb)
@@ -412,13 +413,8 @@
 	if (dnat_took_place(skb)) {
 		if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
 			struct flowi fl = {
-				.nl_u = {
-					.ip4_u = {
-						 .daddr = iph->daddr,
-						 .saddr = 0,
-						 .tos = RT_TOS(iph->tos) },
-				},
-				.proto = 0,
+				.fl4_dst = iph->daddr,
+				.fl4_tos = RT_TOS(iph->tos),
 			};
 			struct in_device *in_dev = __in_dev_get_rcu(dev);
 
@@ -566,26 +562,26 @@
 	u32 pkt_len;
 
 	if (skb->len < sizeof(struct ipv6hdr))
-		goto inhdr_error;
+		return NF_DROP;
 
 	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
-		goto inhdr_error;
+		return NF_DROP;
 
 	hdr = ipv6_hdr(skb);
 
 	if (hdr->version != 6)
-		goto inhdr_error;
+		return NF_DROP;
 
 	pkt_len = ntohs(hdr->payload_len);
 
 	if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) {
 		if (pkt_len + sizeof(struct ipv6hdr) > skb->len)
-			goto inhdr_error;
+			return NF_DROP;
 		if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)))
-			goto inhdr_error;
+			return NF_DROP;
 	}
 	if (hdr->nexthdr == NEXTHDR_HOP && check_hbh_len(skb))
-		goto inhdr_error;
+		return NF_DROP;
 
 	nf_bridge_put(skb->nf_bridge);
 	if (!nf_bridge_alloc(skb))
@@ -598,9 +594,6 @@
 		br_nf_pre_routing_finish_ipv6);
 
 	return NF_STOLEN;
-
-inhdr_error:
-	return NF_DROP;
 }
 
 /* Direct IPv6 traffic to br_nf_pre_routing_ipv6.
@@ -619,11 +612,11 @@
 	__u32 len = nf_bridge_encap_header_len(skb);
 
 	if (unlikely(!pskb_may_pull(skb, len)))
-		goto out;
+		return NF_DROP;
 
 	p = br_port_get_rcu(in);
 	if (p == NULL)
-		goto out;
+		return NF_DROP;
 	br = p->br;
 
 	if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
@@ -645,8 +638,7 @@
 	nf_bridge_pull_encap_header_rcsum(skb);
 
 	if (br_parse_ip_options(skb))
-		/* Drop invalid packet */
-		goto out;
+		return NF_DROP;
 
 	nf_bridge_put(skb->nf_bridge);
 	if (!nf_bridge_alloc(skb))
@@ -660,9 +652,6 @@
 		br_nf_pre_routing_finish);
 
 	return NF_STOLEN;
-
-out:
-	return NF_DROP;
 }
 
 
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 4a6a378..f8bf4c7 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -119,11 +119,13 @@
 
 	idx = 0;
 	for_each_netdev(net, dev) {
+		struct net_bridge_port *port = br_port_get_rtnl(dev);
+
 		/* not a bridge port */
-		if (!br_port_exists(dev) || idx < cb->args[0])
+		if (!port || idx < cb->args[0])
 			goto skip;
 
-		if (br_fill_ifinfo(skb, br_port_get(dev),
+		if (br_fill_ifinfo(skb, port,
 				   NETLINK_CB(cb->skb).pid,
 				   cb->nlh->nlmsg_seq, RTM_NEWLINK,
 				   NLM_F_MULTI) < 0)
@@ -169,9 +171,9 @@
 	if (!dev)
 		return -ENODEV;
 
-	if (!br_port_exists(dev))
+	p = br_port_get_rtnl(dev);
+	if (!p)
 		return -EINVAL;
-	p = br_port_get(dev);
 
 	/* if kernel STP is running, don't allow changes */
 	if (p->br->stp_enabled == BR_KERNEL_STP)
diff --git a/net/bridge/br_notify.c b/net/bridge/br_notify.c
index 404d4e1..7d337c9 100644
--- a/net/bridge/br_notify.c
+++ b/net/bridge/br_notify.c
@@ -32,15 +32,15 @@
 static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr)
 {
 	struct net_device *dev = ptr;
-	struct net_bridge_port *p = br_port_get(dev);
+	struct net_bridge_port *p;
 	struct net_bridge *br;
 	int err;
 
 	/* not a port of a bridge */
-	if (!br_port_exists(dev))
+	p = br_port_get_rtnl(dev);
+	if (!p)
 		return NOTIFY_DONE;
 
-	p = br_port_get(dev);
 	br = p->br;
 
 	switch (event) {
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 75c90ed..84aac77 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -72,7 +72,7 @@
 
 struct net_bridge_port_group {
 	struct net_bridge_port		*port;
-	struct net_bridge_port_group	*next;
+	struct net_bridge_port_group __rcu *next;
 	struct hlist_node		mglist;
 	struct rcu_head			rcu;
 	struct timer_list		timer;
@@ -86,7 +86,7 @@
 	struct hlist_node		hlist[2];
 	struct hlist_node		mglist;
 	struct net_bridge		*br;
-	struct net_bridge_port_group	*ports;
+	struct net_bridge_port_group __rcu *ports;
 	struct rcu_head			rcu;
 	struct timer_list		timer;
 	struct timer_list		query_timer;
@@ -151,11 +151,20 @@
 #endif
 };
 
-#define br_port_get_rcu(dev) \
-	((struct net_bridge_port *) rcu_dereference(dev->rx_handler_data))
-#define br_port_get(dev) ((struct net_bridge_port *) dev->rx_handler_data)
 #define br_port_exists(dev) (dev->priv_flags & IFF_BRIDGE_PORT)
 
+static inline struct net_bridge_port *br_port_get_rcu(const struct net_device *dev)
+{
+	struct net_bridge_port *port = rcu_dereference(dev->rx_handler_data);
+	return br_port_exists(dev) ? port : NULL;
+}
+
+static inline struct net_bridge_port *br_port_get_rtnl(struct net_device *dev)
+{
+	return br_port_exists(dev) ?
+		rtnl_dereference(dev->rx_handler_data) : NULL;
+}
+
 struct br_cpu_netstats {
 	u64			rx_packets;
 	u64			rx_bytes;
@@ -227,7 +236,7 @@
 	unsigned long			multicast_startup_query_interval;
 
 	spinlock_t			multicast_lock;
-	struct net_bridge_mdb_htable	*mdb;
+	struct net_bridge_mdb_htable __rcu *mdb;
 	struct hlist_head		router_list;
 	struct hlist_head		mglist;
 
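
Turning the accessor macros into inline functions folds the IFF_BRIDGE_PORT test into the getter itself, which is why every caller earlier in the series collapses from "check flag, then fetch" into one NULL-checked call. A standalone model of the shape (flag value illustrative):

#include <stdio.h>

#define IFF_BRIDGE_PORT 0x4000	/* illustrative flag value */

struct net_device_model {
	unsigned int priv_flags;
	void *rx_handler_data;
};

/* Fold the flag test into the getter: callers see NULL, never garbage. */
static void *br_port_get(const struct net_device_model *dev)
{
	return (dev->priv_flags & IFF_BRIDGE_PORT) ?
		dev->rx_handler_data : NULL;
}

int main(void)
{
	int port_priv = 1;
	struct net_device_model plain   = { 0, &port_priv };
	struct net_device_model bridged = { IFF_BRIDGE_PORT, &port_priv };

	printf("plain:   %s\n", br_port_get(&plain)   ? "port" : "NULL");
	printf("bridged: %s\n", br_port_get(&bridged) ? "port" : "NULL");
	return 0;
}
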
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index 35cf270..3d9a55d 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -141,10 +141,6 @@
 	struct net_bridge *br;
 	const unsigned char *buf;
 
-	if (!br_port_exists(dev))
-		goto err;
-	p = br_port_get_rcu(dev);
-
 	if (!pskb_may_pull(skb, 4))
 		goto err;
 
@@ -153,6 +149,10 @@
 	if (buf[0] != 0 || buf[1] != 0 || buf[2] != 0)
 		goto err;
 
+	p = br_port_get_rcu(dev);
+	if (!p)
+		goto err;
+
 	br = p->br;
 	spin_lock(&br->lock);
 
diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c
index ae3f106..1bcaf36 100644
--- a/net/bridge/netfilter/ebtable_broute.c
+++ b/net/bridge/netfilter/ebtable_broute.c
@@ -87,7 +87,8 @@
 	if (ret < 0)
 		return ret;
 	/* see br_input.c */
-	rcu_assign_pointer(br_should_route_hook, ebt_broute);
+	rcu_assign_pointer(br_should_route_hook,
+			   (br_should_route_hook_t *)ebt_broute);
 	return 0;
 }
 
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index a1dcf83..cbc9f39 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -128,6 +128,7 @@
                 const struct net_device *in, const struct net_device *out)
 {
 	const struct ethhdr *h = eth_hdr(skb);
+	const struct net_bridge_port *p;
 	__be16 ethproto;
 	int verdict, i;
 
@@ -148,13 +149,11 @@
 	if (FWINV2(ebt_dev_check(e->out, out), EBT_IOUT))
 		return 1;
 	/* rcu_read_lock()ed by nf_hook_slow */
-	if (in && br_port_exists(in) &&
-	    FWINV2(ebt_dev_check(e->logical_in, br_port_get_rcu(in)->br->dev),
-		   EBT_ILOGICALIN))
+	if (in && (p = br_port_get_rcu(in)) != NULL &&
+	    FWINV2(ebt_dev_check(e->logical_in, p->br->dev), EBT_ILOGICALIN))
 		return 1;
-	if (out && br_port_exists(out) &&
-	    FWINV2(ebt_dev_check(e->logical_out, br_port_get_rcu(out)->br->dev),
-		   EBT_ILOGICALOUT))
+	if (out && (p = br_port_get_rcu(out)) != NULL &&
+	    FWINV2(ebt_dev_check(e->logical_out, p->br->dev), EBT_ILOGICALOUT))
 		return 1;
 
 	if (e->bitmask & EBT_SOURCEMAC) {
diff --git a/net/caif/Makefile b/net/caif/Makefile
index f87481f..9d38e40 100644
--- a/net/caif/Makefile
+++ b/net/caif/Makefile
@@ -1,8 +1,6 @@
-ifeq ($(CONFIG_CAIF_DEBUG),y)
-EXTRA_CFLAGS += -DDEBUG
-endif
+ccflags-$(CONFIG_CAIF_DEBUG) := -DDEBUG
 
-caif-objs := caif_dev.o \
+caif-y := caif_dev.o \
 	cfcnfg.o cfmuxl.o cfctrl.o  \
 	cffrml.o cfveil.o cfdbgl.o\
 	cfserl.o cfdgml.o  \
@@ -13,4 +11,4 @@
 obj-$(CONFIG_CAIF_NETDEV) += chnl_net.o
 obj-$(CONFIG_CAIF) += caif_socket.o
 
-export-objs := caif.o
+export-y := caif.o
diff --git a/net/caif/caif_config_util.c b/net/caif/caif_config_util.c
index 76ae683..d522d8c 100644
--- a/net/caif/caif_config_util.c
+++ b/net/caif/caif_config_util.c
@@ -16,11 +16,18 @@
 {
 	struct dev_info *dev_info;
 	enum cfcnfg_phy_preference pref;
-	memset(l, 0, sizeof(*l));
-	l->priority = s->priority;
+	int res;
 
-	if (s->link_name[0] != '\0')
-		l->phyid = cfcnfg_get_named(cnfg, s->link_name);
+	memset(l, 0, sizeof(*l));
+	/* In the CAIF protocol a low value means high priority */
+	l->priority = CAIF_PRIO_MAX - s->priority + 1;
+
+	if (s->ifindex != 0) {
+		res = cfcnfg_get_id_from_ifi(cnfg, s->ifindex);
+		if (res < 0)
+			return res;
+		l->phyid = res;
+	}
 	else {
 		switch (s->link_selector) {
 		case CAIF_LINK_HIGH_BANDW:
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index b99369a..a42a408 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -307,6 +307,8 @@
 
 	case NETDEV_UNREGISTER:
 		caifd = caif_get(dev);
+		if (caifd == NULL)
+			break;
 		netdev_info(dev, "unregister\n");
 		atomic_set(&caifd->state, what);
 		caif_device_destroy(dev);
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 2eca2dd..1bf0cf5 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -716,8 +716,7 @@
 {
 	struct sock *sk = sock->sk;
 	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
-	int prio, linksel;
-	struct ifreq ifreq;
+	int linksel;
 
 	if (cf_sk->sk.sk_socket->state != SS_UNCONNECTED)
 		return -ENOPROTOOPT;
@@ -735,33 +734,6 @@
 		release_sock(&cf_sk->sk);
 		return 0;
 
-	case SO_PRIORITY:
-		if (lvl != SOL_SOCKET)
-			goto bad_sol;
-		if (ol < sizeof(int))
-			return -EINVAL;
-		if (copy_from_user(&prio, ov, sizeof(int)))
-			return -EINVAL;
-		lock_sock(&(cf_sk->sk));
-		cf_sk->conn_req.priority = prio;
-		release_sock(&cf_sk->sk);
-		return 0;
-
-	case SO_BINDTODEVICE:
-		if (lvl != SOL_SOCKET)
-			goto bad_sol;
-		if (ol < sizeof(struct ifreq))
-			return -EINVAL;
-		if (copy_from_user(&ifreq, ov, sizeof(ifreq)))
-			return -EFAULT;
-		lock_sock(&(cf_sk->sk));
-		strncpy(cf_sk->conn_req.link_name, ifreq.ifr_name,
-			sizeof(cf_sk->conn_req.link_name));
-		cf_sk->conn_req.link_name
-			[sizeof(cf_sk->conn_req.link_name)-1] = 0;
-		release_sock(&cf_sk->sk);
-		return 0;
-
 	case CAIFSO_REQ_PARAM:
 		if (lvl != SOL_CAIF)
 			goto bad_sol;
@@ -880,6 +852,18 @@
 	sock->state = SS_CONNECTING;
 	sk->sk_state = CAIF_CONNECTING;
 
+	/* Check priority value coming from socket */
+	/* If the priority value is out of range it will be adjusted */
+	if (cf_sk->sk.sk_priority > CAIF_PRIO_MAX)
+		cf_sk->conn_req.priority = CAIF_PRIO_MAX;
+	else if (cf_sk->sk.sk_priority < CAIF_PRIO_MIN)
+		cf_sk->conn_req.priority = CAIF_PRIO_MIN;
+	else
+		cf_sk->conn_req.priority = cf_sk->sk.sk_priority;
+
+	/*ifindex = id of the interface.*/
+	/* ifindex = id of the interface. */
+
 	dbfs_atomic_inc(&cnt.num_connect_req);
 	cf_sk->layer.receive = caif_sktrecv_cb;
 	err = caif_connect_client(&cf_sk->conn_req,
@@ -905,6 +889,7 @@
 	cf_sk->maxframe = mtu - (headroom + tailroom);
 	if (cf_sk->maxframe < 1) {
 		pr_warn("CAIF Interface MTU too small (%d)\n", dev->mtu);
+		err = -ENODEV;
 		goto out;
 	}
 
@@ -1142,7 +1127,7 @@
 	set_rx_flow_on(cf_sk);
 
 	/* Set default options on configuration */
-	cf_sk->conn_req.priority = CAIF_PRIO_NORMAL;
+	cf_sk->sk.sk_priority = CAIF_PRIO_NORMAL;
 	cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY;
 	cf_sk->conn_req.protocol = protocol;
 	/* Increase the number of sockets created. */
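
With the private SO_PRIORITY and SO_BINDTODEVICE handlers removed, a CAIF socket now inherits both settings from the generic socket layer: sk_priority (clamped into [CAIF_PRIO_MIN, CAIF_PRIO_MAX] at connect time, above) and sk_bound_dev_if. A sketch of the userspace side, shown on an AF_INET socket since the call is the stock sockopt:

#include <stdio.h>
#include <sys/socket.h>

static int set_link_priority(int fd, int prio)
{
	/* replaces the removed CAIF-private SO_PRIORITY handling */
	return setsockopt(fd, SOL_SOCKET, SO_PRIORITY,
			  &prio, sizeof(prio));
}

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd >= 0 && set_link_priority(fd, 4) == 0)
		printf("priority set via the generic sockopt\n");
	return 0;
}
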
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index 41adafd1..21ede14 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -173,18 +173,15 @@
 	return NULL;
 }
 
-int cfcnfg_get_named(struct cfcnfg *cnfg, char *name)
+
+int cfcnfg_get_id_from_ifi(struct cfcnfg *cnfg, int ifi)
 {
 	int i;
-
-	/* Try to match with specified name */
-	for (i = 0; i < MAX_PHY_LAYERS; i++) {
-		if (cnfg->phy_layers[i].frm_layer != NULL
-		    && strcmp(cnfg->phy_layers[i].phy_layer->name,
-			      name) == 0)
-			return cnfg->phy_layers[i].frm_layer->id;
-	}
-	return 0;
+	for (i = 0; i < MAX_PHY_LAYERS; i++)
+		if (cnfg->phy_layers[i].frm_layer != NULL &&
+				cnfg->phy_layers[i].ifindex == ifi)
+			return i;
+	return -ENODEV;
 }
 
 int cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer)
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index 08f267a..3cd8f97 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -361,11 +361,10 @@
 	struct cfctrl_request_info *p, *tmp;
 	struct cfctrl *ctrl = container_obj(layr);
 	spin_lock(&ctrl->info_list_lock);
-	pr_warn("enter\n");
 
 	list_for_each_entry_safe(p, tmp, &ctrl->list, list) {
 		if (p->client_layer == adap_layer) {
-			pr_warn("cancel req :%d\n", p->sequence_no);
+			pr_debug("cancel req :%d\n", p->sequence_no);
 			list_del(&p->list);
 			kfree(p);
 		}
diff --git a/net/caif/cfdbgl.c b/net/caif/cfdbgl.c
index 496fda9..11a2af4 100644
--- a/net/caif/cfdbgl.c
+++ b/net/caif/cfdbgl.c
@@ -12,6 +12,8 @@
 #include <net/caif/cfsrvl.h>
 #include <net/caif/cfpkt.h>
 
+#define container_obj(layr) ((struct cfsrvl *) layr)
+
 static int cfdbgl_receive(struct cflayer *layr, struct cfpkt *pkt);
 static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt);
 
@@ -38,5 +40,17 @@
 
 static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt)
 {
+	struct cfsrvl *service = container_obj(layr);
+	struct caif_payload_info *info;
+	int ret;
+
+	if (!cfsrvl_ready(service, &ret))
+		return ret;
+
+	/* Add info for MUX-layer to route the packet out */
+	info = cfpkt_info(pkt);
+	info->channel_id = service->layer.id;
+	info->dev_info = &service->dev_info;
+
 	return layr->dn->transmit(layr->dn, pkt);
 }
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index bde8481..e2fb5fa 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -193,7 +193,7 @@
 
 static int cfrfml_transmit_segment(struct cfrfml *rfml, struct cfpkt *pkt)
 {
-	caif_assert(cfpkt_getlen(pkt) >= rfml->fragment_size);
+	caif_assert(cfpkt_getlen(pkt) < rfml->fragment_size);
 
 	/* Add info for MUX-layer to route the packet out. */
 	cfpkt_info(pkt)->channel_id = rfml->serv.layer.id;
diff --git a/net/can/Makefile b/net/can/Makefile
index 9cd3c4b..2d3894b 100644
--- a/net/can/Makefile
+++ b/net/can/Makefile
@@ -3,10 +3,10 @@
 #
 
 obj-$(CONFIG_CAN)	+= can.o
-can-objs		:= af_can.o proc.o
+can-y			:= af_can.o proc.o
 
 obj-$(CONFIG_CAN_RAW)	+= can-raw.o
-can-raw-objs		:= raw.o
+can-raw-y		:= raw.o
 
 obj-$(CONFIG_CAN_BCM)	+= can-bcm.o
-can-bcm-objs		:= bcm.o
+can-bcm-y		:= bcm.o
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 08ffe9e..6faa825 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -125,7 +125,7 @@
 	struct list_head tx_ops;
 	unsigned long dropped_usr_msgs;
 	struct proc_dir_entry *bcm_proc_read;
-	char procname [9]; /* pointer printed in ASCII with \0 */
+	char procname[20]; /* pointer printed in ASCII with \0 */
 };
 
 static inline struct bcm_sock *bcm_sk(const struct sock *sk)
diff --git a/net/ceph/Makefile b/net/ceph/Makefile
index aab1cab..e87ef43 100644
--- a/net/ceph/Makefile
+++ b/net/ceph/Makefile
@@ -1,12 +1,9 @@
 #
 # Makefile for CEPH filesystem.
 #
-
-ifneq ($(KERNELRELEASE),)
-
 obj-$(CONFIG_CEPH_LIB) += libceph.o
 
-libceph-objs := ceph_common.o messenger.o msgpool.o buffer.o pagelist.o \
+libceph-y := ceph_common.o messenger.o msgpool.o buffer.o pagelist.o \
 	mon_client.o \
 	osd_client.o osdmap.o crush/crush.o crush/mapper.o crush/hash.o \
 	debugfs.o \
@@ -16,22 +13,3 @@
 	ceph_fs.o ceph_strings.o ceph_hash.o \
 	pagevec.o
 
-else
-#Otherwise we were called directly from the command
-# line; invoke the kernel build system.
-
-KERNELDIR ?= /lib/modules/$(shell uname -r)/build
-PWD := $(shell pwd)
-
-default: all
-
-all:
-	$(MAKE) -C $(KERNELDIR) M=$(PWD) CONFIG_CEPH_LIB=m modules
-
-modules_install:
-	$(MAKE) -C $(KERNELDIR) M=$(PWD) CONFIG_CEPH_LIB=m modules_install
-
-clean:
-	$(MAKE) -C $(KERNELDIR) M=$(PWD) clean
-
-endif
diff --git a/net/ceph/buffer.c b/net/ceph/buffer.c
index 53d8abf..bf3e6a1 100644
--- a/net/ceph/buffer.c
+++ b/net/ceph/buffer.c
@@ -19,7 +19,7 @@
 	if (b->vec.iov_base) {
 		b->is_vmalloc = false;
 	} else {
-		b->vec.iov_base = __vmalloc(len, gfp, PAGE_KERNEL);
+		b->vec.iov_base = __vmalloc(len, gfp | __GFP_HIGHMEM, PAGE_KERNEL);
 		if (!b->vec.iov_base) {
 			kfree(b);
 			return NULL;
diff --git a/net/core/datagram.c b/net/core/datagram.c
index cd1e039..18ac112 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -177,7 +177,7 @@
 		 * interrupt level will suddenly eat the receive_queue.
 		 *
 		 * Look at current nfs client by the way...
-		 * However, this function was corrent in any case. 8)
+		 * However, this function was correct in any case. 8)
 		 */
 		unsigned long cpu_flags;
 
diff --git a/net/core/dev.c b/net/core/dev.c
index 35dfb83..d28b3a0 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -743,34 +743,31 @@
 EXPORT_SYMBOL(dev_get_by_index);
 
 /**
- *	dev_getbyhwaddr - find a device by its hardware address
+ *	dev_getbyhwaddr_rcu - find a device by its hardware address
  *	@net: the applicable net namespace
  *	@type: media type of device
  *	@ha: hardware address
  *
  *	Search for an interface by MAC address. Returns NULL if the device
- *	is not found or a pointer to the device. The caller must hold the
- *	rtnl semaphore. The returned device has not had its ref count increased
+ *	is not found or a pointer to the device. The caller must hold RCU
+ *	The returned device has not had its ref count increased
  *	and the caller must therefore be careful about locking
  *
- *	BUGS:
- *	If the API was consistent this would be __dev_get_by_hwaddr
  */
 
-struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
+struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
+				       const char *ha)
 {
 	struct net_device *dev;
 
-	ASSERT_RTNL();
-
-	for_each_netdev(net, dev)
+	for_each_netdev_rcu(net, dev)
 		if (dev->type == type &&
 		    !memcmp(dev->dev_addr, ha, dev->addr_len))
 			return dev;
 
 	return NULL;
 }
-EXPORT_SYMBOL(dev_getbyhwaddr);
+EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
 
 struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
 {
@@ -1557,12 +1554,19 @@
  */
 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
 {
+	int rc;
+
 	if (txq < 1 || txq > dev->num_tx_queues)
 		return -EINVAL;
 
 	if (dev->reg_state == NETREG_REGISTERED) {
 		ASSERT_RTNL();
 
+		rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
+						  txq);
+		if (rc)
+			return rc;
+
 		if (txq < dev->real_num_tx_queues)
 			qdisc_reset_all_tx_gt(dev, txq);
 	}
@@ -1794,16 +1798,18 @@
 	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
 	struct packet_type *ptype;
 	__be16 type = skb->protocol;
+	int vlan_depth = ETH_HLEN;
 	int err;
 
-	if (type == htons(ETH_P_8021Q)) {
-		struct vlan_ethhdr *veh;
+	while (type == htons(ETH_P_8021Q)) {
+		struct vlan_hdr *vh;
 
-		if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
+		if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
 			return ERR_PTR(-EINVAL);
 
-		veh = (struct vlan_ethhdr *)skb->data;
-		type = veh->h_vlan_encapsulated_proto;
+		vh = (struct vlan_hdr *)(skb->data + vlan_depth);
+		type = vh->h_vlan_encapsulated_proto;
+		vlan_depth += VLAN_HLEN;
 	}
 
 	skb_reset_mac_header(skb);
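The hunk above generalises the old single-tag check into a loop so that stacked 802.1Q headers (QinQ) are skipped one VLAN_HLEN at a time before the inner protocol is resolved. A standalone sketch of the same walk, with the struct layout mirroring struct vlan_hdr:

	#include <stdint.h>
	#include <arpa/inet.h>

	#define ETH_HLEN	14
	#define VLAN_HLEN	4
	#define ETH_P_8021Q	0x8100

	struct vlan_hdr {
		uint16_t h_vlan_TCI;
		uint16_t h_vlan_encapsulated_proto;
	};

	/* Returns the encapsulated protocol; *depth ends up at the L3 header. */
	static uint16_t skip_vlan_tags(const uint8_t *frame, uint16_t proto,
				       int *depth)
	{
		int off = ETH_HLEN;

		while (proto == htons(ETH_P_8021Q)) {
			const struct vlan_hdr *vh =
				(const struct vlan_hdr *)(frame + off);

			proto = vh->h_vlan_encapsulated_proto;
			off += VLAN_HLEN;	/* one tag consumed; another may follow */
		}
		*depth = off;
		return proto;
	}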
@@ -1817,8 +1823,7 @@
 		if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
 			dev->ethtool_ops->get_drvinfo(dev, &info);
 
-		WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
-			"ip_summed=%d",
+		WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d ip_summed=%d\n",
 		     info.driver, dev ? dev->features : 0L,
 		     skb->sk ? skb->sk->sk_route_caps : 0L,
 		     skb->len, skb->data_len, skb->ip_summed);
@@ -1967,6 +1972,23 @@
 	}
 }
 
+int netif_get_vlan_features(struct sk_buff *skb, struct net_device *dev)
+{
+	__be16 protocol = skb->protocol;
+
+	if (protocol == htons(ETH_P_8021Q)) {
+		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
+		protocol = veh->h_vlan_encapsulated_proto;
+	} else if (!skb->vlan_tci)
+		return dev->features;
+
+	if (protocol != htons(ETH_P_8021Q))
+		return dev->features & dev->vlan_features;
+	else
+		return 0;
+}
+EXPORT_SYMBOL(netif_get_vlan_features);
+
 /*
  * Returns true if either:
  *	1. skb has frag_list and the device doesn't support FRAGLIST, or
@@ -1977,15 +1999,20 @@
 static inline int skb_needs_linearize(struct sk_buff *skb,
 				      struct net_device *dev)
 {
-	int features = dev->features;
+	if (skb_is_nonlinear(skb)) {
+		int features = dev->features;
 
-	if (skb->protocol == htons(ETH_P_8021Q) || vlan_tx_tag_present(skb))
-		features &= dev->vlan_features;
+		if (vlan_tx_tag_present(skb))
+			features &= dev->vlan_features;
 
-	return skb_is_nonlinear(skb) &&
-	       ((skb_has_frag_list(skb) && !(features & NETIF_F_FRAGLIST)) ||
-		(skb_shinfo(skb)->nr_frags && (!(features & NETIF_F_SG) ||
-					      illegal_highdma(dev, skb))));
+		return (skb_has_frag_list(skb) &&
+			!(features & NETIF_F_FRAGLIST)) ||
+			(skb_shinfo(skb)->nr_frags &&
+			(!(features & NETIF_F_SG) ||
+			illegal_highdma(dev, skb)));
+	}
+
+	return 0;
 }
 
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
@@ -1995,9 +2022,6 @@
 	int rc = NETDEV_TX_OK;
 
 	if (likely(!skb->next)) {
-		if (!list_empty(&ptype_all))
-			dev_queue_xmit_nit(skb, dev);
-
 		/*
 		 * If device doesnt need skb->dst, release it right now while
 		 * its hot in this cpu cache
@@ -2005,6 +2029,9 @@
 		if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
 			skb_dst_drop(skb);
 
+		if (!list_empty(&ptype_all))
+			dev_queue_xmit_nit(skb, dev);
+
 		skb_orphan_try(skb);
 
 		if (vlan_tx_tag_present(skb) &&
@@ -2119,26 +2146,70 @@
 	return queue_index;
 }
 
+static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
+{
+#ifdef CONFIG_XPS
+	struct xps_dev_maps *dev_maps;
+	struct xps_map *map;
+	int queue_index = -1;
+
+	rcu_read_lock();
+	dev_maps = rcu_dereference(dev->xps_maps);
+	if (dev_maps) {
+		map = rcu_dereference(
+		    dev_maps->cpu_map[raw_smp_processor_id()]);
+		if (map) {
+			if (map->len == 1)
+				queue_index = map->queues[0];
+			else {
+				u32 hash;
+				if (skb->sk && skb->sk->sk_hash)
+					hash = skb->sk->sk_hash;
+				else
+					hash = (__force u16) skb->protocol ^
+					    skb->rxhash;
+				hash = jhash_1word(hash, hashrnd);
+				queue_index = map->queues[
+				    ((u64)hash * map->len) >> 32];
+			}
+			if (unlikely(queue_index >= dev->real_num_tx_queues))
+				queue_index = -1;
+		}
+	}
+	rcu_read_unlock();
+
+	return queue_index;
+#else
+	return -1;
+#endif
+}
+
 static struct netdev_queue *dev_pick_tx(struct net_device *dev,
 					struct sk_buff *skb)
 {
 	int queue_index;
 	const struct net_device_ops *ops = dev->netdev_ops;
 
-	if (ops->ndo_select_queue) {
+	if (dev->real_num_tx_queues == 1)
+		queue_index = 0;
+	else if (ops->ndo_select_queue) {
 		queue_index = ops->ndo_select_queue(dev, skb);
 		queue_index = dev_cap_txqueue(dev, queue_index);
 	} else {
 		struct sock *sk = skb->sk;
 		queue_index = sk_tx_queue_get(sk);
-		if (queue_index < 0) {
 
-			queue_index = 0;
-			if (dev->real_num_tx_queues > 1)
+		if (queue_index < 0 || skb->ooo_okay ||
+		    queue_index >= dev->real_num_tx_queues) {
+			int old_index = queue_index;
+
+			queue_index = get_xps_queue(dev, skb);
+			if (queue_index < 0)
 				queue_index = skb_tx_hash(dev, skb);
 
-			if (sk) {
-				struct dst_entry *dst = rcu_dereference_check(sk->sk_dst_cache, 1);
+			if (queue_index != old_index && sk) {
+				struct dst_entry *dst =
+				    rcu_dereference_check(sk->sk_dst_cache, 1);
 
 				if (dst && skb_dst(skb) == dst)
 					sk_tx_queue_set(sk, queue_index);
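The multiply-shift in get_xps_queue() above, ((u64)hash * map->len) >> 32, maps a uniform 32-bit hash onto [0, len) without a modulo or division. A standalone sketch of the reduction:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t pick_queue(uint32_t hash, uint32_t len)
	{
		/* For a uniform 32-bit hash this is uniform over 0..len-1. */
		return (uint32_t)(((uint64_t)hash * len) >> 32);
	}

	int main(void)
	{
		printf("%u\n", pick_queue(0x9e3779b9u, 8));	/* an index in 0..7 */
		return 0;
	}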
@@ -4967,10 +5038,13 @@
 	}
 
 	if (features & NETIF_F_UFO) {
-		if (!(features & NETIF_F_GEN_CSUM)) {
+		/* maybe split UFO into V4 and V6? */
+		if (!((features & NETIF_F_GEN_CSUM) ||
+		    (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
+			    == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
 			if (name)
 				printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
-				       "since no NETIF_F_HW_CSUM feature.\n",
+				       "since no checksum offload features.\n",
 				       name);
 			features &= ~NETIF_F_UFO;
 		}
@@ -5014,9 +5088,9 @@
 }
 EXPORT_SYMBOL(netif_stacked_transfer_operstate);
 
+#ifdef CONFIG_RPS
 static int netif_alloc_rx_queues(struct net_device *dev)
 {
-#ifdef CONFIG_RPS
 	unsigned int i, count = dev->num_rx_queues;
 	struct netdev_rx_queue *rx;
 
@@ -5029,15 +5103,22 @@
 	}
 	dev->_rx = rx;
 
-	/*
-	 * Set a pointer to first element in the array which holds the
-	 * reference count.
-	 */
 	for (i = 0; i < count; i++)
-		rx[i].first = rx;
-#endif
+		rx[i].dev = dev;
 	return 0;
 }
+#endif
+
+static void netdev_init_one_queue(struct net_device *dev,
+				  struct netdev_queue *queue, void *_unused)
+{
+	/* Initialize queue lock */
+	spin_lock_init(&queue->_xmit_lock);
+	netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
+	queue->xmit_lock_owner = -1;
+	netdev_queue_numa_node_write(queue, -1);
+	queue->dev = dev;
+}
 
 static int netif_alloc_netdev_queues(struct net_device *dev)
 {
@@ -5053,25 +5134,11 @@
 		return -ENOMEM;
 	}
 	dev->_tx = tx;
-	return 0;
-}
 
-static void netdev_init_one_queue(struct net_device *dev,
-				  struct netdev_queue *queue,
-				  void *_unused)
-{
-	queue->dev = dev;
-
-	/* Initialize queue lock */
-	spin_lock_init(&queue->_xmit_lock);
-	netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
-	queue->xmit_lock_owner = -1;
-}
-
-static void netdev_init_queues(struct net_device *dev)
-{
 	netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
 	spin_lock_init(&dev->tx_global_lock);
+
+	return 0;
 }
 
 /**
@@ -5110,16 +5177,6 @@
 
 	dev->iflink = -1;
 
-	ret = netif_alloc_rx_queues(dev);
-	if (ret)
-		goto out;
-
-	ret = netif_alloc_netdev_queues(dev);
-	if (ret)
-		goto out;
-
-	netdev_init_queues(dev);
-
 	/* Init, if this function is available */
 	if (dev->netdev_ops->ndo_init) {
 		ret = dev->netdev_ops->ndo_init(dev);
@@ -5577,10 +5634,14 @@
 
 	dev->num_tx_queues = queue_count;
 	dev->real_num_tx_queues = queue_count;
+	if (netif_alloc_netdev_queues(dev))
+		goto free_pcpu;
 
 #ifdef CONFIG_RPS
 	dev->num_rx_queues = queue_count;
 	dev->real_num_rx_queues = queue_count;
+	if (netif_alloc_rx_queues(dev))
+		goto free_pcpu;
 #endif
 
 	dev->gso_max_size = GSO_MAX_SIZE;
@@ -5597,6 +5658,11 @@
 
 free_pcpu:
 	free_percpu(dev->pcpu_refcnt);
+	kfree(dev->_tx);
+#ifdef CONFIG_RPS
+	kfree(dev->_rx);
+#endif
+
 free_p:
 	kfree(p);
 	return NULL;
@@ -5618,6 +5684,9 @@
 	release_net(dev_net(dev));
 
 	kfree(dev->_tx);
+#ifdef CONFIG_RPS
+	kfree(dev->_rx);
+#endif
 
 	kfree(rcu_dereference_raw(dev->ingress_queue));
 
diff --git a/net/core/dst.c b/net/core/dst.c
index 8abe628..b99c7c7 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -370,6 +370,7 @@
 
 static struct notifier_block dst_dev_notifier = {
 	.notifier_call	= dst_dev_event,
+	.priority = -10, /* must be called after other network notifiers */
 };
 
 void __init dst_init(void)
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 956a9f4..1774178 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -891,6 +891,20 @@
 	return dev->ethtool_ops->nway_reset(dev);
 }
 
+static int ethtool_get_link(struct net_device *dev, char __user *useraddr)
+{
+	struct ethtool_value edata = { .cmd = ETHTOOL_GLINK };
+
+	if (!dev->ethtool_ops->get_link)
+		return -EOPNOTSUPP;
+
+	edata.data = netif_running(dev) && dev->ethtool_ops->get_link(dev);
+
+	if (copy_to_user(useraddr, &edata, sizeof(edata)))
+		return -EFAULT;
+	return 0;
+}
+
 static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr)
 {
 	struct ethtool_eeprom eeprom;
@@ -1171,7 +1185,9 @@
 		return -EFAULT;
 	if (edata.data && !(dev->features & NETIF_F_SG))
 		return -EINVAL;
-	if (edata.data && !(dev->features & NETIF_F_HW_CSUM))
+	if (edata.data && !((dev->features & NETIF_F_GEN_CSUM) ||
+		(dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
+			== (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM)))
 		return -EINVAL;
 	return dev->ethtool_ops->set_ufo(dev, edata.data);
 }
@@ -1528,8 +1544,7 @@
 		rc = ethtool_nway_reset(dev);
 		break;
 	case ETHTOOL_GLINK:
-		rc = ethtool_get_value(dev, useraddr, ethcmd,
-				       dev->ethtool_ops->get_link);
+		rc = ethtool_get_link(dev, useraddr);
 		break;
 	case ETHTOOL_GEEPROM:
 		rc = ethtool_get_eeprom(dev, useraddr);
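The new ethtool_get_link() above folds netif_running() into the answer, so link is reported down while the interface is administratively down. A hedged userspace sketch of the corresponding ETHTOOL_GLINK query; the device name "eth0" and an already-open AF_INET socket fd are assumptions:

	#include <string.h>
	#include <sys/ioctl.h>
	#include <net/if.h>
	#include <linux/ethtool.h>
	#include <linux/sockios.h>

	static int get_link(int fd)
	{
		struct ethtool_value edata = { .cmd = ETHTOOL_GLINK };
		struct ifreq ifr;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", sizeof(ifr.ifr_name) - 1);
		ifr.ifr_data = (char *)&edata;

		if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
			return -1;
		return edata.data;	/* 1 = link up, 0 = down or !IFF_UP */
	}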
diff --git a/net/core/filter.c b/net/core/filter.c
index 7beaec3..e8a6ac4 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -37,9 +37,58 @@
 #include <asm/uaccess.h>
 #include <asm/unaligned.h>
 #include <linux/filter.h>
+#include <linux/reciprocal_div.h>
+
+enum {
+	BPF_S_RET_K = 1,
+	BPF_S_RET_A,
+	BPF_S_ALU_ADD_K,
+	BPF_S_ALU_ADD_X,
+	BPF_S_ALU_SUB_K,
+	BPF_S_ALU_SUB_X,
+	BPF_S_ALU_MUL_K,
+	BPF_S_ALU_MUL_X,
+	BPF_S_ALU_DIV_X,
+	BPF_S_ALU_AND_K,
+	BPF_S_ALU_AND_X,
+	BPF_S_ALU_OR_K,
+	BPF_S_ALU_OR_X,
+	BPF_S_ALU_LSH_K,
+	BPF_S_ALU_LSH_X,
+	BPF_S_ALU_RSH_K,
+	BPF_S_ALU_RSH_X,
+	BPF_S_ALU_NEG,
+	BPF_S_LD_W_ABS,
+	BPF_S_LD_H_ABS,
+	BPF_S_LD_B_ABS,
+	BPF_S_LD_W_LEN,
+	BPF_S_LD_W_IND,
+	BPF_S_LD_H_IND,
+	BPF_S_LD_B_IND,
+	BPF_S_LD_IMM,
+	BPF_S_LDX_W_LEN,
+	BPF_S_LDX_B_MSH,
+	BPF_S_LDX_IMM,
+	BPF_S_MISC_TAX,
+	BPF_S_MISC_TXA,
+	BPF_S_ALU_DIV_K,
+	BPF_S_LD_MEM,
+	BPF_S_LDX_MEM,
+	BPF_S_ST,
+	BPF_S_STX,
+	BPF_S_JMP_JA,
+	BPF_S_JMP_JEQ_K,
+	BPF_S_JMP_JEQ_X,
+	BPF_S_JMP_JGE_K,
+	BPF_S_JMP_JGE_X,
+	BPF_S_JMP_JGT_K,
+	BPF_S_JMP_JGT_X,
+	BPF_S_JMP_JSET_K,
+	BPF_S_JMP_JSET_X,
+};
 
 /* No hurry in this branch */
-static void *__load_pointer(struct sk_buff *skb, int k)
+static void *__load_pointer(const struct sk_buff *skb, int k, unsigned int size)
 {
 	u8 *ptr = NULL;
 
@@ -48,12 +97,12 @@
 	else if (k >= SKF_LL_OFF)
 		ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
 
-	if (ptr >= skb->head && ptr < skb_tail_pointer(skb))
+	if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
 		return ptr;
 	return NULL;
 }
 
-static inline void *load_pointer(struct sk_buff *skb, int k,
+static inline void *load_pointer(const struct sk_buff *skb, int k,
 				 unsigned int size, void *buffer)
 {
 	if (k >= 0)
@@ -61,7 +110,7 @@
 	else {
 		if (k >= SKF_AD_OFF)
 			return NULL;
-		return __load_pointer(skb, k);
+		return __load_pointer(skb, k, size);
 	}
 }
 
@@ -89,7 +138,7 @@
 	rcu_read_lock_bh();
 	filter = rcu_dereference_bh(sk->sk_filter);
 	if (filter) {
-		unsigned int pkt_len = sk_run_filter(skb, filter->insns, filter->len);
+		unsigned int pkt_len = sk_run_filter(skb, filter->insns);
 
 		err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
 	}
@@ -103,48 +152,52 @@
  *	sk_run_filter - run a filter on a socket
  *	@skb: buffer to run the filter on
  *	@filter: filter to apply
- *	@flen: length of filter
  *
  * Decode and apply filter instructions to the skb->data.
- * Return length to keep, 0 for none. skb is the data we are
- * filtering, filter is the array of filter instructions, and
- * len is the number of filter blocks in the array.
+ * Return length to keep, 0 for none. @skb is the data we are
+ * filtering, @filter is the array of filter instructions.
+ * Because all jumps are guaranteed to be before the last instruction,
+ * and the last instruction is guaranteed to be a RET, we don't need to
+ * check flen. (We used to pass the length of the filter to this function.)
  */
-unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen)
+unsigned int sk_run_filter(const struct sk_buff *skb,
+			   const struct sock_filter *fentry)
 {
-	struct sock_filter *fentry;	/* We walk down these */
 	void *ptr;
 	u32 A = 0;			/* Accumulator */
 	u32 X = 0;			/* Index Register */
 	u32 mem[BPF_MEMWORDS];		/* Scratch Memory Store */
 	u32 tmp;
 	int k;
-	int pc;
 
 	/*
 	 * Process array of filter instructions.
 	 */
-	for (pc = 0; pc < flen; pc++) {
-		fentry = &filter[pc];
+	for (;; fentry++) {
+#if defined(CONFIG_X86_32)
+#define	K (fentry->k)
+#else
+		const u32 K = fentry->k;
+#endif
 
 		switch (fentry->code) {
 		case BPF_S_ALU_ADD_X:
 			A += X;
 			continue;
 		case BPF_S_ALU_ADD_K:
-			A += fentry->k;
+			A += K;
 			continue;
 		case BPF_S_ALU_SUB_X:
 			A -= X;
 			continue;
 		case BPF_S_ALU_SUB_K:
-			A -= fentry->k;
+			A -= K;
 			continue;
 		case BPF_S_ALU_MUL_X:
 			A *= X;
 			continue;
 		case BPF_S_ALU_MUL_K:
-			A *= fentry->k;
+			A *= K;
 			continue;
 		case BPF_S_ALU_DIV_X:
 			if (X == 0)
@@ -152,64 +205,64 @@
 			A /= X;
 			continue;
 		case BPF_S_ALU_DIV_K:
-			A /= fentry->k;
+			A = reciprocal_divide(A, K);
 			continue;
 		case BPF_S_ALU_AND_X:
 			A &= X;
 			continue;
 		case BPF_S_ALU_AND_K:
-			A &= fentry->k;
+			A &= K;
 			continue;
 		case BPF_S_ALU_OR_X:
 			A |= X;
 			continue;
 		case BPF_S_ALU_OR_K:
-			A |= fentry->k;
+			A |= K;
 			continue;
 		case BPF_S_ALU_LSH_X:
 			A <<= X;
 			continue;
 		case BPF_S_ALU_LSH_K:
-			A <<= fentry->k;
+			A <<= K;
 			continue;
 		case BPF_S_ALU_RSH_X:
 			A >>= X;
 			continue;
 		case BPF_S_ALU_RSH_K:
-			A >>= fentry->k;
+			A >>= K;
 			continue;
 		case BPF_S_ALU_NEG:
 			A = -A;
 			continue;
 		case BPF_S_JMP_JA:
-			pc += fentry->k;
+			fentry += K;
 			continue;
 		case BPF_S_JMP_JGT_K:
-			pc += (A > fentry->k) ? fentry->jt : fentry->jf;
+			fentry += (A > K) ? fentry->jt : fentry->jf;
 			continue;
 		case BPF_S_JMP_JGE_K:
-			pc += (A >= fentry->k) ? fentry->jt : fentry->jf;
+			fentry += (A >= K) ? fentry->jt : fentry->jf;
 			continue;
 		case BPF_S_JMP_JEQ_K:
-			pc += (A == fentry->k) ? fentry->jt : fentry->jf;
+			fentry += (A == K) ? fentry->jt : fentry->jf;
 			continue;
 		case BPF_S_JMP_JSET_K:
-			pc += (A & fentry->k) ? fentry->jt : fentry->jf;
+			fentry += (A & K) ? fentry->jt : fentry->jf;
 			continue;
 		case BPF_S_JMP_JGT_X:
-			pc += (A > X) ? fentry->jt : fentry->jf;
+			fentry += (A > X) ? fentry->jt : fentry->jf;
 			continue;
 		case BPF_S_JMP_JGE_X:
-			pc += (A >= X) ? fentry->jt : fentry->jf;
+			fentry += (A >= X) ? fentry->jt : fentry->jf;
 			continue;
 		case BPF_S_JMP_JEQ_X:
-			pc += (A == X) ? fentry->jt : fentry->jf;
+			fentry += (A == X) ? fentry->jt : fentry->jf;
 			continue;
 		case BPF_S_JMP_JSET_X:
-			pc += (A & X) ? fentry->jt : fentry->jf;
+			fentry += (A & X) ? fentry->jt : fentry->jf;
 			continue;
 		case BPF_S_LD_W_ABS:
-			k = fentry->k;
+			k = K;
 load_w:
 			ptr = load_pointer(skb, k, 4, &tmp);
 			if (ptr != NULL) {
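BPF_S_ALU_DIV_K above now uses reciprocal_divide(): sk_chk_filter() rewrites the constant once via reciprocal_value(), and the per-packet divide becomes a multiply and a shift. A standalone sketch of the pair, matching the kernel's lib/reciprocal_div.c of this era:

	#include <stdint.h>

	static uint32_t reciprocal_value(uint32_t b)
	{
		/* ceil(2^32 / b), computed once at filter-load time */
		return (uint32_t)((((uint64_t)1 << 32) + b - 1) / b);
	}

	static uint32_t reciprocal_divide(uint32_t a, uint32_t r)
	{
		/* a / b on every packet, without a hardware divide */
		return (uint32_t)(((uint64_t)a * r) >> 32);
	}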
@@ -218,7 +271,7 @@
 			}
 			break;
 		case BPF_S_LD_H_ABS:
-			k = fentry->k;
+			k = K;
 load_h:
 			ptr = load_pointer(skb, k, 2, &tmp);
 			if (ptr != NULL) {
@@ -227,7 +280,7 @@
 			}
 			break;
 		case BPF_S_LD_B_ABS:
-			k = fentry->k;
+			k = K;
 load_b:
 			ptr = load_pointer(skb, k, 1, &tmp);
 			if (ptr != NULL) {
@@ -242,32 +295,32 @@
 			X = skb->len;
 			continue;
 		case BPF_S_LD_W_IND:
-			k = X + fentry->k;
+			k = X + K;
 			goto load_w;
 		case BPF_S_LD_H_IND:
-			k = X + fentry->k;
+			k = X + K;
 			goto load_h;
 		case BPF_S_LD_B_IND:
-			k = X + fentry->k;
+			k = X + K;
 			goto load_b;
 		case BPF_S_LDX_B_MSH:
-			ptr = load_pointer(skb, fentry->k, 1, &tmp);
+			ptr = load_pointer(skb, K, 1, &tmp);
 			if (ptr != NULL) {
 				X = (*(u8 *)ptr & 0xf) << 2;
 				continue;
 			}
 			return 0;
 		case BPF_S_LD_IMM:
-			A = fentry->k;
+			A = K;
 			continue;
 		case BPF_S_LDX_IMM:
-			X = fentry->k;
+			X = K;
 			continue;
 		case BPF_S_LD_MEM:
-			A = mem[fentry->k];
+			A = mem[K];
 			continue;
 		case BPF_S_LDX_MEM:
-			X = mem[fentry->k];
+			X = mem[K];
 			continue;
 		case BPF_S_MISC_TAX:
 			X = A;
@@ -276,14 +329,14 @@
 			A = X;
 			continue;
 		case BPF_S_RET_K:
-			return fentry->k;
+			return K;
 		case BPF_S_RET_A:
 			return A;
 		case BPF_S_ST:
-			mem[fentry->k] = A;
+			mem[K] = A;
 			continue;
 		case BPF_S_STX:
-			mem[fentry->k] = X;
+			mem[K] = X;
 			continue;
 		default:
 			WARN_ON(1);
@@ -317,6 +370,12 @@
 				return 0;
 			A = skb->dev->type;
 			continue;
+		case SKF_AD_RXHASH:
+			A = skb->rxhash;
+			continue;
+		case SKF_AD_CPU:
+			A = raw_smp_processor_id();
+			continue;
 		case SKF_AD_NLATTR: {
 			struct nlattr *nla;
 
@@ -361,6 +420,66 @@
 }
 EXPORT_SYMBOL(sk_run_filter);
 
+/*
+ * Security:
+ * A BPF program is able to use 16 cells of memory to store intermediate
+ * values (see u32 mem[BPF_MEMWORDS] in sk_run_filter()).
+ * As we don't want to clear the mem[] array for each packet going through
+ * sk_run_filter(), we check that a filter loaded by a user never tries to
+ * read a cell unless it was previously written, and we check all branches
+ * to be sure a malicious user doesn't try to abuse us.
+ */
+static int check_load_and_stores(struct sock_filter *filter, int flen)
+{
+	u16 *masks, memvalid = 0; /* one bit per cell, 16 cells */
+	int pc, ret = 0;
+
+	BUILD_BUG_ON(BPF_MEMWORDS > 16);
+	masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL);
+	if (!masks)
+		return -ENOMEM;
+	memset(masks, 0xff, flen * sizeof(*masks));
+
+	for (pc = 0; pc < flen; pc++) {
+		memvalid &= masks[pc];
+
+		switch (filter[pc].code) {
+		case BPF_S_ST:
+		case BPF_S_STX:
+			memvalid |= (1 << filter[pc].k);
+			break;
+		case BPF_S_LD_MEM:
+		case BPF_S_LDX_MEM:
+			if (!(memvalid & (1 << filter[pc].k))) {
+				ret = -EINVAL;
+				goto error;
+			}
+			break;
+		case BPF_S_JMP_JA:
+			/* a jump must set masks on target */
+			masks[pc + 1 + filter[pc].k] &= memvalid;
+			memvalid = ~0;
+			break;
+		case BPF_S_JMP_JEQ_K:
+		case BPF_S_JMP_JEQ_X:
+		case BPF_S_JMP_JGE_K:
+		case BPF_S_JMP_JGE_X:
+		case BPF_S_JMP_JGT_K:
+		case BPF_S_JMP_JGT_X:
+		case BPF_S_JMP_JSET_X:
+		case BPF_S_JMP_JSET_K:
+			/* a jump must set masks on targets */
+			masks[pc + 1 + filter[pc].jt] &= memvalid;
+			masks[pc + 1 + filter[pc].jf] &= memvalid;
+			memvalid = ~0;
+			break;
+		}
+	}
+error:
+	kfree(masks);
+	return ret;
+}
+
 /**
  *	sk_chk_filter - verify socket filter code
  *	@filter: filter to verify
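check_load_and_stores() above closes an information leak: scratch memory is no longer cleared per packet, so a load from a never-written cell would expose stale kernel stack data. A tiny illustrative program the validator now rejects (constants from linux/filter.h):

	#include <linux/filter.h>

	/* Rejected: M[0] is loaded before any BPF_ST/BPF_STX wrote it. */
	static struct sock_filter leaky[] = {
		{ BPF_LD  | BPF_MEM, 0, 0, 0 },	/* A = M[0], uninitialized */
		{ BPF_RET | BPF_A,   0, 0, 0 },	/* would return stale memory */
	};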
@@ -377,7 +496,57 @@
  */
 int sk_chk_filter(struct sock_filter *filter, int flen)
 {
-	struct sock_filter *ftest;
+	/*
+	 * Valid instructions map to a non-zero BPF_S_* value;
+	 * invalid instructions are left initialized to 0.
+	 */
+	static const u8 codes[] = {
+		[BPF_ALU|BPF_ADD|BPF_K]  = BPF_S_ALU_ADD_K,
+		[BPF_ALU|BPF_ADD|BPF_X]  = BPF_S_ALU_ADD_X,
+		[BPF_ALU|BPF_SUB|BPF_K]  = BPF_S_ALU_SUB_K,
+		[BPF_ALU|BPF_SUB|BPF_X]  = BPF_S_ALU_SUB_X,
+		[BPF_ALU|BPF_MUL|BPF_K]  = BPF_S_ALU_MUL_K,
+		[BPF_ALU|BPF_MUL|BPF_X]  = BPF_S_ALU_MUL_X,
+		[BPF_ALU|BPF_DIV|BPF_X]  = BPF_S_ALU_DIV_X,
+		[BPF_ALU|BPF_AND|BPF_K]  = BPF_S_ALU_AND_K,
+		[BPF_ALU|BPF_AND|BPF_X]  = BPF_S_ALU_AND_X,
+		[BPF_ALU|BPF_OR|BPF_K]   = BPF_S_ALU_OR_K,
+		[BPF_ALU|BPF_OR|BPF_X]   = BPF_S_ALU_OR_X,
+		[BPF_ALU|BPF_LSH|BPF_K]  = BPF_S_ALU_LSH_K,
+		[BPF_ALU|BPF_LSH|BPF_X]  = BPF_S_ALU_LSH_X,
+		[BPF_ALU|BPF_RSH|BPF_K]  = BPF_S_ALU_RSH_K,
+		[BPF_ALU|BPF_RSH|BPF_X]  = BPF_S_ALU_RSH_X,
+		[BPF_ALU|BPF_NEG]        = BPF_S_ALU_NEG,
+		[BPF_LD|BPF_W|BPF_ABS]   = BPF_S_LD_W_ABS,
+		[BPF_LD|BPF_H|BPF_ABS]   = BPF_S_LD_H_ABS,
+		[BPF_LD|BPF_B|BPF_ABS]   = BPF_S_LD_B_ABS,
+		[BPF_LD|BPF_W|BPF_LEN]   = BPF_S_LD_W_LEN,
+		[BPF_LD|BPF_W|BPF_IND]   = BPF_S_LD_W_IND,
+		[BPF_LD|BPF_H|BPF_IND]   = BPF_S_LD_H_IND,
+		[BPF_LD|BPF_B|BPF_IND]   = BPF_S_LD_B_IND,
+		[BPF_LD|BPF_IMM]         = BPF_S_LD_IMM,
+		[BPF_LDX|BPF_W|BPF_LEN]  = BPF_S_LDX_W_LEN,
+		[BPF_LDX|BPF_B|BPF_MSH]  = BPF_S_LDX_B_MSH,
+		[BPF_LDX|BPF_IMM]        = BPF_S_LDX_IMM,
+		[BPF_MISC|BPF_TAX]       = BPF_S_MISC_TAX,
+		[BPF_MISC|BPF_TXA]       = BPF_S_MISC_TXA,
+		[BPF_RET|BPF_K]          = BPF_S_RET_K,
+		[BPF_RET|BPF_A]          = BPF_S_RET_A,
+		[BPF_ALU|BPF_DIV|BPF_K]  = BPF_S_ALU_DIV_K,
+		[BPF_LD|BPF_MEM]         = BPF_S_LD_MEM,
+		[BPF_LDX|BPF_MEM]        = BPF_S_LDX_MEM,
+		[BPF_ST]                 = BPF_S_ST,
+		[BPF_STX]                = BPF_S_STX,
+		[BPF_JMP|BPF_JA]         = BPF_S_JMP_JA,
+		[BPF_JMP|BPF_JEQ|BPF_K]  = BPF_S_JMP_JEQ_K,
+		[BPF_JMP|BPF_JEQ|BPF_X]  = BPF_S_JMP_JEQ_X,
+		[BPF_JMP|BPF_JGE|BPF_K]  = BPF_S_JMP_JGE_K,
+		[BPF_JMP|BPF_JGE|BPF_X]  = BPF_S_JMP_JGE_X,
+		[BPF_JMP|BPF_JGT|BPF_K]  = BPF_S_JMP_JGT_K,
+		[BPF_JMP|BPF_JGT|BPF_X]  = BPF_S_JMP_JGT_X,
+		[BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K,
+		[BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X,
+	};
 	int pc;
 
 	if (flen == 0 || flen > BPF_MAXINSNS)
@@ -385,136 +554,31 @@
 
 	/* check the filter code now */
 	for (pc = 0; pc < flen; pc++) {
-		ftest = &filter[pc];
+		struct sock_filter *ftest = &filter[pc];
+		u16 code = ftest->code;
 
-		/* Only allow valid instructions */
-		switch (ftest->code) {
-		case BPF_ALU|BPF_ADD|BPF_K:
-			ftest->code = BPF_S_ALU_ADD_K;
-			break;
-		case BPF_ALU|BPF_ADD|BPF_X:
-			ftest->code = BPF_S_ALU_ADD_X;
-			break;
-		case BPF_ALU|BPF_SUB|BPF_K:
-			ftest->code = BPF_S_ALU_SUB_K;
-			break;
-		case BPF_ALU|BPF_SUB|BPF_X:
-			ftest->code = BPF_S_ALU_SUB_X;
-			break;
-		case BPF_ALU|BPF_MUL|BPF_K:
-			ftest->code = BPF_S_ALU_MUL_K;
-			break;
-		case BPF_ALU|BPF_MUL|BPF_X:
-			ftest->code = BPF_S_ALU_MUL_X;
-			break;
-		case BPF_ALU|BPF_DIV|BPF_X:
-			ftest->code = BPF_S_ALU_DIV_X;
-			break;
-		case BPF_ALU|BPF_AND|BPF_K:
-			ftest->code = BPF_S_ALU_AND_K;
-			break;
-		case BPF_ALU|BPF_AND|BPF_X:
-			ftest->code = BPF_S_ALU_AND_X;
-			break;
-		case BPF_ALU|BPF_OR|BPF_K:
-			ftest->code = BPF_S_ALU_OR_K;
-			break;
-		case BPF_ALU|BPF_OR|BPF_X:
-			ftest->code = BPF_S_ALU_OR_X;
-			break;
-		case BPF_ALU|BPF_LSH|BPF_K:
-			ftest->code = BPF_S_ALU_LSH_K;
-			break;
-		case BPF_ALU|BPF_LSH|BPF_X:
-			ftest->code = BPF_S_ALU_LSH_X;
-			break;
-		case BPF_ALU|BPF_RSH|BPF_K:
-			ftest->code = BPF_S_ALU_RSH_K;
-			break;
-		case BPF_ALU|BPF_RSH|BPF_X:
-			ftest->code = BPF_S_ALU_RSH_X;
-			break;
-		case BPF_ALU|BPF_NEG:
-			ftest->code = BPF_S_ALU_NEG;
-			break;
-		case BPF_LD|BPF_W|BPF_ABS:
-			ftest->code = BPF_S_LD_W_ABS;
-			break;
-		case BPF_LD|BPF_H|BPF_ABS:
-			ftest->code = BPF_S_LD_H_ABS;
-			break;
-		case BPF_LD|BPF_B|BPF_ABS:
-			ftest->code = BPF_S_LD_B_ABS;
-			break;
-		case BPF_LD|BPF_W|BPF_LEN:
-			ftest->code = BPF_S_LD_W_LEN;
-			break;
-		case BPF_LD|BPF_W|BPF_IND:
-			ftest->code = BPF_S_LD_W_IND;
-			break;
-		case BPF_LD|BPF_H|BPF_IND:
-			ftest->code = BPF_S_LD_H_IND;
-			break;
-		case BPF_LD|BPF_B|BPF_IND:
-			ftest->code = BPF_S_LD_B_IND;
-			break;
-		case BPF_LD|BPF_IMM:
-			ftest->code = BPF_S_LD_IMM;
-			break;
-		case BPF_LDX|BPF_W|BPF_LEN:
-			ftest->code = BPF_S_LDX_W_LEN;
-			break;
-		case BPF_LDX|BPF_B|BPF_MSH:
-			ftest->code = BPF_S_LDX_B_MSH;
-			break;
-		case BPF_LDX|BPF_IMM:
-			ftest->code = BPF_S_LDX_IMM;
-			break;
-		case BPF_MISC|BPF_TAX:
-			ftest->code = BPF_S_MISC_TAX;
-			break;
-		case BPF_MISC|BPF_TXA:
-			ftest->code = BPF_S_MISC_TXA;
-			break;
-		case BPF_RET|BPF_K:
-			ftest->code = BPF_S_RET_K;
-			break;
-		case BPF_RET|BPF_A:
-			ftest->code = BPF_S_RET_A;
-			break;
-
+		if (code >= ARRAY_SIZE(codes))
+			return -EINVAL;
+		code = codes[code];
+		if (!code)
+			return -EINVAL;
 		/* Some instructions need special checks */
-
+		switch (code) {
+		case BPF_S_ALU_DIV_K:
 			/* check for division by zero */
-		case BPF_ALU|BPF_DIV|BPF_K:
 			if (ftest->k == 0)
 				return -EINVAL;
-			ftest->code = BPF_S_ALU_DIV_K;
+			ftest->k = reciprocal_value(ftest->k);
 			break;
-
-		/* check for invalid memory addresses */
-		case BPF_LD|BPF_MEM:
+		case BPF_S_LD_MEM:
+		case BPF_S_LDX_MEM:
+		case BPF_S_ST:
+		case BPF_S_STX:
+			/* check for invalid memory addresses */
 			if (ftest->k >= BPF_MEMWORDS)
 				return -EINVAL;
-			ftest->code = BPF_S_LD_MEM;
 			break;
-		case BPF_LDX|BPF_MEM:
-			if (ftest->k >= BPF_MEMWORDS)
-				return -EINVAL;
-			ftest->code = BPF_S_LDX_MEM;
-			break;
-		case BPF_ST:
-			if (ftest->k >= BPF_MEMWORDS)
-				return -EINVAL;
-			ftest->code = BPF_S_ST;
-			break;
-		case BPF_STX:
-			if (ftest->k >= BPF_MEMWORDS)
-				return -EINVAL;
-			ftest->code = BPF_S_STX;
-			break;
-
-		case BPF_JMP|BPF_JA:
+		case BPF_S_JMP_JA:
 			/*
 			 * Note, the large ftest->k might cause loops.
 			 * Compare this with conditional jumps below,
@@ -522,40 +586,7 @@
 			 */
 			if (ftest->k >= (unsigned)(flen-pc-1))
 				return -EINVAL;
-			ftest->code = BPF_S_JMP_JA;
 			break;
-
-		case BPF_JMP|BPF_JEQ|BPF_K:
-			ftest->code = BPF_S_JMP_JEQ_K;
-			break;
-		case BPF_JMP|BPF_JEQ|BPF_X:
-			ftest->code = BPF_S_JMP_JEQ_X;
-			break;
-		case BPF_JMP|BPF_JGE|BPF_K:
-			ftest->code = BPF_S_JMP_JGE_K;
-			break;
-		case BPF_JMP|BPF_JGE|BPF_X:
-			ftest->code = BPF_S_JMP_JGE_X;
-			break;
-		case BPF_JMP|BPF_JGT|BPF_K:
-			ftest->code = BPF_S_JMP_JGT_K;
-			break;
-		case BPF_JMP|BPF_JGT|BPF_X:
-			ftest->code = BPF_S_JMP_JGT_X;
-			break;
-		case BPF_JMP|BPF_JSET|BPF_K:
-			ftest->code = BPF_S_JMP_JSET_K;
-			break;
-		case BPF_JMP|BPF_JSET|BPF_X:
-			ftest->code = BPF_S_JMP_JSET_X;
-			break;
-
-		default:
-			return -EINVAL;
-		}
-
-			/* for conditionals both must be safe */
-		switch (ftest->code) {
 		case BPF_S_JMP_JEQ_K:
 		case BPF_S_JMP_JEQ_X:
 		case BPF_S_JMP_JGE_K:
@@ -564,42 +595,36 @@
 		case BPF_S_JMP_JGT_X:
 		case BPF_S_JMP_JSET_X:
 		case BPF_S_JMP_JSET_K:
+			/* for conditionals both must be safe */
 			if (pc + ftest->jt + 1 >= flen ||
 			    pc + ftest->jf + 1 >= flen)
 				return -EINVAL;
+			break;
 		}
+		ftest->code = code;
 	}
 
 	/* last instruction must be a RET code */
 	switch (filter[flen - 1].code) {
 	case BPF_S_RET_K:
 	case BPF_S_RET_A:
-		return 0;
-		break;
-		default:
-			return -EINVAL;
-		}
+		return check_load_and_stores(filter, flen);
+	}
+	return -EINVAL;
 }
 EXPORT_SYMBOL(sk_chk_filter);
 
 /**
- * 	sk_filter_rcu_release: Release a socket filter by rcu_head
+ * 	sk_filter_release_rcu - Release a socket filter by rcu_head
  *	@rcu: rcu_head that contains the sk_filter to free
  */
-static void sk_filter_rcu_release(struct rcu_head *rcu)
+void sk_filter_release_rcu(struct rcu_head *rcu)
 {
 	struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
 
-	sk_filter_release(fp);
+	kfree(fp);
 }
-
-static void sk_filter_delayed_uncharge(struct sock *sk, struct sk_filter *fp)
-{
-	unsigned int size = sk_filter_len(fp);
-
-	atomic_sub(size, &sk->sk_omem_alloc);
-	call_rcu_bh(&fp->rcu, sk_filter_rcu_release);
-}
+EXPORT_SYMBOL(sk_filter_release_rcu);
 
 /**
  *	sk_attach_filter - attach a socket filter
@@ -643,7 +668,7 @@
 	rcu_assign_pointer(sk->sk_filter, fp);
 
 	if (old_fp)
-		sk_filter_delayed_uncharge(sk, old_fp);
+		sk_filter_uncharge(sk, old_fp);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(sk_attach_filter);
@@ -657,7 +682,7 @@
 					   sock_owned_by_user(sk));
 	if (filter) {
 		rcu_assign_pointer(sk->sk_filter, NULL);
-		sk_filter_delayed_uncharge(sk, filter);
+		sk_filter_uncharge(sk, filter);
 		ret = 0;
 	}
 	return ret;
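Taken together, the filter.c changes keep the userspace contract intact: programs are still classic BPF arrays validated by sk_chk_filter() (forward jumps only, last instruction a RET) and attached with SO_ATTACH_FILTER. A minimal sketch that accepts only IPv4 UDP on an Ethernet packet socket:

	#include <netinet/in.h>
	#include <sys/socket.h>
	#include <linux/filter.h>

	static int attach_udp_filter(int fd)
	{
		struct sock_filter insns[] = {
			/* A = IP protocol byte (offset 23 in an Ethernet frame) */
			{ BPF_LD  | BPF_B   | BPF_ABS, 0, 0, 23 },
			/* if (A == IPPROTO_UDP) accept else drop */
			{ BPF_JMP | BPF_JEQ | BPF_K,   0, 1, IPPROTO_UDP },
			{ BPF_RET | BPF_K,             0, 0, 0xffff },	/* accept */
			{ BPF_RET | BPF_K,             0, 0, 0 },	/* drop */
		};
		struct sock_fprog prog = {
			.len	= sizeof(insns) / sizeof(insns[0]),
			.filter	= insns,
		};

		return setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
				  &prog, sizeof(prog));
	}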
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index a5ff5a8..85e8b53 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -706,21 +706,24 @@
 static void rx_queue_release(struct kobject *kobj)
 {
 	struct netdev_rx_queue *queue = to_rx_queue(kobj);
-	struct netdev_rx_queue *first = queue->first;
 	struct rps_map *map;
 	struct rps_dev_flow_table *flow_table;
 
 
 	map = rcu_dereference_raw(queue->rps_map);
-	if (map)
+	if (map) {
+		RCU_INIT_POINTER(queue->rps_map, NULL);
 		call_rcu(&map->rcu, rps_map_release);
+	}
 
 	flow_table = rcu_dereference_raw(queue->rps_flow_table);
-	if (flow_table)
+	if (flow_table) {
+		RCU_INIT_POINTER(queue->rps_flow_table, NULL);
 		call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
+	}
 
-	if (atomic_dec_and_test(&first->count))
-		kfree(first);
+	memset(kobj, 0, sizeof(*kobj));
+	dev_put(queue->dev);
 }
 
 static struct kobj_type rx_queue_ktype = {
@@ -732,7 +735,6 @@
 static int rx_queue_add_kobject(struct net_device *net, int index)
 {
 	struct netdev_rx_queue *queue = net->_rx + index;
-	struct netdev_rx_queue *first = queue->first;
 	struct kobject *kobj = &queue->kobj;
 	int error = 0;
 
@@ -745,14 +747,16 @@
 	}
 
 	kobject_uevent(kobj, KOBJ_ADD);
-	atomic_inc(&first->count);
+	dev_hold(queue->dev);
 
 	return error;
 }
+#endif /* CONFIG_RPS */
 
 int
 net_rx_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
 {
+#ifdef CONFIG_RPS
 	int i;
 	int error = 0;
 
@@ -768,23 +772,422 @@
 		kobject_put(&net->_rx[i].kobj);
 
 	return error;
+#else
+	return 0;
+#endif
 }
 
-static int rx_queue_register_kobjects(struct net_device *net)
+#ifdef CONFIG_XPS
+/*
+ * netdev_queue sysfs structures and functions.
+ */
+struct netdev_queue_attribute {
+	struct attribute attr;
+	ssize_t (*show)(struct netdev_queue *queue,
+	    struct netdev_queue_attribute *attr, char *buf);
+	ssize_t (*store)(struct netdev_queue *queue,
+	    struct netdev_queue_attribute *attr, const char *buf, size_t len);
+};
+#define to_netdev_queue_attr(_attr) container_of(_attr,		\
+    struct netdev_queue_attribute, attr)
+
+#define to_netdev_queue(obj) container_of(obj, struct netdev_queue, kobj)
+
+static ssize_t netdev_queue_attr_show(struct kobject *kobj,
+				      struct attribute *attr, char *buf)
 {
+	struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
+	struct netdev_queue *queue = to_netdev_queue(kobj);
+
+	if (!attribute->show)
+		return -EIO;
+
+	return attribute->show(queue, attribute, buf);
+}
+
+static ssize_t netdev_queue_attr_store(struct kobject *kobj,
+				       struct attribute *attr,
+				       const char *buf, size_t count)
+{
+	struct netdev_queue_attribute *attribute = to_netdev_queue_attr(attr);
+	struct netdev_queue *queue = to_netdev_queue(kobj);
+
+	if (!attribute->store)
+		return -EIO;
+
+	return attribute->store(queue, attribute, buf, count);
+}
+
+static const struct sysfs_ops netdev_queue_sysfs_ops = {
+	.show = netdev_queue_attr_show,
+	.store = netdev_queue_attr_store,
+};
+
+static inline unsigned int get_netdev_queue_index(struct netdev_queue *queue)
+{
+	struct net_device *dev = queue->dev;
+	int i;
+
+	for (i = 0; i < dev->num_tx_queues; i++)
+		if (queue == &dev->_tx[i])
+			break;
+
+	BUG_ON(i >= dev->num_tx_queues);
+
+	return i;
+}
+
+
+static ssize_t show_xps_map(struct netdev_queue *queue,
+			    struct netdev_queue_attribute *attribute, char *buf)
+{
+	struct net_device *dev = queue->dev;
+	struct xps_dev_maps *dev_maps;
+	cpumask_var_t mask;
+	unsigned long index;
+	size_t len = 0;
+	int i;
+
+	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
+		return -ENOMEM;
+
+	index = get_netdev_queue_index(queue);
+
+	rcu_read_lock();
+	dev_maps = rcu_dereference(dev->xps_maps);
+	if (dev_maps) {
+		for_each_possible_cpu(i) {
+			struct xps_map *map =
+			    rcu_dereference(dev_maps->cpu_map[i]);
+			if (map) {
+				int j;
+				for (j = 0; j < map->len; j++) {
+					if (map->queues[j] == index) {
+						cpumask_set_cpu(i, mask);
+						break;
+					}
+				}
+			}
+		}
+	}
+	rcu_read_unlock();
+
+	len += cpumask_scnprintf(buf + len, PAGE_SIZE, mask);
+	if (PAGE_SIZE - len < 3) {
+		free_cpumask_var(mask);
+		return -EINVAL;
+	}
+
+	free_cpumask_var(mask);
+	len += sprintf(buf + len, "\n");
+	return len;
+}
+
+static void xps_map_release(struct rcu_head *rcu)
+{
+	struct xps_map *map = container_of(rcu, struct xps_map, rcu);
+
+	kfree(map);
+}
+
+static void xps_dev_maps_release(struct rcu_head *rcu)
+{
+	struct xps_dev_maps *dev_maps =
+	    container_of(rcu, struct xps_dev_maps, rcu);
+
+	kfree(dev_maps);
+}
+
+static DEFINE_MUTEX(xps_map_mutex);
+#define xmap_dereference(P)		\
+	rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
+
+static ssize_t store_xps_map(struct netdev_queue *queue,
+		      struct netdev_queue_attribute *attribute,
+		      const char *buf, size_t len)
+{
+	struct net_device *dev = queue->dev;
+	cpumask_var_t mask;
+	int err, i, cpu, pos, map_len, alloc_len, need_set;
+	unsigned long index;
+	struct xps_map *map, *new_map;
+	struct xps_dev_maps *dev_maps, *new_dev_maps;
+	int nonempty = 0;
+	int numa_node = -2;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
+		return -ENOMEM;
+
+	index = get_netdev_queue_index(queue);
+
+	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
+	if (err) {
+		free_cpumask_var(mask);
+		return err;
+	}
+
+	new_dev_maps = kzalloc(max_t(unsigned,
+	    XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES), GFP_KERNEL);
+	if (!new_dev_maps) {
+		free_cpumask_var(mask);
+		return -ENOMEM;
+	}
+
+	mutex_lock(&xps_map_mutex);
+
+	dev_maps = xmap_dereference(dev->xps_maps);
+
+	for_each_possible_cpu(cpu) {
+		map = dev_maps ?
+			xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
+		new_map = map;
+		if (map) {
+			for (pos = 0; pos < map->len; pos++)
+				if (map->queues[pos] == index)
+					break;
+			map_len = map->len;
+			alloc_len = map->alloc_len;
+		} else
+			pos = map_len = alloc_len = 0;
+
+		need_set = cpu_isset(cpu, *mask) && cpu_online(cpu);
+#ifdef CONFIG_NUMA
+		if (need_set) {
+			if (numa_node == -2)
+				numa_node = cpu_to_node(cpu);
+			else if (numa_node != cpu_to_node(cpu))
+				numa_node = -1;
+		}
+#endif
+		if (need_set && pos >= map_len) {
+			/* Need to add queue to this CPU's map */
+			if (map_len >= alloc_len) {
+				alloc_len = alloc_len ?
+				    2 * alloc_len : XPS_MIN_MAP_ALLOC;
+				new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len),
+						       GFP_KERNEL,
+						       cpu_to_node(cpu));
+				if (!new_map)
+					goto error;
+				new_map->alloc_len = alloc_len;
+				for (i = 0; i < map_len; i++)
+					new_map->queues[i] = map->queues[i];
+				new_map->len = map_len;
+			}
+			new_map->queues[new_map->len++] = index;
+		} else if (!need_set && pos < map_len) {
+			/* Need to remove queue from this CPU's map */
+			if (map_len > 1)
+				new_map->queues[pos] =
+				    new_map->queues[--new_map->len];
+			else
+				new_map = NULL;
+		}
+		RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], new_map);
+	}
+
+	/* Cleanup old maps */
+	for_each_possible_cpu(cpu) {
+		map = dev_maps ?
+			xmap_dereference(dev_maps->cpu_map[cpu]) : NULL;
+		if (map && xmap_dereference(new_dev_maps->cpu_map[cpu]) != map)
+			call_rcu(&map->rcu, xps_map_release);
+		if (new_dev_maps->cpu_map[cpu])
+			nonempty = 1;
+	}
+
+	if (nonempty)
+		rcu_assign_pointer(dev->xps_maps, new_dev_maps);
+	else {
+		kfree(new_dev_maps);
+		rcu_assign_pointer(dev->xps_maps, NULL);
+	}
+
+	if (dev_maps)
+		call_rcu(&dev_maps->rcu, xps_dev_maps_release);
+
+	netdev_queue_numa_node_write(queue, (numa_node >= 0) ? numa_node : -1);
+
+	mutex_unlock(&xps_map_mutex);
+
+	free_cpumask_var(mask);
+	return len;
+
+error:
+	mutex_unlock(&xps_map_mutex);
+
+	if (new_dev_maps)
+		for_each_possible_cpu(i)
+			kfree(rcu_dereference_protected(
+				new_dev_maps->cpu_map[i],
+				1));
+	kfree(new_dev_maps);
+	free_cpumask_var(mask);
+	return -ENOMEM;
+}
+
+static struct netdev_queue_attribute xps_cpus_attribute =
+    __ATTR(xps_cpus, S_IRUGO | S_IWUSR, show_xps_map, store_xps_map);
+
+static struct attribute *netdev_queue_default_attrs[] = {
+	&xps_cpus_attribute.attr,
+	NULL
+};
+
+static void netdev_queue_release(struct kobject *kobj)
+{
+	struct netdev_queue *queue = to_netdev_queue(kobj);
+	struct net_device *dev = queue->dev;
+	struct xps_dev_maps *dev_maps;
+	struct xps_map *map;
+	unsigned long index;
+	int i, pos, nonempty = 0;
+
+	index = get_netdev_queue_index(queue);
+
+	mutex_lock(&xps_map_mutex);
+	dev_maps = xmap_dereference(dev->xps_maps);
+
+	if (dev_maps) {
+		for_each_possible_cpu(i) {
+			map = xmap_dereference(dev_maps->cpu_map[i]);
+			if (!map)
+				continue;
+
+			for (pos = 0; pos < map->len; pos++)
+				if (map->queues[pos] == index)
+					break;
+
+			if (pos < map->len) {
+				if (map->len > 1)
+					map->queues[pos] =
+					    map->queues[--map->len];
+				else {
+					RCU_INIT_POINTER(dev_maps->cpu_map[i],
+					    NULL);
+					call_rcu(&map->rcu, xps_map_release);
+					map = NULL;
+				}
+			}
+			if (map)
+				nonempty = 1;
+		}
+
+		if (!nonempty) {
+			RCU_INIT_POINTER(dev->xps_maps, NULL);
+			call_rcu(&dev_maps->rcu, xps_dev_maps_release);
+		}
+	}
+
+	mutex_unlock(&xps_map_mutex);
+
+	memset(kobj, 0, sizeof(*kobj));
+	dev_put(queue->dev);
+}
+
+static struct kobj_type netdev_queue_ktype = {
+	.sysfs_ops = &netdev_queue_sysfs_ops,
+	.release = netdev_queue_release,
+	.default_attrs = netdev_queue_default_attrs,
+};
+
+static int netdev_queue_add_kobject(struct net_device *net, int index)
+{
+	struct netdev_queue *queue = net->_tx + index;
+	struct kobject *kobj = &queue->kobj;
+	int error = 0;
+
+	kobj->kset = net->queues_kset;
+	error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
+	    "tx-%u", index);
+	if (error) {
+		kobject_put(kobj);
+		return error;
+	}
+
+	kobject_uevent(kobj, KOBJ_ADD);
+	dev_hold(queue->dev);
+
+	return error;
+}
+#endif /* CONFIG_XPS */
+
+int
+netdev_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
+{
+#ifdef CONFIG_XPS
+	int i;
+	int error = 0;
+
+	for (i = old_num; i < new_num; i++) {
+		error = netdev_queue_add_kobject(net, i);
+		if (error) {
+			new_num = old_num;
+			break;
+		}
+	}
+
+	while (--i >= new_num)
+		kobject_put(&net->_tx[i].kobj);
+
+	return error;
+#else
+	return 0;
+#endif
+}
+
+static int register_queue_kobjects(struct net_device *net)
+{
+	int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;
+
+#if defined(CONFIG_RPS) || defined(CONFIG_XPS)
 	net->queues_kset = kset_create_and_add("queues",
 	    NULL, &net->dev.kobj);
 	if (!net->queues_kset)
 		return -ENOMEM;
-	return net_rx_queue_update_kobjects(net, 0, net->real_num_rx_queues);
+#endif
+
+#ifdef CONFIG_RPS
+	real_rx = net->real_num_rx_queues;
+#endif
+	real_tx = net->real_num_tx_queues;
+
+	error = net_rx_queue_update_kobjects(net, 0, real_rx);
+	if (error)
+		goto error;
+	rxq = real_rx;
+
+	error = netdev_queue_update_kobjects(net, 0, real_tx);
+	if (error)
+		goto error;
+	txq = real_tx;
+
+	return 0;
+
+error:
+	netdev_queue_update_kobjects(net, txq, 0);
+	net_rx_queue_update_kobjects(net, rxq, 0);
+	return error;
 }
 
-static void rx_queue_remove_kobjects(struct net_device *net)
+static void remove_queue_kobjects(struct net_device *net)
 {
-	net_rx_queue_update_kobjects(net, net->real_num_rx_queues, 0);
+	int real_rx = 0, real_tx = 0;
+
+#ifdef CONFIG_RPS
+	real_rx = net->real_num_rx_queues;
+#endif
+	real_tx = net->real_num_tx_queues;
+
+	net_rx_queue_update_kobjects(net, real_rx, 0);
+	netdev_queue_update_kobjects(net, real_tx, 0);
+#if defined(CONFIG_RPS) || defined(CONFIG_XPS)
 	kset_unregister(net->queues_kset);
+#endif
 }
-#endif /* CONFIG_RPS */
 
 static const void *net_current_ns(void)
 {
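The sysfs plumbing above exposes one xps_cpus file per transmit queue, taking a hex CPU mask. A hedged usage sketch; the "eth0" device is an assumption, while the "queues"/"tx-%u" path components follow the kset and kobject names registered above:

	#include <fcntl.h>
	#include <unistd.h>

	static int set_xps_mask(void)
	{
		int fd = open("/sys/class/net/eth0/queues/tx-0/xps_cpus",
			      O_WRONLY);

		if (fd < 0)
			return -1;
		/* Mask 0x3: transmits from CPUs 0 and 1 prefer queue tx-0. */
		if (write(fd, "3\n", 2) < 0) {
			close(fd);
			return -1;
		}
		close(fd);
		return 0;
	}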
@@ -883,9 +1286,7 @@
 
 	kobject_get(&dev->kobj);
 
-#ifdef CONFIG_RPS
-	rx_queue_remove_kobjects(net);
-#endif
+	remove_queue_kobjects(net);
 
 	device_del(dev);
 }
@@ -924,13 +1325,11 @@
 	if (error)
 		return error;
 
-#ifdef CONFIG_RPS
-	error = rx_queue_register_kobjects(net);
+	error = register_queue_kobjects(net);
 	if (error) {
 		device_del(dev);
 		return error;
 	}
-#endif
 
 	return error;
 }
diff --git a/net/core/net-sysfs.h b/net/core/net-sysfs.h
index 778e157..bd7751e 100644
--- a/net/core/net-sysfs.h
+++ b/net/core/net-sysfs.h
@@ -4,8 +4,8 @@
 int netdev_kobject_init(void);
 int netdev_register_kobject(struct net_device *);
 void netdev_unregister_kobject(struct net_device *);
-#ifdef CONFIG_RPS
 int net_rx_queue_update_kobjects(struct net_device *, int old_num, int new_num);
-#endif
+int netdev_queue_update_kobjects(struct net_device *net,
+				 int old_num, int new_num);
 
 #endif
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 4e98ffa..ee38acb 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -76,8 +76,7 @@
 
 		local_irq_save(flags);
 		__netif_tx_lock(txq, smp_processor_id());
-		if (netif_tx_queue_stopped(txq) ||
-		    netif_tx_queue_frozen(txq) ||
+		if (netif_tx_queue_frozen_or_stopped(txq) ||
 		    ops->ndo_start_xmit(skb, dev) != NETDEV_TX_OK) {
 			skb_queue_head(&npinfo->txq, skb);
 			__netif_tx_unlock(txq);
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index fbce4b0..18fe20d 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -378,6 +378,7 @@
 
 	u16 queue_map_min;
 	u16 queue_map_max;
+	__u32 skb_priority;	/* skb priority field */
 	int node;               /* Memory node */
 
 #ifdef CONFIG_XFRM
@@ -394,6 +395,8 @@
 	__be32 tv_usec;
 };
 
+static bool pktgen_exiting __read_mostly;
+
 struct pktgen_thread {
 	spinlock_t if_lock;		/* for list of devices */
 	struct list_head if_list;	/* All device here */
@@ -547,6 +550,10 @@
 		   pkt_dev->queue_map_min,
 		   pkt_dev->queue_map_max);
 
+	if (pkt_dev->skb_priority)
+		seq_printf(seq, "     skb_priority: %u\n",
+			   pkt_dev->skb_priority);
+
 	if (pkt_dev->flags & F_IPV6) {
 		char b1[128], b2[128], b3[128];
 		fmt_ip6(b1, pkt_dev->in6_saddr.s6_addr);
@@ -887,7 +894,7 @@
 	i += len;
 
 	if (debug) {
-		size_t copy = min(count, 1023);
+		size_t copy = min_t(size_t, count, 1023);
 		char tb[copy + 1];
 		if (copy_from_user(tb, user_buffer, copy))
 			return -EFAULT;
@@ -1711,6 +1718,18 @@
 		return count;
 	}
 
+	if (!strcmp(name, "skb_priority")) {
+		len = num_arg(&user_buffer[i], 9, &value);
+		if (len < 0)
+			return len;
+
+		i += len;
+		pkt_dev->skb_priority = value;
+		sprintf(pg_result, "OK: skb_priority=%i",
+			pkt_dev->skb_priority);
+		return count;
+	}
+
 	sprintf(pkt_dev->result, "No such parameter \"%s\"", name);
 	return -EINVAL;
 }
@@ -2612,8 +2631,8 @@
 	/* Update any of the values, used when we're incrementing various
 	 * fields.
 	 */
-	queue_map = pkt_dev->cur_queue_map;
 	mod_cur_headers(pkt_dev);
+	queue_map = pkt_dev->cur_queue_map;
 
 	datalen = (odev->hard_header_len + 16) & ~0xf;
 
@@ -2641,6 +2660,7 @@
 		sprintf(pkt_dev->result, "No memory");
 		return NULL;
 	}
+	prefetchw(skb->data);
 
 	skb_reserve(skb, datalen);
 
@@ -2671,6 +2691,8 @@
 	skb->transport_header = skb->network_header + sizeof(struct iphdr);
 	skb_put(skb, sizeof(struct iphdr) + sizeof(struct udphdr));
 	skb_set_queue_mapping(skb, queue_map);
+	skb->priority = pkt_dev->skb_priority;
+
 	iph = ip_hdr(skb);
 	udph = udp_hdr(skb);
 
@@ -2976,8 +2998,8 @@
 	/* Update any of the values, used when we're incrementing various
 	 * fields.
 	 */
-	queue_map = pkt_dev->cur_queue_map;
 	mod_cur_headers(pkt_dev);
+	queue_map = pkt_dev->cur_queue_map;
 
 	skb = __netdev_alloc_skb(odev,
 				 pkt_dev->cur_pkt_size + 64
@@ -2986,6 +3008,7 @@
 		sprintf(pkt_dev->result, "No memory");
 		return NULL;
 	}
+	prefetchw(skb->data);
 
 	skb_reserve(skb, 16);
 
@@ -3016,6 +3039,7 @@
 	skb->transport_header = skb->network_header + sizeof(struct ipv6hdr);
 	skb_put(skb, sizeof(struct ipv6hdr) + sizeof(struct udphdr));
 	skb_set_queue_mapping(skb, queue_map);
+	skb->priority = pkt_dev->skb_priority;
 	iph = ipv6_hdr(skb);
 	udph = udp_hdr(skb);
 
@@ -3431,11 +3455,6 @@
 
 	remove_proc_entry(t->tsk->comm, pg_proc_dir);
 
-	mutex_lock(&pktgen_thread_lock);
-
-	list_del(&t->th_list);
-
-	mutex_unlock(&pktgen_thread_lock);
 }
 
 static void pktgen_resched(struct pktgen_dev *pkt_dev)
@@ -3510,7 +3529,7 @@
 
 	__netif_tx_lock_bh(txq);
 
-	if (unlikely(netif_tx_queue_stopped(txq) || netif_tx_queue_frozen(txq))) {
+	if (unlikely(netif_tx_queue_frozen_or_stopped(txq))) {
 		ret = NETDEV_TX_BUSY;
 		pkt_dev->last_ok = 0;
 		goto unlock;
@@ -3582,6 +3601,8 @@
 		pkt_dev = next_to_run(t);
 
 		if (unlikely(!pkt_dev && t->control == 0)) {
+			if (pktgen_exiting)
+				break;
 			wait_event_interruptible_timeout(t->queue,
 							 t->control != 0,
 							 HZ/10);
@@ -3634,6 +3655,13 @@
 	pr_debug("%s removing thread\n", t->tsk->comm);
 	pktgen_rem_thread(t);
 
+	/* Wait for kthread_stop */
+	while (!kthread_should_stop()) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule();
+	}
+	__set_current_state(TASK_RUNNING);
+
 	return 0;
 }
 
@@ -3908,6 +3936,7 @@
 	struct list_head *q, *n;
 
 	/* Stop all interfaces & threads */
+	pktgen_exiting = true;
 
 	list_for_each_safe(q, n, &pktgen_threads) {
 		t = list_entry(q, struct pktgen_thread, th_list);
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index 7552495..182236b 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -33,6 +33,7 @@
  * Note : Dont forget somaxconn that may limit backlog too.
  */
 int sysctl_max_syn_backlog = 256;
+EXPORT_SYMBOL(sysctl_max_syn_backlog);
 
 int reqsk_queue_alloc(struct request_sock_queue *queue,
 		      unsigned int nr_table_entries)
@@ -45,9 +46,7 @@
 	nr_table_entries = roundup_pow_of_two(nr_table_entries + 1);
 	lopt_size += nr_table_entries * sizeof(struct request_sock *);
 	if (lopt_size > PAGE_SIZE)
-		lopt = __vmalloc(lopt_size,
-			GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
-			PAGE_KERNEL);
+		lopt = vzalloc(lopt_size);
 	else
 		lopt = kzalloc(lopt_size, GFP_KERNEL);
 	if (lopt == NULL)
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 8121268..750db57 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -347,16 +347,106 @@
 	if (!ops)
 		return 0;
 
-	size = nlmsg_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
-	       nlmsg_total_size(strlen(ops->kind) + 1);	 /* IFLA_INFO_KIND */
+	size = nla_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
+	       nla_total_size(strlen(ops->kind) + 1);  /* IFLA_INFO_KIND */
 
 	if (ops->get_size)
 		/* IFLA_INFO_DATA + nested data */
-		size += nlmsg_total_size(sizeof(struct nlattr)) +
+		size += nla_total_size(sizeof(struct nlattr)) +
 			ops->get_size(dev);
 
 	if (ops->get_xstats_size)
-		size += ops->get_xstats_size(dev);	/* IFLA_INFO_XSTATS */
+		/* IFLA_INFO_XSTATS */
+		size += nla_total_size(ops->get_xstats_size(dev));
+
+	return size;
+}
+
+static LIST_HEAD(rtnl_af_ops);
+
+static const struct rtnl_af_ops *rtnl_af_lookup(const int family)
+{
+	const struct rtnl_af_ops *ops;
+
+	list_for_each_entry(ops, &rtnl_af_ops, list) {
+		if (ops->family == family)
+			return ops;
+	}
+
+	return NULL;
+}
+
+/**
+ * __rtnl_af_register - Register rtnl_af_ops with rtnetlink.
+ * @ops: struct rtnl_af_ops * to register
+ *
+ * The caller must hold the rtnl_mutex.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int __rtnl_af_register(struct rtnl_af_ops *ops)
+{
+	list_add_tail(&ops->list, &rtnl_af_ops);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(__rtnl_af_register);
+
+/**
+ * rtnl_af_register - Register rtnl_af_ops with rtnetlink.
+ * @ops: struct rtnl_af_ops * to register
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int rtnl_af_register(struct rtnl_af_ops *ops)
+{
+	int err;
+
+	rtnl_lock();
+	err = __rtnl_af_register(ops);
+	rtnl_unlock();
+	return err;
+}
+EXPORT_SYMBOL_GPL(rtnl_af_register);
+
+/**
+ * __rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
+ * @ops: struct rtnl_af_ops * to unregister
+ *
+ * The caller must hold the rtnl_mutex.
+ */
+void __rtnl_af_unregister(struct rtnl_af_ops *ops)
+{
+	list_del(&ops->list);
+}
+EXPORT_SYMBOL_GPL(__rtnl_af_unregister);
+
+/**
+ * rtnl_af_unregister - Unregister rtnl_af_ops from rtnetlink.
+ * @ops: struct rtnl_af_ops * to unregister
+ */
+void rtnl_af_unregister(struct rtnl_af_ops *ops)
+{
+	rtnl_lock();
+	__rtnl_af_unregister(ops);
+	rtnl_unlock();
+}
+EXPORT_SYMBOL_GPL(rtnl_af_unregister);
+
+static size_t rtnl_link_get_af_size(const struct net_device *dev)
+{
+	struct rtnl_af_ops *af_ops;
+	size_t size;
+
+	/* IFLA_AF_SPEC */
+	size = nla_total_size(sizeof(struct nlattr));
+
+	list_for_each_entry(af_ops, &rtnl_af_ops, list) {
+		if (af_ops->get_link_af_size) {
+			/* AF_* + nested data */
+			size += nla_total_size(sizeof(struct nlattr)) +
+				af_ops->get_link_af_size(dev);
+		}
+	}
 
 	return size;
 }
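The new rtnl_af_ops hooks let an address family contribute a nested IFLA_AF_SPEC attribute per link. A hedged sketch of a registrant; the callback bodies are placeholders, and the struct layout is assumed from how the hooks are invoked above:

	#include <linux/init.h>
	#include <net/rtnetlink.h>

	static int demo_fill_link_af(struct sk_buff *skb,
				     const struct net_device *dev)
	{
		return -ENODATA;	/* nothing to dump; the nest gets trimmed */
	}

	static size_t demo_get_link_af_size(const struct net_device *dev)
	{
		return 0;		/* counted into rtnl_link_get_af_size() */
	}

	static struct rtnl_af_ops demo_af_ops = {
		.family		  = AF_INET,
		.fill_link_af	  = demo_fill_link_af,
		.get_link_af_size = demo_get_link_af_size,
	};

	static int __init demo_init(void)
	{
		return rtnl_af_register(&demo_af_ops);	/* takes rtnl_lock itself */
	}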
@@ -670,7 +760,8 @@
 	       + nla_total_size(4) /* IFLA_NUM_VF */
 	       + rtnl_vfinfo_size(dev) /* IFLA_VFINFO_LIST */
 	       + rtnl_port_size(dev) /* IFLA_VF_PORTS + IFLA_PORT_SELF */
-	       + rtnl_link_get_size(dev); /* IFLA_LINKINFO */
+	       + rtnl_link_get_size(dev) /* IFLA_LINKINFO */
+	       + rtnl_link_get_af_size(dev); /* IFLA_AF_SPEC */
 }
 
 static int rtnl_vf_ports_fill(struct sk_buff *skb, struct net_device *dev)
@@ -756,7 +847,8 @@
 	struct nlmsghdr *nlh;
 	struct rtnl_link_stats64 temp;
 	const struct rtnl_link_stats64 *stats;
-	struct nlattr *attr;
+	struct nlattr *attr, *af_spec;
+	struct rtnl_af_ops *af_ops;
 
 	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
 	if (nlh == NULL)
@@ -865,6 +957,36 @@
 			goto nla_put_failure;
 	}
 
+	if (!(af_spec = nla_nest_start(skb, IFLA_AF_SPEC)))
+		goto nla_put_failure;
+
+	list_for_each_entry(af_ops, &rtnl_af_ops, list) {
+		if (af_ops->fill_link_af) {
+			struct nlattr *af;
+			int err;
+
+			if (!(af = nla_nest_start(skb, af_ops->family)))
+				goto nla_put_failure;
+
+			err = af_ops->fill_link_af(skb, dev);
+
+			/*
+			 * The fill_link_af() callback may return -ENODATA to
+			 * indicate that there was no data to be dumped. This
+			 * is not an error; it means we should trim the
+			 * attribute header and continue.
+			 */
+			if (err == -ENODATA)
+				nla_nest_cancel(skb, af);
+			else if (err < 0)
+				goto nla_put_failure;
+
+			nla_nest_end(skb, af);
+		}
+	}
+
+	nla_nest_end(skb, af_spec);
+
 	return nlmsg_end(skb, nlh);
 
 nla_put_failure:
@@ -923,6 +1045,7 @@
 	[IFLA_VFINFO_LIST]	= { .type = NLA_NESTED },
 	[IFLA_VF_PORTS]		= { .type = NLA_NESTED },
 	[IFLA_PORT_SELF]	= { .type = NLA_NESTED },
+	[IFLA_AF_SPEC]		= { .type = NLA_NESTED },
 };
 EXPORT_SYMBOL(ifla_policy);
 
@@ -984,6 +1107,28 @@
 			return -EINVAL;
 	}
 
+	if (tb[IFLA_AF_SPEC]) {
+		struct nlattr *af;
+		int rem, err;
+
+		nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
+			const struct rtnl_af_ops *af_ops;
+
+			if (!(af_ops = rtnl_af_lookup(nla_type(af))))
+				return -EAFNOSUPPORT;
+
+			if (!af_ops->set_link_af)
+				return -EOPNOTSUPP;
+
+			if (af_ops->validate_link_af) {
+				err = af_ops->validate_link_af(dev,
+							tb[IFLA_AF_SPEC]);
+				if (err < 0)
+					return err;
+			}
+		}
+	}
+
 	return 0;
 }
 
@@ -1224,6 +1369,24 @@
 			goto errout;
 		modified = 1;
 	}
+
+	if (tb[IFLA_AF_SPEC]) {
+		struct nlattr *af;
+		int rem;
+
+		nla_for_each_nested(af, tb[IFLA_AF_SPEC], rem) {
+			const struct rtnl_af_ops *af_ops;
+
+			if (!(af_ops = rtnl_af_lookup(nla_type(af))))
+				BUG();
+
+			err = af_ops->set_link_af(dev, af);
+			if (err < 0)
+				goto errout;
+
+			modified = 1;
+		}
+	}
 	err = 0;
 
 errout:
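
The per-family hooks introduced above are consumed by address-family
modules. As a hedged sketch only -- the callback signatures are inferred
from the call sites in this patch, and all demo_* names are invented --
a module could hook into IFLA_AF_SPEC handling roughly like this:

	#include <net/rtnetlink.h>	/* assumed home of struct rtnl_af_ops */

	static int demo_fill_link_af(struct sk_buff *skb,
				     const struct net_device *dev)
	{
		/* Emit AF-specific nested attributes here; -ENODATA
		 * asks rtnl_fill_ifinfo() to trim the empty nest.
		 */
		return -ENODATA;
	}

	static size_t demo_get_link_af_size(const struct net_device *dev)
	{
		return nla_total_size(4);	/* worst-case payload size */
	}

	static struct rtnl_af_ops demo_af_ops = {
		.family		  = AF_INET,
		.fill_link_af	  = demo_fill_link_af,
		.get_link_af_size = demo_get_link_af_size,
	};

	static int __init demo_init(void)
	{
		return rtnl_af_register(&demo_af_ops);	/* takes rtnl lock */
	}
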
diff --git a/net/core/scm.c b/net/core/scm.c
index 413cab8..bbe4544 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -79,10 +79,11 @@
 			return -ENOMEM;
 		*fplp = fpl;
 		fpl->count = 0;
+		fpl->max = SCM_MAX_FD;
 	}
 	fpp = &fpl->fp[fpl->count];
 
-	if (fpl->count + num > SCM_MAX_FD)
+	if (fpl->count + num > fpl->max)
 		return -EINVAL;
 
 	/*
@@ -331,11 +332,12 @@
 	if (!fpl)
 		return NULL;
 
-	new_fpl = kmalloc(sizeof(*fpl), GFP_KERNEL);
+	new_fpl = kmemdup(fpl, offsetof(struct scm_fp_list, fp[fpl->count]),
+			  GFP_KERNEL);
 	if (new_fpl) {
-		for (i=fpl->count-1; i>=0; i--)
+		for (i = 0; i < fpl->count; i++)
 			get_file(fpl->fp[i]);
-		memcpy(new_fpl, fpl, sizeof(*fpl));
+		new_fpl->max = new_fpl->count;
 	}
 	return new_fpl;
 }
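
The rewritten scm_fp_dup() above uses a common idiom: kmemdup() with an
offsetof() into the trailing array copies the header plus only the slots
actually in use, rather than the full fixed-size structure. A minimal,
self-contained illustration (struct demo_list is invented for this sketch):

	#include <linux/slab.h>
	#include <linux/stddef.h>

	struct demo_list {
		short count;
		short max;
		void *item[8];
	};

	/* Duplicate the header and the first 'count' slots only. */
	static struct demo_list *demo_dup(const struct demo_list *src)
	{
		return kmemdup(src,
			       offsetof(struct demo_list, item[src->count]),
			       GFP_KERNEL);
	}
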
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 104f844..8814a9a 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -778,6 +778,28 @@
 
 	size = SKB_DATA_ALIGN(size);
 
+	/* Check if we can avoid taking references on fragments if we own
+	 * the last reference on skb->head. (see skb_release_data())
+	 */
+	if (!skb->cloned)
+		fastpath = true;
+	else {
+		int delta = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1;
+
+		fastpath = atomic_read(&skb_shinfo(skb)->dataref) == delta;
+	}
+
+	if (fastpath &&
+	    size + sizeof(struct skb_shared_info) <= ksize(skb->head)) {
+		memmove(skb->head + size, skb_shinfo(skb),
+			offsetof(struct skb_shared_info,
+				 frags[skb_shinfo(skb)->nr_frags]));
+		memmove(skb->head + nhead, skb->head,
+			skb_tail_pointer(skb) - skb->head);
+		off = nhead;
+		goto adjust_others;
+	}
+
 	data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
 	if (!data)
 		goto nodata;
@@ -791,17 +813,6 @@
 	       skb_shinfo(skb),
 	       offsetof(struct skb_shared_info, frags[skb_shinfo(skb)->nr_frags]));
 
-	/* Check if we can avoid taking references on fragments if we own
-	 * the last reference on skb->head. (see skb_release_data())
-	 */
-	if (!skb->cloned)
-		fastpath = true;
-	else {
-		int delta = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1;
-
-		fastpath = atomic_read(&skb_shinfo(skb)->dataref) == delta;
-	}
-
 	if (fastpath) {
 		kfree(skb->head);
 	} else {
@@ -816,6 +827,7 @@
 	off = (data + nhead) - skb->head;
 
 	skb->head     = data;
+adjust_others:
 	skb->data    += off;
 #ifdef NET_SKBUFF_DATA_USES_OFFSET
 	skb->end      = size;
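
The relocated fastpath test works because skb_shared_info::dataref packs
two counters into one atomic_t: payload users in the low SKB_DATAREF_SHIFT
bits and header-only users above them. The same logic, condensed into one
hypothetical helper (the name is invented; the test mirrors the patch):

	/* True iff this skb holds the only reference on skb->head.
	 * A header-only clone (skb->nohdr) accounts for one payload
	 * user plus one header user, so sole ownership then reads
	 * back as (1 << SKB_DATAREF_SHIFT) + 1 instead of 1.
	 */
	static bool demo_owns_head(const struct sk_buff *skb)
	{
		int delta = skb->nohdr ? (1 << SKB_DATAREF_SHIFT) + 1 : 1;

		return !skb->cloned ||
		       atomic_read(&skb_shinfo(skb)->dataref) == delta;
	}
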
diff --git a/net/core/sock.c b/net/core/sock.c
index 3eed542..bcdb6ff 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -992,17 +992,18 @@
 /*
  * Copy all fields from osk to nsk but nsk->sk_refcnt must not change yet,
  * even temporarily, because of RCU lookups. sk_node should also be left as is.
+ * We must not copy fields between sk_dontcopy_begin and sk_dontcopy_end
  */
 static void sock_copy(struct sock *nsk, const struct sock *osk)
 {
 #ifdef CONFIG_SECURITY_NETWORK
 	void *sptr = nsk->sk_security;
 #endif
-	BUILD_BUG_ON(offsetof(struct sock, sk_copy_start) !=
-		     sizeof(osk->sk_node) + sizeof(osk->sk_refcnt) +
-		     sizeof(osk->sk_tx_queue_mapping));
-	memcpy(&nsk->sk_copy_start, &osk->sk_copy_start,
-	       osk->sk_prot->obj_size - offsetof(struct sock, sk_copy_start));
+	memcpy(nsk, osk, offsetof(struct sock, sk_dontcopy_begin));
+
+	memcpy(&nsk->sk_dontcopy_end, &osk->sk_dontcopy_end,
+	       osk->sk_prot->obj_size - offsetof(struct sock, sk_dontcopy_end));
+
 #ifdef CONFIG_SECURITY_NETWORK
 	nsk->sk_security = sptr;
 	security_sk_clone(osk, nsk);
@@ -1653,10 +1654,10 @@
 {
 	struct proto *prot = sk->sk_prot;
 	int amt = sk_mem_pages(size);
-	int allocated;
+	long allocated;
 
 	sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
-	allocated = atomic_add_return(amt, prot->memory_allocated);
+	allocated = atomic_long_add_return(amt, prot->memory_allocated);
 
 	/* Under limit. */
 	if (allocated <= prot->sysctl_mem[0]) {
@@ -1714,7 +1715,7 @@
 
 	/* Alas. Undo changes. */
 	sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
-	atomic_sub(amt, prot->memory_allocated);
+	atomic_long_sub(amt, prot->memory_allocated);
 	return 0;
 }
 EXPORT_SYMBOL(__sk_mem_schedule);
@@ -1727,12 +1728,12 @@
 {
 	struct proto *prot = sk->sk_prot;
 
-	atomic_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT,
+	atomic_long_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT,
 		   prot->memory_allocated);
 	sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
 
 	if (prot->memory_pressure && *prot->memory_pressure &&
-	    (atomic_read(prot->memory_allocated) < prot->sysctl_mem[0]))
+	    (atomic_long_read(prot->memory_allocated) < prot->sysctl_mem[0]))
 		*prot->memory_pressure = 0;
 }
 EXPORT_SYMBOL(__sk_mem_reclaim);
@@ -2452,12 +2453,12 @@
 
 static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
 {
-	seq_printf(seq, "%-9s %4u %6d  %6d   %-3s %6u   %-3s  %-10s "
+	seq_printf(seq, "%-9s %4u %6d  %6ld   %-3s %6u   %-3s  %-10s "
 			"%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
 		   proto->name,
 		   proto->obj_size,
 		   sock_prot_inuse_get(seq_file_net(seq), proto),
-		   proto->memory_allocated != NULL ? atomic_read(proto->memory_allocated) : -1,
+		   proto->memory_allocated != NULL ? atomic_long_read(proto->memory_allocated) : -1L,
 		   proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI",
 		   proto->max_header,
 		   proto->slab == NULL ? "no" : "yes",
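
The atomic_t to atomic_long_t conversion of memory_allocated is about
range, not atomicity: the counter is kept in SK_MEM_QUANTUM (page-sized)
units and is compared as a signed value against the sysctl_mem[] limits,
so a native long gives safe headroom on 64-bit hosts. Illustrative use
only (treating sysctl_mem[2] as the hard limit is an assumption here):

	long pages = atomic_long_read(prot->memory_allocated);

	if (pages > prot->sysctl_mem[2])
		;	/* over hard limit: shrink or fail the allocation */
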
diff --git a/net/core/timestamping.c b/net/core/timestamping.c
index 0ae6c22..b124d28 100644
--- a/net/core/timestamping.c
+++ b/net/core/timestamping.c
@@ -26,12 +26,12 @@
 	PTP_FILTER
 };
 
-static unsigned int classify(struct sk_buff *skb)
+static unsigned int classify(const struct sk_buff *skb)
 {
 	if (likely(skb->dev &&
 		   skb->dev->phydev &&
 		   skb->dev->phydev->drv))
-		return sk_run_filter(skb, ptp_filter, ARRAY_SIZE(ptp_filter));
+		return sk_run_filter(skb, ptp_filter);
 	else
 		return PTP_CLASS_NONE;
 }
diff --git a/net/dccp/Makefile b/net/dccp/Makefile
index 2991efc..5c8362b 100644
--- a/net/dccp/Makefile
+++ b/net/dccp/Makefile
@@ -1,7 +1,7 @@
 obj-$(CONFIG_IP_DCCP) += dccp.o dccp_ipv4.o
 
-dccp-y := ccid.o feat.o input.o minisocks.o options.o output.o proto.o timer.o
-
+dccp-y := ccid.o feat.o input.o minisocks.o options.o output.o proto.o timer.o \
+	  qpolicy.o
 #
 # CCID algorithms to be used by dccp.ko
 #
diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c
index 92a6fcb..25b7a8d 100644
--- a/net/dccp/ackvec.c
+++ b/net/dccp/ackvec.c
@@ -1,444 +1,375 @@
 /*
  *  net/dccp/ackvec.c
  *
- *  An implementation of the DCCP protocol
+ *  An implementation of Ack Vectors for the DCCP protocol
+ *  Copyright (c) 2007 University of Aberdeen, Scotland, UK
  *  Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
  *
  *      This program is free software; you can redistribute it and/or modify it
  *      under the terms of the GNU General Public License as published by the
  *      Free Software Foundation; version 2 of the License;
  */
-
-#include "ackvec.h"
 #include "dccp.h"
-
-#include <linux/init.h>
-#include <linux/errno.h>
 #include <linux/kernel.h>
-#include <linux/skbuff.h>
 #include <linux/slab.h>
 
-#include <net/sock.h>
-
 static struct kmem_cache *dccp_ackvec_slab;
 static struct kmem_cache *dccp_ackvec_record_slab;
 
-static struct dccp_ackvec_record *dccp_ackvec_record_new(void)
-{
-	struct dccp_ackvec_record *avr =
-			kmem_cache_alloc(dccp_ackvec_record_slab, GFP_ATOMIC);
-
-	if (avr != NULL)
-		INIT_LIST_HEAD(&avr->avr_node);
-
-	return avr;
-}
-
-static void dccp_ackvec_record_delete(struct dccp_ackvec_record *avr)
-{
-	if (unlikely(avr == NULL))
-		return;
-	/* Check if deleting a linked record */
-	WARN_ON(!list_empty(&avr->avr_node));
-	kmem_cache_free(dccp_ackvec_record_slab, avr);
-}
-
-static void dccp_ackvec_insert_avr(struct dccp_ackvec *av,
-				   struct dccp_ackvec_record *avr)
-{
-	/*
-	 * AVRs are sorted by seqno. Since we are sending them in order, we
-	 * just add the AVR at the head of the list.
-	 * -sorbo.
-	 */
-	if (!list_empty(&av->av_records)) {
-		const struct dccp_ackvec_record *head =
-					list_entry(av->av_records.next,
-						   struct dccp_ackvec_record,
-						   avr_node);
-		BUG_ON(before48(avr->avr_ack_seqno, head->avr_ack_seqno));
-	}
-
-	list_add(&avr->avr_node, &av->av_records);
-}
-
-int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb)
-{
-	struct dccp_sock *dp = dccp_sk(sk);
-	struct dccp_ackvec *av = dp->dccps_hc_rx_ackvec;
-	/* Figure out how many options do we need to represent the ackvec */
-	const u8 nr_opts = DIV_ROUND_UP(av->av_vec_len, DCCP_SINGLE_OPT_MAXLEN);
-	u16 len = av->av_vec_len + 2 * nr_opts, i;
-	u32 elapsed_time;
-	const unsigned char *tail, *from;
-	unsigned char *to;
-	struct dccp_ackvec_record *avr;
-	suseconds_t delta;
-
-	if (DCCP_SKB_CB(skb)->dccpd_opt_len + len > DCCP_MAX_OPT_LEN)
-		return -1;
-
-	delta = ktime_us_delta(ktime_get_real(), av->av_time);
-	elapsed_time = delta / 10;
-
-	if (elapsed_time != 0 &&
-	    dccp_insert_option_elapsed_time(skb, elapsed_time))
-		return -1;
-
-	avr = dccp_ackvec_record_new();
-	if (avr == NULL)
-		return -1;
-
-	DCCP_SKB_CB(skb)->dccpd_opt_len += len;
-
-	to   = skb_push(skb, len);
-	len  = av->av_vec_len;
-	from = av->av_buf + av->av_buf_head;
-	tail = av->av_buf + DCCP_MAX_ACKVEC_LEN;
-
-	for (i = 0; i < nr_opts; ++i) {
-		int copylen = len;
-
-		if (len > DCCP_SINGLE_OPT_MAXLEN)
-			copylen = DCCP_SINGLE_OPT_MAXLEN;
-
-		*to++ = DCCPO_ACK_VECTOR_0;
-		*to++ = copylen + 2;
-
-		/* Check if buf_head wraps */
-		if (from + copylen > tail) {
-			const u16 tailsize = tail - from;
-
-			memcpy(to, from, tailsize);
-			to	+= tailsize;
-			len	-= tailsize;
-			copylen	-= tailsize;
-			from	= av->av_buf;
-		}
-
-		memcpy(to, from, copylen);
-		from += copylen;
-		to   += copylen;
-		len  -= copylen;
-	}
-
-	/*
-	 *	From RFC 4340, A.2:
-	 *
-	 *	For each acknowledgement it sends, the HC-Receiver will add an
-	 *	acknowledgement record.  ack_seqno will equal the HC-Receiver
-	 *	sequence number it used for the ack packet; ack_ptr will equal
-	 *	buf_head; ack_ackno will equal buf_ackno; and ack_nonce will
-	 *	equal buf_nonce.
-	 */
-	avr->avr_ack_seqno = DCCP_SKB_CB(skb)->dccpd_seq;
-	avr->avr_ack_ptr   = av->av_buf_head;
-	avr->avr_ack_ackno = av->av_buf_ackno;
-	avr->avr_ack_nonce = av->av_buf_nonce;
-	avr->avr_sent_len  = av->av_vec_len;
-
-	dccp_ackvec_insert_avr(av, avr);
-
-	dccp_pr_debug("%s ACK Vector 0, len=%d, ack_seqno=%llu, "
-		      "ack_ackno=%llu\n",
-		      dccp_role(sk), avr->avr_sent_len,
-		      (unsigned long long)avr->avr_ack_seqno,
-		      (unsigned long long)avr->avr_ack_ackno);
-	return 0;
-}
-
 struct dccp_ackvec *dccp_ackvec_alloc(const gfp_t priority)
 {
-	struct dccp_ackvec *av = kmem_cache_alloc(dccp_ackvec_slab, priority);
+	struct dccp_ackvec *av = kmem_cache_zalloc(dccp_ackvec_slab, priority);
 
 	if (av != NULL) {
-		av->av_buf_head	 = DCCP_MAX_ACKVEC_LEN - 1;
-		av->av_buf_ackno = UINT48_MAX + 1;
-		av->av_buf_nonce = 0;
-		av->av_time	 = ktime_set(0, 0);
-		av->av_vec_len	 = 0;
+		av->av_buf_head	= av->av_buf_tail = DCCPAV_MAX_ACKVEC_LEN - 1;
 		INIT_LIST_HEAD(&av->av_records);
 	}
-
 	return av;
 }
 
+static void dccp_ackvec_purge_records(struct dccp_ackvec *av)
+{
+	struct dccp_ackvec_record *cur, *next;
+
+	list_for_each_entry_safe(cur, next, &av->av_records, avr_node)
+		kmem_cache_free(dccp_ackvec_record_slab, cur);
+	INIT_LIST_HEAD(&av->av_records);
+}
+
 void dccp_ackvec_free(struct dccp_ackvec *av)
 {
-	if (unlikely(av == NULL))
-		return;
-
-	if (!list_empty(&av->av_records)) {
-		struct dccp_ackvec_record *avr, *next;
-
-		list_for_each_entry_safe(avr, next, &av->av_records, avr_node) {
-			list_del_init(&avr->avr_node);
-			dccp_ackvec_record_delete(avr);
-		}
+	if (likely(av != NULL)) {
+		dccp_ackvec_purge_records(av);
+		kmem_cache_free(dccp_ackvec_slab, av);
 	}
-
-	kmem_cache_free(dccp_ackvec_slab, av);
 }
 
-static inline u8 dccp_ackvec_state(const struct dccp_ackvec *av,
-				   const u32 index)
-{
-	return av->av_buf[index] & DCCP_ACKVEC_STATE_MASK;
-}
-
-static inline u8 dccp_ackvec_len(const struct dccp_ackvec *av,
-				 const u32 index)
-{
-	return av->av_buf[index] & DCCP_ACKVEC_LEN_MASK;
-}
-
-/*
- * If several packets are missing, the HC-Receiver may prefer to enter multiple
- * bytes with run length 0, rather than a single byte with a larger run length;
- * this simplifies table updates if one of the missing packets arrives.
+/**
+ * dccp_ackvec_update_records  -  Record information about sent Ack Vectors
+ * @av:		Ack Vector records to update
+ * @seqno:	Sequence number of the packet carrying the Ack Vector just sent
+ * @nonce_sum:	The sum of all buffer nonces contained in the Ack Vector
  */
-static inline int dccp_ackvec_set_buf_head_state(struct dccp_ackvec *av,
-						 const unsigned int packets,
-						 const unsigned char state)
+int dccp_ackvec_update_records(struct dccp_ackvec *av, u64 seqno, u8 nonce_sum)
 {
-	long gap;
-	long new_head;
+	struct dccp_ackvec_record *avr;
 
-	if (av->av_vec_len + packets > DCCP_MAX_ACKVEC_LEN)
+	avr = kmem_cache_alloc(dccp_ackvec_record_slab, GFP_ATOMIC);
+	if (avr == NULL)
 		return -ENOBUFS;
 
-	gap	 = packets - 1;
-	new_head = av->av_buf_head - packets;
+	avr->avr_ack_seqno  = seqno;
+	avr->avr_ack_ptr    = av->av_buf_head;
+	avr->avr_ack_ackno  = av->av_buf_ackno;
+	avr->avr_ack_nonce  = nonce_sum;
+	avr->avr_ack_runlen = dccp_ackvec_runlen(av->av_buf + av->av_buf_head);
+	/*
+	 * When the buffer overflows, we keep no more than one record. This is
+	 * the simplest way of disambiguating sender-Acks dating from before the
+	 * overflow from sender-Acks which refer to after the overflow; a simple
+	 * solution is preferable here since we are handling an exception.
+	 */
+	if (av->av_overflow)
+		dccp_ackvec_purge_records(av);
+	/*
+	 * Since GSS is incremented for each packet, the list is automatically
+	 * arranged in descending order of @ack_seqno.
+	 */
+	list_add(&avr->avr_node, &av->av_records);
 
-	if (new_head < 0) {
-		if (gap > 0) {
-			memset(av->av_buf, DCCP_ACKVEC_STATE_NOT_RECEIVED,
-			       gap + new_head + 1);
-			gap = -new_head;
-		}
-		new_head += DCCP_MAX_ACKVEC_LEN;
-	}
-
-	av->av_buf_head = new_head;
-
-	if (gap > 0)
-		memset(av->av_buf + av->av_buf_head + 1,
-		       DCCP_ACKVEC_STATE_NOT_RECEIVED, gap);
-
-	av->av_buf[av->av_buf_head] = state;
-	av->av_vec_len += packets;
+	dccp_pr_debug("Added Vector, ack_seqno=%llu, ack_ackno=%llu (rl=%u)\n",
+		      (unsigned long long)avr->avr_ack_seqno,
+		      (unsigned long long)avr->avr_ack_ackno,
+		      avr->avr_ack_runlen);
 	return 0;
 }
 
+static struct dccp_ackvec_record *dccp_ackvec_lookup(struct list_head *av_list,
+						     const u64 ackno)
+{
+	struct dccp_ackvec_record *avr;
+	/*
+	 * Exploit that records are inserted in descending order of sequence
+	 * number, and start with the oldest record first. If @ackno is `before'
+	 * the earliest ack_ackno, the packet is too old to be considered.
+	 */
+	list_for_each_entry_reverse(avr, av_list, avr_node) {
+		if (avr->avr_ack_seqno == ackno)
+			return avr;
+		if (before48(ackno, avr->avr_ack_seqno))
+			break;
+	}
+	return NULL;
+}
+
 /*
- * Implements the RFC 4340, Appendix A
+ * Buffer index and length computation using modulo-buffersize arithmetic.
+ * Note that, as pointers move from right to left, head is `before' tail.
  */
-int dccp_ackvec_add(struct dccp_ackvec *av, const struct sock *sk,
-		    const u64 ackno, const u8 state)
+static inline u16 __ackvec_idx_add(const u16 a, const u16 b)
 {
-	/*
-	 * Check at the right places if the buffer is full, if it is, tell the
-	 * caller to start dropping packets till the HC-Sender acks our ACK
-	 * vectors, when we will free up space in av_buf.
-	 *
-	 * We may well decide to do buffer compression, etc, but for now lets
-	 * just drop.
-	 *
-	 * From Appendix A.1.1 (`New Packets'):
-	 *
-	 *	Of course, the circular buffer may overflow, either when the
-	 *	HC-Sender is sending data at a very high rate, when the
-	 *	HC-Receiver's acknowledgements are not reaching the HC-Sender,
-	 *	or when the HC-Sender is forgetting to acknowledge those acks
-	 *	(so the HC-Receiver is unable to clean up old state). In this
-	 *	case, the HC-Receiver should either compress the buffer (by
-	 *	increasing run lengths when possible), transfer its state to
-	 *	a larger buffer, or, as a last resort, drop all received
-	 *	packets, without processing them whatsoever, until its buffer
-	 *	shrinks again.
-	 */
-
-	/* See if this is the first ackno being inserted */
-	if (av->av_vec_len == 0) {
-		av->av_buf[av->av_buf_head] = state;
-		av->av_vec_len = 1;
-	} else if (after48(ackno, av->av_buf_ackno)) {
-		const u64 delta = dccp_delta_seqno(av->av_buf_ackno, ackno);
-
-		/*
-		 * Look if the state of this packet is the same as the
-		 * previous ackno and if so if we can bump the head len.
-		 */
-		if (delta == 1 &&
-		    dccp_ackvec_state(av, av->av_buf_head) == state &&
-		    dccp_ackvec_len(av, av->av_buf_head) < DCCP_ACKVEC_LEN_MASK)
-			av->av_buf[av->av_buf_head]++;
-		else if (dccp_ackvec_set_buf_head_state(av, delta, state))
-			return -ENOBUFS;
-	} else {
-		/*
-		 * A.1.2.  Old Packets
-		 *
-		 *	When a packet with Sequence Number S <= buf_ackno
-		 *	arrives, the HC-Receiver will scan the table for
-		 *	the byte corresponding to S. (Indexing structures
-		 *	could reduce the complexity of this scan.)
-		 */
-		u64 delta = dccp_delta_seqno(ackno, av->av_buf_ackno);
-		u32 index = av->av_buf_head;
-
-		while (1) {
-			const u8 len = dccp_ackvec_len(av, index);
-			const u8 av_state = dccp_ackvec_state(av, index);
-			/*
-			 * valid packets not yet in av_buf have a reserved
-			 * entry, with a len equal to 0.
-			 */
-			if (av_state == DCCP_ACKVEC_STATE_NOT_RECEIVED &&
-			    len == 0 && delta == 0) { /* Found our
-							 reserved seat! */
-				dccp_pr_debug("Found %llu reserved seat!\n",
-					      (unsigned long long)ackno);
-				av->av_buf[index] = state;
-				goto out;
-			}
-			/* len == 0 means one packet */
-			if (delta < len + 1)
-				goto out_duplicate;
-
-			delta -= len + 1;
-			if (++index == DCCP_MAX_ACKVEC_LEN)
-				index = 0;
-		}
-	}
-
-	av->av_buf_ackno = ackno;
-	av->av_time = ktime_get_real();
-out:
-	return 0;
-
-out_duplicate:
-	/* Duplicate packet */
-	dccp_pr_debug("Received a dup or already considered lost "
-		      "packet: %llu\n", (unsigned long long)ackno);
-	return -EILSEQ;
+	return (a + b) % DCCPAV_MAX_ACKVEC_LEN;
 }
 
-static void dccp_ackvec_throw_record(struct dccp_ackvec *av,
-				     struct dccp_ackvec_record *avr)
+static inline u16 __ackvec_idx_sub(const u16 a, const u16 b)
 {
-	struct dccp_ackvec_record *next;
-
-	/* sort out vector length */
-	if (av->av_buf_head <= avr->avr_ack_ptr)
-		av->av_vec_len = avr->avr_ack_ptr - av->av_buf_head;
-	else
-		av->av_vec_len = DCCP_MAX_ACKVEC_LEN - 1 -
-				 av->av_buf_head + avr->avr_ack_ptr;
-
-	/* free records */
-	list_for_each_entry_safe_from(avr, next, &av->av_records, avr_node) {
-		list_del_init(&avr->avr_node);
-		dccp_ackvec_record_delete(avr);
-	}
+	return __ackvec_idx_add(a, DCCPAV_MAX_ACKVEC_LEN - b);
 }
 
-void dccp_ackvec_check_rcv_ackno(struct dccp_ackvec *av, struct sock *sk,
-				 const u64 ackno)
+u16 dccp_ackvec_buflen(const struct dccp_ackvec *av)
 {
-	struct dccp_ackvec_record *avr;
-
-	/*
-	 * If we traverse backwards, it should be faster when we have large
-	 * windows. We will be receiving ACKs for stuff we sent a while back
-	 * -sorbo.
-	 */
-	list_for_each_entry_reverse(avr, &av->av_records, avr_node) {
-		if (ackno == avr->avr_ack_seqno) {
-			dccp_pr_debug("%s ACK packet 0, len=%d, ack_seqno=%llu, "
-				      "ack_ackno=%llu, ACKED!\n",
-				      dccp_role(sk), 1,
-				      (unsigned long long)avr->avr_ack_seqno,
-				      (unsigned long long)avr->avr_ack_ackno);
-			dccp_ackvec_throw_record(av, avr);
-			break;
-		} else if (avr->avr_ack_seqno > ackno)
-			break; /* old news */
-	}
+	if (unlikely(av->av_overflow))
+		return DCCPAV_MAX_ACKVEC_LEN;
+	return __ackvec_idx_sub(av->av_buf_tail, av->av_buf_head);
 }
 
-static void dccp_ackvec_check_rcv_ackvector(struct dccp_ackvec *av,
-					    struct sock *sk, u64 *ackno,
-					    const unsigned char len,
-					    const unsigned char *vector)
+/**
+ * dccp_ackvec_update_old  -  Update previous state as per RFC 4340, 11.4.1
+ * @av:		non-empty buffer to update
+ * @distance:   negative or zero distance of @seqno from buf_ackno downward
+ * @seqno:	the (old) sequence number whose record is to be updated
+ * @state:	state in which packet carrying @seqno was received
+ */
+static void dccp_ackvec_update_old(struct dccp_ackvec *av, s64 distance,
+				   u64 seqno, enum dccp_ackvec_states state)
 {
-	unsigned char i;
-	struct dccp_ackvec_record *avr;
+	u16 ptr = av->av_buf_head;
 
-	/* Check if we actually sent an ACK vector */
-	if (list_empty(&av->av_records))
+	BUG_ON(distance > 0);
+	if (unlikely(dccp_ackvec_is_empty(av)))
 		return;
 
-	i = len;
-	/*
-	 * XXX
-	 * I think it might be more efficient to work backwards. See comment on
-	 * rcv_ackno. -sorbo.
-	 */
-	avr = list_entry(av->av_records.next, struct dccp_ackvec_record, avr_node);
-	while (i--) {
-		const u8 rl = *vector & DCCP_ACKVEC_LEN_MASK;
-		u64 ackno_end_rl;
+	do {
+		u8 runlen = dccp_ackvec_runlen(av->av_buf + ptr);
 
-		dccp_set_seqno(&ackno_end_rl, *ackno - rl);
-
-		/*
-		 * If our AVR sequence number is greater than the ack, go
-		 * forward in the AVR list until it is not so.
-		 */
-		list_for_each_entry_from(avr, &av->av_records, avr_node) {
-			if (!after48(avr->avr_ack_seqno, *ackno))
-				goto found;
-		}
-		/* End of the av_records list, not found, exit */
-		break;
-found:
-		if (between48(avr->avr_ack_seqno, ackno_end_rl, *ackno)) {
-			const u8 state = *vector & DCCP_ACKVEC_STATE_MASK;
-			if (state != DCCP_ACKVEC_STATE_NOT_RECEIVED) {
-				dccp_pr_debug("%s ACK vector 0, len=%d, "
-					      "ack_seqno=%llu, ack_ackno=%llu, "
-					      "ACKED!\n",
-					      dccp_role(sk), len,
-					      (unsigned long long)
-					      avr->avr_ack_seqno,
-					      (unsigned long long)
-					      avr->avr_ack_ackno);
-				dccp_ackvec_throw_record(av, avr);
-				break;
-			}
+		if (distance + runlen >= 0) {
 			/*
-			 * If it wasn't received, continue scanning... we might
-			 * find another one.
+			 * Only update the state if packet has not been received
+			 * yet. This is OK as per the second table in RFC 4340,
+			 * 11.4.1; i.e. here we are using the following table:
+			 *                     RECEIVED
+			 *                      0   1   3
+			 *              S     +---+---+---+
+			 *              T   0 | 0 | 0 | 0 |
+			 *              O     +---+---+---+
+			 *              R   1 | 1 | 1 | 1 |
+			 *              E     +---+---+---+
+			 *              D   3 | 0 | 1 | 3 |
+			 *                    +---+---+---+
+			 * The "Not Received" state was set by reserve_seats().
 			 */
+			if (av->av_buf[ptr] == DCCPAV_NOT_RECEIVED)
+				av->av_buf[ptr] = state;
+			else
+				dccp_pr_debug("Not changing %llu state to %u\n",
+					      (unsigned long long)seqno, state);
+			break;
 		}
 
-		dccp_set_seqno(ackno, ackno_end_rl - 1);
-		++vector;
+		distance += runlen + 1;
+		ptr	  = __ackvec_idx_add(ptr, 1);
+
+	} while (ptr != av->av_buf_tail);
+}
+
+/* Mark @num entries after buf_head as "Not yet received". */
+static void dccp_ackvec_reserve_seats(struct dccp_ackvec *av, u16 num)
+{
+	u16 start = __ackvec_idx_add(av->av_buf_head, 1),
+	    len	  = DCCPAV_MAX_ACKVEC_LEN - start;
+
+	/* check for buffer wrap-around */
+	if (num > len) {
+		memset(av->av_buf + start, DCCPAV_NOT_RECEIVED, len);
+		start = 0;
+		num  -= len;
+	}
+	if (num)
+		memset(av->av_buf + start, DCCPAV_NOT_RECEIVED, num);
+}
+
+/**
+ * dccp_ackvec_add_new  -  Record one or more new entries in Ack Vector buffer
+ * @av:		 container of buffer to update (can be empty or non-empty)
+ * @num_packets: number of packets to register (must be >= 1)
+ * @seqno:	 sequence number of the first packet in @num_packets
+ * @state:	 state in which packet carrying @seqno was received
+ */
+static void dccp_ackvec_add_new(struct dccp_ackvec *av, u32 num_packets,
+				u64 seqno, enum dccp_ackvec_states state)
+{
+	u32 num_cells = num_packets;
+
+	if (num_packets > DCCPAV_BURST_THRESH) {
+		u32 lost_packets = num_packets - 1;
+
+		DCCP_WARN("Warning: large burst loss (%u)\n", lost_packets);
+		/*
+		 * We received 1 packet and have a loss of size "num_packets-1"
+		 * which we squeeze into num_cells-1 rather than reserving an
+		 * entire byte for each lost packet.
+		 * The reason is that the vector grows in O(burst_length); when
+		 * it grows too large there will be no room left for the payload.
+		 * This is a trade-off: if a few packets out of the burst show
+		 * up later, their state will not be changed; it is simply too
+		 * costly to reshuffle/reallocate/copy the buffer each time.
+		 * Should such problems persist, we will need to switch to a
+		 * different underlying data structure.
+		 */
+		for (num_packets = num_cells = 1; lost_packets; ++num_cells) {
+			u8 len = min(lost_packets, (u32)DCCPAV_MAX_RUNLEN);
+
+			av->av_buf_head = __ackvec_idx_sub(av->av_buf_head, 1);
+			av->av_buf[av->av_buf_head] = DCCPAV_NOT_RECEIVED | len;
+
+			lost_packets -= len;
+		}
+	}
+
+	if (num_cells + dccp_ackvec_buflen(av) >= DCCPAV_MAX_ACKVEC_LEN) {
+		DCCP_CRIT("Ack Vector buffer overflow: dropping old entries\n");
+		av->av_overflow = true;
+	}
+
+	av->av_buf_head = __ackvec_idx_sub(av->av_buf_head, num_packets);
+	if (av->av_overflow)
+		av->av_buf_tail = av->av_buf_head;
+
+	av->av_buf[av->av_buf_head] = state;
+	av->av_buf_ackno	    = seqno;
+
+	if (num_packets > 1)
+		dccp_ackvec_reserve_seats(av, num_packets - 1);
+}
+
+/**
+ * dccp_ackvec_input  -  Register incoming packet in the buffer
+ */
+void dccp_ackvec_input(struct dccp_ackvec *av, struct sk_buff *skb)
+{
+	u64 seqno = DCCP_SKB_CB(skb)->dccpd_seq;
+	enum dccp_ackvec_states state = DCCPAV_RECEIVED;
+
+	if (dccp_ackvec_is_empty(av)) {
+		dccp_ackvec_add_new(av, 1, seqno, state);
+		av->av_tail_ackno = seqno;
+
+	} else {
+		s64 num_packets = dccp_delta_seqno(av->av_buf_ackno, seqno);
+		u8 *current_head = av->av_buf + av->av_buf_head;
+
+		if (num_packets == 1 &&
+		    dccp_ackvec_state(current_head) == state &&
+		    dccp_ackvec_runlen(current_head) < DCCPAV_MAX_RUNLEN) {
+
+			*current_head   += 1;
+			av->av_buf_ackno = seqno;
+
+		} else if (num_packets > 0) {
+			dccp_ackvec_add_new(av, num_packets, seqno, state);
+		} else {
+			dccp_ackvec_update_old(av, num_packets, seqno, state);
+		}
 	}
 }
 
-int dccp_ackvec_parse(struct sock *sk, const struct sk_buff *skb,
-		      u64 *ackno, const u8 opt, const u8 *value, const u8 len)
+/**
+ * dccp_ackvec_clear_state  -  Perform house-keeping / garbage-collection
+ * This routine is called when the peer acknowledges the receipt of Ack Vectors
+ * up to and including @ackno. While based on section A.3 of RFC 4340, here
+ * are additional precautions to prevent corrupted buffer state. In particular,
+ * we use tail_ackno to identify outdated records; it always marks the earliest
+ * packet of group (2) in 11.4.2.
+ */
+void dccp_ackvec_clear_state(struct dccp_ackvec *av, const u64 ackno)
 {
-	if (len > DCCP_SINGLE_OPT_MAXLEN)
-		return -1;
+	struct dccp_ackvec_record *avr, *next;
+	u8 runlen_now, eff_runlen;
+	s64 delta;
 
-	/* dccp_ackvector_print(DCCP_SKB_CB(skb)->dccpd_ack_seq, value, len); */
-	dccp_ackvec_check_rcv_ackvector(dccp_sk(sk)->dccps_hc_rx_ackvec, sk,
-					ackno, len, value);
+	avr = dccp_ackvec_lookup(&av->av_records, ackno);
+	if (avr == NULL)
+		return;
+	/*
+	 * Deal with outdated acknowledgments: this arises when e.g. there are
+	 * several old records and the acks from the peer come in slowly. In
+	 * that case we may still have records that pre-date tail_ackno.
+	 */
+	delta = dccp_delta_seqno(av->av_tail_ackno, avr->avr_ack_ackno);
+	if (delta < 0)
+		goto free_records;
+	/*
+	 * Deal with overlapping Ack Vectors: don't subtract more than the
+	 * number of packets between tail_ackno and ack_ackno.
+	 */
+	eff_runlen = delta < avr->avr_ack_runlen ? delta : avr->avr_ack_runlen;
+
+	runlen_now = dccp_ackvec_runlen(av->av_buf + avr->avr_ack_ptr);
+	/*
+	 * The run length of Ack Vector cells does not decrease over time. If
+	 * the run length is the same as at the time the Ack Vector was sent, we
+	 * free the ack_ptr cell. That cell can however not be freed if the run
+	 * length has increased: in this case we need to move the tail pointer
+	 * backwards (towards higher indices), to its next-oldest neighbour.
+	 */
+	if (runlen_now > eff_runlen) {
+
+		av->av_buf[avr->avr_ack_ptr] -= eff_runlen + 1;
+		av->av_buf_tail = __ackvec_idx_add(avr->avr_ack_ptr, 1);
+
+		/* This move may not have cleared the overflow flag. */
+		if (av->av_overflow)
+			av->av_overflow = (av->av_buf_head == av->av_buf_tail);
+	} else {
+		av->av_buf_tail	= avr->avr_ack_ptr;
+		/*
+		 * We have made sure that avr points to a valid cell within the
+		 * buffer. This cell is either older than head, or equals head
+		 * (empty buffer): in both cases we no longer have any overflow.
+		 */
+		av->av_overflow	= 0;
+	}
+
+	/*
+	 * The peer has acknowledged up to and including ack_ackno. Hence the
+	 * first packet in group (2) of 11.4.2 is the successor of ack_ackno.
+	 */
+	av->av_tail_ackno = ADD48(avr->avr_ack_ackno, 1);
+
+free_records:
+	list_for_each_entry_safe_from(avr, next, &av->av_records, avr_node) {
+		list_del(&avr->avr_node);
+		kmem_cache_free(dccp_ackvec_record_slab, avr);
+	}
+}
+
+/*
+ *	Routines to keep track of Ack Vectors received in an skb
+ */
+int dccp_ackvec_parsed_add(struct list_head *head, u8 *vec, u8 len, u8 nonce)
+{
+	struct dccp_ackvec_parsed *new = kmalloc(sizeof(*new), GFP_ATOMIC);
+
+	if (new == NULL)
+		return -ENOBUFS;
+	new->vec   = vec;
+	new->len   = len;
+	new->nonce = nonce;
+
+	list_add_tail(&new->node, head);
 	return 0;
 }
+EXPORT_SYMBOL_GPL(dccp_ackvec_parsed_add);
+
+void dccp_ackvec_parsed_cleanup(struct list_head *parsed_chunks)
+{
+	struct dccp_ackvec_parsed *cur, *next;
+
+	list_for_each_entry_safe(cur, next, parsed_chunks, node)
+		kfree(cur);
+	INIT_LIST_HEAD(parsed_chunks);
+}
+EXPORT_SYMBOL_GPL(dccp_ackvec_parsed_cleanup);
 
 int __init dccp_ackvec_init(void)
 {
@@ -448,10 +379,9 @@
 	if (dccp_ackvec_slab == NULL)
 		goto out_err;
 
-	dccp_ackvec_record_slab =
-			kmem_cache_create("dccp_ackvec_record",
-					  sizeof(struct dccp_ackvec_record),
-					  0, SLAB_HWCACHE_ALIGN, NULL);
+	dccp_ackvec_record_slab = kmem_cache_create("dccp_ackvec_record",
+					     sizeof(struct dccp_ackvec_record),
+					     0, SLAB_HWCACHE_ALIGN, NULL);
 	if (dccp_ackvec_record_slab == NULL)
 		goto out_destroy_slab;
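
Since the Ack Vector buffer grows downward (head moves toward lower
indices) and wraps around, all index and length computations above are
done modulo the buffer size. A standalone model of the two helpers, with
BUFLEN standing in for DCCPAV_MAX_ACKVEC_LEN (defined in ackvec.h below):

	#include <linux/types.h>

	#define BUFLEN	253	/* placeholder size for illustration */

	static u16 idx_add(u16 a, u16 b)
	{
		return (a + b) % BUFLEN;
	}

	static u16 idx_sub(u16 a, u16 b)
	{
		return idx_add(a, BUFLEN - b);	/* a - b, mod BUFLEN */
	}

	/* Live length is tail - head (mod BUFLEN): with head = 250 and
	 * tail = 2, idx_sub(2, 250) = 5, i.e. five cells are in use.
	 */
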
 
diff --git a/net/dccp/ackvec.h b/net/dccp/ackvec.h
index 7ea557b..e2ab062 100644
--- a/net/dccp/ackvec.h
+++ b/net/dccp/ackvec.h
@@ -3,9 +3,9 @@
 /*
  *  net/dccp/ackvec.h
  *
- *  An implementation of the DCCP protocol
+ *  An implementation of Ack Vectors for the DCCP protocol
+ *  Copyright (c) 2007 University of Aberdeen, Scotland, UK
  *  Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@mandriva.com>
- *
  *	This program is free software; you can redistribute it and/or modify it
  *	under the terms of the GNU General Public License version 2 as
  *	published by the Free Software Foundation.
@@ -13,99 +13,124 @@
 
 #include <linux/dccp.h>
 #include <linux/compiler.h>
-#include <linux/ktime.h>
 #include <linux/list.h>
 #include <linux/types.h>
 
-/* We can spread an ack vector across multiple options */
-#define DCCP_MAX_ACKVEC_LEN (DCCP_SINGLE_OPT_MAXLEN * 2)
+/*
+ * Ack Vector buffer space is static, in multiples of %DCCP_SINGLE_OPT_MAXLEN,
+ * the maximum size of a single Ack Vector. Setting %DCCPAV_NUM_ACKVECS to 1
+ * will be sufficient for most cases of low Ack Ratios; using a value of 2 gives
+ * more headroom if Ack Ratio is higher or when the sender acknowledges slowly.
+ * The maximum value is bounded by the u16 types for indices and functions.
+ */
+#define DCCPAV_NUM_ACKVECS	2
+#define DCCPAV_MAX_ACKVEC_LEN	(DCCP_SINGLE_OPT_MAXLEN * DCCPAV_NUM_ACKVECS)
 
 /* Estimated minimum average Ack Vector length - used for updating MPS */
 #define DCCPAV_MIN_OPTLEN	16
 
-#define DCCP_ACKVEC_STATE_RECEIVED	0
-#define DCCP_ACKVEC_STATE_ECN_MARKED	(1 << 6)
-#define DCCP_ACKVEC_STATE_NOT_RECEIVED	(3 << 6)
+/* Threshold for coping with large bursts of losses */
+#define DCCPAV_BURST_THRESH	(DCCPAV_MAX_ACKVEC_LEN / 8)
 
-#define DCCP_ACKVEC_STATE_MASK		0xC0 /* 11000000 */
-#define DCCP_ACKVEC_LEN_MASK		0x3F /* 00111111 */
+enum dccp_ackvec_states {
+	DCCPAV_RECEIVED =	0x00,
+	DCCPAV_ECN_MARKED =	0x40,
+	DCCPAV_RESERVED =	0x80,
+	DCCPAV_NOT_RECEIVED =	0xC0
+};
+#define DCCPAV_MAX_RUNLEN	0x3F
 
-/** struct dccp_ackvec - ack vector
+static inline u8 dccp_ackvec_runlen(const u8 *cell)
+{
+	return *cell & DCCPAV_MAX_RUNLEN;
+}
+
+static inline u8 dccp_ackvec_state(const u8 *cell)
+{
+	return *cell & ~DCCPAV_MAX_RUNLEN;
+}
+
+/** struct dccp_ackvec - Ack Vector main data structure
  *
- * This data structure is the one defined in RFC 4340, Appendix A.
+ * This implements a fixed-size circular buffer within an array and is largely
+ * based on Appendix A of RFC 4340.
  *
- * @av_buf_head - circular buffer head
- * @av_buf_tail - circular buffer tail
- * @av_buf_ackno - ack # of the most recent packet acknowledgeable in the
- *		       buffer (i.e. %av_buf_head)
- * @av_buf_nonce - the one-bit sum of the ECN Nonces on all packets acked
- * 		       by the buffer with State 0
- *
- * Additionally, the HC-Receiver must keep some information about the
- * Ack Vectors it has recently sent. For each packet sent carrying an
- * Ack Vector, it remembers four variables:
- *
- * @av_records - list of dccp_ackvec_record
- * @av_ack_nonce - the one-bit sum of the ECN Nonces for all State 0.
- *
- * @av_time - the time in usecs
- * @av_buf - circular buffer of acknowledgeable packets
+ * @av_buf:	   circular buffer storage area
+ * @av_buf_head:   head index; begin of live portion in @av_buf
+ * @av_buf_tail:   tail index; first index _after_ the live portion in @av_buf
+ * @av_buf_ackno:  highest seqno of acknowledgeable packet recorded in @av_buf
+ * @av_tail_ackno: lowest  seqno of acknowledgeable packet recorded in @av_buf
+ * @av_buf_nonce:  ECN nonce sums, each covering subsequent segments of up to
+ *		   %DCCP_SINGLE_OPT_MAXLEN cells in the live portion of @av_buf
+ * @av_overflow:   if 1 then buf_head == buf_tail indicates buffer wraparound
+ * @av_records:	   list of %dccp_ackvec_record (Ack Vectors sent previously)
  */
 struct dccp_ackvec {
-	u64			av_buf_ackno;
-	struct list_head	av_records;
-	ktime_t			av_time;
+	u8			av_buf[DCCPAV_MAX_ACKVEC_LEN];
 	u16			av_buf_head;
-	u16			av_vec_len;
-	u8			av_buf_nonce;
-	u8			av_ack_nonce;
-	u8			av_buf[DCCP_MAX_ACKVEC_LEN];
+	u16			av_buf_tail;
+	u64			av_buf_ackno:48;
+	u64			av_tail_ackno:48;
+	bool			av_buf_nonce[DCCPAV_NUM_ACKVECS];
+	u8			av_overflow:1;
+	struct list_head	av_records;
 };
 
-/** struct dccp_ackvec_record - ack vector record
+/** struct dccp_ackvec_record - Records information about sent Ack Vectors
  *
- * ACK vector record as defined in Appendix A of spec.
+ * These list entries define the additional information which the HC-Receiver
+ * keeps about recently-sent Ack Vectors; again refer to RFC 4340, Appendix A.
  *
- * The list is sorted by avr_ack_seqno
+ * @avr_node:	    the list node in @av_records
+ * @avr_ack_seqno:  sequence number of the packet the Ack Vector was sent on
+ * @avr_ack_ackno:  the Ack number that this record/Ack Vector refers to
+ * @avr_ack_ptr:    pointer into @av_buf where this record starts
+ * @avr_ack_runlen: run length of @avr_ack_ptr at the time of sending
+ * @avr_ack_nonce:  the sum of @av_buf_nonce's at the time this record was sent
  *
- * @avr_node - node in av_records
- * @avr_ack_seqno - sequence number of the packet this record was sent on
- * @avr_ack_ackno - sequence number being acknowledged
- * @avr_ack_ptr - pointer into av_buf where this record starts
- * @avr_ack_nonce - av_ack_nonce at the time this record was sent
- * @avr_sent_len - length of the record in av_buf
+ * The list as a whole is sorted in descending order by @avr_ack_seqno.
  */
 struct dccp_ackvec_record {
 	struct list_head avr_node;
-	u64		 avr_ack_seqno;
-	u64		 avr_ack_ackno;
+	u64		 avr_ack_seqno:48;
+	u64		 avr_ack_ackno:48;
 	u16		 avr_ack_ptr;
-	u16		 avr_sent_len;
-	u8		 avr_ack_nonce;
+	u8		 avr_ack_runlen;
+	u8		 avr_ack_nonce:1;
 };
 
-struct sock;
-struct sk_buff;
-
 extern int dccp_ackvec_init(void);
 extern void dccp_ackvec_exit(void);
 
 extern struct dccp_ackvec *dccp_ackvec_alloc(const gfp_t priority);
 extern void dccp_ackvec_free(struct dccp_ackvec *av);
 
-extern int dccp_ackvec_add(struct dccp_ackvec *av, const struct sock *sk,
-			   const u64 ackno, const u8 state);
+extern void dccp_ackvec_input(struct dccp_ackvec *av, struct sk_buff *skb);
+extern int  dccp_ackvec_update_records(struct dccp_ackvec *av, u64 seq, u8 sum);
+extern void dccp_ackvec_clear_state(struct dccp_ackvec *av, const u64 ackno);
+extern u16  dccp_ackvec_buflen(const struct dccp_ackvec *av);
 
-extern void dccp_ackvec_check_rcv_ackno(struct dccp_ackvec *av,
-					struct sock *sk, const u64 ackno);
-extern int dccp_ackvec_parse(struct sock *sk, const struct sk_buff *skb,
-			     u64 *ackno, const u8 opt,
-			     const u8 *value, const u8 len);
-
-extern int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb);
-
-static inline int dccp_ackvec_pending(const struct dccp_ackvec *av)
+static inline bool dccp_ackvec_is_empty(const struct dccp_ackvec *av)
 {
-	return av->av_vec_len;
+	return av->av_overflow == 0 && av->av_buf_head == av->av_buf_tail;
 }
+
+/**
+ * struct dccp_ackvec_parsed  -  Record offsets of Ack Vectors in skb
+ * @vec:	start of vector (offset into skb)
+ * @len:	length of @vec
+ * @nonce:	whether @vec had an ECN nonce of 0 or 1
+ * @node:	FIFO - arranged in descending order of ack_ackno
+ * This structure is used by CCIDs to access Ack Vectors in a received skb.
+ */
+struct dccp_ackvec_parsed {
+	u8		 *vec,
+			 len,
+			 nonce:1;
+	struct list_head node;
+};
+
+extern int dccp_ackvec_parsed_add(struct list_head *head,
+				  u8 *vec, u8 len, u8 nonce);
+extern void dccp_ackvec_parsed_cleanup(struct list_head *parsed_chunks);
 #endif /* _ACKVEC_H */
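
Each byte of av_buf is one cell: a 2-bit state in the top bits plus a
6-bit run length below it, where run length n means the cell covers n + 1
consecutive sequence numbers. A worked example using the helpers above:

	u8 cell = DCCPAV_NOT_RECEIVED | 4;	/* == 0xC4 */

	u8 state  = dccp_ackvec_state(&cell);	/* 0xC0, "not received" */
	u8 runlen = dccp_ackvec_runlen(&cell);	/* 4 => covers 5 packets */
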
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index 6576eae..e96d5e8 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -246,68 +246,6 @@
 #endif
 }
 
-/* XXX Lame code duplication!
- * returns -1 if none was found.
- * else returns the next offset to use in the function call.
- */
-static int ccid2_ackvector(struct sock *sk, struct sk_buff *skb, int offset,
-			   unsigned char **vec, unsigned char *veclen)
-{
-	const struct dccp_hdr *dh = dccp_hdr(skb);
-	unsigned char *options = (unsigned char *)dh + dccp_hdr_len(skb);
-	unsigned char *opt_ptr;
-	const unsigned char *opt_end = (unsigned char *)dh +
-					(dh->dccph_doff * 4);
-	unsigned char opt, len;
-	unsigned char *value;
-
-	BUG_ON(offset < 0);
-	options += offset;
-	opt_ptr = options;
-	if (opt_ptr >= opt_end)
-		return -1;
-
-	while (opt_ptr != opt_end) {
-		opt   = *opt_ptr++;
-		len   = 0;
-		value = NULL;
-
-		/* Check if this isn't a single byte option */
-		if (opt > DCCPO_MAX_RESERVED) {
-			if (opt_ptr == opt_end)
-				goto out_invalid_option;
-
-			len = *opt_ptr++;
-			if (len < 3)
-				goto out_invalid_option;
-			/*
-			 * Remove the type and len fields, leaving
-			 * just the value size
-			 */
-			len     -= 2;
-			value   = opt_ptr;
-			opt_ptr += len;
-
-			if (opt_ptr > opt_end)
-				goto out_invalid_option;
-		}
-
-		switch (opt) {
-		case DCCPO_ACK_VECTOR_0:
-		case DCCPO_ACK_VECTOR_1:
-			*vec	= value;
-			*veclen = len;
-			return offset + (opt_ptr - options);
-		}
-	}
-
-	return -1;
-
-out_invalid_option:
-	DCCP_BUG("Invalid option - this should not happen (previous parsing)!");
-	return -1;
-}
-
 /**
  * ccid2_rtt_estimator - Sample RTT and compute RTO using RFC2988 algorithm
  * This code is almost identical with TCP's tcp_rtt_estimator(), since
@@ -432,16 +370,28 @@
 		ccid2_change_l_ack_ratio(sk, hc->tx_cwnd);
 }
 
+static int ccid2_hc_tx_parse_options(struct sock *sk, u8 packet_type,
+				     u8 option, u8 *optval, u8 optlen)
+{
+	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
+
+	switch (option) {
+	case DCCPO_ACK_VECTOR_0:
+	case DCCPO_ACK_VECTOR_1:
+		return dccp_ackvec_parsed_add(&hc->tx_av_chunks, optval, optlen,
+					      option - DCCPO_ACK_VECTOR_0);
+	}
+	return 0;
+}
+
 static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
 {
 	struct dccp_sock *dp = dccp_sk(sk);
 	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
 	const bool sender_was_blocked = ccid2_cwnd_network_limited(hc);
+	struct dccp_ackvec_parsed *avp;
 	u64 ackno, seqno;
 	struct ccid2_seq *seqp;
-	unsigned char *vector;
-	unsigned char veclen;
-	int offset = 0;
 	int done = 0;
 	unsigned int maxincr = 0;
 
@@ -475,17 +425,12 @@
 	}
 
 	/* check forward path congestion */
-	/* still didn't send out new data packets */
-	if (hc->tx_seqh == hc->tx_seqt)
+	if (dccp_packet_without_ack(skb))
 		return;
 
-	switch (DCCP_SKB_CB(skb)->dccpd_type) {
-	case DCCP_PKT_ACK:
-	case DCCP_PKT_DATAACK:
-		break;
-	default:
-		return;
-	}
+	/* still didn't send out new data packets */
+	if (hc->tx_seqh == hc->tx_seqt)
+		goto done;
 
 	ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq;
 	if (after48(ackno, hc->tx_high_ack))
@@ -509,16 +454,16 @@
 		maxincr = DIV_ROUND_UP(dp->dccps_l_ack_ratio, 2);
 
 	/* go through all ack vectors */
-	while ((offset = ccid2_ackvector(sk, skb, offset,
-					 &vector, &veclen)) != -1) {
+	list_for_each_entry(avp, &hc->tx_av_chunks, node) {
 		/* go through this ack vector */
-		while (veclen--) {
-			const u8 rl = *vector & DCCP_ACKVEC_LEN_MASK;
-			u64 ackno_end_rl = SUB48(ackno, rl);
+		for (; avp->len--; avp->vec++) {
+			u64 ackno_end_rl = SUB48(ackno,
+						 dccp_ackvec_runlen(avp->vec));
 
-			ccid2_pr_debug("ackvec start:%llu end:%llu\n",
+			ccid2_pr_debug("ackvec %llu |%u,%u|\n",
 				       (unsigned long long)ackno,
-				       (unsigned long long)ackno_end_rl);
+				       dccp_ackvec_state(avp->vec) >> 6,
+				       dccp_ackvec_runlen(avp->vec));
 			/* if the seqno we are analyzing is larger than the
 			 * current ackno, then move towards the tail of our
 			 * seqnos.
@@ -537,17 +482,15 @@
 			 * run length
 			 */
 			while (between48(seqp->ccid2s_seq,ackno_end_rl,ackno)) {
-				const u8 state = *vector &
-						 DCCP_ACKVEC_STATE_MASK;
+				const u8 state = dccp_ackvec_state(avp->vec);
 
 				/* new packet received or marked */
-				if (state != DCCP_ACKVEC_STATE_NOT_RECEIVED &&
+				if (state != DCCPAV_NOT_RECEIVED &&
 				    !seqp->ccid2s_acked) {
-					if (state ==
-					    DCCP_ACKVEC_STATE_ECN_MARKED) {
+					if (state == DCCPAV_ECN_MARKED)
 						ccid2_congestion_event(sk,
 								       seqp);
-					} else
+					else
 						ccid2_new_ack(sk, seqp,
 							      &maxincr);
 
@@ -566,7 +509,6 @@
 				break;
 
 			ackno = SUB48(ackno_end_rl, 1);
-			vector++;
 		}
 		if (done)
 			break;
@@ -634,10 +576,11 @@
 		sk_stop_timer(sk, &hc->tx_rtotimer);
 	else
 		sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
-
+done:
 	/* check if incoming Acks allow pending packets to be sent */
 	if (sender_was_blocked && !ccid2_cwnd_network_limited(hc))
 		tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
+	dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
 }
 
 static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
@@ -666,6 +609,7 @@
 	hc->tx_last_cong = ccid2_time_stamp;
 	setup_timer(&hc->tx_rtotimer, ccid2_hc_tx_rto_expire,
 			(unsigned long)sk);
+	INIT_LIST_HEAD(&hc->tx_av_chunks);
 	return 0;
 }
 
@@ -699,16 +643,17 @@
 }
 
 struct ccid_operations ccid2_ops = {
-	.ccid_id		= DCCPC_CCID2,
-	.ccid_name		= "TCP-like",
-	.ccid_hc_tx_obj_size	= sizeof(struct ccid2_hc_tx_sock),
-	.ccid_hc_tx_init	= ccid2_hc_tx_init,
-	.ccid_hc_tx_exit	= ccid2_hc_tx_exit,
-	.ccid_hc_tx_send_packet	= ccid2_hc_tx_send_packet,
-	.ccid_hc_tx_packet_sent	= ccid2_hc_tx_packet_sent,
-	.ccid_hc_tx_packet_recv	= ccid2_hc_tx_packet_recv,
-	.ccid_hc_rx_obj_size	= sizeof(struct ccid2_hc_rx_sock),
-	.ccid_hc_rx_packet_recv	= ccid2_hc_rx_packet_recv,
+	.ccid_id		  = DCCPC_CCID2,
+	.ccid_name		  = "TCP-like",
+	.ccid_hc_tx_obj_size	  = sizeof(struct ccid2_hc_tx_sock),
+	.ccid_hc_tx_init	  = ccid2_hc_tx_init,
+	.ccid_hc_tx_exit	  = ccid2_hc_tx_exit,
+	.ccid_hc_tx_send_packet	  = ccid2_hc_tx_send_packet,
+	.ccid_hc_tx_packet_sent	  = ccid2_hc_tx_packet_sent,
+	.ccid_hc_tx_parse_options = ccid2_hc_tx_parse_options,
+	.ccid_hc_tx_packet_recv	  = ccid2_hc_tx_packet_recv,
+	.ccid_hc_rx_obj_size	  = sizeof(struct ccid2_hc_rx_sock),
+	.ccid_hc_rx_packet_recv	  = ccid2_hc_rx_packet_recv,
 };
 
 #ifdef CONFIG_IP_DCCP_CCID2_DEBUG
diff --git a/net/dccp/ccids/ccid2.h b/net/dccp/ccids/ccid2.h
index 25cb6b2..e9985da 100644
--- a/net/dccp/ccids/ccid2.h
+++ b/net/dccp/ccids/ccid2.h
@@ -55,6 +55,7 @@
  * @tx_rtt_seq:		     to decay RTTVAR at most once per flight
  * @tx_rpseq:		     last consecutive seqno
  * @tx_rpdupack:	     dupacks since rpseq
+ * @tx_av_chunks:	     list of Ack Vectors received on current skb
  */
 struct ccid2_hc_tx_sock {
 	u32			tx_cwnd;
@@ -79,6 +80,7 @@
 	int			tx_rpdupack;
 	u32			tx_last_cong;
 	u64			tx_high_ack;
+	struct list_head	tx_av_chunks;
 };
 
 static inline bool ccid2_cwnd_network_limited(struct ccid2_hc_tx_sock *hc)
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index a8ed459..4508705 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -93,9 +93,6 @@
 #define DCCP_FALLBACK_RTT	(USEC_PER_SEC / 5)
 #define DCCP_SANE_RTT_MAX	(3 * USEC_PER_SEC)
 
-/* Maximal interval between probes for local resources.  */
-#define DCCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ / 2U))
-
 /* sysctl variables for DCCP */
 extern int  sysctl_dccp_request_retries;
 extern int  sysctl_dccp_retries1;
@@ -203,12 +200,7 @@
 DECLARE_SNMP_STAT(struct dccp_mib, dccp_statistics);
 #define DCCP_INC_STATS(field)	    SNMP_INC_STATS(dccp_statistics, field)
 #define DCCP_INC_STATS_BH(field)    SNMP_INC_STATS_BH(dccp_statistics, field)
-#define DCCP_INC_STATS_USER(field)  SNMP_INC_STATS_USER(dccp_statistics, field)
 #define DCCP_DEC_STATS(field)	    SNMP_DEC_STATS(dccp_statistics, field)
-#define DCCP_ADD_STATS_BH(field, val) \
-			SNMP_ADD_STATS_BH(dccp_statistics, field, val)
-#define DCCP_ADD_STATS_USER(field, val)	\
-			SNMP_ADD_STATS_USER(dccp_statistics, field, val)
 
 /*
  * 	Checksumming routines
@@ -243,6 +235,19 @@
 extern void dccp_send_sync(struct sock *sk, const u64 seq,
 			   const enum dccp_pkt_type pkt_type);
 
+/*
+ * TX Packet Dequeueing Interface
+ */
+extern void		dccp_qpolicy_push(struct sock *sk, struct sk_buff *skb);
+extern bool		dccp_qpolicy_full(struct sock *sk);
+extern void		dccp_qpolicy_drop(struct sock *sk, struct sk_buff *skb);
+extern struct sk_buff	*dccp_qpolicy_top(struct sock *sk);
+extern struct sk_buff	*dccp_qpolicy_pop(struct sock *sk);
+extern bool		dccp_qpolicy_param_ok(struct sock *sk, __be32 param);
+
+/*
+ * TX Packet Output and TX Timers
+ */
 extern void   dccp_write_xmit(struct sock *sk);
 extern void   dccp_write_space(struct sock *sk);
 extern void   dccp_flush_write_queue(struct sock *sk, long *time_budget);
@@ -457,12 +462,15 @@
 	dp->dccps_awh = dp->dccps_gss;
 }
 
+static inline int dccp_ackvec_pending(const struct sock *sk)
+{
+	return dccp_sk(sk)->dccps_hc_rx_ackvec != NULL &&
+	       !dccp_ackvec_is_empty(dccp_sk(sk)->dccps_hc_rx_ackvec);
+}
+
 static inline int dccp_ack_pending(const struct sock *sk)
 {
-	const struct dccp_sock *dp = dccp_sk(sk);
-	return (dp->dccps_hc_rx_ackvec != NULL &&
-		dccp_ackvec_pending(dp->dccps_hc_rx_ackvec)) ||
-	       inet_csk_ack_scheduled(sk);
+	return dccp_ackvec_pending(sk) || inet_csk_ack_scheduled(sk);
 }
 
 extern int  dccp_feat_finalise_settings(struct dccp_sock *dp);
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 2659853..15af247 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -160,13 +160,15 @@
 	dccp_time_wait(sk, DCCP_TIME_WAIT, 0);
 }
 
-static void dccp_event_ack_recv(struct sock *sk, struct sk_buff *skb)
+static void dccp_handle_ackvec_processing(struct sock *sk, struct sk_buff *skb)
 {
-	struct dccp_sock *dp = dccp_sk(sk);
+	struct dccp_ackvec *av = dccp_sk(sk)->dccps_hc_rx_ackvec;
 
-	if (dp->dccps_hc_rx_ackvec != NULL)
-		dccp_ackvec_check_rcv_ackno(dp->dccps_hc_rx_ackvec, sk,
-					    DCCP_SKB_CB(skb)->dccpd_ack_seq);
+	if (av == NULL)
+		return;
+	if (DCCP_SKB_CB(skb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
+		dccp_ackvec_clear_state(av, DCCP_SKB_CB(skb)->dccpd_ack_seq);
+	dccp_ackvec_input(av, skb);
 }
 
 static void dccp_deliver_input_to_ccids(struct sock *sk, struct sk_buff *skb)
@@ -239,7 +241,8 @@
 		dccp_update_gsr(sk, seqno);
 
 		if (dh->dccph_type != DCCP_PKT_SYNC &&
-		    (ackno != DCCP_PKT_WITHOUT_ACK_SEQ))
+		    ackno != DCCP_PKT_WITHOUT_ACK_SEQ &&
+		    after48(ackno, dp->dccps_gar))
 			dp->dccps_gar = ackno;
 	} else {
 		unsigned long now = jiffies;
@@ -365,22 +368,13 @@
 int dccp_rcv_established(struct sock *sk, struct sk_buff *skb,
 			 const struct dccp_hdr *dh, const unsigned len)
 {
-	struct dccp_sock *dp = dccp_sk(sk);
-
 	if (dccp_check_seqno(sk, skb))
 		goto discard;
 
 	if (dccp_parse_options(sk, NULL, skb))
 		return 1;
 
-	if (DCCP_SKB_CB(skb)->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
-		dccp_event_ack_recv(sk, skb);
-
-	if (dp->dccps_hc_rx_ackvec != NULL &&
-	    dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk,
-			    DCCP_SKB_CB(skb)->dccpd_seq,
-			    DCCP_ACKVEC_STATE_RECEIVED))
-		goto discard;
+	dccp_handle_ackvec_processing(sk, skb);
 	dccp_deliver_input_to_ccids(sk, skb);
 
 	return __dccp_rcv_established(sk, skb, dh, len);
@@ -632,15 +626,7 @@
 		if (dccp_parse_options(sk, NULL, skb))
 			return 1;
 
-		if (dcb->dccpd_ack_seq != DCCP_PKT_WITHOUT_ACK_SEQ)
-			dccp_event_ack_recv(sk, skb);
-
-		if (dp->dccps_hc_rx_ackvec != NULL &&
-		    dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk,
-				    DCCP_SKB_CB(skb)->dccpd_seq,
-				    DCCP_ACKVEC_STATE_RECEIVED))
-			goto discard;
-
+		dccp_handle_ackvec_processing(sk, skb);
 		dccp_deliver_input_to_ccids(sk, skb);
 	}
 
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 3f69ea1..45a434f 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -462,15 +462,12 @@
 {
 	struct rtable *rt;
 	struct flowi fl = { .oif = skb_rtable(skb)->rt_iif,
-			    .nl_u = { .ip4_u =
-				      { .daddr = ip_hdr(skb)->saddr,
-					.saddr = ip_hdr(skb)->daddr,
-					.tos = RT_CONN_FLAGS(sk) } },
+			    .fl4_dst = ip_hdr(skb)->saddr,
+			    .fl4_src = ip_hdr(skb)->daddr,
+			    .fl4_tos = RT_CONN_FLAGS(sk),
 			    .proto = sk->sk_protocol,
-			    .uli_u = { .ports =
-				       { .sport = dccp_hdr(skb)->dccph_dport,
-					 .dport = dccp_hdr(skb)->dccph_sport }
-				     }
+			    .fl_ip_sport = dccp_hdr(skb)->dccph_dport,
+			    .fl_ip_dport = dccp_hdr(skb)->dccph_sport
 			  };
 
 	security_skb_classify_flow(skb, &fl);
diff --git a/net/dccp/options.c b/net/dccp/options.c
index cd30618..f06ffcf 100644
--- a/net/dccp/options.c
+++ b/net/dccp/options.c
@@ -54,7 +54,6 @@
 	struct dccp_sock *dp = dccp_sk(sk);
 	const struct dccp_hdr *dh = dccp_hdr(skb);
 	const u8 pkt_type = DCCP_SKB_CB(skb)->dccpd_type;
-	u64 ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq;
 	unsigned char *options = (unsigned char *)dh + dccp_hdr_len(skb);
 	unsigned char *opt_ptr = options;
 	const unsigned char *opt_end = (unsigned char *)dh +
@@ -129,14 +128,6 @@
 			if (rc)
 				goto out_featneg_failed;
 			break;
-		case DCCPO_ACK_VECTOR_0:
-		case DCCPO_ACK_VECTOR_1:
-			if (dccp_packet_without_ack(skb))   /* RFC 4340, 11.4 */
-				break;
-			if (dp->dccps_hc_rx_ackvec != NULL &&
-			    dccp_ackvec_parse(sk, skb, &ackno, opt, value, len))
-				goto out_invalid_option;
-			break;
 		case DCCPO_TIMESTAMP:
 			if (len != 4)
 				goto out_invalid_option;
@@ -226,6 +217,16 @@
 						     pkt_type, opt, value, len))
 				goto out_invalid_option;
 			break;
+		case DCCPO_ACK_VECTOR_0:
+		case DCCPO_ACK_VECTOR_1:
+			if (dccp_packet_without_ack(skb))   /* RFC 4340, 11.4 */
+				break;
+			/*
+			 * Ack vectors are processed by the TX CCID if it is
+			 * interested. The RX CCID need not parse Ack Vectors,
+			 * since it is only interested in clearing old state.
+			 * Fall through.
+			 */
 		case DCCPO_MIN_TX_CCID_SPECIFIC ... DCCPO_MAX_TX_CCID_SPECIFIC:
 			if (ccid_hc_tx_parse_options(dp->dccps_hc_tx_ccid, sk,
 						     pkt_type, opt, value, len))
@@ -340,6 +341,7 @@
 	return elapsed_time == 0 ? 0 : elapsed_time <= 0xFFFF ? 2 : 4;
 }
 
+/* FIXME: This function is currently not used anywhere */
 int dccp_insert_option_elapsed_time(struct sk_buff *skb, u32 elapsed_time)
 {
 	const int elapsed_time_len = dccp_elapsed_time_len(elapsed_time);
@@ -424,6 +426,83 @@
 	return 0;
 }
 
+static int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb)
+{
+	struct dccp_sock *dp = dccp_sk(sk);
+	struct dccp_ackvec *av = dp->dccps_hc_rx_ackvec;
+	struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
+	const u16 buflen = dccp_ackvec_buflen(av);
+	/* Figure out how many options we need to represent the ackvec */
+	const u8 nr_opts = DIV_ROUND_UP(buflen, DCCP_SINGLE_OPT_MAXLEN);
+	u16 len = buflen + 2 * nr_opts;
+	u8 i, nonce = 0;
+	const unsigned char *tail, *from;
+	unsigned char *to;
+
+	if (dcb->dccpd_opt_len + len > DCCP_MAX_OPT_LEN) {
+		DCCP_WARN("Lacking space for %u bytes on %s packet\n", len,
+			  dccp_packet_name(dcb->dccpd_type));
+		return -1;
+	}
+	/*
+	 * Since Ack Vectors are variable-length, we cannot always predict
+	 * their size. To catch exception cases where the space is running out
+	 * on the skb, a separate Sync is scheduled to carry the Ack Vector.
+	 */
+	if (len > DCCPAV_MIN_OPTLEN &&
+	    len + dcb->dccpd_opt_len + skb->len > dp->dccps_mss_cache) {
+		DCCP_WARN("No space left for Ack Vector (%u) on skb (%u+%u), "
+			  "MPS=%u ==> reduce payload size?\n", len, skb->len,
+			  dcb->dccpd_opt_len, dp->dccps_mss_cache);
+		dp->dccps_sync_scheduled = 1;
+		return 0;
+	}
+	dcb->dccpd_opt_len += len;
+
+	to   = skb_push(skb, len);
+	len  = buflen;
+	from = av->av_buf + av->av_buf_head;
+	tail = av->av_buf + DCCPAV_MAX_ACKVEC_LEN;
+
+	for (i = 0; i < nr_opts; ++i) {
+		int copylen = len;
+
+		if (len > DCCP_SINGLE_OPT_MAXLEN)
+			copylen = DCCP_SINGLE_OPT_MAXLEN;
+
+		/*
+		 * RFC 4340, 12.2: Encode the Nonce Echo for this Ack Vector via
+		 * its type; ack_nonce is the sum of all individual buf_nonce's.
+		 */
+		nonce ^= av->av_buf_nonce[i];
+
+		*to++ = DCCPO_ACK_VECTOR_0 + av->av_buf_nonce[i];
+		*to++ = copylen + 2;
+
+		/* Check if buf_head wraps */
+		if (from + copylen > tail) {
+			const u16 tailsize = tail - from;
+
+			memcpy(to, from, tailsize);
+			to	+= tailsize;
+			len	-= tailsize;
+			copylen	-= tailsize;
+			from	= av->av_buf;
+		}
+
+		memcpy(to, from, copylen);
+		from += copylen;
+		to   += copylen;
+		len  -= copylen;
+	}
+	/*
+	 * Each sent Ack Vector is recorded in the list, as per A.2 of RFC 4340.
+	 */
+	if (dccp_ackvec_update_records(av, dcb->dccpd_seq, nonce))
+		return -ENOBUFS;
+	return 0;
+}
+
 /**
  * dccp_insert_option_mandatory  -  Mandatory option (5.8.2)
  * Note that since we are using skb_push, this function needs to be called
@@ -519,8 +598,7 @@
 			if (dccp_insert_option_timestamp(skb))
 				return -1;
 
-		} else if (dp->dccps_hc_rx_ackvec != NULL &&
-			   dccp_ackvec_pending(dp->dccps_hc_rx_ackvec) &&
+		} else if (dccp_ackvec_pending(sk) &&
 			   dccp_insert_option_ackvec(sk, skb)) {
 				return -1;
 		}
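
To make the option-splitting arithmetic in dccp_insert_option_ackvec() concrete, the standalone sketch below (illustrative values only; it assumes DCCP_SINGLE_OPT_MAXLEN is 253, the per-option payload limit from net/dccp/dccp.h) shows how buflen bytes of Ack Vector state map onto nr_opts options and len bytes of option space:

/*
 * Each option carries at most DCCP_SINGLE_OPT_MAXLEN payload bytes plus
 * a 2-byte type/length header, hence len = buflen + 2 * nr_opts.
 */
#include <stdio.h>

#define DCCP_SINGLE_OPT_MAXLEN	253
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int buflen;

	for (buflen = 100; buflen <= 600; buflen += 250) {
		unsigned int nr_opts = DIV_ROUND_UP(buflen, DCCP_SINGLE_OPT_MAXLEN);
		unsigned int len = buflen + 2 * nr_opts;

		printf("buflen=%3u -> %u option(s), %u bytes of options\n",
		       buflen, nr_opts, len);
	}
	return 0;
}
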
diff --git a/net/dccp/output.c b/net/dccp/output.c
index 45b9185..784d3021 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -242,7 +242,7 @@
 {
 	int err, len;
 	struct dccp_sock *dp = dccp_sk(sk);
-	struct sk_buff *skb = skb_dequeue(&sk->sk_write_queue);
+	struct sk_buff *skb = dccp_qpolicy_pop(sk);
 
 	if (unlikely(skb == NULL))
 		return;
@@ -283,6 +283,15 @@
 	 * any local drop will eventually be reported via receiver feedback.
 	 */
 	ccid_hc_tx_packet_sent(dp->dccps_hc_tx_ccid, sk, len);
+
+	/*
+	 * If the CCID needs to transfer additional header options out-of-band
+	 * (e.g. Ack Vectors or feature-negotiation options), it activates this
+	 * flag to schedule a Sync. The Sync will automatically incorporate all
+	 * currently pending header options, thus clearing the backlog.
+	 */
+	if (dp->dccps_sync_scheduled)
+		dccp_send_sync(sk, dp->dccps_gsr, DCCP_PKT_SYNC);
 }
 
 /**
@@ -336,7 +345,7 @@
 	struct dccp_sock *dp = dccp_sk(sk);
 	struct sk_buff *skb;
 
-	while ((skb = skb_peek(&sk->sk_write_queue))) {
+	while ((skb = dccp_qpolicy_top(sk))) {
 		int rc = ccid_hc_tx_send_packet(dp->dccps_hc_tx_ccid, sk, skb);
 
 		switch (ccid_packet_dequeue_eval(rc)) {
@@ -350,8 +359,7 @@
 			dccp_xmit_packet(sk);
 			break;
 		case CCID_PACKET_ERR:
-			skb_dequeue(&sk->sk_write_queue);
-			kfree_skb(skb);
+			dccp_qpolicy_drop(sk, skb);
 			dccp_pr_debug("packet discarded due to err=%d\n", rc);
 		}
 	}
@@ -636,6 +644,12 @@
 	DCCP_SKB_CB(skb)->dccpd_type = pkt_type;
 	DCCP_SKB_CB(skb)->dccpd_ack_seq = ackno;
 
+	/*
+	 * Clear the flag in case the Sync was scheduled for out-of-band data,
+	 * such as carrying a long Ack Vector.
+	 */
+	dccp_sk(sk)->dccps_sync_scheduled = 0;
+
 	dccp_transmit_skb(sk, skb);
 }
 
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index ef343d5..152975d 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -185,6 +185,7 @@
 	dp->dccps_role		= DCCP_ROLE_UNDEFINED;
 	dp->dccps_service	= DCCP_SERVICE_CODE_IS_ABSENT;
 	dp->dccps_l_ack_ratio	= dp->dccps_r_ack_ratio = 1;
+	dp->dccps_tx_qlen	= sysctl_dccp_tx_qlen;
 
 	dccp_init_xmit_timers(sk);
 
@@ -532,6 +533,20 @@
 	case DCCP_SOCKOPT_RECV_CSCOV:
 		err = dccp_setsockopt_cscov(sk, val, true);
 		break;
+	case DCCP_SOCKOPT_QPOLICY_ID:
+		if (sk->sk_state != DCCP_CLOSED)
+			err = -EISCONN;
+		else if (val < 0 || val >= DCCPQ_POLICY_MAX)
+			err = -EINVAL;
+		else
+			dp->dccps_qpolicy = val;
+		break;
+	case DCCP_SOCKOPT_QPOLICY_TXQLEN:
+		if (val < 0)
+			err = -EINVAL;
+		else
+			dp->dccps_tx_qlen = val;
+		break;
 	default:
 		err = -ENOPROTOOPT;
 		break;
@@ -639,6 +654,12 @@
 	case DCCP_SOCKOPT_RECV_CSCOV:
 		val = dp->dccps_pcrlen;
 		break;
+	case DCCP_SOCKOPT_QPOLICY_ID:
+		val = dp->dccps_qpolicy;
+		break;
+	case DCCP_SOCKOPT_QPOLICY_TXQLEN:
+		val = dp->dccps_tx_qlen;
+		break;
 	case 128 ... 191:
 		return ccid_hc_rx_getsockopt(dp->dccps_hc_rx_ccid, sk, optname,
 					     len, (u32 __user *)optval, optlen);
@@ -681,6 +702,47 @@
 EXPORT_SYMBOL_GPL(compat_dccp_getsockopt);
 #endif
 
+static int dccp_msghdr_parse(struct msghdr *msg, struct sk_buff *skb)
+{
+	struct cmsghdr *cmsg = CMSG_FIRSTHDR(msg);
+
+	/*
+	 * Assign an (opaque) qpolicy priority value to skb->priority.
+	 *
+	 * We are overloading this skb field for use with the qpolicy subsystem.
+	 * The skb->priority is normally used for the SO_PRIORITY option, which
+	 * is initialised from sk_priority. Since the assignment of sk_priority
+	 * to skb->priority happens later (on layer 3), we overload this field
+	 * for use with queueing priorities as long as the skb is on layer 4.
+	 * The default priority value (if nothing is set) is 0.
+	 */
+	skb->priority = 0;
+
+	for (; cmsg != NULL; cmsg = CMSG_NXTHDR(msg, cmsg)) {
+
+		if (!CMSG_OK(msg, cmsg))
+			return -EINVAL;
+
+		if (cmsg->cmsg_level != SOL_DCCP)
+			continue;
+
+		if (cmsg->cmsg_type <= DCCP_SCM_QPOLICY_MAX &&
+		    !dccp_qpolicy_param_ok(skb->sk, cmsg->cmsg_type))
+			return -EINVAL;
+
+		switch (cmsg->cmsg_type) {
+		case DCCP_SCM_PRIORITY:
+			if (cmsg->cmsg_len != CMSG_LEN(sizeof(__u32)))
+				return -EINVAL;
+			skb->priority = *(__u32 *)CMSG_DATA(cmsg);
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
 int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
 		 size_t len)
 {
@@ -696,8 +758,7 @@
 
 	lock_sock(sk);
 
-	if (sysctl_dccp_tx_qlen &&
-	    (sk->sk_write_queue.qlen >= sysctl_dccp_tx_qlen)) {
+	if (dccp_qpolicy_full(sk)) {
 		rc = -EAGAIN;
 		goto out_release;
 	}
@@ -725,7 +786,11 @@
 	if (rc != 0)
 		goto out_discard;
 
-	skb_queue_tail(&sk->sk_write_queue, skb);
+	rc = dccp_msghdr_parse(msg, skb);
+	if (rc != 0)
+		goto out_discard;
+
+	dccp_qpolicy_push(sk, skb);
 	/*
 	 * The xmit_timer is set if the TX CCID is rate-based and will expire
 	 * when congestion control permits to release further packets into the
diff --git a/net/dccp/qpolicy.c b/net/dccp/qpolicy.c
new file mode 100644
index 0000000..63c30bf
--- /dev/null
+++ b/net/dccp/qpolicy.c
@@ -0,0 +1,137 @@
+/*
+ *  net/dccp/qpolicy.c
+ *
+ *  Policy-based packet dequeueing interface for DCCP.
+ *
+ *  Copyright (c) 2008 Tomasz Grobelny <tomasz@grobelny.oswiecenia.net>
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License v2
+ *  as published by the Free Software Foundation.
+ */
+#include "dccp.h"
+
+/*
+ *	Simple Dequeueing Policy:
+ *	If tx_qlen is non-zero, enqueue at most tx_qlen elements.
+ */
+static void qpolicy_simple_push(struct sock *sk, struct sk_buff *skb)
+{
+	skb_queue_tail(&sk->sk_write_queue, skb);
+}
+
+static bool qpolicy_simple_full(struct sock *sk)
+{
+	return dccp_sk(sk)->dccps_tx_qlen &&
+	       sk->sk_write_queue.qlen >= dccp_sk(sk)->dccps_tx_qlen;
+}
+
+static struct sk_buff *qpolicy_simple_top(struct sock *sk)
+{
+	return skb_peek(&sk->sk_write_queue);
+}
+
+/*
+ *	Priority-based Dequeueing Policy:
+ *	If tx_qlen is non-zero and the queue has reached its upper bound of
+ *	tx_qlen elements, drop queued packets lowest-priority-first to make
+ *	room for new ones.
+ */
+static struct sk_buff *qpolicy_prio_best_skb(struct sock *sk)
+{
+	struct sk_buff *skb, *best = NULL;
+
+	skb_queue_walk(&sk->sk_write_queue, skb)
+		if (best == NULL || skb->priority > best->priority)
+			best = skb;
+	return best;
+}
+
+static struct sk_buff *qpolicy_prio_worst_skb(struct sock *sk)
+{
+	struct sk_buff *skb, *worst = NULL;
+
+	skb_queue_walk(&sk->sk_write_queue, skb)
+		if (worst == NULL || skb->priority < worst->priority)
+			worst = skb;
+	return worst;
+}
+
+static bool qpolicy_prio_full(struct sock *sk)
+{
+	if (qpolicy_simple_full(sk))
+		dccp_qpolicy_drop(sk, qpolicy_prio_worst_skb(sk));
+	return false;
+}
+
+/**
+ * struct dccp_qpolicy_operations  -  TX Packet Dequeueing Interface
+ * @push: add a new @skb to the write queue
+ * @full: indicates that no more packets will be admitted
+ * @top:  peeks at whatever the queueing policy defines as its `top'
+ */
+static struct dccp_qpolicy_operations {
+	void		(*push)	(struct sock *sk, struct sk_buff *skb);
+	bool		(*full) (struct sock *sk);
+	struct sk_buff*	(*top)  (struct sock *sk);
+	__be32		params;
+
+} qpol_table[DCCPQ_POLICY_MAX] = {
+	[DCCPQ_POLICY_SIMPLE] = {
+		.push   = qpolicy_simple_push,
+		.full   = qpolicy_simple_full,
+		.top    = qpolicy_simple_top,
+		.params = 0,
+	},
+	[DCCPQ_POLICY_PRIO] = {
+		.push   = qpolicy_simple_push,
+		.full   = qpolicy_prio_full,
+		.top    = qpolicy_prio_best_skb,
+		.params = DCCP_SCM_PRIORITY,
+	},
+};
+
+/*
+ *	Externally visible interface
+ */
+void dccp_qpolicy_push(struct sock *sk, struct sk_buff *skb)
+{
+	qpol_table[dccp_sk(sk)->dccps_qpolicy].push(sk, skb);
+}
+
+bool dccp_qpolicy_full(struct sock *sk)
+{
+	return qpol_table[dccp_sk(sk)->dccps_qpolicy].full(sk);
+}
+
+void dccp_qpolicy_drop(struct sock *sk, struct sk_buff *skb)
+{
+	if (skb != NULL) {
+		skb_unlink(skb, &sk->sk_write_queue);
+		kfree_skb(skb);
+	}
+}
+
+struct sk_buff *dccp_qpolicy_top(struct sock *sk)
+{
+	return qpol_table[dccp_sk(sk)->dccps_qpolicy].top(sk);
+}
+
+struct sk_buff *dccp_qpolicy_pop(struct sock *sk)
+{
+	struct sk_buff *skb = dccp_qpolicy_top(sk);
+
+	if (skb != NULL) {
+		/* Clear any skb fields that we used internally */
+		skb->priority = 0;
+		skb_unlink(skb, &sk->sk_write_queue);
+	}
+	return skb;
+}
+
+bool dccp_qpolicy_param_ok(struct sock *sk, __be32 param)
+{
+	/* check if exactly one bit is set */
+	if (!param || (param & (param - 1)))
+		return false;
+	return (qpol_table[dccp_sk(sk)->dccps_qpolicy].params & param) == param;
+}
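
From userspace, the two new socket options and the per-packet cmsg tie together as in the hedged sketch below. It assumes the <linux/dccp.h> constants added by this patch set (DCCP_SOCKOPT_QPOLICY_ID, DCCP_SOCKOPT_QPOLICY_TXQLEN, DCCPQ_POLICY_PRIO, DCCP_SCM_PRIORITY); error handling is elided:

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <linux/dccp.h>
#include <linux/types.h>

static void setup_qpolicy(int sk)
{
	int policy = DCCPQ_POLICY_PRIO, qlen = 16;

	/* QPOLICY_ID is only accepted while the socket is DCCP_CLOSED. */
	setsockopt(sk, SOL_DCCP, DCCP_SOCKOPT_QPOLICY_ID,
		   &policy, sizeof(policy));
	setsockopt(sk, SOL_DCCP, DCCP_SOCKOPT_QPOLICY_TXQLEN,
		   &qlen, sizeof(qlen));
}

static ssize_t send_prio(int sk, const void *buf, size_t len, __u32 prio)
{
	char cbuf[CMSG_SPACE(sizeof(prio))];
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	struct msghdr msg = {
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
		.msg_control	= cbuf,
		.msg_controllen	= sizeof(cbuf),
	};
	struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);

	/* Consumed by dccp_msghdr_parse() and stored in skb->priority. */
	cm->cmsg_level = SOL_DCCP;
	cm->cmsg_type  = DCCP_SCM_PRIORITY;
	cm->cmsg_len   = CMSG_LEN(sizeof(prio));
	memcpy(CMSG_DATA(cm), &prio, sizeof(prio));

	return sendmsg(sk, &msg, 0);
}

Under DCCPQ_POLICY_PRIO, qpolicy_prio_full() keeps the queue bounded by evicting the lowest-priority packet, so a full queue favours newer high-priority data instead of refusing it with -EAGAIN.
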
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index d6b93d1..0065e7e 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -155,7 +155,7 @@
 static DEFINE_RWLOCK(dn_hash_lock);
 static struct hlist_head dn_sk_hash[DN_SK_HASH_SIZE];
 static struct hlist_head dn_wild_sk;
-static atomic_t decnet_memory_allocated;
+static atomic_long_t decnet_memory_allocated;
 
 static int __dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen, int flags);
 static int __dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen, int flags);
@@ -1556,6 +1556,8 @@
 			if (r_len > sizeof(struct linkinfo_dn))
 				r_len = sizeof(struct linkinfo_dn);
 
+			memset(&link, 0, sizeof(link));
+
 			switch(sock->state) {
 				case SS_CONNECTING:
 					link.idn_linkstate = LL_CONNECTING;
@@ -1848,7 +1850,7 @@
 {
 	unsigned mss = 230 - DN_MAX_NSP_DATA_HEADER;
 	if (dev) {
-		struct dn_dev *dn_db = dev->dn_ptr;
+		struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
 		mtu -= LL_RESERVED_SPACE(dev);
 		if (dn_db->use_long)
 			mtu -= 21;
diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c
index 4c409b4..0ba1563 100644
--- a/net/decnet/dn_dev.c
+++ b/net/decnet/dn_dev.c
@@ -267,7 +267,7 @@
 	if (table->extra1 == NULL)
 		return -EINVAL;
 
-	dn_db = dev->dn_ptr;
+	dn_db = rcu_dereference_raw(dev->dn_ptr);
 	old = dn_db->parms.forwarding;
 
 	err = proc_dointvec(table, write, buffer, lenp, ppos);
@@ -332,14 +332,19 @@
 	return ifa;
 }
 
-static __inline__ void dn_dev_free_ifa(struct dn_ifaddr *ifa)
+static void dn_dev_free_ifa_rcu(struct rcu_head *head)
 {
-	kfree(ifa);
+	kfree(container_of(head, struct dn_ifaddr, rcu));
 }
 
-static void dn_dev_del_ifa(struct dn_dev *dn_db, struct dn_ifaddr **ifap, int destroy)
+static void dn_dev_free_ifa(struct dn_ifaddr *ifa)
 {
-	struct dn_ifaddr *ifa1 = *ifap;
+	call_rcu(&ifa->rcu, dn_dev_free_ifa_rcu);
+}
+
+static void dn_dev_del_ifa(struct dn_dev *dn_db, struct dn_ifaddr __rcu **ifap, int destroy)
+{
+	struct dn_ifaddr *ifa1 = rtnl_dereference(*ifap);
 	unsigned char mac_addr[6];
 	struct net_device *dev = dn_db->dev;
 
@@ -373,7 +378,9 @@
 	ASSERT_RTNL();
 
 	/* Check for duplicates */
-	for(ifa1 = dn_db->ifa_list; ifa1; ifa1 = ifa1->ifa_next) {
+	for (ifa1 = rtnl_dereference(dn_db->ifa_list);
+	     ifa1 != NULL;
+	     ifa1 = rtnl_dereference(ifa1->ifa_next)) {
 		if (ifa1->ifa_local == ifa->ifa_local)
 			return -EEXIST;
 	}
@@ -386,7 +393,7 @@
 	}
 
 	ifa->ifa_next = dn_db->ifa_list;
-	dn_db->ifa_list = ifa;
+	rcu_assign_pointer(dn_db->ifa_list, ifa);
 
 	dn_ifaddr_notify(RTM_NEWADDR, ifa);
 	blocking_notifier_call_chain(&dnaddr_chain, NETDEV_UP, ifa);
@@ -396,7 +403,7 @@
 
 static int dn_dev_set_ifa(struct net_device *dev, struct dn_ifaddr *ifa)
 {
-	struct dn_dev *dn_db = dev->dn_ptr;
+	struct dn_dev *dn_db = rtnl_dereference(dev->dn_ptr);
 	int rv;
 
 	if (dn_db == NULL) {
@@ -425,7 +432,8 @@
 	struct sockaddr_dn *sdn = (struct sockaddr_dn *)&ifr->ifr_addr;
 	struct dn_dev *dn_db;
 	struct net_device *dev;
-	struct dn_ifaddr *ifa = NULL, **ifap = NULL;
+	struct dn_ifaddr *ifa = NULL;
+	struct dn_ifaddr __rcu **ifap = NULL;
 	int ret = 0;
 
 	if (copy_from_user(ifr, arg, DN_IFREQ_SIZE))
@@ -454,8 +462,10 @@
 		goto done;
 	}
 
-	if ((dn_db = dev->dn_ptr) != NULL) {
-		for (ifap = &dn_db->ifa_list; (ifa=*ifap) != NULL; ifap = &ifa->ifa_next)
+	if ((dn_db = rtnl_dereference(dev->dn_ptr)) != NULL) {
+		for (ifap = &dn_db->ifa_list;
+		     (ifa = rtnl_dereference(*ifap)) != NULL;
+		     ifap = &ifa->ifa_next)
 			if (strcmp(ifr->ifr_name, ifa->ifa_label) == 0)
 				break;
 	}
@@ -558,7 +568,7 @@
 
 	dev = __dev_get_by_index(&init_net, ifindex);
 	if (dev)
-		dn_dev = dev->dn_ptr;
+		dn_dev = rtnl_dereference(dev->dn_ptr);
 
 	return dn_dev;
 }
@@ -576,7 +586,8 @@
 	struct nlattr *tb[IFA_MAX+1];
 	struct dn_dev *dn_db;
 	struct ifaddrmsg *ifm;
-	struct dn_ifaddr *ifa, **ifap;
+	struct dn_ifaddr *ifa;
+	struct dn_ifaddr __rcu **ifap;
 	int err = -EINVAL;
 
 	if (!net_eq(net, &init_net))
@@ -592,7 +603,9 @@
 		goto errout;
 
 	err = -EADDRNOTAVAIL;
-	for (ifap = &dn_db->ifa_list; (ifa = *ifap); ifap = &ifa->ifa_next) {
+	for (ifap = &dn_db->ifa_list;
+	     (ifa = rtnl_dereference(*ifap)) != NULL;
+	     ifap = &ifa->ifa_next) {
 		if (tb[IFA_LOCAL] &&
 		    nla_memcmp(tb[IFA_LOCAL], &ifa->ifa_local, 2))
 			continue;
@@ -632,7 +645,7 @@
 	if ((dev = __dev_get_by_index(&init_net, ifm->ifa_index)) == NULL)
 		return -ENODEV;
 
-	if ((dn_db = dev->dn_ptr) == NULL) {
+	if ((dn_db = rtnl_dereference(dev->dn_ptr)) == NULL) {
 		dn_db = dn_dev_create(dev, &err);
 		if (!dn_db)
 			return err;
@@ -748,11 +761,11 @@
 			skip_naddr = 0;
 		}
 
-		if ((dn_db = dev->dn_ptr) == NULL)
+		if ((dn_db = rtnl_dereference(dev->dn_ptr)) == NULL)
 			goto cont;
 
-		for (ifa = dn_db->ifa_list, dn_idx = 0; ifa;
-		     ifa = ifa->ifa_next, dn_idx++) {
+		for (ifa = rtnl_dereference(dn_db->ifa_list), dn_idx = 0; ifa;
+		     ifa = rtnl_dereference(ifa->ifa_next), dn_idx++) {
 			if (dn_idx < skip_naddr)
 				continue;
 
@@ -773,21 +786,22 @@
 
 static int dn_dev_get_first(struct net_device *dev, __le16 *addr)
 {
-	struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr;
+	struct dn_dev *dn_db;
 	struct dn_ifaddr *ifa;
 	int rv = -ENODEV;
 
+	rcu_read_lock();
+	dn_db = rcu_dereference(dev->dn_ptr);
 	if (dn_db == NULL)
 		goto out;
 
-	rtnl_lock();
-	ifa = dn_db->ifa_list;
+	ifa = rcu_dereference(dn_db->ifa_list);
 	if (ifa != NULL) {
 		*addr = ifa->ifa_local;
 		rv = 0;
 	}
-	rtnl_unlock();
 out:
+	rcu_read_unlock();
 	return rv;
 }
 
@@ -823,7 +837,7 @@
 	struct endnode_hello_message *msg;
 	struct sk_buff *skb = NULL;
 	__le16 *pktlen;
-	struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr;
+	struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
 
 	if ((skb = dn_alloc_skb(NULL, sizeof(*msg), GFP_ATOMIC)) == NULL)
 		return;
@@ -889,7 +903,7 @@
 static void dn_send_router_hello(struct net_device *dev, struct dn_ifaddr *ifa)
 {
 	int n;
-	struct dn_dev *dn_db = dev->dn_ptr;
+	struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
 	struct dn_neigh *dn = (struct dn_neigh *)dn_db->router;
 	struct sk_buff *skb;
 	size_t size;
@@ -960,7 +974,7 @@
 
 static void dn_send_brd_hello(struct net_device *dev, struct dn_ifaddr *ifa)
 {
-	struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr;
+	struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
 
 	if (dn_db->parms.forwarding == 0)
 		dn_send_endnode_hello(dev, ifa);
@@ -998,7 +1012,7 @@
 
 static int dn_eth_up(struct net_device *dev)
 {
-	struct dn_dev *dn_db = dev->dn_ptr;
+	struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
 
 	if (dn_db->parms.forwarding == 0)
 		dev_mc_add(dev, dn_rt_all_end_mcast);
@@ -1012,7 +1026,7 @@
 
 static void dn_eth_down(struct net_device *dev)
 {
-	struct dn_dev *dn_db = dev->dn_ptr;
+	struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
 
 	if (dn_db->parms.forwarding == 0)
 		dev_mc_del(dev, dn_rt_all_end_mcast);
@@ -1025,12 +1039,16 @@
 static void dn_dev_timer_func(unsigned long arg)
 {
 	struct net_device *dev = (struct net_device *)arg;
-	struct dn_dev *dn_db = dev->dn_ptr;
+	struct dn_dev *dn_db;
 	struct dn_ifaddr *ifa;
 
+	rcu_read_lock();
+	dn_db = rcu_dereference(dev->dn_ptr);
 	if (dn_db->t3 <= dn_db->parms.t2) {
 		if (dn_db->parms.timer3) {
-			for(ifa = dn_db->ifa_list; ifa; ifa = ifa->ifa_next) {
+			for (ifa = rcu_dereference(dn_db->ifa_list);
+			     ifa;
+			     ifa = rcu_dereference(ifa->ifa_next)) {
 				if (!(ifa->ifa_flags & IFA_F_SECONDARY))
 					dn_db->parms.timer3(dev, ifa);
 			}
@@ -1039,13 +1057,13 @@
 	} else {
 		dn_db->t3 -= dn_db->parms.t2;
 	}
-
+	rcu_read_unlock();
 	dn_dev_set_timer(dev);
 }
 
 static void dn_dev_set_timer(struct net_device *dev)
 {
-	struct dn_dev *dn_db = dev->dn_ptr;
+	struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
 
 	if (dn_db->parms.t2 > dn_db->parms.t3)
 		dn_db->parms.t2 = dn_db->parms.t3;
@@ -1077,8 +1095,8 @@
 		return NULL;
 
 	memcpy(&dn_db->parms, p, sizeof(struct dn_dev_parms));
-	smp_wmb();
-	dev->dn_ptr = dn_db;
+
+	rcu_assign_pointer(dev->dn_ptr, dn_db);
 	dn_db->dev = dev;
 	init_timer(&dn_db->timer);
 
@@ -1086,7 +1104,7 @@
 
 	dn_db->neigh_parms = neigh_parms_alloc(dev, &dn_neigh_table);
 	if (!dn_db->neigh_parms) {
-		dev->dn_ptr = NULL;
+		rcu_assign_pointer(dev->dn_ptr, NULL);
 		kfree(dn_db);
 		return NULL;
 	}
@@ -1125,7 +1143,7 @@
 	struct dn_ifaddr *ifa;
 	__le16 addr = decnet_address;
 	int maybe_default = 0;
-	struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr;
+	struct dn_dev *dn_db = rtnl_dereference(dev->dn_ptr);
 
 	if ((dev->type != ARPHRD_ETHER) && (dev->type != ARPHRD_LOOPBACK))
 		return;
@@ -1176,7 +1194,7 @@
 
 static void dn_dev_delete(struct net_device *dev)
 {
-	struct dn_dev *dn_db = dev->dn_ptr;
+	struct dn_dev *dn_db = rtnl_dereference(dev->dn_ptr);
 
 	if (dn_db == NULL)
 		return;
@@ -1204,13 +1222,13 @@
 
 void dn_dev_down(struct net_device *dev)
 {
-	struct dn_dev *dn_db = dev->dn_ptr;
+	struct dn_dev *dn_db = rtnl_dereference(dev->dn_ptr);
 	struct dn_ifaddr *ifa;
 
 	if (dn_db == NULL)
 		return;
 
-	while((ifa = dn_db->ifa_list) != NULL) {
+	while ((ifa = rtnl_dereference(dn_db->ifa_list)) != NULL) {
 		dn_dev_del_ifa(dn_db, &dn_db->ifa_list, 0);
 		dn_dev_free_ifa(ifa);
 	}
@@ -1270,7 +1288,7 @@
 }
 
 static void *dn_dev_seq_start(struct seq_file *seq, loff_t *pos)
-	__acquires(rcu)
+	__acquires(RCU)
 {
 	int i;
 	struct net_device *dev;
@@ -1313,7 +1331,7 @@
 }
 
 static void dn_dev_seq_stop(struct seq_file *seq, void *v)
-	__releases(rcu)
+	__releases(RCU)
 {
 	rcu_read_unlock();
 }
@@ -1340,7 +1358,7 @@
 		struct net_device *dev = v;
 		char peer_buf[DN_ASCBUF_LEN];
 		char router_buf[DN_ASCBUF_LEN];
-		struct dn_dev *dn_db = dev->dn_ptr;
+		struct dn_dev *dn_db = rcu_dereference(dev->dn_ptr);
 
 		seq_printf(seq, "%-8s %1s     %04u %04u   %04lu %04lu"
 				"   %04hu    %03d %02x    %-10s %-7s %-7s\n",
diff --git a/net/decnet/dn_fib.c b/net/decnet/dn_fib.c
index 4ab96c1..0ef0a81 100644
--- a/net/decnet/dn_fib.c
+++ b/net/decnet/dn_fib.c
@@ -610,10 +610,12 @@
 	/* Scan device list */
 	rcu_read_lock();
 	for_each_netdev_rcu(&init_net, dev) {
-		dn_db = dev->dn_ptr;
+		dn_db = rcu_dereference(dev->dn_ptr);
 		if (dn_db == NULL)
 			continue;
-		for(ifa2 = dn_db->ifa_list; ifa2; ifa2 = ifa2->ifa_next) {
+		for (ifa2 = rcu_dereference(dn_db->ifa_list);
+		     ifa2 != NULL;
+		     ifa2 = rcu_dereference(ifa2->ifa_next)) {
 			if (ifa2->ifa_local == ifa->ifa_local) {
 				found_it = 1;
 				break;
diff --git a/net/decnet/dn_neigh.c b/net/decnet/dn_neigh.c
index a085dbc..602dade 100644
--- a/net/decnet/dn_neigh.c
+++ b/net/decnet/dn_neigh.c
@@ -391,7 +391,7 @@
 		write_lock(&neigh->lock);
 
 		neigh->used = jiffies;
-		dn_db = (struct dn_dev *)neigh->dev->dn_ptr;
+		dn_db = rcu_dereference(neigh->dev->dn_ptr);
 
 		if (!(neigh->nud_state & NUD_PERMANENT)) {
 			neigh->updated = jiffies;
diff --git a/net/decnet/dn_route.c b/net/decnet/dn_route.c
index df0f3e5..e2e9268 100644
--- a/net/decnet/dn_route.c
+++ b/net/decnet/dn_route.c
@@ -93,7 +93,7 @@
 
 struct dn_rt_hash_bucket
 {
-	struct dn_route *chain;
+	struct dn_route __rcu *chain;
 	spinlock_t lock;
 };
 
@@ -157,15 +157,17 @@
 static void dn_dst_check_expire(unsigned long dummy)
 {
 	int i;
-	struct dn_route *rt, **rtp;
+	struct dn_route *rt;
+	struct dn_route __rcu **rtp;
 	unsigned long now = jiffies;
 	unsigned long expire = 120 * HZ;
 
-	for(i = 0; i <= dn_rt_hash_mask; i++) {
+	for (i = 0; i <= dn_rt_hash_mask; i++) {
 		rtp = &dn_rt_hash_table[i].chain;
 
 		spin_lock(&dn_rt_hash_table[i].lock);
-		while((rt=*rtp) != NULL) {
+		while ((rt = rcu_dereference_protected(*rtp,
+						lockdep_is_held(&dn_rt_hash_table[i].lock))) != NULL) {
 			if (atomic_read(&rt->dst.__refcnt) ||
 					(now - rt->dst.lastuse) < expire) {
 				rtp = &rt->dst.dn_next;
@@ -186,17 +188,19 @@
 
 static int dn_dst_gc(struct dst_ops *ops)
 {
-	struct dn_route *rt, **rtp;
+	struct dn_route *rt;
+	struct dn_route __rcu **rtp;
 	int i;
 	unsigned long now = jiffies;
 	unsigned long expire = 10 * HZ;
 
-	for(i = 0; i <= dn_rt_hash_mask; i++) {
+	for (i = 0; i <= dn_rt_hash_mask; i++) {
 
 		spin_lock_bh(&dn_rt_hash_table[i].lock);
 		rtp = &dn_rt_hash_table[i].chain;
 
-		while((rt=*rtp) != NULL) {
+		while ((rt = rcu_dereference_protected(*rtp,
+						lockdep_is_held(&dn_rt_hash_table[i].lock))) != NULL) {
 			if (atomic_read(&rt->dst.__refcnt) ||
 					(now - rt->dst.lastuse) < expire) {
 				rtp = &rt->dst.dn_next;
@@ -227,7 +231,7 @@
 {
 	u32 min_mtu = 230;
 	struct dn_dev *dn = dst->neighbour ?
-			    (struct dn_dev *)dst->neighbour->dev->dn_ptr : NULL;
+			    rcu_dereference_raw(dst->neighbour->dev->dn_ptr) : NULL;
 
 	if (dn && dn->use_long == 0)
 		min_mtu -= 6;
@@ -236,13 +240,13 @@
 
 	if (dst_metric(dst, RTAX_MTU) > mtu && mtu >= min_mtu) {
 		if (!(dst_metric_locked(dst, RTAX_MTU))) {
-			dst->metrics[RTAX_MTU-1] = mtu;
+			dst_metric_set(dst, RTAX_MTU, mtu);
 			dst_set_expires(dst, dn_rt_mtu_expires);
 		}
 		if (!(dst_metric_locked(dst, RTAX_ADVMSS))) {
 			u32 mss = mtu - DN_MAX_NSP_DATA_HEADER;
 			if (dst_metric(dst, RTAX_ADVMSS) > mss)
-				dst->metrics[RTAX_ADVMSS-1] = mss;
+				dst_metric_set(dst, RTAX_ADVMSS, mss);
 		}
 	}
 }
@@ -267,23 +271,25 @@
 
 static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
 {
-	return ((fl1->nl_u.dn_u.daddr ^ fl2->nl_u.dn_u.daddr) |
-		(fl1->nl_u.dn_u.saddr ^ fl2->nl_u.dn_u.saddr) |
+	return ((fl1->fld_dst ^ fl2->fld_dst) |
+		(fl1->fld_src ^ fl2->fld_src) |
 		(fl1->mark ^ fl2->mark) |
-		(fl1->nl_u.dn_u.scope ^ fl2->nl_u.dn_u.scope) |
+		(fl1->fld_scope ^ fl2->fld_scope) |
 		(fl1->oif ^ fl2->oif) |
 		(fl1->iif ^ fl2->iif)) == 0;
 }
 
 static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route **rp)
 {
-	struct dn_route *rth, **rthp;
+	struct dn_route *rth;
+	struct dn_route __rcu **rthp;
 	unsigned long now = jiffies;
 
 	rthp = &dn_rt_hash_table[hash].chain;
 
 	spin_lock_bh(&dn_rt_hash_table[hash].lock);
-	while((rth = *rthp) != NULL) {
+	while ((rth = rcu_dereference_protected(*rthp,
+						lockdep_is_held(&dn_rt_hash_table[hash].lock))) != NULL) {
 		if (compare_keys(&rth->fl, &rt->fl)) {
 			/* Put it first */
 			*rthp = rth->dst.dn_next;
@@ -315,15 +321,15 @@
 	int i;
 	struct dn_route *rt, *next;
 
-	for(i = 0; i < dn_rt_hash_mask; i++) {
+	for (i = 0; i < dn_rt_hash_mask; i++) {
 		spin_lock_bh(&dn_rt_hash_table[i].lock);
 
-		if ((rt = xchg(&dn_rt_hash_table[i].chain, NULL)) == NULL)
+		if ((rt = xchg((struct dn_route **)&dn_rt_hash_table[i].chain, NULL)) == NULL)
 			goto nothing_to_declare;
 
-		for(; rt; rt=next) {
-			next = rt->dst.dn_next;
-			rt->dst.dn_next = NULL;
+		for (; rt; rt = next) {
+			next = rcu_dereference_raw(rt->dst.dn_next);
+			RCU_INIT_POINTER(rt->dst.dn_next, NULL);
 			dst_free((struct dst_entry *)rt);
 		}
 
@@ -458,15 +464,16 @@
  */
 static int dn_route_rx_packet(struct sk_buff *skb)
 {
-	struct dn_skb_cb *cb = DN_SKB_CB(skb);
+	struct dn_skb_cb *cb;
 	int err;
 
 	if ((err = dn_route_input(skb)) == 0)
 		return dst_input(skb);
 
+	cb = DN_SKB_CB(skb);
 	if (decnet_debug_level & 4) {
 		char *devname = skb->dev ? skb->dev->name : "???";
-		struct dn_skb_cb *cb = DN_SKB_CB(skb);
+
 		printk(KERN_DEBUG
 			"DECnet: dn_route_rx_packet: rt_flags=0x%02x dev=%s len=%d src=0x%04hx dst=0x%04hx err=%d type=%d\n",
 			(int)cb->rt_flags, devname, skb->len,
@@ -573,7 +580,7 @@
 	struct dn_skb_cb *cb;
 	unsigned char flags = 0;
 	__u16 len = le16_to_cpu(*(__le16 *)skb->data);
-	struct dn_dev *dn = (struct dn_dev *)dev->dn_ptr;
+	struct dn_dev *dn = rcu_dereference(dev->dn_ptr);
 	unsigned char padlen = 0;
 
 	if (!net_eq(dev_net(dev), &init_net))
@@ -728,7 +735,7 @@
 {
 	struct dn_skb_cb *cb = DN_SKB_CB(skb);
 	struct dst_entry *dst = skb_dst(skb);
-	struct dn_dev *dn_db = dst->dev->dn_ptr;
+	struct dn_dev *dn_db = rcu_dereference(dst->dev->dn_ptr);
 	struct dn_route *rt;
 	struct neighbour *neigh = dst->neighbour;
 	int header_len;
@@ -799,8 +806,7 @@
 		if (DN_FIB_RES_GW(*res) &&
 		    DN_FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
 			rt->rt_gateway = DN_FIB_RES_GW(*res);
-		memcpy(rt->dst.metrics, fi->fib_metrics,
-		       sizeof(rt->dst.metrics));
+		dst_import_metrics(&rt->dst, fi->fib_metrics);
 	}
 	rt->rt_type = res->type;
 
@@ -813,11 +819,11 @@
 
 	if (dst_metric(&rt->dst, RTAX_MTU) == 0 ||
 	    dst_metric(&rt->dst, RTAX_MTU) > rt->dst.dev->mtu)
-		rt->dst.metrics[RTAX_MTU-1] = rt->dst.dev->mtu;
+		dst_metric_set(&rt->dst, RTAX_MTU, rt->dst.dev->mtu);
 	mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->dst));
 	if (dst_metric(&rt->dst, RTAX_ADVMSS) == 0 ||
 	    dst_metric(&rt->dst, RTAX_ADVMSS) > mss)
-		rt->dst.metrics[RTAX_ADVMSS-1] = mss;
+		dst_metric_set(&rt->dst, RTAX_ADVMSS, mss);
 	return 0;
 }
 
@@ -835,13 +841,16 @@
 static __le16 dnet_select_source(const struct net_device *dev, __le16 daddr, int scope)
 {
 	__le16 saddr = 0;
-	struct dn_dev *dn_db = dev->dn_ptr;
+	struct dn_dev *dn_db;
 	struct dn_ifaddr *ifa;
 	int best_match = 0;
 	int ret;
 
-	read_lock(&dev_base_lock);
-	for(ifa = dn_db->ifa_list; ifa; ifa = ifa->ifa_next) {
+	rcu_read_lock();
+	dn_db = rcu_dereference(dev->dn_ptr);
+	for (ifa = rcu_dereference(dn_db->ifa_list);
+	     ifa != NULL;
+	     ifa = rcu_dereference(ifa->ifa_next)) {
 		if (ifa->ifa_scope > scope)
 			continue;
 		if (!daddr) {
@@ -854,7 +863,7 @@
 		if (best_match == 0)
 			saddr = ifa->ifa_local;
 	}
-	read_unlock(&dev_base_lock);
+	rcu_read_unlock();
 
 	return saddr;
 }
@@ -872,11 +881,9 @@
 
 static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *oldflp, int try_hard)
 {
-	struct flowi fl = { .nl_u = { .dn_u =
-				      { .daddr = oldflp->fld_dst,
-					.saddr = oldflp->fld_src,
-					.scope = RT_SCOPE_UNIVERSE,
-				     } },
+	struct flowi fl = { .fld_dst = oldflp->fld_dst,
+			    .fld_src = oldflp->fld_src,
+			    .fld_scope = RT_SCOPE_UNIVERSE,
 			    .mark = oldflp->mark,
 			    .iif = init_net.loopback_dev->ifindex,
 			    .oif = oldflp->oif };
@@ -1020,7 +1027,7 @@
 		err = -ENODEV;
 		if (dev_out == NULL)
 			goto out;
-		dn_db = dev_out->dn_ptr;
+		dn_db = rcu_dereference_raw(dev_out->dn_ptr);
 		/* Possible improvement - check all devices for local addr */
 		if (dn_dev_islocal(dev_out, fl.fld_dst)) {
 			dev_put(dev_out);
@@ -1171,7 +1178,7 @@
 			if ((flp->fld_dst == rt->fl.fld_dst) &&
 			    (flp->fld_src == rt->fl.fld_src) &&
 			    (flp->mark == rt->fl.mark) &&
-			    (rt->fl.iif == 0) &&
+			    dn_is_output_route(rt) &&
 			    (rt->fl.oif == flp->oif)) {
 				dst_use(&rt->dst, jiffies);
 				rcu_read_unlock_bh();
@@ -1220,11 +1227,9 @@
 	int flags = 0;
 	__le16 gateway = 0;
 	__le16 local_src = 0;
-	struct flowi fl = { .nl_u = { .dn_u =
-				     { .daddr = cb->dst,
-				       .saddr = cb->src,
-				       .scope = RT_SCOPE_UNIVERSE,
-				    } },
+	struct flowi fl = { .fld_dst = cb->dst,
+			    .fld_src = cb->src,
+			    .fld_scope = RT_SCOPE_UNIVERSE,
 			    .mark = skb->mark,
 			    .iif = skb->dev->ifindex };
 	struct dn_fib_res res = { .fi = NULL, .type = RTN_UNREACHABLE };
@@ -1233,7 +1238,7 @@
 
 	dev_hold(in_dev);
 
-	if ((dn_db = in_dev->dn_ptr) == NULL)
+	if ((dn_db = rcu_dereference(in_dev->dn_ptr)) == NULL)
 		goto out;
 
 	/* Zero source addresses are not allowed */
@@ -1496,13 +1501,13 @@
 	RTA_PUT(skb, RTA_PREFSRC, 2, &rt->rt_local_src);
 	if (rt->rt_daddr != rt->rt_gateway)
 		RTA_PUT(skb, RTA_GATEWAY, 2, &rt->rt_gateway);
-	if (rtnetlink_put_metrics(skb, rt->dst.metrics) < 0)
+	if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
 		goto rtattr_failure;
 	expires = rt->dst.expires ? rt->dst.expires - jiffies : 0;
 	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, 0, 0, expires,
 			       rt->dst.error) < 0)
 		goto rtattr_failure;
-	if (rt->fl.iif)
+	if (dn_is_input_route(rt))
 		RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fl.iif);
 
 	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
@@ -1677,15 +1682,15 @@
 {
 	struct dn_rt_cache_iter_state *s = seq->private;
 
-	rt = rt->dst.dn_next;
-	while(!rt) {
+	rt = rcu_dereference_bh(rt->dst.dn_next);
+	while (!rt) {
 		rcu_read_unlock_bh();
 		if (--s->bucket < 0)
 			break;
 		rcu_read_lock_bh();
-		rt = dn_rt_hash_table[s->bucket].chain;
+		rt = rcu_dereference_bh(dn_rt_hash_table[s->bucket].chain);
 	}
-	return rcu_dereference_bh(rt);
+	return rt;
 }
 
 static void *dn_rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c
index 48fdf10..6eb91df 100644
--- a/net/decnet/dn_rules.c
+++ b/net/decnet/dn_rules.c
@@ -175,7 +175,7 @@
 
 unsigned dnet_addr_type(__le16 addr)
 {
-	struct flowi fl = { .nl_u = { .dn_u = { .daddr = addr } } };
+	struct flowi fl = { .fld_dst = addr };
 	struct dn_fib_res res;
 	unsigned ret = RTN_UNICAST;
 	struct dn_fib_table *tb = dn_fib_get_table(RT_TABLE_LOCAL, 0);
diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
index be3eb8e..28f8b5e 100644
--- a/net/decnet/sysctl_net_decnet.c
+++ b/net/decnet/sysctl_net_decnet.c
@@ -38,7 +38,7 @@
 int decnet_no_fc_max_cwnd = NSP_MIN_WINDOW;
 
 /* Reasonable defaults, I hope, based on tcp's defaults */
-int sysctl_decnet_mem[3] = { 768 << 3, 1024 << 3, 1536 << 3 };
+long sysctl_decnet_mem[3] = { 768 << 3, 1024 << 3, 1536 << 3 };
 int sysctl_decnet_wmem[3] = { 4 * 1024, 16 * 1024, 128 * 1024 };
 int sysctl_decnet_rmem[3] = { 4 * 1024, 87380, 87380 * 2 };
 
@@ -324,7 +324,7 @@
 		.data = &sysctl_decnet_mem,
 		.maxlen = sizeof(sysctl_decnet_mem),
 		.mode = 0644,
-		.proc_handler = proc_dointvec,
+		.proc_handler = proc_doulongvec_minmax,
 	},
 	{
 		.procname = "decnet_rmem",
diff --git a/net/dns_resolver/Makefile b/net/dns_resolver/Makefile
index c0ef4e7..d5c13c2eb 100644
--- a/net/dns_resolver/Makefile
+++ b/net/dns_resolver/Makefile
@@ -4,4 +4,4 @@
 
 obj-$(CONFIG_DNS_RESOLVER) += dns_resolver.o
 
-dns_resolver-objs :=  dns_key.o dns_query.o
+dns_resolver-y :=  dns_key.o dns_query.o
diff --git a/net/econet/Makefile b/net/econet/Makefile
index 39f0a77..05fae8b 100644
--- a/net/econet/Makefile
+++ b/net/econet/Makefile
@@ -4,4 +4,4 @@
 
 obj-$(CONFIG_ECONET) += econet.o
 
-econet-objs := af_econet.o
+econet-y := af_econet.o
diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index f8c1ae4..f180371 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -31,6 +31,7 @@
 #include <linux/skbuff.h>
 #include <linux/udp.h>
 #include <linux/slab.h>
+#include <linux/vmalloc.h>
 #include <net/sock.h>
 #include <net/inet_common.h>
 #include <linux/stat.h>
@@ -276,12 +277,12 @@
 #endif
 #ifdef CONFIG_ECONET_AUNUDP
 	struct msghdr udpmsg;
-	struct iovec iov[msg->msg_iovlen+1];
+	struct iovec iov[2];
 	struct aunhdr ah;
 	struct sockaddr_in udpdest;
 	__kernel_size_t size;
-	int i;
 	mm_segment_t oldfs;
+	char *userbuf;
 #endif
 
 	/*
@@ -297,23 +298,14 @@
 
 	mutex_lock(&econet_mutex);
 
-	if (saddr == NULL) {
-		struct econet_sock *eo = ec_sk(sk);
-
-		addr.station = eo->station;
-		addr.net     = eo->net;
-		port	     = eo->port;
-		cb	     = eo->cb;
-	} else {
-		if (msg->msg_namelen < sizeof(struct sockaddr_ec)) {
-			mutex_unlock(&econet_mutex);
-			return -EINVAL;
-		}
-		addr.station = saddr->addr.station;
-		addr.net = saddr->addr.net;
-		port = saddr->port;
-		cb = saddr->cb;
-	}
+	if (saddr == NULL || msg->msg_namelen < sizeof(struct sockaddr_ec)) {
+		mutex_unlock(&econet_mutex);
+		return -EINVAL;
+	}
+	addr.station = saddr->addr.station;
+	addr.net = saddr->addr.net;
+	port = saddr->port;
+	cb = saddr->cb;
 
 	/* Look for a device with the right network number. */
 	dev = net2dev_map[addr.net];
@@ -328,17 +320,17 @@
 		}
 	}
 
-	if (len + 15 > dev->mtu) {
-		mutex_unlock(&econet_mutex);
-		return -EMSGSIZE;
-	}
-
 	if (dev->type == ARPHRD_ECONET) {
 		/* Real hardware Econet.  We're not worthy etc. */
 #ifdef CONFIG_ECONET_NATIVE
 		unsigned short proto = 0;
 		int res;
 
+		if (len + 15 > dev->mtu) {
+			mutex_unlock(&econet_mutex);
+			return -EMSGSIZE;
+		}
+
 		dev_hold(dev);
 
 		skb = sock_alloc_send_skb(sk, len+LL_ALLOCATED_SPACE(dev),
@@ -351,7 +343,6 @@
 
 		eb = (struct ec_cb *)&skb->cb;
 
-		/* BUG: saddr may be NULL */
 		eb->cookie = saddr->cookie;
 		eb->sec = *saddr;
 		eb->sent = ec_tx_done;
@@ -415,6 +406,11 @@
 		return -ENETDOWN;		/* No socket - can't send */
 	}
 
+	if (len > 32768) {
+		err = -E2BIG;
+		goto error;
+	}
+
 	/* Make up a UDP datagram and hand it off to some higher intellect. */
 
 	memset(&udpdest, 0, sizeof(udpdest));
@@ -446,36 +442,26 @@
 
 	/* tack our header on the front of the iovec */
 	size = sizeof(struct aunhdr);
-	/*
-	 * XXX: that is b0rken.  We can't mix userland and kernel pointers
-	 * in iovec, since on a lot of platforms copy_from_user() will
-	 * *not* work with the kernel and userland ones at the same time,
-	 * regardless of what we do with set_fs().  And we are talking about
-	 * econet-over-ethernet here, so "it's only ARM anyway" doesn't
-	 * apply.  Any suggestions on fixing that code?		-- AV
-	 */
 	iov[0].iov_base = (void *)&ah;
 	iov[0].iov_len = size;
-	for (i = 0; i < msg->msg_iovlen; i++) {
-		void __user *base = msg->msg_iov[i].iov_base;
-		size_t iov_len = msg->msg_iov[i].iov_len;
-		/* Check it now since we switch to KERNEL_DS later. */
-		if (!access_ok(VERIFY_READ, base, iov_len)) {
-			mutex_unlock(&econet_mutex);
-			return -EFAULT;
-		}
-		iov[i+1].iov_base = base;
-		iov[i+1].iov_len = iov_len;
-		size += iov_len;
+
+	userbuf = vmalloc(len);
+	if (userbuf == NULL) {
+		err = -ENOMEM;
+		goto error;
 	}
 
+	iov[1].iov_base = userbuf;
+	iov[1].iov_len = len;
+	err = memcpy_fromiovec(userbuf, msg->msg_iov, len);
+	if (err)
+		goto error_free_buf;
+
 	/* Get a skbuff (no data, just holds our cb information) */
 	if ((skb = sock_alloc_send_skb(sk, 0,
 				       msg->msg_flags & MSG_DONTWAIT,
-				       &err)) == NULL) {
-		mutex_unlock(&econet_mutex);
-		return err;
-	}
+				       &err)) == NULL)
+		goto error_free_buf;
 
 	eb = (struct ec_cb *)&skb->cb;
 
@@ -491,7 +477,7 @@
 	udpmsg.msg_name = (void *)&udpdest;
 	udpmsg.msg_namelen = sizeof(udpdest);
 	udpmsg.msg_iov = &iov[0];
-	udpmsg.msg_iovlen = msg->msg_iovlen + 1;
+	udpmsg.msg_iovlen = 2;
 	udpmsg.msg_control = NULL;
 	udpmsg.msg_controllen = 0;
 	udpmsg.msg_flags=0;
@@ -499,9 +485,13 @@
 	oldfs = get_fs(); set_fs(KERNEL_DS);	/* More privs :-) */
 	err = sock_sendmsg(udpsock, &udpmsg, size);
 	set_fs(oldfs);
+
+error_free_buf:
+	vfree(userbuf);
 #else
 	err = -EPROTOTYPE;
 #endif
+error:
 	mutex_unlock(&econet_mutex);
 
 	return err;
@@ -671,6 +661,11 @@
 	err = 0;
 	switch (cmd) {
 	case SIOCSIFADDR:
+		if (!capable(CAP_NET_ADMIN)) {
+			err = -EPERM;
+			break;
+		}
+
 		edev = dev->ec_ptr;
 		if (edev == NULL) {
 			/* Magic up a new one. */
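
The sendmsg() rework above no longer splices caller-owned iovec entries into the kernel's own iovec; it flattens the payload into one bounded kernel buffer with memcpy_fromiovec() first. The userspace analogue below (illustrative only) shows the flattening step:

#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>

/* Copy at most len bytes gathered from iov[] into one contiguous
 * buffer; the kernel side uses vmalloc(len) + memcpy_fromiovec(). */
static char *flatten_iov(const struct iovec *iov, int iovcnt, size_t len)
{
	char *buf = malloc(len);
	size_t off = 0;
	int i;

	if (buf == NULL)
		return NULL;
	for (i = 0; i < iovcnt && off < len; i++) {
		size_t n = iov[i].iov_len;

		if (n > len - off)
			n = len - off;
		memcpy(buf + off, iov[i].iov_base, n);
		off += n;
	}
	return buf;
}
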
diff --git a/net/ieee802154/af_ieee802154.c b/net/ieee802154/af_ieee802154.c
index 93c91b6..6df6ecf 100644
--- a/net/ieee802154/af_ieee802154.c
+++ b/net/ieee802154/af_ieee802154.c
@@ -52,11 +52,11 @@
 
 	switch (addr->addr_type) {
 	case IEEE802154_ADDR_LONG:
-		rtnl_lock();
-		dev = dev_getbyhwaddr(net, ARPHRD_IEEE802154, addr->hwaddr);
+		rcu_read_lock();
+		dev = dev_getbyhwaddr_rcu(net, ARPHRD_IEEE802154, addr->hwaddr);
 		if (dev)
 			dev_hold(dev);
-		rtnl_unlock();
+		rcu_read_unlock();
 		break;
 	case IEEE802154_ADDR_SHORT:
 		if (addr->pan_id == 0xffff ||
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index f581f77..f2b6110 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1148,21 +1148,13 @@
 	struct flowi fl = {
 		.oif = sk->sk_bound_dev_if,
 		.mark = sk->sk_mark,
-		.nl_u = {
-			.ip4_u = {
-				.daddr	= daddr,
-				.saddr	= inet->inet_saddr,
-				.tos	= RT_CONN_FLAGS(sk),
-			},
-		},
+		.fl4_dst = daddr,
+		.fl4_src = inet->inet_saddr,
+		.fl4_tos = RT_CONN_FLAGS(sk),
 		.proto = sk->sk_protocol,
 		.flags = inet_sk_flowi_flags(sk),
-		.uli_u = {
-			.ports = {
-				.sport = inet->inet_sport,
-				.dport = inet->inet_dport,
-			},
-		},
+		.fl_ip_sport = inet->inet_sport,
+		.fl_ip_dport = inet->inet_dport,
 	};
 
 	security_sk_classify_flow(sk, &fl);
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index d8e540c..a2fc7b9 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -433,8 +433,8 @@
 
 static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev)
 {
-	struct flowi fl = { .nl_u = { .ip4_u = { .daddr = sip,
-						 .saddr = tip } } };
+	struct flowi fl = { .fl4_dst = sip,
+			    .fl4_src = tip };
 	struct rtable *rt;
 	int flag = 0;
 	/*unsigned long now; */
@@ -883,7 +883,7 @@
 
 			dont_send = arp_ignore(in_dev, sip, tip);
 			if (!dont_send && IN_DEV_ARPFILTER(in_dev))
-				dont_send |= arp_filter(sip, tip, dev);
+				dont_send = arp_filter(sip, tip, dev);
 			if (!dont_send) {
 				n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
 				if (n) {
@@ -1017,13 +1017,14 @@
 		IPV4_DEVCONF_ALL(net, PROXY_ARP) = on;
 		return 0;
 	}
-	if (__in_dev_get_rtnl(dev)) {
-		IN_DEV_CONF_SET(__in_dev_get_rtnl(dev), PROXY_ARP, on);
+	if (__in_dev_get_rcu(dev)) {
+		IN_DEV_CONF_SET(__in_dev_get_rcu(dev), PROXY_ARP, on);
 		return 0;
 	}
 	return -ENXIO;
 }
 
+/* must be called with rcu_read_lock() */
 static int arp_req_set_public(struct net *net, struct arpreq *r,
 		struct net_device *dev)
 {
@@ -1033,7 +1034,7 @@
 	if (mask && mask != htonl(0xFFFFFFFF))
 		return -EINVAL;
 	if (!dev && (r->arp_flags & ATF_COM)) {
-		dev = dev_getbyhwaddr(net, r->arp_ha.sa_family,
+		dev = dev_getbyhwaddr_rcu(net, r->arp_ha.sa_family,
 				      r->arp_ha.sa_data);
 		if (!dev)
 			return -ENODEV;
@@ -1061,8 +1062,8 @@
 	if (r->arp_flags & ATF_PERM)
 		r->arp_flags |= ATF_COM;
 	if (dev == NULL) {
-		struct flowi fl = { .nl_u.ip4_u = { .daddr = ip,
-						    .tos = RTO_ONLINK } };
+		struct flowi fl = { .fl4_dst = ip,
+				    .fl4_tos = RTO_ONLINK };
 		struct rtable *rt;
 		err = ip_route_output_key(net, &rt, &fl);
 		if (err != 0)
@@ -1169,8 +1170,8 @@
 
 	ip = ((struct sockaddr_in *)&r->arp_pa)->sin_addr.s_addr;
 	if (dev == NULL) {
-		struct flowi fl = { .nl_u.ip4_u = { .daddr = ip,
-						    .tos = RTO_ONLINK } };
+		struct flowi fl = { .fl4_dst = ip,
+				    .fl4_tos = RTO_ONLINK };
 		struct rtable *rt;
 		err = ip_route_output_key(net, &rt, &fl);
 		if (err != 0)
@@ -1225,10 +1226,10 @@
 	if (!(r.arp_flags & ATF_NETMASK))
 		((struct sockaddr_in *)&r.arp_netmask)->sin_addr.s_addr =
 							   htonl(0xFFFFFFFFUL);
-	rtnl_lock();
+	rcu_read_lock();
 	if (r.arp_dev[0]) {
 		err = -ENODEV;
-		dev = __dev_get_by_name(net, r.arp_dev);
+		dev = dev_get_by_name_rcu(net, r.arp_dev);
 		if (dev == NULL)
 			goto out;
 
@@ -1252,12 +1253,12 @@
 		break;
 	case SIOCGARP:
 		err = arp_req_get(&r, dev);
-		if (!err && copy_to_user(arg, &r, sizeof(r)))
-			err = -EFAULT;
 		break;
 	}
 out:
-	rtnl_unlock();
+	rcu_read_unlock();
+	if (cmd == SIOCGARP && !err && copy_to_user(arg, &r, sizeof(r)))
+		err = -EFAULT;
 	return err;
 }
 
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index dc94b03..748cb5b 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1256,6 +1256,87 @@
 		rtnl_set_sk_err(net, RTNLGRP_IPV4_IFADDR, err);
 }
 
+static size_t inet_get_link_af_size(const struct net_device *dev)
+{
+	struct in_device *in_dev = __in_dev_get_rtnl(dev);
+
+	if (!in_dev)
+		return 0;
+
+	return nla_total_size(IPV4_DEVCONF_MAX * 4); /* IFLA_INET_CONF */
+}
+
+static int inet_fill_link_af(struct sk_buff *skb, const struct net_device *dev)
+{
+	struct in_device *in_dev = __in_dev_get_rtnl(dev);
+	struct nlattr *nla;
+	int i;
+
+	if (!in_dev)
+		return -ENODATA;
+
+	nla = nla_reserve(skb, IFLA_INET_CONF, IPV4_DEVCONF_MAX * 4);
+	if (nla == NULL)
+		return -EMSGSIZE;
+
+	for (i = 0; i < IPV4_DEVCONF_MAX; i++)
+		((u32 *) nla_data(nla))[i] = in_dev->cnf.data[i];
+
+	return 0;
+}
+
+static const struct nla_policy inet_af_policy[IFLA_INET_MAX+1] = {
+	[IFLA_INET_CONF]	= { .type = NLA_NESTED },
+};
+
+static int inet_validate_link_af(const struct net_device *dev,
+				 const struct nlattr *nla)
+{
+	struct nlattr *a, *tb[IFLA_INET_MAX+1];
+	int err, rem;
+
+	if (dev && !__in_dev_get_rtnl(dev))
+		return -EAFNOSUPPORT;
+
+	err = nla_parse_nested(tb, IFLA_INET_MAX, nla, inet_af_policy);
+	if (err < 0)
+		return err;
+
+	if (tb[IFLA_INET_CONF]) {
+		nla_for_each_nested(a, tb[IFLA_INET_CONF], rem) {
+			int cfgid = nla_type(a);
+
+			if (nla_len(a) < 4)
+				return -EINVAL;
+
+			if (cfgid <= 0 || cfgid > IPV4_DEVCONF_MAX)
+				return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int inet_set_link_af(struct net_device *dev, const struct nlattr *nla)
+{
+	struct in_device *in_dev = __in_dev_get_rtnl(dev);
+	struct nlattr *a, *tb[IFLA_INET_MAX+1];
+	int rem;
+
+	if (!in_dev)
+		return -EAFNOSUPPORT;
+
+	if (nla_parse_nested(tb, IFLA_INET_MAX, nla, NULL) < 0)
+		BUG();
+
+	if (tb[IFLA_INET_CONF]) {
+		nla_for_each_nested(a, tb[IFLA_INET_CONF], rem)
+			ipv4_devconf_set(in_dev, nla_type(a), nla_get_u32(a));
+	}
+
+	return 0;
+}
+
 #ifdef CONFIG_SYSCTL
 
 static void devinet_copy_dflt_conf(struct net *net, int i)
@@ -1349,9 +1430,9 @@
 	return ret;
 }
 
-int ipv4_doint_and_flush(ctl_table *ctl, int write,
-			 void __user *buffer,
-			 size_t *lenp, loff_t *ppos)
+static int ipv4_doint_and_flush(ctl_table *ctl, int write,
+				void __user *buffer,
+				size_t *lenp, loff_t *ppos)
 {
 	int *valp = ctl->data;
 	int val = *valp;
@@ -1619,6 +1700,14 @@
 	.exit = devinet_exit_net,
 };
 
+static struct rtnl_af_ops inet_af_ops = {
+	.family		  = AF_INET,
+	.fill_link_af	  = inet_fill_link_af,
+	.get_link_af_size = inet_get_link_af_size,
+	.validate_link_af = inet_validate_link_af,
+	.set_link_af	  = inet_set_link_af,
+};
+
 void __init devinet_init(void)
 {
 	register_pernet_subsys(&devinet_ops);
@@ -1626,6 +1715,8 @@
 	register_gifconf(PF_INET, inet_gifconf);
 	register_netdevice_notifier(&ip_netdev_notifier);
 
+	rtnl_af_register(&inet_af_ops);
+
 	rtnl_register(PF_INET, RTM_NEWADDR, inet_rtm_newaddr, NULL);
 	rtnl_register(PF_INET, RTM_DELADDR, inet_rtm_deladdr, NULL);
 	rtnl_register(PF_INET, RTM_GETADDR, NULL, inet_dump_ifaddr);
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 14ca1f1..e42a905 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -23,6 +23,8 @@
 
 #define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
 
+static u32 esp4_get_mtu(struct xfrm_state *x, int mtu);
+
 /*
  * Allocate an AEAD request structure with extra space for SG and IV.
  *
@@ -117,25 +119,35 @@
 	int blksize;
 	int clen;
 	int alen;
+	int plen;
+	int tfclen;
 	int nfrags;
 
 	/* skb is pure payload to encrypt */
 
 	err = -ENOMEM;
 
-	/* Round to block size */
-	clen = skb->len;
-
 	esp = x->data;
 	aead = esp->aead;
 	alen = crypto_aead_authsize(aead);
 
+	tfclen = 0;
+	if (x->tfcpad) {
+		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
+		u32 padto;
+
+		padto = min(x->tfcpad, esp4_get_mtu(x, dst->child_mtu_cached));
+		if (skb->len < padto)
+			tfclen = padto - skb->len;
+	}
 	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
-	clen = ALIGN(clen + 2, blksize);
+	clen = ALIGN(skb->len + 2 + tfclen, blksize);
 	if (esp->padlen)
 		clen = ALIGN(clen, esp->padlen);
+	plen = clen - skb->len - tfclen;
 
-	if ((err = skb_cow_data(skb, clen - skb->len + alen, &trailer)) < 0)
+	err = skb_cow_data(skb, tfclen + plen + alen, &trailer);
+	if (err < 0)
 		goto error;
 	nfrags = err;
 
@@ -150,13 +162,17 @@
 
 	/* Fill padding... */
 	tail = skb_tail_pointer(trailer);
+	if (tfclen) {
+		memset(tail, 0, tfclen);
+		tail += tfclen;
+	}
 	do {
 		int i;
-		for (i=0; i<clen-skb->len - 2; i++)
+		for (i = 0; i < plen - 2; i++)
 			tail[i] = i + 1;
 	} while (0);
-	tail[clen - skb->len - 2] = (clen - skb->len) - 2;
-	tail[clen - skb->len - 1] = *skb_mac_header(skb);
+	tail[plen - 2] = plen - 2;
+	tail[plen - 1] = *skb_mac_header(skb);
 	pskb_put(skb, trailer, clen - skb->len + alen);
 
 	skb_push(skb, -skb_network_offset(skb));
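
A worked example of the new TFC padding arithmetic in esp_output(), standalone and with arbitrary sample numbers (the real code additionally clamps the pad target via esp4_get_mtu(), which this sketch ignores):

#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int skb_len = 40, tfcpad = 64, blksize = 16;
	unsigned int tfclen = 0, clen, plen;

	if (tfcpad && skb_len < tfcpad)		/* pad toward the TFC target */
		tfclen = tfcpad - skb_len;	/* here: 24 zero bytes */

	clen = ALIGN(skb_len + 2 + tfclen, blksize);	/* 66 -> 80 */
	plen = clen - skb_len - tfclen;	/* 16: 14 pad bytes + len + proto */

	printf("tfclen=%u clen=%u plen=%u\n", tfclen, clen, plen);
	return 0;
}
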
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index eb6f69a..d3a1112 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -158,11 +158,7 @@
 struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref)
 {
 	struct flowi fl = {
-		.nl_u = {
-			.ip4_u = {
-				.daddr = addr
-			}
-		},
+		.fl4_dst = addr,
 		.flags = FLOWI_FLAG_MATCH_ANY_IIF
 	};
 	struct fib_result res = { 0 };
@@ -193,7 +189,7 @@
 					    const struct net_device *dev,
 					    __be32 addr)
 {
-	struct flowi		fl = { .nl_u = { .ip4_u = { .daddr = addr } } };
+	struct flowi		fl = { .fl4_dst = addr };
 	struct fib_result	res;
 	unsigned ret = RTN_BROADCAST;
 	struct fib_table *local_table;
@@ -247,13 +243,9 @@
 {
 	struct in_device *in_dev;
 	struct flowi fl = {
-		.nl_u = {
-			.ip4_u = {
-				.daddr = src,
-				.saddr = dst,
-				.tos = tos
-			}
-		},
+		.fl4_dst = src,
+		.fl4_src = dst,
+		.fl4_tos = tos,
 		.mark = mark,
 		.iif = oif
 	};
@@ -853,13 +845,9 @@
 	struct fib_result       res;
 	struct flowi            fl = {
 		.mark = frn->fl_mark,
-		.nl_u = {
-			.ip4_u = {
-				.daddr = frn->fl_addr,
-				.tos = frn->fl_tos,
-				.scope = frn->fl_scope
-			}
-		}
+		.fl4_dst = frn->fl_addr,
+		.fl4_tos = frn->fl_tos,
+		.fl4_scope = frn->fl_scope,
 	};
 
 #ifdef CONFIG_IP_MULTIPLE_TABLES
diff --git a/net/ipv4/fib_lookup.h b/net/ipv4/fib_lookup.h
index a29edf2..c079cc0 100644
--- a/net/ipv4/fib_lookup.h
+++ b/net/ipv4/fib_lookup.h
@@ -47,11 +47,8 @@
 static inline void fib_result_assign(struct fib_result *res,
 				     struct fib_info *fi)
 {
-	if (res->fi != NULL)
-		fib_info_put(res->fi);
+	/* we used to play games with refcounts, but we now use RCU */
 	res->fi = fi;
-	if (fi != NULL)
-		atomic_inc(&fi->fib_clntref);
 }
 
 #endif /* _FIB_LOOKUP_H */
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 3e0da3e..12d3dc3 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -563,12 +563,8 @@
 		rcu_read_lock();
 		{
 			struct flowi fl = {
-				.nl_u = {
-					.ip4_u = {
-						.daddr = nh->nh_gw,
-						.scope = cfg->fc_scope + 1,
-					},
-				},
+				.fl4_dst = nh->nh_gw,
+				.fl4_scope = cfg->fc_scope + 1,
 				.oif = nh->nh_oif,
 			};
 
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 200eb53..0f28034 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -365,7 +365,7 @@
 	if (size <= PAGE_SIZE)
 		return kzalloc(size, GFP_KERNEL);
 	else
-		return __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
+		return vzalloc(size);
 }
 
 static void __tnode_vfree(struct work_struct *arg)
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 96bc7f9..4aa1b7f 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -386,10 +386,9 @@
 			daddr = icmp_param->replyopts.faddr;
 	}
 	{
-		struct flowi fl = { .nl_u = { .ip4_u =
-					      { .daddr = daddr,
-						.saddr = rt->rt_spec_dst,
-						.tos = RT_TOS(ip_hdr(skb)->tos) } },
+		struct flowi fl = { .fl4_dst = daddr,
+				    .fl4_src = rt->rt_spec_dst,
+				    .fl4_tos = RT_TOS(ip_hdr(skb)->tos),
 				    .proto = IPPROTO_ICMP };
 		security_skb_classify_flow(skb, &fl);
 		if (ip_route_output_key(net, &rt, &fl))
@@ -506,8 +505,8 @@
 		struct net_device *dev = NULL;
 
 		rcu_read_lock();
-		if (rt->fl.iif &&
-			net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr)
+		if (rt_is_input_route(rt) &&
+		    net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr)
 			dev = dev_get_by_index_rcu(net, rt->fl.iif);
 
 		if (dev)
@@ -542,22 +541,13 @@
 
 	{
 		struct flowi fl = {
-			.nl_u = {
-				.ip4_u = {
-					.daddr = icmp_param.replyopts.srr ?
-						icmp_param.replyopts.faddr :
-						iph->saddr,
-					.saddr = saddr,
-					.tos = RT_TOS(tos)
-				}
-			},
+			.fl4_dst = icmp_param.replyopts.srr ?
+				   icmp_param.replyopts.faddr : iph->saddr,
+			.fl4_src = saddr,
+			.fl4_tos = RT_TOS(tos),
 			.proto = IPPROTO_ICMP,
-			.uli_u = {
-				.icmpt = {
-					.type = type,
-					.code = code
-				}
-			}
+			.fl_icmp_type = type,
+			.fl_icmp_code = code,
 		};
 		int err;
 		struct rtable *rt2;
@@ -569,6 +559,9 @@
 		/* No need to clone since we're just using its address. */
 		rt2 = rt;
 
+		if (!fl.fl4_src)
+			fl.fl4_src = rt->rt_src;
+
 		err = xfrm_lookup(net, (struct dst_entry **)&rt, &fl, NULL, 0);
 		switch (err) {
 		case 0:
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index c8877c6..e0e77e2 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -149,21 +149,37 @@
 static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
 			 int sfcount, __be32 *psfsrc, int delta);
 
+
+static void ip_mc_list_reclaim(struct rcu_head *head)
+{
+	kfree(container_of(head, struct ip_mc_list, rcu));
+}
+
 static void ip_ma_put(struct ip_mc_list *im)
 {
 	if (atomic_dec_and_test(&im->refcnt)) {
 		in_dev_put(im->interface);
-		kfree(im);
+		call_rcu(&im->rcu, ip_mc_list_reclaim);
 	}
 }
 
+#define for_each_pmc_rcu(in_dev, pmc)				\
+	for (pmc = rcu_dereference(in_dev->mc_list);		\
+	     pmc != NULL;					\
+	     pmc = rcu_dereference(pmc->next_rcu))
+
+#define for_each_pmc_rtnl(in_dev, pmc)				\
+	for (pmc = rtnl_dereference(in_dev->mc_list);		\
+	     pmc != NULL;					\
+	     pmc = rtnl_dereference(pmc->next_rcu))
+
 #ifdef CONFIG_IP_MULTICAST
 
 /*
  *	Timer management
  */
 
-static __inline__ void igmp_stop_timer(struct ip_mc_list *im)
+static void igmp_stop_timer(struct ip_mc_list *im)
 {
 	spin_lock_bh(&im->lock);
 	if (del_timer(&im->timer))
@@ -284,6 +300,8 @@
 	return scount;
 }
 
+#define igmp_skb_size(skb) (*(unsigned int *)((skb)->cb))
+
 static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
 {
 	struct sk_buff *skb;
@@ -292,14 +310,20 @@
 	struct igmpv3_report *pig;
 	struct net *net = dev_net(dev);
 
-	skb = alloc_skb(size + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
-	if (skb == NULL)
-		return NULL;
+	while (1) {
+		skb = alloc_skb(size + LL_ALLOCATED_SPACE(dev),
+				GFP_ATOMIC | __GFP_NOWARN);
+		if (skb)
+			break;
+		size >>= 1;
+		if (size < 256)
+			return NULL;
+	}
+	igmp_skb_size(skb) = size;
 
 	{
 		struct flowi fl = { .oif = dev->ifindex,
-				    .nl_u = { .ip4_u = {
-				    .daddr = IGMPV3_ALL_MCR } },
+				    .fl4_dst = IGMPV3_ALL_MCR,
 				    .proto = IPPROTO_IGMP };
 		if (ip_route_output_key(net, &rt, &fl)) {
 			kfree_skb(skb);
@@ -384,7 +408,7 @@
 	return skb;
 }
 
-#define AVAILABLE(skb) ((skb) ? ((skb)->dev ? (skb)->dev->mtu - (skb)->len : \
+#define AVAILABLE(skb) ((skb) ? ((skb)->dev ? igmp_skb_size(skb) - (skb)->len : \
 	skb_tailroom(skb)) : 0)
 
 static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
@@ -502,8 +526,8 @@
 	int type;
 
 	if (!pmc) {
-		read_lock(&in_dev->mc_list_lock);
-		for (pmc=in_dev->mc_list; pmc; pmc=pmc->next) {
+		rcu_read_lock();
+		for_each_pmc_rcu(in_dev, pmc) {
 			if (pmc->multiaddr == IGMP_ALL_HOSTS)
 				continue;
 			spin_lock_bh(&pmc->lock);
@@ -514,7 +538,7 @@
 			skb = add_grec(skb, pmc, type, 0, 0);
 			spin_unlock_bh(&pmc->lock);
 		}
-		read_unlock(&in_dev->mc_list_lock);
+		rcu_read_unlock();
 	} else {
 		spin_lock_bh(&pmc->lock);
 		if (pmc->sfcount[MCAST_EXCLUDE])
@@ -556,7 +580,7 @@
 	struct sk_buff *skb = NULL;
 	int type, dtype;
 
-	read_lock(&in_dev->mc_list_lock);
+	rcu_read_lock();
 	spin_lock_bh(&in_dev->mc_tomb_lock);
 
 	/* deleted MCA's */
@@ -593,7 +617,7 @@
 	spin_unlock_bh(&in_dev->mc_tomb_lock);
 
 	/* change recs */
-	for (pmc=in_dev->mc_list; pmc; pmc=pmc->next) {
+	for_each_pmc_rcu(in_dev, pmc) {
 		spin_lock_bh(&pmc->lock);
 		if (pmc->sfcount[MCAST_EXCLUDE]) {
 			type = IGMPV3_BLOCK_OLD_SOURCES;
@@ -616,7 +640,7 @@
 		}
 		spin_unlock_bh(&pmc->lock);
 	}
-	read_unlock(&in_dev->mc_list_lock);
+	rcu_read_unlock();
 
 	if (!skb)
 		return;
@@ -644,7 +668,7 @@
 
 	{
 		struct flowi fl = { .oif = dev->ifindex,
-				    .nl_u = { .ip4_u = { .daddr = dst } },
+				    .fl4_dst = dst,
 				    .proto = IPPROTO_IGMP };
 		if (ip_route_output_key(net, &rt, &fl))
 			return -1;
@@ -813,14 +837,14 @@
 	if (group == IGMP_ALL_HOSTS)
 		return;
 
-	read_lock(&in_dev->mc_list_lock);
-	for (im=in_dev->mc_list; im!=NULL; im=im->next) {
+	rcu_read_lock();
+	for_each_pmc_rcu(in_dev, im) {
 		if (im->multiaddr == group) {
 			igmp_stop_timer(im);
 			break;
 		}
 	}
-	read_unlock(&in_dev->mc_list_lock);
+	rcu_read_unlock();
 }
 
 static void igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
@@ -906,8 +930,8 @@
 	 * - Use the igmp->igmp_code field as the maximum
 	 *   delay possible
 	 */
-	read_lock(&in_dev->mc_list_lock);
-	for (im=in_dev->mc_list; im!=NULL; im=im->next) {
+	rcu_read_lock();
+	for_each_pmc_rcu(in_dev, im) {
 		int changed;
 
 		if (group && group != im->multiaddr)
@@ -925,7 +949,7 @@
 		if (changed)
 			igmp_mod_timer(im, max_delay);
 	}
-	read_unlock(&in_dev->mc_list_lock);
+	rcu_read_unlock();
 }
 
 /* called in rcu_read_lock() section */
@@ -961,7 +985,7 @@
 	case IGMP_HOST_MEMBERSHIP_REPORT:
 	case IGMPV2_HOST_MEMBERSHIP_REPORT:
 		/* Is it our report looped back? */
-		if (skb_rtable(skb)->fl.iif == 0)
+		if (rt_is_output_route(skb_rtable(skb)))
 			break;
 		/* don't rely on MC router hearing unicast reports */
 		if (skb->pkt_type == PACKET_MULTICAST ||
@@ -1110,8 +1134,8 @@
 		kfree(pmc);
 	}
 	/* clear dead sources, too */
-	read_lock(&in_dev->mc_list_lock);
-	for (pmc=in_dev->mc_list; pmc; pmc=pmc->next) {
+	rcu_read_lock();
+	for_each_pmc_rcu(in_dev, pmc) {
 		struct ip_sf_list *psf, *psf_next;
 
 		spin_lock_bh(&pmc->lock);
@@ -1123,7 +1147,7 @@
 			kfree(psf);
 		}
 	}
-	read_unlock(&in_dev->mc_list_lock);
+	rcu_read_unlock();
 }
 #endif
 
@@ -1209,7 +1233,7 @@
 
 	ASSERT_RTNL();
 
-	for (im=in_dev->mc_list; im; im=im->next) {
+	for_each_pmc_rtnl(in_dev, im) {
 		if (im->multiaddr == addr) {
 			im->users++;
 			ip_mc_add_src(in_dev, &addr, MCAST_EXCLUDE, 0, NULL, 0);
@@ -1217,7 +1241,7 @@
 		}
 	}
 
-	im = kmalloc(sizeof(*im), GFP_KERNEL);
+	im = kzalloc(sizeof(*im), GFP_KERNEL);
 	if (!im)
 		goto out;
 
@@ -1227,26 +1251,18 @@
 	im->multiaddr = addr;
 	/* initial mode is (EX, empty) */
 	im->sfmode = MCAST_EXCLUDE;
-	im->sfcount[MCAST_INCLUDE] = 0;
 	im->sfcount[MCAST_EXCLUDE] = 1;
-	im->sources = NULL;
-	im->tomb = NULL;
-	im->crcount = 0;
 	atomic_set(&im->refcnt, 1);
 	spin_lock_init(&im->lock);
 #ifdef CONFIG_IP_MULTICAST
-	im->tm_running = 0;
 	setup_timer(&im->timer, &igmp_timer_expire, (unsigned long)im);
 	im->unsolicit_count = IGMP_Unsolicited_Report_Count;
-	im->reporter = 0;
-	im->gsquery = 0;
 #endif
-	im->loaded = 0;
-	write_lock_bh(&in_dev->mc_list_lock);
-	im->next = in_dev->mc_list;
-	in_dev->mc_list = im;
+
+	im->next_rcu = in_dev->mc_list;
 	in_dev->mc_count++;
-	write_unlock_bh(&in_dev->mc_list_lock);
+	rcu_assign_pointer(in_dev->mc_list, im);
+
 #ifdef CONFIG_IP_MULTICAST
 	igmpv3_del_delrec(in_dev, im->multiaddr);
 #endif
@@ -1260,26 +1276,32 @@
 
 /*
  *	Resend IGMP JOIN report; used for bonding.
+ *	Called with rcu_read_lock()
  */
-void ip_mc_rejoin_group(struct ip_mc_list *im)
+void ip_mc_rejoin_groups(struct in_device *in_dev)
 {
 #ifdef CONFIG_IP_MULTICAST
-	struct in_device *in_dev = im->interface;
+	struct ip_mc_list *im;
+	int type;
 
-	if (im->multiaddr == IGMP_ALL_HOSTS)
-		return;
+	for_each_pmc_rcu(in_dev, im) {
+		if (im->multiaddr == IGMP_ALL_HOSTS)
+			continue;
 
-	/* a failover is happening and switches
-	 * must be notified immediately */
-	if (IGMP_V1_SEEN(in_dev))
-		igmp_send_report(in_dev, im, IGMP_HOST_MEMBERSHIP_REPORT);
-	else if (IGMP_V2_SEEN(in_dev))
-		igmp_send_report(in_dev, im, IGMPV2_HOST_MEMBERSHIP_REPORT);
-	else
-		igmp_send_report(in_dev, im, IGMPV3_HOST_MEMBERSHIP_REPORT);
+		/* a failover is happening and switches
+		 * must be notified immediately
+		 */
+		if (IGMP_V1_SEEN(in_dev))
+			type = IGMP_HOST_MEMBERSHIP_REPORT;
+		else if (IGMP_V2_SEEN(in_dev))
+			type = IGMPV2_HOST_MEMBERSHIP_REPORT;
+		else
+			type = IGMPV3_HOST_MEMBERSHIP_REPORT;
+		igmp_send_report(in_dev, im, type);
+	}
 #endif
 }
-EXPORT_SYMBOL(ip_mc_rejoin_group);
+EXPORT_SYMBOL(ip_mc_rejoin_groups);
 
 /*
  *	A socket has left a multicast group on device dev
@@ -1287,17 +1309,18 @@
 
 void ip_mc_dec_group(struct in_device *in_dev, __be32 addr)
 {
-	struct ip_mc_list *i, **ip;
+	struct ip_mc_list *i;
+	struct ip_mc_list __rcu **ip;
 
 	ASSERT_RTNL();
 
-	for (ip=&in_dev->mc_list; (i=*ip)!=NULL; ip=&i->next) {
+	for (ip = &in_dev->mc_list;
+	     (i = rtnl_dereference(*ip)) != NULL;
+	     ip = &i->next_rcu) {
 		if (i->multiaddr == addr) {
 			if (--i->users == 0) {
-				write_lock_bh(&in_dev->mc_list_lock);
-				*ip = i->next;
+				*ip = i->next_rcu;
 				in_dev->mc_count--;
-				write_unlock_bh(&in_dev->mc_list_lock);
 				igmp_group_dropped(i);
 
 				if (!in_dev->dead)
@@ -1316,34 +1339,34 @@
 
 void ip_mc_unmap(struct in_device *in_dev)
 {
-	struct ip_mc_list *i;
+	struct ip_mc_list *pmc;
 
 	ASSERT_RTNL();
 
-	for (i = in_dev->mc_list; i; i = i->next)
-		igmp_group_dropped(i);
+	for_each_pmc_rtnl(in_dev, pmc)
+		igmp_group_dropped(pmc);
 }
 
 void ip_mc_remap(struct in_device *in_dev)
 {
-	struct ip_mc_list *i;
+	struct ip_mc_list *pmc;
 
 	ASSERT_RTNL();
 
-	for (i = in_dev->mc_list; i; i = i->next)
-		igmp_group_added(i);
+	for_each_pmc_rtnl(in_dev, pmc)
+		igmp_group_added(pmc);
 }
 
 /* Device going down */
 
 void ip_mc_down(struct in_device *in_dev)
 {
-	struct ip_mc_list *i;
+	struct ip_mc_list *pmc;
 
 	ASSERT_RTNL();
 
-	for (i=in_dev->mc_list; i; i=i->next)
-		igmp_group_dropped(i);
+	for_each_pmc_rtnl(in_dev, pmc)
+		igmp_group_dropped(pmc);
 
 #ifdef CONFIG_IP_MULTICAST
 	in_dev->mr_ifc_count = 0;
@@ -1374,7 +1397,6 @@
 	in_dev->mr_qrv = IGMP_Unsolicited_Report_Count;
 #endif
 
-	rwlock_init(&in_dev->mc_list_lock);
 	spin_lock_init(&in_dev->mc_tomb_lock);
 }
 
@@ -1382,14 +1404,14 @@
 
 void ip_mc_up(struct in_device *in_dev)
 {
-	struct ip_mc_list *i;
+	struct ip_mc_list *pmc;
 
 	ASSERT_RTNL();
 
 	ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS);
 
-	for (i=in_dev->mc_list; i; i=i->next)
-		igmp_group_added(i);
+	for_each_pmc_rtnl(in_dev, pmc)
+		igmp_group_added(pmc);
 }
 
 /*
@@ -1405,24 +1427,19 @@
 	/* Deactivate timers */
 	ip_mc_down(in_dev);
 
-	write_lock_bh(&in_dev->mc_list_lock);
-	while ((i = in_dev->mc_list) != NULL) {
-		in_dev->mc_list = i->next;
+	while ((i = rtnl_dereference(in_dev->mc_list)) != NULL) {
+		in_dev->mc_list = i->next_rcu;
 		in_dev->mc_count--;
-		write_unlock_bh(&in_dev->mc_list_lock);
+
 		igmp_group_dropped(i);
 		ip_ma_put(i);
-
-		write_lock_bh(&in_dev->mc_list_lock);
 	}
-	write_unlock_bh(&in_dev->mc_list_lock);
 }
 
 /* RTNL is locked */
 static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr)
 {
-	struct flowi fl = { .nl_u = { .ip4_u =
-				      { .daddr = imr->imr_multiaddr.s_addr } } };
+	struct flowi fl = { .fl4_dst = imr->imr_multiaddr.s_addr };
 	struct rtable *rt;
 	struct net_device *dev = NULL;
 	struct in_device *idev = NULL;
@@ -1513,18 +1530,18 @@
 
 	if (!in_dev)
 		return -ENODEV;
-	read_lock(&in_dev->mc_list_lock);
-	for (pmc=in_dev->mc_list; pmc; pmc=pmc->next) {
+	rcu_read_lock();
+	for_each_pmc_rcu(in_dev, pmc) {
 		if (*pmca == pmc->multiaddr)
 			break;
 	}
 	if (!pmc) {
 		/* MCA not found?? bug */
-		read_unlock(&in_dev->mc_list_lock);
+		rcu_read_unlock();
 		return -ESRCH;
 	}
 	spin_lock_bh(&pmc->lock);
-	read_unlock(&in_dev->mc_list_lock);
+	rcu_read_unlock();
 #ifdef CONFIG_IP_MULTICAST
 	sf_markstate(pmc);
 #endif
@@ -1685,18 +1702,18 @@
 
 	if (!in_dev)
 		return -ENODEV;
-	read_lock(&in_dev->mc_list_lock);
-	for (pmc=in_dev->mc_list; pmc; pmc=pmc->next) {
+	rcu_read_lock();
+	for_each_pmc_rcu(in_dev, pmc) {
 		if (*pmca == pmc->multiaddr)
 			break;
 	}
 	if (!pmc) {
 		/* MCA not found?? bug */
-		read_unlock(&in_dev->mc_list_lock);
+		rcu_read_unlock();
 		return -ESRCH;
 	}
 	spin_lock_bh(&pmc->lock);
-	read_unlock(&in_dev->mc_list_lock);
+	rcu_read_unlock();
 
 #ifdef CONFIG_IP_MULTICAST
 	sf_markstate(pmc);
@@ -1793,7 +1810,7 @@
 
 	err = -EADDRINUSE;
 	ifindex = imr->imr_ifindex;
-	for (i = inet->mc_list; i; i = i->next) {
+	for_each_pmc_rtnl(inet, i) {
 		if (i->multi.imr_multiaddr.s_addr == addr &&
 		    i->multi.imr_ifindex == ifindex)
 			goto done;
@@ -1807,7 +1824,7 @@
 		goto done;
 
 	memcpy(&iml->multi, imr, sizeof(*imr));
-	iml->next = inet->mc_list;
+	iml->next_rcu = inet->mc_list;
 	iml->sflist = NULL;
 	iml->sfmode = MCAST_EXCLUDE;
 	rcu_assign_pointer(inet->mc_list, iml);
@@ -1821,17 +1838,14 @@
 
 static void ip_sf_socklist_reclaim(struct rcu_head *rp)
 {
-	struct ip_sf_socklist *psf;
-
-	psf = container_of(rp, struct ip_sf_socklist, rcu);
+	kfree(container_of(rp, struct ip_sf_socklist, rcu));
 	/* sk_omem_alloc should have been decreased by the caller */
-	kfree(psf);
 }
 
 static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
 			   struct in_device *in_dev)
 {
-	struct ip_sf_socklist *psf = iml->sflist;
+	struct ip_sf_socklist *psf = rtnl_dereference(iml->sflist);
 	int err;
 
 	if (psf == NULL) {
@@ -1851,11 +1865,8 @@
 
 static void ip_mc_socklist_reclaim(struct rcu_head *rp)
 {
-	struct ip_mc_socklist *iml;
-
-	iml = container_of(rp, struct ip_mc_socklist, rcu);
+	kfree(container_of(rp, struct ip_mc_socklist, rcu));
 	/* sk_omem_alloc should have been decreased by the caller */
-	kfree(iml);
 }
 
 
@@ -1866,7 +1877,8 @@
 int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
 {
 	struct inet_sock *inet = inet_sk(sk);
-	struct ip_mc_socklist *iml, **imlp;
+	struct ip_mc_socklist *iml;
+	struct ip_mc_socklist __rcu **imlp;
 	struct in_device *in_dev;
 	struct net *net = sock_net(sk);
 	__be32 group = imr->imr_multiaddr.s_addr;
@@ -1876,7 +1888,9 @@
 	rtnl_lock();
 	in_dev = ip_mc_find_dev(net, imr);
 	ifindex = imr->imr_ifindex;
-	for (imlp = &inet->mc_list; (iml = *imlp) != NULL; imlp = &iml->next) {
+	for (imlp = &inet->mc_list;
+	     (iml = rtnl_dereference(*imlp)) != NULL;
+	     imlp = &iml->next_rcu) {
 		if (iml->multi.imr_multiaddr.s_addr != group)
 			continue;
 		if (ifindex) {
@@ -1888,7 +1902,7 @@
 
 		(void) ip_mc_leave_src(sk, iml, in_dev);
 
-		rcu_assign_pointer(*imlp, iml->next);
+		*imlp = iml->next_rcu;
 
 		if (in_dev)
 			ip_mc_dec_group(in_dev, group);
@@ -1934,7 +1948,7 @@
 	}
 	err = -EADDRNOTAVAIL;
 
-	for (pmc=inet->mc_list; pmc; pmc=pmc->next) {
+	for_each_pmc_rtnl(inet, pmc) {
 		if ((pmc->multi.imr_multiaddr.s_addr ==
 		     imr.imr_multiaddr.s_addr) &&
 		    (pmc->multi.imr_ifindex == imr.imr_ifindex))
@@ -1958,7 +1972,7 @@
 		pmc->sfmode = omode;
 	}
 
-	psl = pmc->sflist;
+	psl = rtnl_dereference(pmc->sflist);
 	if (!add) {
 		if (!psl)
 			goto done;	/* err = -EADDRNOTAVAIL */
@@ -2077,7 +2091,7 @@
 		goto done;
 	}
 
-	for (pmc=inet->mc_list; pmc; pmc=pmc->next) {
+	for_each_pmc_rtnl(inet, pmc) {
 		if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr &&
 		    pmc->multi.imr_ifindex == imr.imr_ifindex)
 			break;
@@ -2107,7 +2121,7 @@
 		(void) ip_mc_add_src(in_dev, &msf->imsf_multiaddr,
 				     msf->imsf_fmode, 0, NULL, 0);
 	}
-	psl = pmc->sflist;
+	psl = rtnl_dereference(pmc->sflist);
 	if (psl) {
 		(void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
 			psl->sl_count, psl->sl_addr, 0);
@@ -2155,7 +2169,7 @@
 	}
 	err = -EADDRNOTAVAIL;
 
-	for (pmc=inet->mc_list; pmc; pmc=pmc->next) {
+	for_each_pmc_rtnl(inet, pmc) {
 		if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr &&
 		    pmc->multi.imr_ifindex == imr.imr_ifindex)
 			break;
@@ -2163,7 +2177,7 @@
 	if (!pmc)		/* must have a prior join */
 		goto done;
 	msf->imsf_fmode = pmc->sfmode;
-	psl = pmc->sflist;
+	psl = rtnl_dereference(pmc->sflist);
 	rtnl_unlock();
 	if (!psl) {
 		len = 0;
@@ -2208,7 +2222,7 @@
 
 	err = -EADDRNOTAVAIL;
 
-	for (pmc=inet->mc_list; pmc; pmc=pmc->next) {
+	for_each_pmc_rtnl(inet, pmc) {
 		if (pmc->multi.imr_multiaddr.s_addr == addr &&
 		    pmc->multi.imr_ifindex == gsf->gf_interface)
 			break;
@@ -2216,7 +2230,7 @@
 	if (!pmc)		/* must have a prior join */
 		goto done;
 	gsf->gf_fmode = pmc->sfmode;
-	psl = pmc->sflist;
+	psl = rtnl_dereference(pmc->sflist);
 	rtnl_unlock();
 	count = psl ? psl->sl_count : 0;
 	copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc;
@@ -2257,7 +2271,7 @@
 		goto out;
 
 	rcu_read_lock();
-	for (pmc=rcu_dereference(inet->mc_list); pmc; pmc=rcu_dereference(pmc->next)) {
+	for_each_pmc_rcu(inet, pmc) {
 		if (pmc->multi.imr_multiaddr.s_addr == loc_addr &&
 		    pmc->multi.imr_ifindex == dif)
 			break;
@@ -2265,7 +2279,7 @@
 	ret = inet->mc_all;
 	if (!pmc)
 		goto unlock;
-	psl = pmc->sflist;
+	psl = rcu_dereference(pmc->sflist);
 	ret = (pmc->sfmode == MCAST_EXCLUDE);
 	if (!psl)
 		goto unlock;
@@ -2300,16 +2314,14 @@
 		return;
 
 	rtnl_lock();
-	while ((iml = inet->mc_list) != NULL) {
+	while ((iml = rtnl_dereference(inet->mc_list)) != NULL) {
 		struct in_device *in_dev;
-		rcu_assign_pointer(inet->mc_list, iml->next);
 
+		inet->mc_list = iml->next_rcu;
 		in_dev = inetdev_by_index(net, iml->multi.imr_ifindex);
 		(void) ip_mc_leave_src(sk, iml, in_dev);
-		if (in_dev != NULL) {
+		if (in_dev != NULL)
 			ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
-			in_dev_put(in_dev);
-		}
 		/* decrease mem now to avoid the memleak warning */
 		atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
 		call_rcu(&iml->rcu, ip_mc_socklist_reclaim);
@@ -2323,8 +2335,8 @@
 	struct ip_sf_list *psf;
 	int rv = 0;
 
-	read_lock(&in_dev->mc_list_lock);
-	for (im=in_dev->mc_list; im; im=im->next) {
+	rcu_read_lock();
+	for_each_pmc_rcu(in_dev, im) {
 		if (im->multiaddr == mc_addr)
 			break;
 	}
@@ -2345,7 +2357,7 @@
 		} else
 			rv = 1; /* unspecified source; tentatively allow */
 	}
-	read_unlock(&in_dev->mc_list_lock);
+	rcu_read_unlock();
 	return rv;
 }
 
@@ -2371,13 +2383,11 @@
 		in_dev = __in_dev_get_rcu(state->dev);
 		if (!in_dev)
 			continue;
-		read_lock(&in_dev->mc_list_lock);
-		im = in_dev->mc_list;
+		im = rcu_dereference(in_dev->mc_list);
 		if (im) {
 			state->in_dev = in_dev;
 			break;
 		}
-		read_unlock(&in_dev->mc_list_lock);
 	}
 	return im;
 }
@@ -2385,11 +2395,9 @@
 static struct ip_mc_list *igmp_mc_get_next(struct seq_file *seq, struct ip_mc_list *im)
 {
 	struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
-	im = im->next;
-	while (!im) {
-		if (likely(state->in_dev != NULL))
-			read_unlock(&state->in_dev->mc_list_lock);
 
+	im = rcu_dereference(im->next_rcu);
+	while (!im) {
 		state->dev = next_net_device_rcu(state->dev);
 		if (!state->dev) {
 			state->in_dev = NULL;
@@ -2398,8 +2406,7 @@
 		state->in_dev = __in_dev_get_rcu(state->dev);
 		if (!state->in_dev)
 			continue;
-		read_lock(&state->in_dev->mc_list_lock);
-		im = state->in_dev->mc_list;
+		im = rcu_dereference(state->in_dev->mc_list);
 	}
 	return im;
 }
@@ -2435,10 +2442,8 @@
 	__releases(rcu)
 {
 	struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
-	if (likely(state->in_dev != NULL)) {
-		read_unlock(&state->in_dev->mc_list_lock);
-		state->in_dev = NULL;
-	}
+
+	state->in_dev = NULL;
 	state->dev = NULL;
 	rcu_read_unlock();
 }
@@ -2460,7 +2465,7 @@
 		querier = "NONE";
 #endif
 
-		if (state->in_dev->mc_list == im) {
+		if (rcu_dereference(state->in_dev->mc_list) == im) {
 			seq_printf(seq, "%d\t%-10s: %5d %7s\n",
 				   state->dev->ifindex, state->dev->name, state->in_dev->mc_count, querier);
 		}
@@ -2519,8 +2524,7 @@
 		idev = __in_dev_get_rcu(state->dev);
 		if (unlikely(idev == NULL))
 			continue;
-		read_lock(&idev->mc_list_lock);
-		im = idev->mc_list;
+		im = rcu_dereference(idev->mc_list);
 		if (likely(im != NULL)) {
 			spin_lock_bh(&im->lock);
 			psf = im->sources;
@@ -2531,7 +2535,6 @@
 			}
 			spin_unlock_bh(&im->lock);
 		}
-		read_unlock(&idev->mc_list_lock);
 	}
 	return psf;
 }
@@ -2545,9 +2548,6 @@
 		spin_unlock_bh(&state->im->lock);
 		state->im = state->im->next;
 		while (!state->im) {
-			if (likely(state->idev != NULL))
-				read_unlock(&state->idev->mc_list_lock);
-
 			state->dev = next_net_device_rcu(state->dev);
 			if (!state->dev) {
 				state->idev = NULL;
@@ -2556,8 +2556,7 @@
 			state->idev = __in_dev_get_rcu(state->dev);
 			if (!state->idev)
 				continue;
-			read_lock(&state->idev->mc_list_lock);
-			state->im = state->idev->mc_list;
+			state->im = rcu_dereference(state->idev->mc_list);
 		}
 		if (!state->im)
 			break;
@@ -2603,10 +2602,7 @@
 		spin_unlock_bh(&state->im->lock);
 		state->im = NULL;
 	}
-	if (likely(state->idev != NULL)) {
-		read_unlock(&state->idev->mc_list_lock);
-		state->idev = NULL;
-	}
+	state->idev = NULL;
 	state->dev = NULL;
 	rcu_read_unlock();
 }
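
Note: the igmp.c hunks above are all instances of one conversion: the
mc_list_lock rwlock is gone, readers walk in_dev->mc_list under
rcu_read_lock(), and writers are serialized by RTNL and publish new
entries with rcu_assign_pointer(). A sketch of the two traversal
helpers the converted loops rely on (the actual definitions are added
near the top of net/ipv4/igmp.c, earlier in this diff):

#define for_each_pmc_rcu(in_dev, pmc)				\
	for (pmc = rcu_dereference(in_dev->mc_list);		\
	     pmc != NULL;					\
	     pmc = rcu_dereference(pmc->next_rcu))

#define for_each_pmc_rtnl(in_dev, pmc)				\
	for (pmc = rtnl_dereference(in_dev->mc_list);		\
	     pmc != NULL;					\
	     pmc = rtnl_dereference(pmc->next_rcu))

The _rcu flavor is for pure readers such as ip_check_mc() and the
query/report handlers; the _rtnl flavor is for update paths that
already hold RTNL (ip_mc_inc_group(), ip_mc_down(), ...), which is why
each such loop sits next to an ASSERT_RTNL().
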
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 7174370..25e3181 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -55,7 +55,6 @@
 int inet_csk_bind_conflict(const struct sock *sk,
 			   const struct inet_bind_bucket *tb)
 {
-	const __be32 sk_rcv_saddr = inet_rcv_saddr(sk);
 	struct sock *sk2;
 	struct hlist_node *node;
 	int reuse = sk->sk_reuse;
@@ -75,9 +74,9 @@
 		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
 			if (!reuse || !sk2->sk_reuse ||
 			    sk2->sk_state == TCP_LISTEN) {
-				const __be32 sk2_rcv_saddr = inet_rcv_saddr(sk2);
-				if (!sk2_rcv_saddr || !sk_rcv_saddr ||
-				    sk2_rcv_saddr == sk_rcv_saddr)
+				const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
+				if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
+				    sk2_rcv_saddr == sk_rcv_saddr(sk))
 					break;
 			}
 		}
@@ -358,17 +357,14 @@
 	struct ip_options *opt = inet_rsk(req)->opt;
 	struct flowi fl = { .oif = sk->sk_bound_dev_if,
 			    .mark = sk->sk_mark,
-			    .nl_u = { .ip4_u =
-				      { .daddr = ((opt && opt->srr) ?
-						  opt->faddr :
-						  ireq->rmt_addr),
-					.saddr = ireq->loc_addr,
-					.tos = RT_CONN_FLAGS(sk) } },
+			    .fl4_dst = ((opt && opt->srr) ?
+					  opt->faddr : ireq->rmt_addr),
+			    .fl4_src = ireq->loc_addr,
+			    .fl4_tos = RT_CONN_FLAGS(sk),
 			    .proto = sk->sk_protocol,
 			    .flags = inet_sk_flowi_flags(sk),
-			    .uli_u = { .ports =
-				       { .sport = inet_sk(sk)->inet_sport,
-					 .dport = ireq->rmt_port } } };
+			    .fl_ip_sport = inet_sk(sk)->inet_sport,
+			    .fl_ip_dport = ireq->rmt_port };
 	struct net *net = sock_net(sk);
 
 	security_req_classify_flow(req, &fl);
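
Note: the flowi initializer rewrites here -- and in ip_output.c,
ip_gre.c, ipip.c, ipmr.c, netfilter.c, raw.c, route.c and syncookies.c
below -- are mechanical: the flat fl4_*/fl_ip_* names replace the
nested .nl_u.ip4_u/.uli_u.ports spelling. Sketched on the assumption
that they are plain #define aliases in the companion include/net/flow.h
change (not shown in this excerpt), so struct layout and generated code
are untouched:

#define fl4_dst		nl_u.ip4_u.daddr
#define fl4_src		nl_u.ip4_u.saddr
#define fl4_tos		nl_u.ip4_u.tos
#define fl4_scope	nl_u.ip4_u.scope
#define fl_ip_sport	uli_u.ports.sport
#define fl_ip_dport	uli_u.ports.dport
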
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index ba80426..2ada171 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -490,9 +490,11 @@
 {
 	struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
 
-	if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) {
+	if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
 		struct inet_diag_entry entry;
-		struct rtattr *bc = (struct rtattr *)(r + 1);
+		const struct nlattr *bc = nlmsg_find_attr(cb->nlh,
+							  sizeof(*r),
+							  INET_DIAG_REQ_BYTECODE);
 		struct inet_sock *inet = inet_sk(sk);
 
 		entry.family = sk->sk_family;
@@ -512,7 +514,7 @@
 		entry.dport = ntohs(inet->inet_dport);
 		entry.userlocks = sk->sk_userlocks;
 
-		if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry))
+		if (!inet_diag_bc_run(nla_data(bc), nla_len(bc), &entry))
 			return 0;
 	}
 
@@ -527,9 +529,11 @@
 {
 	struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
 
-	if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) {
+	if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
 		struct inet_diag_entry entry;
-		struct rtattr *bc = (struct rtattr *)(r + 1);
+		const struct nlattr *bc = nlmsg_find_attr(cb->nlh,
+							  sizeof(*r),
+							  INET_DIAG_REQ_BYTECODE);
 
 		entry.family = tw->tw_family;
 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
@@ -548,7 +552,7 @@
 		entry.dport = ntohs(tw->tw_dport);
 		entry.userlocks = 0;
 
-		if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry))
+		if (!inet_diag_bc_run(nla_data(bc), nla_len(bc), &entry))
 			return 0;
 	}
 
@@ -618,7 +622,7 @@
 	struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct listen_sock *lopt;
-	struct rtattr *bc = NULL;
+	const struct nlattr *bc = NULL;
 	struct inet_sock *inet = inet_sk(sk);
 	int j, s_j;
 	int reqnum, s_reqnum;
@@ -638,8 +642,9 @@
 	if (!lopt || !lopt->qlen)
 		goto out;
 
-	if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) {
-		bc = (struct rtattr *)(r + 1);
+	if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
+		bc = nlmsg_find_attr(cb->nlh, sizeof(*r),
+				     INET_DIAG_REQ_BYTECODE);
 		entry.sport = inet->inet_num;
 		entry.userlocks = sk->sk_userlocks;
 	}
@@ -672,8 +677,8 @@
 					&ireq->rmt_addr;
 				entry.dport = ntohs(ireq->rmt_port);
 
-				if (!inet_diag_bc_run(RTA_DATA(bc),
-						    RTA_PAYLOAD(bc), &entry))
+				if (!inet_diag_bc_run(nla_data(bc),
+						      nla_len(bc), &entry))
 					continue;
 			}
 
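
Note: the inet_diag hunks above replace pointer arithmetic on the
request -- (struct rtattr *)(r + 1) plus a magic "4 +" length test --
with the generic netlink attribute helpers, which locate and
bounds-check the attribute properly. A hedged sketch of the resulting
lookup (the helper name is illustrative, not from the patch):

/* Fetch the filter bytecode, or NULL if the request carries no
 * attributes after its header.
 */
static const struct nlattr *diag_find_bytecode(const struct nlmsghdr *nlh)
{
	if (!nlmsg_attrlen(nlh, sizeof(struct inet_diag_req)))
		return NULL;

	return nlmsg_find_attr(nlh, sizeof(struct inet_diag_req),
			       INET_DIAG_REQ_BYTECODE);
}

nla_data()/nla_len() then stand in for RTA_DATA()/RTA_PAYLOAD() when
the bytecode is run.
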
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 1b344f3..3c0369a 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -133,8 +133,7 @@
 			}
 		}
 	}
-	sk_add_bind_node(child, &tb->owners);
-	inet_csk(child)->icsk_bind_hash = tb;
+	inet_bind_hash(child, tb, port);
 	spin_unlock(&head->lock);
 
 	return 0;
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index 9e94d7c..d9bc857 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -63,7 +63,7 @@
  *		refcnt: atomically against modifications on other CPU;
  *		   usually under some other lock to prevent node disappearing
  *		dtime: unused node list lock
- *		v4daddr: unchangeable
+ *		daddr: unchangeable
  *		ip_id_count: atomic value (no lock needed)
  */
 
@@ -79,15 +79,24 @@
 	.avl_height	= 0
 };
 
-static struct {
+struct inet_peer_base {
 	struct inet_peer __rcu *root;
 	spinlock_t	lock;
 	int		total;
-} peers = {
+};
+
+static struct inet_peer_base v4_peers = {
 	.root		= peer_avl_empty_rcu,
-	.lock		= __SPIN_LOCK_UNLOCKED(peers.lock),
+	.lock		= __SPIN_LOCK_UNLOCKED(v4_peers.lock),
 	.total		= 0,
 };
+
+static struct inet_peer_base v6_peers = {
+	.root		= peer_avl_empty_rcu,
+	.lock		= __SPIN_LOCK_UNLOCKED(v6_peers.lock),
+	.total		= 0,
+};
+
 #define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */
 
 /* Exported for sysctl_net_ipv4.  */
@@ -152,28 +161,45 @@
 	}
 }
 
+static int addr_compare(const struct inetpeer_addr *a,
+			const struct inetpeer_addr *b)
+{
+	int i, n = (a->family == AF_INET ? 1 : 4);
+
+	for (i = 0; i < n; i++) {
+		if (a->a6[i] == b->a6[i])
+			continue;
+		if (a->a6[i] < b->a6[i])
+			return -1;
+		return 1;
+	}
+
+	return 0;
+}
+
 /*
  * Called with local BH disabled and the pool lock held.
  */
-#define lookup(_daddr, _stack) 					\
+#define lookup(_daddr, _stack, _base)				\
 ({								\
 	struct inet_peer *u;					\
 	struct inet_peer __rcu **v;				\
 								\
 	stackptr = _stack;					\
-	*stackptr++ = &peers.root;				\
-	for (u = rcu_dereference_protected(peers.root,		\
-			lockdep_is_held(&peers.lock));		\
+	*stackptr++ = &_base->root;				\
+	for (u = rcu_dereference_protected(_base->root,		\
+			lockdep_is_held(&_base->lock));		\
 	     u != peer_avl_empty; ) {				\
-		if (_daddr == u->v4daddr)			\
+		int cmp = addr_compare(_daddr, &u->daddr);	\
+		if (cmp == 0)					\
 			break;					\
-		if ((__force __u32)_daddr < (__force __u32)u->v4daddr)	\
+		if (cmp == -1)					\
 			v = &u->avl_left;			\
 		else						\
 			v = &u->avl_right;			\
 		*stackptr++ = v;				\
 		u = rcu_dereference_protected(*v,		\
-			lockdep_is_held(&peers.lock));		\
+			lockdep_is_held(&_base->lock));		\
 	}							\
 	u;							\
 })
@@ -185,13 +211,15 @@
  * But every pointer we follow is guaranteed to be valid thanks to RCU.
  * We exit from this function if number of links exceeds PEER_MAXDEPTH
  */
-static struct inet_peer *lookup_rcu_bh(__be32 daddr)
+static struct inet_peer *lookup_rcu_bh(const struct inetpeer_addr *daddr,
+				       struct inet_peer_base *base)
 {
-	struct inet_peer *u = rcu_dereference_bh(peers.root);
+	struct inet_peer *u = rcu_dereference_bh(base->root);
 	int count = 0;
 
 	while (u != peer_avl_empty) {
-		if (daddr == u->v4daddr) {
+		int cmp = addr_compare(daddr, &u->daddr);
+		if (cmp == 0) {
 			/* Before taking a reference, check if this entry was
 			 * deleted, unlink_from_pool() sets refcnt=-1 to make
 			 * distinction between an unused entry (refcnt=0) and
@@ -201,7 +229,7 @@
 				u = NULL;
 			return u;
 		}
-		if ((__force __u32)daddr < (__force __u32)u->v4daddr)
+		if (cmp == -1)
 			u = rcu_dereference_bh(u->avl_left);
 		else
 			u = rcu_dereference_bh(u->avl_right);
@@ -212,19 +240,19 @@
 }
 
 /* Called with local BH disabled and the pool lock held. */
-#define lookup_rightempty(start)				\
+#define lookup_rightempty(start, base)				\
 ({								\
 	struct inet_peer *u;					\
 	struct inet_peer __rcu **v;				\
 	*stackptr++ = &start->avl_left;				\
 	v = &start->avl_left;					\
 	for (u = rcu_dereference_protected(*v,			\
-			lockdep_is_held(&peers.lock));		\
+			lockdep_is_held(&base->lock));		\
 	     u->avl_right != peer_avl_empty_rcu; ) {		\
 		v = &u->avl_right;				\
 		*stackptr++ = v;				\
 		u = rcu_dereference_protected(*v,		\
-			lockdep_is_held(&peers.lock));		\
+			lockdep_is_held(&base->lock));		\
 	}							\
 	u;							\
 })
@@ -234,7 +262,8 @@
  * Look into mm/map_avl.c for more detail description of the ideas.
  */
 static void peer_avl_rebalance(struct inet_peer __rcu **stack[],
-		struct inet_peer __rcu ***stackend)
+			       struct inet_peer __rcu ***stackend,
+			       struct inet_peer_base *base)
 {
 	struct inet_peer __rcu **nodep;
 	struct inet_peer *node, *l, *r;
@@ -243,20 +272,20 @@
 	while (stackend > stack) {
 		nodep = *--stackend;
 		node = rcu_dereference_protected(*nodep,
-				lockdep_is_held(&peers.lock));
+				lockdep_is_held(&base->lock));
 		l = rcu_dereference_protected(node->avl_left,
-				lockdep_is_held(&peers.lock));
+				lockdep_is_held(&base->lock));
 		r = rcu_dereference_protected(node->avl_right,
-				lockdep_is_held(&peers.lock));
+				lockdep_is_held(&base->lock));
 		lh = node_height(l);
 		rh = node_height(r);
 		if (lh > rh + 1) { /* l: RH+2 */
 			struct inet_peer *ll, *lr, *lrl, *lrr;
 			int lrh;
 			ll = rcu_dereference_protected(l->avl_left,
-				lockdep_is_held(&peers.lock));
+				lockdep_is_held(&base->lock));
 			lr = rcu_dereference_protected(l->avl_right,
-				lockdep_is_held(&peers.lock));
+				lockdep_is_held(&base->lock));
 			lrh = node_height(lr);
 			if (lrh <= node_height(ll)) {	/* ll: RH+1 */
 				RCU_INIT_POINTER(node->avl_left, lr);	/* lr: RH or RH+1 */
@@ -268,9 +297,9 @@
 				RCU_INIT_POINTER(*nodep, l);
 			} else { /* ll: RH, lr: RH+1 */
 				lrl = rcu_dereference_protected(lr->avl_left,
-					lockdep_is_held(&peers.lock));	/* lrl: RH or RH-1 */
+					lockdep_is_held(&base->lock));	/* lrl: RH or RH-1 */
 				lrr = rcu_dereference_protected(lr->avl_right,
-					lockdep_is_held(&peers.lock));	/* lrr: RH or RH-1 */
+					lockdep_is_held(&base->lock));	/* lrr: RH or RH-1 */
 				RCU_INIT_POINTER(node->avl_left, lrr);	/* lrr: RH or RH-1 */
 				RCU_INIT_POINTER(node->avl_right, r);	/* r: RH */
 				node->avl_height = rh + 1; /* node: RH+1 */
@@ -286,9 +315,9 @@
 			struct inet_peer *rr, *rl, *rlr, *rll;
 			int rlh;
 			rr = rcu_dereference_protected(r->avl_right,
-				lockdep_is_held(&peers.lock));
+				lockdep_is_held(&base->lock));
 			rl = rcu_dereference_protected(r->avl_left,
-				lockdep_is_held(&peers.lock));
+				lockdep_is_held(&base->lock));
 			rlh = node_height(rl);
 			if (rlh <= node_height(rr)) {	/* rr: LH+1 */
 				RCU_INIT_POINTER(node->avl_right, rl);	/* rl: LH or LH+1 */
@@ -300,9 +329,9 @@
 				RCU_INIT_POINTER(*nodep, r);
 			} else { /* rr: RH, rl: RH+1 */
 				rlr = rcu_dereference_protected(rl->avl_right,
-					lockdep_is_held(&peers.lock));	/* rlr: LH or LH-1 */
+					lockdep_is_held(&base->lock));	/* rlr: LH or LH-1 */
 				rll = rcu_dereference_protected(rl->avl_left,
-					lockdep_is_held(&peers.lock));	/* rll: LH or LH-1 */
+					lockdep_is_held(&base->lock));	/* rll: LH or LH-1 */
 				RCU_INIT_POINTER(node->avl_right, rll);	/* rll: LH or LH-1 */
 				RCU_INIT_POINTER(node->avl_left, l);	/* l: LH */
 				node->avl_height = lh + 1; /* node: LH+1 */
@@ -321,14 +350,14 @@
 }
 
 /* Called with local BH disabled and the pool lock held. */
-#define link_to_pool(n)						\
+#define link_to_pool(n, base)					\
 do {								\
 	n->avl_height = 1;					\
 	n->avl_left = peer_avl_empty_rcu;			\
 	n->avl_right = peer_avl_empty_rcu;			\
 	/* lockless readers can catch us now */			\
 	rcu_assign_pointer(**--stackptr, n);			\
-	peer_avl_rebalance(stack, stackptr);			\
+	peer_avl_rebalance(stack, stackptr, base);		\
 } while (0)
 
 static void inetpeer_free_rcu(struct rcu_head *head)
@@ -337,13 +366,13 @@
 }
 
 /* May be called with local BH enabled. */
-static void unlink_from_pool(struct inet_peer *p)
+static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base)
 {
 	int do_free;
 
 	do_free = 0;
 
-	spin_lock_bh(&peers.lock);
+	spin_lock_bh(&base->lock);
 	/* Check the reference counter.  It was artificially incremented by 1
 	 * in cleanup() function to prevent sudden disappearing.  If we can
 	 * atomically (because of lockless readers) take this last reference,
@@ -353,7 +382,7 @@
 	if (atomic_cmpxchg(&p->refcnt, 1, -1) == 1) {
 		struct inet_peer __rcu **stack[PEER_MAXDEPTH];
 		struct inet_peer __rcu ***stackptr, ***delp;
-		if (lookup(p->v4daddr, stack) != p)
+		if (lookup(&p->daddr, stack, base) != p)
 			BUG();
 		delp = stackptr - 1; /* *delp[0] == p */
 		if (p->avl_left == peer_avl_empty_rcu) {
@@ -362,11 +391,11 @@
 		} else {
 			/* look for a node to insert instead of p */
 			struct inet_peer *t;
-			t = lookup_rightempty(p);
+			t = lookup_rightempty(p, base);
 			BUG_ON(rcu_dereference_protected(*stackptr[-1],
-					lockdep_is_held(&peers.lock)) != t);
+					lockdep_is_held(&base->lock)) != t);
 			**--stackptr = t->avl_left;
-			/* t is removed, t->v4daddr > x->v4daddr for any
+			/* t is removed, t->daddr > x->daddr for any
 			 * x in p->avl_left subtree.
 			 * Put t in the old place of p. */
 			RCU_INIT_POINTER(*delp[0], t);
@@ -376,11 +405,11 @@
 			BUG_ON(delp[1] != &p->avl_left);
 			delp[1] = &t->avl_left; /* was &p->avl_left */
 		}
-		peer_avl_rebalance(stack, stackptr);
-		peers.total--;
+		peer_avl_rebalance(stack, stackptr, base);
+		base->total--;
 		do_free = 1;
 	}
-	spin_unlock_bh(&peers.lock);
+	spin_unlock_bh(&base->lock);
 
 	if (do_free)
 		call_rcu_bh(&p->rcu, inetpeer_free_rcu);
@@ -395,6 +424,16 @@
 		inet_putpeer(p);
 }
 
+static struct inet_peer_base *family_to_base(int family)
+{
+	return (family == AF_INET ? &v4_peers : &v6_peers);
+}
+
+static struct inet_peer_base *peer_to_base(struct inet_peer *p)
+{
+	return family_to_base(p->daddr.family);
+}
+
 /* May be called with local BH enabled. */
 static int cleanup_once(unsigned long ttl)
 {
@@ -428,21 +467,22 @@
 		 * happen because of entry limits in route cache. */
 		return -1;
 
-	unlink_from_pool(p);
+	unlink_from_pool(p, peer_to_base(p));
 	return 0;
 }
 
 /* Called with or without local BH being disabled. */
-struct inet_peer *inet_getpeer(__be32 daddr, int create)
+struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create)
 {
-	struct inet_peer *p;
 	struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr;
+	struct inet_peer_base *base = family_to_base(AF_INET);
+	struct inet_peer *p;
 
 	/* Look up for the address quickly, lockless.
 	 * Because of a concurrent writer, we might not find an existing entry.
 	 */
 	rcu_read_lock_bh();
-	p = lookup_rcu_bh(daddr);
+	p = lookup_rcu_bh(daddr, base);
 	rcu_read_unlock_bh();
 
 	if (p) {
@@ -456,50 +496,57 @@
 	/* retry an exact lookup, taking the lock before.
 	 * At least, nodes should be hot in our cache.
 	 */
-	spin_lock_bh(&peers.lock);
-	p = lookup(daddr, stack);
+	spin_lock_bh(&base->lock);
+	p = lookup(daddr, stack, base);
 	if (p != peer_avl_empty) {
 		atomic_inc(&p->refcnt);
-		spin_unlock_bh(&peers.lock);
+		spin_unlock_bh(&base->lock);
 		/* Remove the entry from unused list if it was there. */
 		unlink_from_unused(p);
 		return p;
 	}
 	p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
 	if (p) {
-		p->v4daddr = daddr;
+		p->daddr = *daddr;
 		atomic_set(&p->refcnt, 1);
 		atomic_set(&p->rid, 0);
-		atomic_set(&p->ip_id_count, secure_ip_id(daddr));
+		atomic_set(&p->ip_id_count, secure_ip_id(daddr->a4));
 		p->tcp_ts_stamp = 0;
 		INIT_LIST_HEAD(&p->unused);
 
 
 		/* Link the node. */
-		link_to_pool(p);
-		peers.total++;
+		link_to_pool(p, base);
+		base->total++;
 	}
-	spin_unlock_bh(&peers.lock);
+	spin_unlock_bh(&base->lock);
 
-	if (peers.total >= inet_peer_threshold)
+	if (base->total >= inet_peer_threshold)
 		/* Remove one less-recently-used entry. */
 		cleanup_once(0);
 
 	return p;
 }
 
+static int compute_total(void)
+{
+	return v4_peers.total + v6_peers.total;
+}
+EXPORT_SYMBOL_GPL(inet_getpeer);
+
 /* Called with local BH disabled. */
 static void peer_check_expire(unsigned long dummy)
 {
 	unsigned long now = jiffies;
-	int ttl;
+	int ttl, total;
 
-	if (peers.total >= inet_peer_threshold)
+	total = compute_total();
+	if (total >= inet_peer_threshold)
 		ttl = inet_peer_minttl;
 	else
 		ttl = inet_peer_maxttl
 				- (inet_peer_maxttl - inet_peer_minttl) / HZ *
-					peers.total / inet_peer_threshold * HZ;
+					total / inet_peer_threshold * HZ;
 	while (!cleanup_once(ttl)) {
 		if (jiffies != now)
 			break;
@@ -508,13 +555,14 @@
 	/* Trigger the timer after inet_peer_gc_mintime .. inet_peer_gc_maxtime
 	 * interval depending on the total number of entries (more entries,
 	 * less interval). */
-	if (peers.total >= inet_peer_threshold)
+	total = compute_total();
+	if (total >= inet_peer_threshold)
 		peer_periodic_timer.expires = jiffies + inet_peer_gc_mintime;
 	else
 		peer_periodic_timer.expires = jiffies
 			+ inet_peer_gc_maxtime
 			- (inet_peer_gc_maxtime - inet_peer_gc_mintime) / HZ *
-				peers.total / inet_peer_threshold * HZ;
+				total / inet_peer_threshold * HZ;
 	add_timer(&peer_periodic_timer);
 }
 
@@ -530,3 +578,4 @@
 
 	local_bh_enable();
 }
+EXPORT_SYMBOL_GPL(inet_putpeer);
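
Note: inetpeer now keys its nodes on struct inetpeer_addr and keeps one
AVL tree per address family; addr_compare() treats an IPv4 key as a
single 32-bit word (n = 1) and an IPv6 key as four. IPv4 callers such
as ip_fragment.c and route.c below move to a thin wrapper, sketched
here on the assumption that it sits in include/net/inetpeer.h next to
the struct, with a4 aliasing a6[0] in the address union:

static inline struct inet_peer *inet_getpeer_v4(__be32 v4daddr, int create)
{
	struct inetpeer_addr daddr;

	daddr.a4 = v4daddr;
	daddr.family = AF_INET;
	return inet_getpeer(&daddr, create);
}
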
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 1684408..e6215bd 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -141,7 +141,7 @@
 	qp->daddr = arg->iph->daddr;
 	qp->user = arg->user;
 	qp->peer = sysctl_ipfrag_max_dist ?
-		inet_getpeer(arg->iph->saddr, 1) : NULL;
+		inet_getpeer_v4(arg->iph->saddr, 1) : NULL;
 }
 
 static __inline__ void ip4_frag_free(struct inet_frag_queue *q)
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 70ff77f..eb68a0e 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -405,11 +405,11 @@
 	if (parms->name[0])
 		strlcpy(name, parms->name, IFNAMSIZ);
 	else
-		sprintf(name, "gre%%d");
+		strcpy(name, "gre%d");
 
 	dev = alloc_netdev(sizeof(*t), name, ipgre_tunnel_setup);
 	if (!dev)
-	  return NULL;
+		return NULL;
 
 	dev_net_set(dev, net);
 
@@ -634,7 +634,7 @@
 #ifdef CONFIG_NET_IPGRE_BROADCAST
 		if (ipv4_is_multicast(iph->daddr)) {
 			/* Looped back packet, drop it! */
-			if (skb_rtable(skb)->fl.iif == 0)
+			if (rt_is_output_route(skb_rtable(skb)))
 				goto drop;
 			tunnel->dev->stats.multicast++;
 			skb->pkt_type = PACKET_BROADCAST;
@@ -772,16 +772,11 @@
 	{
 		struct flowi fl = {
 			.oif = tunnel->parms.link,
-			.nl_u = {
-				.ip4_u = {
-					.daddr = dst,
-					.saddr = tiph->saddr,
-					.tos = RT_TOS(tos)
-				}
-			},
-			.proto = IPPROTO_GRE
-		}
-;
+			.fl4_dst = dst,
+			.fl4_src = tiph->saddr,
+			.fl4_tos = RT_TOS(tos),
+			.fl_gre_key = tunnel->parms.o_key
+		};
 		if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
 			dev->stats.tx_carrier_errors++;
 			goto tx_error;
@@ -823,7 +818,7 @@
 			     !ipv4_is_multicast(tunnel->parms.iph.daddr)) ||
 			    rt6->rt6i_dst.plen == 128) {
 				rt6->rt6i_flags |= RTF_MODIFIED;
-				skb_dst(skb)->metrics[RTAX_MTU-1] = mtu;
+				dst_metric_set(skb_dst(skb), RTAX_MTU, mtu);
 			}
 		}
 
@@ -895,7 +890,7 @@
 			iph->ttl = ((struct ipv6hdr *)old_iph)->hop_limit;
 #endif
 		else
-			iph->ttl = dst_metric(&rt->dst, RTAX_HOPLIMIT);
+			iph->ttl = ip4_dst_hoplimit(&rt->dst);
 	}
 
 	((__be16 *)(iph + 1))[0] = tunnel->parms.o_flags;
@@ -951,14 +946,11 @@
 	if (iph->daddr) {
 		struct flowi fl = {
 			.oif = tunnel->parms.link,
-			.nl_u = {
-				.ip4_u = {
-					.daddr = iph->daddr,
-					.saddr = iph->saddr,
-					.tos = RT_TOS(iph->tos)
-				}
-			},
-			.proto = IPPROTO_GRE
+			.fl4_dst = iph->daddr,
+			.fl4_src = iph->saddr,
+			.fl4_tos = RT_TOS(iph->tos),
+			.proto = IPPROTO_GRE,
+			.fl_gre_key = tunnel->parms.o_key
 		};
 		struct rtable *rt;
 
@@ -1216,14 +1208,11 @@
 	if (ipv4_is_multicast(t->parms.iph.daddr)) {
 		struct flowi fl = {
 			.oif = t->parms.link,
-			.nl_u = {
-				.ip4_u = {
-					.daddr = t->parms.iph.daddr,
-					.saddr = t->parms.iph.saddr,
-					.tos = RT_TOS(t->parms.iph.tos)
-				}
-			},
-			.proto = IPPROTO_GRE
+			.fl4_dst = t->parms.iph.daddr,
+			.fl4_src = t->parms.iph.saddr,
+			.fl4_tos = RT_TOS(t->parms.iph.tos),
+			.proto = IPPROTO_GRE,
+			.fl_gre_key = t->parms.o_key
 		};
 		struct rtable *rt;
 
@@ -1775,3 +1764,4 @@
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_RTNL_LINK("gre");
 MODULE_ALIAS_RTNL_LINK("gretap");
+MODULE_ALIAS("gre0");
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 439d2a3..04c7b3b 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -82,6 +82,7 @@
 #include <linux/tcp.h>
 
 int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;
+EXPORT_SYMBOL(sysctl_ip_default_ttl);
 
 /* Generate a checksum for an outgoing IP datagram. */
 __inline__ void ip_send_check(struct iphdr *iph)
@@ -130,7 +131,7 @@
 	int ttl = inet->uc_ttl;
 
 	if (ttl < 0)
-		ttl = dst_metric(dst, RTAX_HOPLIMIT);
+		ttl = ip4_dst_hoplimit(dst);
 	return ttl;
 }
 
@@ -341,15 +342,13 @@
 		{
 			struct flowi fl = { .oif = sk->sk_bound_dev_if,
 					    .mark = sk->sk_mark,
-					    .nl_u = { .ip4_u =
-						      { .daddr = daddr,
-							.saddr = inet->inet_saddr,
-							.tos = RT_CONN_FLAGS(sk) } },
+					    .fl4_dst = daddr,
+					    .fl4_src = inet->inet_saddr,
+					    .fl4_tos = RT_CONN_FLAGS(sk),
 					    .proto = sk->sk_protocol,
 					    .flags = inet_sk_flowi_flags(sk),
-					    .uli_u = { .ports =
-						       { .sport = inet->inet_sport,
-							 .dport = inet->inet_dport } } };
+					    .fl_ip_sport = inet->inet_sport,
+					    .fl_ip_dport = inet->inet_dport };
 
 			/* If this fails, retransmit mechanism of transport layer will
 			 * keep trying until route appears or the connection times
@@ -1404,14 +1403,11 @@
 
 	{
 		struct flowi fl = { .oif = arg->bound_dev_if,
-				    .nl_u = { .ip4_u =
-					      { .daddr = daddr,
-						.saddr = rt->rt_spec_dst,
-						.tos = RT_TOS(ip_hdr(skb)->tos) } },
-				    /* Not quite clean, but right. */
-				    .uli_u = { .ports =
-					       { .sport = tcp_hdr(skb)->dest,
-						 .dport = tcp_hdr(skb)->source } },
+				    .fl4_dst = daddr,
+				    .fl4_src = rt->rt_spec_dst,
+				    .fl4_tos = RT_TOS(ip_hdr(skb)->tos),
+				    .fl_ip_sport = tcp_hdr(skb)->dest,
+				    .fl_ip_dport = tcp_hdr(skb)->source,
 				    .proto = sk->sk_protocol,
 				    .flags = ip_reply_arg_flowi_flags(arg) };
 		security_skb_classify_flow(skb, &fl);
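
Note: the hunks replacing dst_metric(dst, RTAX_HOPLIMIT) with
ip4_dst_hoplimit() pair with the rt_set_nexthop() change in
net/ipv4/route.c below, which stops pre-seeding the hop-limit metric
into every route; the default TTL is applied at use time instead. That
is also why sysctl_ip_default_ttl gains an EXPORT_SYMBOL above --
modular users such as ipt_REJECT now reach it through the helper. A
sketch of the assumed helper:

static inline int ip4_dst_hoplimit(const struct dst_entry *dst)
{
	int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT);

	/* Unset metric: fall back to the systemwide default TTL. */
	if (hoplimit == 0)
		hoplimit = sysctl_ip_default_ttl;
	return hoplimit;
}
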
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 3a6e1ec..2b09775 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -1191,13 +1191,13 @@
 		    (ic_proto_enabled & IC_USE_DHCP) &&
 		    ic_dhcp_msgtype != DHCPACK) {
 			ic_got_reply = 0;
-			printk(",");
+			printk(KERN_CONT ",");
 			continue;
 		}
 #endif /* IPCONFIG_DHCP */
 
 		if (ic_got_reply) {
-			printk(" OK\n");
+			printk(KERN_CONT " OK\n");
 			break;
 		}
 
@@ -1205,7 +1205,7 @@
 			continue;
 
 		if (! --retries) {
-			printk(" timed out!\n");
+			printk(KERN_CONT " timed out!\n");
 			break;
 		}
 
@@ -1215,7 +1215,7 @@
 		if (timeout > CONF_TIMEOUT_MAX)
 			timeout = CONF_TIMEOUT_MAX;
 
-		printk(".");
+		printk(KERN_CONT ".");
 	}
 
 #ifdef IPCONFIG_BOOTP
@@ -1236,7 +1236,7 @@
 		((ic_got_reply & IC_RARP) ? "RARP"
 		 : (ic_proto_enabled & IC_USE_DHCP) ? "DHCP" : "BOOTP"),
 		&ic_servaddr);
-	printk("my address is %pI4\n", &ic_myaddr);
+	printk(KERN_CONT "my address is %pI4\n", &ic_myaddr);
 
 	return 0;
 }
@@ -1468,19 +1468,19 @@
 	/*
 	 * Clue in the operator.
 	 */
-	printk("IP-Config: Complete:");
-	printk("\n     device=%s", ic_dev->name);
-	printk(", addr=%pI4", &ic_myaddr);
-	printk(", mask=%pI4", &ic_netmask);
-	printk(", gw=%pI4", &ic_gateway);
-	printk(",\n     host=%s, domain=%s, nis-domain=%s",
+	printk("IP-Config: Complete:\n");
+	printk("     device=%s", ic_dev->name);
+	printk(KERN_CONT ", addr=%pI4", &ic_myaddr);
+	printk(KERN_CONT ", mask=%pI4", &ic_netmask);
+	printk(KERN_CONT ", gw=%pI4", &ic_gateway);
+	printk(KERN_CONT ",\n     host=%s, domain=%s, nis-domain=%s",
 	       utsname()->nodename, ic_domain, utsname()->domainname);
-	printk(",\n     bootserver=%pI4", &ic_servaddr);
-	printk(", rootserver=%pI4", &root_server_addr);
-	printk(", rootpath=%s", root_server_path);
+	printk(KERN_CONT ",\n     bootserver=%pI4", &ic_servaddr);
+	printk(KERN_CONT ", rootserver=%pI4", &root_server_addr);
+	printk(KERN_CONT ", rootpath=%s", root_server_path);
 	if (ic_dev_mtu)
-		printk(", mtu=%d", ic_dev_mtu);
-	printk("\n");
+		printk(KERN_CONT ", mtu=%d", ic_dev_mtu);
+	printk(KERN_CONT "\n");
 #endif /* !SILENT */
 
 	return 0;
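
Note: printk() calls that continue the current console line are meant
to say so with KERN_CONT; without it the fragments can be treated as
separate records at the default log level, splitting the ipconfig
progress dots and the trailing " OK"/" timed out!" onto their own
lines. The pattern in miniature:

printk(KERN_NOTICE "IP-Config: Sending requests");
printk(KERN_CONT ".");		/* appended to the same line */
printk(KERN_CONT " OK\n");	/* terminates the record */
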
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
index cd300aa..988f52f 100644
--- a/net/ipv4/ipip.c
+++ b/net/ipv4/ipip.c
@@ -463,13 +463,9 @@
 	{
 		struct flowi fl = {
 			.oif = tunnel->parms.link,
-			.nl_u = {
-				.ip4_u = {
-					.daddr = dst,
-					.saddr = tiph->saddr,
-					.tos = RT_TOS(tos)
-				}
-			},
+			.fl4_dst = dst,
+			.fl4_src = tiph->saddr,
+			.fl4_tos = RT_TOS(tos),
 			.proto = IPPROTO_IPIP
 		};
 
@@ -589,13 +585,9 @@
 	if (iph->daddr) {
 		struct flowi fl = {
 			.oif = tunnel->parms.link,
-			.nl_u = {
-				.ip4_u = {
-					.daddr = iph->daddr,
-					.saddr = iph->saddr,
-					.tos = RT_TOS(iph->tos)
-				}
-			},
+			.fl4_dst = iph->daddr,
+			.fl4_src = iph->saddr,
+			.fl4_tos = RT_TOS(iph->tos),
 			.proto = IPPROTO_IPIP
 		};
 		struct rtable *rt;
@@ -921,3 +913,4 @@
 module_init(ipip_init);
 module_exit(ipip_fini);
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("tunl0");
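
Note: MODULE_ALIAS("tunl0") here, like MODULE_ALIAS("gre0") in ip_gre.c
above, lets the tunnel driver be demand-loaded when userspace names its
fallback device: the ioctl path probes for the device and, failing
that, hands the name to modprobe, which the alias resolves to ipip.ko
or ip_gre.ko. A sketch of that autoload path, patterned on dev_load()
in net/core/dev.c:

void dev_load(struct net *net, const char *name)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = dev_get_by_name_rcu(net, name);
	rcu_read_unlock();

	/* No such device yet: ask modprobe, which maps the name
	 * through MODULE_ALIAS to the right module.
	 */
	if (!dev && capable(CAP_NET_ADMIN))
		request_module("%s", name);
}
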
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 86dd569..3f3a9af 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1537,13 +1537,9 @@
 	if (vif->flags & VIFF_TUNNEL) {
 		struct flowi fl = {
 			.oif = vif->link,
-			.nl_u = {
-				.ip4_u = {
-					.daddr = vif->remote,
-					.saddr = vif->local,
-					.tos = RT_TOS(iph->tos)
-				}
-			},
+			.fl4_dst = vif->remote,
+			.fl4_src = vif->local,
+			.fl4_tos = RT_TOS(iph->tos),
 			.proto = IPPROTO_IPIP
 		};
 
@@ -1553,12 +1549,8 @@
 	} else {
 		struct flowi fl = {
 			.oif = vif->link,
-			.nl_u = {
-				.ip4_u = {
-					.daddr = iph->daddr,
-					.tos = RT_TOS(iph->tos)
-				}
-			},
+			.fl4_dst = iph->daddr,
+			.fl4_tos = RT_TOS(iph->tos),
 			.proto = IPPROTO_IPIP
 		};
 
@@ -1654,7 +1646,7 @@
 	if (mrt->vif_table[vif].dev != skb->dev) {
 		int true_vifi;
 
-		if (skb_rtable(skb)->fl.iif == 0) {
+		if (rt_is_output_route(skb_rtable(skb))) {
 			/* It is our own packet, looped back.
 			 * Very complicated situation...
 			 *
diff --git a/net/ipv4/netfilter.c b/net/ipv4/netfilter.c
index d88a46c..994a1f2 100644
--- a/net/ipv4/netfilter.c
+++ b/net/ipv4/netfilter.c
@@ -31,10 +31,10 @@
 	 * packets with foreign saddr to appear on the NF_INET_LOCAL_OUT hook.
 	 */
 	if (addr_type == RTN_LOCAL) {
-		fl.nl_u.ip4_u.daddr = iph->daddr;
+		fl.fl4_dst = iph->daddr;
 		if (type == RTN_LOCAL)
-			fl.nl_u.ip4_u.saddr = iph->saddr;
-		fl.nl_u.ip4_u.tos = RT_TOS(iph->tos);
+			fl.fl4_src = iph->saddr;
+		fl.fl4_tos = RT_TOS(iph->tos);
 		fl.oif = skb->sk ? skb->sk->sk_bound_dev_if : 0;
 		fl.mark = skb->mark;
 		fl.flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0;
@@ -47,7 +47,7 @@
 	} else {
 		/* non-local src, find valid iif to satisfy
 		 * rp-filter when calling ip_route_input. */
-		fl.nl_u.ip4_u.daddr = iph->saddr;
+		fl.fl4_dst = iph->saddr;
 		if (ip_route_output_key(net, &rt, &fl) != 0)
 			return -1;
 
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index 4811159..19eb59d 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -3,15 +3,15 @@
 #
 
 # objects for l3 independent conntrack
-nf_conntrack_ipv4-objs  :=  nf_conntrack_l3proto_ipv4.o nf_conntrack_proto_icmp.o
+nf_conntrack_ipv4-y	:=  nf_conntrack_l3proto_ipv4.o nf_conntrack_proto_icmp.o
 ifeq ($(CONFIG_NF_CONNTRACK_PROC_COMPAT),y)
 ifeq ($(CONFIG_PROC_FS),y)
 nf_conntrack_ipv4-objs	+= nf_conntrack_l3proto_ipv4_compat.o
 endif
 endif
 
-nf_nat-objs		:= nf_nat_core.o nf_nat_helper.o nf_nat_proto_unknown.o nf_nat_proto_common.o nf_nat_proto_tcp.o nf_nat_proto_udp.o nf_nat_proto_icmp.o
-iptable_nat-objs	:= nf_nat_rule.o nf_nat_standalone.o
+nf_nat-y		:= nf_nat_core.o nf_nat_helper.o nf_nat_proto_unknown.o nf_nat_proto_common.o nf_nat_proto_tcp.o nf_nat_proto_udp.o nf_nat_proto_icmp.o
+iptable_nat-y	:= nf_nat_rule.o nf_nat_standalone.o
 
 # connection tracking
 obj-$(CONFIG_NF_CONNTRACK_IPV4) += nf_conntrack_ipv4.o
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 3cad259..3fac340 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -927,6 +927,7 @@
 			private = &tmp;
 		}
 #endif
+		memset(&info, 0, sizeof(info));
 		info.valid_hooks = t->valid_hooks;
 		memcpy(info.hook_entry, private->hook_entry,
 		       sizeof(info.hook_entry));
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index d31b007..a846d63 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -1124,6 +1124,7 @@
 			private = &tmp;
 		}
 #endif
+		memset(&info, 0, sizeof(info));
 		info.valid_hooks = t->valid_hooks;
 		memcpy(info.hook_entry, private->hook_entry,
 		       sizeof(info.hook_entry));
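
Note: the memset() added to both arp_tables.c and ip_tables.c plugs the
same information leak in the *_GET_INFO getsockopt handlers: info is a
stack variable later copied to user space, so compiler padding and any
fields the code forgets to set would expose kernel stack contents. The
rule in reduced form (names as in the ip_tables hunk):

struct ipt_getinfo info;	/* stack buffer destined for user space */

memset(&info, 0, sizeof(info));	/* clear padding and unset fields */
info.valid_hooks = t->valid_hooks;
/* ... fill the remaining members ... */
if (copy_to_user(user, &info, sizeof(info)) != 0)
	ret = -EFAULT;
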
diff --git a/net/ipv4/netfilter/ipt_REJECT.c b/net/ipv4/netfilter/ipt_REJECT.c
index 43eec80..1ff79e5 100644
--- a/net/ipv4/netfilter/ipt_REJECT.c
+++ b/net/ipv4/netfilter/ipt_REJECT.c
@@ -116,7 +116,7 @@
 	if (ip_route_me_harder(nskb, addr_type))
 		goto free_nskb;
 
-	niph->ttl	= dst_metric(skb_dst(nskb), RTAX_HOPLIMIT);
+	niph->ttl	= ip4_dst_hoplimit(skb_dst(nskb));
 
 	/* "Never happens" */
 	if (nskb->len > dst_mtu(skb_dst(nskb)))
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 295c974..c04787c 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -47,26 +47,6 @@
 	return rcu_dereference(nf_nat_protos[protonum]);
 }
 
-static const struct nf_nat_protocol *
-nf_nat_proto_find_get(u_int8_t protonum)
-{
-	const struct nf_nat_protocol *p;
-
-	rcu_read_lock();
-	p = __nf_nat_proto_find(protonum);
-	if (!try_module_get(p->me))
-		p = &nf_nat_unknown_protocol;
-	rcu_read_unlock();
-
-	return p;
-}
-
-static void
-nf_nat_proto_put(const struct nf_nat_protocol *p)
-{
-	module_put(p->me);
-}
-
 /* We keep an extra hash for each conntrack, for fast searching. */
 static inline unsigned int
 hash_by_src(const struct net *net, u16 zone,
@@ -588,6 +568,26 @@
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_conntrack.h>
 
+static const struct nf_nat_protocol *
+nf_nat_proto_find_get(u_int8_t protonum)
+{
+	const struct nf_nat_protocol *p;
+
+	rcu_read_lock();
+	p = __nf_nat_proto_find(protonum);
+	if (!try_module_get(p->me))
+		p = &nf_nat_unknown_protocol;
+	rcu_read_unlock();
+
+	return p;
+}
+
+static void
+nf_nat_proto_put(const struct nf_nat_protocol *p)
+{
+	module_put(p->me);
+}
+
 static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = {
 	[CTA_PROTONAT_PORT_MIN]	= { .type = NLA_U16 },
 	[CTA_PROTONAT_PORT_MAX]	= { .type = NLA_U16 },
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 4ae1f20..b14ec7d 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -59,13 +59,13 @@
 	local_bh_enable();
 
 	socket_seq_show(seq);
-	seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %d\n",
+	seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %ld\n",
 		   sock_prot_inuse_get(net, &tcp_prot), orphans,
 		   tcp_death_row.tw_count, sockets,
-		   atomic_read(&tcp_memory_allocated));
-	seq_printf(seq, "UDP: inuse %d mem %d\n",
+		   atomic_long_read(&tcp_memory_allocated));
+	seq_printf(seq, "UDP: inuse %d mem %ld\n",
 		   sock_prot_inuse_get(net, &udp_prot),
-		   atomic_read(&udp_memory_allocated));
+		   atomic_long_read(&udp_memory_allocated));
 	seq_printf(seq, "UDPLITE: inuse %d\n",
 		   sock_prot_inuse_get(net, &udplite_prot));
 	seq_printf(seq, "RAW: inuse %d\n",
@@ -253,6 +253,7 @@
 	SNMP_MIB_ITEM("TCPMinTTLDrop", LINUX_MIB_TCPMINTTLDROP),
 	SNMP_MIB_ITEM("TCPDeferAcceptDrop", LINUX_MIB_TCPDEFERACCEPTDROP),
 	SNMP_MIB_ITEM("IPReversePathFilter", LINUX_MIB_IPRPFILTER),
+	SNMP_MIB_ITEM("TCPTimeWaitOverflow", LINUX_MIB_TCPTIMEWAITOVERFLOW),
 	SNMP_MIB_SENTINEL
 };
 
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 1f85ef2..a3d5ab7 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -549,10 +549,9 @@
 	{
 		struct flowi fl = { .oif = ipc.oif,
 				    .mark = sk->sk_mark,
-				    .nl_u = { .ip4_u =
-					      { .daddr = daddr,
-						.saddr = saddr,
-						.tos = tos } },
+				    .fl4_dst = daddr,
+				    .fl4_src = saddr,
+				    .fl4_tos = tos,
 				    .proto = inet->hdrincl ? IPPROTO_RAW :
 							     sk->sk_protocol,
 				  };
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 987bf9a..770f704 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -140,13 +140,15 @@
 
 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
 static void		 ipv4_dst_destroy(struct dst_entry *dst);
-static void		 ipv4_dst_ifdown(struct dst_entry *dst,
-					 struct net_device *dev, int how);
 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
 static void		 ipv4_link_failure(struct sk_buff *skb);
 static void		 ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
 static int rt_garbage_collect(struct dst_ops *ops);
 
+static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
+			    int how)
+{
+}
 
 static struct dst_ops ipv4_dst_ops = {
 	.family =		AF_INET,
@@ -621,7 +623,7 @@
 	/* Kill broadcast/multicast entries very aggressively, if they
 	   collide in hash table with more useful entries */
 	return (rth->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST)) &&
-		rth->fl.iif && rth->dst.rt_next;
+		rt_is_input_route(rth) && rth->dst.rt_next;
 }
 
 static inline int rt_valuable(struct rtable *rth)
@@ -666,7 +668,7 @@
 	if (rt_valuable(rt))
 		score |= (1<<31);
 
-	if (!rt->fl.iif ||
+	if (rt_is_output_route(rt) ||
 	    !(rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST|RTCF_LOCAL)))
 		score |= (1<<30);
 
@@ -682,17 +684,17 @@
 static inline bool compare_hash_inputs(const struct flowi *fl1,
 					const struct flowi *fl2)
 {
-	return ((((__force u32)fl1->nl_u.ip4_u.daddr ^ (__force u32)fl2->nl_u.ip4_u.daddr) |
-		((__force u32)fl1->nl_u.ip4_u.saddr ^ (__force u32)fl2->nl_u.ip4_u.saddr) |
+	return ((((__force u32)fl1->fl4_dst ^ (__force u32)fl2->fl4_dst) |
+		((__force u32)fl1->fl4_src ^ (__force u32)fl2->fl4_src) |
 		(fl1->iif ^ fl2->iif)) == 0);
 }
 
 static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
 {
-	return (((__force u32)fl1->nl_u.ip4_u.daddr ^ (__force u32)fl2->nl_u.ip4_u.daddr) |
-		((__force u32)fl1->nl_u.ip4_u.saddr ^ (__force u32)fl2->nl_u.ip4_u.saddr) |
+	return (((__force u32)fl1->fl4_dst ^ (__force u32)fl2->fl4_dst) |
+		((__force u32)fl1->fl4_src ^ (__force u32)fl2->fl4_src) |
 		(fl1->mark ^ fl2->mark) |
-		(*(u16 *)&fl1->nl_u.ip4_u.tos ^ *(u16 *)&fl2->nl_u.ip4_u.tos) |
+		(*(u16 *)&fl1->fl4_tos ^ *(u16 *)&fl2->fl4_tos) |
 		(fl1->oif ^ fl2->oif) |
 		(fl1->iif ^ fl2->iif)) == 0;
 }
@@ -1124,7 +1126,7 @@
 		 */
 
 		rt->dst.flags |= DST_NOCACHE;
-		if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) {
+		if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
 			int err = arp_bind_neighbour(&rt->dst);
 			if (err) {
 				if (net_ratelimit())
@@ -1222,7 +1224,7 @@
 	/* Try to bind route to arp only if it is output
 	   route or unicast forwarding path.
 	 */
-	if (rt->rt_type == RTN_UNICAST || rt->fl.iif == 0) {
+	if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
 		int err = arp_bind_neighbour(&rt->dst);
 		if (err) {
 			spin_unlock_bh(rt_hash_lock_addr(hash));
@@ -1287,7 +1289,7 @@
 {
 	struct inet_peer *peer;
 
-	peer = inet_getpeer(rt->rt_dst, create);
+	peer = inet_getpeer_v4(rt->rt_dst, create);
 
 	if (peer && cmpxchg(&rt->peer, NULL, peer) != NULL)
 		inet_putpeer(peer);
@@ -1404,7 +1406,7 @@
 				if (rth->fl.fl4_dst != daddr ||
 				    rth->fl.fl4_src != skeys[i] ||
 				    rth->fl.oif != ikeys[k] ||
-				    rth->fl.iif != 0 ||
+				    rt_is_input_route(rth) ||
 				    rt_is_expired(rth) ||
 				    !net_eq(dev_net(rth->dst.dev), net)) {
 					rthp = &rth->dst.rt_next;
@@ -1433,8 +1435,6 @@
 				rt->dst.child		= NULL;
 				if (rt->dst.dev)
 					dev_hold(rt->dst.dev);
-				if (rt->idev)
-					in_dev_hold(rt->idev);
 				rt->dst.obsolete	= -1;
 				rt->dst.lastuse	= jiffies;
 				rt->dst.path		= &rt->dst;
@@ -1666,7 +1666,7 @@
 				    rth->rt_dst != daddr ||
 				    rth->rt_src != iph->saddr ||
 				    rth->fl.oif != ikeys[k] ||
-				    rth->fl.iif != 0 ||
+				    rt_is_input_route(rth) ||
 				    dst_metric_locked(&rth->dst, RTAX_MTU) ||
 				    !net_eq(dev_net(rth->dst.dev), net) ||
 				    rt_is_expired(rth))
@@ -1686,11 +1686,14 @@
 					if (mtu < dst_mtu(&rth->dst)) {
 						dst_confirm(&rth->dst);
 						if (mtu < ip_rt_min_pmtu) {
+							u32 lock = dst_metric(&rth->dst,
+									      RTAX_LOCK);
 							mtu = ip_rt_min_pmtu;
-							rth->dst.metrics[RTAX_LOCK-1] |=
-								(1 << RTAX_MTU);
+							lock |= (1 << RTAX_MTU);
+							dst_metric_set(&rth->dst, RTAX_LOCK,
+								       lock);
 						}
-						rth->dst.metrics[RTAX_MTU-1] = mtu;
+						dst_metric_set(&rth->dst, RTAX_MTU, mtu);
 						dst_set_expires(&rth->dst,
 							ip_rt_mtu_expires);
 					}
@@ -1708,10 +1711,11 @@
 	if (dst_mtu(dst) > mtu && mtu >= 68 &&
 	    !(dst_metric_locked(dst, RTAX_MTU))) {
 		if (mtu < ip_rt_min_pmtu) {
+			u32 lock = dst_metric(dst, RTAX_LOCK);
 			mtu = ip_rt_min_pmtu;
-			dst->metrics[RTAX_LOCK-1] |= (1 << RTAX_MTU);
+			dst_metric_set(dst, RTAX_LOCK, lock | (1 << RTAX_MTU));
 		}
-		dst->metrics[RTAX_MTU-1] = mtu;
+		dst_metric_set(dst, RTAX_MTU, mtu);
 		dst_set_expires(dst, ip_rt_mtu_expires);
 		call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst);
 	}
@@ -1728,33 +1732,13 @@
 {
 	struct rtable *rt = (struct rtable *) dst;
 	struct inet_peer *peer = rt->peer;
-	struct in_device *idev = rt->idev;
 
 	if (peer) {
 		rt->peer = NULL;
 		inet_putpeer(peer);
 	}
-
-	if (idev) {
-		rt->idev = NULL;
-		in_dev_put(idev);
-	}
 }
 
-static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
-			    int how)
-{
-	struct rtable *rt = (struct rtable *) dst;
-	struct in_device *idev = rt->idev;
-	if (dev != dev_net(dev)->loopback_dev && idev && idev->dev == dev) {
-		struct in_device *loopback_idev =
-			in_dev_get(dev_net(dev)->loopback_dev);
-		if (loopback_idev) {
-			rt->idev = loopback_idev;
-			in_dev_put(idev);
-		}
-	}
-}
 
 static void ipv4_link_failure(struct sk_buff *skb)
 {
@@ -1790,7 +1774,7 @@
 	__be32 src;
 	struct fib_result res;
 
-	if (rt->fl.iif == 0)
+	if (rt_is_output_route(rt))
 		src = rt->rt_src;
 	else {
 		rcu_read_lock();
@@ -1816,36 +1800,35 @@
 
 static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
 {
+	struct dst_entry *dst = &rt->dst;
 	struct fib_info *fi = res->fi;
 
 	if (fi) {
 		if (FIB_RES_GW(*res) &&
 		    FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
 			rt->rt_gateway = FIB_RES_GW(*res);
-		memcpy(rt->dst.metrics, fi->fib_metrics,
-		       sizeof(rt->dst.metrics));
+		dst_import_metrics(dst, fi->fib_metrics);
 		if (fi->fib_mtu == 0) {
-			rt->dst.metrics[RTAX_MTU-1] = rt->dst.dev->mtu;
-			if (dst_metric_locked(&rt->dst, RTAX_MTU) &&
+			dst_metric_set(dst, RTAX_MTU, dst->dev->mtu);
+			if (dst_metric_locked(dst, RTAX_MTU) &&
 			    rt->rt_gateway != rt->rt_dst &&
-			    rt->dst.dev->mtu > 576)
-				rt->dst.metrics[RTAX_MTU-1] = 576;
+			    dst->dev->mtu > 576)
+				dst_metric_set(dst, RTAX_MTU, 576);
 		}
 #ifdef CONFIG_NET_CLS_ROUTE
-		rt->dst.tclassid = FIB_RES_NH(*res).nh_tclassid;
+		dst->tclassid = FIB_RES_NH(*res).nh_tclassid;
 #endif
 	} else
-		rt->dst.metrics[RTAX_MTU-1]= rt->dst.dev->mtu;
+		dst_metric_set(dst, RTAX_MTU, dst->dev->mtu);
 
-	if (dst_metric(&rt->dst, RTAX_HOPLIMIT) == 0)
-		rt->dst.metrics[RTAX_HOPLIMIT-1] = sysctl_ip_default_ttl;
-	if (dst_mtu(&rt->dst) > IP_MAX_MTU)
-		rt->dst.metrics[RTAX_MTU-1] = IP_MAX_MTU;
-	if (dst_metric(&rt->dst, RTAX_ADVMSS) == 0)
-		rt->dst.metrics[RTAX_ADVMSS-1] = max_t(unsigned int, rt->dst.dev->mtu - 40,
-				       ip_rt_min_advmss);
-	if (dst_metric(&rt->dst, RTAX_ADVMSS) > 65535 - 40)
-		rt->dst.metrics[RTAX_ADVMSS-1] = 65535 - 40;
+	if (dst_mtu(dst) > IP_MAX_MTU)
+		dst_metric_set(dst, RTAX_MTU, IP_MAX_MTU);
+	if (dst_metric(dst, RTAX_ADVMSS) == 0)
+		dst_metric_set(dst, RTAX_ADVMSS,
+			       max_t(unsigned int, dst->dev->mtu - 40,
+				     ip_rt_min_advmss));
+	if (dst_metric(dst, RTAX_ADVMSS) > 65535 - 40)
+		dst_metric_set(dst, RTAX_ADVMSS, 65535 - 40);
 
 #ifdef CONFIG_NET_CLS_ROUTE
 #ifdef CONFIG_IP_MULTIPLE_TABLES
@@ -1910,7 +1893,6 @@
 	rth->fl.iif	= dev->ifindex;
 	rth->dst.dev	= init_net.loopback_dev;
 	dev_hold(rth->dst.dev);
-	rth->idev	= in_dev_get(rth->dst.dev);
 	rth->fl.oif	= 0;
 	rth->rt_gateway	= daddr;
 	rth->rt_spec_dst= spec_dst;
@@ -2050,7 +2032,6 @@
 		rth->fl.iif	= in_dev->dev->ifindex;
 	rth->dst.dev	= (out_dev)->dev;
 	dev_hold(rth->dst.dev);
-	rth->idev	= in_dev_get(rth->dst.dev);
 	rth->fl.oif 	= 0;
 	rth->rt_spec_dst= spec_dst;
 
@@ -2111,12 +2092,10 @@
 {
 	struct fib_result res;
 	struct in_device *in_dev = __in_dev_get_rcu(dev);
-	struct flowi fl = { .nl_u = { .ip4_u =
-				      { .daddr = daddr,
-					.saddr = saddr,
-					.tos = tos,
-					.scope = RT_SCOPE_UNIVERSE,
-				      } },
+	struct flowi fl = { .fl4_dst	= daddr,
+			    .fl4_src	= saddr,
+			    .fl4_tos	= tos,
+			    .fl4_scope	= RT_SCOPE_UNIVERSE,
 			    .mark = skb->mark,
 			    .iif = dev->ifindex };
 	unsigned	flags = 0;
@@ -2231,7 +2210,6 @@
 	rth->fl.iif	= dev->ifindex;
 	rth->dst.dev	= net->loopback_dev;
 	dev_hold(rth->dst.dev);
-	rth->idev	= in_dev_get(rth->dst.dev);
 	rth->rt_gateway	= daddr;
 	rth->rt_spec_dst= spec_dst;
 	rth->dst.input= ip_local_deliver;
@@ -2417,9 +2395,6 @@
 	if (!rth)
 		return -ENOBUFS;
 
-	in_dev_hold(in_dev);
-	rth->idev = in_dev;
-
 	atomic_set(&rth->dst.__refcnt, 1);
 	rth->dst.flags= DST_HOST;
 	if (IN_DEV_CONF_GET(in_dev, NOXFRM))
@@ -2506,14 +2481,11 @@
 				const struct flowi *oldflp)
 {
 	u32 tos	= RT_FL_TOS(oldflp);
-	struct flowi fl = { .nl_u = { .ip4_u =
-				      { .daddr = oldflp->fl4_dst,
-					.saddr = oldflp->fl4_src,
-					.tos = tos & IPTOS_RT_MASK,
-					.scope = ((tos & RTO_ONLINK) ?
-						  RT_SCOPE_LINK :
-						  RT_SCOPE_UNIVERSE),
-				      } },
+	struct flowi fl = { .fl4_dst = oldflp->fl4_dst,
+			    .fl4_src = oldflp->fl4_src,
+			    .fl4_tos = tos & IPTOS_RT_MASK,
+			    .fl4_scope = ((tos & RTO_ONLINK) ?
+					  RT_SCOPE_LINK : RT_SCOPE_UNIVERSE),
 			    .mark = oldflp->mark,
 			    .iif = net->loopback_dev->ifindex,
 			    .oif = oldflp->oif };
@@ -2695,7 +2667,7 @@
 		rth = rcu_dereference_bh(rth->dst.rt_next)) {
 		if (rth->fl.fl4_dst == flp->fl4_dst &&
 		    rth->fl.fl4_src == flp->fl4_src &&
-		    rth->fl.iif == 0 &&
+		    rt_is_output_route(rth) &&
 		    rth->fl.oif == flp->oif &&
 		    rth->fl.mark == flp->mark &&
 		    !((rth->fl.fl4_tos ^ flp->fl4_tos) &
@@ -2751,7 +2723,7 @@
 		new->__use = 1;
 		new->input = dst_discard;
 		new->output = dst_discard;
-		memcpy(new->metrics, ort->dst.metrics, RTAX_MAX*sizeof(u32));
+		dst_copy_metrics(new, &ort->dst);
 
 		new->dev = ort->dst.dev;
 		if (new->dev)
@@ -2759,9 +2731,6 @@
 
 		rt->fl = ort->fl;
 
-		rt->idev = ort->idev;
-		if (rt->idev)
-			in_dev_hold(rt->idev);
 		rt->rt_genid = rt_genid(net);
 		rt->rt_flags = ort->rt_flags;
 		rt->rt_type = ort->rt_type;
@@ -2853,7 +2822,7 @@
 	if (rt->dst.tclassid)
 		NLA_PUT_U32(skb, RTA_FLOW, rt->dst.tclassid);
 #endif
-	if (rt->fl.iif)
+	if (rt_is_input_route(rt))
 		NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
 	else if (rt->rt_src != rt->fl.fl4_src)
 		NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src);
@@ -2861,7 +2830,7 @@
 	if (rt->rt_dst != rt->rt_gateway)
 		NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway);
 
-	if (rtnetlink_put_metrics(skb, rt->dst.metrics) < 0)
+	if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
 		goto nla_put_failure;
 
 	if (rt->fl.mark)
@@ -2878,7 +2847,7 @@
 		}
 	}
 
-	if (rt->fl.iif) {
+	if (rt_is_input_route(rt)) {
 #ifdef CONFIG_IP_MROUTE
 		__be32 dst = rt->rt_dst;
 
@@ -2973,13 +2942,9 @@
 			err = -rt->dst.error;
 	} else {
 		struct flowi fl = {
-			.nl_u = {
-				.ip4_u = {
-					.daddr = dst,
-					.saddr = src,
-					.tos = rtm->rtm_tos,
-				},
-			},
+			.fl4_dst = dst,
+			.fl4_src = src,
+			.fl4_tos = rtm->rtm_tos,
 			.oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0,
 			.mark = mark,
 		};
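
The `.nl_u = { .ip4_u = { ... } }` to `.fl4_*` conversions above recur throughout this series (syncookies.c, udp.c, sit.c, the xfrm files, and the IPv6 files below, where the `fl6_*` names play the same role). The change is purely syntactic, assuming the `fl4_*` names are aliases for the nested union members, as the untouched readers such as `rth->fl.fl4_dst` in the cache-lookup hunk suggest. A minimal sketch of the equivalence, with the alias spelling being an assumption:

	/*
	 * Sketch: both initializers fill the same storage, provided the
	 * fl4_* names alias the nested union members, e.g.
	 *
	 *	#define fl4_dst	nl_u.ip4_u.daddr
	 *
	 * (the alias spelling is assumed; it is not shown in this diff).
	 */
	static void flowi_init_demo(__be32 daddr, __be32 saddr, __u8 tos)
	{
		struct flowi old_style = { .nl_u = { .ip4_u =
						     { .daddr = daddr,
						       .saddr = saddr,
						       .tos = tos } } };
		struct flowi new_style = { .fl4_dst = daddr,
					   .fl4_src = saddr,
					   .fl4_tos = tos };

		/* memcmp(&old_style, &new_style, sizeof(old_style)) == 0 */
		(void)old_style;
		(void)new_style;
	}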
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index 650cace..4751920 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -346,17 +346,14 @@
 	 */
 	{
 		struct flowi fl = { .mark = sk->sk_mark,
-				    .nl_u = { .ip4_u =
-					      { .daddr = ((opt && opt->srr) ?
-							  opt->faddr :
-							  ireq->rmt_addr),
-						.saddr = ireq->loc_addr,
-						.tos = RT_CONN_FLAGS(sk) } },
+				    .fl4_dst = ((opt && opt->srr) ?
+						opt->faddr : ireq->rmt_addr),
+				    .fl4_src = ireq->loc_addr,
+				    .fl4_tos = RT_CONN_FLAGS(sk),
 				    .proto = IPPROTO_TCP,
 				    .flags = inet_sk_flowi_flags(sk),
-				    .uli_u = { .ports =
-					       { .sport = th->dest,
-						 .dport = th->source } } };
+				    .fl_ip_sport = th->dest,
+				    .fl_ip_dport = th->source };
 		security_req_classify_flow(req, &fl);
 		if (ip_route_output_key(sock_net(sk), &rt, &fl)) {
 			reqsk_free(req);
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index d96c1da..e85ff59 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -26,6 +26,8 @@
 static int tcp_retr1_max = 255;
 static int ip_local_port_range_min[] = { 1, 1 };
 static int ip_local_port_range_max[] = { 65535, 65535 };
+static int tcp_adv_win_scale_min = -31;
+static int tcp_adv_win_scale_max = 31;
 
 /* Update system visible IP port range */
 static void set_local_port_range(int range[2])
@@ -153,7 +155,7 @@
 		.data		= &sysctl_ip_default_ttl,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= ipv4_doint_and_flush,
+		.proc_handler	= proc_dointvec,
 		.extra2		= &init_net,
 	},
 	{
@@ -398,7 +400,7 @@
 		.data		= &sysctl_tcp_mem,
 		.maxlen		= sizeof(sysctl_tcp_mem),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec
+		.proc_handler	= proc_doulongvec_minmax
 	},
 	{
 		.procname	= "tcp_wmem",
@@ -426,7 +428,9 @@
 		.data		= &sysctl_tcp_adv_win_scale,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &tcp_adv_win_scale_min,
+		.extra2		= &tcp_adv_win_scale_max,
 	},
 	{
 		.procname	= "tcp_tw_reuse",
@@ -602,8 +606,7 @@
 		.data		= &sysctl_udp_mem,
 		.maxlen		= sizeof(sysctl_udp_mem),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec_minmax,
-		.extra1		= &zero
+		.proc_handler	= proc_doulongvec_minmax,
 	},
 	{
 		.procname	= "udp_rmem_min",
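
Three distinct fixes land in this sysctl table. `ip_default_ttl` switches to plain `proc_dointvec` (the flush side effect is presumably handled elsewhere now); `tcp_mem` and `udp_mem` switch to `proc_doulongvec_minmax` because their backing arrays become `long[3]` in the tcp.c and udp.c hunks below, and reading a `long` array with an `int` handler would misinterpret it on 64-bit; and `tcp_adv_win_scale` gains [-31, 31] clamping so the shifts it feeds can never be undefined. A minimal sketch of how `extra1`/`extra2` bound a sysctl, with the `example_*` names being hypothetical:

	static int example_min = -31;	/* hypothetical bounds, mirroring */
	static int example_max = 31;	/* tcp_adv_win_scale above */
	static int example_value;

	static struct ctl_table example_table[] = {
		{
			.procname	= "example_shift",	/* hypothetical name */
			.data		= &example_value,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			/*
			 * proc_dointvec_minmax rejects writes outside
			 * [*extra1, *extra2] instead of storing them.
			 */
			.proc_handler	= proc_dointvec_minmax,
			.extra1		= &example_min,
			.extra2		= &example_max,
		},
		{ }
	};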
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 1664a05..6c11eec 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -282,7 +282,7 @@
 struct percpu_counter tcp_orphan_count;
 EXPORT_SYMBOL_GPL(tcp_orphan_count);
 
-int sysctl_tcp_mem[3] __read_mostly;
+long sysctl_tcp_mem[3] __read_mostly;
 int sysctl_tcp_wmem[3] __read_mostly;
 int sysctl_tcp_rmem[3] __read_mostly;
 
@@ -290,7 +290,7 @@
 EXPORT_SYMBOL(sysctl_tcp_rmem);
 EXPORT_SYMBOL(sysctl_tcp_wmem);
 
-atomic_t tcp_memory_allocated;	/* Current allocated memory. */
+atomic_long_t tcp_memory_allocated;	/* Current allocated memory. */
 EXPORT_SYMBOL(tcp_memory_allocated);
 
 /*
@@ -1193,7 +1193,7 @@
 	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
 
 	WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
-	     KERN_INFO "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
+	     "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
 	     tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
 #endif
 
@@ -1477,10 +1477,9 @@
 			 * shouldn't happen.
 			 */
 			if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
-			     KERN_INFO "recvmsg bug: copied %X "
-				       "seq %X rcvnxt %X fl %X\n", *seq,
-				       TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
-				       flags))
+				 "recvmsg bug: copied %X seq %X rcvnxt %X fl %X\n",
+				 *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
+				 flags))
 				break;
 
 			offset = *seq - TCP_SKB_CB(skb)->seq;
@@ -1490,10 +1489,9 @@
 				goto found_ok_skb;
 			if (tcp_hdr(skb)->fin)
 				goto found_fin_ok;
-			WARN(!(flags & MSG_PEEK), KERN_INFO "recvmsg bug 2: "
-					"copied %X seq %X rcvnxt %X fl %X\n",
-					*seq, TCP_SKB_CB(skb)->seq,
-					tp->rcv_nxt, flags);
+			WARN(!(flags & MSG_PEEK),
+			     "recvmsg bug 2: copied %X seq %X rcvnxt %X fl %X\n",
+			     *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags);
 		}
 
 		/* Well, if we have backlog, try to process it now yet. */
@@ -2246,7 +2244,7 @@
 		/* Values greater than interface MTU won't take effect. However
 		 * at the point when this call is done we typically don't yet
 		 * know which interface is going to be used */
-		if (val < 8 || val > MAX_TCP_WINDOW) {
+		if (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW) {
 			err = -EINVAL;
 			break;
 		}
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3357f69..824e8c8 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -259,8 +259,11 @@
 	int sndmem = tcp_sk(sk)->rx_opt.mss_clamp + MAX_TCP_HEADER + 16 +
 		     sizeof(struct sk_buff);
 
-	if (sk->sk_sndbuf < 3 * sndmem)
-		sk->sk_sndbuf = min(3 * sndmem, sysctl_tcp_wmem[2]);
+	if (sk->sk_sndbuf < 3 * sndmem) {
+		sk->sk_sndbuf = 3 * sndmem;
+		if (sk->sk_sndbuf > sysctl_tcp_wmem[2])
+			sk->sk_sndbuf = sysctl_tcp_wmem[2];
+	}
 }
 
 /* 2. Tuning advertised window (window_clamp, rcv_ssthresh)
@@ -396,7 +399,7 @@
 	if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] &&
 	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
 	    !tcp_memory_pressure &&
-	    atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) {
+	    atomic_long_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) {
 		sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
 				    sysctl_tcp_rmem[2]);
 	}
@@ -731,7 +734,7 @@
 			 * Reset our results.
 			 */
 			if (!(dst_metric_locked(dst, RTAX_RTT)))
-				dst->metrics[RTAX_RTT - 1] = 0;
+				dst_metric_set(dst, RTAX_RTT, 0);
 			return;
 		}
 
@@ -773,34 +776,38 @@
 			if (dst_metric(dst, RTAX_SSTHRESH) &&
 			    !dst_metric_locked(dst, RTAX_SSTHRESH) &&
 			    (tp->snd_cwnd >> 1) > dst_metric(dst, RTAX_SSTHRESH))
-				dst->metrics[RTAX_SSTHRESH-1] = tp->snd_cwnd >> 1;
+				dst_metric_set(dst, RTAX_SSTHRESH, tp->snd_cwnd >> 1);
 			if (!dst_metric_locked(dst, RTAX_CWND) &&
 			    tp->snd_cwnd > dst_metric(dst, RTAX_CWND))
-				dst->metrics[RTAX_CWND - 1] = tp->snd_cwnd;
+				dst_metric_set(dst, RTAX_CWND, tp->snd_cwnd);
 		} else if (tp->snd_cwnd > tp->snd_ssthresh &&
 			   icsk->icsk_ca_state == TCP_CA_Open) {
 			/* Cong. avoidance phase, cwnd is reliable. */
 			if (!dst_metric_locked(dst, RTAX_SSTHRESH))
-				dst->metrics[RTAX_SSTHRESH-1] =
-					max(tp->snd_cwnd >> 1, tp->snd_ssthresh);
+				dst_metric_set(dst, RTAX_SSTHRESH,
+					       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
 			if (!dst_metric_locked(dst, RTAX_CWND))
-				dst->metrics[RTAX_CWND-1] = (dst_metric(dst, RTAX_CWND) + tp->snd_cwnd) >> 1;
+				dst_metric_set(dst, RTAX_CWND,
+					       (dst_metric(dst, RTAX_CWND) +
+						tp->snd_cwnd) >> 1);
 		} else {
 			/* Else slow start did not finish, cwnd is non-sense,
 			   ssthresh may be also invalid.
 			 */
 			if (!dst_metric_locked(dst, RTAX_CWND))
-				dst->metrics[RTAX_CWND-1] = (dst_metric(dst, RTAX_CWND) + tp->snd_ssthresh) >> 1;
+				dst_metric_set(dst, RTAX_CWND,
+					       (dst_metric(dst, RTAX_CWND) +
+						tp->snd_ssthresh) >> 1);
 			if (dst_metric(dst, RTAX_SSTHRESH) &&
 			    !dst_metric_locked(dst, RTAX_SSTHRESH) &&
 			    tp->snd_ssthresh > dst_metric(dst, RTAX_SSTHRESH))
-				dst->metrics[RTAX_SSTHRESH-1] = tp->snd_ssthresh;
+				dst_metric_set(dst, RTAX_SSTHRESH, tp->snd_ssthresh);
 		}
 
 		if (!dst_metric_locked(dst, RTAX_REORDERING)) {
 			if (dst_metric(dst, RTAX_REORDERING) < tp->reordering &&
 			    tp->reordering != sysctl_tcp_reordering)
-				dst->metrics[RTAX_REORDERING-1] = tp->reordering;
+				dst_metric_set(dst, RTAX_REORDERING, tp->reordering);
 		}
 	}
 }
@@ -4861,7 +4868,7 @@
 		return 0;
 
 	/* If we are under soft global TCP memory pressure, do not expand.  */
-	if (atomic_read(&tcp_memory_allocated) >= sysctl_tcp_mem[0])
+	if (atomic_long_read(&tcp_memory_allocated) >= sysctl_tcp_mem[0])
 		return 0;
 
 	/* If we filled the congestion window, do not expand.  */
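
All the raw `dst->metrics[RTAX_FOO - 1] = val` stores in this file (and in ndisc.c and the IPv6 route.c below) become `dst_metric_set()` calls, pairing with the `dst_metrics_ptr()`/`dst_copy_metrics()` readers in the rtnetlink hunks. Centralizing access behind helpers hides the error-prone 1-based indexing and leaves room to change the metrics representation later. A sketch of the accessor pair, assuming metrics are still a plain inline array at this point:

	#define RTAX_MAX_SKETCH	32	/* stand-in for RTAX_MAX */

	struct dst_entry_sketch {
		u32 _metrics[RTAX_MAX_SKETCH];
	};

	/* RTAX_* values are 1-based, hence the recurring "- 1". */
	static inline u32 dst_metric_sketch(const struct dst_entry_sketch *dst,
					    int metric)
	{
		return dst->_metrics[metric - 1];
	}

	static inline void dst_metric_set_sketch(struct dst_entry_sketch *dst,
						 int metric, u32 val)
	{
		dst->_metrics[metric - 1] = val;
	}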
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 8f8527d..4fc3387 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -415,6 +415,9 @@
 		    !icsk->icsk_backoff)
 			break;
 
+		if (sock_owned_by_user(sk))
+			break;
+
 		icsk->icsk_backoff--;
 		inet_csk(sk)->icsk_rto = __tcp_set_rto(tp) <<
 					 icsk->icsk_backoff;
@@ -429,11 +432,6 @@
 		if (remaining) {
 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
 						  remaining, TCP_RTO_MAX);
-		} else if (sock_owned_by_user(sk)) {
-			/* RTO revert clocked out retransmission,
-			 * but socket is locked. Will defer. */
-			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
-						  HZ/20, TCP_RTO_MAX);
 		} else {
 			/* RTO revert clocked out retransmission.
 			 * Will retransmit now */
@@ -1212,12 +1210,6 @@
 };
 #endif
 
-static struct timewait_sock_ops tcp_timewait_sock_ops = {
-	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
-	.twsk_unique	= tcp_twsk_unique,
-	.twsk_destructor= tcp_twsk_destructor,
-};
-
 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_extend_values tmp_ext;
@@ -1349,7 +1341,7 @@
 		    tcp_death_row.sysctl_tw_recycle &&
 		    (dst = inet_csk_route_req(sk, req)) != NULL &&
 		    (peer = rt_get_peer((struct rtable *)dst)) != NULL &&
-		    peer->v4daddr == saddr) {
+		    peer->daddr.a4 == saddr) {
 			inet_peer_refcheck(peer);
 			if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
 			    (s32)(peer->tcp_ts - req->ts_recent) >
@@ -1765,64 +1757,40 @@
 	goto discard_it;
 }
 
-/* VJ's idea. Save last timestamp seen from this destination
- * and hold it at least for normal timewait interval to use for duplicate
- * segment detection in subsequent connections, before they enter synchronized
- * state.
- */
-
-int tcp_v4_remember_stamp(struct sock *sk)
+struct inet_peer *tcp_v4_get_peer(struct sock *sk, bool *release_it)
 {
+	struct rtable *rt = (struct rtable *) __sk_dst_get(sk);
 	struct inet_sock *inet = inet_sk(sk);
-	struct tcp_sock *tp = tcp_sk(sk);
-	struct rtable *rt = (struct rtable *)__sk_dst_get(sk);
-	struct inet_peer *peer = NULL;
-	int release_it = 0;
+	struct inet_peer *peer;
 
 	if (!rt || rt->rt_dst != inet->inet_daddr) {
-		peer = inet_getpeer(inet->inet_daddr, 1);
-		release_it = 1;
+		peer = inet_getpeer_v4(inet->inet_daddr, 1);
+		*release_it = true;
 	} else {
 		if (!rt->peer)
 			rt_bind_peer(rt, 1);
 		peer = rt->peer;
+		*release_it = false;
 	}
 
-	if (peer) {
-		if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
-		    ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
-		     peer->tcp_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
-			peer->tcp_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
-			peer->tcp_ts = tp->rx_opt.ts_recent;
-		}
-		if (release_it)
-			inet_putpeer(peer);
-		return 1;
-	}
-
-	return 0;
+	return peer;
 }
-EXPORT_SYMBOL(tcp_v4_remember_stamp);
+EXPORT_SYMBOL(tcp_v4_get_peer);
 
-int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
+void *tcp_v4_tw_get_peer(struct sock *sk)
 {
-	struct inet_peer *peer = inet_getpeer(tw->tw_daddr, 1);
+	struct inet_timewait_sock *tw = inet_twsk(sk);
 
-	if (peer) {
-		const struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
-
-		if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
-		    ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
-		     peer->tcp_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
-			peer->tcp_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
-			peer->tcp_ts	   = tcptw->tw_ts_recent;
-		}
-		inet_putpeer(peer);
-		return 1;
-	}
-
-	return 0;
+	return inet_getpeer_v4(tw->tw_daddr, 1);
 }
+EXPORT_SYMBOL(tcp_v4_tw_get_peer);
+
+static struct timewait_sock_ops tcp_timewait_sock_ops = {
+	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
+	.twsk_unique	= tcp_twsk_unique,
+	.twsk_destructor= tcp_twsk_destructor,
+	.twsk_getpeer	= tcp_v4_tw_get_peer,
+};
 
 const struct inet_connection_sock_af_ops ipv4_specific = {
 	.queue_xmit	   = ip_queue_xmit,
@@ -1830,7 +1798,7 @@
 	.rebuild_header	   = inet_sk_rebuild_header,
 	.conn_request	   = tcp_v4_conn_request,
 	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
-	.remember_stamp	   = tcp_v4_remember_stamp,
+	.get_peer	   = tcp_v4_get_peer,
 	.net_header_len	   = sizeof(struct iphdr),
 	.setsockopt	   = ip_setsockopt,
 	.getsockopt	   = ip_getsockopt,
@@ -2045,7 +2013,9 @@
 	}
 get_sk:
 	sk_nulls_for_each_from(sk, node) {
-		if (sk->sk_family == st->family && net_eq(sock_net(sk), net)) {
+		if (!net_eq(sock_net(sk), net))
+			continue;
+		if (sk->sk_family == st->family) {
 			cur = sk;
 			goto out;
 		}
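
The `remember_stamp` op is split here: the address-family hook shrinks to `get_peer` (find the inet_peer for this connection, and report through `release_it` whether the caller must drop a reference), while the actual timestamp caching moves to tcp_minisocks.c in the next file, where IPv4 and IPv6 can share it. The time-wait side is dispatched through the new `.twsk_getpeer` member; a hypothetical sketch of the dispatcher that `twsk_getpeer()` in tcp_minisocks.c presumably resolves to (the real helper lives in a header outside this diff):

	static inline struct inet_peer *twsk_getpeer_sketch(struct sock *sk)
	{
		struct timewait_sock_ops *ops = sk->sk_prot->twsk_prot;

		/* hypothetical shape; NULL-op handling is an assumption */
		return ops->twsk_getpeer ? ops->twsk_getpeer(sk) : NULL;
	}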
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 43cf901..80b1f80 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -49,6 +49,56 @@
 };
 EXPORT_SYMBOL_GPL(tcp_death_row);
 
+/* VJ's idea. Save last timestamp seen from this destination
+ * and hold it at least for normal timewait interval to use for duplicate
+ * segment detection in subsequent connections, before they enter synchronized
+ * state.
+ */
+
+static int tcp_remember_stamp(struct sock *sk)
+{
+	const struct inet_connection_sock *icsk = inet_csk(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct inet_peer *peer;
+	bool release_it;
+
+	peer = icsk->icsk_af_ops->get_peer(sk, &release_it);
+	if (peer) {
+		if ((s32)(peer->tcp_ts - tp->rx_opt.ts_recent) <= 0 ||
+		    ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
+		     peer->tcp_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
+			peer->tcp_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
+			peer->tcp_ts = tp->rx_opt.ts_recent;
+		}
+		if (release_it)
+			inet_putpeer(peer);
+		return 1;
+	}
+
+	return 0;
+}
+
+static int tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
+{
+	struct sock *sk = (struct sock *) tw;
+	struct inet_peer *peer;
+
+	peer = twsk_getpeer(sk);
+	if (peer) {
+		const struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
+
+		if ((s32)(peer->tcp_ts - tcptw->tw_ts_recent) <= 0 ||
+		    ((u32)get_seconds() - peer->tcp_ts_stamp > TCP_PAWS_MSL &&
+		     peer->tcp_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
+			peer->tcp_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
+			peer->tcp_ts	   = tcptw->tw_ts_recent;
+		}
+		inet_putpeer(peer);
+		return 1;
+	}
+	return 0;
+}
+
 static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
 {
 	if (seq == s_win)
@@ -149,14 +199,9 @@
 			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
 		}
 
-		/* I am shamed, but failed to make it more elegant.
-		 * Yes, it is direct reference to IP, which is impossible
-		 * to generalize to IPv6. Taking into account that IPv6
-		 * do not understand recycling in any case, it not
-		 * a big problem in practice. --ANK */
-		if (tw->tw_family == AF_INET &&
-		    tcp_death_row.sysctl_tw_recycle && tcptw->tw_ts_recent_stamp &&
-		    tcp_v4_tw_remember_stamp(tw))
+		if (tcp_death_row.sysctl_tw_recycle &&
+		    tcptw->tw_ts_recent_stamp &&
+		    tcp_tw_remember_stamp(tw))
 			inet_twsk_schedule(tw, &tcp_death_row, tw->tw_timeout,
 					   TCP_TIMEWAIT_LEN);
 		else
@@ -274,7 +319,7 @@
 	int recycle_ok = 0;
 
 	if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp)
-		recycle_ok = icsk->icsk_af_ops->remember_stamp(sk);
+		recycle_ok = tcp_remember_stamp(sk);
 
 	if (tcp_death_row.tw_count < tcp_death_row.sysctl_max_tw_buckets)
 		tw = inet_twsk_alloc(sk, state);
@@ -347,7 +392,7 @@
 		 * socket up.  We've got bigger problems than
 		 * non-graceful socket closings.
 		 */
-		LIMIT_NETDEBUG(KERN_INFO "TCP: time wait bucket table overflow\n");
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
 	}
 
 	tcp_update_metrics(sk);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 05b1ecf..97041f2 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -55,7 +55,7 @@
 int sysctl_tcp_tso_win_divisor __read_mostly = 3;
 
 int sysctl_tcp_mtu_probing __read_mostly = 0;
-int sysctl_tcp_base_mss __read_mostly = 512;
+int sysctl_tcp_base_mss __read_mostly = TCP_BASE_MSS;
 
 /* By default, RFC2861 behavior.  */
 int sysctl_tcp_slow_start_after_idle __read_mostly = 1;
@@ -231,11 +231,10 @@
 		/* when initializing use the value from init_rcv_wnd
 		 * rather than the default from above
 		 */
-		if (init_rcv_wnd &&
-		    (*rcv_wnd > init_rcv_wnd * mss))
-			*rcv_wnd = init_rcv_wnd * mss;
-		else if (*rcv_wnd > init_cwnd * mss)
-			*rcv_wnd = init_cwnd * mss;
+		if (init_rcv_wnd)
+			*rcv_wnd = min(*rcv_wnd, init_rcv_wnd * mss);
+		else
+			*rcv_wnd = min(*rcv_wnd, init_cwnd * mss);
 	}
 
 	/* Set the clamp no higher than max representable value */
@@ -386,27 +385,30 @@
  */
 static u8 tcp_cookie_size_check(u8 desired)
 {
-	if (desired > 0) {
+	int cookie_size;
+
+	if (desired > 0)
 		/* previously specified */
 		return desired;
-	}
-	if (sysctl_tcp_cookie_size <= 0) {
+
+	cookie_size = ACCESS_ONCE(sysctl_tcp_cookie_size);
+	if (cookie_size <= 0)
 		/* no default specified */
 		return 0;
-	}
-	if (sysctl_tcp_cookie_size <= TCP_COOKIE_MIN) {
+
+	if (cookie_size <= TCP_COOKIE_MIN)
 		/* value too small, specify minimum */
 		return TCP_COOKIE_MIN;
-	}
-	if (sysctl_tcp_cookie_size >= TCP_COOKIE_MAX) {
+
+	if (cookie_size >= TCP_COOKIE_MAX)
 		/* value too large, specify maximum */
 		return TCP_COOKIE_MAX;
-	}
-	if (0x1 & sysctl_tcp_cookie_size) {
+
+	if (cookie_size & 1)
 		/* 8-bit multiple, illegal, fix it */
-		return (u8)(sysctl_tcp_cookie_size + 0x1);
-	}
-	return (u8)sysctl_tcp_cookie_size;
+		cookie_size++;
+
+	return (u8)cookie_size;
 }
 
 /* Write previously computed TCP options to the packet.
@@ -822,8 +824,11 @@
 							   &md5);
 	tcp_header_size = tcp_options_size + sizeof(struct tcphdr);
 
-	if (tcp_packets_in_flight(tp) == 0)
+	if (tcp_packets_in_flight(tp) == 0) {
 		tcp_ca_event(sk, CA_EVENT_TX_START);
+		skb->ooo_okay = 1;
+	} else
+		skb->ooo_okay = 0;
 
 	skb_push(skb, tcp_header_size);
 	skb_reset_transport_header(skb);
@@ -1513,6 +1518,7 @@
 	struct tcp_sock *tp = tcp_sk(sk);
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	u32 send_win, cong_win, limit, in_flight;
+	int win_divisor;
 
 	if (TCP_SKB_CB(skb)->flags & TCPHDR_FIN)
 		goto send_now;
@@ -1544,13 +1550,14 @@
 	if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
 		goto send_now;
 
-	if (sysctl_tcp_tso_win_divisor) {
+	win_divisor = ACCESS_ONCE(sysctl_tcp_tso_win_divisor);
+	if (win_divisor) {
 		u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
 
 		/* If at least some fraction of a window is available,
 		 * just use it.
 		 */
-		chunk /= sysctl_tcp_tso_win_divisor;
+		chunk /= win_divisor;
 		if (limit >= chunk)
 			goto send_now;
 	} else {
@@ -2592,6 +2599,7 @@
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *buff;
+	int err;
 
 	tcp_connect_init(sk);
 
@@ -2614,7 +2622,9 @@
 	sk->sk_wmem_queued += buff->truesize;
 	sk_mem_charge(sk, buff->truesize);
 	tp->packets_out += tcp_skb_pcount(buff);
-	tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
+	err = tcp_transmit_skb(sk, buff, 1, sk->sk_allocation);
+	if (err == -ECONNREFUSED)
+		return err;
 
 	/* We change tp->snd_nxt after the tcp_transmit_skb() call
 	 * in order to make this packet get counted in tcpOutSegs.
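
Two hunks in this file, `tcp_cookie_size_check()` and `tcp_tso_should_defer()`, now snapshot a sysctl into a local with `ACCESS_ONCE()` before testing and using it. Without the snapshot the compiler is free to reload the global between the range check and the use, so a concurrent write from /proc could slip past every check, or turn a tested-nonzero divisor into zero. A minimal sketch, with `sysctl_foo` as a hypothetical tunable:

	extern int sysctl_foo;	/* hypothetical tunable, writable via /proc */

	static u32 scale_chunk(u32 chunk)
	{
		int foo = ACCESS_ONCE(sysctl_foo);	/* exactly one load */

		/*
		 * Testing and dividing by the same snapshot means a
		 * concurrent sysctl write cannot zero the divisor
		 * between the test and the use.
		 */
		if (foo)
			chunk /= foo;
		return chunk;
	}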
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
index 6211e21..85ee7eb 100644
--- a/net/ipv4/tcp_probe.c
+++ b/net/ipv4/tcp_probe.c
@@ -154,7 +154,7 @@
 	struct timespec tv
 		= ktime_to_timespec(ktime_sub(p->tstamp, tcp_probe.start));
 
-	return snprintf(tbuf, n,
+	return scnprintf(tbuf, n,
 			"%lu.%09lu %pI4:%u %pI4:%u %d %#x %#x %u %u %u %u\n",
 			(unsigned long) tv.tv_sec,
 			(unsigned long) tv.tv_nsec,
@@ -174,7 +174,7 @@
 		return -EINVAL;
 
 	while (cnt < len) {
-		char tbuf[128];
+		char tbuf[164];
 		int width;
 
 		/* Wait for data in buffer */
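
The `snprintf()` to `scnprintf()` switch matters because `tcpprobe_read()` uses the return value as a byte count to copy out of `tbuf`: `snprintf()` returns the length the output *would* have had, which can exceed the buffer, while `scnprintf()` returns what was actually written. The buffer also grows from 128 to 164 bytes so a worst-case line fits anyway. A short illustration of the difference:

	static void snprintf_vs_scnprintf(void)
	{
		char buf[8];
		int a, b;

		a = snprintf(buf, sizeof(buf), "0123456789");
		/* a == 10: the would-be length; buf holds "0123456" + NUL */
		b = scnprintf(buf, sizeof(buf), "0123456789");
		/* b == 7: bytes actually stored, safe as a copy count */
	}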
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 28cb2d7..b37181d 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -110,7 +110,7 @@
 struct udp_table udp_table __read_mostly;
 EXPORT_SYMBOL(udp_table);
 
-int sysctl_udp_mem[3] __read_mostly;
+long sysctl_udp_mem[3] __read_mostly;
 EXPORT_SYMBOL(sysctl_udp_mem);
 
 int sysctl_udp_rmem_min __read_mostly;
@@ -119,7 +119,7 @@
 int sysctl_udp_wmem_min __read_mostly;
 EXPORT_SYMBOL(sysctl_udp_wmem_min);
 
-atomic_t udp_memory_allocated;
+atomic_long_t udp_memory_allocated;
 EXPORT_SYMBOL(udp_memory_allocated);
 
 #define MAX_UDP_PORTS 65536
@@ -430,7 +430,7 @@
 
 	if (result) {
 exact_match:
-		if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt)))
+		if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
 			result = NULL;
 		else if (unlikely(compute_score2(result, net, saddr, sport,
 				  daddr, hnum, dif) < badness)) {
@@ -500,7 +500,7 @@
 		goto begin;
 
 	if (result) {
-		if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt)))
+		if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
 			result = NULL;
 		else if (unlikely(compute_score(result, net, saddr, hnum, sport,
 				  daddr, dport, dif) < badness)) {
@@ -890,15 +890,13 @@
 	if (rt == NULL) {
 		struct flowi fl = { .oif = ipc.oif,
 				    .mark = sk->sk_mark,
-				    .nl_u = { .ip4_u =
-					      { .daddr = faddr,
-						.saddr = saddr,
-						.tos = tos } },
+				    .fl4_dst = faddr,
+				    .fl4_src = saddr,
+				    .fl4_tos = tos,
 				    .proto = sk->sk_protocol,
 				    .flags = inet_sk_flowi_flags(sk),
-				    .uli_u = { .ports =
-					       { .sport = inet->inet_sport,
-						 .dport = dport } } };
+				    .fl_ip_sport = inet->inet_sport,
+				    .fl_ip_dport = dport };
 		struct net *net = sock_net(sk);
 
 		security_sk_classify_flow(sk, &fl);
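
`atomic_inc_not_zero_hint(&sk->sk_refcnt, 2)` speeds up the lockless socket lookup: instead of reading the refcount before the cmpxchg loop, it starts the loop with the caller's guess (2 being a plausible steady-state count for an established, hashed socket), saving one atomic load when the guess is right. A plausible shape of the helper, assuming a nonzero hint:

	/*
	 * Sketch only.  Assumes hint != 0; a zero hint would have to
	 * fall back to the plain atomic_inc_not_zero().
	 */
	static inline int atomic_inc_not_zero_hint_sketch(atomic_t *v, int hint)
	{
		int val, c = hint;

		while ((val = atomic_cmpxchg(v, c, c + 1)) != c) {
			if (!val)
				return 0;	/* counter hit zero: object dying */
			c = val;		/* retry with the value we saw */
		}
		return 1;
	}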
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
index 6f36841..534972e 100644
--- a/net/ipv4/xfrm4_mode_tunnel.c
+++ b/net/ipv4/xfrm4_mode_tunnel.c
@@ -56,7 +56,7 @@
 		0 : (XFRM_MODE_SKB_CB(skb)->frag_off & htons(IP_DF));
 	ip_select_ident(top_iph, dst->child, NULL);
 
-	top_iph->ttl = dst_metric(dst->child, RTAX_HOPLIMIT);
+	top_iph->ttl = ip4_dst_hoplimit(dst->child);
 
 	top_iph->saddr = x->props.saddr.a4;
 	top_iph->daddr = x->id.daddr.a4;
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 4464f3b..b057d40 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -11,6 +11,7 @@
 #include <linux/err.h>
 #include <linux/kernel.h>
 #include <linux/inetdevice.h>
+#include <linux/if_tunnel.h>
 #include <net/dst.h>
 #include <net/xfrm.h>
 #include <net/ip.h>
@@ -22,12 +23,8 @@
 					  xfrm_address_t *daddr)
 {
 	struct flowi fl = {
-		.nl_u = {
-			.ip4_u = {
-				.tos = tos,
-				.daddr = daddr->a4,
-			},
-		},
+		.fl4_dst = daddr->a4,
+		.fl4_tos = tos,
 	};
 	struct dst_entry *dst;
 	struct rtable *rt;
@@ -80,10 +77,6 @@
 	xdst->u.dst.dev = dev;
 	dev_hold(dev);
 
-	xdst->u.rt.idev = in_dev_get(dev);
-	if (!xdst->u.rt.idev)
-		return -ENODEV;
-
 	xdst->u.rt.peer = rt->peer;
 	if (rt->peer)
 		atomic_inc(&rt->peer->refcnt);
@@ -158,6 +151,20 @@
 				fl->fl_ipsec_spi = htonl(ntohs(ipcomp_hdr[1]));
 			}
 			break;
+
+		case IPPROTO_GRE:
+			if (pskb_may_pull(skb, xprth + 12 - skb->data)) {
+				__be16 *greflags = (__be16 *)xprth;
+				__be32 *gre_hdr = (__be32 *)xprth;
+
+				if (greflags[0] & GRE_KEY) {
+					if (greflags[0] & GRE_CSUM)
+						gre_hdr++;
+					fl->fl_gre_key = gre_hdr[1];
+				}
+			}
+			break;
+
 		default:
 			fl->fl_ipsec_spi = 0;
 			break;
@@ -189,8 +196,6 @@
 {
 	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
 
-	if (likely(xdst->u.rt.idev))
-		in_dev_put(xdst->u.rt.idev);
 	if (likely(xdst->u.rt.peer))
 		inet_putpeer(xdst->u.rt.peer);
 	xfrm_dst_destroy(xdst);
@@ -199,27 +204,9 @@
 static void xfrm4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
 			     int unregister)
 {
-	struct xfrm_dst *xdst;
-
 	if (!unregister)
 		return;
 
-	xdst = (struct xfrm_dst *)dst;
-	if (xdst->u.rt.idev->dev == dev) {
-		struct in_device *loopback_idev =
-			in_dev_get(dev_net(dev)->loopback_dev);
-		BUG_ON(!loopback_idev);
-
-		do {
-			in_dev_put(xdst->u.rt.idev);
-			xdst->u.rt.idev = loopback_idev;
-			in_dev_hold(loopback_idev);
-			xdst = (struct xfrm_dst *)xdst->u.dst.child;
-		} while (xdst->u.dst.xfrm);
-
-		__in_dev_put(loopback_idev);
-	}
-
 	xfrm_dst_ifdown(dst, dev);
 }
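
The new `IPPROTO_GRE` case lets IPsec policy selectors match on the GRE key. The pointer arithmetic leans on the GRE header layout from RFC 2784/2890: 2 bytes of flags, 2 bytes of protocol, then an optional 4-byte checksum word (when `GRE_CSUM` is set) ahead of the optional 4-byte key (when `GRE_KEY` is set), hence the `gre_hdr++` before reading `gre_hdr[1]`. As a layout sketch:

	/*
	 * GRE header as assumed by the parser above:
	 *
	 *	__be16 flags;		xprth[0..1], tested for GRE_KEY/GRE_CSUM
	 *	__be16 protocol;	xprth[2..3]
	 *	__be32 csum_reserved;	present iff GRE_CSUM
	 *	__be32 key;		present iff GRE_KEY
	 *
	 * gre_hdr[0] spans flags+protocol, so the key is gre_hdr[1],
	 * unless a checksum word precedes it.  The 12-byte
	 * pskb_may_pull() covers the worst case: 4 + 4 + 4.
	 */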
 
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index e048ec6..1023ad0 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -98,7 +98,11 @@
 #endif
 
 #define	INFINITY_LIFE_TIME	0xFFFFFFFF
-#define TIME_DELTA(a, b) ((unsigned long)((long)(a) - (long)(b)))
+
+static inline u32 cstamp_delta(unsigned long cstamp)
+{
+	return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
+}
 
 #define ADDRCONF_TIMER_FUZZ_MINUS	(HZ > 50 ? HZ/50 : 1)
 #define ADDRCONF_TIMER_FUZZ		(HZ / 4)
@@ -2740,10 +2744,6 @@
 			/* Flag it for later restoration when link comes up */
 			ifa->flags |= IFA_F_TENTATIVE;
 			ifa->state = INET6_IFADDR_STATE_DAD;
-
-			write_unlock_bh(&idev->lock);
-
-			in6_ifa_hold(ifa);
 		} else {
 			list_del(&ifa->if_list);
 
@@ -2758,19 +2758,15 @@
 			ifa->state = INET6_IFADDR_STATE_DEAD;
 			spin_unlock_bh(&ifa->state_lock);
 
-			if (state == INET6_IFADDR_STATE_DEAD)
-				goto put_ifa;
+			if (state != INET6_IFADDR_STATE_DEAD) {
+				__ipv6_ifa_notify(RTM_DELADDR, ifa);
+				atomic_notifier_call_chain(&inet6addr_chain,
+							   NETDEV_DOWN, ifa);
+			}
+
+			in6_ifa_put(ifa);
+			write_lock_bh(&idev->lock);
 		}
-
-		__ipv6_ifa_notify(RTM_DELADDR, ifa);
-		if (ifa->state == INET6_IFADDR_STATE_DEAD)
-			atomic_notifier_call_chain(&inet6addr_chain,
-						   NETDEV_DOWN, ifa);
-
-put_ifa:
-		in6_ifa_put(ifa);
-
-		write_lock_bh(&idev->lock);
 	}
 
 	list_splice(&keep_list, &idev->addr_list);
@@ -3452,10 +3448,8 @@
 {
 	struct ifa_cacheinfo ci;
 
-	ci.cstamp = (u32)(TIME_DELTA(cstamp, INITIAL_JIFFIES) / HZ * 100
-			+ TIME_DELTA(cstamp, INITIAL_JIFFIES) % HZ * 100 / HZ);
-	ci.tstamp = (u32)(TIME_DELTA(tstamp, INITIAL_JIFFIES) / HZ * 100
-			+ TIME_DELTA(tstamp, INITIAL_JIFFIES) % HZ * 100 / HZ);
+	ci.cstamp = cstamp_delta(cstamp);
+	ci.tstamp = cstamp_delta(tstamp);
 	ci.ifa_prefered = preferred;
 	ci.ifa_valid = valid;
 
@@ -3806,8 +3800,10 @@
 	array[DEVCONF_AUTOCONF] = cnf->autoconf;
 	array[DEVCONF_DAD_TRANSMITS] = cnf->dad_transmits;
 	array[DEVCONF_RTR_SOLICITS] = cnf->rtr_solicits;
-	array[DEVCONF_RTR_SOLICIT_INTERVAL] = cnf->rtr_solicit_interval;
-	array[DEVCONF_RTR_SOLICIT_DELAY] = cnf->rtr_solicit_delay;
+	array[DEVCONF_RTR_SOLICIT_INTERVAL] =
+		jiffies_to_msecs(cnf->rtr_solicit_interval);
+	array[DEVCONF_RTR_SOLICIT_DELAY] =
+		jiffies_to_msecs(cnf->rtr_solicit_delay);
 	array[DEVCONF_FORCE_MLD_VERSION] = cnf->force_mld_version;
 #ifdef CONFIG_IPV6_PRIVACY
 	array[DEVCONF_USE_TEMPADDR] = cnf->use_tempaddr;
@@ -3821,7 +3817,8 @@
 	array[DEVCONF_ACCEPT_RA_PINFO] = cnf->accept_ra_pinfo;
 #ifdef CONFIG_IPV6_ROUTER_PREF
 	array[DEVCONF_ACCEPT_RA_RTR_PREF] = cnf->accept_ra_rtr_pref;
-	array[DEVCONF_RTR_PROBE_INTERVAL] = cnf->rtr_probe_interval;
+	array[DEVCONF_RTR_PROBE_INTERVAL] =
+		jiffies_to_msecs(cnf->rtr_probe_interval);
 #ifdef CONFIG_IPV6_ROUTE_INFO
 	array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] = cnf->accept_ra_rt_info_max_plen;
 #endif
@@ -3839,6 +3836,15 @@
 	array[DEVCONF_FORCE_TLLAO] = cnf->force_tllao;
 }
 
+static inline size_t inet6_ifla6_size(void)
+{
+	return nla_total_size(4) /* IFLA_INET6_FLAGS */
+	     + nla_total_size(sizeof(struct ifla_cacheinfo))
+	     + nla_total_size(DEVCONF_MAX * 4) /* IFLA_INET6_CONF */
+	     + nla_total_size(IPSTATS_MIB_MAX * 8) /* IFLA_INET6_STATS */
+	     + nla_total_size(ICMP6_MIB_MAX * 8); /* IFLA_INET6_ICMP6STATS */
+}
+
 static inline size_t inet6_if_nlmsg_size(void)
 {
 	return NLMSG_ALIGN(sizeof(struct ifinfomsg))
@@ -3846,13 +3852,7 @@
 	       + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
 	       + nla_total_size(4) /* IFLA_MTU */
 	       + nla_total_size(4) /* IFLA_LINK */
-	       + nla_total_size( /* IFLA_PROTINFO */
-			nla_total_size(4) /* IFLA_INET6_FLAGS */
-			+ nla_total_size(sizeof(struct ifla_cacheinfo))
-			+ nla_total_size(DEVCONF_MAX * 4) /* IFLA_INET6_CONF */
-			+ nla_total_size(IPSTATS_MIB_MAX * 8) /* IFLA_INET6_STATS */
-			+ nla_total_size(ICMP6_MIB_MAX * 8) /* IFLA_INET6_ICMP6STATS */
-		 );
+	       + nla_total_size(inet6_ifla6_size()); /* IFLA_PROTINFO */
 }
 
 static inline void __snmp6_fill_stats(u64 *stats, void __percpu **mib,
@@ -3899,15 +3899,70 @@
 	}
 }
 
+static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev)
+{
+	struct nlattr *nla;
+	struct ifla_cacheinfo ci;
+
+	NLA_PUT_U32(skb, IFLA_INET6_FLAGS, idev->if_flags);
+
+	ci.max_reasm_len = IPV6_MAXPLEN;
+	ci.tstamp = cstamp_delta(idev->tstamp);
+	ci.reachable_time = jiffies_to_msecs(idev->nd_parms->reachable_time);
+	ci.retrans_time = jiffies_to_msecs(idev->nd_parms->retrans_time);
+	NLA_PUT(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci);
+
+	nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32));
+	if (nla == NULL)
+		goto nla_put_failure;
+	ipv6_store_devconf(&idev->cnf, nla_data(nla), nla_len(nla));
+
+	/* XXX - MC not implemented */
+
+	nla = nla_reserve(skb, IFLA_INET6_STATS, IPSTATS_MIB_MAX * sizeof(u64));
+	if (nla == NULL)
+		goto nla_put_failure;
+	snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_STATS, nla_len(nla));
+
+	nla = nla_reserve(skb, IFLA_INET6_ICMP6STATS, ICMP6_MIB_MAX * sizeof(u64));
+	if (nla == NULL)
+		goto nla_put_failure;
+	snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_ICMP6STATS, nla_len(nla));
+
+	return 0;
+
+nla_put_failure:
+	return -EMSGSIZE;
+}
+
+static size_t inet6_get_link_af_size(const struct net_device *dev)
+{
+	if (!__in6_dev_get(dev))
+		return 0;
+
+	return inet6_ifla6_size();
+}
+
+static int inet6_fill_link_af(struct sk_buff *skb, const struct net_device *dev)
+{
+	struct inet6_dev *idev = __in6_dev_get(dev);
+
+	if (!idev)
+		return -ENODATA;
+
+	if (inet6_fill_ifla6_attrs(skb, idev) < 0)
+		return -EMSGSIZE;
+
+	return 0;
+}
+
 static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
 			     u32 pid, u32 seq, int event, unsigned int flags)
 {
 	struct net_device *dev = idev->dev;
-	struct nlattr *nla;
 	struct ifinfomsg *hdr;
 	struct nlmsghdr *nlh;
 	void *protoinfo;
-	struct ifla_cacheinfo ci;
 
 	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*hdr), flags);
 	if (nlh == NULL)
@@ -3934,31 +3989,8 @@
 	if (protoinfo == NULL)
 		goto nla_put_failure;
 
-	NLA_PUT_U32(skb, IFLA_INET6_FLAGS, idev->if_flags);
-
-	ci.max_reasm_len = IPV6_MAXPLEN;
-	ci.tstamp = (__u32)(TIME_DELTA(idev->tstamp, INITIAL_JIFFIES) / HZ * 100
-		    + TIME_DELTA(idev->tstamp, INITIAL_JIFFIES) % HZ * 100 / HZ);
-	ci.reachable_time = idev->nd_parms->reachable_time;
-	ci.retrans_time = idev->nd_parms->retrans_time;
-	NLA_PUT(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci);
-
-	nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32));
-	if (nla == NULL)
+	if (inet6_fill_ifla6_attrs(skb, idev) < 0)
 		goto nla_put_failure;
-	ipv6_store_devconf(&idev->cnf, nla_data(nla), nla_len(nla));
-
-	/* XXX - MC not implemented */
-
-	nla = nla_reserve(skb, IFLA_INET6_STATS, IPSTATS_MIB_MAX * sizeof(u64));
-	if (nla == NULL)
-		goto nla_put_failure;
-	snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_STATS, nla_len(nla));
-
-	nla = nla_reserve(skb, IFLA_INET6_ICMP6STATS, ICMP6_MIB_MAX * sizeof(u64));
-	if (nla == NULL)
-		goto nla_put_failure;
-	snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_ICMP6STATS, nla_len(nla));
 
 	nla_nest_end(skb, protoinfo);
 	return nlmsg_end(skb, nlh);
@@ -4629,6 +4661,12 @@
 }
 EXPORT_SYMBOL(unregister_inet6addr_notifier);
 
+static struct rtnl_af_ops inet6_ops = {
+	.family		  = AF_INET6,
+	.fill_link_af	  = inet6_fill_link_af,
+	.get_link_af_size = inet6_get_link_af_size,
+};
+
 /*
  *	Init / cleanup code
  */
@@ -4680,6 +4718,10 @@
 
 	addrconf_verify(0);
 
+	err = rtnl_af_register(&inet6_ops);
+	if (err < 0)
+		goto errout_af;
+
 	err = __rtnl_register(PF_INET6, RTM_GETLINK, NULL, inet6_dump_ifinfo);
 	if (err < 0)
 		goto errout;
@@ -4695,6 +4737,8 @@
 
 	return 0;
 errout:
+	rtnl_af_unregister(&inet6_ops);
+errout_af:
 	unregister_netdevice_notifier(&ipv6_dev_notf);
 errlo:
 	unregister_pernet_subsys(&addrconf_ops);
@@ -4715,6 +4759,8 @@
 
 	rtnl_lock();
 
+	__rtnl_af_unregister(&inet6_ops);
+
 	/* clean dev list */
 	for_each_netdev(&init_net, dev) {
 		if (__in6_dev_get(dev) == NULL)
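
`cstamp_delta()` collapses the two-step `TIME_DELTA(...)/HZ*100 + TIME_DELTA(...)%HZ*100/HZ` conversion into a single `delta * 100UL / HZ`. Writing delta as `q*HZ + r`, both forms reduce to `q*100 + (r*100)/HZ`, so they agree exactly whenever the multiplication does not overflow unsigned long; the single multiply trades some 32-bit headroom for clarity. A worked check:

	/*
	 * HZ = 250, delta = 1001 jiffies:
	 *   old: 1001/250*100 + 1001%250*100/250 = 4*100 + 100/250 = 400
	 *   new: 1001*100/250 = 100100/250                         = 400
	 */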
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 54e8e42..059a3de 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -810,7 +810,7 @@
 	}
 	rcu_read_unlock();
 
-	if (unlikely(IS_ERR(segs)))
+	if (IS_ERR(segs))
 		goto out;
 
 	for (skb = segs; skb; skb = skb->next) {
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index ee9b93b..1b5c982 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -49,6 +49,8 @@
 
 #define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))
 
+static u32 esp6_get_mtu(struct xfrm_state *x, int mtu);
+
 /*
  * Allocate an AEAD request structure with extra space for SG and IV.
  *
@@ -140,6 +142,8 @@
 	int blksize;
 	int clen;
 	int alen;
+	int plen;
+	int tfclen;
 	int nfrags;
 	u8 *iv;
 	u8 *tail;
@@ -148,18 +152,26 @@
 	/* skb is pure payload to encrypt */
 	err = -ENOMEM;
 
-	/* Round to block size */
-	clen = skb->len;
-
 	aead = esp->aead;
 	alen = crypto_aead_authsize(aead);
 
+	tfclen = 0;
+	if (x->tfcpad) {
+		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
+		u32 padto;
+
+		padto = min(x->tfcpad, esp6_get_mtu(x, dst->child_mtu_cached));
+		if (skb->len < padto)
+			tfclen = padto - skb->len;
+	}
 	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
-	clen = ALIGN(clen + 2, blksize);
+	clen = ALIGN(skb->len + 2 + tfclen, blksize);
 	if (esp->padlen)
 		clen = ALIGN(clen, esp->padlen);
+	plen = clen - skb->len - tfclen;
 
-	if ((err = skb_cow_data(skb, clen - skb->len + alen, &trailer)) < 0)
+	err = skb_cow_data(skb, tfclen + plen + alen, &trailer);
+	if (err < 0)
 		goto error;
 	nfrags = err;
 
@@ -174,13 +186,17 @@
 
 	/* Fill padding... */
 	tail = skb_tail_pointer(trailer);
+	if (tfclen) {
+		memset(tail, 0, tfclen);
+		tail += tfclen;
+	}
 	do {
 		int i;
-		for (i=0; i<clen-skb->len - 2; i++)
+		for (i = 0; i < plen - 2; i++)
 			tail[i] = i + 1;
 	} while (0);
-	tail[clen-skb->len - 2] = (clen - skb->len) - 2;
-	tail[clen - skb->len - 1] = *skb_mac_header(skb);
+	tail[plen - 2] = plen - 2;
+	tail[plen - 1] = *skb_mac_header(skb);
 	pskb_put(skb, trailer, clen - skb->len + alen);
 
 	skb_push(skb, -skb_network_offset(skb));
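
The esp6 rewrite above adds Traffic Flow Confidentiality (TFC) padding: when `x->tfcpad` is set, the payload is zero-filled up to `min(tfcpad, PMTU)` before the usual self-describing ESP pad, so the padding math is now split into `tfclen` (TFC fill) and `plen` (ESP pad plus the two trailer bytes). A worked example with assumed sizes:

	/*
	 * Worked example, assuming blksize = 16, x->tfcpad = 64,
	 * esp6_get_mtu() = 1440, skb->len = 40:
	 *
	 *   padto  = min(64, 1440)          = 64
	 *   tfclen = padto - skb->len       = 24  (TFC zero fill)
	 *   clen   = ALIGN(40 + 2 + 24, 16) = 80  (ciphertext length)
	 *   plen   = clen - 40 - 24         = 16  (ESP pad + 2 trailer bytes)
	 *
	 * Trailer layout: 24 zero bytes, pad bytes 1..14 (plen - 2 of
	 * them), the pad-length byte (14), then the next-header byte.
	 */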
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 8a16280..e46305d 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -54,24 +54,54 @@
 
 EXPORT_SYMBOL_GPL(inet6_csk_bind_conflict);
 
+struct dst_entry *inet6_csk_route_req(struct sock *sk,
+				      const struct request_sock *req)
+{
+	struct inet6_request_sock *treq = inet6_rsk(req);
+	struct ipv6_pinfo *np = inet6_sk(sk);
+	struct in6_addr *final_p, final;
+	struct dst_entry *dst;
+	struct flowi fl;
+
+	memset(&fl, 0, sizeof(fl));
+	fl.proto = IPPROTO_TCP;
+	ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
+	final_p = fl6_update_dst(&fl, np->opt, &final);
+	ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
+	fl.oif = sk->sk_bound_dev_if;
+	fl.mark = sk->sk_mark;
+	fl.fl_ip_dport = inet_rsk(req)->rmt_port;
+	fl.fl_ip_sport = inet_rsk(req)->loc_port;
+	security_req_classify_flow(req, &fl);
+
+	if (ip6_dst_lookup(sk, &dst, &fl))
+		return NULL;
+
+	if (final_p)
+		ipv6_addr_copy(&fl.fl6_dst, final_p);
+
+	if ((xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
+		return NULL;
+
+	return dst;
+}
+
 /*
  * request_sock (formerly open request) hash tables.
  */
 static u32 inet6_synq_hash(const struct in6_addr *raddr, const __be16 rport,
 			   const u32 rnd, const u16 synq_hsize)
 {
-	u32 a = (__force u32)raddr->s6_addr32[0];
-	u32 b = (__force u32)raddr->s6_addr32[1];
-	u32 c = (__force u32)raddr->s6_addr32[2];
+	u32 c;
 
-	a += JHASH_GOLDEN_RATIO;
-	b += JHASH_GOLDEN_RATIO;
-	c += rnd;
-	__jhash_mix(a, b, c);
+	c = jhash_3words((__force u32)raddr->s6_addr32[0],
+			 (__force u32)raddr->s6_addr32[1],
+			 (__force u32)raddr->s6_addr32[2],
+			 rnd);
 
-	a += (__force u32)raddr->s6_addr32[3];
-	b += (__force u32)rport;
-	__jhash_mix(a, b, c);
+	c = jhash_2words((__force u32)raddr->s6_addr32[3],
+			 (__force u32)rport,
+			 c);
 
 	return c & (synq_hsize - 1);
 }
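
The open-coded `JHASH_GOLDEN_RATIO`/`__jhash_mix()` sequence here (and the one in `inet6_hash_frag()` in reassembly.c below) becomes chained `jhash_3words()`/`jhash_2words()` calls, feeding each round's 32-bit result back as the salt of the next. The resulting hash values differ from the old mix, since only `c` is carried between rounds, but for a hash table only the distribution matters, and the code now tracks any future jhash changes automatically. The chaining pattern, as a sketch:

	static u32 hash_six_words(const u32 w[6], u32 rnd)
	{
		u32 c;

		/* each result seeds the next round */
		c = jhash_3words(w[0], w[1], w[2], rnd);
		c = jhash_3words(w[3], w[4], w[5], c);
		return c;
	}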
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 2a59610..4f4483e 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -58,8 +58,6 @@
 MODULE_DESCRIPTION("IPv6 tunneling device");
 MODULE_LICENSE("GPL");
 
-#define IPV6_TLV_TEL_DST_SIZE 8
-
 #ifdef IP6_TNL_DEBUG
 #define IP6_TNL_TRACE(x...) printk(KERN_DEBUG "%s:" x "\n", __func__)
 #else
@@ -1175,6 +1173,8 @@
 				sizeof (struct ipv6hdr);
 
 			dev->mtu = rt->rt6i_dev->mtu - sizeof (struct ipv6hdr);
+			if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+				dev->mtu -= 8;
 
 			if (dev->mtu < IPV6_MIN_MTU)
 				dev->mtu = IPV6_MIN_MTU;
@@ -1363,12 +1363,17 @@
 
 static void ip6_tnl_dev_setup(struct net_device *dev)
 {
+	struct ip6_tnl *t;
+
 	dev->netdev_ops = &ip6_tnl_netdev_ops;
 	dev->destructor = ip6_dev_free;
 
 	dev->type = ARPHRD_TUNNEL6;
 	dev->hard_header_len = LL_MAX_HEADER + sizeof (struct ipv6hdr);
 	dev->mtu = ETH_DATA_LEN - sizeof (struct ipv6hdr);
+	t = netdev_priv(dev);
+	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
+		dev->mtu -= 8;
 	dev->flags |= IFF_NOARP;
 	dev->addr_len = sizeof(struct in6_addr);
 	dev->features |= NETIF_F_NETNS_LOCAL;
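
The two MTU hunks above reserve 8 bytes for the tunnel encapsulation limit option unless `IP6_TNL_F_IGN_ENCAP_LIMIT` is set; the option rides in a destination options header that is 8 bytes in total, matching the `IPV6_TLV_TEL_DST_SIZE 8` definition the first hunk removes from this file (presumably relocated to a shared header). Worked out for the static default:

	/*
	 * ip6_tnl_dev_setup(), default flags (encap limit in use):
	 *   ETH_DATA_LEN (1500) - sizeof(struct ipv6hdr) (40) = 1460
	 *   - 8 (encap-limit destination options header)      = 1452
	 * ip6_tnl_link_config() applies the same -8 to the
	 * route-derived MTU, floored at IPV6_MIN_MTU (1280).
	 */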
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 6f32ffc..9fab274 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1843,9 +1843,7 @@
 
 	fl = (struct flowi) {
 		.oif = vif->link,
-		.nl_u = { .ip6_u =
-				{ .daddr = ipv6h->daddr, }
-		}
+		.fl6_dst = ipv6h->daddr,
 	};
 
 	dst = ip6_route_output(net, NULL, &fl);
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index d1444b9..49f986d 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -82,7 +82,7 @@
 static struct in6_addr mld2_all_mcr = MLD2_ALL_MCR_INIT;
 
 /* Big mc list lock for all the sockets */
-static DEFINE_RWLOCK(ipv6_sk_mc_lock);
+static DEFINE_SPINLOCK(ipv6_sk_mc_lock);
 
 static void igmp6_join_group(struct ifmcaddr6 *ma);
 static void igmp6_leave_group(struct ifmcaddr6 *ma);
@@ -123,6 +123,11 @@
  *	socket join on multicast group
  */
 
+#define for_each_pmc_rcu(np, pmc)				\
+	for (pmc = rcu_dereference(np->ipv6_mc_list);		\
+	     pmc != NULL;					\
+	     pmc = rcu_dereference(pmc->next))
+
 int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
 {
 	struct net_device *dev = NULL;
@@ -134,15 +139,15 @@
 	if (!ipv6_addr_is_multicast(addr))
 		return -EINVAL;
 
-	read_lock_bh(&ipv6_sk_mc_lock);
-	for (mc_lst=np->ipv6_mc_list; mc_lst; mc_lst=mc_lst->next) {
+	rcu_read_lock();
+	for_each_pmc_rcu(np, mc_lst) {
 		if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
 		    ipv6_addr_equal(&mc_lst->addr, addr)) {
-			read_unlock_bh(&ipv6_sk_mc_lock);
+			rcu_read_unlock();
 			return -EADDRINUSE;
 		}
 	}
-	read_unlock_bh(&ipv6_sk_mc_lock);
+	rcu_read_unlock();
 
 	mc_lst = sock_kmalloc(sk, sizeof(struct ipv6_mc_socklist), GFP_KERNEL);
 
@@ -186,33 +191,41 @@
 		return err;
 	}
 
-	write_lock_bh(&ipv6_sk_mc_lock);
+	spin_lock(&ipv6_sk_mc_lock);
 	mc_lst->next = np->ipv6_mc_list;
-	np->ipv6_mc_list = mc_lst;
-	write_unlock_bh(&ipv6_sk_mc_lock);
+	rcu_assign_pointer(np->ipv6_mc_list, mc_lst);
+	spin_unlock(&ipv6_sk_mc_lock);
 
 	rcu_read_unlock();
 
 	return 0;
 }
 
+static void ipv6_mc_socklist_reclaim(struct rcu_head *head)
+{
+	kfree(container_of(head, struct ipv6_mc_socklist, rcu));
+}
 /*
  *	socket leave on multicast group
  */
 int ipv6_sock_mc_drop(struct sock *sk, int ifindex, const struct in6_addr *addr)
 {
 	struct ipv6_pinfo *np = inet6_sk(sk);
-	struct ipv6_mc_socklist *mc_lst, **lnk;
+	struct ipv6_mc_socklist *mc_lst;
+	struct ipv6_mc_socklist __rcu **lnk;
 	struct net *net = sock_net(sk);
 
-	write_lock_bh(&ipv6_sk_mc_lock);
-	for (lnk = &np->ipv6_mc_list; (mc_lst = *lnk) !=NULL ; lnk = &mc_lst->next) {
+	spin_lock(&ipv6_sk_mc_lock);
+	for (lnk = &np->ipv6_mc_list;
+	     (mc_lst = rcu_dereference_protected(*lnk,
+			lockdep_is_held(&ipv6_sk_mc_lock))) != NULL;
+	      lnk = &mc_lst->next) {
 		if ((ifindex == 0 || mc_lst->ifindex == ifindex) &&
 		    ipv6_addr_equal(&mc_lst->addr, addr)) {
 			struct net_device *dev;
 
 			*lnk = mc_lst->next;
-			write_unlock_bh(&ipv6_sk_mc_lock);
+			spin_unlock(&ipv6_sk_mc_lock);
 
 			rcu_read_lock();
 			dev = dev_get_by_index_rcu(net, mc_lst->ifindex);
@@ -225,11 +238,12 @@
 			} else
 				(void) ip6_mc_leave_src(sk, mc_lst, NULL);
 			rcu_read_unlock();
-			sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
+			atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
+			call_rcu(&mc_lst->rcu, ipv6_mc_socklist_reclaim);
 			return 0;
 		}
 	}
-	write_unlock_bh(&ipv6_sk_mc_lock);
+	spin_unlock(&ipv6_sk_mc_lock);
 
 	return -EADDRNOTAVAIL;
 }
@@ -257,7 +271,7 @@
 		return NULL;
 	idev = __in6_dev_get(dev);
 	if (!idev)
-		return NULL;;
+		return NULL;
 	read_lock_bh(&idev->lock);
 	if (idev->dead) {
 		read_unlock_bh(&idev->lock);
@@ -272,12 +286,13 @@
 	struct ipv6_mc_socklist *mc_lst;
 	struct net *net = sock_net(sk);
 
-	write_lock_bh(&ipv6_sk_mc_lock);
-	while ((mc_lst = np->ipv6_mc_list) != NULL) {
+	spin_lock(&ipv6_sk_mc_lock);
+	while ((mc_lst = rcu_dereference_protected(np->ipv6_mc_list,
+				lockdep_is_held(&ipv6_sk_mc_lock))) != NULL) {
 		struct net_device *dev;
 
 		np->ipv6_mc_list = mc_lst->next;
-		write_unlock_bh(&ipv6_sk_mc_lock);
+		spin_unlock(&ipv6_sk_mc_lock);
 
 		rcu_read_lock();
 		dev = dev_get_by_index_rcu(net, mc_lst->ifindex);
@@ -290,11 +305,13 @@
 		} else
 			(void) ip6_mc_leave_src(sk, mc_lst, NULL);
 		rcu_read_unlock();
-		sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
 
-		write_lock_bh(&ipv6_sk_mc_lock);
+		atomic_sub(sizeof(*mc_lst), &sk->sk_omem_alloc);
+		call_rcu(&mc_lst->rcu, ipv6_mc_socklist_reclaim);
+
+		spin_lock(&ipv6_sk_mc_lock);
 	}
-	write_unlock_bh(&ipv6_sk_mc_lock);
+	spin_unlock(&ipv6_sk_mc_lock);
 }
 
 int ip6_mc_source(int add, int omode, struct sock *sk,
@@ -328,8 +345,7 @@
 
 	err = -EADDRNOTAVAIL;
 
-	read_lock(&ipv6_sk_mc_lock);
-	for (pmc=inet6->ipv6_mc_list; pmc; pmc=pmc->next) {
+	for_each_pmc_rcu(inet6, pmc) {
 		if (pgsr->gsr_interface && pmc->ifindex != pgsr->gsr_interface)
 			continue;
 		if (ipv6_addr_equal(&pmc->addr, group))
@@ -428,7 +444,6 @@
 done:
 	if (pmclocked)
 		write_unlock(&pmc->sflock);
-	read_unlock(&ipv6_sk_mc_lock);
 	read_unlock_bh(&idev->lock);
 	rcu_read_unlock();
 	if (leavegroup)
@@ -466,14 +481,13 @@
 	dev = idev->dev;
 
 	err = 0;
-	read_lock(&ipv6_sk_mc_lock);
 
 	if (gsf->gf_fmode == MCAST_INCLUDE && gsf->gf_numsrc == 0) {
 		leavegroup = 1;
 		goto done;
 	}
 
-	for (pmc=inet6->ipv6_mc_list; pmc; pmc=pmc->next) {
+	for_each_pmc_rcu(inet6, pmc) {
 		if (pmc->ifindex != gsf->gf_interface)
 			continue;
 		if (ipv6_addr_equal(&pmc->addr, group))
@@ -521,7 +535,6 @@
 	write_unlock(&pmc->sflock);
 	err = 0;
 done:
-	read_unlock(&ipv6_sk_mc_lock);
 	read_unlock_bh(&idev->lock);
 	rcu_read_unlock();
 	if (leavegroup)
@@ -562,7 +575,7 @@
 	 * so reading the list is safe.
 	 */
 
-	for (pmc=inet6->ipv6_mc_list; pmc; pmc=pmc->next) {
+	for_each_pmc_rcu(inet6, pmc) {
 		if (pmc->ifindex != gsf->gf_interface)
 			continue;
 		if (ipv6_addr_equal(group, &pmc->addr))
@@ -612,13 +625,13 @@
 	struct ip6_sf_socklist *psl;
 	int rv = 1;
 
-	read_lock(&ipv6_sk_mc_lock);
-	for (mc = np->ipv6_mc_list; mc; mc = mc->next) {
+	rcu_read_lock();
+	for_each_pmc_rcu(np, mc) {
 		if (ipv6_addr_equal(&mc->addr, mc_addr))
 			break;
 	}
 	if (!mc) {
-		read_unlock(&ipv6_sk_mc_lock);
+		rcu_read_unlock();
 		return 1;
 	}
 	read_lock(&mc->sflock);
@@ -638,7 +651,7 @@
 			rv = 0;
 	}
 	read_unlock(&mc->sflock);
-	read_unlock(&ipv6_sk_mc_lock);
+	rcu_read_unlock();
 
 	return rv;
 }
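
This file converts `ipv6_mc_list` from a big rwlock to RCU: readers walk the list under `rcu_read_lock()` via the new `for_each_pmc_rcu()`, writers serialize on the spinlock and publish with `rcu_assign_pointer()`, and removal defers the free through `call_rcu()` so a reader racing with a drop never touches freed memory. `sock_kfree_s()` is replaced by a manual `atomic_sub()` on `sk_omem_alloc` plus a deferred `kfree()`, since the socket may be gone by the time the grace period ends. A minimal sketch of the pattern:

	struct node {
		struct node __rcu *next;
		struct rcu_head rcu;
		int key;
	};

	static struct node __rcu *head;
	static DEFINE_SPINLOCK(list_lock);

	static void node_reclaim(struct rcu_head *h)
	{
		kfree(container_of(h, struct node, rcu));
	}

	static void node_del(int key)
	{
		struct node __rcu **lnk;
		struct node *n;

		spin_lock(&list_lock);
		for (lnk = &head;
		     (n = rcu_dereference_protected(*lnk,
				lockdep_is_held(&list_lock))) != NULL;
		     lnk = &n->next) {
			if (n->key == key) {
				*lnk = n->next;	/* unlink; readers in
						 * flight still see a
						 * valid node */
				spin_unlock(&list_lock);
				call_rcu(&n->rcu, node_reclaim);
				return;
			}
		}
		spin_unlock(&list_lock);
	}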
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 998d6d2..2342545 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -141,18 +141,18 @@
 	.proxy_redo =	pndisc_redo,
 	.id =		"ndisc_cache",
 	.parms = {
-		.tbl =			&nd_tbl,
-		.base_reachable_time =	30 * HZ,
-		.retrans_time =	 1 * HZ,
-		.gc_staletime =	60 * HZ,
-		.reachable_time =		30 * HZ,
-		.delay_probe_time =	 5 * HZ,
-		.queue_len =		 3,
-		.ucast_probes =	 3,
-		.mcast_probes =	 3,
-		.anycast_delay =	 1 * HZ,
-		.proxy_delay =		(8 * HZ) / 10,
-		.proxy_qlen =		64,
+		.tbl			= &nd_tbl,
+		.base_reachable_time	= ND_REACHABLE_TIME,
+		.retrans_time		= ND_RETRANS_TIMER,
+		.gc_staletime		= 60 * HZ,
+		.reachable_time		= ND_REACHABLE_TIME,
+		.delay_probe_time	= 5 * HZ,
+		.queue_len		= 3,
+		.ucast_probes		= 3,
+		.mcast_probes		= 3,
+		.anycast_delay		= 1 * HZ,
+		.proxy_delay		= (8 * HZ) / 10,
+		.proxy_qlen		= 64,
 	},
 	.gc_interval =	  30 * HZ,
 	.gc_thresh1 =	 128,
@@ -1259,7 +1259,8 @@
 	if (ra_msg->icmph.icmp6_hop_limit) {
 		in6_dev->cnf.hop_limit = ra_msg->icmph.icmp6_hop_limit;
 		if (rt)
-			rt->dst.metrics[RTAX_HOPLIMIT-1] = ra_msg->icmph.icmp6_hop_limit;
+			dst_metric_set(&rt->dst, RTAX_HOPLIMIT,
+				       ra_msg->icmph.icmp6_hop_limit);
 	}
 
 skip_defrtr:
@@ -1377,7 +1378,7 @@
 			in6_dev->cnf.mtu6 = mtu;
 
 			if (rt)
-				rt->dst.metrics[RTAX_MTU-1] = mtu;
+				dst_metric_set(&rt->dst, RTAX_MTU, mtu);
 
 			rt6_mtu_change(skb->dev, mtu);
 		}
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 7155b24..35915e8 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -18,10 +18,8 @@
 	struct flowi fl = {
 		.oif = skb->sk ? skb->sk->sk_bound_dev_if : 0,
 		.mark = skb->mark,
-		.nl_u =
-		{ .ip6_u =
-		  { .daddr = iph->daddr,
-		    .saddr = iph->saddr, } },
+		.fl6_dst = iph->daddr,
+		.fl6_src = iph->saddr,
 	};
 
 	dst = ip6_route_output(net, skb->sk, &fl);
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index 0a432c9..abfee91 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -11,13 +11,13 @@
 obj-$(CONFIG_IP6_NF_SECURITY) += ip6table_security.o
 
 # objects for l3 independent conntrack
-nf_conntrack_ipv6-objs  :=  nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o
+nf_conntrack_ipv6-y  :=  nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o
 
 # l3 independent conntrack
 obj-$(CONFIG_NF_CONNTRACK_IPV6) += nf_conntrack_ipv6.o nf_defrag_ipv6.o
 
 # defrag
-nf_defrag_ipv6-objs := nf_defrag_ipv6_hooks.o nf_conntrack_reasm.o
+nf_defrag_ipv6-y := nf_defrag_ipv6_hooks.o nf_conntrack_reasm.o
 obj-$(CONFIG_NF_DEFRAG_IPV6) += nf_defrag_ipv6.o
 
 # matches
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 51df035..4555823 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -1137,6 +1137,7 @@
 			private = &tmp;
 		}
 #endif
+		memset(&info, 0, sizeof(info));
 		info.valid_hooks = t->valid_hooks;
 		memcpy(info.hook_entry, private->hook_entry,
 		       sizeof(info.hook_entry));
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index 2933396..bf998fe 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -124,7 +124,7 @@
 	skb_reset_network_header(nskb);
 	ip6h = ipv6_hdr(nskb);
 	ip6h->version = 6;
-	ip6h->hop_limit = dst_metric(dst, RTAX_HOPLIMIT);
+	ip6h->hop_limit = ip6_dst_hoplimit(dst);
 	ip6h->nexthdr = IPPROTO_TCP;
 	ipv6_addr_copy(&ip6h->saddr, &oip6h->daddr);
 	ipv6_addr_copy(&ip6h->daddr, &oip6h->saddr);
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 3a3f129..79d43aa 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -286,7 +286,7 @@
 
 	/* Check for overlap with preceding fragment. */
 	if (prev &&
-	    (NFCT_FRAG6_CB(prev)->offset + prev->len) - offset > 0)
+	    (NFCT_FRAG6_CB(prev)->offset + prev->len) > offset)
 		goto discard_fq;
 
 	/* Look for overlap with succeeding segment. */
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index c7ba314..07beeb0 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -104,26 +104,22 @@
 unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
 			     const struct in6_addr *daddr, u32 rnd)
 {
-	u32 a, b, c;
+	u32 c;
 
-	a = (__force u32)saddr->s6_addr32[0];
-	b = (__force u32)saddr->s6_addr32[1];
-	c = (__force u32)saddr->s6_addr32[2];
+	c = jhash_3words((__force u32)saddr->s6_addr32[0],
+			 (__force u32)saddr->s6_addr32[1],
+			 (__force u32)saddr->s6_addr32[2],
+			 rnd);
 
-	a += JHASH_GOLDEN_RATIO;
-	b += JHASH_GOLDEN_RATIO;
-	c += rnd;
-	__jhash_mix(a, b, c);
+	c = jhash_3words((__force u32)saddr->s6_addr32[3],
+			 (__force u32)daddr->s6_addr32[0],
+			 (__force u32)daddr->s6_addr32[1],
+			 c);
 
-	a += (__force u32)saddr->s6_addr32[3];
-	b += (__force u32)daddr->s6_addr32[0];
-	c += (__force u32)daddr->s6_addr32[1];
-	__jhash_mix(a, b, c);
-
-	a += (__force u32)daddr->s6_addr32[2];
-	b += (__force u32)daddr->s6_addr32[3];
-	c += (__force u32)id;
-	__jhash_mix(a, b, c);
+	c = jhash_3words((__force u32)daddr->s6_addr32[2],
+			 (__force u32)daddr->s6_addr32[3],
+			 (__force u32)id,
+			 c);
 
 	return c & (INETFRAGS_HASHSZ - 1);
 }
@@ -349,7 +345,7 @@
 
 	/* Check for overlap with preceding fragment. */
 	if (prev &&
-	    (FRAG6_CB(prev)->offset + prev->len) - offset > 0)
+	    (FRAG6_CB(prev)->offset + prev->len) > offset)
 		goto discard_fq;
 
 	/* Look for overlap with succeeding segment. */
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 25661f9..98796b0 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -129,7 +129,6 @@
 		.__use		= 1,
 		.obsolete	= -1,
 		.error		= -ENETUNREACH,
-		.metrics	= { [RTAX_HOPLIMIT - 1] = 255, },
 		.input		= ip6_pkt_discard,
 		.output		= ip6_pkt_discard_out,
 	},
@@ -150,7 +149,6 @@
 		.__use		= 1,
 		.obsolete	= -1,
 		.error		= -EACCES,
-		.metrics	= { [RTAX_HOPLIMIT - 1] = 255, },
 		.input		= ip6_pkt_prohibit,
 		.output		= ip6_pkt_prohibit_out,
 	},
@@ -166,7 +164,6 @@
 		.__use		= 1,
 		.obsolete	= -1,
 		.error		= -EINVAL,
-		.metrics	= { [RTAX_HOPLIMIT - 1] = 255, },
 		.input		= dst_discard,
 		.output		= dst_discard,
 	},
@@ -188,11 +185,29 @@
 {
 	struct rt6_info *rt = (struct rt6_info *)dst;
 	struct inet6_dev *idev = rt->rt6i_idev;
+	struct inet_peer *peer = rt->rt6i_peer;
 
 	if (idev != NULL) {
 		rt->rt6i_idev = NULL;
 		in6_dev_put(idev);
 	}
+	if (peer) {
+		BUG_ON(!(rt->rt6i_flags & RTF_CACHE));
+		rt->rt6i_peer = NULL;
+		inet_putpeer(peer);
+	}
+}
+
+void rt6_bind_peer(struct rt6_info *rt, int create)
+{
+	struct inet_peer *peer;
+
+	if (WARN_ON(!(rt->rt6i_flags & RTF_CACHE)))
+		return;
+
+	peer = inet_getpeer_v6(&rt->rt6i_dst.addr, create);
+	if (peer && cmpxchg(&rt->rt6i_peer, NULL, peer) != NULL)
+		inet_putpeer(peer);
 }
 
 static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
@@ -558,11 +573,7 @@
 {
 	struct flowi fl = {
 		.oif = oif,
-		.nl_u = {
-			.ip6_u = {
-				.daddr = *daddr,
-			},
-		},
+		.fl6_dst = *daddr,
 	};
 	struct dst_entry *dst;
 	int flags = strict ? RT6_LOOKUP_F_IFACE : 0;
@@ -778,13 +789,9 @@
 	int flags = RT6_LOOKUP_F_HAS_SADDR;
 	struct flowi fl = {
 		.iif = skb->dev->ifindex,
-		.nl_u = {
-			.ip6_u = {
-				.daddr = iph->daddr,
-				.saddr = iph->saddr,
-				.flowlabel = (* (__be32 *) iph)&IPV6_FLOWINFO_MASK,
-			},
-		},
+		.fl6_dst = iph->daddr,
+		.fl6_src = iph->saddr,
+		.fl6_flowlabel = (*(__be32 *) iph) & IPV6_FLOWINFO_MASK,
 		.mark = skb->mark,
 		.proto = iph->nexthdr,
 	};
@@ -834,7 +841,7 @@
 		new->input = dst_discard;
 		new->output = dst_discard;
 
-		memcpy(new->metrics, ort->dst.metrics, RTAX_MAX*sizeof(u32));
+		dst_copy_metrics(new, &ort->dst);
 		new->dev = ort->dst.dev;
 		if (new->dev)
 			dev_hold(new->dev);
@@ -918,10 +925,12 @@
 	if (mtu < dst_mtu(dst) && rt6->rt6i_dst.plen == 128) {
 		rt6->rt6i_flags |= RTF_MODIFIED;
 		if (mtu < IPV6_MIN_MTU) {
+			u32 features = dst_metric(dst, RTAX_FEATURES);
 			mtu = IPV6_MIN_MTU;
-			dst->metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
+			features |= RTAX_FEATURE_ALLFRAG;
+			dst_metric_set(dst, RTAX_FEATURES, features);
 		}
-		dst->metrics[RTAX_MTU-1] = mtu;
+		dst_metric_set(dst, RTAX_MTU, mtu);
 		call_netevent_notifiers(NETEVENT_PMTU_UPDATE, dst);
 	}
 }
@@ -979,9 +988,9 @@
 	rt->rt6i_idev     = idev;
 	rt->rt6i_nexthop  = neigh;
 	atomic_set(&rt->dst.__refcnt, 1);
-	rt->dst.metrics[RTAX_HOPLIMIT-1] = 255;
-	rt->dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev);
-	rt->dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->dst));
+	dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255);
+	dst_metric_set(&rt->dst, RTAX_MTU, ipv6_get_mtu(rt->rt6i_dev));
+	dst_metric_set(&rt->dst, RTAX_ADVMSS, ipv6_advmss(net, dst_mtu(&rt->dst)));
 	rt->dst.output  = ip6_output;
 
 #if 0	/* there's no chance to use these for ndisc */
@@ -1095,8 +1104,8 @@
 
 int ip6_dst_hoplimit(struct dst_entry *dst)
 {
-	int hoplimit = dst_metric(dst, RTAX_HOPLIMIT);
-	if (hoplimit < 0) {
+	int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT);
+	if (hoplimit == 0) {
 		struct net_device *dev = dst->dev;
 		struct inet6_dev *idev;
 
@@ -1110,6 +1119,7 @@
 	}
 	return hoplimit;
 }
+EXPORT_SYMBOL(ip6_dst_hoplimit);
 
 /*
  *
@@ -1295,17 +1305,15 @@
 					goto out;
 				}
 
-				rt->dst.metrics[type - 1] = nla_get_u32(nla);
+				dst_metric_set(&rt->dst, type, nla_get_u32(nla));
 			}
 		}
 	}
 
-	if (dst_metric(&rt->dst, RTAX_HOPLIMIT) == 0)
-		rt->dst.metrics[RTAX_HOPLIMIT-1] = -1;
 	if (!dst_mtu(&rt->dst))
-		rt->dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(dev);
+		dst_metric_set(&rt->dst, RTAX_MTU, ipv6_get_mtu(dev));
 	if (!dst_metric(&rt->dst, RTAX_ADVMSS))
-		rt->dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->dst));
+		dst_metric_set(&rt->dst, RTAX_ADVMSS, ipv6_advmss(net, dst_mtu(&rt->dst)));
 	rt->dst.dev = dev;
 	rt->rt6i_idev = idev;
 	rt->rt6i_table = table;
@@ -1463,12 +1471,8 @@
 	struct ip6rd_flowi rdfl = {
 		.fl = {
 			.oif = dev->ifindex,
-			.nl_u = {
-				.ip6_u = {
-					.daddr = *dest,
-					.saddr = *src,
-				},
-			},
+			.fl6_dst = *dest,
+			.fl6_src = *src,
 		},
 	};
 
@@ -1535,9 +1539,9 @@
 	ipv6_addr_copy(&nrt->rt6i_gateway, (struct in6_addr*)neigh->primary_key);
 	nrt->rt6i_nexthop = neigh_clone(neigh);
 	/* Reset pmtu, it may be better */
-	nrt->dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(neigh->dev);
-	nrt->dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(dev_net(neigh->dev),
-							dst_mtu(&nrt->dst));
+	dst_metric_set(&nrt->dst, RTAX_MTU, ipv6_get_mtu(neigh->dev));
+	dst_metric_set(&nrt->dst, RTAX_ADVMSS, ipv6_advmss(dev_net(neigh->dev),
+							   dst_mtu(&nrt->dst)));
 
 	if (ip6_ins_rt(nrt))
 		goto out;
@@ -1596,9 +1600,12 @@
 	   would return automatically.
 	 */
 	if (rt->rt6i_flags & RTF_CACHE) {
-		rt->dst.metrics[RTAX_MTU-1] = pmtu;
-		if (allfrag)
-			rt->dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
+		dst_metric_set(&rt->dst, RTAX_MTU, pmtu);
+		if (allfrag) {
+			u32 features = dst_metric(&rt->dst, RTAX_FEATURES);
+			features |= RTAX_FEATURE_ALLFRAG;
+			dst_metric_set(&rt->dst, RTAX_FEATURES, features);
+		}
 		dst_set_expires(&rt->dst, net->ipv6.sysctl.ip6_rt_mtu_expires);
 		rt->rt6i_flags |= RTF_MODIFIED|RTF_EXPIRES;
 		goto out;
@@ -1615,9 +1622,12 @@
 		nrt = rt6_alloc_clone(rt, daddr);
 
 	if (nrt) {
-		nrt->dst.metrics[RTAX_MTU-1] = pmtu;
-		if (allfrag)
-			nrt->dst.metrics[RTAX_FEATURES-1] |= RTAX_FEATURE_ALLFRAG;
+		dst_metric_set(&nrt->dst, RTAX_MTU, pmtu);
+		if (allfrag) {
+			u32 features = dst_metric(&nrt->dst, RTAX_FEATURES);
+			features |= RTAX_FEATURE_ALLFRAG;
+			dst_metric_set(&nrt->dst, RTAX_FEATURES, features);
+		}
 
 		/* According to RFC 1981, detecting PMTU increase shouldn't be
 		 * happened within 5 mins, the recommended timer is 10 mins.
@@ -1668,7 +1678,7 @@
 		rt->dst.input = ort->dst.input;
 		rt->dst.output = ort->dst.output;
 
-		memcpy(rt->dst.metrics, ort->dst.metrics, RTAX_MAX*sizeof(u32));
+		dst_copy_metrics(&rt->dst, &ort->dst);
 		rt->dst.error = ort->dst.error;
 		rt->dst.dev = ort->dst.dev;
 		if (rt->dst.dev)
@@ -1945,8 +1955,12 @@
 	struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops);
 	struct neighbour *neigh;
 
-	if (rt == NULL)
+	if (rt == NULL) {
+		if (net_ratelimit())
+			pr_warning("IPv6: Maximum number of routes reached,"
+				   " consider increasing route/max_size.\n");
 		return ERR_PTR(-ENOMEM);
+	}
 
 	dev_hold(net->loopback_dev);
 	in6_dev_hold(idev);
@@ -1956,9 +1970,9 @@
 	rt->dst.output = ip6_output;
 	rt->rt6i_dev = net->loopback_dev;
 	rt->rt6i_idev = idev;
-	rt->dst.metrics[RTAX_MTU-1] = ipv6_get_mtu(rt->rt6i_dev);
-	rt->dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, dst_mtu(&rt->dst));
-	rt->dst.metrics[RTAX_HOPLIMIT-1] = -1;
+	dst_metric_set(&rt->dst, RTAX_MTU, ipv6_get_mtu(rt->rt6i_dev));
+	dst_metric_set(&rt->dst, RTAX_ADVMSS, ipv6_advmss(net, dst_mtu(&rt->dst)));
+	dst_metric_set(&rt->dst, RTAX_HOPLIMIT, -1);
 	rt->dst.obsolete = -1;
 
 	rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
@@ -2058,8 +2072,8 @@
 	    (dst_mtu(&rt->dst) >= arg->mtu ||
 	     (dst_mtu(&rt->dst) < arg->mtu &&
 	      dst_mtu(&rt->dst) == idev->cnf.mtu6))) {
-		rt->dst.metrics[RTAX_MTU-1] = arg->mtu;
-		rt->dst.metrics[RTAX_ADVMSS-1] = ipv6_advmss(net, arg->mtu);
+		dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu);
+		dst_metric_set(&rt->dst, RTAX_ADVMSS, ipv6_advmss(net, arg->mtu));
 	}
 	return 0;
 }
@@ -2285,7 +2299,7 @@
 			NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
 	}
 
-	if (rtnetlink_put_metrics(skb, rt->dst.metrics) < 0)
+	if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
 		goto nla_put_failure;
 
 	if (rt->dst.neighbour)
@@ -2461,8 +2475,6 @@
 
 #ifdef CONFIG_PROC_FS
 
-#define RT6_INFO_LEN (32 + 4 + 32 + 4 + 32 + 40 + 5 + 1)
-
 struct rt6_proc_arg
 {
 	char *buffer;
@@ -2678,6 +2690,7 @@
 	net->ipv6.ip6_null_entry->dst.path =
 		(struct dst_entry *)net->ipv6.ip6_null_entry;
 	net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
+	dst_metric_set(&net->ipv6.ip6_null_entry->dst, RTAX_HOPLIMIT, 255);
 
 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
 	net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
@@ -2688,6 +2701,7 @@
 	net->ipv6.ip6_prohibit_entry->dst.path =
 		(struct dst_entry *)net->ipv6.ip6_prohibit_entry;
 	net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
+	dst_metric_set(&net->ipv6.ip6_prohibit_entry->dst, RTAX_HOPLIMIT, 255);
 
 	net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
 					       sizeof(*net->ipv6.ip6_blk_hole_entry),
@@ -2697,6 +2711,7 @@
 	net->ipv6.ip6_blk_hole_entry->dst.path =
 		(struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
 	net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
+	dst_metric_set(&net->ipv6.ip6_blk_hole_entry->dst, RTAX_HOPLIMIT, 255);
 #endif
 
 	net->ipv6.sysctl.flush_delay = 0;
@@ -2741,6 +2756,7 @@
 	kfree(net->ipv6.ip6_prohibit_entry);
 	kfree(net->ipv6.ip6_blk_hole_entry);
 #endif
+	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
 }
 
 static struct pernet_operations ip6_route_net_ops = {
@@ -2832,5 +2848,6 @@
 	xfrm6_fini();
 	fib6_gc_cleanup();
 	unregister_pernet_subsys(&ip6_route_net_ops);
+	dst_entries_destroy(&ip6_dst_blackhole_ops);
 	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
 }
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index d6bfaec..8ce38f1 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -606,8 +606,9 @@
 		return 0;
 	}
 
-	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
+	/* no tunnel matched, let upstream know, IPsec may handle it */
 	rcu_read_unlock();
+	return 1;
 out:
 	kfree_skb(skb);
 	return 0;
@@ -730,10 +731,9 @@
 	}
 
 	{
-		struct flowi fl = { .nl_u = { .ip4_u =
-					      { .daddr = dst,
-						.saddr = tiph->saddr,
-						.tos = RT_TOS(tos) } },
+		struct flowi fl = { .fl4_dst = dst,
+				    .fl4_src = tiph->saddr,
+				    .fl4_tos = RT_TOS(tos),
 				    .oif = tunnel->parms.link,
 				    .proto = IPPROTO_IPV6 };
 		if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
@@ -855,10 +855,9 @@
 	iph = &tunnel->parms.iph;
 
 	if (iph->daddr) {
-		struct flowi fl = { .nl_u = { .ip4_u =
-					      { .daddr = iph->daddr,
-						.saddr = iph->saddr,
-						.tos = RT_TOS(iph->tos) } },
+		struct flowi fl = { .fl4_dst = iph->daddr,
+				    .fl4_src = iph->saddr,
+				    .fl4_tos = RT_TOS(iph->tos),
 				    .oif = tunnel->parms.link,
 				    .proto = IPPROTO_IPV6 };
 		struct rtable *rt;
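
Several hunks in this merge (sit.c here, and l2tp, ipvs, xt_TEE, rxrpc below) flatten the verbose nested-union flowi initializers. In the kernel the flat names are macros aliasing the same nested members, so the two spellings initialize identical objects. A self-contained model of the trick; struct flowi_model is a simplified layout, not the real struct flowi:

    #include <stdio.h>

    struct flowi_model {
        int oif;
        union {
            struct { unsigned int daddr, saddr; unsigned char tos; } ip4_u;
        } nl_u;
    };

    /* flat aliases for the nested members, as the kernel defines for flowi */
    #define fl4_dst nl_u.ip4_u.daddr
    #define fl4_src nl_u.ip4_u.saddr
    #define fl4_tos nl_u.ip4_u.tos

    int main(void)
    {
        /* old style: nested designated initializers */
        struct flowi_model a = { .nl_u = { .ip4_u =
                                 { .daddr = 1, .saddr = 2, .tos = 3 } } };
        /* new style: same members, flat spelling */
        struct flowi_model b = { .fl4_dst = 1, .fl4_src = 2, .fl4_tos = 3 };
        printf("%u %u\n", a.nl_u.ip4_u.daddr, b.fl4_dst);   /* 1 1 */
        return 0;
    }
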
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 7e41e2c..fee0768 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -130,6 +130,7 @@
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct in6_addr *saddr = NULL, *final_p, final;
+	struct rt6_info *rt;
 	struct flowi fl;
 	struct dst_entry *dst;
 	int addr_type;
@@ -280,6 +281,26 @@
 	sk->sk_gso_type = SKB_GSO_TCPV6;
 	__ip6_dst_store(sk, dst, NULL, NULL);
 
+	rt = (struct rt6_info *) dst;
+	if (tcp_death_row.sysctl_tw_recycle &&
+	    !tp->rx_opt.ts_recent_stamp &&
+	    ipv6_addr_equal(&rt->rt6i_dst.addr, &np->daddr)) {
+		struct inet_peer *peer = rt6_get_peer(rt);
+		/*
+		 * VJ's idea. We save last timestamp seen from
+		 * the destination in peer table, when entering state
+	 * TIME-WAIT, and initialize rx_opt.ts_recent from it,
+		 * when trying new connection.
+		 */
+		if (peer) {
+			inet_peer_refcheck(peer);
+			if ((u32)get_seconds() - peer->tcp_ts_stamp <= TCP_PAWS_MSL) {
+				tp->rx_opt.ts_recent_stamp = peer->tcp_ts_stamp;
+				tp->rx_opt.ts_recent = peer->tcp_ts;
+			}
+		}
+	}
+
 	icsk->icsk_ext_hdr_len = 0;
 	if (np->opt)
 		icsk->icsk_ext_hdr_len = (np->opt->opt_flen +
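
The block added to tcp_v6_connect() ports VJ's timestamp-recycling idea from IPv4: the last timestamp seen from a peer is cached in its inet_peer entry and reused for a new connection only while it is fresh. The freshness test uses unsigned 32-bit subtraction, which stays correct across wraparound; TCP_PAWS_MSL is 60 in this kernel. A standalone demonstration:

    #include <stdio.h>
    #include <stdint.h>

    #define TCP_PAWS_MSL 60   /* seconds a saved peer timestamp stays usable */

    static int ts_recent_usable(uint32_t now, uint32_t stamp)
    {
        /* unsigned difference is wraparound-safe */
        return (uint32_t)(now - stamp) <= TCP_PAWS_MSL;
    }

    int main(void)
    {
        printf("%d\n", ts_recent_usable(1000, 990));      /* 1: 10s old */
        printf("%d\n", ts_recent_usable(1000, 100));      /* 0: stale */
        printf("%d\n", ts_recent_usable(5, 0xFFFFFFF0u)); /* 1: 21s, across wrap */
        return 0;
    }
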
@@ -906,12 +927,6 @@
 };
 #endif
 
-static struct timewait_sock_ops tcp6_timewait_sock_ops = {
-	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
-	.twsk_unique	= tcp_twsk_unique,
-	.twsk_destructor= tcp_twsk_destructor,
-};
-
 static void __tcp_v6_send_check(struct sk_buff *skb,
 				struct in6_addr *saddr, struct in6_addr *daddr)
 {
@@ -1176,6 +1191,7 @@
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	__u32 isn = TCP_SKB_CB(skb)->when;
+	struct dst_entry *dst = NULL;
 #ifdef CONFIG_SYN_COOKIES
 	int want_cookie = 0;
 #else
@@ -1273,6 +1289,8 @@
 		TCP_ECN_create_request(req, tcp_hdr(skb));
 
 	if (!isn) {
+		struct inet_peer *peer = NULL;
+
 		if (ipv6_opt_accepted(sk, skb) ||
 		    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
 		    np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
@@ -1285,13 +1303,57 @@
 		if (!sk->sk_bound_dev_if &&
 		    ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
 			treq->iif = inet6_iif(skb);
-		if (!want_cookie) {
-			isn = tcp_v6_init_sequence(skb);
-		} else {
+
+		if (want_cookie) {
 			isn = cookie_v6_init_sequence(sk, skb, &req->mss);
 			req->cookie_ts = tmp_opt.tstamp_ok;
+			goto have_isn;
 		}
+
+		/* VJ's idea. We save last timestamp seen
+		 * from the destination in peer table, when entering
+		 * state TIME-WAIT, and check against it before
+		 * accepting new connection request.
+		 *
+		 * If "isn" is not zero, this request hit alive
+		 * timewait bucket, so that all the necessary checks
+		 * are made in the function processing timewait state.
+		 */
+		if (tmp_opt.saw_tstamp &&
+		    tcp_death_row.sysctl_tw_recycle &&
+		    (dst = inet6_csk_route_req(sk, req)) != NULL &&
+		    (peer = rt6_get_peer((struct rt6_info *)dst)) != NULL &&
+		    ipv6_addr_equal((struct in6_addr *)peer->daddr.a6,
+				    &treq->rmt_addr)) {
+			inet_peer_refcheck(peer);
+			if ((u32)get_seconds() - peer->tcp_ts_stamp < TCP_PAWS_MSL &&
+			    (s32)(peer->tcp_ts - req->ts_recent) >
+							TCP_PAWS_WINDOW) {
+				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
+				goto drop_and_release;
+			}
+		}
+		/* Kill the following clause if you dislike this approach. */
+		else if (!sysctl_tcp_syncookies &&
+			 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
+			  (sysctl_max_syn_backlog >> 2)) &&
+			 (!peer || !peer->tcp_ts_stamp) &&
+			 (!dst || !dst_metric(dst, RTAX_RTT))) {
+			/* Without syncookies, the last quarter of
+			 * the backlog is filled with destinations
+			 * proven to be alive.
+			 * It means that we continue to communicate
+			 * with destinations already remembered at
+			 * the moment of the synflood.
+			 */
+			LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
+				       &treq->rmt_addr, ntohs(tcp_hdr(skb)->source));
+			goto drop_and_release;
+		}
+
+		isn = tcp_v6_init_sequence(skb);
 	}
+have_isn:
 	tcp_rsk(req)->snt_isn = isn;
 
 	security_inet_conn_request(sk, skb, req);
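
The branch above is the classic anti-synflood fallback for hosts running without syncookies: once the SYN backlog is into its last quarter, requests from destinations with no recorded proof of liveness are dropped. The arithmetic in isolation (a sketch; sysctl names shortened):

    #include <stdio.h>

    /* returns 1 when the backlog is into its last quarter */
    static int backlog_nearly_full(int max_syn_backlog, int queue_len)
    {
        return max_syn_backlog - queue_len < (max_syn_backlog >> 2);
    }

    int main(void)
    {
        printf("%d\n", backlog_nearly_full(256, 100));  /* 0: 156 slots left */
        printf("%d\n", backlog_nearly_full(256, 200));  /* 1: only 56 < 64 left */
        return 0;
    }
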
@@ -1304,6 +1366,8 @@
 	inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
 	return 0;
 
+drop_and_release:
+	dst_release(dst);
 drop_and_free:
 	reqsk_free(req);
 drop:
@@ -1382,28 +1446,9 @@
 	if (sk_acceptq_is_full(sk))
 		goto out_overflow;
 
-	if (dst == NULL) {
-		struct in6_addr *final_p, final;
-		struct flowi fl;
-
-		memset(&fl, 0, sizeof(fl));
-		fl.proto = IPPROTO_TCP;
-		ipv6_addr_copy(&fl.fl6_dst, &treq->rmt_addr);
-		final_p = fl6_update_dst(&fl, opt, &final);
-		ipv6_addr_copy(&fl.fl6_src, &treq->loc_addr);
-		fl.oif = sk->sk_bound_dev_if;
-		fl.mark = sk->sk_mark;
-		fl.fl_ip_dport = inet_rsk(req)->rmt_port;
-		fl.fl_ip_sport = inet_rsk(req)->loc_port;
-		security_req_classify_flow(req, &fl);
-
-		if (ip6_dst_lookup(sk, &dst, &fl))
-			goto out;
-
-		if (final_p)
-			ipv6_addr_copy(&fl.fl6_dst, final_p);
-
-		if ((xfrm_lookup(sock_net(sk), &dst, &fl, sk, 0)) < 0)
+	if (!dst) {
+		dst = inet6_csk_route_req(sk, req);
+		if (!dst)
 			goto out;
 	}
 
@@ -1818,19 +1863,51 @@
 	goto discard_it;
 }
 
-static int tcp_v6_remember_stamp(struct sock *sk)
+static struct inet_peer *tcp_v6_get_peer(struct sock *sk, bool *release_it)
 {
-	/* Alas, not yet... */
-	return 0;
+	struct rt6_info *rt = (struct rt6_info *) __sk_dst_get(sk);
+	struct ipv6_pinfo *np = inet6_sk(sk);
+	struct inet_peer *peer;
+
+	if (!rt ||
+	    !ipv6_addr_equal(&np->daddr, &rt->rt6i_dst.addr)) {
+		peer = inet_getpeer_v6(&np->daddr, 1);
+		*release_it = true;
+	} else {
+		if (!rt->rt6i_peer)
+			rt6_bind_peer(rt, 1);
+		peer = rt->rt6i_peer;
+		*release_it = false;
+	}
+
+	return peer;
 }
 
+static void *tcp_v6_tw_get_peer(struct sock *sk)
+{
+	struct inet6_timewait_sock *tw6 = inet6_twsk(sk);
+	struct inet_timewait_sock *tw = inet_twsk(sk);
+
+	if (tw->tw_family == AF_INET)
+		return tcp_v4_tw_get_peer(sk);
+
+	return inet_getpeer_v6(&tw6->tw_v6_daddr, 1);
+}
+
+static struct timewait_sock_ops tcp6_timewait_sock_ops = {
+	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
+	.twsk_unique	= tcp_twsk_unique,
+	.twsk_destructor= tcp_twsk_destructor,
+	.twsk_getpeer	= tcp_v6_tw_get_peer,
+};
+
 static const struct inet_connection_sock_af_ops ipv6_specific = {
 	.queue_xmit	   = inet6_csk_xmit,
 	.send_check	   = tcp_v6_send_check,
 	.rebuild_header	   = inet6_sk_rebuild_header,
 	.conn_request	   = tcp_v6_conn_request,
 	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
-	.remember_stamp	   = tcp_v6_remember_stamp,
+	.get_peer	   = tcp_v6_get_peer,
 	.net_header_len	   = sizeof(struct ipv6hdr),
 	.setsockopt	   = ipv6_setsockopt,
 	.getsockopt	   = ipv6_getsockopt,
@@ -1862,7 +1939,7 @@
 	.rebuild_header	   = inet_sk_rebuild_header,
 	.conn_request	   = tcp_v6_conn_request,
 	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
-	.remember_stamp	   = tcp_v4_remember_stamp,
+	.get_peer	   = tcp_v4_get_peer,
 	.net_header_len	   = sizeof(struct iphdr),
 	.setsockopt	   = ipv6_setsockopt,
 	.getsockopt	   = ipv6_getsockopt,
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 91def93..26a8da3 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -54,8 +54,8 @@
 {
 	const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
 	const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
-	__be32 sk1_rcv_saddr = inet_sk(sk)->inet_rcv_saddr;
-	__be32 sk2_rcv_saddr = inet_rcv_saddr(sk2);
+	__be32 sk1_rcv_saddr = sk_rcv_saddr(sk);
+	__be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
 	int sk_ipv6only = ipv6_only_sock(sk);
 	int sk2_ipv6only = inet_v6_ipv6only(sk2);
 	int addr_type = ipv6_addr_type(sk_rcv_saddr6);
@@ -227,7 +227,7 @@
 
 	if (result) {
 exact_match:
-		if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt)))
+		if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
 			result = NULL;
 		else if (unlikely(compute_score2(result, net, saddr, sport,
 				  daddr, hnum, dif) < badness)) {
@@ -294,7 +294,7 @@
 		goto begin;
 
 	if (result) {
-		if (unlikely(!atomic_inc_not_zero(&result->sk_refcnt)))
+		if (unlikely(!atomic_inc_not_zero_hint(&result->sk_refcnt, 2)))
 			result = NULL;
 		else if (unlikely(compute_score(result, net, hnum, saddr, sport,
 					daddr, dport, dif) < badness)) {
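
Both UDP lookup paths now take their reference with atomic_inc_not_zero_hint(): the semantics match atomic_inc_not_zero(), never resurrect a zero refcount, but the compare-and-swap loop is seeded with the expected current value (2 here) so the common case can skip an initial load. The semantics modeled with C11 atomics:

    #include <stdio.h>
    #include <stdatomic.h>

    /* increment iff the counter is non-zero; 'hint' guesses the current value */
    static int inc_not_zero_hint(atomic_int *v, int hint)
    {
        int c = hint;
        do {
            if (c == 0)
                return 0;   /* object is being freed: don't take a reference */
        } while (!atomic_compare_exchange_weak(v, &c, c + 1));
        return 1;
    }

    int main(void)
    {
        atomic_int refcnt = 2;
        printf("%d refcnt=%d\n", inc_not_zero_hint(&refcnt, 2), (int)refcnt); /* 1 3 */
        atomic_int dead = 0;
        printf("%d\n", inc_not_zero_hint(&dead, 2));  /* 0: CAS reloads, sees 0 */
        return 0;
    }
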
@@ -602,7 +602,7 @@
 
 		sk = stack[i];
 		if (skb1) {
-			if (sk_rcvqueues_full(sk, skb)) {
+			if (sk_rcvqueues_full(sk, skb1)) {
 				kfree_skb(skb1);
 				goto drop;
 			}
diff --git a/net/ipv6/xfrm6_mode_tunnel.c b/net/ipv6/xfrm6_mode_tunnel.c
index b809812..645cb96 100644
--- a/net/ipv6/xfrm6_mode_tunnel.c
+++ b/net/ipv6/xfrm6_mode_tunnel.c
@@ -14,6 +14,7 @@
 #include <net/dsfield.h>
 #include <net/dst.h>
 #include <net/inet_ecn.h>
+#include <net/ip6_route.h>
 #include <net/ipv6.h>
 #include <net/xfrm.h>
 
@@ -53,7 +54,7 @@
 	if (x->props.flags & XFRM_STATE_NOECN)
 		dsfield &= ~INET_ECN_MASK;
 	ipv6_change_dsfield(top_iph, 0, dsfield);
-	top_iph->hop_limit = dst_metric(dst->child, RTAX_HOPLIMIT);
+	top_iph->hop_limit = ip6_dst_hoplimit(dst->child);
 	ipv6_addr_copy(&top_iph->saddr, (struct in6_addr *)&x->props.saddr);
 	ipv6_addr_copy(&top_iph->daddr, (struct in6_addr *)&x->id.daddr);
 	return 0;
diff --git a/net/irda/ircomm/Makefile b/net/irda/ircomm/Makefile
index 4868945..ab23b5b 100644
--- a/net/irda/ircomm/Makefile
+++ b/net/irda/ircomm/Makefile
@@ -4,5 +4,5 @@
 
 obj-$(CONFIG_IRCOMM) += ircomm.o ircomm-tty.o
 
-ircomm-objs := ircomm_core.o ircomm_event.o ircomm_lmp.o ircomm_ttp.o
-ircomm-tty-objs := ircomm_tty.o ircomm_tty_attach.o ircomm_tty_ioctl.o ircomm_param.o
+ircomm-y := ircomm_core.o ircomm_event.o ircomm_lmp.o ircomm_ttp.o
+ircomm-tty-y := ircomm_tty.o ircomm_tty_attach.o ircomm_tty_ioctl.o ircomm_param.o
diff --git a/net/irda/irlan/Makefile b/net/irda/irlan/Makefile
index 77549bc..94eefbc 100644
--- a/net/irda/irlan/Makefile
+++ b/net/irda/irlan/Makefile
@@ -4,4 +4,4 @@
 
 obj-$(CONFIG_IRLAN) += irlan.o
 
-irlan-objs := irlan_common.o irlan_eth.o irlan_event.o irlan_client.o irlan_provider.o irlan_filter.o irlan_provider_event.o irlan_client_event.o
+irlan-y := irlan_common.o irlan_eth.o irlan_event.o irlan_client.o irlan_provider.o irlan_filter.o irlan_provider_event.o irlan_client_event.o
diff --git a/net/irda/irnet/Makefile b/net/irda/irnet/Makefile
index b3ee01e..61c365c 100644
--- a/net/irda/irnet/Makefile
+++ b/net/irda/irnet/Makefile
@@ -4,4 +4,4 @@
 
 obj-$(CONFIG_IRNET) += irnet.o
 
-irnet-objs := irnet_ppp.o irnet_irda.o
+irnet-y := irnet_ppp.o irnet_irda.o
diff --git a/net/irda/irttp.c b/net/irda/irttp.c
index 285761e..f6054f9 100644
--- a/net/irda/irttp.c
+++ b/net/irda/irttp.c
@@ -550,22 +550,30 @@
  */
 int irttp_udata_request(struct tsap_cb *self, struct sk_buff *skb)
 {
+	int ret;
+
 	IRDA_ASSERT(self != NULL, return -1;);
 	IRDA_ASSERT(self->magic == TTP_TSAP_MAGIC, return -1;);
 	IRDA_ASSERT(skb != NULL, return -1;);
 
 	IRDA_DEBUG(4, "%s()\n", __func__);
 
+	/* Take shortcut on zero byte packets */
+	if (skb->len == 0) {
+		ret = 0;
+		goto err;
+	}
+
 	/* Check that nothing bad happens */
-	if ((skb->len == 0) || (!self->connected)) {
-		IRDA_DEBUG(1, "%s(), No data, or not connected\n",
-			   __func__);
+	if (!self->connected) {
+		IRDA_WARNING("%s(), Not connected\n", __func__);
+		ret = -ENOTCONN;
 		goto err;
 	}
 
 	if (skb->len > self->max_seg_size) {
-		IRDA_DEBUG(1, "%s(), UData is too large for IrLAP!\n",
-			   __func__);
+		IRDA_ERROR("%s(), UData is too large for IrLAP!\n", __func__);
+		ret = -EMSGSIZE;
 		goto err;
 	}
 
@@ -576,7 +584,7 @@
 
 err:
 	dev_kfree_skb(skb);
-	return -1;
+	return ret;
 }
 EXPORT_SYMBOL(irttp_udata_request);
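
The irttp hunks stop collapsing every failure into -1: a zero-length packet is now silently consumed (return 0), while real errors return distinguishable errno values. A trimmed-down model of the new control flow; MAX_SEG_SIZE and the function shape are illustrative:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    #define MAX_SEG_SIZE 64

    static int udata_request(int connected, size_t len)
    {
        if (len == 0)
            return 0;              /* shortcut: nothing to send, not an error */
        if (!connected)
            return -ENOTCONN;
        if (len > MAX_SEG_SIZE)
            return -EMSGSIZE;
        return 0;                  /* would queue the frame here */
    }

    int main(void)
    {
        int ret = udata_request(0, 10);
        if (ret < 0)
            printf("send failed: %s\n", strerror(-ret));
        return 0;
    }
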
 
@@ -599,9 +607,15 @@
 	IRDA_DEBUG(2, "%s() : queue len = %d\n", __func__,
 		   skb_queue_len(&self->tx_queue));
 
+	/* Take shortcut on zero byte packets */
+	if (skb->len == 0) {
+		ret = 0;
+		goto err;
+	}
+
 	/* Check that nothing bad happens */
-	if ((skb->len == 0) || (!self->connected)) {
-		IRDA_WARNING("%s: No data, or not connected\n", __func__);
+	if (!self->connected) {
+		IRDA_WARNING("%s: Not connected\n", __func__);
 		ret = -ENOTCONN;
 		goto err;
 	}
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
index 104ec3b..b8dbae8 100644
--- a/net/l2tp/l2tp_debugfs.c
+++ b/net/l2tp/l2tp_debugfs.c
@@ -249,7 +249,7 @@
 	struct seq_file *seq;
 	int rc = -ENOMEM;
 
-	pd = kzalloc(GFP_KERNEL, sizeof(*pd));
+	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
 	if (pd == NULL)
 		goto out;
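
The l2tp_debugfs one-liner fixes a swapped-argument bug: kzalloc() takes (size, flags), and the broken call compiled cleanly because both parameters are integral types; it simply allocated GFP_KERNEL's numeric value worth of bytes and passed the structure size as allocation flags. A userspace illustration of why the compiler stays quiet; the kzalloc stub, struct pd, and the GFP_KERNEL value are stand-ins:

    #include <stdio.h>
    #include <stdlib.h>

    #define GFP_KERNEL 0xd0u   /* illustrative flag value */

    /* toy kzalloc: (size, flags); flags ignored here */
    static void *kzalloc(size_t size, unsigned int flags)
    {
        (void)flags;
        return calloc(1, size);
    }

    struct pd { char name[128]; int idx; };

    int main(void)
    {
        /* swapped: allocates 0xd0 bytes, passes the size as flags */
        void *bad  = kzalloc(GFP_KERNEL, sizeof(struct pd));
        /* fixed: the intended size, the intended flags */
        void *good = kzalloc(sizeof(struct pd), GFP_KERNEL);
        printf("both compile; only the second has the intended size\n");
        free(bad); free(good);
        return 0;
    }
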
 
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 0bf6a59..110efb7 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -476,15 +476,13 @@
 
 		{
 			struct flowi fl = { .oif = sk->sk_bound_dev_if,
-					    .nl_u = { .ip4_u = {
-							.daddr = daddr,
-							.saddr = inet->inet_saddr,
-							.tos = RT_CONN_FLAGS(sk) } },
+					    .fl4_dst = daddr,
+					    .fl4_src = inet->inet_saddr,
+					    .fl4_tos = RT_CONN_FLAGS(sk),
 					    .proto = sk->sk_protocol,
 					    .flags = inet_sk_flowi_flags(sk),
-					    .uli_u = { .ports = {
-							 .sport = inet->inet_sport,
-							 .dport = inet->inet_dport } } };
+					    .fl_ip_sport = inet->inet_sport,
+					    .fl_ip_dport = inet->inet_dport };
 
 			/* If this fails, retransmit mechanism of transport layer will
 			 * keep trying until route appears or the connection times
@@ -674,4 +672,8 @@
 MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
 MODULE_DESCRIPTION("L2TP over IP");
 MODULE_VERSION("1.0");
-MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, SOCK_DGRAM, IPPROTO_L2TP);
+
+/* Use the value of SOCK_DGRAM (2) directly, because __stringify doesn't like
+ * enums.
+ */
+MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 2, IPPROTO_L2TP);
diff --git a/net/lapb/Makefile b/net/lapb/Makefile
index 53f7c90..fff797d 100644
--- a/net/lapb/Makefile
+++ b/net/lapb/Makefile
@@ -4,4 +4,4 @@
 
 obj-$(CONFIG_LAPB) += lapb.o
 
-lapb-objs := lapb_in.o lapb_out.o lapb_subr.o lapb_timer.o lapb_iface.o
+lapb-y := lapb_in.o lapb_out.o lapb_subr.o lapb_timer.o lapb_iface.o
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 5826129..dfd3a64 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -316,9 +316,9 @@
 	if (unlikely(addr->sllc_family != AF_LLC))
 		goto out;
 	rc = -ENODEV;
-	rtnl_lock();
+	rcu_read_lock();
 	if (sk->sk_bound_dev_if) {
-		llc->dev = dev_get_by_index(&init_net, sk->sk_bound_dev_if);
+		llc->dev = dev_get_by_index_rcu(&init_net, sk->sk_bound_dev_if);
 		if (llc->dev) {
 			if (!addr->sllc_arphrd)
 				addr->sllc_arphrd = llc->dev->type;
@@ -329,14 +329,15 @@
 			    !llc_mac_match(addr->sllc_mac,
 					   llc->dev->dev_addr)) {
 				rc = -EINVAL;
-				dev_put(llc->dev);
 				llc->dev = NULL;
 			}
 		}
 	} else
-		llc->dev = dev_getbyhwaddr(&init_net, addr->sllc_arphrd,
+		llc->dev = dev_getbyhwaddr_rcu(&init_net, addr->sllc_arphrd,
 					   addr->sllc_mac);
-	rtnl_unlock();
+	if (llc->dev)
+		dev_hold(llc->dev);
+	rcu_read_unlock();
 	if (!llc->dev)
 		goto out;
 	if (!addr->sllc_sap) {
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 85dabb8..32fcbe2 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -173,9 +173,11 @@
 			     outdev, &elem, okfn, hook_thresh);
 	if (verdict == NF_ACCEPT || verdict == NF_STOP) {
 		ret = 1;
-	} else if (verdict == NF_DROP) {
+	} else if ((verdict & NF_VERDICT_MASK) == NF_DROP) {
 		kfree_skb(skb);
-		ret = -EPERM;
+		ret = -(verdict >> NF_VERDICT_BITS);
+		if (ret == 0)
+			ret = -EPERM;
 	} else if ((verdict & NF_VERDICT_MASK) == NF_QUEUE) {
 		if (!nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
 			      verdict >> NF_VERDICT_BITS))
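
nf_hook_slow() can now return a hook-supplied errno: a verdict may carry a negated errno in its upper bits, and only a bare NF_DROP keeps the historic -EPERM. A self-contained model using the constants as defined in this kernel (NF_VERDICT_BITS == 16); the nf_drop_err() helper is illustrative, not a kernel API of this vintage:

    #include <stdio.h>

    #define NF_DROP          0
    #define NF_VERDICT_MASK  0x0000ffff
    #define NF_VERDICT_BITS  16
    #define EPERM            1
    #define EHOSTUNREACH     113

    /* what a hook would build: drop with a specific errno */
    static unsigned int nf_drop_err(int err)
    {
        return ((unsigned int)(-err) << NF_VERDICT_BITS) | NF_DROP;
    }

    static int unpack(unsigned int verdict)
    {
        if ((verdict & NF_VERDICT_MASK) == NF_DROP) {
            int ret = -(int)(verdict >> NF_VERDICT_BITS);
            return ret ? ret : -EPERM;   /* plain NF_DROP keeps old behaviour */
        }
        return 1;
    }

    int main(void)
    {
        printf("%d\n", unpack(NF_DROP));                     /* -1  (-EPERM) */
        printf("%d\n", unpack(nf_drop_err(-EHOSTUNREACH)));  /* -113 */
        return 0;
    }
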
diff --git a/net/netfilter/ipvs/Kconfig b/net/netfilter/ipvs/Kconfig
index a22dac2..70bd1d0 100644
--- a/net/netfilter/ipvs/Kconfig
+++ b/net/netfilter/ipvs/Kconfig
@@ -4,6 +4,7 @@
 menuconfig IP_VS
 	tristate "IP virtual server support"
 	depends on NET && INET && NETFILTER
+	depends on (NF_CONNTRACK || NF_CONNTRACK=n)
 	---help---
 	  IP Virtual Server support will let you build a high-performance
 	  virtual server based on cluster of two or more real servers. This
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 5f5daa3..c6f2936 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -110,10 +110,8 @@
 	struct rt6_info *rt;
 	struct flowi fl = {
 		.oif = 0,
-		.nl_u = {
-			.ip6_u = {
-				.daddr = *addr,
-				.saddr = { .s6_addr32 = {0, 0, 0, 0} }, } },
+		.fl6_dst = *addr,
+		.fl6_src = { .s6_addr32 = {0, 0, 0, 0} },
 	};
 
 	rt = (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl);
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index de04ea3..5325a3f 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -96,12 +96,8 @@
 		if (!(rt = (struct rtable *)
 		      __ip_vs_dst_check(dest, rtos))) {
 			struct flowi fl = {
-				.oif = 0,
-				.nl_u = {
-					.ip4_u = {
-						.daddr = dest->addr.ip,
-						.saddr = 0,
-						.tos = rtos, } },
+				.fl4_dst = dest->addr.ip,
+				.fl4_tos = rtos,
 			};
 
 			if (ip_route_output_key(net, &rt, &fl)) {
@@ -118,12 +114,8 @@
 		spin_unlock(&dest->dst_lock);
 	} else {
 		struct flowi fl = {
-			.oif = 0,
-			.nl_u = {
-				.ip4_u = {
-					.daddr = daddr,
-					.saddr = 0,
-					.tos = rtos, } },
+			.fl4_dst = daddr,
+			.fl4_tos = rtos,
 		};
 
 		if (ip_route_output_key(net, &rt, &fl)) {
@@ -169,7 +161,7 @@
 	struct net *net = dev_net(dev);
 	struct iphdr *iph = ip_hdr(skb);
 
-	if (rt->fl.iif) {
+	if (rt_is_input_route(rt)) {
 		unsigned long orefdst = skb->_skb_refdst;
 
 		if (ip_route_input(skb, iph->daddr, iph->saddr,
@@ -178,14 +170,9 @@
 		refdst_drop(orefdst);
 	} else {
 		struct flowi fl = {
-			.oif = 0,
-			.nl_u = {
-				.ip4_u = {
-					.daddr = iph->daddr,
-					.saddr = iph->saddr,
-					.tos = RT_TOS(iph->tos),
-				}
-			},
+			.fl4_dst = iph->daddr,
+			.fl4_src = iph->saddr,
+			.fl4_tos = RT_TOS(iph->tos),
 			.mark = skb->mark,
 		};
 		struct rtable *rt;
@@ -216,12 +203,7 @@
 {
 	struct dst_entry *dst;
 	struct flowi fl = {
-		.oif = 0,
-		.nl_u = {
-			.ip6_u = {
-				.daddr = *daddr,
-			},
-		},
+		.fl6_dst = *daddr,
 	};
 
 	dst = ip6_route_output(net, NULL, &fl);
@@ -552,7 +534,8 @@
 #endif
 
 	/* From world but DNAT to loopback address? */
-	if (local && ipv4_is_loopback(rt->rt_dst) && skb_rtable(skb)->fl.iif) {
+	if (local && ipv4_is_loopback(rt->rt_dst) &&
+	    rt_is_input_route(skb_rtable(skb))) {
 		IP_VS_DBG_RL_PKT(1, AF_INET, pp, skb, 0, "ip_vs_nat_xmit(): "
 				 "stopping DNAT to loopback address");
 		goto tx_error_put;
@@ -1165,7 +1148,8 @@
 #endif
 
 	/* From world but DNAT to loopback address? */
-	if (local && ipv4_is_loopback(rt->rt_dst) && skb_rtable(skb)->fl.iif) {
+	if (local && ipv4_is_loopback(rt->rt_dst) &&
+	    rt_is_input_route(skb_rtable(skb))) {
 		IP_VS_DBG(1, "%s(): "
 			  "stopping DNAT to loopback %pI4\n",
 			  __func__, &cp->daddr.ip);
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 1eacf8d..27a5ea6 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1312,7 +1312,8 @@
 	if (!hash) {
 		*vmalloced = 1;
 		printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
-		hash = __vmalloc(sz, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
+		hash = __vmalloc(sz, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
+				 PAGE_KERNEL);
 	}
 
 	if (hash && nulls)
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index ed6d929..dc7bb74 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -292,6 +292,12 @@
 
 		for (i = 0; i < MAX_NF_CT_PROTO; i++)
 			proto_array[i] = &nf_conntrack_l4proto_generic;
+
+		/* Before making proto_array visible to lockless readers,
+		 * we must make sure its content is committed to memory.
+		 */
+		smp_wmb();
+
 		nf_ct_protos[l4proto->l3proto] = proto_array;
 	} else if (nf_ct_protos[l4proto->l3proto][l4proto->l4proto] !=
 					&nf_conntrack_l4proto_generic) {
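
The smp_wmb() added to nf_conntrack_proto.c orders two stores: the proto_array slots must be visible before the nf_ct_protos[] pointer that publishes them, because readers traverse without locks. In C11 terms the publish is a release store, which readers would pair with an acquire or dependency-ordered load; a sketch:

    #include <stdio.h>
    #include <stdatomic.h>

    #define MAX_PROTO 4

    static int generic_proto;                 /* stand-in default element */
    static int *proto_array_storage[MAX_PROTO];
    static _Atomic(int **) published = NULL;  /* what lockless readers load */

    static void publish(void)
    {
        for (int i = 0; i < MAX_PROTO; i++)
            proto_array_storage[i] = &generic_proto;
        /* release: array contents commit to memory before the pointer does */
        atomic_store_explicit(&published, proto_array_storage,
                              memory_order_release);
    }

    int main(void)
    {
        publish();
        int **arr = atomic_load_explicit(&published, memory_order_acquire);
        if (arr)
            printf("slot 0 initialised: %p\n", (void *)arr[0]);
        return 0;
    }
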
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
index 22a2d42..5128a6c 100644
--- a/net/netfilter/xt_TEE.c
+++ b/net/netfilter/xt_TEE.c
@@ -70,9 +70,9 @@
 			return false;
 		fl.oif = info->priv->oif;
 	}
-	fl.nl_u.ip4_u.daddr = info->gw.ip;
-	fl.nl_u.ip4_u.tos   = RT_TOS(iph->tos);
-	fl.nl_u.ip4_u.scope = RT_SCOPE_UNIVERSE;
+	fl.fl4_dst = info->gw.ip;
+	fl.fl4_tos = RT_TOS(iph->tos);
+	fl.fl4_scope = RT_SCOPE_UNIVERSE;
 	if (ip_route_output_key(net, &rt, &fl) != 0)
 		return false;
 
@@ -150,9 +150,9 @@
 			return false;
 		fl.oif = info->priv->oif;
 	}
-	fl.nl_u.ip6_u.daddr = info->gw.in6;
-	fl.nl_u.ip6_u.flowlabel = ((iph->flow_lbl[0] & 0xF) << 16) |
-				  (iph->flow_lbl[1] << 8) | iph->flow_lbl[2];
+	fl.fl6_dst = info->gw.in6;
+	fl.fl6_flowlabel = ((iph->flow_lbl[0] & 0xF) << 16) |
+			   (iph->flow_lbl[1] << 8) | iph->flow_lbl[2];
 	dst = ip6_route_output(net, NULL, &fl);
 	if (dst == NULL)
 		return false;
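
The IPv6 branch of xt_TEE rebuilds the 20-bit flow label from the ipv6hdr's three flow_lbl bytes: the low nibble of the first byte supplies bits 19-16 (the high nibble belongs to the traffic class), followed by two full bytes. Extracted as a standalone helper:

    #include <stdio.h>
    #include <stdint.h>

    /* IPv6 stores the 20-bit flow label as 4+8+8 bits across 3 bytes */
    static uint32_t flow_label(const uint8_t flow_lbl[3])
    {
        return ((uint32_t)(flow_lbl[0] & 0xF) << 16) |
               ((uint32_t)flow_lbl[1] << 8) |
               flow_lbl[2];
    }

    int main(void)
    {
        uint8_t lbl[3] = { 0xFA, 0xBC, 0xDE };  /* top 4 bits: traffic class */
        printf("0x%05x\n", flow_label(lbl));    /* 0xabcde */
        return 0;
    }
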
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 3616f27..e79efaf 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -61,6 +61,7 @@
 #include <linux/kernel.h>
 #include <linux/kmod.h>
 #include <linux/slab.h>
+#include <linux/vmalloc.h>
 #include <net/net_namespace.h>
 #include <net/ip.h>
 #include <net/protocol.h>
@@ -163,8 +164,13 @@
 static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
 		int closing, int tx_ring);
 
+#define PGV_FROM_VMALLOC 1
+struct pgv {
+	char *buffer;
+};
+
 struct packet_ring_buffer {
-	char			**pg_vec;
+	struct pgv		*pg_vec;
 	unsigned int		head;
 	unsigned int		frames_per_block;
 	unsigned int		frame_size;
@@ -217,6 +223,13 @@
 
 #define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))
 
+static inline __pure struct page *pgv_to_page(void *addr)
+{
+	if (is_vmalloc_addr(addr))
+		return vmalloc_to_page(addr);
+	return virt_to_page(addr);
+}
+
 static void __packet_set_status(struct packet_sock *po, void *frame, int status)
 {
 	union {
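
pgv_to_page() is what lets the ring buffer mix page-allocator and vmalloc blocks: every address is classified first, then converted with the matching helper, since virt_to_page() arithmetic is only valid for linear-map addresses. A userspace analogue of the classify-then-dispatch shape, with a fake address range standing in for is_vmalloc_addr():

    #include <stdio.h>
    #include <stdint.h>

    #define FAKE_VMALLOC_START 0x100000u
    #define FAKE_VMALLOC_END   0x200000u

    static int is_vmalloc_addr_model(const void *addr)
    {
        uintptr_t a = (uintptr_t)addr;
        return a >= FAKE_VMALLOC_START && a < FAKE_VMALLOC_END;
    }

    static const char *addr_to_page_model(const void *addr)
    {
        if (is_vmalloc_addr_model(addr))
            return "vmalloc_to_page";   /* walk the page tables */
        return "virt_to_page";          /* simple linear-map arithmetic */
    }

    int main(void)
    {
        printf("%s\n", addr_to_page_model((void *)0x150000u));
        printf("%s\n", addr_to_page_model((void *)0x050000u));
        return 0;
    }
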
@@ -229,11 +242,11 @@
 	switch (po->tp_version) {
 	case TPACKET_V1:
 		h.h1->tp_status = status;
-		flush_dcache_page(virt_to_page(&h.h1->tp_status));
+		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
 		break;
 	case TPACKET_V2:
 		h.h2->tp_status = status;
-		flush_dcache_page(virt_to_page(&h.h2->tp_status));
+		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
 		break;
 	default:
 		pr_err("TPACKET version not supported\n");
@@ -256,10 +269,10 @@
 	h.raw = frame;
 	switch (po->tp_version) {
 	case TPACKET_V1:
-		flush_dcache_page(virt_to_page(&h.h1->tp_status));
+		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
 		return h.h1->tp_status;
 	case TPACKET_V2:
-		flush_dcache_page(virt_to_page(&h.h2->tp_status));
+		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
 		return h.h2->tp_status;
 	default:
 		pr_err("TPACKET version not supported\n");
@@ -283,7 +296,8 @@
 	pg_vec_pos = position / rb->frames_per_block;
 	frame_offset = position % rb->frames_per_block;
 
-	h.raw = rb->pg_vec[pg_vec_pos] + (frame_offset * rb->frame_size);
+	h.raw = rb->pg_vec[pg_vec_pos].buffer +
+		(frame_offset * rb->frame_size);
 
 	if (status != __packet_get_status(po, h.raw))
 		return NULL;
@@ -503,7 +517,8 @@
 	return err;
 }
 
-static inline unsigned int run_filter(struct sk_buff *skb, struct sock *sk,
+static inline unsigned int run_filter(const struct sk_buff *skb,
+				      const struct sock *sk,
 				      unsigned int res)
 {
 	struct sk_filter *filter;
@@ -511,22 +526,22 @@
 	rcu_read_lock_bh();
 	filter = rcu_dereference_bh(sk->sk_filter);
 	if (filter != NULL)
-		res = sk_run_filter(skb, filter->insns, filter->len);
+		res = sk_run_filter(skb, filter->insns);
 	rcu_read_unlock_bh();
 
 	return res;
 }
 
 /*
-   This function makes lazy skb cloning in hope that most of packets
-   are discarded by BPF.
-
-   Note tricky part: we DO mangle shared skb! skb->data, skb->len
-   and skb->cb are mangled. It works because (and until) packets
-   falling here are owned by current CPU. Output packets are cloned
-   by dev_queue_xmit_nit(), input packets are processed by net_bh
-   sequencially, so that if we return skb to original state on exit,
-   we will not harm anyone.
+ * This function performs lazy skb cloning in the hope that most packets
+ * are discarded by BPF.
+ *
+ * Note the tricky part: we DO mangle the shared skb! skb->data, skb->len
+ * and skb->cb are mangled. It works because (and until) packets
+ * falling here are owned by the current CPU. Output packets are cloned
+ * by dev_queue_xmit_nit(), input packets are processed by net_bh
+ * sequentially, so that if we return the skb to its original state on
+ * exit, we will not harm anyone.
  */
 
 static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
@@ -552,11 +567,11 @@
 
 	if (dev->header_ops) {
 		/* The device has an explicit notion of ll header,
-		   exported to higher levels.
-
-		   Otherwise, the device hides datails of it frame
-		   structure, so that corresponding packet head
-		   never delivered to user.
+		 * exported to higher levels.
+		 *
+		 * Otherwise, the device hides details of its frame
+		 * structure, so that corresponding packet head is
+		 * never delivered to user.
 		 */
 		if (sk->sk_type != SOCK_DGRAM)
 			skb_push(skb, skb->data - skb_mac_header(skb));
@@ -791,17 +806,15 @@
 
 	__packet_set_status(po, h.raw, status);
 	smp_mb();
+#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
 	{
-		struct page *p_start, *p_end;
-		u8 *h_end = h.raw + macoff + snaplen - 1;
+		u8 *start, *end;
 
-		p_start = virt_to_page(h.raw);
-		p_end = virt_to_page(h_end);
-		while (p_start <= p_end) {
-			flush_dcache_page(p_start);
-			p_start++;
-		}
+		end = (u8 *)PAGE_ALIGN((unsigned long)h.raw + macoff + snaplen);
+		for (start = h.raw; start < end; start += PAGE_SIZE)
+			flush_dcache_page(pgv_to_page(start));
 	}
+#endif
 
 	sk->sk_data_ready(sk, 0);
 
@@ -907,7 +920,6 @@
 	}
 
 	err = -EFAULT;
-	page = virt_to_page(data);
 	offset = offset_in_page(data);
 	len_max = PAGE_SIZE - offset;
 	len = ((to_write > len_max) ? len_max : to_write);
@@ -926,11 +938,11 @@
 			return -EFAULT;
 		}
 
+		page = pgv_to_page(data);
+		data += len;
 		flush_dcache_page(page);
 		get_page(page);
-		skb_fill_page_desc(skb,
-				nr_frags,
-				page++, offset, len);
+		skb_fill_page_desc(skb, nr_frags, page, offset, len);
 		to_write -= len;
 		offset = 0;
 		len_max = PAGE_SIZE;
@@ -1610,9 +1622,11 @@
 
 		err = -EINVAL;
 		vnet_hdr_len = sizeof(vnet_hdr);
-		if ((len -= vnet_hdr_len) < 0)
+		if (len < vnet_hdr_len)
 			goto out_free;
 
+		len -= vnet_hdr_len;
+
 		if (skb_is_gso(skb)) {
 			struct skb_shared_info *sinfo = skb_shinfo(skb);
 
@@ -1719,7 +1733,7 @@
 	rcu_read_lock();
 	dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
 	if (dev)
-		strlcpy(uaddr->sa_data, dev->name, 15);
+		strncpy(uaddr->sa_data, dev->name, 14);
 	else
 		memset(uaddr->sa_data, 0, 14);
 	rcu_read_unlock();
@@ -1742,6 +1756,7 @@
 	sll->sll_family = AF_PACKET;
 	sll->sll_ifindex = po->ifindex;
 	sll->sll_protocol = po->num;
+	sll->sll_pkttype = 0;
 	rcu_read_lock();
 	dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
 	if (dev) {
@@ -2322,37 +2337,70 @@
 	.close	=	packet_mm_close,
 };
 
-static void free_pg_vec(char **pg_vec, unsigned int order, unsigned int len)
+static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
+			unsigned int len)
 {
 	int i;
 
 	for (i = 0; i < len; i++) {
-		if (likely(pg_vec[i]))
-			free_pages((unsigned long) pg_vec[i], order);
+		if (likely(pg_vec[i].buffer)) {
+			if (is_vmalloc_addr(pg_vec[i].buffer))
+				vfree(pg_vec[i].buffer);
+			else
+				free_pages((unsigned long)pg_vec[i].buffer,
+					   order);
+			pg_vec[i].buffer = NULL;
+		}
 	}
 	kfree(pg_vec);
 }
 
 static inline char *alloc_one_pg_vec_page(unsigned long order)
 {
-	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP | __GFP_ZERO | __GFP_NOWARN;
+	char *buffer = NULL;
+	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
+			  __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;
 
-	return (char *) __get_free_pages(gfp_flags, order);
+	buffer = (char *) __get_free_pages(gfp_flags, order);
+
+	if (buffer)
+		return buffer;
+
+	/*
+	 * __get_free_pages failed, fall back to vmalloc
+	 */
+	buffer = vzalloc((1 << order) * PAGE_SIZE);
+
+	if (buffer)
+		return buffer;
+
+	/*
+	 * vmalloc failed, let's dig into swap here
+	 */
+	gfp_flags &= ~__GFP_NORETRY;
+	buffer = (char *)__get_free_pages(gfp_flags, order);
+	if (buffer)
+		return buffer;
+
+	/*
+	 * complete and utter failure
+	 */
+	return NULL;
 }
 
-static char **alloc_pg_vec(struct tpacket_req *req, int order)
+static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
 {
 	unsigned int block_nr = req->tp_block_nr;
-	char **pg_vec;
+	struct pgv *pg_vec;
 	int i;
 
-	pg_vec = kzalloc(block_nr * sizeof(char *), GFP_KERNEL);
+	pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL);
 	if (unlikely(!pg_vec))
 		goto out;
 
 	for (i = 0; i < block_nr; i++) {
-		pg_vec[i] = alloc_one_pg_vec_page(order);
-		if (unlikely(!pg_vec[i]))
+		pg_vec[i].buffer = alloc_one_pg_vec_page(order);
+		if (unlikely(!pg_vec[i].buffer))
 			goto out_free_pgvec;
 	}
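
alloc_one_pg_vec_page() now degrades through three tiers: the page allocator with __GFP_NORETRY (fail fast, no OOM pressure), then vzalloc() for non-contiguous memory, then the page allocator again with retries permitted. The shape of the chain, modeled in userspace with stub allocators:

    #include <stdio.h>
    #include <stdlib.h>

    static void *try_fast(size_t sz)    { (void)sz; return NULL; } /* pretend it failed */
    static void *try_vmalloc(size_t sz) { return calloc(1, sz); }
    static void *try_hard(size_t sz)    { return calloc(1, sz); }

    static void *alloc_block(size_t sz)
    {
        void *p;

        if ((p = try_fast(sz)))      /* cheap attempt, gives up early */
            return p;
        if ((p = try_vmalloc(sz)))   /* non-contiguous fallback */
            return p;
        return try_hard(sz);         /* last resort: allow reclaim/swap */
    }

    int main(void)
    {
        void *p = alloc_block(1 << 16);
        printf("%s\n", p ? "allocated via fallback" : "complete failure");
        free(p);
        return 0;
    }
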
 
@@ -2368,7 +2416,7 @@
 static int packet_set_ring(struct sock *sk, struct tpacket_req *req,
 		int closing, int tx_ring)
 {
-	char **pg_vec = NULL;
+	struct pgv *pg_vec = NULL;
 	struct packet_sock *po = pkt_sk(sk);
 	int was_running, order = 0;
 	struct packet_ring_buffer *rb;
@@ -2453,22 +2501,20 @@
 	mutex_lock(&po->pg_vec_lock);
 	if (closing || atomic_read(&po->mapped) == 0) {
 		err = 0;
-#define XC(a, b) ({ __typeof__ ((a)) __t; __t = (a); (a) = (b); __t; })
 		spin_lock_bh(&rb_queue->lock);
-		pg_vec = XC(rb->pg_vec, pg_vec);
+		swap(rb->pg_vec, pg_vec);
 		rb->frame_max = (req->tp_frame_nr - 1);
 		rb->head = 0;
 		rb->frame_size = req->tp_frame_size;
 		spin_unlock_bh(&rb_queue->lock);
 
-		order = XC(rb->pg_vec_order, order);
-		req->tp_block_nr = XC(rb->pg_vec_len, req->tp_block_nr);
+		swap(rb->pg_vec_order, order);
+		swap(rb->pg_vec_len, req->tp_block_nr);
 
 		rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
 		po->prot_hook.func = (po->rx_ring.pg_vec) ?
 						tpacket_rcv : packet_rcv;
 		skb_queue_purge(rb_queue);
-#undef XC
 		if (atomic_read(&po->mapped))
 			pr_err("packet_mmap: vma is busy: %d\n",
 			       atomic_read(&po->mapped));
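
The removed XC() macro was a typeof-based exchange that returns the old value; at these call sites it was always used as a = XC(b, a), a swap through the return value, so kernel.h's swap() is a drop-in replacement. Both idioms in userspace (the statement expression needs gcc/clang):

    #include <stdio.h>

    /* old af_packet idiom: assign b into a, return a's previous value */
    #define XC(a, b) ({ __typeof__((a)) __t; __t = (a); (a) = (b); __t; })

    /* kernel.h-style swap(): plain three-step exchange, no return value */
    #define swap(a, b) \
        do { __typeof__(a) __t = (a); (a) = (b); (b) = __t; } while (0)

    int main(void)
    {
        int a = 1, b = 2;
        swap(a, b);
        printf("a=%d b=%d\n", a, b);   /* a=2 b=1 */

        int x = 5, y = 7;
        y = XC(x, y);                  /* x=7, y=5: a swap via the return */
        printf("x=%d y=%d\n", x, y);
        return 0;
    }
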
@@ -2530,15 +2576,17 @@
 			continue;
 
 		for (i = 0; i < rb->pg_vec_len; i++) {
-			struct page *page = virt_to_page(rb->pg_vec[i]);
+			struct page *page;
+			void *kaddr = rb->pg_vec[i].buffer;
 			int pg_num;
 
-			for (pg_num = 0; pg_num < rb->pg_vec_pages;
-					pg_num++, page++) {
+			for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
+				page = pgv_to_page(kaddr);
 				err = vm_insert_page(vma, start, page);
 				if (unlikely(err))
 					goto out;
 				start += PAGE_SIZE;
+				kaddr += PAGE_SIZE;
 			}
 		}
 	}
diff --git a/net/phonet/Makefile b/net/phonet/Makefile
index d62bbba..e10b1b1 100644
--- a/net/phonet/Makefile
+++ b/net/phonet/Makefile
@@ -1,6 +1,6 @@
 obj-$(CONFIG_PHONET) += phonet.o pn_pep.o
 
-phonet-objs := \
+phonet-y := \
 	pn_dev.o \
 	pn_netlink.o \
 	socket.o \
@@ -8,4 +8,4 @@
 	sysctl.o \
 	af_phonet.o
 
-pn_pep-objs := pep.o pep-gprs.o
+pn_pep-y := pep.o pep-gprs.o
diff --git a/net/rds/Makefile b/net/rds/Makefile
index b46eca1..56d3f60 100644
--- a/net/rds/Makefile
+++ b/net/rds/Makefile
@@ -4,7 +4,7 @@
 			loop.o page.o rdma.o
 
 obj-$(CONFIG_RDS_RDMA) += rds_rdma.o
-rds_rdma-objs :=	rdma_transport.o \
+rds_rdma-y :=	rdma_transport.o \
 			ib.o ib_cm.o ib_recv.o ib_ring.o ib_send.o ib_stats.o \
 			ib_sysctl.o ib_rdma.o \
 			iw.o iw_cm.o iw_recv.o iw_ring.o iw_send.o iw_stats.o \
@@ -12,10 +12,8 @@
 
 
 obj-$(CONFIG_RDS_TCP) += rds_tcp.o
-rds_tcp-objs :=		tcp.o tcp_connect.o tcp_listen.o tcp_recv.o \
+rds_tcp-y :=		tcp.o tcp_connect.o tcp_listen.o tcp_recv.o \
 			tcp_send.o tcp_stats.o
 
-ifeq ($(CONFIG_RDS_DEBUG), y)
-EXTRA_CFLAGS += -DDEBUG
-endif
+ccflags-$(CONFIG_RDS_DEBUG)	:=	-DDEBUG
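
The -objs to -y renames across these Makefiles (ircomm, irlan, irnet, lapb, and phonet, rxrpc, sunrpc below) let composite objects use the same conditional suffix idiom as obj-y, and ccflags-$(...) replaces the ifeq/EXTRA_CFLAGS dance, as this rds hunk shows. A kbuild sketch for a hypothetical module "foo":

    obj-$(CONFIG_FOO) += foo.o

    # objects composing foo.o; a config-keyed suffix replaces ifeq blocks
    foo-y                    := foo_core.o foo_ioctl.o
    foo-$(CONFIG_FOO_DEBUG)  += foo_debug.o

    # per-directory compiler flags, also config-keyed
    ccflags-$(CONFIG_FOO_DEBUG) := -DDEBUG
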
 
diff --git a/net/rds/loop.c b/net/rds/loop.c
index c390156..aeec1d4 100644
--- a/net/rds/loop.c
+++ b/net/rds/loop.c
@@ -134,8 +134,12 @@
 static void rds_loop_conn_free(void *arg)
 {
 	struct rds_loop_connection *lc = arg;
+	unsigned long flags;
+
 	rdsdebug("lc %p\n", lc);
+	spin_lock_irqsave(&loop_conns_lock, flags);
 	list_del(&lc->loop_node);
+	spin_unlock_irqrestore(&loop_conns_lock, flags);
 	kfree(lc);
 }
 
diff --git a/net/rds/message.c b/net/rds/message.c
index 848cff4..1fd3d29 100644
--- a/net/rds/message.c
+++ b/net/rds/message.c
@@ -249,8 +249,10 @@
 	rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);
 	rm->data.op_nents = ceil(total_len, PAGE_SIZE);
 	rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
-	if (!rm->data.op_sg)
+	if (!rm->data.op_sg) {
+		rds_message_put(rm);
 		return ERR_PTR(-ENOMEM);
+	}
 
 	for (i = 0; i < rm->data.op_nents; ++i) {
 		sg_set_page(&rm->data.op_sg[i],
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 8920f2a..4e37c1c 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -567,7 +567,7 @@
 		goto out;
 	}
 
-	if (args->nr_local > (u64)UINT_MAX) {
+	if (args->nr_local > UIO_MAXIOV) {
 		ret = -EMSGSIZE;
 		goto out;
 	}
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index 08a8c6c..8e0a320 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -221,7 +221,13 @@
 static void rds_tcp_conn_free(void *arg)
 {
 	struct rds_tcp_connection *tc = arg;
+	unsigned long flags;
 	rdsdebug("freeing tc %p\n", tc);
+
+	spin_lock_irqsave(&rds_tcp_conn_lock, flags);
+	list_del(&tc->t_tcp_node);
+	spin_unlock_irqrestore(&rds_tcp_conn_lock, flags);
+
 	kmem_cache_free(rds_tcp_conn_slab, tc);
 }
 
diff --git a/net/rxrpc/Makefile b/net/rxrpc/Makefile
index c46867c..d1c3429 100644
--- a/net/rxrpc/Makefile
+++ b/net/rxrpc/Makefile
@@ -2,7 +2,7 @@
 # Makefile for Linux kernel RxRPC
 #
 
-af-rxrpc-objs := \
+af-rxrpc-y := \
 	af_rxrpc.o \
 	ar-accept.o \
 	ar-ack.o \
@@ -21,7 +21,7 @@
 	ar-transport.o
 
 ifeq ($(CONFIG_PROC_FS),y)
-af-rxrpc-objs += ar-proc.o
+af-rxrpc-y += ar-proc.o
 endif
 
 obj-$(CONFIG_AF_RXRPC) += af-rxrpc.o
diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
index 9f1729b..a53fb25 100644
--- a/net/rxrpc/ar-peer.c
+++ b/net/rxrpc/ar-peer.c
@@ -47,12 +47,12 @@
 	case AF_INET:
 		fl.oif = 0;
 		fl.proto = IPPROTO_UDP,
-		fl.nl_u.ip4_u.saddr = 0;
-		fl.nl_u.ip4_u.daddr = peer->srx.transport.sin.sin_addr.s_addr;
-		fl.nl_u.ip4_u.tos = 0;
+		fl.fl4_dst = peer->srx.transport.sin.sin_addr.s_addr;
+		fl.fl4_src = 0;
+		fl.fl4_tos = 0;
 		/* assume AFS.CM talking to AFS.FS */
-		fl.uli_u.ports.sport = htons(7001);
-		fl.uli_u.ports.dport = htons(7000);
+		fl.fl_ip_sport = htons(7001);
+		fl.fl_ip_dport = htons(7000);
 		break;
 	default:
 		BUG();
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index efd4f95..f23d915 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -268,6 +268,10 @@
 		goto nla_put_failure;
 
 	nla_nest_end(skb, nest);
+
+	if (tcf_exts_dump_stats(skb, &f->exts, &basic_ext_map) < 0)
+		goto nla_put_failure;
+
 	return skb->len;
 
 nla_put_failure:
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index 37dff78..d49c40f 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -34,8 +34,6 @@
 	.populate	= cgrp_populate,
 #ifdef CONFIG_NET_CLS_CGROUP
 	.subsys_id	= net_cls_subsys_id,
-#else
-#define net_cls_subsys_id net_cls_subsys.subsys_id
 #endif
 	.module		= THIS_MODULE,
 };
diff --git a/net/sched/em_text.c b/net/sched/em_text.c
index 7632532..ea8f566 100644
--- a/net/sched/em_text.c
+++ b/net/sched/em_text.c
@@ -103,7 +103,8 @@
 
 static void em_text_destroy(struct tcf_proto *tp, struct tcf_ematch *m)
 {
-	textsearch_destroy(EM_TEXT_PRIV(m)->config);
+	if (EM_TEXT_PRIV(m) && EM_TEXT_PRIV(m)->config)
+		textsearch_destroy(EM_TEXT_PRIV(m)->config);
 }
 
 static int em_text_dump(struct sk_buff *skb, struct tcf_ematch *m)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 5dbb3cd..0918834 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -60,8 +60,7 @@
 
 		/* check the reason of requeuing without tx lock first */
 		txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
-		if (!netif_tx_queue_stopped(txq) &&
-		    !netif_tx_queue_frozen(txq)) {
+		if (!netif_tx_queue_frozen_or_stopped(txq)) {
 			q->gso_skb = NULL;
 			q->q.qlen--;
 		} else
@@ -122,7 +121,7 @@
 	spin_unlock(root_lock);
 
 	HARD_TX_LOCK(dev, txq, smp_processor_id());
-	if (!netif_tx_queue_stopped(txq) && !netif_tx_queue_frozen(txq))
+	if (!netif_tx_queue_frozen_or_stopped(txq))
 		ret = dev_hard_start_xmit(skb, dev, txq);
 
 	HARD_TX_UNLOCK(dev, txq);
@@ -144,8 +143,7 @@
 		ret = dev_requeue_skb(skb, q);
 	}
 
-	if (ret && (netif_tx_queue_stopped(txq) ||
-		    netif_tx_queue_frozen(txq)))
+	if (ret && netif_tx_queue_frozen_or_stopped(txq))
 		ret = 0;
 
 	return ret;
@@ -555,7 +553,9 @@
 	size = QDISC_ALIGN(sizeof(*sch));
 	size += ops->priv_size + (QDISC_ALIGNTO - 1);
 
-	p = kzalloc(size, GFP_KERNEL);
+	p = kzalloc_node(size, GFP_KERNEL,
+			 netdev_queue_numa_node_read(dev_queue));
+
 	if (!p)
 		goto errout;
 	sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 401af95..106479a 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -309,8 +309,7 @@
 			if (__netif_tx_trylock(slave_txq)) {
 				unsigned int length = qdisc_pkt_len(skb);
 
-				if (!netif_tx_queue_stopped(slave_txq) &&
-				    !netif_tx_queue_frozen(slave_txq) &&
+				if (!netif_tx_queue_frozen_or_stopped(slave_txq) &&
 				    slave_ops->ndo_start_xmit(skb, slave) == NETDEV_TX_OK) {
 					txq_trans_update(slave_txq);
 					__netif_tx_unlock(slave_txq);
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 1ef29c7..e58f947 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -92,7 +92,7 @@
 struct kmem_cache *sctp_chunk_cachep __read_mostly;
 struct kmem_cache *sctp_bucket_cachep __read_mostly;
 
-int sysctl_sctp_mem[3];
+long sysctl_sctp_mem[3];
 int sysctl_sctp_rmem[3];
 int sysctl_sctp_wmem[3];
 
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index e34ca9c..842c7f3 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -111,12 +111,12 @@
 static char *sctp_hmac_alg = SCTP_COOKIE_HMAC_ALG;
 
 extern struct kmem_cache *sctp_bucket_cachep;
-extern int sysctl_sctp_mem[3];
+extern long sysctl_sctp_mem[3];
 extern int sysctl_sctp_rmem[3];
 extern int sysctl_sctp_wmem[3];
 
 static int sctp_memory_pressure;
-static atomic_t sctp_memory_allocated;
+static atomic_long_t sctp_memory_allocated;
 struct percpu_counter sctp_sockets_allocated;
 
 static void sctp_enter_memory_pressure(struct sock *sk)
@@ -6047,7 +6047,7 @@
 		 * will suddenly eat the receive_queue.
 		 *
 		 *  Look at current nfs client by the way...
-		 *  However, this function was corrent in any case. 8)
+		 *  However, this function was correct in any case. 8)
 		 */
 		if (flags & MSG_PEEK) {
 			spin_lock_bh(&sk->sk_receive_queue.lock);
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 832590b..50cb57f 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -54,7 +54,7 @@
 static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */
 static int rwnd_scale_max = 16;
 
-extern int sysctl_sctp_mem[3];
+extern long sysctl_sctp_mem[3];
 extern int sysctl_sctp_rmem[3];
 extern int sysctl_sctp_wmem[3];
 
@@ -203,7 +203,7 @@
 		.data		= &sysctl_sctp_mem,
 		.maxlen		= sizeof(sysctl_sctp_mem),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
+		.proc_handler	= proc_doulongvec_minmax
 	},
 	{
 		.procname	= "sctp_rmem",
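
sysctl_sctp_mem counts pages and can exceed INT_MAX on large machines, hence the int-to-long change above; the proc handler must switch in lockstep, because proc_dointvec and proc_doulongvec_minmax stride through .data in sizeof(int) versus sizeof(long) steps and .maxlen is derived from the array. The mismatch in miniature:

    #include <stdio.h>

    int  mem_as_int[3];
    long mem_as_long[3];

    int main(void)
    {
        /* an int handler walking a long[] misreads every element on LP64 */
        printf("int[3]  = %zu bytes (element %zu)\n",
               sizeof(mem_as_int), sizeof(mem_as_int[0]));
        printf("long[3] = %zu bytes (element %zu)\n",
               sizeof(mem_as_long), sizeof(mem_as_long[0]));
        return 0;
    }
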
diff --git a/net/socket.c b/net/socket.c
index 3ca2fd9..c898df7 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -156,7 +156,7 @@
  */
 
 static DEFINE_SPINLOCK(net_family_lock);
-static const struct net_proto_family *net_families[NPROTO] __read_mostly;
+static const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
 
 /*
  *	Statistics counters of the socket lists
@@ -1200,7 +1200,7 @@
 	 * requested real, full-featured networking support upon configuration.
 	 * Otherwise module support will break!
 	 */
-	if (net_families[family] == NULL)
+	if (rcu_access_pointer(net_families[family]) == NULL)
 		request_module("net-pf-%d", family);
 #endif
 
@@ -2332,10 +2332,11 @@
 	}
 
 	spin_lock(&net_family_lock);
-	if (net_families[ops->family])
+	if (rcu_dereference_protected(net_families[ops->family],
+				      lockdep_is_held(&net_family_lock)))
 		err = -EEXIST;
 	else {
-		net_families[ops->family] = ops;
+		rcu_assign_pointer(net_families[ops->family], ops);
 		err = 0;
 	}
 	spin_unlock(&net_family_lock);
@@ -2363,7 +2364,7 @@
 	BUG_ON(family < 0 || family >= NPROTO);
 
 	spin_lock(&net_family_lock);
-	net_families[family] = NULL;
+	rcu_assign_pointer(net_families[family], NULL);
 	spin_unlock(&net_family_lock);
 
 	synchronize_rcu();
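
net_families[] becomes an __rcu array: registration publishes with rcu_assign_pointer() under net_family_lock, lookups use rcu_access_pointer() or an RCU dereference, and unregistration waits in synchronize_rcu() before the old ops can be freed. The publish half maps onto a C11 release-ordered store; the sketch below collapses the lock-protected check-then-assign into one CAS for brevity and omits the grace period, which has no short userspace analogue:

    #include <stdio.h>
    #include <stdatomic.h>

    struct proto_ops_model { int family; };

    #define NPROTO 4
    static _Atomic(struct proto_ops_model *) families[NPROTO];

    static int register_family(struct proto_ops_model *ops)
    {
        /* like rcu_assign_pointer(): readers never see a half-built ops */
        struct proto_ops_model *expected = NULL;
        if (!atomic_compare_exchange_strong(&families[ops->family],
                                            &expected, ops))
            return -1;   /* -EEXIST in the kernel version */
        return 0;
    }

    int main(void)
    {
        static struct proto_ops_model inet = { .family = 2 };
        printf("first:  %d\n", register_family(&inet));  /* 0 */
        printf("second: %d\n", register_family(&inet));  /* -1 */
        return 0;
    }
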
diff --git a/net/sunrpc/auth_gss/Makefile b/net/sunrpc/auth_gss/Makefile
index 7350d86..9e4cb59 100644
--- a/net/sunrpc/auth_gss/Makefile
+++ b/net/sunrpc/auth_gss/Makefile
@@ -4,10 +4,10 @@
 
 obj-$(CONFIG_SUNRPC_GSS) += auth_rpcgss.o
 
-auth_rpcgss-objs := auth_gss.o gss_generic_token.o \
+auth_rpcgss-y := auth_gss.o gss_generic_token.o \
 	gss_mech_switch.o svcauth_gss.o
 
 obj-$(CONFIG_RPCSEC_GSS_KRB5) += rpcsec_gss_krb5.o
 
-rpcsec_gss_krb5-objs := gss_krb5_mech.o gss_krb5_seal.o gss_krb5_unseal.o \
+rpcsec_gss_krb5-y := gss_krb5_mech.o gss_krb5_seal.o gss_krb5_unseal.o \
 	gss_krb5_seqnum.o gss_krb5_wrap.o gss_krb5_crypto.o gss_krb5_keys.o
diff --git a/net/tipc/addr.c b/net/tipc/addr.c
index 8a2e89b..886715a 100644
--- a/net/tipc/addr.c
+++ b/net/tipc/addr.c
@@ -35,11 +35,9 @@
  */
 
 #include "core.h"
-#include "dbg.h"
 #include "addr.h"
 #include "zone.h"
 #include "cluster.h"
-#include "net.h"
 
 /**
  * tipc_addr_domain_valid - validates a network domain address
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 22a60fc..6d828d9 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -36,17 +36,9 @@
  */
 
 #include "core.h"
-#include "msg.h"
-#include "dbg.h"
 #include "link.h"
-#include "net.h"
-#include "node.h"
 #include "port.h"
-#include "addr.h"
-#include "node_subscr.h"
 #include "name_distr.h"
-#include "bearer.h"
-#include "name_table.h"
 #include "bcast.h"
 
 #define MAX_PKT_DEFAULT_MCAST 1500	/* bcast link max packet size (fixed) */
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 9927d1d..885da94 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -36,12 +36,9 @@
 
 #include "core.h"
 #include "config.h"
-#include "dbg.h"
 #include "bearer.h"
-#include "link.h"
 #include "port.h"
 #include "discover.h"
-#include "bcast.h"
 
 #define MAX_ADDR_STR 32
 
@@ -625,7 +622,7 @@
  * Note: This routine assumes caller holds tipc_net_lock.
  */
 
-static int bearer_disable(struct bearer *b_ptr)
+static void bearer_disable(struct bearer *b_ptr)
 {
 	struct link *l_ptr;
 	struct link *temp_l_ptr;
@@ -641,7 +638,6 @@
 	}
 	spin_unlock_bh(&b_ptr->publ.lock);
 	memset(b_ptr, 0, sizeof(struct bearer));
-	return 0;
 }
 
 int tipc_disable_bearer(const char *name)
@@ -654,8 +650,10 @@
 	if (b_ptr == NULL) {
 		warn("Attempt to disable unknown bearer <%s>\n", name);
 		res = -EINVAL;
-	} else
-		res = bearer_disable(b_ptr);
+	} else {
+		bearer_disable(b_ptr);
+		res = 0;
+	}
 	write_unlock_bh(&tipc_net_lock);
 	return res;
 }
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index a850b38..85f451d 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -37,12 +37,50 @@
 #ifndef _TIPC_BEARER_H
 #define _TIPC_BEARER_H
 
-#include "core.h"
 #include "bcast.h"
 
 #define MAX_BEARERS 8
 #define MAX_MEDIA 4
 
+/*
+ * Identifiers of supported TIPC media types
+ */
+#define TIPC_MEDIA_TYPE_ETH	1
+
+/*
+ * Destination address structure used by TIPC bearers when sending messages
+ *
+ * IMPORTANT: The fields of this structure MUST be stored using the specified
+ * byte order indicated below, as the structure is exchanged between nodes
+ * as part of a link setup process.
+ */
+struct tipc_media_addr {
+	__be32  type;			/* bearer type (network byte order) */
+	union {
+		__u8   eth_addr[6];	/* 48 bit Ethernet addr (byte array) */
+	} dev_addr;
+};
+
+/**
+ * struct tipc_bearer - TIPC bearer info available to media code
+ * @usr_handle: pointer to additional media-specific information about bearer
+ * @mtu: max packet size bearer can support
+ * @blocked: non-zero if bearer is blocked
+ * @lock: spinlock for controlling access to bearer
+ * @addr: media-specific address associated with bearer
+ * @name: bearer name (format = media:interface)
+ *
+ * Note: TIPC initializes "name" and "lock" fields; media code is responsible
+ * for initializing all other fields when a bearer is enabled.
+ */
+struct tipc_bearer {
+	void *usr_handle;
+	u32 mtu;
+	int blocked;
+	spinlock_t lock;
+	struct tipc_media_addr addr;
+	char name[TIPC_MAX_BEARER_NAME];
+};
 
 /**
  * struct media - TIPC media information available to internal users
@@ -55,7 +93,7 @@
  * @priority: default link (and bearer) priority
  * @tolerance: default time (in ms) before declaring link failure
  * @window: default window (in packets) before declaring link congestion
- * @type_id: TIPC media identifier [defined in tipc_bearer.h]
+ * @type_id: TIPC media identifier
  * @name: media name
  */
 
@@ -116,6 +154,34 @@
 
 extern struct bearer tipc_bearers[];
 
+/*
+ * TIPC routines available to supported media types
+ */
+int tipc_register_media(u32 media_type,
+		 char *media_name, int (*enable)(struct tipc_bearer *),
+		 void (*disable)(struct tipc_bearer *),
+		 int (*send_msg)(struct sk_buff *,
+			struct tipc_bearer *, struct tipc_media_addr *),
+		 char *(*addr2str)(struct tipc_media_addr *a,
+			char *str_buf, int str_size),
+		 struct tipc_media_addr *bcast_addr, const u32 bearer_priority,
+		 const u32 link_tolerance,  /* [ms] */
+		 const u32 send_window_limit);
+
+void tipc_recv_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr);
+
+int  tipc_block_bearer(const char *name);
+void tipc_continue(struct tipc_bearer *tb_ptr);
+
+int tipc_enable_bearer(const char *bearer_name, u32 bcast_scope, u32 priority);
+int tipc_disable_bearer(const char *name);
+
+/*
+ * Routines made available to TIPC by supported media types
+ */
+int  tipc_eth_media_start(void);
+void tipc_eth_media_stop(void);
+
 void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a);
 struct sk_buff *tipc_media_get_names(void);
 
@@ -126,7 +192,6 @@
 struct bearer *tipc_bearer_find_interface(const char *if_name);
 int tipc_bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr);
 int tipc_bearer_congested(struct bearer *b_ptr, struct link *l_ptr);
-int tipc_bearer_init(void);
 void tipc_bearer_stop(void);
 void tipc_bearer_lock_push(struct bearer *b_ptr);
 
diff --git a/net/tipc/cluster.c b/net/tipc/cluster.c
index 7fea14b..405be87 100644
--- a/net/tipc/cluster.c
+++ b/net/tipc/cluster.c
@@ -36,17 +36,10 @@
 
 #include "core.h"
 #include "cluster.h"
-#include "addr.h"
-#include "node_subscr.h"
 #include "link.h"
-#include "node.h"
-#include "net.h"
-#include "msg.h"
-#include "bearer.h"
 
 static void tipc_cltr_multicast(struct cluster *c_ptr, struct sk_buff *buf,
 				u32 lower, u32 upper);
-static struct sk_buff *tipc_cltr_prepare_routing_msg(u32 data_size, u32 dest);
 
 struct tipc_node **tipc_local_nodes = NULL;
 struct tipc_node_map tipc_cltr_bcast_nodes = {0,{0,}};
diff --git a/net/tipc/config.c b/net/tipc/config.c
index 50a6133..bdde39f 100644
--- a/net/tipc/config.c
+++ b/net/tipc/config.c
@@ -35,23 +35,11 @@
  */
 
 #include "core.h"
-#include "dbg.h"
-#include "bearer.h"
 #include "port.h"
 #include "link.h"
-#include "zone.h"
-#include "addr.h"
 #include "name_table.h"
-#include "node.h"
+#include "user_reg.h"
 #include "config.h"
-#include "discover.h"
-
-struct subscr_data {
-	char usr_handle[8];
-	u32 domain;
-	u32 port_ref;
-	struct list_head subd_list;
-};
 
 struct manager {
 	u32 user_ref;
@@ -572,7 +560,7 @@
 	struct tipc_name_seq seq;
 	int res;
 
-	res = tipc_attach(&mng.user_ref, NULL, NULL);
+	res = tipc_attach(&mng.user_ref);
 	if (res)
 		goto failed;
 
diff --git a/net/tipc/config.h b/net/tipc/config.h
index 481e12e..443159a 100644
--- a/net/tipc/config.h
+++ b/net/tipc/config.h
@@ -39,7 +39,6 @@
 
 /* ---------------------------------------------------------------------- */
 
-#include "core.h"
 #include "link.h"
 
 struct sk_buff *tipc_cfg_reply_alloc(int payload_size);
diff --git a/net/tipc/core.c b/net/tipc/core.c
index e2a09eb..f5d62c1 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -40,7 +40,6 @@
 #include <linux/random.h>
 
 #include "core.h"
-#include "dbg.h"
 #include "ref.h"
 #include "net.h"
 #include "user_reg.h"
@@ -236,43 +235,3 @@
 MODULE_DESCRIPTION("TIPC: Transparent Inter Process Communication");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_VERSION(TIPC_MOD_VER);
-
-/* Native TIPC API for kernel-space applications (see tipc.h) */
-
-EXPORT_SYMBOL(tipc_attach);
-EXPORT_SYMBOL(tipc_detach);
-EXPORT_SYMBOL(tipc_createport);
-EXPORT_SYMBOL(tipc_deleteport);
-EXPORT_SYMBOL(tipc_ownidentity);
-EXPORT_SYMBOL(tipc_portimportance);
-EXPORT_SYMBOL(tipc_set_portimportance);
-EXPORT_SYMBOL(tipc_portunreliable);
-EXPORT_SYMBOL(tipc_set_portunreliable);
-EXPORT_SYMBOL(tipc_portunreturnable);
-EXPORT_SYMBOL(tipc_set_portunreturnable);
-EXPORT_SYMBOL(tipc_publish);
-EXPORT_SYMBOL(tipc_withdraw);
-EXPORT_SYMBOL(tipc_connect2port);
-EXPORT_SYMBOL(tipc_disconnect);
-EXPORT_SYMBOL(tipc_shutdown);
-EXPORT_SYMBOL(tipc_send);
-EXPORT_SYMBOL(tipc_send2name);
-EXPORT_SYMBOL(tipc_send2port);
-EXPORT_SYMBOL(tipc_multicast);
-
-/* TIPC API for external bearers (see tipc_bearer.h) */
-
-EXPORT_SYMBOL(tipc_block_bearer);
-EXPORT_SYMBOL(tipc_continue);
-EXPORT_SYMBOL(tipc_disable_bearer);
-EXPORT_SYMBOL(tipc_enable_bearer);
-EXPORT_SYMBOL(tipc_recv_msg);
-EXPORT_SYMBOL(tipc_register_media);
-
-/* TIPC API for external APIs (see tipc_port.h) */
-
-EXPORT_SYMBOL(tipc_createport_raw);
-EXPORT_SYMBOL(tipc_reject_msg);
-EXPORT_SYMBOL(tipc_send_buf_fast);
-EXPORT_SYMBOL(tipc_acknowledge);
-
diff --git a/net/tipc/core.h b/net/tipc/core.h
index e19389e..ca7e171 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -39,10 +39,6 @@
 
 #include <linux/tipc.h>
 #include <linux/tipc_config.h>
-#include <net/tipc/tipc_msg.h>
-#include <net/tipc/tipc_port.h>
-#include <net/tipc/tipc_bearer.h>
-#include <net/tipc/tipc.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
@@ -62,6 +58,9 @@
 
 #define TIPC_MOD_VER "2.0.0"
 
+struct tipc_msg;	/* msg.h */
+struct print_buf;	/* dbg.h */
+
 /*
  * TIPC sanity test macros
  */
@@ -174,6 +173,13 @@
 #define ELINKCONG EAGAIN	/* link congestion <=> resource unavailable */
 
 /*
+ * TIPC operating mode routines
+ */
+#define TIPC_NOT_RUNNING  0
+#define TIPC_NODE_MODE    1
+#define TIPC_NET_MODE     2
+
+/*
  * Global configuration variables
  */
 
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 4a7cd37..f2ce36b 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -35,9 +35,7 @@
  */
 
 #include "core.h"
-#include "dbg.h"
 #include "link.h"
-#include "zone.h"
 #include "discover.h"
 #include "port.h"
 #include "name_table.h"
diff --git a/net/tipc/discover.h b/net/tipc/discover.h
index f8e7506..d2c3cff 100644
--- a/net/tipc/discover.h
+++ b/net/tipc/discover.h
@@ -37,8 +37,6 @@
 #ifndef _TIPC_DISCOVER_H
 #define _TIPC_DISCOVER_H
 
-#include "core.h"
-
 struct link_req;
 
 struct link_req *tipc_disc_init_link_req(struct bearer *b_ptr,
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
index 6e988ba..ee683cc 100644
--- a/net/tipc/eth_media.c
+++ b/net/tipc/eth_media.c
@@ -34,13 +34,13 @@
  * POSSIBILITY OF SUCH DAMAGE.
  */
 
-#include <net/tipc/tipc.h>
-#include <net/tipc/tipc_bearer.h>
-#include <net/tipc/tipc_msg.h>
 #include <linux/netdevice.h>
 #include <linux/slab.h>
 #include <net/net_namespace.h>
 
+#include "core.h"
+#include "bearer.h"
+
 #define MAX_ETH_BEARERS		2
 #define ETH_LINK_PRIORITY	TIPC_DEF_LINK_PRI
 #define ETH_LINK_TOLERANCE	TIPC_DEF_LINK_TOL
diff --git a/net/tipc/link.c b/net/tipc/link.c
index b31992c..cf414cf 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -35,19 +35,11 @@
  */
 
 #include "core.h"
-#include "dbg.h"
 #include "link.h"
-#include "net.h"
-#include "node.h"
 #include "port.h"
-#include "addr.h"
-#include "node_subscr.h"
 #include "name_distr.h"
-#include "bearer.h"
-#include "name_table.h"
 #include "discover.h"
 #include "config.h"
-#include "bcast.h"
 
 
 /*
@@ -57,12 +49,6 @@
 #define INVALID_SESSION 0x10000
 
 /*
- * Limit for deferred reception queue:
- */
-
-#define DEF_QUEUE_LIMIT 256u
-
-/*
  * Link state events:
  */
 
diff --git a/net/tipc/link.h b/net/tipc/link.h
index f98bc61..c562888 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -39,7 +39,6 @@
 
 #include "dbg.h"
 #include "msg.h"
-#include "bearer.h"
 #include "node.h"
 
 #define PUSH_FAILED   1
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index ecb532f..ee6b4c6 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -36,9 +36,7 @@
 
 #include "core.h"
 #include "addr.h"
-#include "dbg.h"
 #include "msg.h"
-#include "bearer.h"
 
 u32 tipc_msg_tot_importance(struct tipc_msg *m)
 {
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 031aad1..aee5386 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -37,10 +37,51 @@
 #ifndef _TIPC_MSG_H
 #define _TIPC_MSG_H
 
-#include "core.h"
+#include "bearer.h"
 
 #define TIPC_VERSION              2
 
+/*
+ *		TIPC user data message header format, version 2:
+ *
+ *
+ *     1 0 9 8 7 6 5 4|3 2 1 0 9 8 7 6|5 4 3 2 1 0 9 8|7 6 5 4 3 2 1 0
+ *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * w0:|vers | user  |hdr sz |n|d|s|-|          message size           |
+ *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * w1:|mstyp| error |rer cnt|lsc|opt p|      broadcast ack no         |
+ *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * w2:|        link level ack no      |   broadcast/link level seq no |
+ *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * w3:|                       previous node                           |
+ *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * w4:|                      originating port                         |
+ *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * w5:|                      destination port                         |
+ *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * w6:|                      originating node                         |
+ *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * w7:|                      destination node                         |
+ *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * w8:|            name type / transport sequence number              |
+ *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * w9:|              name instance/multicast lower bound              |
+ *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * wA:|                    multicast upper bound                      |
+ *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *    /                                                               /
+ *    \                           options                             \
+ *    /                                                               /
+ *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ */
+
+#define TIPC_CONN_MSG		0
+#define TIPC_MCAST_MSG		1
+#define TIPC_NAMED_MSG		2
+#define TIPC_DIRECT_MSG		3
+
+
 #define SHORT_H_SIZE              24	/* Connected, in-cluster messages */
 #define DIR_MSG_H_SIZE            32	/* Directly addressed messages */
 #define LONG_H_SIZE               40	/* Named messages */
@@ -52,20 +93,26 @@
 #define MAX_MSG_SIZE (MAX_H_SIZE + TIPC_MAX_USER_MSG_SIZE)
 
 
-/*
-		TIPC user data message header format, version 2
+struct tipc_msg {
+	__be32 hdr[15];
+};
 
-	- Fundamental definitions available to privileged TIPC users
-	  are located in tipc_msg.h.
-	- Remaining definitions available to TIPC internal users appear below.
-*/
 
+static inline u32 msg_word(struct tipc_msg *m, u32 pos)
+{
+	return ntohl(m->hdr[pos]);
+}
 
 static inline void msg_set_word(struct tipc_msg *m, u32 w, u32 val)
 {
 	m->hdr[w] = htonl(val);
 }
 
+static inline u32 msg_bits(struct tipc_msg *m, u32 w, u32 pos, u32 mask)
+{
+	return (msg_word(m, w) >> pos) & mask;
+}
+
 static inline void msg_set_bits(struct tipc_msg *m, u32 w,
 				u32 pos, u32 mask, u32 val)
 {
@@ -112,16 +159,36 @@
 	msg_set_bits(m, 0, 25, 0xf, n);
 }
 
+static inline u32 msg_importance(struct tipc_msg *m)
+{
+	return msg_bits(m, 0, 25, 0xf);
+}
+
 static inline void msg_set_importance(struct tipc_msg *m, u32 i)
 {
 	msg_set_user(m, i);
 }
 
+static inline u32 msg_hdr_sz(struct tipc_msg *m)
+{
+	return msg_bits(m, 0, 21, 0xf) << 2;
+}
+
 static inline void msg_set_hdr_sz(struct tipc_msg *m,u32 n)
 {
 	msg_set_bits(m, 0, 21, 0xf, n>>2);
 }
 
+static inline u32 msg_size(struct tipc_msg *m)
+{
+	return msg_bits(m, 0, 0, 0x1ffff);
+}
+
+static inline u32 msg_data_sz(struct tipc_msg *m)
+{
+	return msg_size(m) - msg_hdr_sz(m);
+}
+
 static inline int msg_non_seq(struct tipc_msg *m)
 {
 	return msg_bits(m, 0, 20, 1);
@@ -162,11 +229,36 @@
  * Word 1
  */
 
+static inline u32 msg_type(struct tipc_msg *m)
+{
+	return msg_bits(m, 1, 29, 0x7);
+}
+
 static inline void msg_set_type(struct tipc_msg *m, u32 n)
 {
 	msg_set_bits(m, 1, 29, 0x7, n);
 }
 
+static inline u32 msg_named(struct tipc_msg *m)
+{
+	return msg_type(m) == TIPC_NAMED_MSG;
+}
+
+static inline u32 msg_mcast(struct tipc_msg *m)
+{
+	return msg_type(m) == TIPC_MCAST_MSG;
+}
+
+static inline u32 msg_connected(struct tipc_msg *m)
+{
+	return msg_type(m) == TIPC_CONN_MSG;
+}
+
+static inline u32 msg_errcode(struct tipc_msg *m)
+{
+	return msg_bits(m, 1, 25, 0xf);
+}
+
 static inline void msg_set_errcode(struct tipc_msg *m, u32 err)
 {
 	msg_set_bits(m, 1, 25, 0xf, err);
@@ -257,31 +349,68 @@
  */
 
 
+static inline u32 msg_prevnode(struct tipc_msg *m)
+{
+	return msg_word(m, 3);
+}
+
 static inline void msg_set_prevnode(struct tipc_msg *m, u32 a)
 {
 	msg_set_word(m, 3, a);
 }
 
+static inline u32 msg_origport(struct tipc_msg *m)
+{
+	return msg_word(m, 4);
+}
+
 static inline void msg_set_origport(struct tipc_msg *m, u32 p)
 {
 	msg_set_word(m, 4, p);
 }
 
+static inline u32 msg_destport(struct tipc_msg *m)
+{
+	return msg_word(m, 5);
+}
+
 static inline void msg_set_destport(struct tipc_msg *m, u32 p)
 {
 	msg_set_word(m, 5, p);
 }
 
+static inline u32 msg_mc_netid(struct tipc_msg *m)
+{
+	return msg_word(m, 5);
+}
+
 static inline void msg_set_mc_netid(struct tipc_msg *m, u32 p)
 {
 	msg_set_word(m, 5, p);
 }
 
+static inline int msg_short(struct tipc_msg *m)
+{
+	return msg_hdr_sz(m) == 24;
+}
+
+static inline u32 msg_orignode(struct tipc_msg *m)
+{
+	if (likely(msg_short(m)))
+		return msg_prevnode(m);
+	return msg_word(m, 6);
+}
+
 static inline void msg_set_orignode(struct tipc_msg *m, u32 a)
 {
 	msg_set_word(m, 6, a);
 }
 
+static inline u32 msg_destnode(struct tipc_msg *m)
+{
+	return msg_word(m, 7);
+}
+
 static inline void msg_set_destnode(struct tipc_msg *m, u32 a)
 {
 	msg_set_word(m, 7, a);
@@ -299,6 +428,11 @@
 	return(msg_destnode(m) ^ msg_orignode(m)) >> 11;
 }
 
+static inline u32 msg_nametype(struct tipc_msg *m)
+{
+	return msg_word(m, 8);
+}
+
 static inline void msg_set_nametype(struct tipc_msg *m, u32 n)
 {
 	msg_set_word(m, 8, n);
@@ -324,6 +458,16 @@
 	msg_set_word(m, 8, n);
 }
 
+static inline u32 msg_nameinst(struct tipc_msg *m)
+{
+	return msg_word(m, 9);
+}
+
+static inline u32 msg_namelower(struct tipc_msg *m)
+{
+	return msg_nameinst(m);
+}
+
 static inline void msg_set_namelower(struct tipc_msg *m, u32 n)
 {
 	msg_set_word(m, 9, n);
@@ -334,11 +478,21 @@
 	msg_set_namelower(m, n);
 }
 
+static inline u32 msg_nameupper(struct tipc_msg *m)
+{
+	return msg_word(m, 10);
+}
+
 static inline void msg_set_nameupper(struct tipc_msg *m, u32 n)
 {
 	msg_set_word(m, 10, n);
 }
 
+static inline unchar *msg_data(struct tipc_msg *m)
+{
+	return ((unchar *)m) + msg_hdr_sz(m);
+}
+
 static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
 {
 	return (struct tipc_msg *)msg_data(m);
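
Note on the accessor style above: every getter reduces to the same pattern --
fetch a 32-bit big-endian header word with ntohl(), shift, and mask; the
setters invert it. A minimal standalone sketch of that pattern (field
positions taken from the msg_importance()/msg_hdr_sz() getters above; the
test values are illustrative only):

	#include <stdio.h>
	#include <stdint.h>
	#include <arpa/inet.h>		/* ntohl()/htonl() */

	/* Extract a `mask`-wide field starting at bit `pos` of a header word. */
	static uint32_t get_bits(uint32_t net_word, uint32_t pos, uint32_t mask)
	{
		return (ntohl(net_word) >> pos) & mask;
	}

	int main(void)
	{
		/* word 0: user = 4 at bits 25-28, header size code 6 at bits 21-24 */
		uint32_t w0 = htonl((4u << 25) | (6u << 21));

		printf("importance: %u\n", get_bits(w0, 25, 0xf));	/* 4 */
		printf("hdr size:   %u\n", get_bits(w0, 21, 0xf) << 2);	/* 24 bytes */
		return 0;
	}
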
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index 7b90717..10ff48b 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -36,9 +36,7 @@
 
 #include "core.h"
 #include "cluster.h"
-#include "dbg.h"
 #include "link.h"
-#include "msg.h"
 #include "name_distr.h"
 
 #define ITEM_SIZE sizeof(struct distr_item)
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 3a8de43..d5adb04 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -36,15 +36,10 @@
 
 #include "core.h"
 #include "config.h"
-#include "dbg.h"
 #include "name_table.h"
 #include "name_distr.h"
-#include "addr.h"
-#include "node_subscr.h"
 #include "subscr.h"
 #include "port.h"
-#include "cluster.h"
-#include "bcast.h"
 
 static int tipc_nametbl_size = 1024;		/* must be a power of 2 */
 
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 1a621cf..c2b4b86 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -35,18 +35,13 @@
  */
 
 #include "core.h"
-#include "bearer.h"
 #include "net.h"
 #include "zone.h"
-#include "addr.h"
 #include "name_table.h"
 #include "name_distr.h"
 #include "subscr.h"
 #include "link.h"
-#include "msg.h"
 #include "port.h"
-#include "bcast.h"
-#include "discover.h"
 #include "config.h"
 
 /*
diff --git a/net/tipc/node.c b/net/tipc/node.c
index b4d87eb..df71dfc 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -37,16 +37,9 @@
 #include "core.h"
 #include "config.h"
 #include "node.h"
-#include "cluster.h"
-#include "net.h"
-#include "addr.h"
-#include "node_subscr.h"
-#include "link.h"
 #include "port.h"
-#include "bearer.h"
 #include "name_distr.h"
 
-void node_print(struct print_buf *buf, struct tipc_node *n_ptr, char *str);
 static void node_lost_contact(struct tipc_node *n_ptr);
 static void node_established_contact(struct tipc_node *n_ptr);
 
diff --git a/net/tipc/node_subscr.c b/net/tipc/node_subscr.c
index 19194d4..018a553 100644
--- a/net/tipc/node_subscr.c
+++ b/net/tipc/node_subscr.c
@@ -35,10 +35,8 @@
  */
 
 #include "core.h"
-#include "dbg.h"
 #include "node_subscr.h"
 #include "node.h"
-#include "addr.h"
 
 /**
  * tipc_nodesub_subscribe - create "node down" subscription for specified node
diff --git a/net/tipc/port.c b/net/tipc/port.c
index 82092ea..7873283 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -36,15 +36,9 @@
 
 #include "core.h"
 #include "config.h"
-#include "dbg.h"
 #include "port.h"
-#include "addr.h"
-#include "link.h"
-#include "node.h"
 #include "name_table.h"
 #include "user_reg.h"
-#include "msg.h"
-#include "bcast.h"
 
 /* Connection management: */
 #define PROBING_INTERVAL 3600000	/* [ms] => 1 h */
@@ -94,7 +88,7 @@
  * tipc_multicast - send a multicast message to local and remote destinations
  */
 
-int tipc_multicast(u32 ref, struct tipc_name_seq const *seq, u32 domain,
+int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
 		   u32 num_sect, struct iovec const *msg_sect)
 {
 	struct tipc_msg *hdr;
@@ -989,13 +983,6 @@
 	return 0;
 }
 
-int tipc_ownidentity(u32 ref, struct tipc_portid *id)
-{
-	id->ref = ref;
-	id->node = tipc_own_addr;
-	return 0;
-}
-
 int tipc_portimportance(u32 ref, unsigned int *importance)
 {
 	struct port *p_ptr;
@@ -1271,16 +1258,11 @@
 }
 
 /**
- * tipc_forward2name - forward message sections to port name
+ * tipc_send2name - send message sections to port name
  */
 
-static int tipc_forward2name(u32 ref,
-			     struct tipc_name const *name,
-			     u32 domain,
-			     u32 num_sect,
-			     struct iovec const *msg_sect,
-			     struct tipc_portid const *orig,
-			     unsigned int importance)
+int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain,
+	   unsigned int num_sect, struct iovec const *msg_sect)
 {
 	struct port *p_ptr;
 	struct tipc_msg *msg;
@@ -1294,14 +1276,12 @@
 
 	msg = &p_ptr->publ.phdr;
 	msg_set_type(msg, TIPC_NAMED_MSG);
-	msg_set_orignode(msg, orig->node);
-	msg_set_origport(msg, orig->ref);
+	msg_set_orignode(msg, tipc_own_addr);
+	msg_set_origport(msg, ref);
 	msg_set_hdr_sz(msg, LONG_H_SIZE);
 	msg_set_nametype(msg, name->type);
 	msg_set_nameinst(msg, name->instance);
 	msg_set_lookup_scope(msg, tipc_addr_scope(domain));
-	if (importance <= TIPC_CRITICAL_IMPORTANCE)
-		msg_set_importance(msg,importance);
 	destport = tipc_nametbl_translate(name->type, name->instance, &destnode);
 	msg_set_destnode(msg, destnode);
 	msg_set_destport(msg, destport);
@@ -1325,33 +1305,11 @@
 }
 
 /**
- * tipc_send2name - send message sections to port name
+ * tipc_send2port - send message sections to port identity
  */
 
-int tipc_send2name(u32 ref,
-		   struct tipc_name const *name,
-		   unsigned int domain,
-		   unsigned int num_sect,
-		   struct iovec const *msg_sect)
-{
-	struct tipc_portid orig;
-
-	orig.ref = ref;
-	orig.node = tipc_own_addr;
-	return tipc_forward2name(ref, name, domain, num_sect, msg_sect, &orig,
-				 TIPC_PORT_IMPORTANCE);
-}
-
-/**
- * tipc_forward2port - forward message sections to port identity
- */
-
-static int tipc_forward2port(u32 ref,
-			     struct tipc_portid const *dest,
-			     unsigned int num_sect,
-			     struct iovec const *msg_sect,
-			     struct tipc_portid const *orig,
-			     unsigned int importance)
+int tipc_send2port(u32 ref, struct tipc_portid const *dest,
+	   unsigned int num_sect, struct iovec const *msg_sect)
 {
 	struct port *p_ptr;
 	struct tipc_msg *msg;
@@ -1363,13 +1321,11 @@
 
 	msg = &p_ptr->publ.phdr;
 	msg_set_type(msg, TIPC_DIRECT_MSG);
-	msg_set_orignode(msg, orig->node);
-	msg_set_origport(msg, orig->ref);
+	msg_set_orignode(msg, tipc_own_addr);
+	msg_set_origport(msg, ref);
 	msg_set_destnode(msg, dest->node);
 	msg_set_destport(msg, dest->ref);
 	msg_set_hdr_sz(msg, DIR_MSG_H_SIZE);
-	if (importance <= TIPC_CRITICAL_IMPORTANCE)
-		msg_set_importance(msg, importance);
 	p_ptr->sent++;
 	if (dest->node == tipc_own_addr)
 		return tipc_port_recv_sections(p_ptr, num_sect, msg_sect);
@@ -1384,31 +1340,11 @@
 }
 
 /**
- * tipc_send2port - send message sections to port identity
+ * tipc_send_buf2port - send message buffer to port identity
  */
 
-int tipc_send2port(u32 ref,
-		   struct tipc_portid const *dest,
-		   unsigned int num_sect,
-		   struct iovec const *msg_sect)
-{
-	struct tipc_portid orig;
-
-	orig.ref = ref;
-	orig.node = tipc_own_addr;
-	return tipc_forward2port(ref, dest, num_sect, msg_sect, &orig,
-				 TIPC_PORT_IMPORTANCE);
-}
-
-/**
- * tipc_forward_buf2port - forward message buffer to port identity
- */
-static int tipc_forward_buf2port(u32 ref,
-				 struct tipc_portid const *dest,
-				 struct sk_buff *buf,
-				 unsigned int dsz,
-				 struct tipc_portid const *orig,
-				 unsigned int importance)
+int tipc_send_buf2port(u32 ref, struct tipc_portid const *dest,
+	       struct sk_buff *buf, unsigned int dsz)
 {
 	struct port *p_ptr;
 	struct tipc_msg *msg;
@@ -1420,13 +1356,11 @@
 
 	msg = &p_ptr->publ.phdr;
 	msg_set_type(msg, TIPC_DIRECT_MSG);
-	msg_set_orignode(msg, orig->node);
-	msg_set_origport(msg, orig->ref);
+	msg_set_orignode(msg, tipc_own_addr);
+	msg_set_origport(msg, ref);
 	msg_set_destnode(msg, dest->node);
 	msg_set_destport(msg, dest->ref);
 	msg_set_hdr_sz(msg, DIR_MSG_H_SIZE);
-	if (importance <= TIPC_CRITICAL_IMPORTANCE)
-		msg_set_importance(msg, importance);
 	msg_set_size(msg, DIR_MSG_H_SIZE + dsz);
 	if (skb_cow(buf, DIR_MSG_H_SIZE))
 		return -ENOMEM;
@@ -1445,20 +1379,3 @@
 	return -ELINKCONG;
 }
 
-/**
- * tipc_send_buf2port - send message buffer to port identity
- */
-
-int tipc_send_buf2port(u32 ref,
-		       struct tipc_portid const *dest,
-		       struct sk_buff *buf,
-		       unsigned int dsz)
-{
-	struct tipc_portid orig;
-
-	orig.ref = ref;
-	orig.node = tipc_own_addr;
-	return tipc_forward_buf2port(ref, dest, buf, dsz, &orig,
-				     TIPC_PORT_IMPORTANCE);
-}
-
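
With the tipc_forward2*() wrappers folded into their tipc_send*() counterparts,
callers can no longer supply an arbitrary origin or importance: the origin is
always stamped from tipc_own_addr and the sending port's own reference, and the
importance stays whatever the port's preformatted header carries. In caller
terms the old and new entry points line up roughly as follows (an illustrative
fragment, not standalone code; my_ref, name and iov are placeholders):

	/* before: origin assembled by hand, importance passed explicitly */
	struct tipc_portid orig = { .ref = my_ref, .node = tipc_own_addr };
	tipc_forward2name(my_ref, &name, domain, 1, &iov, &orig,
			  TIPC_PORT_IMPORTANCE);

	/* after: origin and importance are implied by the sending port */
	tipc_send2name(my_ref, &name, domain, 1, &iov);
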
diff --git a/net/tipc/port.h b/net/tipc/port.h
index 73bbf44..3a807fc 100644
--- a/net/tipc/port.h
+++ b/net/tipc/port.h
@@ -37,13 +37,44 @@
 #ifndef _TIPC_PORT_H
 #define _TIPC_PORT_H
 
-#include "core.h"
 #include "ref.h"
 #include "net.h"
 #include "msg.h"
-#include "dbg.h"
 #include "node_subscr.h"
 
+#define TIPC_FLOW_CONTROL_WIN 512
+
+typedef void (*tipc_msg_err_event) (void *usr_handle, u32 portref,
+		struct sk_buff **buf, unsigned char const *data,
+		unsigned int size, int reason,
+		struct tipc_portid const *attmpt_destid);
+
+typedef void (*tipc_named_msg_err_event) (void *usr_handle, u32 portref,
+		struct sk_buff **buf, unsigned char const *data,
+		unsigned int size, int reason,
+		struct tipc_name_seq const *attmpt_dest);
+
+typedef void (*tipc_conn_shutdown_event) (void *usr_handle, u32 portref,
+		struct sk_buff **buf, unsigned char const *data,
+		unsigned int size, int reason);
+
+typedef void (*tipc_msg_event) (void *usr_handle, u32 portref,
+		struct sk_buff **buf, unsigned char const *data,
+		unsigned int size, unsigned int importance,
+		struct tipc_portid const *origin);
+
+typedef void (*tipc_named_msg_event) (void *usr_handle, u32 portref,
+		struct sk_buff **buf, unsigned char const *data,
+		unsigned int size, unsigned int importance,
+		struct tipc_portid const *orig,
+		struct tipc_name_seq const *dest);
+
+typedef void (*tipc_conn_msg_event) (void *usr_handle, u32 portref,
+		struct sk_buff **buf, unsigned char const *data,
+		unsigned int size);
+
+typedef void (*tipc_continue_event) (void *usr_handle, u32 portref);
+
 /**
  * struct user_port - TIPC user port (used with native API)
  * @user_ref: id of user who created user port
@@ -68,6 +99,34 @@
 };
 
 /**
+ * struct tipc_port - TIPC port info available to socket API
+ * @usr_handle: pointer to additional user-defined information about port
+ * @lock: pointer to spinlock for controlling access to port
+ * @connected: non-zero if port is currently connected to a peer port
+ * @conn_type: TIPC type used when connection was established
+ * @conn_instance: TIPC instance used when connection was established
+ * @conn_unacked: number of unacknowledged messages received from peer port
+ * @published: non-zero if port has one or more associated names
+ * @congested: non-zero if port cannot send due to link or port congestion
+ * @max_pkt: maximum packet size "hint" used when building messages sent by port
+ * @ref: unique reference to port in TIPC object registry
+ * @phdr: preformatted message header used when sending messages
+ */
+struct tipc_port {
+	void *usr_handle;
+	spinlock_t *lock;
+	int connected;
+	u32 conn_type;
+	u32 conn_instance;
+	u32 conn_unacked;
+	int published;
+	u32 congested;
+	u32 max_pkt;
+	u32 ref;
+	struct tipc_msg phdr;
+};
+
+/**
  * struct port - TIPC port structure
  * @publ: TIPC port info available to privileged users
  * @port_list: adjacent ports in TIPC's global list of ports
@@ -109,11 +168,76 @@
 extern spinlock_t tipc_port_list_lock;
 struct port_list;
 
+/*
+ * TIPC port manipulation routines
+ */
+struct tipc_port *tipc_createport_raw(void *usr_handle,
+		u32 (*dispatcher)(struct tipc_port *, struct sk_buff *),
+		void (*wakeup)(struct tipc_port *), const u32 importance);
+
+int tipc_reject_msg(struct sk_buff *buf, u32 err);
+
+int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode);
+
+void tipc_acknowledge(u32 port_ref, u32 ack);
+
+int tipc_createport(unsigned int tipc_user, void *usr_handle,
+		unsigned int importance, tipc_msg_err_event error_cb,
+		tipc_named_msg_err_event named_error_cb,
+		tipc_conn_shutdown_event conn_error_cb, tipc_msg_event msg_cb,
+		tipc_named_msg_event named_msg_cb,
+		tipc_conn_msg_event conn_msg_cb,
+		tipc_continue_event continue_event_cb, u32 *portref);
+
+int tipc_deleteport(u32 portref);
+
+int tipc_portimportance(u32 portref, unsigned int *importance);
+int tipc_set_portimportance(u32 portref, unsigned int importance);
+
+int tipc_portunreliable(u32 portref, unsigned int *isunreliable);
+int tipc_set_portunreliable(u32 portref, unsigned int isunreliable);
+
+int tipc_portunreturnable(u32 portref, unsigned int *isunreturnable);
+int tipc_set_portunreturnable(u32 portref, unsigned int isunreturnable);
+
+int tipc_publish(u32 portref, unsigned int scope,
+		struct tipc_name_seq const *name_seq);
+int tipc_withdraw(u32 portref, unsigned int scope,
+		struct tipc_name_seq const *name_seq);
+
+int tipc_connect2port(u32 portref, struct tipc_portid const *port);
+
+int tipc_disconnect(u32 portref);
+
+int tipc_shutdown(u32 ref);
+
+
+/*
+ * The following routines require that the port be locked on entry
+ */
+int tipc_disconnect_port(struct tipc_port *tp_ptr);
+
+/*
+ * TIPC messaging routines
+ */
+int tipc_send(u32 portref, unsigned int num_sect, struct iovec const *msg_sect);
+
+int tipc_send2name(u32 portref, struct tipc_name const *name, u32 domain,
+		unsigned int num_sect, struct iovec const *msg_sect);
+
+int tipc_send2port(u32 portref, struct tipc_portid const *dest,
+		unsigned int num_sect, struct iovec const *msg_sect);
+
+int tipc_send_buf2port(u32 portref, struct tipc_portid const *dest,
+		struct sk_buff *buf, unsigned int dsz);
+
+int tipc_multicast(u32 portref, struct tipc_name_seq const *seq,
+		unsigned int section_count, struct iovec const *msg);
+
 int tipc_port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr,
 			      struct iovec const *msg_sect, u32 num_sect,
 			      int err);
 struct sk_buff *tipc_port_get_ports(void);
-struct sk_buff *port_show_stats(const void *req_tlv_area, int req_tlv_space);
 void tipc_port_recv_proto_msg(struct sk_buff *buf);
 void tipc_port_recv_mcast(struct sk_buff *buf, struct port_list *dp);
 void tipc_port_reinit(void);
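
The declarations added here make port.h self-contained for native-API users:
port creation, name binding and messaging are all reachable without the old
<net/tipc/*.h> headers. A hypothetical native user would combine them roughly
like this (fragment only; my_dispatcher/my_wakeup are stand-ins and error
handling is elided):

	struct tipc_port *p;
	struct tipc_name_seq seq = { .type = 1000, .lower = 1, .upper = 1 };

	p = tipc_createport_raw(NULL, my_dispatcher, my_wakeup,
				TIPC_LOW_IMPORTANCE);
	if (p) {
		tipc_publish(p->ref, TIPC_ZONE_SCOPE, &seq);
		/* ... tipc_send(p->ref, num_sect, msg_sect); ... */
		tipc_withdraw(p->ref, TIPC_ZONE_SCOPE, &seq);
		tipc_deleteport(p->ref);
	}
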
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 33217fc..cd0bb77 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -49,10 +49,9 @@
 
 #include <linux/tipc.h>
 #include <linux/tipc_config.h>
-#include <net/tipc/tipc_msg.h>
-#include <net/tipc/tipc_port.h>
 
 #include "core.h"
+#include "port.h"
 
 #define SS_LISTENING	-1	/* socket is listening */
 #define SS_READY	-2	/* socket is connectionless */
@@ -396,6 +395,7 @@
 	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
 	struct tipc_sock *tsock = tipc_sk(sock->sk);
 
+	memset(addr, 0, sizeof(*addr));
 	if (peer) {
 		if ((sock->state != SS_CONNECTED) &&
 			((peer != 2) || (sock->state != SS_DISCONNECTING)))
@@ -403,7 +403,8 @@
 		addr->addr.id.ref = tsock->peer_name.ref;
 		addr->addr.id.node = tsock->peer_name.node;
 	} else {
-		tipc_ownidentity(tsock->p->ref, &addr->addr.id);
+		addr->addr.id.ref = tsock->p->ref;
+		addr->addr.id.node = tipc_own_addr;
 	}
 
 	*uaddr_len = sizeof(*addr);
@@ -596,7 +597,6 @@
 				break;
 			res = tipc_multicast(tport->ref,
 					     &dest->addr.nameseq,
-					     0,
 					     m->msg_iovlen,
 					     m->msg_iov);
 		}
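
The memset() added in the address-query path is more than tidiness: struct
sockaddr_tipc contains a union and padding, and each branch fills in only a
few members, so without the zeroing the remaining bytes of kernel stack would
be copied out to userspace. (The tipc_multicast() call below it simply loses
its unused domain argument, matching the port.c change above.) The defensive
pattern in general form, as a minimal sketch with a made-up struct:

	#include <string.h>

	struct addr_out {
		unsigned short family;
		unsigned int   ref;
		unsigned int   node;
		unsigned char  pad[8];	/* would leak stack bytes if left as-is */
	};

	static void fill_addr(struct addr_out *a, unsigned int ref,
			      unsigned int node)
	{
		memset(a, 0, sizeof(*a));	/* zero everything first...      */
		a->family = 30;			/* AF_TIPC                       */
		a->ref = ref;			/* ...then set only known fields */
		a->node = node;
	}
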
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 3331396..23f43d0 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -35,10 +35,8 @@
  */
 
 #include "core.h"
-#include "dbg.h"
 #include "name_table.h"
-#include "port.h"
-#include "ref.h"
+#include "user_reg.h"
 #include "subscr.h"
 
 /**
@@ -544,14 +542,14 @@
 int tipc_subscr_start(void)
 {
 	struct tipc_name_seq seq = {TIPC_TOP_SRV, TIPC_TOP_SRV, TIPC_TOP_SRV};
-	int res = -1;
+	int res;
 
 	memset(&topsrv, 0, sizeof (topsrv));
 	spin_lock_init(&topsrv.lock);
 	INIT_LIST_HEAD(&topsrv.subscriber_list);
 
 	spin_lock_bh(&topsrv.lock);
-	res = tipc_attach(&topsrv.user_ref, NULL, NULL);
+	res = tipc_attach(&topsrv.user_ref);
 	if (res) {
 		spin_unlock_bh(&topsrv.lock);
 		return res;
diff --git a/net/tipc/user_reg.c b/net/tipc/user_reg.c
index 5069288..2e2702e 100644
--- a/net/tipc/user_reg.c
+++ b/net/tipc/user_reg.c
@@ -50,15 +50,11 @@
 /**
  * struct tipc_user - registered TIPC user info
  * @next: index of next free registry entry (or -1 for an allocated entry)
- * @callback: ptr to routine to call when TIPC mode changes (NULL if none)
- * @usr_handle: user-defined value passed to callback routine
  * @ports: list of user ports owned by the user
  */
 
 struct tipc_user {
 	int next;
-	tipc_mode_event callback;
-	void *usr_handle;
 	struct list_head ports;
 };
 
@@ -95,41 +91,12 @@
 }
 
 /**
- * reg_callback - inform TIPC user about current operating mode
- */
-
-static void reg_callback(struct tipc_user *user_ptr)
-{
-	tipc_mode_event cb;
-	void *arg;
-
-	spin_lock_bh(&reg_lock);
-	cb = user_ptr->callback;
-	arg = user_ptr->usr_handle;
-	spin_unlock_bh(&reg_lock);
-
-	if (cb)
-		cb(arg, tipc_mode, tipc_own_addr);
-}
-
-/**
  * tipc_reg_start - activate TIPC user registry
  */
 
 int tipc_reg_start(void)
 {
-	u32 u;
-	int res;
-
-	if ((res = reg_init()))
-		return res;
-
-	for (u = 1; u <= MAX_USERID; u++) {
-		if (users[u].callback)
-			tipc_k_signal((Handler)reg_callback,
-				      (unsigned long)&users[u]);
-	}
-	return 0;
+	return reg_init();
 }
 
 /**
@@ -138,15 +105,9 @@
 
 void tipc_reg_stop(void)
 {
-	int id;
-
 	if (!users)
 		return;
 
-	for (id = 1; id <= MAX_USERID; id++) {
-		if (users[id].callback)
-			reg_callback(&users[id]);
-	}
 	kfree(users);
 	users = NULL;
 }
@@ -157,12 +118,10 @@
  * NOTE: This routine may be called when TIPC is inactive.
  */
 
-int tipc_attach(u32 *userid, tipc_mode_event cb, void *usr_handle)
+int tipc_attach(u32 *userid)
 {
 	struct tipc_user *user_ptr;
 
-	if ((tipc_mode == TIPC_NOT_RUNNING) && !cb)
-		return -ENOPROTOOPT;
 	if (!users)
 		reg_init();
 
@@ -177,13 +136,9 @@
 	user_ptr->next = -1;
 	spin_unlock_bh(&reg_lock);
 
-	user_ptr->callback = cb;
-	user_ptr->usr_handle = usr_handle;
 	INIT_LIST_HEAD(&user_ptr->ports);
 	atomic_inc(&tipc_user_count);
 
-	if (cb && (tipc_mode != TIPC_NOT_RUNNING))
-		tipc_k_signal((Handler)reg_callback, (unsigned long)user_ptr);
 	return 0;
 }
 
@@ -207,7 +162,6 @@
 	}
 
 	user_ptr = &users[userid];
-	user_ptr->callback = NULL;
 	INIT_LIST_HEAD(&ports_temp);
 	list_splice(&user_ptr->ports, &ports_temp);
 	user_ptr->next = next_free_user;
diff --git a/net/tipc/user_reg.h b/net/tipc/user_reg.h
index 81dc12e..109eed0 100644
--- a/net/tipc/user_reg.h
+++ b/net/tipc/user_reg.h
@@ -42,6 +42,9 @@
 int tipc_reg_start(void);
 void tipc_reg_stop(void);
 
+int tipc_attach(unsigned int *userref);
+void tipc_detach(unsigned int userref);
+
 int tipc_reg_add_port(struct user_port *up_ptr);
 int tipc_reg_remove_port(struct user_port *up_ptr);
 
diff --git a/net/tipc/zone.c b/net/tipc/zone.c
index 83f8b5e..1b61ca8 100644
--- a/net/tipc/zone.c
+++ b/net/tipc/zone.c
@@ -36,9 +36,6 @@
 
 #include "core.h"
 #include "zone.h"
-#include "net.h"
-#include "addr.h"
-#include "node_subscr.h"
 #include "cluster.h"
 #include "node.h"
 
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 3c95304..417d7a6 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -316,7 +316,8 @@
 	if (unix_writable(sk)) {
 		wq = rcu_dereference(sk->sk_wq);
 		if (wq_has_sleeper(wq))
-			wake_up_interruptible_sync(&wq->wait);
+			wake_up_interruptible_sync_poll(&wq->wait,
+				POLLOUT | POLLWRNORM | POLLWRBAND);
 		sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
 	}
 	rcu_read_unlock();
@@ -1343,9 +1344,25 @@
 	sock_wfree(skb);
 }
 
+#define MAX_RECURSION_LEVEL 4
+
 static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
 {
 	int i;
+	unsigned char max_level = 0;
+	int unix_sock_count = 0;
+
+	for (i = scm->fp->count - 1; i >= 0; i--) {
+		struct sock *sk = unix_get_socket(scm->fp->fp[i]);
+
+		if (sk) {
+			unix_sock_count++;
+			max_level = max(max_level,
+					unix_sk(sk)->recursion_level);
+		}
+	}
+	if (unlikely(max_level > MAX_RECURSION_LEVEL))
+		return -ETOOMANYREFS;
 
 	/*
 	 * Need to duplicate file references for the sake of garbage
@@ -1356,9 +1373,11 @@
 	if (!UNIXCB(skb).fp)
 		return -ENOMEM;
 
-	for (i = scm->fp->count-1; i >= 0; i--)
-		unix_inflight(scm->fp->fp[i]);
-	return 0;
+	if (unix_sock_count) {
+		for (i = scm->fp->count - 1; i >= 0; i--)
+			unix_inflight(scm->fp->fp[i]);
+	}
+	return max_level;
 }
 
 static int unix_scm_to_skb(struct scm_cookie *scm, struct sk_buff *skb, bool send_fds)
@@ -1393,6 +1412,7 @@
 	struct sk_buff *skb;
 	long timeo;
 	struct scm_cookie tmp_scm;
+	int max_level;
 
 	if (NULL == siocb->scm)
 		siocb->scm = &tmp_scm;
@@ -1431,8 +1451,9 @@
 		goto out;
 
 	err = unix_scm_to_skb(siocb->scm, skb, true);
-	if (err)
+	if (err < 0)
 		goto out_free;
+	max_level = err + 1;
 	unix_get_secdata(siocb->scm, skb);
 
 	skb_reset_transport_header(skb);
@@ -1514,6 +1535,8 @@
 	if (sock_flag(other, SOCK_RCVTSTAMP))
 		__net_timestamp(skb);
 	skb_queue_tail(&other->sk_receive_queue, skb);
+	if (max_level > unix_sk(other)->recursion_level)
+		unix_sk(other)->recursion_level = max_level;
 	unix_state_unlock(other);
 	other->sk_data_ready(other, len);
 	sock_put(other);
@@ -1544,6 +1567,7 @@
 	int sent = 0;
 	struct scm_cookie tmp_scm;
 	bool fds_sent = false;
+	int max_level;
 
 	if (NULL == siocb->scm)
 		siocb->scm = &tmp_scm;
@@ -1607,10 +1631,11 @@
 
 		/* Only send the fds in the first buffer */
 		err = unix_scm_to_skb(siocb->scm, skb, !fds_sent);
-		if (err) {
+		if (err < 0) {
 			kfree_skb(skb);
 			goto out_err;
 		}
+		max_level = err + 1;
 		fds_sent = true;
 
 		err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
@@ -1626,6 +1651,8 @@
 			goto pipe_err_free;
 
 		skb_queue_tail(&other->sk_receive_queue, skb);
+		if (max_level > unix_sk(other)->recursion_level)
+			unix_sk(other)->recursion_level = max_level;
 		unix_state_unlock(other);
 		other->sk_data_ready(other, size);
 		sent += size;
@@ -1710,7 +1737,8 @@
 		goto out_unlock;
 	}
 
-	wake_up_interruptible_sync(&u->peer_wait);
+	wake_up_interruptible_sync_poll(&u->peer_wait,
+					POLLOUT | POLLWRNORM | POLLWRBAND);
 
 	if (msg->msg_name)
 		unix_copy_addr(msg, skb->sk);
@@ -1845,6 +1873,7 @@
 		unix_state_lock(sk);
 		skb = skb_dequeue(&sk->sk_receive_queue);
 		if (skb == NULL) {
+			unix_sk(sk)->recursion_level = 0;
 			if (copied >= target)
 				goto unlock;
 
@@ -2072,13 +2101,12 @@
 	if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
 		mask |= POLLERR;
 	if (sk->sk_shutdown & RCV_SHUTDOWN)
-		mask |= POLLRDHUP;
+		mask |= POLLRDHUP | POLLIN | POLLRDNORM;
 	if (sk->sk_shutdown == SHUTDOWN_MASK)
 		mask |= POLLHUP;
 
 	/* readable? */
-	if (!skb_queue_empty(&sk->sk_receive_queue) ||
-	    (sk->sk_shutdown & RCV_SHUTDOWN))
+	if (!skb_queue_empty(&sk->sk_receive_queue))
 		mask |= POLLIN | POLLRDNORM;
 
 	/* Connection-based need to check for termination and startup */
@@ -2090,20 +2118,19 @@
 			return mask;
 	}
 
-	/* writable? */
-	writable = unix_writable(sk);
-	if (writable) {
-		other = unix_peer_get(sk);
-		if (other) {
-			if (unix_peer(other) != sk) {
-				sock_poll_wait(file, &unix_sk(other)->peer_wait,
-					  wait);
-				if (unix_recvq_full(other))
-					writable = 0;
-			}
+	/* No write status requested, avoid expensive OUT tests. */
+	if (wait && !(wait->key & (POLLWRBAND | POLLWRNORM | POLLOUT)))
+		return mask;
 
-			sock_put(other);
+	writable = unix_writable(sk);
+	other = unix_peer_get(sk);
+	if (other) {
+		if (unix_peer(other) != sk) {
+			sock_poll_wait(file, &unix_sk(other)->peer_wait, wait);
+			if (unix_recvq_full(other))
+				writable = 0;
 		}
+		sock_put(other);
 	}
 
 	if (writable)
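
Two independent fixes meet in this file. The poll change narrows when the
expensive peer-queue writability test runs: if the caller's wait->key carries
no POLLOUT-class bits, the function returns early. The recursion level, in
turn, exists because SCM_RIGHTS can carry a unix-socket fd over another unix
socket, nesting in-flight references that the garbage collector must chase;
MAX_RECURSION_LEVEL caps that nesting, and sendmsg() now fails with
-ETOOMANYREFS beyond it. A userspace sketch of the construct being limited
(minimal, error checks elided):

	#include <string.h>
	#include <sys/socket.h>
	#include <sys/uio.h>

	/* Pass fd `payload` across the connected unix socket `sock`. */
	static int send_fd(int sock, int payload)
	{
		char data = 'x', ctrl[CMSG_SPACE(sizeof(int))];
		struct iovec iov = { .iov_base = &data, .iov_len = 1 };
		struct msghdr msg = {
			.msg_iov = &iov, .msg_iovlen = 1,
			.msg_control = ctrl, .msg_controllen = sizeof(ctrl),
		};
		struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);

		cm->cmsg_level = SOL_SOCKET;
		cm->cmsg_type = SCM_RIGHTS;
		cm->cmsg_len = CMSG_LEN(sizeof(int));
		memcpy(CMSG_DATA(cm), &payload, sizeof(int));
		/* `payload` may itself be one end of a unix socketpair */
		return sendmsg(sock, &msg, 0);
	}
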
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index c8df6fd..f89f83b 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -96,7 +96,7 @@
 unsigned int unix_tot_inflight;
 
 
-static struct sock *unix_get_socket(struct file *filp)
+struct sock *unix_get_socket(struct file *filp)
 {
 	struct sock *u_sock = NULL;
 	struct inode *inode = filp->f_path.dentry->d_inode;
@@ -259,9 +259,16 @@
 }
 
 static bool gc_in_progress = false;
+#define UNIX_INFLIGHT_TRIGGER_GC 16000
 
 void wait_for_unix_gc(void)
 {
+	/*
+	 * If number of inflight sockets is insane,
+	 * force a garbage collect right now.
+	 */
+	if (unix_tot_inflight > UNIX_INFLIGHT_TRIGGER_GC && !gc_in_progress)
+		unix_gc();
 	wait_event(unix_gc_wait, gc_in_progress == false);
 }
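
wait_for_unix_gc() used to wait only for a collection started elsewhere; now a
sender that observes an excessive in-flight fd count (UNIX_INFLIGHT_TRIGGER_GC)
kicks the collector itself, so a flood of SCM_RIGHTS messages cannot grow
kernel state without bound. The throttle shape, generically (a sketch with
invented names):

	/* force cleanup past a threshold, then wait for it to finish */
	if (resource_count > FORCE_THRESHOLD && !cleanup_running)
		run_cleanup();
	wait_event(cleanup_wq, !cleanup_running);
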
 
diff --git a/net/wanrouter/Makefile b/net/wanrouter/Makefile
index 9f188ab..4da14bc 100644
--- a/net/wanrouter/Makefile
+++ b/net/wanrouter/Makefile
@@ -4,4 +4,4 @@
 
 obj-$(CONFIG_WAN_ROUTER) += wanrouter.o
 
-wanrouter-objs :=  wanproc.o wanmain.o
+wanrouter-y :=  wanproc.o wanmain.o
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index f7af98d..ad96ee9 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -1357,11 +1357,11 @@
 	void __user *argp = (void __user *)arg;
 	int rc;
 
-	lock_kernel();
 	switch (cmd) {
 		case TIOCOUTQ: {
-			int amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
+			int amount;
 
+			amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
 			if (amount < 0)
 				amount = 0;
 			rc = put_user(amount, (unsigned int __user *)argp);
@@ -1375,8 +1375,10 @@
 			 * These two are safe on a single CPU system as
 			 * only user tasks fiddle here
 			 */
+			lock_sock(sk);
 			if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL)
 				amount = skb->len;
+			release_sock(sk);
 			rc = put_user(amount, (unsigned int __user *)argp);
 			break;
 		}
@@ -1422,9 +1424,11 @@
 			rc = x25_subscr_ioctl(cmd, argp);
 			break;
 		case SIOCX25GFACILITIES: {
-			struct x25_facilities fac = x25->facilities;
-			rc = copy_to_user(argp, &fac,
-					  sizeof(fac)) ? -EFAULT : 0;
+			lock_sock(sk);
+			rc = copy_to_user(argp, &x25->facilities,
+						sizeof(x25->facilities))
+						? -EFAULT : 0;
+			release_sock(sk);
 			break;
 		}
 
@@ -1435,18 +1439,19 @@
 					   sizeof(facilities)))
 				break;
 			rc = -EINVAL;
+			lock_sock(sk);
 			if (sk->sk_state != TCP_LISTEN &&
 			    sk->sk_state != TCP_CLOSE)
-				break;
+				goto out_fac_release;
 			if (facilities.pacsize_in < X25_PS16 ||
 			    facilities.pacsize_in > X25_PS4096)
-				break;
+				goto out_fac_release;
 			if (facilities.pacsize_out < X25_PS16 ||
 			    facilities.pacsize_out > X25_PS4096)
-				break;
+				goto out_fac_release;
 			if (facilities.winsize_in < 1 ||
 			    facilities.winsize_in > 127)
-				break;
+				goto out_fac_release;
 			if (facilities.throughput) {
 				int out = facilities.throughput & 0xf0;
 				int in  = facilities.throughput & 0x0f;
@@ -1454,24 +1459,28 @@
 					facilities.throughput |=
 						X25_DEFAULT_THROUGHPUT << 4;
 				else if (out < 0x30 || out > 0xD0)
-					break;
+					goto out_fac_release;
 				if (!in)
 					facilities.throughput |=
 						X25_DEFAULT_THROUGHPUT;
 				else if (in < 0x03 || in > 0x0D)
-					break;
+					goto out_fac_release;
 			}
 			if (facilities.reverse &&
 				(facilities.reverse & 0x81) != 0x81)
-				break;
+				goto out_fac_release;
 			x25->facilities = facilities;
 			rc = 0;
+out_fac_release:
+			release_sock(sk);
 			break;
 		}
 
 		case SIOCX25GDTEFACILITIES: {
+			lock_sock(sk);
 			rc = copy_to_user(argp, &x25->dte_facilities,
 						sizeof(x25->dte_facilities));
+			release_sock(sk);
 			if (rc)
 				rc = -EFAULT;
 			break;
@@ -1483,26 +1492,31 @@
 			if (copy_from_user(&dtefacs, argp, sizeof(dtefacs)))
 				break;
 			rc = -EINVAL;
+			lock_sock(sk);
 			if (sk->sk_state != TCP_LISTEN &&
 					sk->sk_state != TCP_CLOSE)
-				break;
+				goto out_dtefac_release;
 			if (dtefacs.calling_len > X25_MAX_AE_LEN)
-				break;
+				goto out_dtefac_release;
 			if (dtefacs.calling_ae == NULL)
-				break;
+				goto out_dtefac_release;
 			if (dtefacs.called_len > X25_MAX_AE_LEN)
-				break;
+				goto out_dtefac_release;
 			if (dtefacs.called_ae == NULL)
-				break;
+				goto out_dtefac_release;
 			x25->dte_facilities = dtefacs;
 			rc = 0;
+out_dtefac_release:
+			release_sock(sk);
 			break;
 		}
 
 		case SIOCX25GCALLUSERDATA: {
-			struct x25_calluserdata cud = x25->calluserdata;
-			rc = copy_to_user(argp, &cud,
-					  sizeof(cud)) ? -EFAULT : 0;
+			lock_sock(sk);
+			rc = copy_to_user(argp, &x25->calluserdata,
+					sizeof(x25->calluserdata))
+					? -EFAULT : 0;
+			release_sock(sk);
 			break;
 		}
 
@@ -1516,16 +1530,19 @@
 			rc = -EINVAL;
 			if (calluserdata.cudlength > X25_MAX_CUD_LEN)
 				break;
+			lock_sock(sk);
 			x25->calluserdata = calluserdata;
+			release_sock(sk);
 			rc = 0;
 			break;
 		}
 
 		case SIOCX25GCAUSEDIAG: {
-			struct x25_causediag causediag;
-			causediag = x25->causediag;
-			rc = copy_to_user(argp, &causediag,
-					  sizeof(causediag)) ? -EFAULT : 0;
+			lock_sock(sk);
+			rc = copy_to_user(argp, &x25->causediag,
+					sizeof(x25->causediag))
+					? -EFAULT : 0;
+			release_sock(sk);
 			break;
 		}
 
@@ -1534,7 +1551,9 @@
 			rc = -EFAULT;
 			if (copy_from_user(&causediag, argp, sizeof(causediag)))
 				break;
+			lock_sock(sk);
 			x25->causediag = causediag;
+			release_sock(sk);
 			rc = 0;
 			break;
 
@@ -1543,31 +1562,37 @@
 		case SIOCX25SCUDMATCHLEN: {
 			struct x25_subaddr sub_addr;
 			rc = -EINVAL;
+			lock_sock(sk);
 			if(sk->sk_state != TCP_CLOSE)
-				break;
+				goto out_cud_release;
 			rc = -EFAULT;
 			if (copy_from_user(&sub_addr, argp,
 					sizeof(sub_addr)))
-				break;
+				goto out_cud_release;
 			rc = -EINVAL;
 			if(sub_addr.cudmatchlength > X25_MAX_CUD_LEN)
-				break;
+				goto out_cud_release;
 			x25->cudmatchlength = sub_addr.cudmatchlength;
 			rc = 0;
+out_cud_release:
+			release_sock(sk);
 			break;
 		}
 
 		case SIOCX25CALLACCPTAPPRV: {
 			rc = -EINVAL;
+			lock_kernel();
 			if (sk->sk_state != TCP_CLOSE)
 				break;
 			clear_bit(X25_ACCPT_APPRV_FLAG, &x25->flags);
+			unlock_kernel();
 			rc = 0;
 			break;
 		}
 
 		case SIOCX25SENDCALLACCPT:  {
 			rc = -EINVAL;
+			lock_kernel();
 			if (sk->sk_state != TCP_ESTABLISHED)
 				break;
 			/* must call accptapprv above */
@@ -1575,6 +1600,7 @@
 				break;
 			x25_write_internal(sk, X25_CALL_ACCEPTED);
 			x25->state = X25_STATE_3;
+			unlock_kernel();
 			rc = 0;
 			break;
 		}
@@ -1583,7 +1609,6 @@
 			rc = -ENOIOCTLCMD;
 			break;
 	}
-	unlock_kernel();
 
 	return rc;
 }
@@ -1619,16 +1644,20 @@
 	dev_put(dev);
 
 	if (cmd == SIOCX25GSUBSCRIP) {
+		read_lock_bh(&x25_neigh_list_lock);
 		x25_subscr.extended = nb->extended;
 		x25_subscr.global_facil_mask = nb->global_facil_mask;
+		read_unlock_bh(&x25_neigh_list_lock);
 		rc = copy_to_user(x25_subscr32, &x25_subscr,
 				sizeof(*x25_subscr32)) ? -EFAULT : 0;
 	} else {
 		rc = -EINVAL;
 		if (x25_subscr.extended == 0 || x25_subscr.extended == 1) {
 			rc = 0;
+			write_lock_bh(&x25_neigh_list_lock);
 			nb->extended = x25_subscr.extended;
 			nb->global_facil_mask = x25_subscr.global_facil_mask;
+			write_unlock_bh(&x25_neigh_list_lock);
 		}
 	}
 	x25_neigh_put(nb);
@@ -1654,19 +1683,15 @@
 		break;
 	case SIOCGSTAMP:
 		rc = -EINVAL;
-		lock_kernel();
 		if (sk)
 			rc = compat_sock_get_timestamp(sk,
 					(struct timeval __user*)argp);
-		unlock_kernel();
 		break;
 	case SIOCGSTAMPNS:
 		rc = -EINVAL;
-		lock_kernel();
 		if (sk)
 			rc = compat_sock_get_timestampns(sk,
 					(struct timespec __user*)argp);
-		unlock_kernel();
 		break;
 	case SIOCGIFADDR:
 	case SIOCSIFADDR:
@@ -1685,22 +1710,16 @@
 		rc = -EPERM;
 		if (!capable(CAP_NET_ADMIN))
 			break;
-		lock_kernel();
 		rc = x25_route_ioctl(cmd, argp);
-		unlock_kernel();
 		break;
 	case SIOCX25GSUBSCRIP:
-		lock_kernel();
 		rc = compat_x25_subscr_ioctl(cmd, argp);
-		unlock_kernel();
 		break;
 	case SIOCX25SSUBSCRIP:
 		rc = -EPERM;
 		if (!capable(CAP_NET_ADMIN))
 			break;
-		lock_kernel();
 		rc = compat_x25_subscr_ioctl(cmd, argp);
-		unlock_kernel();
 		break;
 	case SIOCX25GFACILITIES:
 	case SIOCX25SFACILITIES:
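
The common thread in the af_x25.c ioctl changes is replacing the big kernel
lock with object-scoped locking: lock_sock() around socket-state reads and
writes, and x25_neigh_list_lock (exported from x25_link.c below) around
neighbour fields. The skeleton every converted case follows (fragment; sk is
the target socket):

	lock_sock(sk);
	rc = -EINVAL;
	if (sk->sk_state != TCP_LISTEN && sk->sk_state != TCP_CLOSE)
		goto out_release;	/* every exit path drops the lock */
	/* ... validate and commit the new settings ... */
	rc = 0;
out_release:
	release_sock(sk);

Note that the two cases still using lock_kernel() (SIOCX25CALLACCPTAPPRV and
SIOCX25SENDCALLACCPT) can break out on a failed state check without reaching
unlock_kernel(), and the switch no longer ends with one -- an imbalance left
for a follow-up.
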
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
index 771bab0..55187c8 100644
--- a/net/x25/x25_facilities.c
+++ b/net/x25/x25_facilities.c
@@ -61,6 +61,8 @@
 	while (len > 0) {
 		switch (*p & X25_FAC_CLASS_MASK) {
 		case X25_FAC_CLASS_A:
+			if (len < 2)
+				return 0;
 			switch (*p) {
 			case X25_FAC_REVERSE:
 				if((p[1] & 0x81) == 0x81) {
@@ -104,6 +106,8 @@
 			len -= 2;
 			break;
 		case X25_FAC_CLASS_B:
+			if (len < 3)
+				return 0;
 			switch (*p) {
 			case X25_FAC_PACKET_SIZE:
 				facilities->pacsize_in  = p[1];
@@ -125,6 +129,8 @@
 			len -= 3;
 			break;
 		case X25_FAC_CLASS_C:
+			if (len < 4)
+				return 0;
 			printk(KERN_DEBUG "X.25: unknown facility %02X, "
 			       "values %02X, %02X, %02X\n",
 			       p[0], p[1], p[2], p[3]);
@@ -132,26 +138,26 @@
 			len -= 4;
 			break;
 		case X25_FAC_CLASS_D:
+			if (len < p[1] + 2)
+				return 0;
 			switch (*p) {
 			case X25_FAC_CALLING_AE:
-				if (p[1] > X25_MAX_DTE_FACIL_LEN)
-					break;
+				if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
+					return 0;
 				dte_facs->calling_len = p[2];
 				memcpy(dte_facs->calling_ae, &p[3], p[1] - 1);
 				*vc_fac_mask |= X25_MASK_CALLING_AE;
 				break;
 			case X25_FAC_CALLED_AE:
-				if (p[1] > X25_MAX_DTE_FACIL_LEN)
-					break;
+				if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
+					return 0;
 				dte_facs->called_len = p[2];
 				memcpy(dte_facs->called_ae, &p[3], p[1] - 1);
 				*vc_fac_mask |= X25_MASK_CALLED_AE;
 				break;
 			default:
 				printk(KERN_DEBUG "X.25: unknown facility %02X,"
-					"length %d, values %02X, %02X, "
-					"%02X, %02X\n",
-					p[0], p[1], p[2], p[3], p[4], p[5]);
+					"length %d\n", p[0], p[1]);
 				break;
 			}
 			len -= p[1] + 2;
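
Each facility class has a fixed or self-described length, and the parser
previously trusted the remaining `len` without checking it, so a truncated
facilities block could be read past its end; returning 0 on a short field now
makes the caller drop the call instead (see the x25_in.c hunk below). The
guard is the standard TLV bounds check, as a standalone sketch:

	#include <stddef.h>
	#include <stdint.h>

	/* Return bytes consumed, or 0 if the buffer is truncated/malformed. */
	static size_t parse_var_field(const uint8_t *p, size_t len)
	{
		if (len < 2)			/* need type + length octets */
			return 0;
		if ((size_t)p[1] + 2 > len)	/* declared body must fit */
			return 0;
		/* ... p[2] .. p[p[1] + 1] are now safe to read ... */
		return (size_t)p[1] + 2;
	}
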
diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c
index 6317896..f729f02 100644
--- a/net/x25/x25_in.c
+++ b/net/x25/x25_in.c
@@ -119,6 +119,8 @@
 						&x25->vc_facil_mask);
 			if (len > 0)
 				skb_pull(skb, len);
+			else
+				return -1;
 			/*
 			 *	Copy any Call User Data.
 			 */
diff --git a/net/x25/x25_link.c b/net/x25/x25_link.c
index 73e7b95..4cbc942 100644
--- a/net/x25/x25_link.c
+++ b/net/x25/x25_link.c
@@ -31,8 +31,8 @@
 #include <linux/init.h>
 #include <net/x25.h>
 
-static LIST_HEAD(x25_neigh_list);
-static DEFINE_RWLOCK(x25_neigh_list_lock);
+LIST_HEAD(x25_neigh_list);
+DEFINE_RWLOCK(x25_neigh_list_lock);
 
 static void x25_t20timer_expiry(unsigned long);
 
@@ -360,16 +360,20 @@
 	dev_put(dev);
 
 	if (cmd == SIOCX25GSUBSCRIP) {
+		read_lock_bh(&x25_neigh_list_lock);
 		x25_subscr.extended	     = nb->extended;
 		x25_subscr.global_facil_mask = nb->global_facil_mask;
+		read_unlock_bh(&x25_neigh_list_lock);
 		rc = copy_to_user(arg, &x25_subscr,
 				  sizeof(x25_subscr)) ? -EFAULT : 0;
 	} else {
 		rc = -EINVAL;
 		if (!(x25_subscr.extended && x25_subscr.extended != 1)) {
 			rc = 0;
+			write_lock_bh(&x25_neigh_list_lock);
 			nb->extended	     = x25_subscr.extended;
 			nb->global_facil_mask = x25_subscr.global_facil_mask;
+			write_unlock_bh(&x25_neigh_list_lock);
 		}
 	}
 	x25_neigh_put(nb);
@@ -394,6 +398,7 @@
 	list_for_each_safe(entry, tmp, &x25_neigh_list) {
 		nb = list_entry(entry, struct x25_neigh, node);
 		__x25_remove_neigh(nb);
+		dev_put(nb->dev);
 	}
 	write_unlock_bh(&x25_neigh_list_lock);
 }
diff --git a/net/xfrm/xfrm_hash.c b/net/xfrm/xfrm_hash.c
index a2023ec..1e98bc0 100644
--- a/net/xfrm/xfrm_hash.c
+++ b/net/xfrm/xfrm_hash.c
@@ -19,7 +19,7 @@
 	if (sz <= PAGE_SIZE)
 		n = kzalloc(sz, GFP_KERNEL);
 	else if (hashdist)
-		n = __vmalloc(sz, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
+		n = vzalloc(sz);
 	else
 		n = (struct hlist_head *)
 			__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
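
vzalloc() is just the zeroing variant of vmalloc(); the replaced call spelled
out exactly what the helper now hides, so behaviour is unchanged. Roughly (a
sketch of the semantics, not the mm/ implementation):

	static void *vzalloc_like(unsigned long size)
	{
		return __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
	}
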
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 044e778..6e50ccd 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1433,7 +1433,7 @@
 		}
 
 		xdst->route = dst;
-		memcpy(&dst1->metrics, &dst->metrics, sizeof(dst->metrics));
+		dst_copy_metrics(dst1, dst);
 
 		if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) {
 			family = xfrm[i]->props.family;
@@ -2271,7 +2271,7 @@
 		if (pmtu > route_mtu_cached)
 			pmtu = route_mtu_cached;
 
-		dst->metrics[RTAX_MTU-1] = pmtu;
+		dst_metric_set(dst, RTAX_MTU, pmtu);
 	} while ((dst = dst->next));
 }
 
@@ -2349,7 +2349,7 @@
 		mtu = xfrm_state_mtu(dst->xfrm, mtu);
 		if (mtu > last->route_mtu_cached)
 			mtu = last->route_mtu_cached;
-		dst->metrics[RTAX_MTU-1] = mtu;
+		dst_metric_set(dst, RTAX_MTU, mtu);
 
 		if (last == first)
 			break;
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 8bae6b2..8eb8895 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -148,7 +148,8 @@
 		     !attrs[XFRMA_ALG_AUTH_TRUNC]) ||
 		    attrs[XFRMA_ALG_AEAD]	||
 		    attrs[XFRMA_ALG_CRYPT]	||
-		    attrs[XFRMA_ALG_COMP])
+		    attrs[XFRMA_ALG_COMP]	||
+		    attrs[XFRMA_TFCPAD])
 			goto out;
 		break;
 
@@ -165,6 +166,9 @@
 		     attrs[XFRMA_ALG_CRYPT]) &&
 		    attrs[XFRMA_ALG_AEAD])
 			goto out;
+		if (attrs[XFRMA_TFCPAD] &&
+		    p->mode != XFRM_MODE_TUNNEL)
+			goto out;
 		break;
 
 	case IPPROTO_COMP:
@@ -172,7 +176,8 @@
 		    attrs[XFRMA_ALG_AEAD]	||
 		    attrs[XFRMA_ALG_AUTH]	||
 		    attrs[XFRMA_ALG_AUTH_TRUNC]	||
-		    attrs[XFRMA_ALG_CRYPT])
+		    attrs[XFRMA_ALG_CRYPT]	||
+		    attrs[XFRMA_TFCPAD])
 			goto out;
 		break;
 
@@ -186,6 +191,7 @@
 		    attrs[XFRMA_ALG_CRYPT]	||
 		    attrs[XFRMA_ENCAP]		||
 		    attrs[XFRMA_SEC_CTX]	||
+		    attrs[XFRMA_TFCPAD]		||
 		    !attrs[XFRMA_COADDR])
 			goto out;
 		break;
@@ -439,6 +445,9 @@
 			goto error;
 	}
 
+	if (attrs[XFRMA_TFCPAD])
+		x->tfcpad = nla_get_u32(attrs[XFRMA_TFCPAD]);
+
 	if (attrs[XFRMA_COADDR]) {
 		x->coaddr = kmemdup(nla_data(attrs[XFRMA_COADDR]),
 				    sizeof(*x->coaddr), GFP_KERNEL);
@@ -688,6 +697,9 @@
 	if (x->encap)
 		NLA_PUT(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);
 
+	if (x->tfcpad)
+		NLA_PUT_U32(skb, XFRMA_TFCPAD, x->tfcpad);
+
 	if (xfrm_mark_put(skb, &x->mark))
 		goto nla_put_failure;
 
@@ -2122,6 +2134,7 @@
 	[XFRMA_MIGRATE]		= { .len = sizeof(struct xfrm_user_migrate) },
 	[XFRMA_KMADDRESS]	= { .len = sizeof(struct xfrm_user_kmaddress) },
 	[XFRMA_MARK]		= { .len = sizeof(struct xfrm_mark) },
+	[XFRMA_TFCPAD]		= { .type = NLA_U32 },
 };
 
 static struct xfrm_link {
@@ -2301,6 +2314,8 @@
 		l += nla_total_size(sizeof(*x->calg));
 	if (x->encap)
 		l += nla_total_size(sizeof(*x->encap));
+	if (x->tfcpad)
+		l += nla_total_size(sizeof(x->tfcpad));
 	if (x->security)
 		l += nla_total_size(sizeof(struct xfrm_user_sec_ctx) +
 				    x->security->ctx_len);
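
The XFRMA_TFCPAD additions trace the full checklist for a new xfrm netlink
attribute: reject it where it is meaningless (AH, IPcomp, non-tunnel ESP),
parse it with nla_get_u32(), emit it with NLA_PUT_U32() when non-zero, declare
it in xfrma_policy so nla_parse() validates the length, and count it in the
dump-size estimate. The same five touch points, compressed into a fragment
with a hypothetical XFRMA_EXAMPLE attribute and x->example field:

	/* 1. policy: incoming attribute must be a 32-bit value */
	[XFRMA_EXAMPLE] = { .type = NLA_U32 },

	/* 2. verify: refuse it where it makes no sense */
	if (attrs[XFRMA_EXAMPLE] && p->mode != XFRM_MODE_TUNNEL)
		goto out;

	/* 3. parse it into the state */
	if (attrs[XFRMA_EXAMPLE])
		x->example = nla_get_u32(attrs[XFRMA_EXAMPLE]);

	/* 4. dump it back to userspace when set */
	if (x->example)
		NLA_PUT_U32(skb, XFRMA_EXAMPLE, x->example);

	/* 5. account for it when sizing the dump message */
	if (x->example)
		l += nla_total_size(sizeof(x->example));
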
diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
index c0efe10..af6e9f3 100644
--- a/scripts/kconfig/symbol.c
+++ b/scripts/kconfig/symbol.c
@@ -875,7 +875,7 @@
 			symval = sym_get_string_value(sym);
 		}
 
-		newlen = strlen(res) + strlen(symval) + strlen(src);
+		newlen = strlen(res) + strlen(symval) + strlen(src) + 1;
 		if (newlen > reslen) {
 			reslen = newlen;
 			res = realloc(res, reslen);
diff --git a/security/Kconfig b/security/Kconfig
index bd72ae6..e80da95 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -39,6 +39,18 @@
 
 	  If you are unsure as to whether this is required, answer N.
 
+config SECURITY_DMESG_RESTRICT
+	bool "Restrict unprivileged access to the kernel syslog"
+	default n
+	help
+	  This enforces restrictions on unprivileged users reading the kernel
+	  syslog via dmesg(8).
+
+	  If this option is not selected, no restrictions will be enforced
+	  unless the dmesg_restrict sysctl is explicitly set to (1).
+
+	  If you are unsure how to answer this question, answer N.
+
 config SECURITY
 	bool "Enable different security models"
 	depends on SYSFS
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index cf1de44..b7106f1 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -922,7 +922,7 @@
 	error = register_security(&apparmor_ops);
 	if (error) {
 		AA_ERROR("Unable to register AppArmor\n");
-		goto register_security_out;
+		goto set_init_cxt_out;
 	}
 
 	/* Report that AppArmor successfully initialized */
@@ -936,6 +936,9 @@
 
 	return error;
 
+set_init_cxt_out:
+	aa_free_task_context(current->real_cred->security);
+
 register_security_out:
 	aa_free_root_ns();
 
@@ -944,7 +947,6 @@
 
 	apparmor_enabled = 0;
 	return error;
-
 }
 
 security_initcall(apparmor_init);
diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
index 52cc865..4f0eade 100644
--- a/security/apparmor/policy.c
+++ b/security/apparmor/policy.c
@@ -306,7 +306,7 @@
 	return ns;
 
 fail_unconfined:
-	kzfree(ns->base.name);
+	kzfree(ns->base.hname);
 fail_ns:
 	kzfree(ns);
 	return NULL;
diff --git a/security/commoncap.c b/security/commoncap.c
index 5e632b4..04b80f9 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -895,6 +895,8 @@
 {
 	if (type != SYSLOG_ACTION_OPEN && from_file)
 		return 0;
+	if (dmesg_restrict && !capable(CAP_SYS_ADMIN))
+		return -EPERM;
 	if ((type != SYSLOG_ACTION_READ_ALL &&
 	     type != SYSLOG_ACTION_SIZE_BUFFER) && !capable(CAP_SYS_ADMIN))
 		return -EPERM;
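
Ordering matters in cap_syslog(): the dmesg_restrict test runs before the
per-action test, so once the sysctl is set (or SECURITY_DMESG_RESTRICT selects
it by default), every syslog action -- reads included -- requires
CAP_SYS_ADMIN. The knob can be flipped at runtime through procfs; a minimal
userspace sketch:

	#include <stdio.h>

	/* Enable the restriction (same effect as sysctl kernel.dmesg_restrict=1). */
	int main(void)
	{
		FILE *f = fopen("/proc/sys/kernel/dmesg_restrict", "w");

		if (!f)
			return 1;
		fputs("1\n", f);
		return fclose(f) ? 1 : 0;
	}
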
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index d9154cf..156ef93 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -4524,11 +4524,11 @@
 	if (selinux_secmark_enabled())
 		if (avc_has_perm(sksec->sid, skb->secmark,
 				 SECCLASS_PACKET, PACKET__SEND, &ad))
-			return NF_DROP;
+			return NF_DROP_ERR(-ECONNREFUSED);
 
 	if (selinux_policycap_netpeer)
 		if (selinux_xfrm_postroute_last(sksec->sid, skb, &ad, proto))
-			return NF_DROP;
+			return NF_DROP_ERR(-ECONNREFUSED);
 
 	return NF_ACCEPT;
 }
@@ -4585,7 +4585,7 @@
 				secmark_perm = PACKET__SEND;
 			break;
 		default:
-			return NF_DROP;
+			return NF_DROP_ERR(-ECONNREFUSED);
 		}
 		if (secmark_perm == PACKET__FORWARD_OUT) {
 			if (selinux_skb_peerlbl_sid(skb, family, &peer_sid))
@@ -4607,7 +4607,7 @@
 	if (secmark_active)
 		if (avc_has_perm(peer_sid, skb->secmark,
 				 SECCLASS_PACKET, secmark_perm, &ad))
-			return NF_DROP;
+			return NF_DROP_ERR(-ECONNREFUSED);
 
 	if (peerlbl_active) {
 		u32 if_sid;
@@ -4617,13 +4617,13 @@
 			return NF_DROP;
 		if (avc_has_perm(peer_sid, if_sid,
 				 SECCLASS_NETIF, NETIF__EGRESS, &ad))
-			return NF_DROP;
+			return NF_DROP_ERR(-ECONNREFUSED);
 
 		if (sel_netnode_sid(addrp, family, &node_sid))
 			return NF_DROP;
 		if (avc_has_perm(peer_sid, node_sid,
 				 SECCLASS_NODE, NODE__SENDTO, &ad))
-			return NF_DROP;
+			return NF_DROP_ERR(-ECONNREFUSED);
 	}
 
 	return NF_ACCEPT;
diff --git a/sound/pci/asihpi/hpi6000.c b/sound/pci/asihpi/hpi6000.c
index f7e374e..1b9bf93 100644
--- a/sound/pci/asihpi/hpi6000.c
+++ b/sound/pci/asihpi/hpi6000.c
@@ -625,6 +625,8 @@
 			control_cache_size, (struct hpi_control_cache_info *)
 			&phw->control_cache[0]
 			);
+		if (!phw->p_cache)
+			pao->has_control_cache = 0;
 	} else
 		pao->has_control_cache = 0;
 
diff --git a/sound/pci/asihpi/hpi6205.c b/sound/pci/asihpi/hpi6205.c
index 22c5fc6..2672f65 100644
--- a/sound/pci/asihpi/hpi6205.c
+++ b/sound/pci/asihpi/hpi6205.c
@@ -644,6 +644,8 @@
 				interface->control_cache.size_in_bytes,
 				(struct hpi_control_cache_info *)
 				p_control_cache_virtual);
+			if (!phw->p_cache)
+				err = HPI_ERROR_MEMORY_ALLOC;
 		}
 		if (!err) {
 			err = hpios_locked_mem_get_phys_addr(&phw->
diff --git a/sound/pci/asihpi/hpicmn.c b/sound/pci/asihpi/hpicmn.c
index dda4f1c..d67f4d3 100644
--- a/sound/pci/asihpi/hpicmn.c
+++ b/sound/pci/asihpi/hpicmn.c
@@ -571,14 +571,20 @@
 {
 	struct hpi_control_cache *p_cache =
 		kmalloc(sizeof(*p_cache), GFP_KERNEL);
+	if (!p_cache)
+		return NULL;
+	p_cache->p_info =
+		kmalloc(sizeof(*p_cache->p_info) * number_of_controls,
+			GFP_KERNEL);
+	if (!p_cache->p_info) {
+		kfree(p_cache);
+		return NULL;
+	}
 	p_cache->cache_size_in_bytes = size_in_bytes;
 	p_cache->control_count = number_of_controls;
 	p_cache->p_cache =
 		(struct hpi_control_cache_single *)pDSP_control_buffer;
 	p_cache->init = 0;
-	p_cache->p_info =
-		kmalloc(sizeof(*p_cache->p_info) * p_cache->control_count,
-		GFP_KERNEL);
 	return p_cache;
 }
 
diff --git a/sound/pci/cs46xx/dsp_spos.c b/sound/pci/cs46xx/dsp_spos.c
index 3e5ca8f..e377287 100644
--- a/sound/pci/cs46xx/dsp_spos.c
+++ b/sound/pci/cs46xx/dsp_spos.c
@@ -225,39 +225,25 @@
 {
 	struct dsp_spos_instance * ins = kzalloc(sizeof(struct dsp_spos_instance), GFP_KERNEL);
 
-	if (ins == NULL) 
+	if (ins == NULL)
 		return NULL;
 
 	/* better to use vmalloc for this big table */
-	ins->symbol_table.nsymbols = 0;
 	ins->symbol_table.symbols = vmalloc(sizeof(struct dsp_symbol_entry) *
 					    DSP_MAX_SYMBOLS);
-	ins->symbol_table.highest_frag_index = 0;
-
-	if (ins->symbol_table.symbols == NULL) {
+	ins->code.data = kmalloc(DSP_CODE_BYTE_SIZE, GFP_KERNEL);
+	ins->modules = kmalloc(sizeof(struct dsp_module_desc) * DSP_MAX_MODULES, GFP_KERNEL);
+	if (!ins->symbol_table.symbols || !ins->code.data || !ins->modules) {
 		cs46xx_dsp_spos_destroy(chip);
 		goto error;
 	}
-
+	ins->symbol_table.nsymbols = 0;
+	ins->symbol_table.highest_frag_index = 0;
 	ins->code.offset = 0;
 	ins->code.size = 0;
-	ins->code.data = kmalloc(DSP_CODE_BYTE_SIZE, GFP_KERNEL);
-
-	if (ins->code.data == NULL) {
-		cs46xx_dsp_spos_destroy(chip);
-		goto error;
-	}
-
 	ins->nscb = 0;
 	ins->ntask = 0;
-
 	ins->nmodules = 0;
-	ins->modules = kmalloc(sizeof(struct dsp_module_desc) * DSP_MAX_MODULES, GFP_KERNEL);
-
-	if (ins->modules == NULL) {
-		cs46xx_dsp_spos_destroy(chip);
-		goto error;
-	}
 
 	/* default SPDIF input sample rate
 	   to 48000 khz */
@@ -271,8 +257,8 @@
 
 	/* set left and right validity bits and
 	   default channel status */
-	ins->spdif_csuv_default = 
-		ins->spdif_csuv_stream =  
+	ins->spdif_csuv_default =
+		ins->spdif_csuv_stream =
 	 /* byte 0 */  ((unsigned int)_wrap_all_bits(  (SNDRV_PCM_DEFAULT_CON_SPDIF        & 0xff)) << 24) |
 	 /* byte 1 */  ((unsigned int)_wrap_all_bits( ((SNDRV_PCM_DEFAULT_CON_SPDIF >> 8) & 0xff)) << 16) |
 	 /* byte 3 */   (unsigned int)_wrap_all_bits(  (SNDRV_PCM_DEFAULT_CON_SPDIF >> 24) & 0xff) |
@@ -281,6 +267,9 @@
 	return ins;
 
 error:
+	kfree(ins->modules);
+	kfree(ins->code.data);
+	vfree(ins->symbol_table.symbols);
 	kfree(ins);
 	return NULL;
 }
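
The reshuffled constructor allocates all three buffers up front and funnels
every failure through a single error label; since kfree() and vfree() accept
NULL, the unified path needs no bookkeeping about which allocation failed.
The idiom in isolation (userspace-runnable sketch -- malloc()/free() are
NULL-safe the same way):

	#include <stdlib.h>

	struct instance { void *a, *b, *c; };

	static struct instance *instance_create(void)
	{
		struct instance *ins = calloc(1, sizeof(*ins));

		if (!ins)
			return NULL;
		ins->a = malloc(4096);
		ins->b = malloc(4096);
		ins->c = malloc(4096);
		if (!ins->a || !ins->b || !ins->c)
			goto error;
		return ins;

	error:
		free(ins->a);	/* free(NULL) is a no-op */
		free(ins->b);
		free(ins->c);
		free(ins);
		return NULL;
	}
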
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index 460fb2e..18af38e 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -1166,6 +1166,7 @@
 
 static struct snd_pci_quirk cs420x_cfg_tbl[] = {
 	SND_PCI_QUIRK(0x10de, 0x0ac0, "MacBookPro 5,3", CS420X_MBP53),
+	SND_PCI_QUIRK(0x10de, 0x0d94, "MacBookAir 3,1(2)", CS420X_MBP55),
 	SND_PCI_QUIRK(0x10de, 0xcb79, "MacBookPro 5,5", CS420X_MBP55),
 	SND_PCI_QUIRK(0x10de, 0xcb89, "MacBookPro 7,1", CS420X_MBP55),
 	SND_PCI_QUIRK(0x8086, 0x7270, "IMac 27 Inch", CS420X_IMAC27),
diff --git a/sound/pci/lx6464es/lx6464es.c b/sound/pci/lx6464es/lx6464es.c
index ef9af3f..1bd7a54 100644
--- a/sound/pci/lx6464es/lx6464es.c
+++ b/sound/pci/lx6464es/lx6464es.c
@@ -425,7 +425,7 @@
 static void lx_trigger_start(struct lx6464es *chip, struct lx_stream *lx_stream)
 {
 	struct snd_pcm_substream *substream = lx_stream->stream;
-	const int is_capture = lx_stream->is_capture;
+	const unsigned int is_capture = lx_stream->is_capture;
 
 	int err;
 
@@ -473,7 +473,7 @@
 
 static void lx_trigger_stop(struct lx6464es *chip, struct lx_stream *lx_stream)
 {
-	const int is_capture = lx_stream->is_capture;
+	const unsigned int is_capture = lx_stream->is_capture;
 	int err;
 
 	snd_printd(LXP "stopping: stopping stream\n");
diff --git a/sound/pci/lx6464es/lx6464es.h b/sound/pci/lx6464es/lx6464es.h
index 51afc04..aea621e 100644
--- a/sound/pci/lx6464es/lx6464es.h
+++ b/sound/pci/lx6464es/lx6464es.h
@@ -60,7 +60,7 @@
 	snd_pcm_uframes_t          frame_pos;
 	enum lx_stream_status      status; /* free, open, running, draining
 					    * pause */
-	int                        is_capture:1;
+	unsigned int               is_capture:1;
 };
 
 
diff --git a/sound/pci/lx6464es/lx_core.c b/sound/pci/lx6464es/lx_core.c
index 3086b75..617f98b 100644
--- a/sound/pci/lx6464es/lx_core.c
+++ b/sound/pci/lx6464es/lx_core.c
@@ -1152,7 +1152,7 @@
 					   struct lx_stream *lx_stream)
 {
 	struct snd_pcm_substream *substream = lx_stream->stream;
-	int is_capture = lx_stream->is_capture;
+	const unsigned int is_capture = lx_stream->is_capture;
 	int err;
 	unsigned long flags;
 
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 94a9d06..3b5690d 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -25,8 +25,9 @@
 	select SND_SOC_CQ0093VC if MFD_DAVINCI_VOICECODEC
 	select SND_SOC_CS42L51 if I2C
 	select SND_SOC_CS4270 if I2C
+	select SND_SOC_CX20442
 	select SND_SOC_DA7210 if I2C
-	select SND_SOC_JZ4740 if SOC_JZ4740
+	select SND_SOC_JZ4740_CODEC if SOC_JZ4740
 	select SND_SOC_MAX98088 if I2C
 	select SND_SOC_MAX9877 if I2C
 	select SND_SOC_PCM3008
diff --git a/sound/soc/codecs/tlv320dac33.c b/sound/soc/codecs/tlv320dac33.c
index d251ff5..c5ab8c8 100644
--- a/sound/soc/codecs/tlv320dac33.c
+++ b/sound/soc/codecs/tlv320dac33.c
@@ -58,7 +58,7 @@
 	(1000000000 / ((rate * 1000) / samples))
 
 #define US_TO_SAMPLES(rate, us) \
-	(rate / (1000000 / us))
+	(rate / (1000000 / (us < 1000000 ? us : 1000000)))
 
 #define UTHR_FROM_PERIOD_SIZE(samples, playrate, burstrate) \
 	((samples * 5000) / ((burstrate * 5000) / (burstrate - playrate)))
@@ -200,7 +200,7 @@
 		      u8 *value)
 {
 	struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
-	int val;
+	int val, ret = 0;
 
 	*value = reg & 0xff;
 
@@ -210,6 +210,7 @@
 		if (val < 0) {
 			dev_err(codec->dev, "Read failed (%d)\n", val);
 			value[0] = dac33_read_reg_cache(codec, reg);
+			ret = val;
 		} else {
 			value[0] = val;
 			dac33_write_reg_cache(codec, reg, val);
@@ -218,7 +219,7 @@
 		value[0] = dac33_read_reg_cache(codec, reg);
 	}
 
-	return 0;
+	return ret;
 }
 
 static int dac33_write(struct snd_soc_codec *codec, unsigned int reg,
@@ -329,13 +330,18 @@
 		    dac33_read_reg_cache(codec, DAC33_LINER_TO_RLO_VOL));
 }
 
-static inline void dac33_read_id(struct snd_soc_codec *codec)
+static inline int dac33_read_id(struct snd_soc_codec *codec)
 {
+	int i, ret = 0;
 	u8 reg;
 
-	dac33_read(codec, DAC33_DEVICE_ID_MSB, &reg);
-	dac33_read(codec, DAC33_DEVICE_ID_LSB, &reg);
-	dac33_read(codec, DAC33_DEVICE_REV_ID, &reg);
+	for (i = 0; i < 3; i++) {
+		ret = dac33_read(codec, DAC33_DEVICE_ID_MSB + i, &reg);
+		if (ret < 0)
+			break;
+	}
+
+	return ret;
 }
 
 static inline void dac33_soft_power(struct snd_soc_codec *codec, int power)
@@ -1076,6 +1082,9 @@
 		/* Number of samples under i2c latency */
 		dac33->alarm_threshold = US_TO_SAMPLES(rate,
 						dac33->mode1_latency);
+		nsample_limit = DAC33_BUFFER_SIZE_SAMPLES -
+				dac33->alarm_threshold;
+
 		if (dac33->auto_fifo_config) {
 			if (period_size <= dac33->alarm_threshold)
 				/*
@@ -1086,6 +1095,8 @@
 				       ((dac33->alarm_threshold / period_size) +
 				       (dac33->alarm_threshold % period_size ?
 				       1 : 0));
+			else if (period_size > nsample_limit)
+				dac33->nsample = nsample_limit;
 			else
 				dac33->nsample = period_size;
 		} else {
@@ -1097,8 +1108,7 @@
 			 */
 			dac33->nsample_max = substream->runtime->buffer_size -
 						period_size;
-			nsample_limit = DAC33_BUFFER_SIZE_SAMPLES -
-					dac33->alarm_threshold;
+
 			if (dac33->nsample_max > nsample_limit)
 				dac33->nsample_max = nsample_limit;
 
@@ -1414,9 +1424,15 @@
 		dev_err(codec->dev, "Failed to power up codec: %d\n", ret);
 		goto err_power;
 	}
-	dac33_read_id(codec);
+	ret = dac33_read_id(codec);
 	dac33_hard_power(codec, 0);
 
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to read chip ID: %d\n", ret);
+		ret = -ENODEV;
+		goto err_power;
+	}
+
 	/* Check if the IRQ number is valid and request it */
 	if (dac33->irq >= 0) {
 		ret = request_irq(dac33->irq, dac33_interrupt_handler,
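
The dac33_read() change above keeps the read-failure fallback to the register cache but finally reports the error; dac33_read_id() then folds its three reads into a loop and propagates the first failure so probe can bail out with -ENODEV. A hypothetical sketch of that pattern, with a stubbed bus read:

	/* Hypothetical sketch: fall back to a cached value when the bus
	 * read fails, but return the error instead of unconditionally
	 * returning 0 as the old code did.
	 */
	#include <errno.h>

	static unsigned char cache[256];

	static int hw_read(unsigned int reg)	/* stub: always fails */
	{
		(void)reg;
		return -EIO;
	}

	static int read_reg(unsigned int reg, unsigned char *value)
	{
		int val, ret = 0;

		val = hw_read(reg);
		if (val < 0) {
			*value = cache[reg & 0xff];	/* stale but usable */
			ret = val;			/* ...still report it */
		} else {
			cache[reg & 0xff] = val;
			*value = val;
		}
		return ret;
	}

	int main(void)
	{
		unsigned char v;

		return read_reg(0x01, &v) < 0 ? 1 : 0;
	}
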
diff --git a/sound/soc/codecs/tpa6130a2.c b/sound/soc/codecs/tpa6130a2.c
index 329acc1..ee4fb20 100644
--- a/sound/soc/codecs/tpa6130a2.c
+++ b/sound/soc/codecs/tpa6130a2.c
@@ -119,13 +119,13 @@
 {
 	struct	tpa6130a2_data *data;
 	u8	val;
-	int	ret;
+	int	ret = 0;
 
 	BUG_ON(tpa6130a2_client == NULL);
 	data = i2c_get_clientdata(tpa6130a2_client);
 
 	mutex_lock(&data->mutex);
-	if (power) {
+	if (power && !data->power_state) {
 		/* Power on */
 		if (data->power_gpio >= 0)
 			gpio_set_value(data->power_gpio, 1);
@@ -153,7 +153,7 @@
 		val = tpa6130a2_read(TPA6130A2_REG_CONTROL);
 		val &= ~TPA6130A2_SWS;
 		tpa6130a2_i2c_write(TPA6130A2_REG_CONTROL, val);
-	} else {
+	} else if (!power && data->power_state) {
 		/* set SWS */
 		val = tpa6130a2_read(TPA6130A2_REG_CONTROL);
 		val |= TPA6130A2_SWS;
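
The added power_state checks above make the power routine idempotent: the GPIO and register dance only runs on a real off-to-on or on-to-off transition. The guarded-transition pattern, as a hypothetical sketch:

	/* Hypothetical sketch: only touch the hardware when the cached
	 * power state actually changes; repeated calls with the same
	 * argument become no-ops.
	 */
	#include <stdio.h>

	struct amp {
		int power_state;	/* cached: 1 = on, 0 = off */
	};

	static void hw_set_power(struct amp *a, int on)	/* stub */
	{
		printf("hardware power %s\n", on ? "on" : "off");
		a->power_state = on;
	}

	static void amp_power(struct amp *a, int power)
	{
		if (power && !a->power_state)
			hw_set_power(a, 1);	/* off -> on  */
		else if (!power && a->power_state)
			hw_set_power(a, 0);	/* on  -> off */
		/* already in the requested state: nothing to do */
	}

	int main(void)
	{
		struct amp a = { 0 };

		amp_power(&a, 1);	/* prints once */
		amp_power(&a, 1);	/* no-op */
		amp_power(&a, 0);	/* prints once */
		return 0;
	}
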
diff --git a/sound/soc/codecs/wm8900.c b/sound/soc/codecs/wm8900.c
index b4f1172..aca4b1e 100644
--- a/sound/soc/codecs/wm8900.c
+++ b/sound/soc/codecs/wm8900.c
@@ -186,7 +186,6 @@
 {
 	switch (reg) {
 	case WM8900_REG_ID:
-	case WM8900_REG_POWER1:
 		return 1;
 	default:
 		return 0;
@@ -1200,11 +1199,6 @@
 		return -ENODEV;
 	}
 
-	/* Read back from the chip */
-	reg = snd_soc_read(codec, WM8900_REG_POWER1);
-	reg = (reg >> 12) & 0xf;
-	dev_info(codec->dev, "WM8900 revision %d\n", reg);
-
 	wm8900_reset(codec);
 
 	/* Turn the chip on */
diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
index 2cb8153..19ca782 100644
--- a/sound/soc/codecs/wm_hubs.c
+++ b/sound/soc/codecs/wm_hubs.c
@@ -123,7 +123,7 @@
 			reg_r = reg & WM8993_DCS_DAC_WR_VAL_0_MASK;
 			break;
 		default:
-			WARN(1, "Unknown DCS readback method");
+			WARN(1, "Unknown DCS readback method\n");
 			break;
 		}
 
diff --git a/sound/soc/pxa/tosa.c b/sound/soc/pxa/tosa.c
index a3bfb2e..73d0edd 100644
--- a/sound/soc/pxa/tosa.c
+++ b/sound/soc/pxa/tosa.c
@@ -79,7 +79,7 @@
 static int tosa_startup(struct snd_pcm_substream *substream)
 {
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct snd_soc_codec *codec = rtd->card->codec;
+	struct snd_soc_codec *codec = rtd->codec;
 
 	/* check the jack status at stream startup */
 	tosa_ext_control(codec);
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 1c8f3f5..614a8b3 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -165,8 +165,11 @@
 {
 	struct snd_soc_pcm_runtime *rtd =
 			container_of(dev, struct snd_soc_pcm_runtime, dev);
+	int ret;
 
-	strict_strtol(buf, 10, &rtd->pmdown_time);
+	ret = strict_strtol(buf, 10, &rtd->pmdown_time);
+	if (ret)
+		return ret;
 
 	return count;
 }
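
strict_strtol() returns 0 on success and an error on malformed input; the fix above finally checks it, so a bad write to pmdown_time is rejected rather than silently ignored. A hypothetical userspace analogue built on strtol(3):

	/* Hypothetical userspace analogue: reject the input on parse
	 * failure instead of keeping a stale value and reporting success.
	 */
	#include <errno.h>
	#include <stdlib.h>

	static int parse_long(const char *buf, long *out)
	{
		char *end;
		long val;

		errno = 0;
		val = strtol(buf, &end, 10);
		if (errno || end == buf)
			return -EINVAL;	/* propagate, as the fix does */
		*out = val;
		return 0;
	}

	int main(void)
	{
		long v;

		return parse_long("5000", &v);	/* 0 on success */
	}
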
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index 7dae05d..782f741 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -60,7 +60,7 @@
 	{ USB_ID(0x041e, 0x3000), 0, 1, 2, 1,  18, 0x0013 }, /* Extigy       */
 	{ USB_ID(0x041e, 0x3020), 2, 1, 6, 6,  18, 0x0013 }, /* Audigy 2 NX  */
 	{ USB_ID(0x041e, 0x3040), 2, 2, 6, 6,  2,  0x6e91 }, /* Live! 24-bit */
-	{ USB_ID(0x041e, 0x3042), 0, 1, 1, 1,  1,  0x000d }, /* Usb X-Fi */
+	{ USB_ID(0x041e, 0x3042), 0, 1, 1, 1,  1,  0x000d }, /* Usb X-Fi S51 */
 	{ USB_ID(0x041e, 0x3048), 2, 2, 6, 6,  2,  0x6e91 }, /* Toshiba SB0500 */
 };
 
@@ -183,7 +183,13 @@
 	if (value > 1)
 		return -EINVAL;
 	changed = value != mixer->audigy2nx_leds[index];
-	err = snd_usb_ctl_msg(mixer->chip->dev,
+	if (mixer->chip->usb_id == USB_ID(0x041e, 0x3042))
+		err = snd_usb_ctl_msg(mixer->chip->dev,
+			      usb_sndctrlpipe(mixer->chip->dev, 0), 0x24,
+			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
+			      !value, 0, NULL, 0, 100);
+	else
+		err = snd_usb_ctl_msg(mixer->chip->dev,
 			      usb_sndctrlpipe(mixer->chip->dev, 0), 0x24,
 			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
 			      value, index + 2, NULL, 0, 100);
@@ -225,8 +231,12 @@
 	int i, err;
 
 	for (i = 0; i < ARRAY_SIZE(snd_audigy2nx_controls); ++i) {
+		/* USB X-Fi S51 doesn't have a CMSS LED */
+		if ((mixer->chip->usb_id == USB_ID(0x041e, 0x3042)) && i == 0)
+			continue;
 		if (i > 1 && /* Live24ext has 2 LEDs only */
 			(mixer->chip->usb_id == USB_ID(0x041e, 0x3040) ||
+			 mixer->chip->usb_id == USB_ID(0x041e, 0x3042) ||
 			 mixer->chip->usb_id == USB_ID(0x041e, 0x3048)))
 			break;
 		err = snd_ctl_add(mixer->chip->card,
@@ -365,6 +375,7 @@
 
 	if (mixer->chip->usb_id == USB_ID(0x041e, 0x3020) ||
 	    mixer->chip->usb_id == USB_ID(0x041e, 0x3040) ||
+	    mixer->chip->usb_id == USB_ID(0x041e, 0x3042) ||
 	    mixer->chip->usb_id == USB_ID(0x041e, 0x3048)) {
 		if ((err = snd_audigy2nx_controls_create(mixer)) < 0)
 			return err;
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index cff3a3c..4132522 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -676,8 +676,10 @@
 	if (!needs_knot)
 		return 0;
 
-	subs->rate_list.count = count;
 	subs->rate_list.list = kmalloc(sizeof(int) * count, GFP_KERNEL);
+	if (!subs->rate_list.list)
+		return -ENOMEM;
+	subs->rate_list.count = count;
 	subs->rate_list.mask = 0;
 	count = 0;
 	list_for_each_entry(fp, &subs->fmt_list, list) {
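
The reordering above matters on the error path: the count used to be published before the allocation, so a failed kmalloc() returned with rate_list.count nonzero and rate_list.list NULL, which a later reader could dereference. A hypothetical sketch of the safe ordering:

	/* Hypothetical sketch: publish the count only after the backing
	 * allocation succeeds, so callers never observe count > 0 with
	 * list == NULL.
	 */
	#include <errno.h>
	#include <stdlib.h>

	struct rate_list {
		unsigned int count;
		int *list;
	};

	static int rate_list_alloc(struct rate_list *rl, unsigned int count)
	{
		rl->list = malloc(sizeof(int) * count);
		if (!rl->list)
			return -ENOMEM;	/* rl->count still 0: consistent */
		rl->count = count;
		return 0;
	}

	int main(void)
	{
		struct rate_list rl = { 0, NULL };

		return rate_list_alloc(&rl, 8);
	}
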
diff --git a/tools/perf/Documentation/perf-trace.txt b/tools/perf/Documentation/perf-trace.txt
index 122ec9d..26aff6b 100644
--- a/tools/perf/Documentation/perf-trace.txt
+++ b/tools/perf/Documentation/perf-trace.txt
@@ -8,7 +8,11 @@
 SYNOPSIS
 --------
 [verse]
-'perf trace' {record <script> | report <script> [args] }
+'perf trace' [<options>]
+'perf trace' [<options>] record <script> [<record-options>] <command>
+'perf trace' [<options>] report <script> [script-args]
+'perf trace' [<options>] <script> <required-script-args> [<record-options>] <command>
+'perf trace' [<options>] <top-script> [script-args]
 
 DESCRIPTION
 -----------
@@ -24,23 +28,53 @@
   available via 'perf trace -l').  The following variants allow you to
   record and run those scripts:
 
-  'perf trace record <script>' to record the events required for 'perf
-  trace report'.  <script> is the name displayed in the output of
-  'perf trace --list' i.e. the actual script name minus any language
-  extension.
+  'perf trace record <script> <command>' to record the events required
+  for 'perf trace report'.  <script> is the name displayed in the
+  output of 'perf trace --list' i.e. the actual script name minus any
+  language extension.  If <command> is not specified, the events are
+  recorded using the -a (system-wide) 'perf record' option.
 
-  'perf trace report <script>' to run and display the results of
-  <script>.  <script> is the name displayed in the output of 'perf
+  'perf trace report <script> [args]' to run and display the results
+  of <script>.  <script> is the name displayed in the output of 'perf
   trace --list' i.e. the actual script name minus any language
   extension.  The perf.data output from a previous run of 'perf trace
   record <script>' is used and should be present for this command to
-  succeed.
+  succeed.  [args] refers to the (mainly optional) args expected by
+  the script.
+
+  'perf trace <script> <required-script-args> <command>' to both
+  record the events required for <script> and to run the <script>
+  using 'live-mode' i.e. without writing anything to disk.  <script>
+  is the name displayed in the output of 'perf trace --list' i.e. the
+  actual script name minus any language extension.  If <command> is
+  not specified, the events are recorded using the -a (system-wide)
+  'perf record' option.  If <script> has any required args, they
+  should be specified before <command>.  This mode doesn't accept
+  optional script args; if optional script args are needed, they can
+  be supplied using separate 'perf trace record'
+  and 'perf trace report' commands, with the stdout of the record step
+  piped to the stdin of the report script, using the '-o -' and '-i -'
+  options of the corresponding commands.
+
+  'perf trace <top-script>' to both record the events required for
+  <top-script> and to run the <top-script> using 'live-mode'
+  i.e. without writing anything to disk.  <top-script> is the name
+  displayed in the output of 'perf trace --list' i.e. the actual
+  script name minus any language extension; a <top-script> is defined
+  as any script name ending with the string 'top'.
+
+  [<record-options>] can be passed to the record steps of 'perf trace
+  record' and 'live-mode' variants; however, this isn't possible for
+  the <top-script> 'live-mode' or 'perf trace report' variants.
 
   See the 'SEE ALSO' section for links to language-specific
   information on how to write and run your own trace scripts.
 
 OPTIONS
 -------
+<command>...::
+	Any command you can specify in a shell.
+
 -D::
 --dump-raw-trace=::
         Display verbose dump of the trace data.
@@ -64,6 +98,13 @@
         Generate perf-trace.[ext] starter script for given language,
         using current perf.data.
 
+-a::
+        Force system-wide collection.  Scripts run without a <command>
+        normally use -a by default, while scripts run with a <command>
+        normally don't - this option allows the latter to be run in
+        system-wide mode.
+
 SEE ALSO
 --------
 linkperf:perf-record[1], linkperf:perf-trace-perl[1],
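
By way of illustration, the record/report pipe described above might be arranged as follows, using the rw-by-pid script touched later in this series; the exact placement of '-o -' and '-i -' here is an assumption based on the synopsis, not an invocation taken from this patch:

	perf trace record rw-by-pid -o - | perf trace -i - report rw-by-pid
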
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 4e75583..93bd2ff 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -790,7 +790,7 @@
 
 static bool force, append_file;
 
-static const struct option options[] = {
+const struct option record_options[] = {
 	OPT_CALLBACK('e', "event", NULL, "event",
 		     "event selector. use 'perf list' to list available events",
 		     parse_events),
@@ -839,16 +839,16 @@
 {
 	int i, j, err = -ENOMEM;
 
-	argc = parse_options(argc, argv, options, record_usage,
+	argc = parse_options(argc, argv, record_options, record_usage,
 			    PARSE_OPT_STOP_AT_NON_OPTION);
 	if (!argc && target_pid == -1 && target_tid == -1 &&
 		!system_wide && !cpu_list)
-		usage_with_options(record_usage, options);
+		usage_with_options(record_usage, record_options);
 
 	if (force && append_file) {
 		fprintf(stderr, "Can't overwrite and append at the same time."
 				" You need to choose between -f and -A");
-		usage_with_options(record_usage, options);
+		usage_with_options(record_usage, record_options);
 	} else if (append_file) {
 		write_mode = WRITE_APPEND;
 	} else {
@@ -871,7 +871,7 @@
 		if (thread_num <= 0) {
 			fprintf(stderr, "Can't find all threads of pid %d\n",
 					target_pid);
-			usage_with_options(record_usage, options);
+			usage_with_options(record_usage, record_options);
 		}
 	} else {
 		all_tids=malloc(sizeof(pid_t));
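
Dropping 'static' and giving the table a distinctive name is what lets the extern declaration added to builtin-trace.c below pick it up. A hypothetical single-file sketch of the linkage pattern, using a toy struct option rather than perf's real one:

	/* Hypothetical sketch: a const table defined with external
	 * linkage in one file (builtin-record.c in the patch) is consumed
	 * elsewhere via a matching extern declaration (builtin-trace.c
	 * in the patch).
	 */
	#include <stdio.h>

	struct option {		/* toy stand-in, not perf's real type */
		const char *name;
		char short_name;
	};

	/* definition: would live in the "record" translation unit */
	const struct option record_options[] = {
		{ "event",  'e' },
		{ "output", 'o' },
		{ NULL, 0 },
	};

	/* declaration: what the "trace" translation unit would carry */
	extern const struct option record_options[];

	int main(void)
	{
		const struct option *opt;

		for (opt = record_options; opt->name; opt++)
			printf("-%c/--%s\n", opt->short_name, opt->name);
		return 0;
	}
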
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index b513e40..dd62580 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -69,7 +69,6 @@
 static pid_t			*all_tids			=      NULL;
 static int			thread_num			=      0;
 static bool			inherit				=  false;
-static int			profile_cpu			=     -1;
 static int			nr_cpus				=      0;
 static int			realtime_prio			=      0;
 static bool			group				=  false;
@@ -558,13 +557,13 @@
 	else
 		printf(" (all");
 
-	if (profile_cpu != -1)
-		printf(", cpu: %d)\n", profile_cpu);
+	if (cpu_list)
+		printf(", CPU%s: %s)\n", nr_cpus > 1 ? "s" : "", cpu_list);
 	else {
 		if (target_tid != -1)
 			printf(")\n");
 		else
-			printf(", %d CPUs)\n", nr_cpus);
+			printf(", %d CPU%s)\n", nr_cpus, nr_cpus > 1 ? "s" : "");
 	}
 
 	printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
@@ -1187,11 +1186,10 @@
 static void start_counter(int i, int counter)
 {
 	struct perf_event_attr *attr;
-	int cpu;
+	int cpu = -1;
 	int thread_index;
 
-	cpu = profile_cpu;
-	if (target_tid == -1 && profile_cpu == -1)
+	if (target_tid == -1)
 		cpu = cpumap[i];
 
 	attr = attrs + counter;
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 2f8df45c..86cfe38 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -10,6 +10,7 @@
 #include "util/symbol.h"
 #include "util/thread.h"
 #include "util/trace-event.h"
+#include "util/parse-options.h"
 #include "util/util.h"
 
 static char const		*script_name;
@@ -17,6 +18,7 @@
 static bool			debug_mode;
 static u64			last_timestamp;
 static u64			nr_unordered;
+extern const struct option	record_options[];
 
 static int default_start_script(const char *script __unused,
 				int argc __unused,
@@ -328,7 +330,7 @@
 {
 	struct script_desc *s = zalloc(sizeof(*s));
 
-	if (s != NULL)
+	if (s != NULL && name)
 		s->name = strdup(name);
 
 	return s;
@@ -337,6 +339,8 @@
 static void script_desc__delete(struct script_desc *s)
 {
 	free(s->name);
+	free(s->half_liner);
+	free(s->args);
 	free(s);
 }
 
@@ -537,8 +541,40 @@
 	return path;
 }
 
+static bool is_top_script(const char *script_path)
+{
+	return ends_with((char *)script_path, "top") != NULL;
+}
+
+static int has_required_arg(char *script_path)
+{
+	struct script_desc *desc;
+	int n_args = 0;
+	char *p;
+
+	desc = script_desc__new(NULL);
+
+	if (read_script_info(desc, script_path))
+		goto out;
+
+	if (!desc->args)
+		goto out;
+
+	for (p = desc->args; *p; p++)
+		if (*p == '<')
+			n_args++;
+out:
+	script_desc__delete(desc);
+
+	return n_args;
+}
+
 static const char * const trace_usage[] = {
-	"perf trace [<options>] <command>",
+	"perf trace [<options>]",
+	"perf trace [<options>] record <script> [<record-options>] <command>",
+	"perf trace [<options>] report <script> [script-args]",
+	"perf trace [<options>] <script> [<record-options>] <command>",
+	"perf trace [<options>] <top-script> [script-args]",
 	NULL
 };
 
@@ -564,50 +600,81 @@
 	OPT_END()
 };
 
+static bool have_cmd(int argc, const char **argv)
+{
+	char **__argv = malloc(sizeof(const char *) * argc);
+
+	if (!__argv)
+		die("malloc");
+	memcpy(__argv, argv, sizeof(const char *) * argc);
+	argc = parse_options(argc, (const char **)__argv, record_options,
+			     NULL, PARSE_OPT_STOP_AT_NON_OPTION);
+	free(__argv);
+
+	return argc != 0;
+}
+
 int cmd_trace(int argc, const char **argv, const char *prefix __used)
 {
+	char *rec_script_path = NULL;
+	char *rep_script_path = NULL;
 	struct perf_session *session;
-	const char *suffix = NULL;
+	char *script_path = NULL;
 	const char **__argv;
-	char *script_path;
-	int i, err;
+	bool system_wide;
+	int i, j, err;
 
-	if (argc >= 2 && strncmp(argv[1], "rec", strlen("rec")) == 0) {
-		if (argc < 3) {
-			fprintf(stderr,
-				"Please specify a record script\n");
-			return -1;
-		}
-		suffix = RECORD_SUFFIX;
+	setup_scripting();
+
+	argc = parse_options(argc, argv, options, trace_usage,
+			     PARSE_OPT_STOP_AT_NON_OPTION);
+
+	if (argc > 1 && !strncmp(argv[0], "rec", strlen("rec"))) {
+		rec_script_path = get_script_path(argv[1], RECORD_SUFFIX);
+		if (!rec_script_path)
+			return cmd_record(argc, argv, NULL);
 	}
 
-	if (argc >= 2 && strncmp(argv[1], "rep", strlen("rep")) == 0) {
-		if (argc < 3) {
+	if (argc > 1 && !strncmp(argv[0], "rep", strlen("rep"))) {
+		rep_script_path = get_script_path(argv[1], REPORT_SUFFIX);
+		if (!rep_script_path) {
 			fprintf(stderr,
-				"Please specify a report script\n");
+				"Please specify a valid report script"
+				"(see 'perf trace -l' for listing)\n");
 			return -1;
 		}
-		suffix = REPORT_SUFFIX;
 	}
 
 	/* make sure PERF_EXEC_PATH is set for scripts */
 	perf_set_argv_exec_path(perf_exec_path());
 
-	if (!suffix && argc >= 2 && strncmp(argv[1], "-", strlen("-")) != 0) {
-		char *record_script_path, *report_script_path;
+	if (argc && !script_name && !rec_script_path && !rep_script_path) {
 		int live_pipe[2];
+		int rep_args;
 		pid_t pid;
 
-		record_script_path = get_script_path(argv[1], RECORD_SUFFIX);
-		if (!record_script_path) {
-			fprintf(stderr, "record script not found\n");
-			return -1;
+		rec_script_path = get_script_path(argv[0], RECORD_SUFFIX);
+		rep_script_path = get_script_path(argv[0], REPORT_SUFFIX);
+
+		if (!rec_script_path && !rep_script_path) {
+			fprintf(stderr, " Couldn't find script %s\n\n See perf"
+				" trace -l for available scripts.\n", argv[0]);
+			usage_with_options(trace_usage, options);
 		}
 
-		report_script_path = get_script_path(argv[1], REPORT_SUFFIX);
-		if (!report_script_path) {
-			fprintf(stderr, "report script not found\n");
-			return -1;
+		if (is_top_script(argv[0])) {
+			rep_args = argc - 1;
+		} else {
+			int rec_args;
+
+			rep_args = has_required_arg(rep_script_path);
+			rec_args = (argc - 1) - rep_args;
+			if (rec_args < 0) {
+				fprintf(stderr, " %s script requires options."
+					"\n\n See perf trace -l for available "
+					"scripts and options.\n", argv[0]);
+				usage_with_options(trace_usage, options);
+			}
 		}
 
 		if (pipe(live_pipe) < 0) {
@@ -622,60 +689,84 @@
 		}
 
 		if (!pid) {
+			system_wide = true;
+			j = 0;
+
 			dup2(live_pipe[1], 1);
 			close(live_pipe[0]);
 
-			__argv = malloc(6 * sizeof(const char *));
-			__argv[0] = "/bin/sh";
-			__argv[1] = record_script_path;
-			__argv[2] = "-q";
-			__argv[3] = "-o";
-			__argv[4] = "-";
-			__argv[5] = NULL;
+			if (!is_top_script(argv[0]))
+				system_wide = !have_cmd(argc - rep_args,
+							&argv[rep_args]);
+
+			__argv = malloc((argc + 6) * sizeof(const char *));
+			if (!__argv)
+				die("malloc");
+
+			__argv[j++] = "/bin/sh";
+			__argv[j++] = rec_script_path;
+			if (system_wide)
+				__argv[j++] = "-a";
+			__argv[j++] = "-q";
+			__argv[j++] = "-o";
+			__argv[j++] = "-";
+			for (i = rep_args + 1; i < argc; i++)
+				__argv[j++] = argv[i];
+			__argv[j++] = NULL;
 
 			execvp("/bin/sh", (char **)__argv);
+			free(__argv);
 			exit(-1);
 		}
 
 		dup2(live_pipe[0], 0);
 		close(live_pipe[1]);
 
-		__argv = malloc((argc + 3) * sizeof(const char *));
-		__argv[0] = "/bin/sh";
-		__argv[1] = report_script_path;
+		__argv = malloc((argc + 4) * sizeof(const char *));
+		if (!__argv)
+			die("malloc");
+		j = 0;
+		__argv[j++] = "/bin/sh";
+		__argv[j++] = rep_script_path;
+		for (i = 1; i < rep_args + 1; i++)
+			__argv[j++] = argv[i];
+		__argv[j++] = "-i";
+		__argv[j++] = "-";
+		__argv[j++] = NULL;
+
+		execvp("/bin/sh", (char **)__argv);
+		free(__argv);
+		exit(-1);
+	}
+
+	if (rec_script_path)
+		script_path = rec_script_path;
+	if (rep_script_path)
+		script_path = rep_script_path;
+
+	if (script_path) {
+		system_wide = false;
+		j = 0;
+
+		if (rec_script_path)
+			system_wide = !have_cmd(argc - 1, &argv[1]);
+
+		__argv = malloc((argc + 2) * sizeof(const char *));
+		if (!__argv)
+			die("malloc");
+		__argv[j++] = "/bin/sh";
+		__argv[j++] = script_path;
+		if (system_wide)
+			__argv[j++] = "-a";
 		for (i = 2; i < argc; i++)
-			__argv[i] = argv[i];
-		__argv[i++] = "-i";
-		__argv[i++] = "-";
-		__argv[i++] = NULL;
+			__argv[j++] = argv[i];
+		__argv[j++] = NULL;
 
 		execvp("/bin/sh", (char **)__argv);
+		free(__argv);
 		exit(-1);
 	}
 
-	if (suffix) {
-		script_path = get_script_path(argv[2], suffix);
-		if (!script_path) {
-			fprintf(stderr, "script not found\n");
-			return -1;
-		}
-
-		__argv = malloc((argc + 1) * sizeof(const char *));
-		__argv[0] = "/bin/sh";
-		__argv[1] = script_path;
-		for (i = 3; i < argc; i++)
-			__argv[i - 1] = argv[i];
-		__argv[argc - 1] = NULL;
-
-		execvp("/bin/sh", (char **)__argv);
-		exit(-1);
-	}
-
-	setup_scripting();
-
-	argc = parse_options(argc, argv, options, trace_usage,
-			     PARSE_OPT_STOP_AT_NON_OPTION);
-
 	if (symbol__init() < 0)
 		return -1;
 	if (!script_name)
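
The reworked live mode above is plain pipe-and-exec plumbing: the forked child runs the record script with stdout redirected into the pipe, and the parent execs the report script with stdin coming out of it. Stripped of the argv assembly, a hypothetical standalone sketch:

	/* Hypothetical sketch of the live-mode plumbing: the child writes
	 * to the pipe (the record step), the parent reads from it (the
	 * report step); both images are replaced via exec.
	 */
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/types.h>
	#include <unistd.h>

	int main(void)
	{
		int live_pipe[2];
		pid_t pid;

		if (pipe(live_pipe) < 0) {
			perror("pipe");
			return 1;
		}

		pid = fork();
		if (pid < 0) {
			perror("fork");
			return 1;
		}

		if (!pid) {
			/* child: stdout -> pipe, then exec the producer */
			dup2(live_pipe[1], 1);
			close(live_pipe[0]);
			execlp("/bin/sh", "sh", "-c", "echo live data", (char *)NULL);
			exit(-1);	/* only reached if exec fails */
		}

		/* parent: stdin <- pipe, then exec the consumer */
		dup2(live_pipe[0], 0);
		close(live_pipe[1]);
		execlp("/bin/sh", "sh", "-c", "cat", (char *)NULL);
		exit(-1);
	}
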
diff --git a/tools/perf/scripts/perl/bin/failed-syscalls-record b/tools/perf/scripts/perl/bin/failed-syscalls-record
index eb5846b..8104895 100644
--- a/tools/perf/scripts/perl/bin/failed-syscalls-record
+++ b/tools/perf/scripts/perl/bin/failed-syscalls-record
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -a -e raw_syscalls:sys_exit $@
+perf record -e raw_syscalls:sys_exit $@
diff --git a/tools/perf/scripts/perl/bin/rw-by-file-record b/tools/perf/scripts/perl/bin/rw-by-file-record
index 5bfaae5..33efc86 100644
--- a/tools/perf/scripts/perl/bin/rw-by-file-record
+++ b/tools/perf/scripts/perl/bin/rw-by-file-record
@@ -1,3 +1,3 @@
 #!/bin/bash
-perf record -a -e syscalls:sys_enter_read -e syscalls:sys_enter_write $@
+perf record -e syscalls:sys_enter_read -e syscalls:sys_enter_write $@
 
diff --git a/tools/perf/scripts/perl/bin/rw-by-pid-record b/tools/perf/scripts/perl/bin/rw-by-pid-record
index 6e0b2f7..7cb9db2 100644
--- a/tools/perf/scripts/perl/bin/rw-by-pid-record
+++ b/tools/perf/scripts/perl/bin/rw-by-pid-record
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -a -e syscalls:sys_enter_read -e syscalls:sys_exit_read -e syscalls:sys_enter_write -e syscalls:sys_exit_write $@
+perf record -e syscalls:sys_enter_read -e syscalls:sys_exit_read -e syscalls:sys_enter_write -e syscalls:sys_exit_write $@
diff --git a/tools/perf/scripts/perl/bin/rwtop-record b/tools/perf/scripts/perl/bin/rwtop-record
index 6e0b2f7..7cb9db2 100644
--- a/tools/perf/scripts/perl/bin/rwtop-record
+++ b/tools/perf/scripts/perl/bin/rwtop-record
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -a -e syscalls:sys_enter_read -e syscalls:sys_exit_read -e syscalls:sys_enter_write -e syscalls:sys_exit_write $@
+perf record -e syscalls:sys_enter_read -e syscalls:sys_exit_read -e syscalls:sys_enter_write -e syscalls:sys_exit_write $@
diff --git a/tools/perf/scripts/perl/bin/wakeup-latency-record b/tools/perf/scripts/perl/bin/wakeup-latency-record
index 9f2acaa..464251a 100644
--- a/tools/perf/scripts/perl/bin/wakeup-latency-record
+++ b/tools/perf/scripts/perl/bin/wakeup-latency-record
@@ -1,5 +1,5 @@
 #!/bin/bash
-perf record -a -e sched:sched_switch -e sched:sched_wakeup $@
+perf record -e sched:sched_switch -e sched:sched_wakeup $@
 
 
 
diff --git a/tools/perf/scripts/perl/bin/workqueue-stats-record b/tools/perf/scripts/perl/bin/workqueue-stats-record
index 85301f2..8edda90 100644
--- a/tools/perf/scripts/perl/bin/workqueue-stats-record
+++ b/tools/perf/scripts/perl/bin/workqueue-stats-record
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -a -e workqueue:workqueue_creation -e workqueue:workqueue_destruction -e workqueue:workqueue_execution -e workqueue:workqueue_insertion $@
+perf record -e workqueue:workqueue_creation -e workqueue:workqueue_destruction -e workqueue:workqueue_execution -e workqueue:workqueue_insertion $@
diff --git a/tools/perf/scripts/python/bin/failed-syscalls-by-pid-record b/tools/perf/scripts/python/bin/failed-syscalls-by-pid-record
index eb5846b..8104895 100644
--- a/tools/perf/scripts/python/bin/failed-syscalls-by-pid-record
+++ b/tools/perf/scripts/python/bin/failed-syscalls-by-pid-record
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -a -e raw_syscalls:sys_exit $@
+perf record -e raw_syscalls:sys_exit $@
diff --git a/tools/perf/scripts/python/bin/futex-contention-record b/tools/perf/scripts/python/bin/futex-contention-record
index 5ecbb43..b1495c9 100644
--- a/tools/perf/scripts/python/bin/futex-contention-record
+++ b/tools/perf/scripts/python/bin/futex-contention-record
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -a -e syscalls:sys_enter_futex -e syscalls:sys_exit_futex $@
+perf record -e syscalls:sys_enter_futex -e syscalls:sys_exit_futex $@
diff --git a/tools/perf/scripts/python/bin/netdev-times-record b/tools/perf/scripts/python/bin/netdev-times-record
index d931a82..558754b 100644
--- a/tools/perf/scripts/python/bin/netdev-times-record
+++ b/tools/perf/scripts/python/bin/netdev-times-record
@@ -1,5 +1,5 @@
 #!/bin/bash
-perf record -a -e net:net_dev_xmit -e net:net_dev_queue		\
+perf record -e net:net_dev_xmit -e net:net_dev_queue		\
 		-e net:netif_receive_skb -e net:netif_rx		\
 		-e skb:consume_skb -e skb:kfree_skb			\
 		-e skb:skb_copy_datagram_iovec -e napi:napi_poll	\
diff --git a/tools/perf/scripts/python/bin/sched-migration-record b/tools/perf/scripts/python/bin/sched-migration-record
index 17a3e9b..7493fdd 100644
--- a/tools/perf/scripts/python/bin/sched-migration-record
+++ b/tools/perf/scripts/python/bin/sched-migration-record
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -m 16384 -a -e sched:sched_wakeup -e sched:sched_wakeup_new -e sched:sched_switch -e sched:sched_migrate_task $@
+perf record -m 16384 -e sched:sched_wakeup -e sched:sched_wakeup_new -e sched:sched_switch -e sched:sched_migrate_task $@
diff --git a/tools/perf/scripts/python/bin/sctop-record b/tools/perf/scripts/python/bin/sctop-record
index 1fc5998..4efbfaa7 100644
--- a/tools/perf/scripts/python/bin/sctop-record
+++ b/tools/perf/scripts/python/bin/sctop-record
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -a -e raw_syscalls:sys_enter $@
+perf record -e raw_syscalls:sys_enter $@
diff --git a/tools/perf/scripts/python/bin/syscall-counts-by-pid-record b/tools/perf/scripts/python/bin/syscall-counts-by-pid-record
index 1fc5998..4efbfaa7 100644
--- a/tools/perf/scripts/python/bin/syscall-counts-by-pid-record
+++ b/tools/perf/scripts/python/bin/syscall-counts-by-pid-record
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -a -e raw_syscalls:sys_enter $@
+perf record -e raw_syscalls:sys_enter $@
diff --git a/tools/perf/scripts/python/bin/syscall-counts-record b/tools/perf/scripts/python/bin/syscall-counts-record
index 1fc5998..4efbfaa7 100644
--- a/tools/perf/scripts/python/bin/syscall-counts-record
+++ b/tools/perf/scripts/python/bin/syscall-counts-record
@@ -1,2 +1,2 @@
 #!/bin/bash
-perf record -a -e raw_syscalls:sys_enter $@
+perf record -e raw_syscalls:sys_enter $@
diff --git a/tools/perf/util/ui/util.c b/tools/perf/util/ui/util.c
index 9706d9d..056c695 100644
--- a/tools/perf/util/ui/util.c
+++ b/tools/perf/util/ui/util.c
@@ -104,9 +104,10 @@
 	return rc;
 }
 
+static const char yes[] = "Yes", no[] = "No";
+
 bool ui__dialog_yesno(const char *msg)
 {
 	/* newtWinChoice should really be accepting const char pointers... */
-	char yes[] = "Yes", no[] = "No";
-	return newtWinChoice(NULL, yes, no, (char *)msg) == 1;
+	return newtWinChoice(NULL, (char *)yes, (char *)no, (char *)msg) == 1;
 }
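
Hoisting yes/no into static const storage documents that newtWinChoice() never writes through those pointers; the casts merely paper over its non-const prototype. A hypothetical sketch of the same accommodation:

	/* Hypothetical sketch: a legacy API takes char * but is known not
	 * to modify the string, so const data plus an explicit cast is
	 * safe, if ugly; the same trade-off is made above.
	 */
	#include <stdio.h>

	static void legacy_show(char *label)	/* non-const prototype */
	{
		printf("%s\n", label);		/* read-only in practice */
	}

	static const char yes[] = "Yes";

	int main(void)
	{
		legacy_show((char *)yes);	/* cast away const, never written */
		return 0;
	}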