Merge branch 'upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-2.6 into upstream
diff --git a/MAINTAINERS b/MAINTAINERS
index 74d71ca..1421f74 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -568,6 +568,18 @@
 W:	http://www.penguinppc.org/ppc64/
 S:	Supported
 
+BROADCOM BNX2 GIGABIT ETHERNET DRIVER
+P:	Michael Chan
+M:	mchan@broadcom.com
+L:	netdev@vger.kernel.org
+S:	Supported
+
+BROADCOM TG3 GIGABIT ETHERNET DRIVER
+P:	Michael Chan
+M:	mchan@broadcom.com
+L:	netdev@vger.kernel.org
+S:	Supported
+
 BTTV VIDEO4LINUX DRIVER
 P:	Mauro Carvalho Chehab
 M:	mchehab@infradead.org
@@ -1413,6 +1425,8 @@
 M:	jesse.brandeburg@intel.com
 P:	Jeff Kirsher
 M:	jeffrey.t.kirsher@intel.com
+P:	Auke Kok
+M:	auke-jan.h.kok@intel.com
 W:	http://sourceforge.net/projects/e1000/
 S:	Supported
 
@@ -1425,6 +1439,8 @@
 M:	jesse.brandeburg@intel.com
 P:	Jeff Kirsher
 M:	jeffrey.t.kirsher@intel.com
+P:	Auke Kok
+M:	auke-jan.h.kok@intel.com
 W:	http://sourceforge.net/projects/e1000/
 S:	Supported
 
@@ -1437,6 +1453,8 @@
 M:	john.ronciak@intel.com
 P:	Jesse Brandeburg
 M:	jesse.brandeburg@intel.com
+P:	Auke Kok
+M:	auke-jan.h.kok@intel.com
 W:	http://sourceforge.net/projects/e1000/
 S:	Supported
 
@@ -1877,6 +1895,11 @@
 W:	http://www.atnf.csiro.au/~rgooch/linux/kernel-patches.html
 S:	Maintained
 
+MULTIMEDIA CARD SUBSYSTEM
+P:	Russell King
+M:	rmk+mmc@arm.linux.org.uk
+S:	Maintained
+
 MULTISOUND SOUND DRIVER
 P:	Andrew Veliath
 M:	andrewtv@usa.net
diff --git a/Makefile b/Makefile
index 435d209..a3a7baa 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 17
-EXTRAVERSION =-rc5
-NAME=Lordi Rules
+EXTRAVERSION =-rc6
+NAME=Crazed Snow-Weasel
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
diff --git a/arch/alpha/kernel/alpha_ksyms.c b/arch/alpha/kernel/alpha_ksyms.c
index c645c5e..2b245ad 100644
--- a/arch/alpha/kernel/alpha_ksyms.c
+++ b/arch/alpha/kernel/alpha_ksyms.c
@@ -182,7 +182,6 @@
 EXPORT_SYMBOL(smp_call_function);
 EXPORT_SYMBOL(smp_call_function_on_cpu);
 EXPORT_SYMBOL(_atomic_dec_and_lock);
-EXPORT_SYMBOL(cpu_present_mask);
 #endif /* CONFIG_SMP */
 
 /*
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 9924fd0..c760a83 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -94,7 +94,7 @@
 	if (cpuid != boot_cpuid) {
 		flags |= 0x00040000UL; /* "remain halted" */
 		*pflags = flags;
-		clear_bit(cpuid, &cpu_present_mask);
+		cpu_clear(cpuid, cpu_present_map);
 		halt();
 	}
 #endif
@@ -120,8 +120,8 @@
 
 #ifdef CONFIG_SMP
 	/* Wait for the secondaries to halt. */
-	cpu_clear(boot_cpuid, cpu_possible_map);
-	while (cpus_weight(cpu_possible_map))
+	cpu_clear(boot_cpuid, cpu_present_map);
+	while (cpus_weight(cpu_present_map))
 		barrier();
 #endif
 
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 1852554..4dc273e 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -68,7 +68,6 @@
 static int smp_secondary_alive __initdata = 0;
 
 /* Which cpus ids came online.  */
-cpumask_t cpu_present_mask;
 cpumask_t cpu_online_map;
 
 EXPORT_SYMBOL(cpu_online_map);
@@ -439,7 +438,7 @@
 			if ((cpu->flags & 0x1cc) == 0x1cc) {
 				smp_num_probed++;
 				/* Assume here that "whami" == index */
-				cpu_set(i, cpu_present_mask);
+				cpu_set(i, cpu_present_map);
 				cpu->pal_revision = boot_cpu_palrev;
 			}
 
@@ -450,11 +449,10 @@
 		}
 	} else {
 		smp_num_probed = 1;
-		cpu_set(boot_cpuid, cpu_present_mask);
 	}
 
-	printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_mask = %lx\n",
-	       smp_num_probed, cpu_possible_map.bits[0]);
+	printk(KERN_INFO "SMP: %d CPUs probed -- cpu_present_map = %lx\n",
+	       smp_num_probed, cpu_present_map.bits[0]);
 }
 
 /*
@@ -473,7 +471,7 @@
 
 	/* Nothing to do on a UP box, or when told not to.  */
 	if (smp_num_probed == 1 || max_cpus == 0) {
-		cpu_present_mask = cpumask_of_cpu(boot_cpuid);
+		cpu_present_map = cpumask_of_cpu(boot_cpuid);
 		printk(KERN_INFO "SMP mode deactivated.\n");
 		return;
 	}
@@ -486,10 +484,6 @@
 void __devinit
 smp_prepare_boot_cpu(void)
 {
-	/*
-	 * Mark the boot cpu (current cpu) as online
-	 */ 
-	cpu_set(smp_processor_id(), cpu_online_map);
 }
 
 int __devinit
diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c
index 5f84417..2551fb4 100644
--- a/arch/alpha/kernel/sys_titan.c
+++ b/arch/alpha/kernel/sys_titan.c
@@ -66,7 +66,7 @@
 	register int bcpu = boot_cpuid;
 
 #ifdef CONFIG_SMP
-	cpumask_t cpm = cpu_present_mask;
+	cpumask_t cpm = cpu_present_map;
 	volatile unsigned long *dim0, *dim1, *dim2, *dim3;
 	unsigned long mask0, mask1, mask2, mask3, dummy;
 
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 5d3acff..d22f38b 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -101,7 +101,7 @@
 	help
 	  Choice for UART for kernel low-level using S3C2410 UARTS,
 	  should be between zero and two. The port must have been
-	  initalised by the boot-loader before use.
+	  initialised by the boot-loader before use.
 
 	  The uncompressor code port configuration is now handled
 	  by CONFIG_S3C2410_LOWLEVEL_UART_PORT.
diff --git a/arch/arm/mach-ixp4xx/Kconfig b/arch/arm/mach-ixp4xx/Kconfig
index 2a39f9e..3b23f43 100644
--- a/arch/arm/mach-ixp4xx/Kconfig
+++ b/arch/arm/mach-ixp4xx/Kconfig
@@ -141,7 +141,7 @@
           2) If > 64MB of memory space is required, the IXP4xx can be 
 	     configured to use indirect registers to access PCI This allows 
 	     for up to 128MB (0x48000000 to 0x4fffffff) of memory on the bus. 
-	     The disadvantadge of this is that every PCI access requires 
+	     The disadvantage of this is that every PCI access requires 
 	     three local register accesses plus a spinlock, but in some 
 	     cases the performance hit is acceptable. In addition, you cannot 
 	     mmap() PCI devices in this case due to the indirect nature
diff --git a/arch/arm/mach-pxa/mainstone.c b/arch/arm/mach-pxa/mainstone.c
index 02e188d..b307f11 100644
--- a/arch/arm/mach-pxa/mainstone.c
+++ b/arch/arm/mach-pxa/mainstone.c
@@ -493,6 +493,7 @@
 MACHINE_START(MAINSTONE, "Intel HCDDBBVA0 Development Platform (aka Mainstone)")
 	/* Maintainer: MontaVista Software Inc. */
 	.phys_io	= 0x40000000,
+	.boot_params	= 0xa0000100,	/* BLOB boot parameter setting */
 	.io_pg_offst	= (io_p2v(0x40000000) >> 18) & 0xfffc,
 	.map_io		= mainstone_map_io,
 	.init_irq	= mainstone_init_irq,
diff --git a/arch/arm/mach-s3c2410/Kconfig b/arch/arm/mach-s3c2410/Kconfig
index ce7d810..970f98d 100644
--- a/arch/arm/mach-s3c2410/Kconfig
+++ b/arch/arm/mach-s3c2410/Kconfig
@@ -170,7 +170,7 @@
 	depends on ARCH_S3C2410 && PM
 	help
 	  Say Y here if you want verbose debugging from the PM Suspend and
-	  Resume code. See `Documentation/arm/Samsing-S3C24XX/Suspend.txt`
+	  Resume code. See <file:Documentation/arm/Samsung-S3C24XX/Suspend.txt>
 	  for more information.
 
 config S3C2410_PM_CHECK
diff --git a/arch/mips/au1000/common/prom.c b/arch/mips/au1000/common/prom.c
index 9c171af..ae7d8c57bf 100644
--- a/arch/mips/au1000/common/prom.c
+++ b/arch/mips/au1000/common/prom.c
@@ -1,10 +1,9 @@
 /*
  *
  * BRIEF MODULE DESCRIPTION
- *    PROM library initialisation code, assuming a version of
- *    pmon is the boot code.
+ *    PROM library initialisation code, assuming YAMON is the boot loader.
  *
- * Copyright 2000,2001 MontaVista Software Inc.
+ * Copyright 2000, 2001, 2006 MontaVista Software Inc.
  * Author: MontaVista Software, Inc.
  *         	ppopov@mvista.com or source@mvista.com
  *
@@ -49,9 +48,9 @@
 
 typedef struct
 {
-    char *name;
-/*    char *val; */
-}t_env_var;
+	char *name;
+	char *val;
+} t_env_var;
 
 
 char * prom_getcmdline(void)
@@ -85,21 +84,16 @@
 {
 	/*
 	 * Return a pointer to the given environment variable.
-	 * Environment variables are stored in the form of "memsize=64".
 	 */
 
 	t_env_var *env = (t_env_var *)prom_envp;
-	int i;
 
-	i = strlen(envname);
-
-	while(env->name) {
-		if(strncmp(envname, env->name, i) == 0) {
-			return(env->name + strlen(envname) + 1);
-		}
+	while (env->name) {
+		if (strcmp(envname, env->name) == 0)
+			return env->val;
 		env++;
 	}
-	return(NULL);
+	return NULL;
 }
 
 inline unsigned char str2hexnum(unsigned char c)
diff --git a/arch/mips/au1000/common/sleeper.S b/arch/mips/au1000/common/sleeper.S
index 44dac3b..683d9da 100644
--- a/arch/mips/au1000/common/sleeper.S
+++ b/arch/mips/au1000/common/sleeper.S
@@ -112,6 +112,11 @@
 	mtc0	k0, CP0_PAGEMASK
 	lw	k0, 0x14(sp)
 	mtc0	k0, CP0_CONFIG
+
+	/* We need to catch the early Alchemy SOCs with
+	 * the write-only Config[OD] bit and set it back to one...
+	 */
+	jal	au1x00_fixup_config_od
 	lw	$1, PT_R1(sp)
 	lw	$2, PT_R2(sp)
 	lw	$3, PT_R3(sp)
diff --git a/arch/mips/ddb5xxx/ddb5476/dbg_io.c b/arch/mips/ddb5xxx/ddb5476/dbg_io.c
index 85e9e50..f2296a9 100644
--- a/arch/mips/ddb5xxx/ddb5476/dbg_io.c
+++ b/arch/mips/ddb5xxx/ddb5476/dbg_io.c
@@ -86,7 +86,7 @@
         /* disable interrupts */
         UART16550_WRITE(OFS_INTR_ENABLE, 0);
 
-        /* set up buad rate */
+        /* set up baud rate */
         {
                 uint32 divisor;
 
diff --git a/arch/mips/ddb5xxx/ddb5477/kgdb_io.c b/arch/mips/ddb5xxx/ddb5477/kgdb_io.c
index 1d18d59..385bbdb 100644
--- a/arch/mips/ddb5xxx/ddb5477/kgdb_io.c
+++ b/arch/mips/ddb5xxx/ddb5477/kgdb_io.c
@@ -86,7 +86,7 @@
         /* disable interrupts */
         UART16550_WRITE(OFS_INTR_ENABLE, 0);
 
-        /* set up buad rate */
+        /* set up baud rate */
         {
                 uint32 divisor;
 
diff --git a/arch/mips/gt64120/ev64120/serialGT.c b/arch/mips/gt64120/ev64120/serialGT.c
index 16e34a5..8f0d835 100644
--- a/arch/mips/gt64120/ev64120/serialGT.c
+++ b/arch/mips/gt64120/ev64120/serialGT.c
@@ -149,7 +149,7 @@
 #else
 	/*
 	 * Note: Set baud rate, hardcoded here for rate of 115200
-	 * since became unsure of above "buad rate" algorithm (??).
+	 * since became unsure of above "baud rate" algorithm (??).
 	 */
 	outreg(channel, LCR, 0x83);
 	outreg(channel, DLM, 0x00);	// See note above
diff --git a/arch/mips/gt64120/momenco_ocelot/dbg_io.c b/arch/mips/gt64120/momenco_ocelot/dbg_io.c
index 8720bcc..f0a6a38 100644
--- a/arch/mips/gt64120/momenco_ocelot/dbg_io.c
+++ b/arch/mips/gt64120/momenco_ocelot/dbg_io.c
@@ -73,7 +73,7 @@
 	/* disable interrupts */
 	UART16550_WRITE(OFS_INTR_ENABLE, 0);
 
-	/* set up buad rate */
+	/* set up baud rate */
 	{
 		uint32 divisor;
 
diff --git a/arch/mips/ite-boards/generic/dbg_io.c b/arch/mips/ite-boards/generic/dbg_io.c
index c4f8530..6a7ccaf 100644
--- a/arch/mips/ite-boards/generic/dbg_io.c
+++ b/arch/mips/ite-boards/generic/dbg_io.c
@@ -72,7 +72,7 @@
 	/* disable interrupts */
 	UART16550_WRITE(OFS_INTR_ENABLE, 0);
 
-	/* set up buad rate */
+	/* set up baud rate */
 	{
 		uint32 divisor;
 
diff --git a/arch/mips/kernel/cpu-bugs64.c b/arch/mips/kernel/cpu-bugs64.c
index 47a087b..d268827 100644
--- a/arch/mips/kernel/cpu-bugs64.c
+++ b/arch/mips/kernel/cpu-bugs64.c
@@ -206,7 +206,7 @@
 		"daddi	%0, %1, %3\n\t"
 		".set	pop"
 		: "=r" (v), "=&r" (tmp)
-		: "I" (0xffffffffffffdb9a), "I" (0x1234));
+		: "I" (0xffffffffffffdb9aUL), "I" (0x1234));
 	set_except_vector(12, handler);
 	local_irq_restore(flags);
 
@@ -224,7 +224,7 @@
 		"dsrl	%1, %1, 1\n\t"
 		"daddi	%0, %1, %3"
 		: "=r" (v), "=&r" (tmp)
-		: "I" (0xffffffffffffdb9a), "I" (0x1234));
+		: "I" (0xffffffffffffdb9aUL), "I" (0x1234));
 	set_except_vector(12, handler);
 	local_irq_restore(flags);
 
@@ -280,7 +280,7 @@
 		"daddu	%1, %2\n\t"
 		".set	pop"
 		: "=&r" (v), "=&r" (w), "=&r" (tmp)
-		: "I" (0xffffffffffffdb9a), "I" (0x1234));
+		: "I" (0xffffffffffffdb9aUL), "I" (0x1234));
 
 	if (v == w) {
 		printk("no.\n");
@@ -296,7 +296,7 @@
 		"addiu	%1, $0, %4\n\t"
 		"daddu	%1, %2"
 		: "=&r" (v), "=&r" (w), "=&r" (tmp)
-		: "I" (0xffffffffffffdb9a), "I" (0x1234));
+		: "I" (0xffffffffffffdb9aUL), "I" (0x1234));
 
 	if (v == w) {
 		printk("yes.\n");
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index bef3e2d..8c2c359 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -655,7 +655,7 @@
 	case PRID_IMP_SB1:
 		c->cputype = CPU_SB1;
 		/* FPU in pass1 is known to have issues. */
-		if ((c->processor_id & 0xff) < 0x20)
+		if ((c->processor_id & 0xff) < 0x02)
 			c->options &= ~(MIPS_CPU_FPU | MIPS_CPU_32FPR);
 		break;
 	case PRID_IMP_SB1A:
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c
index e54a7f4..d7bf021 100644
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -288,6 +288,9 @@
 		sym = (Elf_Sym *)sechdrs[symindex].sh_addr
 			+ ELF_MIPS_R_SYM(rel[i]);
 		if (!sym->st_value) {
+			/* Ignore unresolved weak symbol */
+			if (ELF_ST_BIND(sym->st_info) == STB_WEAK)
+				continue;
 			printk(KERN_WARNING "%s: Unknown symbol %s\n",
 			       me->name, strtab + sym->st_name);
 			return -ENOENT;
@@ -325,6 +328,9 @@
 		sym = (Elf_Sym *)sechdrs[symindex].sh_addr
 			+ ELF_MIPS_R_SYM(rel[i]);
 		if (!sym->st_value) {
+			/* Ignore unresolved weak symbol */
+			if (ELF_ST_BIND(sym->st_info) == STB_WEAK)
+				continue;
 			printk(KERN_WARNING "%s: Unknown symbol %s\n",
 			       me->name, strtab + sym->st_name);
 			return -ENOENT;
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index b53a920..8efb23a 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -209,7 +209,7 @@
 	PTR	sys_fork
 	PTR	sys_read
 	PTR	sys_write
-	PTR	sys_open			/* 4005 */
+	PTR	compat_sys_open			/* 4005 */
 	PTR	sys_close
 	PTR	sys_waitpid
 	PTR	sys_creat
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index bcf1b10..397a70e 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -246,7 +246,7 @@
 #ifdef CONFIG_64BIT
 	/* HACK: Guess if the sign extension was forgotten */
 	if (start > 0x0000000080000000 && start < 0x00000000ffffffff)
-		start |= 0xffffffff00000000;
+		start |= 0xffffffff00000000UL;
 #endif
 
 	end = start + size;
@@ -355,8 +355,6 @@
 	}
 #endif
 
-	memory_present(0, first_usable_pfn, max_low_pfn);
-
 	/* Initialize the boot-time allocator with low memory only.  */
 	bootmap_size = init_bootmem(first_usable_pfn, max_low_pfn);
 
@@ -410,6 +408,7 @@
 
 		/* Register lowmem ranges */
 		free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
+		memory_present(0, curr_pfn, curr_pfn + size - 1);
 	}
 
 	/* Reserve the bootmap memory.  */
@@ -419,17 +418,20 @@
 #ifdef CONFIG_BLK_DEV_INITRD
 	initrd_below_start_ok = 1;
 	if (initrd_start) {
-		unsigned long initrd_size = ((unsigned char *)initrd_end) - ((unsigned char *)initrd_start);
+		unsigned long initrd_size = ((unsigned char *)initrd_end) -
+			((unsigned char *)initrd_start);
+		const int width = sizeof(long) * 2;
+
 		printk("Initial ramdisk at: 0x%p (%lu bytes)\n",
 		       (void *)initrd_start, initrd_size);
 
 		if (CPHYSADDR(initrd_end) > PFN_PHYS(max_low_pfn)) {
 			printk("initrd extends beyond end of memory "
 			       "(0x%0*Lx > 0x%0*Lx)\ndisabling initrd\n",
-			       sizeof(long) * 2,
-			       (unsigned long long)CPHYSADDR(initrd_end),
-			       sizeof(long) * 2,
-			       (unsigned long long)PFN_PHYS(max_low_pfn));
+			       width,
+			       (unsigned long long) CPHYSADDR(initrd_end),
+			       width,
+			       (unsigned long long) PFN_PHYS(max_low_pfn));
 			initrd_start = initrd_end = 0;
 			initrd_reserve_bootmem = 0;
 		}
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index d42f358..298f82f 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -247,6 +247,9 @@
 	current_thread_info()->cpu = 0;
 	smp_tune_scheduling();
 	plat_prepare_cpus(max_cpus);
+#ifndef CONFIG_HOTPLUG_CPU
+	cpu_present_map = cpu_possible_map;
+#endif
 }
 
 /* preload SMP state for boot cpu */
@@ -442,7 +445,7 @@
 	int cpu;
 	int ret;
 
-	for_each_cpu(cpu) {
+	for_each_present_cpu(cpu) {
 		ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL);
 		if (ret)
 			printk(KERN_WARNING "topology_init: register_cpu %d "
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 8f4fdd9..5e8a18a 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -276,8 +276,7 @@
 
 asmlinkage int _sys_sysmips(int cmd, long arg1, int arg2, int arg3)
 {
-	int	tmp, len;
-	char	__user *name;
+	int	tmp;
 
 	switch(cmd) {
 	case MIPS_ATOMIC_SET:
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 35cb08d..a7564b0 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -819,15 +819,30 @@
 
 asmlinkage void do_mcheck(struct pt_regs *regs)
 {
+	const int field = 2 * sizeof(unsigned long);
+	int multi_match = regs->cp0_status & ST0_TS;
+
 	show_regs(regs);
-	dump_tlb_all();
+
+	if (multi_match) {
+		printk("Index   : %0x\n", read_c0_index());
+		printk("Pagemask: %0x\n", read_c0_pagemask());
+		printk("EntryHi : %0*lx\n", field, read_c0_entryhi());
+		printk("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
+		printk("EntryLo1: %0*lx\n", field, read_c0_entrylo1());
+		printk("\n");
+		dump_tlb_all();
+	}
+
+	show_code((unsigned int *) regs->cp0_epc);
+
 	/*
 	 * Some chips may have other causes of machine check (e.g. SB1
 	 * graduation timer)
 	 */
 	panic("Caught Machine Check exception - %scaused by multiple "
 	      "matching entries in the TLB.",
-	      (regs->cp0_status & ST0_TS) ? "" : "not ");
+	      (multi_match) ? "" : "not ");
 }
 
 asmlinkage void do_mt(struct pt_regs *regs)
diff --git a/arch/mips/math-emu/dp_fint.c b/arch/mips/math-emu/dp_fint.c
index a1962eb..39a71de1 100644
--- a/arch/mips/math-emu/dp_fint.c
+++ b/arch/mips/math-emu/dp_fint.c
@@ -29,7 +29,9 @@
 
 ieee754dp ieee754dp_fint(int x)
 {
-	COMPXDP;
+	u64 xm;
+	int xe;
+	int xs;
 
 	CLEARCX;
 
diff --git a/arch/mips/math-emu/dp_flong.c b/arch/mips/math-emu/dp_flong.c
index eae90a8..f08f223 100644
--- a/arch/mips/math-emu/dp_flong.c
+++ b/arch/mips/math-emu/dp_flong.c
@@ -29,7 +29,9 @@
 
 ieee754dp ieee754dp_flong(s64 x)
 {
-	COMPXDP;
+	u64 xm;
+	int xe;
+	int xs;
 
 	CLEARCX;
 
diff --git a/arch/mips/math-emu/sp_fint.c b/arch/mips/math-emu/sp_fint.c
index 7aac13a..e88e125 100644
--- a/arch/mips/math-emu/sp_fint.c
+++ b/arch/mips/math-emu/sp_fint.c
@@ -29,7 +29,9 @@
 
 ieee754sp ieee754sp_fint(int x)
 {
-	COMPXSP;
+	unsigned xm;
+	int xe;
+	int xs;
 
 	CLEARCX;
 
diff --git a/arch/mips/math-emu/sp_flong.c b/arch/mips/math-emu/sp_flong.c
index 3d6c1d1..26d6919 100644
--- a/arch/mips/math-emu/sp_flong.c
+++ b/arch/mips/math-emu/sp_flong.c
@@ -29,7 +29,9 @@
 
 ieee754sp ieee754sp_flong(s64 x)
 {
-	COMPXDP;		/* <--- need 64-bit mantissa temp */
+	u64 xm;		/* <--- need 64-bit mantissa temp */
+	int xe;
+	int xs;
 
 	CLEARCX;
 
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 6b35417..4a43924 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -1161,6 +1161,31 @@
 	c->options |= MIPS_CPU_SUBSET_CACHES;
 }
 
+void au1x00_fixup_config_od(void)
+{
+	/*
+	 * c0_config.od (bit 19) was write only (and read as 0)
+	 * on the early revisions of Alchemy SOCs.  It disables the bus
+	 * transaction overlapping and needs to be set to fix various errata.
+	 */
+	switch (read_c0_prid()) {
+	case 0x00030100: /* Au1000 DA */
+	case 0x00030201: /* Au1000 HA */
+	case 0x00030202: /* Au1000 HB */
+	case 0x01030200: /* Au1500 AB */
+	/*
+	 * The Au1100 errata are actually silent about this bit, so we set it
+	 * just in case for those revisions that require it to be set according
+	 * to arch/mips/au1000/common/cputable.c.
+	 */
+	case 0x02030200: /* Au1100 AB */
+	case 0x02030201: /* Au1100 BA */
+	case 0x02030202: /* Au1100 BC */
+		set_c0_config(1 << 19);
+		break;
+	}
+}
+
 static inline void coherency_setup(void)
 {
 	change_c0_config(CONF_CM_CMASK, CONF_CM_DEFAULT);
@@ -1181,6 +1206,15 @@
 	case CPU_R4400MC:
 		clear_c0_config(CONF_CU);
 		break;
+	/*
+	 * We need to catch the early Alchemy SOCs with
+	 * the write-only c0_config.od bit and set it back to one...
+	 */
+	case CPU_AU1000: /* rev. DA, HA, HB */
+	case CPU_AU1100: /* rev. AB, BA, BC ?? */
+	case CPU_AU1500: /* rev. AB */
+		au1x00_fixup_config_od();
+		break;
 	}
 }
 
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index c22308b9..33f6e1c 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -227,7 +227,7 @@
 	for (tmp = 0; tmp < max_low_pfn; tmp++)
 		if (page_is_ram(tmp)) {
 			ram++;
-			if (PageReserved(mem_map+tmp))
+			if (PageReserved(pfn_to_page(tmp)))
 				reservedpages++;
 		}
 
diff --git a/arch/mips/momentum/jaguar_atx/dbg_io.c b/arch/mips/momentum/jaguar_atx/dbg_io.c
index 542eac8..d7dea0a 100644
--- a/arch/mips/momentum/jaguar_atx/dbg_io.c
+++ b/arch/mips/momentum/jaguar_atx/dbg_io.c
@@ -73,7 +73,7 @@
 	/* disable interrupts */
 	UART16550_WRITE(OFS_INTR_ENABLE, 0);
 
-	/* set up buad rate */
+	/* set up baud rate */
 	{
 		uint32 divisor;
 
diff --git a/arch/mips/momentum/ocelot_c/dbg_io.c b/arch/mips/momentum/ocelot_c/dbg_io.c
index 8720bcc..f0a6a38 100644
--- a/arch/mips/momentum/ocelot_c/dbg_io.c
+++ b/arch/mips/momentum/ocelot_c/dbg_io.c
@@ -73,7 +73,7 @@
 	/* disable interrupts */
 	UART16550_WRITE(OFS_INTR_ENABLE, 0);
 
-	/* set up buad rate */
+	/* set up baud rate */
 	{
 		uint32 divisor;
 
diff --git a/arch/mips/momentum/ocelot_g/dbg_io.c b/arch/mips/momentum/ocelot_g/dbg_io.c
index 8720bcc..f0a6a38 100644
--- a/arch/mips/momentum/ocelot_g/dbg_io.c
+++ b/arch/mips/momentum/ocelot_g/dbg_io.c
@@ -73,7 +73,7 @@
 	/* disable interrupts */
 	UART16550_WRITE(OFS_INTR_ENABLE, 0);
 
-	/* set up buad rate */
+	/* set up baud rate */
 	{
 		uint32 divisor;
 
diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c
index 91b799d..c31e4cf 100644
--- a/arch/mips/oprofile/common.c
+++ b/arch/mips/oprofile/common.c
@@ -14,8 +14,8 @@
 
 #include "op_impl.h"
 
-extern struct op_mips_model op_model_mipsxx __attribute__((weak));
-extern struct op_mips_model op_model_rm9000 __attribute__((weak));
+extern struct op_mips_model op_model_mipsxx_ops __attribute__((weak));
+extern struct op_mips_model op_model_rm9000_ops __attribute__((weak));
 
 static struct op_mips_model *model;
 
@@ -83,11 +83,11 @@
 	case CPU_74K:
 	case CPU_SB1:
 	case CPU_SB1A:
-		lmodel = &op_model_mipsxx;
+		lmodel = &op_model_mipsxx_ops;
 		break;
 
 	case CPU_RM9000:
-		lmodel = &op_model_rm9000;
+		lmodel = &op_model_rm9000_ops;
 		break;
 	};
 
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index e7ce923..f26a00e 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -23,7 +23,7 @@
 
 #define M_COUNTER_OVERFLOW		(1UL    << 31)
 
-struct op_mips_model op_model_mipsxx;
+struct op_mips_model op_model_mipsxx_ops;
 
 static struct mipsxx_register_config {
 	unsigned int control[4];
@@ -34,7 +34,7 @@
 
 static void mipsxx_reg_setup(struct op_counter_config *ctr)
 {
-	unsigned int counters = op_model_mipsxx.num_counters;
+	unsigned int counters = op_model_mipsxx_ops.num_counters;
 	int i;
 
 	/* Compute the performance counter control word.  */
@@ -62,7 +62,7 @@
 
 static void mipsxx_cpu_setup (void *args)
 {
-	unsigned int counters = op_model_mipsxx.num_counters;
+	unsigned int counters = op_model_mipsxx_ops.num_counters;
 
 	switch (counters) {
 	case 4:
@@ -83,7 +83,7 @@
 /* Start all counters on current CPU */
 static void mipsxx_cpu_start(void *args)
 {
-	unsigned int counters = op_model_mipsxx.num_counters;
+	unsigned int counters = op_model_mipsxx_ops.num_counters;
 
 	switch (counters) {
 	case 4:
@@ -100,7 +100,7 @@
 /* Stop all counters on current CPU */
 static void mipsxx_cpu_stop(void *args)
 {
-	unsigned int counters = op_model_mipsxx.num_counters;
+	unsigned int counters = op_model_mipsxx_ops.num_counters;
 
 	switch (counters) {
 	case 4:
@@ -116,7 +116,7 @@
 
 static int mipsxx_perfcount_handler(struct pt_regs *regs)
 {
-	unsigned int counters = op_model_mipsxx.num_counters;
+	unsigned int counters = op_model_mipsxx_ops.num_counters;
 	unsigned int control;
 	unsigned int counter;
 	int handled = 0;
@@ -187,37 +187,37 @@
 
 	reset_counters(counters);
 
-	op_model_mipsxx.num_counters = counters;
+	op_model_mipsxx_ops.num_counters = counters;
 	switch (current_cpu_data.cputype) {
 	case CPU_20KC:
-		op_model_mipsxx.cpu_type = "mips/20K";
+		op_model_mipsxx_ops.cpu_type = "mips/20K";
 		break;
 
 	case CPU_24K:
-		op_model_mipsxx.cpu_type = "mips/24K";
+		op_model_mipsxx_ops.cpu_type = "mips/24K";
 		break;
 
 	case CPU_25KF:
-		op_model_mipsxx.cpu_type = "mips/25K";
+		op_model_mipsxx_ops.cpu_type = "mips/25K";
 		break;
 
 #ifndef CONFIG_SMP
 	case CPU_34K:
-		op_model_mipsxx.cpu_type = "mips/34K";
+		op_model_mipsxx_ops.cpu_type = "mips/34K";
 		break;
 
 	case CPU_74K:
-		op_model_mipsxx.cpu_type = "mips/74K";
+		op_model_mipsxx_ops.cpu_type = "mips/74K";
 		break;
 #endif
 
 	case CPU_5KC:
-		op_model_mipsxx.cpu_type = "mips/5K";
+		op_model_mipsxx_ops.cpu_type = "mips/5K";
 		break;
 
 	case CPU_SB1:
 	case CPU_SB1A:
-		op_model_mipsxx.cpu_type = "mips/sb1";
+		op_model_mipsxx_ops.cpu_type = "mips/sb1";
 		break;
 
 	default:
@@ -233,12 +233,12 @@
 
 static void mipsxx_exit(void)
 {
-	reset_counters(op_model_mipsxx.num_counters);
+	reset_counters(op_model_mipsxx_ops.num_counters);
 
 	perf_irq = null_perf_irq;
 }
 
-struct op_mips_model op_model_mipsxx = {
+struct op_mips_model op_model_mipsxx_ops = {
 	.reg_setup	= mipsxx_reg_setup,
 	.cpu_setup	= mipsxx_cpu_setup,
 	.init		= mipsxx_init,
diff --git a/arch/mips/oprofile/op_model_rm9000.c b/arch/mips/oprofile/op_model_rm9000.c
index 9b75e41..b7063fe 100644
--- a/arch/mips/oprofile/op_model_rm9000.c
+++ b/arch/mips/oprofile/op_model_rm9000.c
@@ -126,7 +126,7 @@
 	free_irq(rm9000_perfcount_irq, NULL);
 }
 
-struct op_mips_model op_model_rm9000 = {
+struct op_mips_model op_model_rm9000_ops = {
 	.reg_setup	= rm9000_reg_setup,
 	.cpu_setup	= rm9000_cpu_setup,
 	.init		= rm9000_init,
diff --git a/arch/mips/sgi-ip32/ip32-irq.c b/arch/mips/sgi-ip32/ip32-irq.c
index de01c98..8ba0804 100644
--- a/arch/mips/sgi-ip32/ip32-irq.c
+++ b/arch/mips/sgi-ip32/ip32-irq.c
@@ -31,12 +31,12 @@
 /* issue a PIO read to make sure no PIO writes are pending */
 static void inline flush_crime_bus(void)
 {
-	volatile unsigned long junk = crime->control;
+	crime->control;
 }
 
 static void inline flush_mace_bus(void)
 {
-	volatile unsigned long junk = mace->perif.ctrl.misc;
+	mace->perif.ctrl.misc;
 }
 
 #undef DEBUG_IRQ
diff --git a/arch/powerpc/platforms/powermac/pfunc_core.c b/arch/powerpc/platforms/powermac/pfunc_core.c
index 4baa75b..f08173b 100644
--- a/arch/powerpc/platforms/powermac/pfunc_core.c
+++ b/arch/powerpc/platforms/powermac/pfunc_core.c
@@ -11,6 +11,7 @@
 #include <linux/kernel.h>
 #include <linux/spinlock.h>
 #include <linux/module.h>
+#include <linux/mutex.h>
 
 #include <asm/semaphore.h>
 #include <asm/prom.h>
@@ -546,6 +547,7 @@
 
 static LIST_HEAD(pmf_devices);
 static spinlock_t pmf_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_MUTEX(pmf_irq_mutex);
 
 static void pmf_release_device(struct kref *kref)
 {
@@ -864,15 +866,17 @@
 
 	spin_lock_irqsave(&pmf_lock, flags);
 	func = __pmf_find_function(target, name, PMF_FLAGS_INT_GEN);
-	if (func == NULL) {
-		spin_unlock_irqrestore(&pmf_lock, flags);
+	if (func)
+		func = pmf_get_function(func);
+	spin_unlock_irqrestore(&pmf_lock, flags);
+	if (func == NULL)
 		return -ENODEV;
-	}
+	mutex_lock(&pmf_irq_mutex);
 	if (list_empty(&func->irq_clients))
 		func->dev->handlers->irq_enable(func);
 	list_add(&client->link, &func->irq_clients);
 	client->func = func;
-	spin_unlock_irqrestore(&pmf_lock, flags);
+	mutex_unlock(&pmf_irq_mutex);
 
 	return 0;
 }
@@ -881,16 +885,16 @@
 void pmf_unregister_irq_client(struct pmf_irq_client *client)
 {
 	struct pmf_function *func = client->func;
-	unsigned long flags;
 
 	BUG_ON(func == NULL);
 
-	spin_lock_irqsave(&pmf_lock, flags);
+	mutex_lock(&pmf_irq_mutex);
 	client->func = NULL;
 	list_del(&client->link);
 	if (list_empty(&func->irq_clients))
 		func->dev->handlers->irq_disable(func);
-	spin_unlock_irqrestore(&pmf_lock, flags);
+	mutex_unlock(&pmf_irq_mutex);
+	pmf_put_function(func);
 }
 EXPORT_SYMBOL_GPL(pmf_unregister_irq_client);
 
diff --git a/arch/sparc64/lib/checksum.S b/arch/sparc64/lib/checksum.S
index ba9cd3c..1d230f6 100644
--- a/arch/sparc64/lib/checksum.S
+++ b/arch/sparc64/lib/checksum.S
@@ -165,8 +165,9 @@
 	sll		%g1, 8, %g1
 	or		%o5, %g1, %o4
 
-1:	add		%o2, %o4, %o2
+1:	addcc		%o2, %o4, %o2
+	addc		%g0, %o2, %o2
 
 csum_partial_finish:
 	retl
-	 mov		%o2, %o0
+	 srl		%o2, 0, %o0
diff --git a/arch/sparc64/lib/csum_copy.S b/arch/sparc64/lib/csum_copy.S
index 71af488..e566c77 100644
--- a/arch/sparc64/lib/csum_copy.S
+++ b/arch/sparc64/lib/csum_copy.S
@@ -221,11 +221,12 @@
 	sll		%g1, 8, %g1
 	or		%o5, %g1, %o4
 
-1:	add		%o3, %o4, %o3
+1:	addcc		%o3, %o4, %o3
+	addc		%g0, %o3, %o3
 
 70:
 	retl
-	 mov		%o3, %o0
+	 srl		%o3, 0, %o0
 
 95:	mov		0, GLOBAL_SPARE
 	brlez,pn	%o2, 4f
diff --git a/arch/um/Makefile-i386 b/arch/um/Makefile-i386
index 7a0e04e..b65ca11 100644
--- a/arch/um/Makefile-i386
+++ b/arch/um/Makefile-i386
@@ -33,5 +33,9 @@
 # prevent gcc from keeping the stack 16 byte aligned. Taken from i386.
 cflags-y += $(call cc-option,-mpreferred-stack-boundary=2)
 
+# Prevent sprintf in nfsd from being converted to strcpy and resulting in
+# an unresolved reference.
+cflags-y += -ffreestanding
+
 CFLAGS += $(cflags-y)
 USER_CFLAGS += $(cflags-y)
diff --git a/arch/um/include/kern_util.h b/arch/um/include/kern_util.h
index efa3d33..310980b 100644
--- a/arch/um/include/kern_util.h
+++ b/arch/um/include/kern_util.h
@@ -120,20 +120,11 @@
 extern void free_irq(unsigned int, void *);
 extern int cpu(void);
 
+extern void time_init_kern(void);
+
 /* Are we disallowed to sleep? Used to choose between GFP_KERNEL and GFP_ATOMIC. */
 extern int __cant_sleep(void);
 extern void segv_handler(int sig, union uml_pt_regs *regs);
 extern void sigio_handler(int sig, union uml_pt_regs *regs);
 
 #endif
-
-/*
- * Overrides for Emacs so that we follow Linus's tabbing style.
- * Emacs will notice this stuff at the end of the file and automatically
- * adjust the settings for this buffer only.  This must remain at the end
- * of the file.
- * ---------------------------------------------------------------------------
- * Local variables:
- * c-file-style: "linux"
- * End:
- */
diff --git a/arch/um/kernel/time_kern.c b/arch/um/kernel/time_kern.c
index 528cf62..86f51d0 100644
--- a/arch/um/kernel/time_kern.c
+++ b/arch/um/kernel/time_kern.c
@@ -84,6 +84,16 @@
 	}
 }
 
+
+void time_init_kern(void)
+{
+	unsigned long long nsecs;
+
+	nsecs = os_nsecs();
+	set_normalized_timespec(&wall_to_monotonic, -nsecs / BILLION,
+				-nsecs % BILLION);
+}
+
 void do_boot_timer_handler(struct sigcontext * sc)
 {
 	struct pt_regs regs;
diff --git a/arch/um/os-Linux/main.c b/arch/um/os-Linux/main.c
index 3a0ac38..90912aa 100644
--- a/arch/um/os-Linux/main.c
+++ b/arch/um/os-Linux/main.c
@@ -59,7 +59,7 @@
 	initcall_t *call;
 
 	call = &__uml_initcall_start;
-	while (call < &__uml_initcall_end){;
+	while (call < &__uml_initcall_end){
 		(*call)();
 		call++;
 	}
diff --git a/arch/um/os-Linux/time.c b/arch/um/os-Linux/time.c
index 6f76267..280c4fb 100644
--- a/arch/um/os-Linux/time.c
+++ b/arch/um/os-Linux/time.c
@@ -81,20 +81,12 @@
 	set_interval(ITIMER_REAL);
 }
 
-extern void ktime_get_ts(struct timespec *ts);
-#define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts)
-
 void time_init(void)
 {
-	struct timespec now;
-
 	if(signal(SIGVTALRM, boot_timer_handler) == SIG_ERR)
 		panic("Couldn't set SIGVTALRM handler");
 	set_interval(ITIMER_VIRTUAL);
-
-	do_posix_clock_monotonic_gettime(&now);
-	wall_to_monotonic.tv_sec = -now.tv_sec;
-	wall_to_monotonic.tv_nsec = -now.tv_nsec;
+	time_init_kern();
 }
 
 unsigned long long os_nsecs(void)
diff --git a/arch/um/sys-i386/syscalls.c b/arch/um/sys-i386/syscalls.c
index 749dd1b..710d5fb8 100644
--- a/arch/um/sys-i386/syscalls.c
+++ b/arch/um/sys-i386/syscalls.c
@@ -99,11 +99,12 @@
 
 	switch (call) {
 	case SEMOP:
-		return sys_semtimedop(first, (struct sembuf *) ptr, second,
-				      NULL);
+		return sys_semtimedop(first, (struct sembuf __user *) ptr,
+				      second, NULL);
 	case SEMTIMEDOP:
-		return sys_semtimedop(first, (struct sembuf *) ptr, second,
-				      (const struct timespec *) fifth);
+		return sys_semtimedop(first, (struct sembuf __user *) ptr,
+				      second,
+				      (const struct timespec __user *) fifth);
 	case SEMGET:
 		return sys_semget (first, second, third);
 	case SEMCTL: {
diff --git a/arch/um/sys-x86_64/signal.c b/arch/um/sys-x86_64/signal.c
index a4c46a8..9edf114 100644
--- a/arch/um/sys-x86_64/signal.c
+++ b/arch/um/sys-x86_64/signal.c
@@ -21,7 +21,7 @@
 #include "skas.h"
 
 static int copy_sc_from_user_skas(struct pt_regs *regs,
-                                 struct sigcontext *from)
+                                 struct sigcontext __user *from)
 {
        int err = 0;
 
@@ -54,7 +54,8 @@
        return(err);
 }
 
-int copy_sc_to_user_skas(struct sigcontext *to, struct _fpstate *to_fp,
+int copy_sc_to_user_skas(struct sigcontext __user *to,
+			 struct _fpstate __user *to_fp,
 			 struct pt_regs *regs, unsigned long mask,
 			 unsigned long sp)
 {
@@ -106,10 +107,11 @@
 #endif
 
 #ifdef CONFIG_MODE_TT
-int copy_sc_from_user_tt(struct sigcontext *to, struct sigcontext *from,
+int copy_sc_from_user_tt(struct sigcontext *to, struct sigcontext __user *from,
 			 int fpsize)
 {
-	struct _fpstate *to_fp, *from_fp;
+	struct _fpstate *to_fp;
+	struct _fpstate __user *from_fp;
 	unsigned long sigs;
 	int err;
 
@@ -124,13 +126,14 @@
 	return(err);
 }
 
-int copy_sc_to_user_tt(struct sigcontext *to, struct _fpstate *fp,
+int copy_sc_to_user_tt(struct sigcontext __user *to, struct _fpstate __user *fp,
 		       struct sigcontext *from, int fpsize, unsigned long sp)
 {
-	struct _fpstate *to_fp, *from_fp;
+	struct _fpstate __user *to_fp;
+	struct _fpstate *from_fp;
 	int err;
 
-	to_fp = (fp ? fp : (struct _fpstate *) (to + 1));
+	to_fp = (fp ? fp : (struct _fpstate __user *) (to + 1));
 	from_fp = from->fpstate;
 	err = copy_to_user(to, from, sizeof(*to));
 	/* The SP in the sigcontext is the updated one for the signal
@@ -158,7 +161,8 @@
        return(ret);
 }
 
-static int copy_sc_to_user(struct sigcontext *to, struct _fpstate *fp,
+static int copy_sc_to_user(struct sigcontext __user *to,
+			   struct _fpstate __user *fp,
 			   struct pt_regs *from, unsigned long mask,
 			   unsigned long sp)
 {
@@ -169,7 +173,7 @@
 
 struct rt_sigframe
 {
-       char *pretcode;
+       char __user *pretcode;
        struct ucontext uc;
        struct siginfo info;
 };
@@ -188,7 +192,7 @@
 
 	frame = (struct rt_sigframe __user *)
 		round_down(stack_top - sizeof(struct rt_sigframe), 16) - 8;
-        frame = (struct rt_sigframe *) ((unsigned long) frame - 128);
+        frame = (struct rt_sigframe __user *) ((unsigned long) frame - 128);
 
 	if (!access_ok(VERIFY_WRITE, fp, sizeof(struct _fpstate)))
 		goto out;
diff --git a/arch/um/sys-x86_64/syscalls.c b/arch/um/sys-x86_64/syscalls.c
index 6acee5c..6fce9f4 100644
--- a/arch/um/sys-x86_64/syscalls.c
+++ b/arch/um/sys-x86_64/syscalls.c
@@ -45,7 +45,7 @@
 	case ARCH_GET_GS:
 		ret = arch_prctl(code, (unsigned long) &tmp);
 		if(!ret)
-			ret = put_user(tmp, &addr);
+			ret = put_user(tmp, (long __user *)addr);
 		break;
 	default:
 		ret = -EINVAL;
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c
index 128b263..eab5394 100644
--- a/drivers/char/pcmcia/cm4000_cs.c
+++ b/drivers/char/pcmcia/cm4000_cs.c
@@ -149,7 +149,7 @@
 #define	ZERO_DEV(dev)  						\
 	memset(&dev->atr_csum,0,				\
 		sizeof(struct cm4000_dev) - 			\
-		/*link*/ sizeof(struct pcmcia_device) - 	\
+		/*link*/ sizeof(struct pcmcia_device *) - 	\
 		/*node*/ sizeof(dev_node_t) - 			\
 		/*atr*/ MAX_ATR*sizeof(char) - 			\
 		/*rbuf*/ 512*sizeof(char) - 			\
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index 8a23fb5..5413dc4 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -845,7 +845,7 @@
 			&sbp2_highlevel, ud->ne->host, &sbp2_ops,
 			sizeof(struct sbp2_status_block), sizeof(quadlet_t),
 			0x010000000000ULL, CSR1212_ALL_SPACE_END);
-	if (!scsi_id->status_fifo_addr) {
+	if (scsi_id->status_fifo_addr == ~0ULL) {
 		SBP2_ERR("failed to allocate status FIFO address range");
 		goto failed_alloc;
 	}
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index a54da42..8406839 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -275,6 +275,7 @@
 		spin_lock_irqsave(&priv->tx_lock, flags);
 		++priv->tx_tail;
 		if (netif_queue_stopped(dev) &&
+		    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags) &&
 		    priv->tx_head - priv->tx_tail <= ipoib_sendq_size >> 1)
 			netif_wake_queue(dev);
 		spin_unlock_irqrestore(&priv->tx_lock, flags);
diff --git a/drivers/mmc/Kconfig b/drivers/mmc/Kconfig
index 003b077..45bcf09 100644
--- a/drivers/mmc/Kconfig
+++ b/drivers/mmc/Kconfig
@@ -84,7 +84,7 @@
 
 config MMC_AU1X
 	tristate "Alchemy AU1XX0 MMC Card Interface support"
-	depends on SOC_AU1X00 && MMC
+	depends on MMC && SOC_AU1200
 	help
 	  This selects the AMD Alchemy(R) Multimedia card interface.
 	  If you have a Alchemy platform with a MMC slot, say Y or M here.
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 066e22b01..46d8c01 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -19,11 +19,11 @@
 	See the file COPYING in this distribution for more information.
 
 	Contributors:
-	
+
 		Wake-on-LAN support - Felipe Damasio <felipewd@terra.com.br>
 		PCI suspend/resume  - Felipe Damasio <felipewd@terra.com.br>
 		LinkChg interrupt   - Felipe Damasio <felipewd@terra.com.br>
-			
+
 	TODO:
 	* Test Tx checksumming thoroughly
 	* Implement dev->tx_timeout
@@ -461,7 +461,7 @@
 static inline void cp_set_rxbufsize (struct cp_private *cp)
 {
 	unsigned int mtu = cp->dev->mtu;
-	
+
 	if (mtu > ETH_DATA_LEN)
 		/* MTU + ethernet header + FCS + optional VLAN tag */
 		cp->rx_buf_sz = mtu + ETH_HLEN + 8;
@@ -510,7 +510,7 @@
 static inline unsigned int cp_rx_csum_ok (u32 status)
 {
 	unsigned int protocol = (status >> 16) & 0x3;
-	
+
 	if (likely((protocol == RxProtoTCP) && (!(status & TCPFail))))
 		return 1;
 	else if ((protocol == RxProtoUDP) && (!(status & UDPFail)))
@@ -1061,7 +1061,7 @@
 	cpw8(Config3, PARMEnable);
 	cp->wol_enabled = 0;
 
-	cpw8(Config5, cpr8(Config5) & PMEStatus); 
+	cpw8(Config5, cpr8(Config5) & PMEStatus);
 
 	cpw32_f(HiTxRingAddr, 0);
 	cpw32_f(HiTxRingAddr + 4, 0);
@@ -1351,7 +1351,7 @@
 		         WAKE_MCAST | WAKE_UCAST;
 	/* We don't need to go on if WOL is disabled */
 	if (!cp->wol_enabled) return;
-	
+
 	options        = cpr8 (Config3);
 	if (options & LinkUp)        wol->wolopts |= WAKE_PHY;
 	if (options & MagicPacket)   wol->wolopts |= WAKE_MAGIC;
@@ -1919,7 +1919,7 @@
 	mii_check_media(&cp->mii_if, netif_msg_link(cp), FALSE);
 
 	spin_unlock_irqrestore (&cp->lock, flags);
-	
+
 	return 0;
 }
 #endif /* CONFIG_PM */
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index feae783..abd6261 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -165,7 +165,7 @@
 static int debug = -1;
 
 /*
- * Receive ring size 
+ * Receive ring size
  * Warning: 64K ring has hardware issues and may lock up.
  */
 #if defined(CONFIG_SH_DREAMCAST)
@@ -257,7 +257,7 @@
 	{0x018a, 0x0106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
 	{0x126c, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
 	{0x1743, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
-	{0x021b, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 }, 
+	{0x021b, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
 
 #ifdef CONFIG_SH_SECUREEDGE5410
 	/* Bogus 8139 silicon reports 8129 without external PROM :-( */
@@ -1824,7 +1824,7 @@
 	int tmp_work;
 #endif
 
-	if (netif_msg_rx_err (tp)) 
+	if (netif_msg_rx_err (tp))
 		printk(KERN_DEBUG "%s: Ethernet frame had errors, status %8.8x.\n",
 			dev->name, rx_status);
 	tp->stats.rx_errors++;
@@ -1944,7 +1944,7 @@
 		 RTL_R16 (RxBufAddr),
 		 RTL_R16 (RxBufPtr), RTL_R8 (ChipCmd));
 
-	while (netif_running(dev) && received < budget 
+	while (netif_running(dev) && received < budget
 	       && (RTL_R8 (ChipCmd) & RxBufEmpty) == 0) {
 		u32 ring_offset = cur_rx % RX_BUF_LEN;
 		u32 rx_status;
@@ -2031,7 +2031,7 @@
 
 			netif_receive_skb (skb);
 		} else {
-			if (net_ratelimit()) 
+			if (net_ratelimit())
 				printk (KERN_WARNING
 					"%s: Memory squeeze, dropping packet.\n",
 					dev->name);
@@ -2158,13 +2158,13 @@
 	status = RTL_R16 (IntrStatus);
 
 	/* shared irq? */
-	if (unlikely((status & rtl8139_intr_mask) == 0)) 
+	if (unlikely((status & rtl8139_intr_mask) == 0))
 		goto out;
 
 	handled = 1;
 
 	/* h/w no longer present (hotplug?) or major error, bail */
-	if (unlikely(status == 0xFFFF)) 
+	if (unlikely(status == 0xFFFF))
 		goto out;
 
 	/* close possible race's with dev_close */
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index bdaaad8..f499a3b 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -865,6 +865,22 @@
 	  <file:Documentation/networking/net-modules.txt>.  The module will be
 	  called dm9000.
 
+config SMC911X
+	tristate "SMSC LAN911[5678] support"
+	select CRC32
+	select MII
+	depends on NET_ETHERNET
+	help
+	  This is a driver for SMSC's LAN911x series of Ethernet chipsets
+	  including the new LAN9115, LAN9116, LAN9117, and LAN9118.
+	  Say Y if you want it compiled into the kernel, 
+	  and read the Ethernet-HOWTO, available from
+	  <http://www.linuxdoc.org/docs.html#howto>.
+
+	  This driver is also available as a module. The module will be
+	  called smc911x.  If you want to compile it as a module, say M
+	  here and read <file:Documentation/modules.txt>.
+
 config NET_VENDOR_RACAL
 	bool "Racal-Interlan (Micom) NI cards"
 	depends on NET_ETHERNET && ISA
@@ -2311,6 +2327,23 @@
 
 	  If in doubt, say N.
 
+config MYRI10GE
+	tristate "Myricom Myri-10G Ethernet support"
+	depends on PCI
+	select FW_LOADER
+	select CRC32
+	---help---
+	  This driver supports the Myricom Myri-10G Dual Protocol interface in
+	  Ethernet mode. If the EEPROM on your board is not recent enough,
+	  you will need a newer firmware image.
+	  You may get this image and more information at:
+
+	  <http://www.myri.com/Myri-10G/>
+
+	  To compile this driver as a module, choose M here and read
+	  <file:Documentation/networking/net-modules.txt>.  The module
+	  will be called myri10ge.
+
 endmenu
 
 source "drivers/net/tokenring/Kconfig"
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index b90468a..1eced32 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -192,7 +192,9 @@
 obj-$(CONFIG_AMD8111_ETH) += amd8111e.o
 obj-$(CONFIG_IBMVETH) += ibmveth.o
 obj-$(CONFIG_S2IO) += s2io.o
+obj-$(CONFIG_MYRI10GE) += myri10ge/
 obj-$(CONFIG_SMC91X) += smc91x.o
+obj-$(CONFIG_SMC911X) += smc911x.o
 obj-$(CONFIG_DM9000) += dm9000.o
 obj-$(CONFIG_FEC_8XX) += fec_8xx/
 
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 14dbad1..e1fe960 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -2,7 +2,7 @@
  *
  * Alchemy Au1x00 ethernet driver
  *
- * Copyright 2001,2002,2003 MontaVista Software Inc.
+ * Copyright 2001-2003, 2006 MontaVista Software Inc.
  * Copyright 2002 TimeSys Corp.
  * Added ethtool/mii-tool support,
  * Copyright 2004 Matt Porter <mporter@kernel.crashing.org>
@@ -68,7 +68,7 @@
 static int au1000_debug = 3;
 #endif
 
-#define DRV_NAME	"au1000eth"
+#define DRV_NAME	"au1000_eth"
 #define DRV_VERSION	"1.5"
 #define DRV_AUTHOR	"Pete Popov <ppopov@embeddedalley.com>"
 #define DRV_DESC	"Au1xxx on-chip Ethernet driver"
@@ -80,7 +80,7 @@
 // prototypes
 static void hard_stop(struct net_device *);
 static void enable_rx_tx(struct net_device *dev);
-static struct net_device * au1000_probe(u32 ioaddr, int irq, int port_num);
+static struct net_device * au1000_probe(int port_num);
 static int au1000_init(struct net_device *);
 static int au1000_open(struct net_device *);
 static int au1000_close(struct net_device *);
@@ -1160,12 +1160,27 @@
 }
 
 static struct {
-	int port;
 	u32 base_addr;
 	u32 macen_addr;
 	int irq;
 	struct net_device *dev;
-} iflist[2];
+} iflist[2] = {
+#ifdef CONFIG_SOC_AU1000
+	{AU1000_ETH0_BASE, AU1000_MAC0_ENABLE, AU1000_MAC0_DMA_INT},
+	{AU1000_ETH1_BASE, AU1000_MAC1_ENABLE, AU1000_MAC1_DMA_INT}
+#endif
+#ifdef CONFIG_SOC_AU1100
+	{AU1100_ETH0_BASE, AU1100_MAC0_ENABLE, AU1100_MAC0_DMA_INT}
+#endif
+#ifdef CONFIG_SOC_AU1500
+	{AU1500_ETH0_BASE, AU1500_MAC0_ENABLE, AU1500_MAC0_DMA_INT},
+	{AU1500_ETH1_BASE, AU1500_MAC1_ENABLE, AU1500_MAC1_DMA_INT}
+#endif
+#ifdef CONFIG_SOC_AU1550
+	{AU1550_ETH0_BASE, AU1550_MAC0_ENABLE, AU1550_MAC0_DMA_INT},
+	{AU1550_ETH1_BASE, AU1550_MAC1_ENABLE, AU1550_MAC1_DMA_INT}
+#endif
+};
 
 static int num_ifs;
 
@@ -1176,58 +1191,14 @@
  */
 static int __init au1000_init_module(void)
 {
-	struct cpuinfo_mips *c = &current_cpu_data;
 	int ni = (int)((au_readl(SYS_PINFUNC) & (u32)(SYS_PF_NI2)) >> 4);
 	struct net_device *dev;
 	int i, found_one = 0;
 
-	switch (c->cputype) {
-#ifdef CONFIG_SOC_AU1000
-	case CPU_AU1000:
-		num_ifs = 2 - ni;
-		iflist[0].base_addr = AU1000_ETH0_BASE;
-		iflist[1].base_addr = AU1000_ETH1_BASE;
-		iflist[0].macen_addr = AU1000_MAC0_ENABLE;
-		iflist[1].macen_addr = AU1000_MAC1_ENABLE;
-		iflist[0].irq = AU1000_MAC0_DMA_INT;
-		iflist[1].irq = AU1000_MAC1_DMA_INT;
-		break;
-#endif
-#ifdef CONFIG_SOC_AU1100
-	case CPU_AU1100:
-		num_ifs = 1 - ni;
-		iflist[0].base_addr = AU1100_ETH0_BASE;
-		iflist[0].macen_addr = AU1100_MAC0_ENABLE;
-		iflist[0].irq = AU1100_MAC0_DMA_INT;
-		break;
-#endif
-#ifdef CONFIG_SOC_AU1500
-	case CPU_AU1500:
-		num_ifs = 2 - ni;
-		iflist[0].base_addr = AU1500_ETH0_BASE;
-		iflist[1].base_addr = AU1500_ETH1_BASE;
-		iflist[0].macen_addr = AU1500_MAC0_ENABLE;
-		iflist[1].macen_addr = AU1500_MAC1_ENABLE;
-		iflist[0].irq = AU1500_MAC0_DMA_INT;
-		iflist[1].irq = AU1500_MAC1_DMA_INT;
-		break;
-#endif
-#ifdef CONFIG_SOC_AU1550
-	case CPU_AU1550:
-		num_ifs = 2 - ni;
-		iflist[0].base_addr = AU1550_ETH0_BASE;
-		iflist[1].base_addr = AU1550_ETH1_BASE;
-		iflist[0].macen_addr = AU1550_MAC0_ENABLE;
-		iflist[1].macen_addr = AU1550_MAC1_ENABLE;
-		iflist[0].irq = AU1550_MAC0_DMA_INT;
-		iflist[1].irq = AU1550_MAC1_DMA_INT;
-		break;
-#endif
-	default:
-		num_ifs = 0;
-	}
+	num_ifs = NUM_ETH_INTERFACES - ni;
+
 	for(i = 0; i < num_ifs; i++) {
-		dev = au1000_probe(iflist[i].base_addr, iflist[i].irq, i);
+		dev = au1000_probe(i);
 		iflist[i].dev = dev;
 		if (dev)
 			found_one++;
@@ -1436,8 +1407,7 @@
 	.get_link = au1000_get_link
 };
 
-static struct net_device *
-au1000_probe(u32 ioaddr, int irq, int port_num)
+static struct net_device * au1000_probe(int port_num)
 {
 	static unsigned version_printed = 0;
 	struct au1000_private *aup = NULL;
@@ -1445,94 +1415,95 @@
 	db_dest_t *pDB, *pDBfree;
 	char *pmac, *argptr;
 	char ethaddr[6];
-	int i, err;
+	int irq, i, err;
+	u32 base, macen;
 
-	if (!request_mem_region(CPHYSADDR(ioaddr), MAC_IOSIZE, "Au1x00 ENET"))
+	if (port_num >= NUM_ETH_INTERFACES)
+		return NULL;
+
+	base  = CPHYSADDR(iflist[port_num].base_addr );
+	macen = CPHYSADDR(iflist[port_num].macen_addr);
+	irq = iflist[port_num].irq;
+
+	if (!request_mem_region( base, MAC_IOSIZE, "Au1x00 ENET") ||
+	    !request_mem_region(macen, 4, "Au1x00 ENET"))
 		return NULL;
 
-	if (version_printed++ == 0) 
+	if (version_printed++ == 0)
 		printk("%s version %s %s\n", DRV_NAME, DRV_VERSION, DRV_AUTHOR);
 
 	dev = alloc_etherdev(sizeof(struct au1000_private));
 	if (!dev) {
-		printk (KERN_ERR "au1000 eth: alloc_etherdev failed\n");  
+		printk(KERN_ERR "%s: alloc_etherdev failed\n", DRV_NAME);
 		return NULL;
 	}
 
-	if ((err = register_netdev(dev))) {
-		printk(KERN_ERR "Au1x_eth Cannot register net device err %d\n",
-				err);
+	if ((err = register_netdev(dev)) != 0) {
+		printk(KERN_ERR "%s: Cannot register net device, error %d\n",
+				DRV_NAME, err);
 		free_netdev(dev);
 		return NULL;
 	}
 
-	printk("%s: Au1x Ethernet found at 0x%x, irq %d\n", 
-			dev->name, ioaddr, irq);
+	printk("%s: Au1xx0 Ethernet found at 0x%x, irq %d\n",
+		dev->name, base, irq);
 
 	aup = dev->priv;
 
 	/* Allocate the data buffers */
 	/* Snooping works fine with eth on all au1xxx */
-	aup->vaddr = (u32)dma_alloc_noncoherent(NULL,
-			MAX_BUF_SIZE * (NUM_TX_BUFFS+NUM_RX_BUFFS),
-			&aup->dma_addr,
-			0);
+	aup->vaddr = (u32)dma_alloc_noncoherent(NULL, MAX_BUF_SIZE *
+						(NUM_TX_BUFFS + NUM_RX_BUFFS),
+						&aup->dma_addr,	0);
 	if (!aup->vaddr) {
 		free_netdev(dev);
-		release_mem_region(CPHYSADDR(ioaddr), MAC_IOSIZE);
+		release_mem_region( base, MAC_IOSIZE);
+		release_mem_region(macen, 4);
 		return NULL;
 	}
 
 	/* aup->mac is the base address of the MAC's registers */
-	aup->mac = (volatile mac_reg_t *)((unsigned long)ioaddr);
+	aup->mac = (volatile mac_reg_t *)iflist[port_num].base_addr;
+
 	/* Setup some variables for quick register address access */
-	if (ioaddr == iflist[0].base_addr)
-	{
-		/* check env variables first */
-		if (!get_ethernet_addr(ethaddr)) { 
+	aup->enable = (volatile u32 *)iflist[port_num].macen_addr;
+	aup->mac_id = port_num;
+	au_macs[port_num] = aup;
+
+	if (port_num == 0) {
+		/* Check the environment variables first */
+		if (get_ethernet_addr(ethaddr) == 0)
 			memcpy(au1000_mac_addr, ethaddr, sizeof(au1000_mac_addr));
-		} else {
+		else {
 			/* Check command line */
 			argptr = prom_getcmdline();
-			if ((pmac = strstr(argptr, "ethaddr=")) == NULL) {
-				printk(KERN_INFO "%s: No mac address found\n", 
-						dev->name);
-				/* use the hard coded mac addresses */
-			} else {
+			if ((pmac = strstr(argptr, "ethaddr=")) == NULL)
+				printk(KERN_INFO "%s: No MAC address found\n",
+						 dev->name);
+				/* Use the hard coded MAC addresses */
+			else {
 				str2eaddr(ethaddr, pmac + strlen("ethaddr="));
 				memcpy(au1000_mac_addr, ethaddr, 
-						sizeof(au1000_mac_addr));
+				       sizeof(au1000_mac_addr));
 			}
 		}
-			aup->enable = (volatile u32 *) 
-				((unsigned long)iflist[0].macen_addr);
-		memcpy(dev->dev_addr, au1000_mac_addr, sizeof(au1000_mac_addr));
-		setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR);
-		aup->mac_id = 0;
-		au_macs[0] = aup;
-	}
-		else
-	if (ioaddr == iflist[1].base_addr)
-	{
-			aup->enable = (volatile u32 *) 
-				((unsigned long)iflist[1].macen_addr);
-		memcpy(dev->dev_addr, au1000_mac_addr, sizeof(au1000_mac_addr));
-		dev->dev_addr[4] += 0x10;
-		setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR);
-		aup->mac_id = 1;
-		au_macs[1] = aup;
-	}
-	else
-	{
-		printk(KERN_ERR "%s: bad ioaddr\n", dev->name);
-	}
 
-	/* bring the device out of reset, otherwise probing the mii
-	 * will hang */
+		setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR);
+	} else if (port_num == 1)
+		setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR);
+
+	/*
+	 * Assign the Ethernet ports two consecutive MAC addresses to match
+	 * those printed on their stickers.
+	 */
+	memcpy(dev->dev_addr, au1000_mac_addr, sizeof(au1000_mac_addr));
+	dev->dev_addr[5] += port_num;
+
+	/* Bring the device out of reset, otherwise probing the MII will hang */
 	*aup->enable = MAC_EN_CLOCK_ENABLE;
 	au_sync_delay(2);
-	*aup->enable = MAC_EN_RESET0 | MAC_EN_RESET1 | 
-		MAC_EN_RESET2 | MAC_EN_CLOCK_ENABLE;
+	*aup->enable = MAC_EN_RESET0 | MAC_EN_RESET1 | MAC_EN_RESET2 |
+		       MAC_EN_CLOCK_ENABLE;
 	au_sync_delay(2);
 
 	aup->mii = kmalloc(sizeof(struct mii_phy), GFP_KERNEL);
@@ -1581,7 +1552,7 @@
 	}
 
 	spin_lock_init(&aup->lock);
-	dev->base_addr = ioaddr;
+	dev->base_addr = base;
 	dev->irq = irq;
 	dev->open = au1000_open;
 	dev->hard_start_xmit = au1000_tx;
@@ -1615,13 +1586,12 @@
 		if (aup->tx_db_inuse[i])
 			ReleaseDB(aup, aup->tx_db_inuse[i]);
 	}
-	dma_free_noncoherent(NULL,
-			MAX_BUF_SIZE * (NUM_TX_BUFFS+NUM_RX_BUFFS),
-			(void *)aup->vaddr,
-			aup->dma_addr);
+	dma_free_noncoherent(NULL, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS),
+			     (void *)aup->vaddr, aup->dma_addr);
 	unregister_netdev(dev);
 	free_netdev(dev);
-	release_mem_region(CPHYSADDR(ioaddr), MAC_IOSIZE);
+	release_mem_region( base, MAC_IOSIZE);
+	release_mem_region(macen, 4);
 	return NULL;
 }
 
@@ -1806,20 +1776,18 @@
 			aup = (struct au1000_private *) dev->priv;
 			unregister_netdev(dev);
 			kfree(aup->mii);
-			for (j = 0; j < NUM_RX_DMA; j++) {
+			for (j = 0; j < NUM_RX_DMA; j++)
 				if (aup->rx_db_inuse[j])
 					ReleaseDB(aup, aup->rx_db_inuse[j]);
-			}
-			for (j = 0; j < NUM_TX_DMA; j++) {
+			for (j = 0; j < NUM_TX_DMA; j++)
 				if (aup->tx_db_inuse[j])
 					ReleaseDB(aup, aup->tx_db_inuse[j]);
-			}
-			dma_free_noncoherent(NULL,
-					MAX_BUF_SIZE * (NUM_TX_BUFFS+NUM_RX_BUFFS),
-					(void *)aup->vaddr,
-					aup->dma_addr);
+			dma_free_noncoherent(NULL, MAX_BUF_SIZE *
+					     (NUM_TX_BUFFS + NUM_RX_BUFFS),
+					     (void *)aup->vaddr, aup->dma_addr);
+			release_mem_region(dev->base_addr, MAC_IOSIZE);
+			release_mem_region(CPHYSADDR(iflist[i].macen_addr), 4);
 			free_netdev(dev);
-			release_mem_region(CPHYSADDR(iflist[i].base_addr), MAC_IOSIZE);
 		}
 	}
 }
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index ac48f75..39f36aa 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -4877,7 +4877,7 @@
 				  const struct pci_device_id *ent)
 {
 	static int cas_version_printed = 0;
-	unsigned long casreg_base, casreg_len;
+	unsigned long casreg_len;
 	struct net_device *dev;
 	struct cas *cp;
 	int i, err, pci_using_dac;
@@ -4972,7 +4972,6 @@
 		pci_using_dac = 0;
 	}
 
-	casreg_base = pci_resource_start(pdev, 0);
 	casreg_len = pci_resource_len(pdev, 0);
 
 	cp = netdev_priv(dev);
@@ -5024,7 +5023,7 @@
 	cp->timer_ticks = 0;
 
 	/* give us access to cassini registers */
-	cp->regs = ioremap(casreg_base, casreg_len);
+	cp->regs = pci_iomap(pdev, 0, casreg_len);
 	if (cp->regs == 0UL) {
 		printk(KERN_ERR PFX "Cannot map device registers, "
 		       "aborting.\n");
@@ -5123,7 +5122,7 @@
 		cas_shutdown(cp);
 	mutex_unlock(&cp->pm_mutex);
 
-	iounmap(cp->regs);
+	pci_iounmap(pdev, cp->regs);
 
 
 err_out_free_res:
@@ -5171,7 +5170,7 @@
 #endif
 	pci_free_consistent(pdev, sizeof(struct cas_init_block),
 			    cp->init_block, cp->block_dvma);
-	iounmap(cp->regs);
+	pci_iounmap(pdev, cp->regs);
 	free_netdev(dev);
 	pci_release_regions(pdev);
 	pci_disable_device(pdev);
diff --git a/drivers/net/e1000/Makefile b/drivers/net/e1000/Makefile
index ca9f895..5dea2b7 100644
--- a/drivers/net/e1000/Makefile
+++ b/drivers/net/e1000/Makefile
@@ -1,7 +1,7 @@
 ################################################################################
 #
 # 
-# Copyright(c) 1999 - 2003 Intel Corporation. All rights reserved.
+# Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
 # 
 # This program is free software; you can redistribute it and/or modify it 
 # under the terms of the GNU General Public License as published by the Free 
@@ -22,6 +22,7 @@
 # 
 # Contact Information:
 # Linux NICS <linux.nics@intel.com>
+# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 # Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 #
 ################################################################################
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 281de41..2bc34fb 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   
-  Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+  Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
   
   This program is free software; you can redistribute it and/or modify it 
   under the terms of the GNU General Public License as published by the Free 
@@ -22,6 +22,7 @@
   
   Contact Information:
   Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
 *******************************************************************************/
@@ -114,6 +115,8 @@
 /* Supported Rx Buffer Sizes */
 #define E1000_RXBUFFER_128   128    /* Used for packet split */
 #define E1000_RXBUFFER_256   256    /* Used for packet split */
+#define E1000_RXBUFFER_512   512
+#define E1000_RXBUFFER_1024  1024
 #define E1000_RXBUFFER_2048  2048
 #define E1000_RXBUFFER_4096  4096
 #define E1000_RXBUFFER_8192  8192
@@ -334,7 +337,6 @@
 	boolean_t have_msi;
 #endif
 	/* to not mess up cache alignment, always add to the bottom */
-	boolean_t txb2b;
 #ifdef NETIF_F_TSO
 	boolean_t tso_force;
 #endif
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index ecccca3..cfdf0b2 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   
-  Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+  Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
   
   This program is free software; you can redistribute it and/or modify it 
   under the terms of the GNU General Public License as published by the Free 
@@ -22,6 +22,7 @@
   
   Contact Information:
   Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
 *******************************************************************************/
@@ -864,15 +865,15 @@
 e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
 {
 	struct net_device *netdev = adapter->netdev;
- 	uint32_t mask, i=0, shared_int = TRUE;
- 	uint32_t irq = adapter->pdev->irq;
+	uint32_t mask, i=0, shared_int = TRUE;
+	uint32_t irq = adapter->pdev->irq;
 
 	*data = 0;
 
 	/* Hook up test interrupt handler just for this test */
- 	if (!request_irq(irq, &e1000_test_intr, 0, netdev->name, netdev)) {
- 		shared_int = FALSE;
- 	} else if (request_irq(irq, &e1000_test_intr, SA_SHIRQ,
+	if (!request_irq(irq, &e1000_test_intr, 0, netdev->name, netdev)) {
+		shared_int = FALSE;
+	} else if (request_irq(irq, &e1000_test_intr, SA_SHIRQ,
 			      netdev->name, netdev)){
 		*data = 1;
 		return -1;
@@ -888,22 +889,22 @@
 		/* Interrupt to test */
 		mask = 1 << i;
 
- 		if (!shared_int) {
- 			/* Disable the interrupt to be reported in
- 			 * the cause register and then force the same
- 			 * interrupt and see if one gets posted.  If
- 			 * an interrupt was posted to the bus, the
- 			 * test failed.
- 			 */
- 			adapter->test_icr = 0;
- 			E1000_WRITE_REG(&adapter->hw, IMC, mask);
- 			E1000_WRITE_REG(&adapter->hw, ICS, mask);
- 			msec_delay(10);
+		if (!shared_int) {
+			/* Disable the interrupt to be reported in
+			 * the cause register and then force the same
+			 * interrupt and see if one gets posted.  If
+			 * an interrupt was posted to the bus, the
+			 * test failed.
+			 */
+			adapter->test_icr = 0;
+			E1000_WRITE_REG(&adapter->hw, IMC, mask);
+			E1000_WRITE_REG(&adapter->hw, ICS, mask);
+			msec_delay(10);
 
- 			if (adapter->test_icr & mask) {
- 				*data = 3;
- 				break;
- 			}
+			if (adapter->test_icr & mask) {
+				*data = 3;
+				break;
+			}
 		}
 
 		/* Enable the interrupt to be reported in
@@ -922,7 +923,7 @@
 			break;
 		}
 
- 		if (!shared_int) {
+		if (!shared_int) {
 			/* Disable the other interrupts to be reported in
 			 * the cause register and then force the other
 			 * interrupts and see if any get posted.  If
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index 523c2c9..3959039 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   
-  Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+  Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
   
   This program is free software; you can redistribute it and/or modify it 
   under the terms of the GNU General Public License as published by the Free 
@@ -22,6 +22,7 @@
   
   Contact Information:
   Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
 *******************************************************************************/
@@ -764,7 +765,7 @@
     }
 
     if (hw->mac_type == e1000_82573) {
-        e1000_enable_tx_pkt_filtering(hw); 
+        e1000_enable_tx_pkt_filtering(hw);
     }
 
     switch (hw->mac_type) {
@@ -860,7 +861,7 @@
 
     if(eeprom_data != EEPROM_RESERVED_WORD) {
         /* Adjust SERDES output amplitude only. */
-        eeprom_data &= EEPROM_SERDES_AMPLITUDE_MASK; 
+        eeprom_data &= EEPROM_SERDES_AMPLITUDE_MASK;
         ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_EXT_CTRL, eeprom_data);
         if(ret_val)
             return ret_val;
@@ -1227,7 +1228,7 @@
 
     if (hw->phy_reset_disable)
         return E1000_SUCCESS;
-    
+
     ret_val = e1000_phy_reset(hw);
     if (ret_val) {
         DEBUGOUT("Error Resetting the PHY\n");
@@ -1369,7 +1370,7 @@
     DEBUGFUNC("e1000_copper_link_ggp_setup");
 
     if(!hw->phy_reset_disable) {
-        
+
         /* Enable CRS on TX for half-duplex operation. */
         ret_val = e1000_read_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
                                      &phy_data);
@@ -1518,7 +1519,7 @@
 
     if(hw->phy_reset_disable)
         return E1000_SUCCESS;
-    
+
     /* Enable CRS on TX. This must be set for half-duplex operation. */
     ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
     if(ret_val)
@@ -1664,7 +1665,7 @@
 *      collision distance in the Transmit Control Register.
 *   2) Set up flow control on the MAC to that established with
 *      the link partner.
-*   3) Config DSP to improve Gigabit link quality for some PHY revisions.    
+*   3) Config DSP to improve Gigabit link quality for some PHY revisions.
 *
 * hw - Struct containing variables accessed by shared code
 ******************************************************************************/
@@ -1673,7 +1674,7 @@
 {
     int32_t ret_val;
     DEBUGFUNC("e1000_copper_link_postconfig");
-    
+
     if(hw->mac_type >= e1000_82544) {
         e1000_config_collision_dist(hw);
     } else {
@@ -1697,7 +1698,7 @@
             return ret_val;
         }
     }
-                
+
     return E1000_SUCCESS;
 }
 
@@ -1753,11 +1754,11 @@
     }
 
     if(hw->autoneg) {
-        /* Setup autoneg and flow control advertisement 
-          * and perform autonegotiation */   
+        /* Setup autoneg and flow control advertisement
+          * and perform autonegotiation */
         ret_val = e1000_copper_link_autoneg(hw);
         if(ret_val)
-            return ret_val;           
+            return ret_val;
     } else {
         /* PHY will be set to 10H, 10F, 100H,or 100F
           * depending on value from forced_speed_duplex. */
@@ -1785,7 +1786,7 @@
             ret_val = e1000_copper_link_postconfig(hw);
             if(ret_val)
                 return ret_val;
-            
+
             DEBUGOUT("Valid link established!!!\n");
             return E1000_SUCCESS;
         }
@@ -1983,7 +1984,7 @@
 
     DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
 
-    ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);    
+    ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
     if(ret_val)
         return ret_val;
 
@@ -2272,7 +2273,7 @@
 
     DEBUGFUNC("e1000_config_mac_to_phy");
 
-    /* 82544 or newer MAC, Auto Speed Detection takes care of 
+    /* 82544 or newer MAC, Auto Speed Detection takes care of
     * MAC speed/duplex configuration.*/
     if (hw->mac_type >= e1000_82544)
         return E1000_SUCCESS;
@@ -2291,9 +2292,9 @@
     if(ret_val)
         return ret_val;
 
-    if(phy_data & M88E1000_PSSR_DPLX) 
+    if(phy_data & M88E1000_PSSR_DPLX)
         ctrl |= E1000_CTRL_FD;
-    else 
+    else
         ctrl &= ~E1000_CTRL_FD;
 
     e1000_config_collision_dist(hw);
@@ -2492,10 +2493,10 @@
                  */
                 if(hw->original_fc == e1000_fc_full) {
                     hw->fc = e1000_fc_full;
-                    DEBUGOUT("Flow Control = FULL.\r\n");
+                    DEBUGOUT("Flow Control = FULL.\n");
                 } else {
                     hw->fc = e1000_fc_rx_pause;
-                    DEBUGOUT("Flow Control = RX PAUSE frames only.\r\n");
+                    DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
                 }
             }
             /* For receiving PAUSE frames ONLY.
@@ -2511,7 +2512,7 @@
                     (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
                     (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
                 hw->fc = e1000_fc_tx_pause;
-                DEBUGOUT("Flow Control = TX PAUSE frames only.\r\n");
+                DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
             }
             /* For transmitting PAUSE frames ONLY.
              *
@@ -2526,7 +2527,7 @@
                     !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
                     (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
                 hw->fc = e1000_fc_rx_pause;
-                DEBUGOUT("Flow Control = RX PAUSE frames only.\r\n");
+                DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
             }
             /* Per the IEEE spec, at this point flow control should be
              * disabled.  However, we want to consider that we could
@@ -2552,10 +2553,10 @@
                      hw->original_fc == e1000_fc_tx_pause) ||
                     hw->fc_strict_ieee) {
                 hw->fc = e1000_fc_none;
-                DEBUGOUT("Flow Control = NONE.\r\n");
+                DEBUGOUT("Flow Control = NONE.\n");
             } else {
                 hw->fc = e1000_fc_rx_pause;
-                DEBUGOUT("Flow Control = RX PAUSE frames only.\r\n");
+                DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
             }
 
             /* Now we need to do one last check...  If we auto-
@@ -2580,7 +2581,7 @@
                 return ret_val;
             }
         } else {
-            DEBUGOUT("Copper PHY and Auto Neg has not completed.\r\n");
+            DEBUGOUT("Copper PHY and Auto Neg has not completed.\n");
         }
     }
     return E1000_SUCCESS;
@@ -2763,7 +2764,7 @@
             hw->autoneg_failed = 1;
             return 0;
         }
-        DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\r\n");
+        DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n");
 
         /* Disable auto-negotiation in the TXCW register */
         E1000_WRITE_REG(hw, TXCW, (hw->txcw & ~E1000_TXCW_ANE));
@@ -2788,7 +2789,7 @@
     else if(((hw->media_type == e1000_media_type_fiber) ||
              (hw->media_type == e1000_media_type_internal_serdes)) &&
             (ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
-        DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\r\n");
+        DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n");
         E1000_WRITE_REG(hw, TXCW, hw->txcw);
         E1000_WRITE_REG(hw, CTRL, (ctrl & ~E1000_CTRL_SLU));
 
@@ -2851,13 +2852,13 @@
 
         if(status & E1000_STATUS_FD) {
             *duplex = FULL_DUPLEX;
-            DEBUGOUT("Full Duplex\r\n");
+            DEBUGOUT("Full Duplex\n");
         } else {
             *duplex = HALF_DUPLEX;
-            DEBUGOUT(" Half Duplex\r\n");
+            DEBUGOUT(" Half Duplex\n");
         }
     } else {
-        DEBUGOUT("1000 Mbs, Full Duplex\r\n");
+        DEBUGOUT("1000 Mbps, Full Duplex\n");
         *speed = SPEED_1000;
         *duplex = FULL_DUPLEX;
     }
@@ -2883,7 +2884,7 @@
         }
     }
 
-    if ((hw->mac_type == e1000_80003es2lan) && 
+    if ((hw->mac_type == e1000_80003es2lan) &&
         (hw->media_type == e1000_media_type_copper)) {
         if (*speed == SPEED_1000)
             ret_val = e1000_configure_kmrn_for_1000(hw);
@@ -3159,7 +3160,7 @@
     if (e1000_swfw_sync_acquire(hw, swfw))
         return -E1000_ERR_SWFW_SYNC;
 
-    if((hw->phy_type == e1000_phy_igp || 
+    if((hw->phy_type == e1000_phy_igp ||
         hw->phy_type == e1000_phy_igp_2) &&
        (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
         ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
@@ -3298,7 +3299,7 @@
     if (e1000_swfw_sync_acquire(hw, swfw))
         return -E1000_ERR_SWFW_SYNC;
 
-    if((hw->phy_type == e1000_phy_igp || 
+    if((hw->phy_type == e1000_phy_igp ||
         hw->phy_type == e1000_phy_igp_2) &&
        (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
         ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
@@ -3496,22 +3497,22 @@
         }
         /* Read the device control register and assert the E1000_CTRL_PHY_RST
          * bit. Then, take it out of reset.
-         * For pre-e1000_82571 hardware, we delay for 10ms between the assert 
+         * For pre-e1000_82571 hardware, we delay for 10ms between the assert
          * and deassert.  For e1000_82571 hardware and later, we instead delay
          * for 50us between and 10ms after the deassertion.
          */
         ctrl = E1000_READ_REG(hw, CTRL);
         E1000_WRITE_REG(hw, CTRL, ctrl | E1000_CTRL_PHY_RST);
         E1000_WRITE_FLUSH(hw);
-        
-        if (hw->mac_type < e1000_82571) 
+
+        if (hw->mac_type < e1000_82571)
             msec_delay(10);
         else
             udelay(100);
-        
+
         E1000_WRITE_REG(hw, CTRL, ctrl);
         E1000_WRITE_FLUSH(hw);
-        
+
         if (hw->mac_type >= e1000_82571)
             msec_delay(10);
         e1000_swfw_sync_release(hw, swfw);
@@ -3815,7 +3816,7 @@
     /* Check polarity status */
     ret_val = e1000_check_polarity(hw, &polarity);
     if(ret_val)
-        return ret_val; 
+        return ret_val;
     phy_info->cable_polarity = polarity;
 
     ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
@@ -4540,14 +4541,14 @@
 
         E1000_WRITE_REG(hw, EERD, eerd);
         error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_READ);
-        
+
         if(error) {
             break;
         }
         data[i] = (E1000_READ_REG(hw, EERD) >> E1000_EEPROM_RW_REG_DATA);
-      
+
     }
-    
+
     return error;
 }
 
@@ -4573,24 +4574,24 @@
         return -E1000_ERR_SWFW_SYNC;
 
     for (i = 0; i < words; i++) {
-        register_value = (data[i] << E1000_EEPROM_RW_REG_DATA) | 
-                         ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) | 
+        register_value = (data[i] << E1000_EEPROM_RW_REG_DATA) |
+                         ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) |
                          E1000_EEPROM_RW_REG_START;
 
         error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE);
         if(error) {
             break;
-        }       
+        }
 
         E1000_WRITE_REG(hw, EEWR, register_value);
-        
+
         error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE);
-        
+
         if(error) {
             break;
-        }       
+        }
     }
-    
+
     e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM);
     return error;
 }
@@ -4610,7 +4611,7 @@
     for(i = 0; i < attempts; i++) {
         if(eerd == E1000_EEPROM_POLL_READ)
             reg = E1000_READ_REG(hw, EERD);
-        else 
+        else
             reg = E1000_READ_REG(hw, EEWR);
 
         if(reg & E1000_EEPROM_RW_REG_DONE) {
@@ -5135,7 +5136,7 @@
     uint32_t i;
     uint32_t num_rar_entry;
     uint32_t num_mta_entry;
-    
+
     DEBUGFUNC("e1000_mc_addr_list_update");
 
     /* Set the new number of MC addresses that we are being requested to use. */
@@ -6240,7 +6241,7 @@
 *                                1 - Downshift occurred.
  *
  * returns: - E1000_ERR_XXX
- *            E1000_SUCCESS 
+ *            E1000_SUCCESS
  *
 * For phy's older than IGP, this function reads the Downshift bit in the Phy
  * Specific Status register.  For IGP phy's, it reads the Downgrade bit in the
@@ -6255,7 +6256,7 @@
 
     DEBUGFUNC("e1000_check_downshift");
 
-    if(hw->phy_type == e1000_phy_igp || 
+    if(hw->phy_type == e1000_phy_igp ||
         hw->phy_type == e1000_phy_igp_2) {
         ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH,
                                      &phy_data);
@@ -6684,8 +6685,8 @@
 
 
     } else {
- 
-            phy_data |= IGP02E1000_PM_D0_LPLU;   
+
+            phy_data |= IGP02E1000_PM_D0_LPLU;
             ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
             if (ret_val)
                 return ret_val;
@@ -6777,7 +6778,7 @@
 e1000_host_if_read_cookie(struct e1000_hw * hw, uint8_t *buffer)
 {
     uint8_t i;
-    uint32_t offset = E1000_MNG_DHCP_COOKIE_OFFSET; 
+    uint32_t offset = E1000_MNG_DHCP_COOKIE_OFFSET;
     uint8_t length = E1000_MNG_DHCP_COOKIE_LENGTH;
 
     length = (length >> 2);
@@ -6796,7 +6797,7 @@
  * and also checks whether the previous command is completed.
  * It busy waits in case of previous command is not completed.
  *
- * returns: - E1000_ERR_HOST_INTERFACE_COMMAND in case if is not ready or 
+ * returns: - E1000_ERR_HOST_INTERFACE_COMMAND in case it is not ready or
  *            timeout
  *          - E1000_SUCCESS for success.
  ****************************************************************************/
@@ -6820,7 +6821,7 @@
         msec_delay_irq(1);
     }
 
-    if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) { 
+    if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
         DEBUGOUT("Previous command timeout failed .\n");
         return -E1000_ERR_HOST_INTERFACE_COMMAND;
     }
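
Beyond the new mailing-list contact line and the copyright year, the e1000_hw.c hunk above is almost entirely cleanup: trailing whitespace removal and dropping the DOS-style "\r\n" endings from DEBUGOUT() strings, since kernel log lines only need "\n". DEBUGOUT is a driver-local debug macro (a printk wrapper when debugging is enabled); a trivial, hedged illustration of the convention, with made-up message text:

#include <linux/kernel.h>

static void log_example(void)
{
	/* A single "\n" terminates a kernel log line; a stray "\r" would
	 * be stored verbatim in the log buffer and clutter dmesg output. */
	printk(KERN_DEBUG "Flow Control = FULL.\n");
}
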
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index 150e45e..467c9ed 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   
-  Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+  Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
   
   This program is free software; you can redistribute it and/or modify it 
   under the terms of the GNU General Public License as published by the Free 
@@ -22,6 +22,7 @@
   
   Contact Information:
   Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
 *******************************************************************************/
@@ -374,7 +375,7 @@
 };
 #endif
 
-int32_t e1000_mng_write_dhcp_info(struct e1000_hw *hw, uint8_t *buffer, 
+int32_t e1000_mng_write_dhcp_info(struct e1000_hw *hw, uint8_t *buffer,
 							uint16_t length);
 boolean_t e1000_check_mng_mode(struct e1000_hw *hw);
 boolean_t e1000_enable_tx_pkt_filtering(struct e1000_hw *hw);
@@ -1801,7 +1802,7 @@
  *       value2 = [0..64512],    default=4096
  *       value3 = [0..64512],    default=0
  */
-    
+
 #define E1000_PSRCTL_BSIZE0_MASK   0x0000007F
 #define E1000_PSRCTL_BSIZE1_MASK   0x00003F00
 #define E1000_PSRCTL_BSIZE2_MASK   0x003F0000
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index ed15fca..bd709e5 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   
-  Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+  Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
   
   This program is free software; you can redistribute it and/or modify it 
   under the terms of the GNU General Public License as published by the Free 
@@ -22,51 +22,13 @@
   
   Contact Information:
   Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
 *******************************************************************************/
 
 #include "e1000.h"
 
-/* Change Log
- * 7.0.33      3-Feb-2006
- *   o Added another fix for the pass false carrier bit
- * 7.0.32      24-Jan-2006
- *   o Need to rebuild with noew version number for the pass false carrier 
- *     fix in e1000_hw.c
- * 7.0.30      18-Jan-2006
- *   o fixup for tso workaround to disable it for pci-x
- *   o fix mem leak on 82542
- *   o fixes for 10 Mb/s connections and incorrect stats
- * 7.0.28      01/06/2006
- *   o hardware workaround to only set "speed mode" bit for 1G link.
- * 7.0.26      12/23/2005
- *   o wake on lan support modified for device ID 10B5
- *   o fix dhcp + vlan issue not making it to the iAMT firmware
- * 7.0.24      12/9/2005
- *   o New hardware support for the Gigabit NIC embedded in the south bridge
- *   o Fixes to the recycling logic (skb->tail) from IBM LTC
- * 6.3.9	12/16/2005
- *   o incorporate fix for recycled skbs from IBM LTC
- * 6.3.7	11/18/2005
- *   o Honor eeprom setting for enabling/disabling Wake On Lan
- * 6.3.5 	11/17/2005
- *   o Fix memory leak in rx ring handling for PCI Express adapters
- * 6.3.4	11/8/05
- *   o Patch from Jesper Juhl to remove redundant NULL checks for kfree
- * 6.3.2	9/20/05
- *   o Render logic that sets/resets DRV_LOAD as inline functions to 
- *     avoid code replication. If f/w is AMT then set DRV_LOAD only when
- *     network interface is open.
- *   o Handle DRV_LOAD set/reset in cases where AMT uses VLANs.
- *   o Adjust PBA partioning for Jumbo frames using MTU size and not
- *     rx_buffer_len
- * 6.3.1	9/19/05
- *   o Use adapter->tx_timeout_factor in Tx Hung Detect logic 
- *      (e1000_clean_tx_irq)
- *   o Support for 8086:10B5 device (Quad Port)
- */
-
 char e1000_driver_name[] = "e1000";
 static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
 #ifndef CONFIG_E1000_NAPI
@@ -74,9 +36,9 @@
 #else
 #define DRIVERNAPI "-NAPI"
 #endif
-#define DRV_VERSION "7.0.33-k2"DRIVERNAPI
+#define DRV_VERSION "7.0.38-k4"DRIVERNAPI
 char e1000_driver_version[] = DRV_VERSION;
-static char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
+static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
 
 /* e1000_pci_tbl - PCI Device ID Table
  *
@@ -208,8 +170,8 @@
 static void e1000_tx_timeout(struct net_device *dev);
 static void e1000_reset_task(struct net_device *dev);
 static void e1000_smartspeed(struct e1000_adapter *adapter);
-static inline int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
-					      struct sk_buff *skb);
+static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
+                                       struct sk_buff *skb);
 
 static void e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
 static void e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
@@ -293,7 +255,7 @@
  * @adapter: board private structure
  **/
 
-static inline void
+static void
 e1000_irq_disable(struct e1000_adapter *adapter)
 {
 	atomic_inc(&adapter->irq_sem);
@@ -307,7 +269,7 @@
  * @adapter: board private structure
  **/
 
-static inline void
+static void
 e1000_irq_enable(struct e1000_adapter *adapter)
 {
 	if (likely(atomic_dec_and_test(&adapter->irq_sem))) {
@@ -348,10 +310,10 @@
  * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded. For AMT version (only with 82573)
 * of the f/w this means that the network i/f is closed.
- * 
+ *
  **/
 
-static inline void 
+static void
 e1000_release_hw_control(struct e1000_adapter *adapter)
 {
 	uint32_t ctrl_ext;
@@ -361,6 +323,7 @@
 	switch (adapter->hw.mac_type) {
 	case e1000_82571:
 	case e1000_82572:
+	case e1000_80003es2lan:
 		ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
 		E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
 				ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
@@ -379,13 +342,13 @@
  * @adapter: address of board private structure
  *
  * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
- * For ASF and Pass Through versions of f/w this means that 
- * the driver is loaded. For AMT version (only with 82573) 
+ * For ASF and Pass Through versions of f/w this means that
+ * the driver is loaded. For AMT version (only with 82573)
 * of the f/w this means that the network i/f is open.
- * 
+ *
  **/
 
-static inline void 
+static void
 e1000_get_hw_control(struct e1000_adapter *adapter)
 {
 	uint32_t ctrl_ext;
@@ -394,6 +357,7 @@
 	switch (adapter->hw.mac_type) {
 	case e1000_82571:
 	case e1000_82572:
+	case e1000_80003es2lan:
 		ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
 		E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
 				ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
@@ -421,7 +385,7 @@
 		uint16_t mii_reg;
 		e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
 		if (mii_reg & MII_CR_POWER_DOWN)
-			e1000_phy_reset(&adapter->hw);
+			e1000_phy_hw_reset(&adapter->hw);
 	}
 
 	e1000_set_multi(netdev);
@@ -711,8 +675,8 @@
 		DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
 
 	/* if ksp3, indicate if it's port a being setup */
-	if (pdev->device == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 && 
-			e1000_ksp3_port_a == 0) 
+	if (pdev->device == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 &&
+			e1000_ksp3_port_a == 0)
 		adapter->ksp3_port_a = 1;
 	e1000_ksp3_port_a++;
 	/* Reset for multiple KP3 adapters */
@@ -740,9 +704,9 @@
 	if (pci_using_dac)
 		netdev->features |= NETIF_F_HIGHDMA;
 
- 	/* hard_start_xmit is safe against parallel locking */
- 	netdev->features |= NETIF_F_LLTX; 
- 
+	/* hard_start_xmit is safe against parallel locking */
+	netdev->features |= NETIF_F_LLTX;
+
 	adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw);
 
 	/* before reading the EEPROM, reset the controller to
@@ -972,8 +936,8 @@
 
 	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
 
-	adapter->rx_buffer_len = E1000_RXBUFFER_2048;
-	adapter->rx_ps_bsize0 = E1000_RXBUFFER_256;
+	adapter->rx_buffer_len = MAXIMUM_ETHERNET_FRAME_SIZE;
+	adapter->rx_ps_bsize0 = E1000_RXBUFFER_128;
 	hw->max_frame_size = netdev->mtu +
 			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
 	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
@@ -1181,7 +1145,7 @@
  * @start: address of beginning of memory
  * @len: length of memory
  **/
-static inline boolean_t
+static boolean_t
 e1000_check_64k_bound(struct e1000_adapter *adapter,
 		      void *start, unsigned long len)
 {
@@ -1599,14 +1563,21 @@
 		rctl |= E1000_RCTL_LPE;
 
 	/* Setup buffer sizes */
-	if (adapter->hw.mac_type >= e1000_82571) {
-		/* We can now specify buffers in 1K increments.
-		 * BSIZE and BSEX are ignored in this case. */
-		rctl |= adapter->rx_buffer_len << 0x11;
-	} else {
-		rctl &= ~E1000_RCTL_SZ_4096;
-		rctl |= E1000_RCTL_BSEX; 
-		switch (adapter->rx_buffer_len) {
+	rctl &= ~E1000_RCTL_SZ_4096;
+	rctl |= E1000_RCTL_BSEX;
+	switch (adapter->rx_buffer_len) {
+		case E1000_RXBUFFER_256:
+			rctl |= E1000_RCTL_SZ_256;
+			rctl &= ~E1000_RCTL_BSEX;
+			break;
+		case E1000_RXBUFFER_512:
+			rctl |= E1000_RCTL_SZ_512;
+			rctl &= ~E1000_RCTL_BSEX;
+			break;
+		case E1000_RXBUFFER_1024:
+			rctl |= E1000_RCTL_SZ_1024;
+			rctl &= ~E1000_RCTL_BSEX;
+			break;
 		case E1000_RXBUFFER_2048:
 		default:
 			rctl |= E1000_RCTL_SZ_2048;
@@ -1621,7 +1592,6 @@
 		case E1000_RXBUFFER_16384:
 			rctl |= E1000_RCTL_SZ_16384;
 			break;
-		}
 	}
 
 #ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
@@ -1715,7 +1685,7 @@
 	if (hw->mac_type >= e1000_82571) {
 		ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
 		/* Reset delay timers after every interrupt */
-		ctrl_ext |= E1000_CTRL_EXT_CANC;
+		ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR;
 #ifdef CONFIG_E1000_NAPI
 		/* Auto-Mask interrupts upon ICR read. */
 		ctrl_ext |= E1000_CTRL_EXT_IAME;
@@ -1807,7 +1777,7 @@
 		e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
 }
 
-static inline void
+static void
 e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
 			struct e1000_buffer *buffer_info)
 {
@@ -2247,6 +2217,7 @@
 
 	if (link) {
 		if (!netif_carrier_ok(netdev)) {
+			boolean_t txb2b = 1;
 			e1000_get_speed_and_duplex(&adapter->hw,
 			                           &adapter->link_speed,
 			                           &adapter->link_duplex);
@@ -2260,23 +2231,22 @@
 			 * and adjust the timeout factor */
 			netdev->tx_queue_len = adapter->tx_queue_len;
 			adapter->tx_timeout_factor = 1;
-			adapter->txb2b = 1;
 			switch (adapter->link_speed) {
 			case SPEED_10:
-				adapter->txb2b = 0;
+				txb2b = 0;
 				netdev->tx_queue_len = 10;
 				adapter->tx_timeout_factor = 8;
 				break;
 			case SPEED_100:
-				adapter->txb2b = 0;
+				txb2b = 0;
 				netdev->tx_queue_len = 100;
 				/* maybe add some timeout factor ? */
 				break;
 			}
 
-			if ((adapter->hw.mac_type == e1000_82571 || 
+			if ((adapter->hw.mac_type == e1000_82571 ||
 			     adapter->hw.mac_type == e1000_82572) &&
-			    adapter->txb2b == 0) {
+			    txb2b == 0) {
 #define SPEED_MODE_BIT (1 << 21)
 				uint32_t tarc0;
 				tarc0 = E1000_READ_REG(&adapter->hw, TARC0);
@@ -2400,7 +2370,7 @@
 #define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
 #define E1000_TX_FLAGS_VLAN_SHIFT	16
 
-static inline int
+static int
 e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
           struct sk_buff *skb)
 {
@@ -2422,7 +2392,7 @@
 
 		hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
 		mss = skb_shinfo(skb)->tso_size;
-		if (skb->protocol == ntohs(ETH_P_IP)) {
+		if (skb->protocol == htons(ETH_P_IP)) {
 			skb->nh.iph->tot_len = 0;
 			skb->nh.iph->check = 0;
 			skb->h.th->check =
@@ -2480,7 +2450,7 @@
 	return FALSE;
 }
 
-static inline boolean_t
+static boolean_t
 e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
               struct sk_buff *skb)
 {
@@ -2516,7 +2486,7 @@
 #define E1000_MAX_TXD_PWR	12
 #define E1000_MAX_DATA_PER_TXD	(1<<E1000_MAX_TXD_PWR)
 
-static inline int
+static int
 e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
              struct sk_buff *skb, unsigned int first, unsigned int max_per_txd,
              unsigned int nr_frags, unsigned int mss)
@@ -2625,7 +2595,7 @@
 	return count;
 }
 
-static inline void
+static void
 e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
                int tx_flags, int count)
 {
@@ -2689,7 +2659,7 @@
 #define E1000_FIFO_HDR			0x10
 #define E1000_82547_PAD_LEN		0x3E0
 
-static inline int
+static int
 e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb)
 {
 	uint32_t fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
@@ -2716,7 +2686,7 @@
 }
 
 #define MINIMUM_DHCP_PACKET_SIZE 282
-static inline int
+static int
 e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb)
 {
 	struct e1000_hw *hw =  &adapter->hw;
@@ -2764,7 +2734,7 @@
 	unsigned int nr_frags = 0;
 	unsigned int mss = 0;
 	int count = 0;
- 	int tso;
+	int tso;
 	unsigned int f;
 	len -= skb->data_len;
 
@@ -2777,7 +2747,7 @@
 
 #ifdef NETIF_F_TSO
 	mss = skb_shinfo(skb)->tso_size;
-	/* The controller does a simple calculation to 
+	/* The controller does a simple calculation to
 	 * make sure there is enough room in the FIFO before
 	 * initiating the DMA for each buffer.  The calc is:
 	 * 4 = ceil(buffer len/mss).  To make sure we don't
@@ -2800,7 +2770,7 @@
 			case e1000_82573:
 				pull_size = min((unsigned int)4, skb->data_len);
 				if (!__pskb_pull_tail(skb, pull_size)) {
-					printk(KERN_ERR 
+					printk(KERN_ERR
 						"__pskb_pull_tail failed.\n");
 					dev_kfree_skb_any(skb);
 					return NETDEV_TX_OK;
@@ -2901,7 +2871,7 @@
 	/* Old method was to assume IPv4 packet by default if TSO was enabled.
 	 * 82571 hardware supports TSO capabilities for IPv6 as well...
 	 * no longer assume, we must. */
-	if (likely(skb->protocol == ntohs(ETH_P_IP)))
+	if (likely(skb->protocol == htons(ETH_P_IP)))
 		tx_flags |= E1000_TX_FLAGS_IPV4;
 
 	e1000_tx_queue(adapter, tx_ring, tx_flags,
@@ -2982,8 +2952,7 @@
 
 	/* Adapter-specific max frame size limits. */
 	switch (adapter->hw.mac_type) {
-	case e1000_82542_rev2_0:
-	case e1000_82542_rev2_1:
+	case e1000_undefined ... e1000_82542_rev2_1:
 		if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
 			DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
 			return -EINVAL;
@@ -3017,27 +2986,32 @@
 		break;
 	}
 
+	/* NOTE: dev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
+	 * means we reserve 2 more, this pushes us to allocate from the next
+	 * larger slab size
+	 * i.e. RXBUFFER_2048 --> size-4096 slab */
 
-	if (adapter->hw.mac_type > e1000_82547_rev_2) {
-		adapter->rx_buffer_len = max_frame;
-		E1000_ROUNDUP(adapter->rx_buffer_len, 1024);
-	} else {
-		if(unlikely((adapter->hw.mac_type < e1000_82543) &&
-		   (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE))) {
-			DPRINTK(PROBE, ERR, "Jumbo Frames not supported "
-					    "on 82542\n");
-			return -EINVAL;
-		} else {
-			if(max_frame <= E1000_RXBUFFER_2048)
-				adapter->rx_buffer_len = E1000_RXBUFFER_2048;
-			else if(max_frame <= E1000_RXBUFFER_4096)
-				adapter->rx_buffer_len = E1000_RXBUFFER_4096;
-			else if(max_frame <= E1000_RXBUFFER_8192)
-				adapter->rx_buffer_len = E1000_RXBUFFER_8192;
-			else if(max_frame <= E1000_RXBUFFER_16384)
-				adapter->rx_buffer_len = E1000_RXBUFFER_16384;
-		}
-	}
+	if (max_frame <= E1000_RXBUFFER_256)
+		adapter->rx_buffer_len = E1000_RXBUFFER_256;
+	else if (max_frame <= E1000_RXBUFFER_512)
+		adapter->rx_buffer_len = E1000_RXBUFFER_512;
+	else if (max_frame <= E1000_RXBUFFER_1024)
+		adapter->rx_buffer_len = E1000_RXBUFFER_1024;
+	else if (max_frame <= E1000_RXBUFFER_2048)
+		adapter->rx_buffer_len = E1000_RXBUFFER_2048;
+	else if (max_frame <= E1000_RXBUFFER_4096)
+		adapter->rx_buffer_len = E1000_RXBUFFER_4096;
+	else if (max_frame <= E1000_RXBUFFER_8192)
+		adapter->rx_buffer_len = E1000_RXBUFFER_8192;
+	else if (max_frame <= E1000_RXBUFFER_16384)
+		adapter->rx_buffer_len = E1000_RXBUFFER_16384;
+
+	/* adjust allocation if LPE protects us, and we aren't using SBP */
+#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
+	if (!adapter->hw.tbi_compatibility_on &&
+	    ((max_frame == MAXIMUM_ETHERNET_FRAME_SIZE) ||
+	     (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
+		adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
 
 	netdev->mtu = new_mtu;
 
@@ -3165,7 +3139,6 @@
 		adapter->stats.crcerrs + adapter->stats.algnerrc +
 		adapter->stats.ruc + adapter->stats.roc +
 		adapter->stats.cexterr;
-	adapter->net_stats.rx_dropped = 0;
 	adapter->net_stats.rx_length_errors = adapter->stats.ruc +
 	                                      adapter->stats.roc;
 	adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
@@ -3391,13 +3364,15 @@
 
 	tx_ring->next_to_clean = i;
 
-	spin_lock(&tx_ring->tx_lock);
-
+#define TX_WAKE_THRESHOLD 32
 	if (unlikely(cleaned && netif_queue_stopped(netdev) &&
-		    netif_carrier_ok(netdev)))
-		netif_wake_queue(netdev);
-
-	spin_unlock(&tx_ring->tx_lock);
+	             netif_carrier_ok(netdev))) {
+		spin_lock(&tx_ring->tx_lock);
+		if (netif_queue_stopped(netdev) &&
+		    (E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))
+			netif_wake_queue(netdev);
+		spin_unlock(&tx_ring->tx_lock);
+	}
 
 	if (adapter->detect_tx_hung) {
 		/* Detect a transmit hang in hardware, this serializes the
@@ -3445,7 +3420,7 @@
  * @sk_buff:     socket buffer with received data
  **/
 
-static inline void
+static void
 e1000_rx_checksum(struct e1000_adapter *adapter,
 		  uint32_t status_err, uint32_t csum,
 		  struct sk_buff *skb)
@@ -3569,7 +3544,8 @@
 				                       flags);
 				length--;
 			} else {
-				dev_kfree_skb_irq(skb);
+				/* recycle */
+				buffer_info->skb = skb;
 				goto next_desc;
 			}
 		}
@@ -3677,6 +3653,7 @@
 	i = rx_ring->next_to_clean;
 	rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
 	staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
+	buffer_info = &rx_ring->buffer_info[i];
 
 	while (staterr & E1000_RXD_STAT_DD) {
 		buffer_info = &rx_ring->buffer_info[i];
@@ -3737,9 +3714,9 @@
 
 		/* page alloc/put takes too long and affects small packet
 		 * throughput, so unsplit small packets and save the alloc/put*/
-		if (l1 && ((length + l1) < E1000_CB_LENGTH)) {
+		if (l1 && ((length + l1) <= adapter->rx_ps_bsize0)) {
 			u8 *vaddr;
-			/* there is no documentation about how to call 
+			/* there is no documentation about how to call
 			 * kmap_atomic, so we can't hold the mapping
 			 * very long */
 			pci_dma_sync_single_for_cpu(pdev,
@@ -4159,7 +4136,7 @@
 			spin_unlock_irqrestore(&adapter->stats_lock, flags);
 			return -EIO;
 		}
-		if (adapter->hw.phy_type == e1000_media_type_copper) {
+		if (adapter->hw.media_type == e1000_media_type_copper) {
 			switch (data->reg_num) {
 			case PHY_CTRL:
 				if (mii_reg & MII_CR_POWER_DOWN)
@@ -4518,21 +4495,13 @@
 
 		E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN);
 		E1000_WRITE_REG(&adapter->hw, WUFC, wufc);
-		retval = pci_enable_wake(pdev, PCI_D3hot, 1);
-		if (retval)
-			DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
-		retval = pci_enable_wake(pdev, PCI_D3cold, 1);
-		if (retval)
-			DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n");
+		pci_enable_wake(pdev, PCI_D3hot, 1);
+		pci_enable_wake(pdev, PCI_D3cold, 1);
 	} else {
 		E1000_WRITE_REG(&adapter->hw, WUC, 0);
 		E1000_WRITE_REG(&adapter->hw, WUFC, 0);
-		retval = pci_enable_wake(pdev, PCI_D3hot, 0);
-		if (retval)
-			DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
-		retval = pci_enable_wake(pdev, PCI_D3cold, 0);
-		if (retval)
-			DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n");
+		pci_enable_wake(pdev, PCI_D3hot, 0);
+		pci_enable_wake(pdev, PCI_D3cold, 0);
 	}
 
 	if (adapter->hw.mac_type >= e1000_82540 &&
@@ -4541,13 +4510,8 @@
 		if (manc & E1000_MANC_SMBUS_EN) {
 			manc |= E1000_MANC_ARP_EN;
 			E1000_WRITE_REG(&adapter->hw, MANC, manc);
-			retval = pci_enable_wake(pdev, PCI_D3hot, 1);
-			if (retval)
-				DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
-			retval = pci_enable_wake(pdev, PCI_D3cold, 1);
-			if (retval)
-				DPRINTK(PROBE, ERR,
-				        "Error enabling D3 cold wake\n");
+			pci_enable_wake(pdev, PCI_D3hot, 1);
+			pci_enable_wake(pdev, PCI_D3cold, 1);
 		}
 	}
 
@@ -4557,9 +4521,7 @@
 
 	pci_disable_device(pdev);
 
-	retval = pci_set_power_state(pdev, pci_choose_state(pdev, state));
-	if (retval)
-		DPRINTK(PROBE, ERR, "Error in setting power state\n");
+	pci_set_power_state(pdev, pci_choose_state(pdev, state));
 
 	return 0;
 }
@@ -4570,22 +4532,15 @@
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct e1000_adapter *adapter = netdev_priv(netdev);
-	int retval;
 	uint32_t manc, ret_val;
 
-	retval = pci_set_power_state(pdev, PCI_D0);
-	if (retval)
-		DPRINTK(PROBE, ERR, "Error in setting power state\n");
+	pci_set_power_state(pdev, PCI_D0);
 	e1000_pci_restore_state(adapter);
 	ret_val = pci_enable_device(pdev);
 	pci_set_master(pdev);
 
-	retval = pci_enable_wake(pdev, PCI_D3hot, 0);
-	if (retval)
-		DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
-	retval = pci_enable_wake(pdev, PCI_D3cold, 0);
-	if (retval)
-		DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n");
+	pci_enable_wake(pdev, PCI_D3hot, 0);
+	pci_enable_wake(pdev, PCI_D3cold, 0);
 
 	e1000_reset(adapter);
 	E1000_WRITE_REG(&adapter->hw, WUS, ~0);
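
Two functional details in the e1000_main.c diff are worth calling out: rx_buffer_len is now chosen purely from max_frame via the new E1000_RXBUFFER_* cascade, and the skb->protocol checks were changed from ntohs(ETH_P_IP) to htons(ETH_P_IP). skb->protocol is a big-endian (__be16) field, so the host-order constant is the side that needs converting; for a 16-bit value the numeric result is the same either way, but htons() on the constant is the endianness-correct idiom. A small sketch of that idiom, where is_ipv4_frame() is a hypothetical helper, not driver code:

#include <linux/if_ether.h>
#include <linux/skbuff.h>

static bool is_ipv4_frame(const struct sk_buff *skb)
{
	/* Compare the __be16 protocol field against a constant converted
	 * once with htons(), rather than converting the field each time. */
	return skb->protocol == htons(ETH_P_IP);
}
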
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h
index 9790db9..048d052 100644
--- a/drivers/net/e1000/e1000_osdep.h
+++ b/drivers/net/e1000/e1000_osdep.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   
-  Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+  Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
   
   This program is free software; you can redistribute it and/or modify it 
   under the terms of the GNU General Public License as published by the Free 
@@ -22,6 +22,7 @@
   
   Contact Information:
   Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
 *******************************************************************************/
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c
index e0a4d37..e55f896 100644
--- a/drivers/net/e1000/e1000_param.c
+++ b/drivers/net/e1000/e1000_param.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   
-  Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+  Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
   
   This program is free software; you can redistribute it and/or modify it 
   under the terms of the GNU General Public License as published by the Free 
@@ -22,6 +22,7 @@
   
   Contact Information:
   Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 
 *******************************************************************************/
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 2f7b868..8d680ce 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -21,15 +21,15 @@
 	http://www.scyld.com/network/epic100.html
 
 	---------------------------------------------------------------------
-	
+
 	Linux kernel-specific changes:
-	
+
 	LK1.1.2 (jgarzik):
 	* Merge becker version 1.09 (4/08/2000)
 
 	LK1.1.3:
 	* Major bugfix to 1.09 driver (Francis Romieu)
-	
+
 	LK1.1.4 (jgarzik):
 	* Merge becker test version 1.09 (5/29/2000)
 
@@ -66,7 +66,7 @@
 	LK1.1.14 (Kryzsztof Halasa):
 	* fix spurious bad initializations
 	* pound phy a la SMSC's app note on the subject
-	
+
 	AC1.1.14ac
 	* fix power up/down for ethtool that broke in 1.11
 
@@ -244,7 +244,7 @@
 };
 MODULE_DEVICE_TABLE (pci, epic_pci_tbl);
 
-	
+
 #ifndef USE_IO_OPS
 #undef inb
 #undef inw
@@ -370,7 +370,7 @@
 static struct net_device_stats *epic_get_stats(struct net_device *dev);
 static void set_rx_mode(struct net_device *dev);
 
-
+
 
 static int __devinit epic_init_one (struct pci_dev *pdev,
 				    const struct pci_device_id *ent)
@@ -392,9 +392,9 @@
 		printk (KERN_INFO "%s" KERN_INFO "%s" KERN_INFO "%s",
 			version, version2, version3);
 #endif
-	
+
 	card_idx++;
-	
+
 	ret = pci_enable_device(pdev);
 	if (ret)
 		goto out;
@@ -405,7 +405,7 @@
 		ret = -ENODEV;
 		goto err_out_disable;
 	}
-	
+
 	pci_set_master(pdev);
 
 	ret = pci_request_regions(pdev, DRV_NAME);
@@ -498,7 +498,7 @@
 	ep->pci_dev = pdev;
 	ep->chip_id = chip_idx;
 	ep->chip_flags = pci_id_tbl[chip_idx].drv_flags;
-	ep->irq_mask = 
+	ep->irq_mask =
 		(ep->chip_flags & TYPE2_INTR ?  PCIBusErr175 : PCIBusErr170)
 		 | CntFull | TxUnderrun | EpicNapiEvent;
 
@@ -587,7 +587,7 @@
 	pci_disable_device(pdev);
 	goto out;
 }
-
+
 /* Serial EEPROM section. */
 
 /*  EEPROM_Ctrl bits. */
@@ -709,7 +709,7 @@
 
 	outw(value, ioaddr + MIIData);
 	outl((phy_id << 9) | (loc << 4) | MII_WRITEOP, ioaddr + MIICtrl);
-	for (i = 10000; i > 0; i--) { 
+	for (i = 10000; i > 0; i--) {
 		barrier();
 		if ((inl(ioaddr + MIICtrl) & MII_WRITEOP) == 0)
 			break;
@@ -717,7 +717,7 @@
 	return;
 }
 
-
+
 static int epic_open(struct net_device *dev)
 {
 	struct epic_private *ep = dev->priv;
@@ -760,7 +760,7 @@
 #endif
 
 	udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */
-	
+
 	for (i = 0; i < 3; i++)
 		outl(cpu_to_le16(((u16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);
 
@@ -803,7 +803,7 @@
 
 	/* Enable interrupts by setting the interrupt mask. */
 	outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
-		 | CntFull | TxUnderrun 
+		 | CntFull | TxUnderrun
 		 | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK);
 
 	if (debug > 1)
@@ -831,7 +831,7 @@
 	struct epic_private *ep = dev->priv;
 
 	netif_stop_queue (dev);
-	
+
 	/* Disable interrupts by clearing the interrupt mask. */
 	outl(0x00000000, ioaddr + INTMASK);
 	/* Stop the chip's Tx and Rx DMA processes. */
@@ -987,7 +987,7 @@
 	for (i = 0; i < RX_RING_SIZE; i++) {
 		ep->rx_ring[i].rxstatus = 0;
 		ep->rx_ring[i].buflength = cpu_to_le32(ep->rx_buf_sz);
-		ep->rx_ring[i].next = ep->rx_ring_dma + 
+		ep->rx_ring[i].next = ep->rx_ring_dma +
 				      (i+1)*sizeof(struct epic_rx_desc);
 		ep->rx_skbuff[i] = NULL;
 	}
@@ -1002,7 +1002,7 @@
 			break;
 		skb->dev = dev;			/* Mark as being used by this device. */
 		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
-		ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev, 
+		ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev,
 			skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
 		ep->rx_ring[i].rxstatus = cpu_to_le32(DescOwn);
 	}
@@ -1013,7 +1013,7 @@
 	for (i = 0; i < TX_RING_SIZE; i++) {
 		ep->tx_skbuff[i] = NULL;
 		ep->tx_ring[i].txstatus = 0x0000;
-		ep->tx_ring[i].next = ep->tx_ring_dma + 
+		ep->tx_ring[i].next = ep->tx_ring_dma +
 			(i+1)*sizeof(struct epic_tx_desc);
 	}
 	ep->tx_ring[i-1].next = ep->tx_ring_dma;
@@ -1026,7 +1026,7 @@
 	int entry, free_count;
 	u32 ctrl_word;
 	unsigned long flags;
-	
+
 	if (skb->len < ETH_ZLEN) {
 		skb = skb_padto(skb, ETH_ZLEN);
 		if (skb == NULL)
@@ -1042,7 +1042,7 @@
 	entry = ep->cur_tx % TX_RING_SIZE;
 
 	ep->tx_skbuff[entry] = skb;
-	ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data, 
+	ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data,
 		 			            skb->len, PCI_DMA_TODEVICE);
 	if (free_count < TX_QUEUE_LEN/2) {/* Typical path */
 		ctrl_word = cpu_to_le32(0x100000); /* No interrupt */
@@ -1126,7 +1126,7 @@
 
 		/* Free the original skb. */
 		skb = ep->tx_skbuff[entry];
-		pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr, 
+		pci_unmap_single(ep->pci_dev, ep->tx_ring[entry].bufaddr,
 				 skb->len, PCI_DMA_TODEVICE);
 		dev_kfree_skb_irq(skb);
 		ep->tx_skbuff[entry] = NULL;
@@ -1281,8 +1281,8 @@
 							       ep->rx_buf_sz,
 							       PCI_DMA_FROMDEVICE);
 			} else {
-				pci_unmap_single(ep->pci_dev, 
-					ep->rx_ring[entry].bufaddr, 
+				pci_unmap_single(ep->pci_dev,
+					ep->rx_ring[entry].bufaddr,
 					ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
 				skb_put(skb = ep->rx_skbuff[entry], pkt_len);
 				ep->rx_skbuff[entry] = NULL;
@@ -1307,7 +1307,7 @@
 				break;
 			skb->dev = dev;			/* Mark as being used by this device. */
 			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
-			ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, 
+			ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev,
 				skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
 			work_done++;
 		}
@@ -1403,7 +1403,7 @@
 		ep->rx_ring[i].rxstatus = 0;		/* Not owned by Epic chip. */
 		ep->rx_ring[i].buflength = 0;
 		if (skb) {
-			pci_unmap_single(ep->pci_dev, ep->rx_ring[i].bufaddr, 
+			pci_unmap_single(ep->pci_dev, ep->rx_ring[i].bufaddr,
 				 	 ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
 			dev_kfree_skb(skb);
 		}
@@ -1414,7 +1414,7 @@
 		ep->tx_skbuff[i] = NULL;
 		if (!skb)
 			continue;
-		pci_unmap_single(ep->pci_dev, ep->tx_ring[i].bufaddr, 
+		pci_unmap_single(ep->pci_dev, ep->tx_ring[i].bufaddr,
 				 skb->len, PCI_DMA_TODEVICE);
 		dev_kfree_skb(skb);
 	}
@@ -1607,7 +1607,7 @@
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct epic_private *ep = dev->priv;
-	
+
 	pci_free_consistent(pdev, TX_TOTAL_SIZE, ep->tx_ring, ep->tx_ring_dma);
 	pci_free_consistent(pdev, RX_TOTAL_SIZE, ep->rx_ring, ep->rx_ring_dma);
 	unregister_netdev(dev);
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 705e122..5669b95 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -107,6 +107,7 @@
  *	0.52: 20 Jan 2006: Add MSI/MSIX support.
  *	0.53: 19 Mar 2006: Fix init from low power mode and add hw reset.
  *	0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
+ *	0.55: 22 Mar 2006: Add flow control (pause frame).
  *
  * Known bugs:
  * We suspect that on some hardware no TX done interrupts are generated.
@@ -118,7 +119,7 @@
  * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
  * superfluous timer interrupts from the nic.
  */
-#define FORCEDETH_VERSION		"0.54"
+#define FORCEDETH_VERSION		"0.55"
 #define DRV_NAME			"forcedeth"
 
 #include <linux/module.h>
@@ -163,6 +164,7 @@
 #define DEV_HAS_MSI             0x0040  /* device supports MSI */
 #define DEV_HAS_MSI_X           0x0080  /* device supports MSI-X */
 #define DEV_HAS_POWER_CNTRL     0x0100  /* device supports power savings */
+#define DEV_HAS_PAUSEFRAME_TX   0x0200  /* device supports tx pause frames */
 
 enum {
 	NvRegIrqStatus = 0x000,
@@ -203,6 +205,7 @@
 	NvRegMSIIrqMask = 0x030,
 #define NVREG_MSI_VECTOR_0_ENABLED 0x01
 	NvRegMisc1 = 0x080,
+#define NVREG_MISC1_PAUSE_TX	0x01
 #define NVREG_MISC1_HD		0x02
 #define NVREG_MISC1_FORCE	0x3b0f3c
 
@@ -214,7 +217,8 @@
 #define NVREG_XMITSTAT_BUSY	0x01
 
 	NvRegPacketFilterFlags = 0x8c,
-#define NVREG_PFF_ALWAYS	0x7F0008
+#define NVREG_PFF_PAUSE_RX	0x08
+#define NVREG_PFF_ALWAYS	0x7F0000
 #define NVREG_PFF_PROMISC	0x80
 #define NVREG_PFF_MYADDR	0x20
 
@@ -277,6 +281,9 @@
 #define NVREG_TXRXCTL_VLANINS	0x00080
 	NvRegTxRingPhysAddrHigh = 0x148,
 	NvRegRxRingPhysAddrHigh = 0x14C,
+	NvRegTxPauseFrame = 0x170,
+#define NVREG_TX_PAUSEFRAME_DISABLE	0x1ff0080
+#define NVREG_TX_PAUSEFRAME_ENABLE	0x0c00030
 	NvRegMIIStatus = 0x180,
 #define NVREG_MIISTAT_ERROR		0x0001
 #define NVREG_MIISTAT_LINKCHANGE	0x0008
@@ -451,7 +458,7 @@
 
 #define RX_RING		128
 #define TX_RING		256
-/* 
+/*
  * If your nic mysteriously hangs then try to reduce the limits
  * to 1/0: It might be required to set NV_TX_LASTPACKET in the
  * last valid ring entry. But this would be impossible to
@@ -473,7 +480,7 @@
 #define POLL_WAIT	(1+HZ/100)
 #define LINK_TIMEOUT	(3*HZ)
 
-/* 
+/*
  * desc_ver values:
  * The nic supports three different descriptor types:
  * - DESC_VER_1: Original
@@ -506,13 +513,10 @@
 #define PHY_1000	0x2
 #define PHY_HALF	0x100
 
-/* FIXME: MII defines that should be added to <linux/mii.h> */
-#define MII_1000BT_CR	0x09
-#define MII_1000BT_SR	0x0a
-#define ADVERTISE_1000FULL	0x0200
-#define ADVERTISE_1000HALF	0x0100
-#define LPA_1000FULL	0x0800
-#define LPA_1000HALF	0x0400
+#define NV_PAUSEFRAME_RX_CAPABLE 0x0001
+#define NV_PAUSEFRAME_TX_CAPABLE 0x0002
+#define NV_PAUSEFRAME_RX_ENABLE  0x0004
+#define NV_PAUSEFRAME_TX_ENABLE  0x0008
 
 /* MSI/MSI-X defines */
 #define NV_MSI_X_MAX_VECTORS  8
@@ -602,6 +606,9 @@
 	/* msi/msi-x fields */
 	u32 msi_flags;
 	struct msix_entry msi_x_entry[NV_MSI_X_MAX_VECTORS];
+
+	/* flow control */
+	u32 pause_flags;
 };
 
 /*
@@ -612,7 +619,7 @@
 
 /*
 * Optimization can be either throughput mode or cpu mode
- * 
+ *
  * Throughput Mode: Every tx and rx packet will generate an interrupt.
  * CPU Mode: Interrupts are controlled by a timer.
  */
@@ -860,7 +867,7 @@
 
 	/* set advertise register */
 	reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
-	reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|0x800|0x400);
+	reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP);
 	if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
 		printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev));
 		return PHY_ERROR;
@@ -873,14 +880,14 @@
 	mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
 	if (mii_status & PHY_GIGABIT) {
 		np->gigabit = PHY_GIGABIT;
-		mii_control_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
+		mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
 		mii_control_1000 &= ~ADVERTISE_1000HALF;
 		if (phyinterface & PHY_RGMII)
 			mii_control_1000 |= ADVERTISE_1000FULL;
 		else
 			mii_control_1000 &= ~ADVERTISE_1000FULL;
 
-		if (mii_rw(dev, np->phyaddr, MII_1000BT_CR, mii_control_1000)) {
+		if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
 			printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
 			return PHY_ERROR;
 		}
@@ -918,6 +925,8 @@
 			return PHY_ERROR;
 		}
 	}
+	/* some phys clear out pause advertisement on reset, set it back */
+	mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg);
 
 	/* restart auto negotiation */
 	mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
@@ -1110,7 +1119,7 @@
 	}
 }
 
-static void nv_init_rx(struct net_device *dev) 
+static void nv_init_rx(struct net_device *dev)
 {
 	struct fe_priv *np = netdev_priv(dev);
 	int i;
@@ -1174,7 +1183,7 @@
 {
 	struct fe_priv *np = netdev_priv(dev);
 	unsigned int i;
-	
+
 	for (i = 0; i < TX_RING; i++) {
 		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
 			np->tx_ring.orig[i].FlagLen = 0;
@@ -1320,7 +1329,7 @@
 	} else {
 		np->tx_ring.ex[start_nr].TxVlan = cpu_to_le32(tx_flags_vlan);
 		np->tx_ring.ex[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
-	}	
+	}
 
 	dprintk(KERN_DEBUG "%s: nv_start_xmit: packet %d (entries %d) queued for transmission. tx_flags_extra: %x\n",
 		dev->name, np->next_tx, entries, tx_flags_extra);
@@ -1395,7 +1404,7 @@
 				} else {
 					np->stats.tx_packets++;
 					np->stats.tx_bytes += skb->len;
-				}				
+				}
 			}
 		}
 		nv_release_txskb(dev, i);
@@ -1441,7 +1450,7 @@
 		for (i=0;i<TX_RING;i+= 4) {
 			if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
 				printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
-				       i, 
+				       i,
 				       le32_to_cpu(np->tx_ring.orig[i].PacketBuffer),
 				       le32_to_cpu(np->tx_ring.orig[i].FlagLen),
 				       le32_to_cpu(np->tx_ring.orig[i+1].PacketBuffer),
@@ -1452,7 +1461,7 @@
 				       le32_to_cpu(np->tx_ring.orig[i+3].FlagLen));
 			} else {
 				printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
-				       i, 
+				       i,
 				       le32_to_cpu(np->tx_ring.ex[i].PacketBufferHigh),
 				       le32_to_cpu(np->tx_ring.ex[i].PacketBufferLow),
 				       le32_to_cpu(np->tx_ring.ex[i].FlagLen),
@@ -1550,7 +1559,6 @@
 	u32 Flags;
 	u32 vlanflags = 0;
 
-
 	for (;;) {
 		struct sk_buff *skb;
 		int len;
@@ -1901,7 +1909,9 @@
 {
 	struct fe_priv *np = netdev_priv(dev);
 	u8 __iomem *base = get_hwbase(dev);
-	int adv, lpa;
+	int adv = 0;
+	int lpa = 0;
+	int adv_lpa, adv_pause, lpa_pause;
 	int newls = np->linkspeed;
 	int newdup = np->duplex;
 	int mii_status;
@@ -1954,8 +1964,8 @@
 
 	retval = 1;
 	if (np->gigabit == PHY_GIGABIT) {
-		control_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
-		status_1000 = mii_rw(dev, np->phyaddr, MII_1000BT_SR, MII_READ);
+		control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
+		status_1000 = mii_rw(dev, np->phyaddr, MII_STAT1000, MII_READ);
 
 		if ((control_1000 & ADVERTISE_1000FULL) &&
 			(status_1000 & LPA_1000FULL)) {
@@ -1973,21 +1983,21 @@
 				dev->name, adv, lpa);
 
 	/* FIXME: handle parallel detection properly */
-	lpa = lpa & adv;
-	if (lpa & LPA_100FULL) {
+	adv_lpa = lpa & adv;
+	if (adv_lpa & LPA_100FULL) {
 		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
 		newdup = 1;
-	} else if (lpa & LPA_100HALF) {
+	} else if (adv_lpa & LPA_100HALF) {
 		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
 		newdup = 0;
-	} else if (lpa & LPA_10FULL) {
+	} else if (adv_lpa & LPA_10FULL) {
 		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
 		newdup = 1;
-	} else if (lpa & LPA_10HALF) {
+	} else if (adv_lpa & LPA_10HALF) {
 		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
 		newdup = 0;
 	} else {
-		dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, lpa);
+		dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa);
 		newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
 		newdup = 0;
 	}
@@ -2030,6 +2040,56 @@
 	writel(np->linkspeed, base + NvRegLinkSpeed);
 	pci_push(base);
 
+	/* setup pause frame based on advertisement and link partner */
+	np->pause_flags &= ~(NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE);
+
+	if (np->duplex != 0) {
+		adv_pause = adv & (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM);
+		lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM);
+
+		switch (adv_pause) {
+		case (ADVERTISE_PAUSE_CAP):
+			if (lpa_pause & LPA_PAUSE_CAP) {
+				np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE;
+			}
+			break;
+		case (ADVERTISE_PAUSE_ASYM):
+			if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM))
+			{
+				np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
+			}
+			break;
+		case (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM):
+			if (lpa_pause & LPA_PAUSE_CAP)
+			{
+				np->pause_flags |= NV_PAUSEFRAME_TX_ENABLE | NV_PAUSEFRAME_RX_ENABLE;
+			}
+			if (lpa_pause == LPA_PAUSE_ASYM)
+			{
+				np->pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
+			}
+			break;
+		}
+	}
+
+	if (np->pause_flags & NV_PAUSEFRAME_RX_CAPABLE) {
+		u32 pff = readl(base + NvRegPacketFilterFlags) & ~NVREG_PFF_PAUSE_RX;
+		if (np->pause_flags & NV_PAUSEFRAME_RX_ENABLE)
+			writel(pff|NVREG_PFF_PAUSE_RX, base + NvRegPacketFilterFlags);
+		else
+			writel(pff, base + NvRegPacketFilterFlags);
+	}
+	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE) {
+		u32 regmisc = readl(base + NvRegMisc1) & ~NVREG_MISC1_PAUSE_TX;
+		if (np->pause_flags & NV_PAUSEFRAME_TX_ENABLE) {
+			writel(NVREG_TX_PAUSEFRAME_ENABLE,  base + NvRegTxPauseFrame);
+			writel(regmisc|NVREG_MISC1_PAUSE_TX, base + NvRegMisc1);
+		} else {
+			writel(NVREG_TX_PAUSEFRAME_DISABLE,  base + NvRegTxPauseFrame);
+			writel(regmisc, base + NvRegMisc1);
+		}
+	}
+
 	return retval;
 }
 
@@ -2090,7 +2150,7 @@
 		spin_lock(&np->lock);
 		nv_tx_done(dev);
 		spin_unlock(&np->lock);
-		
+
 		nv_rx_process(dev);
 		if (nv_alloc_rx(dev)) {
 			spin_lock(&np->lock);
@@ -2098,7 +2158,7 @@
 				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
 			spin_unlock(&np->lock);
 		}
-		
+
 		if (events & NVREG_IRQ_LINK) {
 			spin_lock(&np->lock);
 			nv_link_irq(dev);
@@ -2163,7 +2223,7 @@
 		spin_lock_irq(&np->lock);
 		nv_tx_done(dev);
 		spin_unlock_irq(&np->lock);
-		
+
 		if (events & (NVREG_IRQ_TX_ERR)) {
 			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
 						dev->name, events);
@@ -2206,7 +2266,7 @@
 		dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
 		if (!(events & np->irqmask))
 			break;
-		
+
 		nv_rx_process(dev);
 		if (nv_alloc_rx(dev)) {
 			spin_lock_irq(&np->lock);
@@ -2214,7 +2274,7 @@
 				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
 			spin_unlock_irq(&np->lock);
 		}
-		
+
 		if (i > max_interrupt_work) {
 			spin_lock_irq(&np->lock);
 			/* disable interrupts on the nic */
@@ -2253,7 +2313,7 @@
 		dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
 		if (!(events & np->irqmask))
 			break;
-		
+
 		if (events & NVREG_IRQ_LINK) {
 			spin_lock_irq(&np->lock);
 			nv_link_irq(dev);
@@ -2326,7 +2386,7 @@
 	np->nic_poll_irq = 0;
 
 	/* FIXME: Do we need synchronize_irq(dev->irq) here? */
-	
+
 	writel(mask, base + NvRegIrqMask);
 	pci_push(base);
 
@@ -2441,7 +2501,7 @@
 	if (adv & ADVERTISE_100FULL)
 		ecmd->advertising |= ADVERTISED_100baseT_Full;
 	if (np->autoneg && np->gigabit == PHY_GIGABIT) {
-		adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
+		adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
 		if (adv & ADVERTISE_1000FULL)
 			ecmd->advertising |= ADVERTISED_1000baseT_Full;
 	}
@@ -2505,23 +2565,23 @@
 
 		/* advertise only what has been requested */
 		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
-		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
+		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
 		if (ecmd->advertising & ADVERTISED_10baseT_Half)
 			adv |= ADVERTISE_10HALF;
 		if (ecmd->advertising & ADVERTISED_10baseT_Full)
-			adv |= ADVERTISE_10FULL;
+			adv |= ADVERTISE_10FULL | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
 		if (ecmd->advertising & ADVERTISED_100baseT_Half)
 			adv |= ADVERTISE_100HALF;
 		if (ecmd->advertising & ADVERTISED_100baseT_Full)
-			adv |= ADVERTISE_100FULL;
+			adv |= ADVERTISE_100FULL | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
 		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
 
 		if (np->gigabit == PHY_GIGABIT) {
-			adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
+			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
 			adv &= ~ADVERTISE_1000FULL;
 			if (ecmd->advertising & ADVERTISED_1000baseT_Full)
 				adv |= ADVERTISE_1000FULL;
-			mii_rw(dev, np->phyaddr, MII_1000BT_CR, adv);
+			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
 		}
 
 		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
@@ -2534,22 +2594,22 @@
 		np->autoneg = 0;
 
 		adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
-		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
+		adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
 		if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF)
 			adv |= ADVERTISE_10HALF;
 		if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL)
-			adv |= ADVERTISE_10FULL;
+			adv |= ADVERTISE_10FULL | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
 		if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF)
 			adv |= ADVERTISE_100HALF;
 		if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL)
-			adv |= ADVERTISE_100FULL;
+			adv |= ADVERTISE_100FULL | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
 		mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
 		np->fixed_mode = adv;
 
 		if (np->gigabit == PHY_GIGABIT) {
-			adv = mii_rw(dev, np->phyaddr, MII_1000BT_CR, MII_READ);
+			adv = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ);
 			adv &= ~ADVERTISE_1000FULL;
-			mii_rw(dev, np->phyaddr, MII_1000BT_CR, adv);
+			mii_rw(dev, np->phyaddr, MII_CTRL1000, adv);
 		}
 
 		bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
@@ -2615,6 +2675,18 @@
 	return ret;
 }
 
+#ifdef NETIF_F_TSO
+static int nv_set_tso(struct net_device *dev, u32 value)
+{
+	struct fe_priv *np = netdev_priv(dev);
+
+	if ((np->driver_data & DEV_HAS_CHECKSUM))
+		return ethtool_op_set_tso(dev, value);
+	else
+		return value ? -EOPNOTSUPP : 0;
+}
+#endif
+
 static struct ethtool_ops ops = {
 	.get_drvinfo = nv_get_drvinfo,
 	.get_link = ethtool_op_get_link,
@@ -2626,6 +2698,10 @@
 	.get_regs = nv_get_regs,
 	.nway_reset = nv_nway_reset,
 	.get_perm_addr = ethtool_op_get_perm_addr,
+#ifdef NETIF_F_TSO
+	.get_tso = ethtool_op_get_tso,
+	.set_tso = nv_set_tso
+#endif
 };
 
 static void nv_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
@@ -2813,6 +2889,9 @@
 
 	writel(0, base + NvRegAdapterControl);
 
+	if (np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)
+		writel(NVREG_TX_PAUSEFRAME_DISABLE,  base + NvRegTxPauseFrame);
+
 	/* 2) initialize descriptor rings */
 	set_bufsize(dev);
 	oom = nv_init_ring(dev);
@@ -3098,6 +3177,12 @@
 		np->msi_flags |= NV_MSI_X_CAPABLE;
 	}
 
+	np->pause_flags = NV_PAUSEFRAME_RX_CAPABLE;
+	if (id->driver_data & DEV_HAS_PAUSEFRAME_TX) {
+		np->pause_flags |= NV_PAUSEFRAME_TX_CAPABLE;
+	}
+
+
 	err = -ENOMEM;
 	np->base = ioremap(addr, np->register_size);
 	if (!np->base)
@@ -3244,7 +3329,7 @@
 		       pci_name(pci_dev));
 		goto out_freering;
 	}
-	
+
 	/* reset it */
 	phy_init(dev);
 
@@ -3358,11 +3443,11 @@
 	},
 	{	/* MCP55 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_14),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX,
 	},
 	{	/* MCP55 Ethernet Controller */
 		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NVENET_15),
-		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL,
+		.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER|DEV_HAS_LARGEDESC|DEV_HAS_CHECKSUM|DEV_HAS_HIGH_DMA|DEV_HAS_VLAN|DEV_HAS_MSI|DEV_HAS_MSI_X|DEV_HAS_POWER_CNTRL|DEV_HAS_PAUSEFRAME_TX,
 	},
 	{0,},
 };
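
The pause-frame handling added to nv_update_linkspeed above is the usual 802.3x autonegotiation resolution: the local advertisement (ADVERTISE_PAUSE_CAP/ASYM) is combined with the link partner's (LPA_PAUSE_CAP/ASYM) to decide whether to honour received pause frames, send them, or both. A minimal standalone restatement of that switch, assuming only the linux/mii.h bit definitions; the PAUSE_RX/PAUSE_TX flags and the function name are illustrative, not driver symbols:

#include <linux/mii.h>

/* Illustrative only: PAUSE_RX/PAUSE_TX stand in for the driver's
 * NV_PAUSEFRAME_RX_ENABLE/NV_PAUSEFRAME_TX_ENABLE flags. */
#define PAUSE_RX 0x1
#define PAUSE_TX 0x2

static int resolve_pause(int adv, int lpa)
{
	int adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
	int lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
	int res = 0;

	switch (adv_pause) {
	case ADVERTISE_PAUSE_CAP:
		/* we advertise symmetric pause only: the partner must too */
		if (lpa_pause & LPA_PAUSE_CAP)
			res = PAUSE_RX | PAUSE_TX;
		break;
	case ADVERTISE_PAUSE_ASYM:
		/* asymmetric only: we may send pause frames, not honour them */
		if (lpa_pause == (LPA_PAUSE_CAP | LPA_PAUSE_ASYM))
			res = PAUSE_TX;
		break;
	case ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM:
		if (lpa_pause & LPA_PAUSE_CAP)
			res = PAUSE_RX | PAUSE_TX;
		else if (lpa_pause == LPA_PAUSE_ASYM)
			res = PAUSE_RX;
		break;
	}
	return res;
}

The resolved bits are what the hunk above then programs into NvRegPacketFilterFlags (RX pause) and NvRegMisc1/NvRegTxPauseFrame (TX pause), gated by the NV_PAUSEFRAME_*_CAPABLE flags, and resolution only happens at all on full-duplex links.
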
diff --git a/drivers/net/ibmlana.c b/drivers/net/ibmlana.c
index 01ad904..51fd516 100644
--- a/drivers/net/ibmlana.c
+++ b/drivers/net/ibmlana.c
@@ -1,4 +1,4 @@
-/* 
+/*
 net-3-driver for the IBM LAN Adapter/A
 
 This is an extension to the Linux operating system, and is covered by the
@@ -11,9 +11,9 @@
 SK_G16 and 3C523 driver.
 
 paper sources:
-  'PC Hardware: Aufbau, Funktionsweise, Programmierung' by 
+  'PC Hardware: Aufbau, Funktionsweise, Programmierung' by
   Hans-Peter Messmer for the basic Microchannel stuff
-  
+
   'Linux Geraetetreiber' by Alessandro Rubini, Kalle Dalheimer
   for help on Ethernet driver programming
 
@@ -27,14 +27,14 @@
 
 special acknowledgements to:
   - Bob Eager for helping me out with documentation from IBM
-  - Jim Shorney for his endless patience with me while I was using 
+  - Jim Shorney for his endless patience with me while I was using
     him as a beta tester to trace down the address filter bug ;-)
 
   Missing things:
 
   -> set debug level via ioctl instead of compile-time switches
   -> I didn't follow the development of the 2.1.x kernels, so my
-     assumptions about which things changed with which kernel version 
+     assumptions about which things changed with which kernel version
      are probably nonsense
 
 History:
@@ -275,7 +275,7 @@
 	priv->rrastart = raddr = priv->txbufstart + (TXBUFCNT * PKTSIZE);
 	priv->rdastart = addr = priv->rrastart + (priv->rxbufcnt * sizeof(rra_t));
 	priv->rxbufstart = baddr = priv->rdastart + (priv->rxbufcnt * sizeof(rda_t));
-	
+
 	for (z = 0; z < priv->rxbufcnt; z++) {
 		rra.startlo = baddr;
 		rra.starthi = 0;
@@ -570,7 +570,7 @@
 		lrdaaddr = priv->rdastart + (priv->lastrxdescr * sizeof(rda_t));
 		memcpy_fromio(&rda, priv->base + rdaaddr, sizeof(rda_t));
 
-		/* iron out upper word halves of fields we use - SONIC will duplicate 
+		/* iron out upper word halves of fields we use - SONIC will duplicate
 		   bits 0..15 to 16..31 */
 
 		rda.status &= 0xffff;
@@ -836,9 +836,9 @@
 	baddr = priv->txbufstart + (priv->nexttxdescr * PKTSIZE);
 	memcpy_toio(priv->base + baddr, skb->data, skb->len);
 
-	/* copy filler into RAM - in case we're filling up... 
+	/* copy filler into RAM - in case we're filling up...
 	   we're filling a bit more than necessary, but that doesn't harm
-	   since the buffer is far larger... 
+	   since the buffer is far larger...
 	   Sorry Linus for the filler string but I couldn't resist ;-) */
 
 	if (tmplen > skb->len) {
@@ -952,7 +952,7 @@
 	priv->realirq = irq;
 	priv->medium = medium;
 	spin_lock_init(&priv->lock);
-		
+
 
 	/* set base + irq for this device (irq not allocated so far) */
 
diff --git a/drivers/net/ibmlana.h b/drivers/net/ibmlana.h
index 458ee22..6b58bab 100644
--- a/drivers/net/ibmlana.h
+++ b/drivers/net/ibmlana.h
@@ -17,7 +17,7 @@
 /* media enumeration - defined in a way that it fits onto the LAN/A's
    POS registers... */
 
-typedef enum { 
+typedef enum {
 	Media_10BaseT, Media_10Base5,
 	Media_Unknown, Media_10Base2, Media_Count
 } ibmlana_medium;
@@ -27,7 +27,7 @@
 typedef struct {
 	unsigned int slot;		/* MCA-Slot-#                       */
 	struct net_device_stats stat;	/* packet statistics            */
-	int realirq;			/* memorizes actual IRQ, even when 
+	int realirq;			/* memorizes actual IRQ, even when
 					   currently not allocated          */
 	ibmlana_medium medium;		/* physical connector               */
 	u32 	tdastart, txbufstart,	/* addresses                        */
@@ -41,7 +41,7 @@
 	spinlock_t lock;
 } ibmlana_priv;
 
-/* this card uses quite a lot of I/O ports...luckily the MCA bus decodes 
+/* this card uses quite a lot of I/O ports...luckily the MCA bus decodes
    a full 64K I/O range... */
 
 #define IBM_LANA_IORANGE 0xa0
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index 52d0102..666346f 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -24,7 +24,7 @@
 /* for use with IBM i/pSeries LPAR Linux.  It utilizes the logical LAN    */
 /* option of the RS/6000 Platform Architecture to interface with virtual  */
 /* ethernet NICs that are presented to the partition by the hypervisor.   */
-/*                                                                        */ 
+/*                                                                        */
 /**************************************************************************/
 /*
   TODO:
@@ -79,7 +79,7 @@
 #else
 #define ibmveth_debug_printk_no_adapter(fmt, args...)
 #define ibmveth_debug_printk(fmt, args...)
-#define ibmveth_assert(expr) 
+#define ibmveth_assert(expr)
 #endif
 
 static int ibmveth_open(struct net_device *dev);
@@ -96,6 +96,7 @@
 static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter);
 static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
 static inline void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
+static struct kobj_type ktype_veth_pool;
 
 #ifdef CONFIG_PROC_FS
 #define IBMVETH_PROC_DIR "net/ibmveth"
@@ -133,12 +134,13 @@
 }
 
 /* setup the initial settings for a buffer pool */
-static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, u32 pool_index, u32 pool_size, u32 buff_size)
+static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, u32 pool_index, u32 pool_size, u32 buff_size, u32 pool_active)
 {
 	pool->size = pool_size;
 	pool->index = pool_index;
 	pool->buff_size = buff_size;
 	pool->threshold = pool_size / 2;
+	pool->active = pool_active;
 }
 
 /* allocate and set up a buffer pool - called during open */
@@ -146,13 +148,13 @@
 {
 	int i;
 
-	pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL); 
+	pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);
 
 	if(!pool->free_map) {
 		return -1;
 	}
 
-	pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL); 
+	pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
 	if(!pool->dma_addr) {
 		kfree(pool->free_map);
 		pool->free_map = NULL;
@@ -180,7 +182,6 @@
 	atomic_set(&pool->available, 0);
 	pool->producer_index = 0;
 	pool->consumer_index = 0;
-	pool->active = 0;
 
 	return 0;
 }
@@ -214,7 +215,7 @@
 
 		free_index = pool->consumer_index++ % pool->size;
 		index = pool->free_map[free_index];
-	
+
 		ibmveth_assert(index != IBM_VETH_INVALID_MAP);
 		ibmveth_assert(pool->skbuff[index] == NULL);
 
@@ -231,10 +232,10 @@
 		desc.desc = 0;
 		desc.fields.valid = 1;
 		desc.fields.length = pool->buff_size;
-		desc.fields.address = dma_addr; 
+		desc.fields.address = dma_addr;
 
 		lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
-		    
+
 		if(lpar_rc != H_SUCCESS) {
 			pool->free_map[free_index] = index;
 			pool->skbuff[index] = NULL;
@@ -250,13 +251,13 @@
 			adapter->replenish_add_buff_success++;
 		}
 	}
-    
+
 	mb();
 	atomic_add(buffers_added, &(pool->available));
 }
 
 /* replenish routine */
-static void ibmveth_replenish_task(struct ibmveth_adapter *adapter) 
+static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
 {
 	int i;
 
@@ -264,7 +265,7 @@
 
 	for(i = 0; i < IbmVethNumBufferPools; i++)
 		if(adapter->rx_buff_pool[i].active)
-			ibmveth_replenish_buffer_pool(adapter, 
+			ibmveth_replenish_buffer_pool(adapter,
 						     &adapter->rx_buff_pool[i]);
 
 	adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);
@@ -301,7 +302,6 @@
 		kfree(pool->skbuff);
 		pool->skbuff = NULL;
 	}
-	pool->active = 0;
 }
 
 /* remove a buffer from a pool */
@@ -372,7 +372,7 @@
 	desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];
 
 	lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
-		    
+
 	if(lpar_rc != H_SUCCESS) {
 		ibmveth_debug_printk("h_add_logical_lan_buffer failed during recycle rc=%ld", lpar_rc);
 		ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
@@ -407,7 +407,7 @@
 		}
 		free_page((unsigned long)adapter->buffer_list_addr);
 		adapter->buffer_list_addr = NULL;
-	} 
+	}
 
 	if(adapter->filter_list_addr != NULL) {
 		if(!dma_mapping_error(adapter->filter_list_dma)) {
@@ -433,7 +433,9 @@
 	}
 
 	for(i = 0; i<IbmVethNumBufferPools; i++)
-		ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[i]);
+		if (adapter->rx_buff_pool[i].active)
+			ibmveth_free_buffer_pool(adapter,
+						 &adapter->rx_buff_pool[i]);
 }
 
 static int ibmveth_open(struct net_device *netdev)
@@ -450,10 +452,10 @@
 
 	for(i = 0; i<IbmVethNumBufferPools; i++)
 		rxq_entries += adapter->rx_buff_pool[i].size;
-    
+
 	adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
 	adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
- 
+
 	if(!adapter->buffer_list_addr || !adapter->filter_list_addr) {
 		ibmveth_error_printk("unable to allocate filter or buffer list pages\n");
 		ibmveth_cleanup(adapter);
@@ -489,9 +491,6 @@
 	adapter->rx_queue.num_slots = rxq_entries;
 	adapter->rx_queue.toggle = 1;
 
-	/* call change_mtu to init the buffer pools based in initial mtu */
-	ibmveth_change_mtu(netdev, netdev->mtu);
-
 	memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
 	mac_address = mac_address >> 16;
 
@@ -504,7 +503,7 @@
 	ibmveth_debug_printk("filter list @ 0x%p\n", adapter->filter_list_addr);
 	ibmveth_debug_printk("receive q   @ 0x%p\n", adapter->rx_queue.queue_addr);
 
-    
+
 	lpar_rc = h_register_logical_lan(adapter->vdev->unit_address,
 					 adapter->buffer_list_dma,
 					 rxq_desc.desc,
@@ -519,7 +518,18 @@
 				     rxq_desc.desc,
 				     mac_address);
 		ibmveth_cleanup(adapter);
-		return -ENONET; 
+		return -ENONET;
+	}
+
+	for(i = 0; i<IbmVethNumBufferPools; i++) {
+		if(!adapter->rx_buff_pool[i].active)
+			continue;
+		if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
+			ibmveth_error_printk("unable to alloc pool\n");
+			adapter->rx_buff_pool[i].active = 0;
+			ibmveth_cleanup(adapter);
+			return -ENOMEM ;
+		}
 	}
 
 	ibmveth_debug_printk("registering irq 0x%x\n", netdev->irq);
@@ -547,10 +557,11 @@
 {
 	struct ibmveth_adapter *adapter = netdev->priv;
 	long lpar_rc;
-    
+
 	ibmveth_debug_printk("close starting\n");
 
-	netif_stop_queue(netdev);
+	if (!adapter->pool_config)
+		netif_stop_queue(netdev);
 
 	free_irq(netdev->irq, netdev);
 
@@ -694,7 +705,7 @@
 					     desc[5].desc,
 					     correlator);
 	} while ((lpar_rc == H_BUSY) && (retry_count--));
-    
+
 	if(lpar_rc != H_SUCCESS && lpar_rc != H_DROPPED) {
 		int i;
 		ibmveth_error_printk("tx: h_send_logical_lan failed with rc=%ld\n", lpar_rc);
@@ -780,7 +791,7 @@
 		/* more work to do - return that we are not done yet */
 		netdev->quota -= frames_processed;
 		*budget -= frames_processed;
-		return 1; 
+		return 1;
 	}
 
 	/* we think we are done - reenable interrupts, then check once more to make sure we are done */
@@ -806,7 +817,7 @@
 }
 
 static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
-{   
+{
 	struct net_device *netdev = dev_instance;
 	struct ibmveth_adapter *adapter = netdev->priv;
 	unsigned long lpar_rc;
@@ -862,7 +873,7 @@
 				ibmveth_error_printk("h_multicast_ctrl rc=%ld when adding an entry to the filter table\n", lpar_rc);
 			}
 		}
-	
+
 		/* re-enable filtering */
 		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
 					   IbmVethMcastEnableFiltering,
@@ -876,46 +887,22 @@
 static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
 {
 	struct ibmveth_adapter *adapter = dev->priv;
+	int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
 	int i;
-	int prev_smaller = 1;
 
-	if ((new_mtu < 68) || 
-	    (new_mtu > (pool_size[IbmVethNumBufferPools-1]) - IBMVETH_BUFF_OH))
+	if (new_mtu < IBMVETH_MAX_MTU)
 		return -EINVAL;
 
+	/* Look for an active buffer pool that can hold the new MTU */
 	for(i = 0; i<IbmVethNumBufferPools; i++) {
-		int activate = 0;
-		if (new_mtu > (pool_size[i]  - IBMVETH_BUFF_OH)) { 
-			activate = 1;
-			prev_smaller= 1;
-		} else {
-			if (prev_smaller)
-				activate = 1;
-			prev_smaller= 0;
+		if (!adapter->rx_buff_pool[i].active)
+			continue;
+		if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
+			dev->mtu = new_mtu;
+			return 0;
 		}
-
-		if (activate && !adapter->rx_buff_pool[i].active) {
-			struct ibmveth_buff_pool *pool = 
-						&adapter->rx_buff_pool[i];
-			if(ibmveth_alloc_buffer_pool(pool)) {
-				ibmveth_error_printk("unable to alloc pool\n");
-				return -ENOMEM;
-			}
-			adapter->rx_buff_pool[i].active = 1;
-		} else if (!activate && adapter->rx_buff_pool[i].active) {
-			adapter->rx_buff_pool[i].active = 0;
-			h_free_logical_lan_buffer(adapter->vdev->unit_address,
-					  (u64)pool_size[i]);
-		}
-
 	}
-
-	/* kick the interrupt handler so that the new buffer pools get
-	   replenished or deallocated */
-	ibmveth_interrupt(dev->irq, dev, NULL);
-
-	dev->mtu = new_mtu;
-	return 0;	
+	return -EINVAL;
 }
 
 static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
@@ -928,7 +915,7 @@
 	unsigned int *mcastFilterSize_p;
 
 
-	ibmveth_debug_printk_no_adapter("entering ibmveth_probe for UA 0x%x\n", 
+	ibmveth_debug_printk_no_adapter("entering ibmveth_probe for UA 0x%x\n",
 					dev->unit_address);
 
 	mac_addr_p = (unsigned char *) vio_get_attribute(dev, VETH_MAC_ADDR, 0);
@@ -937,7 +924,7 @@
 				"attribute\n", __FILE__, __LINE__);
 		return 0;
 	}
-	
+
 	mcastFilterSize_p= (unsigned int *) vio_get_attribute(dev, VETH_MCAST_FILTER_SIZE, 0);
 	if(!mcastFilterSize_p) {
 		printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find "
@@ -945,7 +932,7 @@
 				__FILE__, __LINE__);
 		return 0;
 	}
-	
+
 	netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));
 
 	if(!netdev)
@@ -960,13 +947,14 @@
 	adapter->vdev = dev;
 	adapter->netdev = netdev;
 	adapter->mcastFilterSize= *mcastFilterSize_p;
-	
+	adapter->pool_config = 0;
+
 	/* 	Some older boxes running PHYP non-natively have an OF that
-		returns a 8-byte local-mac-address field (and the first 
+		returns an 8-byte local-mac-address field (and the first
 		2 bytes have to be ignored) while newer boxes' OF return
-		a 6-byte field. Note that IEEE 1275 specifies that 
+		a 6-byte field. Note that IEEE 1275 specifies that
 		local-mac-address must be a 6-byte field.
-		The RPA doc specifies that the first byte must be 10b, so 
+		The RPA doc specifies that the first byte must be 10b, so
 		we'll just look for it to solve this 8 vs. 6 byte field issue */
 
 	if ((*mac_addr_p & 0x3) != 0x02)
@@ -976,7 +964,7 @@
 	memcpy(&adapter->mac_addr, mac_addr_p, 6);
 
 	adapter->liobn = dev->iommu_table->it_index;
-	
+
 	netdev->irq = dev->irq;
 	netdev->open               = ibmveth_open;
 	netdev->poll               = ibmveth_poll;
@@ -989,14 +977,21 @@
 	netdev->ethtool_ops           = &netdev_ethtool_ops;
 	netdev->change_mtu         = ibmveth_change_mtu;
 	SET_NETDEV_DEV(netdev, &dev->dev);
- 	netdev->features |= NETIF_F_LLTX; 
+ 	netdev->features |= NETIF_F_LLTX;
 	spin_lock_init(&adapter->stats_lock);
 
 	memcpy(&netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);
 
-	for(i = 0; i<IbmVethNumBufferPools; i++)
-		ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i, 
-					 pool_count[i], pool_size[i]);
+	for(i = 0; i<IbmVethNumBufferPools; i++) {
+		struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
+		ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
+					 pool_count[i], pool_size[i],
+					 pool_active[i]);
+		kobj->parent = &dev->dev.kobj;
+		sprintf(kobj->name, "pool%d", i);
+		kobj->ktype = &ktype_veth_pool;
+		kobject_register(kobj);
+	}
 
 	ibmveth_debug_printk("adapter @ 0x%p\n", adapter);
 
@@ -1025,6 +1020,10 @@
 {
 	struct net_device *netdev = dev->dev.driver_data;
 	struct ibmveth_adapter *adapter = netdev->priv;
+	int i;
+
+	for(i = 0; i<IbmVethNumBufferPools; i++)
+		kobject_unregister(&adapter->rx_buff_pool[i].kobj);
 
 	unregister_netdev(netdev);
 
@@ -1048,7 +1047,7 @@
 	remove_proc_entry(IBMVETH_PROC_DIR, NULL);
 }
 
-static void *ibmveth_seq_start(struct seq_file *seq, loff_t *pos) 
+static void *ibmveth_seq_start(struct seq_file *seq, loff_t *pos)
 {
 	if (*pos == 0) {
 		return (void *)1;
@@ -1063,18 +1062,18 @@
 	return NULL;
 }
 
-static void ibmveth_seq_stop(struct seq_file *seq, void *v) 
+static void ibmveth_seq_stop(struct seq_file *seq, void *v)
 {
 }
 
-static int ibmveth_seq_show(struct seq_file *seq, void *v) 
+static int ibmveth_seq_show(struct seq_file *seq, void *v)
 {
 	struct ibmveth_adapter *adapter = seq->private;
 	char *current_mac = ((char*) &adapter->netdev->dev_addr);
 	char *firmware_mac = ((char*) &adapter->mac_addr) ;
 
 	seq_printf(seq, "%s %s\n\n", ibmveth_driver_string, ibmveth_driver_version);
-	
+
 	seq_printf(seq, "Unit Address:    0x%x\n", adapter->vdev->unit_address);
 	seq_printf(seq, "LIOBN:           0x%lx\n", adapter->liobn);
 	seq_printf(seq, "Current MAC:     %02X:%02X:%02X:%02X:%02X:%02X\n",
@@ -1083,7 +1082,7 @@
 	seq_printf(seq, "Firmware MAC:    %02X:%02X:%02X:%02X:%02X:%02X\n",
 		   firmware_mac[0], firmware_mac[1], firmware_mac[2],
 		   firmware_mac[3], firmware_mac[4], firmware_mac[5]);
-	
+
 	seq_printf(seq, "\nAdapter Statistics:\n");
 	seq_printf(seq, "  TX:  skbuffs linearized:          %ld\n", adapter->tx_linearized);
 	seq_printf(seq, "       multi-descriptor sends:      %ld\n", adapter->tx_multidesc_send);
@@ -1095,7 +1094,7 @@
 	seq_printf(seq, "       add buffer failures:         %ld\n", adapter->replenish_add_buff_failure);
 	seq_printf(seq, "       invalid buffers:             %ld\n", adapter->rx_invalid_buffer);
 	seq_printf(seq, "       no buffers:                  %ld\n", adapter->rx_no_buffer);
-	
+
 	return 0;
 }
 static struct seq_operations ibmveth_seq_ops = {
@@ -1153,11 +1152,11 @@
 }
 
 #else /* CONFIG_PROC_FS */
-static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter) 
+static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
 {
 }
 
-static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter) 
+static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
 {
 }
 static void ibmveth_proc_register_driver(void)
@@ -1169,6 +1168,132 @@
 }
 #endif /* CONFIG_PROC_FS */
 
+static struct attribute veth_active_attr;
+static struct attribute veth_num_attr;
+static struct attribute veth_size_attr;
+
+static ssize_t veth_pool_show(struct kobject * kobj,
+                              struct attribute * attr, char * buf)
+{
+	struct ibmveth_buff_pool *pool = container_of(kobj,
+						      struct ibmveth_buff_pool,
+						      kobj);
+
+	if (attr == &veth_active_attr)
+		return sprintf(buf, "%d\n", pool->active);
+	else if (attr == &veth_num_attr)
+		return sprintf(buf, "%d\n", pool->size);
+	else if (attr == &veth_size_attr)
+		return sprintf(buf, "%d\n", pool->buff_size);
+	return 0;
+}
+
+static ssize_t veth_pool_store(struct kobject * kobj, struct attribute * attr,
+const char * buf, size_t count)
+{
+	struct ibmveth_buff_pool *pool = container_of(kobj,
+						      struct ibmveth_buff_pool,
+						      kobj);
+	struct net_device *netdev =
+	    container_of(kobj->parent, struct device, kobj)->driver_data;
+	struct ibmveth_adapter *adapter = netdev->priv;
+	long value = simple_strtol(buf, NULL, 10);
+	long rc;
+
+	if (attr == &veth_active_attr) {
+		if (value && !pool->active) {
+			if(ibmveth_alloc_buffer_pool(pool)) {
+                                ibmveth_error_printk("unable to alloc pool\n");
+                                return -ENOMEM;
+                        }
+			pool->active = 1;
+			adapter->pool_config = 1;
+			ibmveth_close(netdev);
+			adapter->pool_config = 0;
+			if ((rc = ibmveth_open(netdev)))
+				return rc;
+		} else if (!value && pool->active) {
+			int mtu = netdev->mtu + IBMVETH_BUFF_OH;
+			int i;
+			/* Make sure there is a buffer pool with buffers that
+			   can hold a packet of the size of the MTU */
+			for(i = 0; i<IbmVethNumBufferPools; i++) {
+				if (pool == &adapter->rx_buff_pool[i])
+					continue;
+				if (!adapter->rx_buff_pool[i].active)
+					continue;
+				if (mtu < adapter->rx_buff_pool[i].buff_size) {
+					pool->active = 0;
+					h_free_logical_lan_buffer(adapter->
+								  vdev->
+								  unit_address,
+								  pool->
+								  buff_size);
+				}
+			}
+			if (pool->active) {
+				ibmveth_error_printk("no active pool >= MTU\n");
+				return -EPERM;
+			}
+		}
+	} else if (attr == &veth_num_attr) {
+		if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT)
+			return -EINVAL;
+		else {
+			adapter->pool_config = 1;
+			ibmveth_close(netdev);
+			adapter->pool_config = 0;
+			pool->size = value;
+			if ((rc = ibmveth_open(netdev)))
+				return rc;
+		}
+	} else if (attr == &veth_size_attr) {
+		if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE)
+			return -EINVAL;
+		else {
+			adapter->pool_config = 1;
+			ibmveth_close(netdev);
+			adapter->pool_config = 0;
+			pool->buff_size = value;
+			if ((rc = ibmveth_open(netdev)))
+				return rc;
+		}
+	}
+
+	/* kick the interrupt handler to allocate/deallocate pools */
+	ibmveth_interrupt(netdev->irq, netdev, NULL);
+	return count;
+}
+
+
+#define ATTR(_name, _mode)      \
+        struct attribute veth_##_name##_attr = {               \
+        .name = __stringify(_name), .mode = _mode, .owner = THIS_MODULE \
+        };
+
+static ATTR(active, 0644);
+static ATTR(num, 0644);
+static ATTR(size, 0644);
+
+static struct attribute * veth_pool_attrs[] = {
+	&veth_active_attr,
+	&veth_num_attr,
+	&veth_size_attr,
+	NULL,
+};
+
+static struct sysfs_ops veth_pool_ops = {
+	.show   = veth_pool_show,
+	.store  = veth_pool_store,
+};
+
+static struct kobj_type ktype_veth_pool = {
+	.release        = NULL,
+	.sysfs_ops      = &veth_pool_ops,
+	.default_attrs  = veth_pool_attrs,
+};
+
+
 static struct vio_device_id ibmveth_device_table[] __devinitdata= {
 	{ "network", "IBM,l-lan"},
 	{ "", "" }
@@ -1198,7 +1323,7 @@
 {
 	vio_unregister_driver(&ibmveth_driver);
 	ibmveth_proc_unregister_driver();
-}	
+}
 
 module_init(ibmveth_module_init);
 module_exit(ibmveth_module_exit);
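
The kobject registered per receive buffer pool in ibmveth_probe exposes the active, num and size attributes handled by veth_pool_show/veth_pool_store above, so pools can now be resized or toggled at run time instead of being driven implicitly by MTU changes. A user-space sketch of tuning one pool; the sysfs path is an assumption (the kobjects are parented to the vio device, so they should appear under its sysfs directory, but the unit address shown is hypothetical):

#include <stdio.h>

/* Hypothetical path: pool kobjects sit under the vio device directory. */
#define POOL_DIR "/sys/devices/vio/30000002/pool2"

static int write_attr(const char *name, const char *val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), POOL_DIR "/%s", name);
	f = fopen(path, "w");
	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	/* grow the buffers first (bounded by IBMVETH_MAX_BUF_SIZE) ... */
	write_attr("size", "16384");
	/* ... then activate the pool; the store hook closes and reopens
	 * the device so the new pool gets allocated and registered. */
	write_attr("active", "1");
	return 0;
}
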
diff --git a/drivers/net/ibmveth.h b/drivers/net/ibmveth.h
index 46919a8..8385bf8 100644
--- a/drivers/net/ibmveth.h
+++ b/drivers/net/ibmveth.h
@@ -75,10 +75,13 @@
 
 #define IbmVethNumBufferPools 5
 #define IBMVETH_BUFF_OH 22 /* Overhead: 14 ethernet header + 8 opaque handle */
+#define IBMVETH_MAX_MTU 68
+#define IBMVETH_MAX_POOL_COUNT 4096
+#define IBMVETH_MAX_BUF_SIZE (1024 * 128)
 
-/* pool_size should be sorted */
 static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };
 static int pool_count[] = { 256, 768, 256, 256, 256 };
+static int pool_active[] = { 1, 1, 0, 0, 0};
 
 #define IBM_VETH_INVALID_MAP ((u16)0xffff)
 
@@ -94,6 +97,7 @@
     dma_addr_t *dma_addr;
     struct sk_buff **skbuff;
     int active;
+    struct kobject kobj;
 };
 
 struct ibmveth_rx_q {
@@ -118,6 +122,7 @@
     dma_addr_t filter_list_dma;
     struct ibmveth_buff_pool rx_buff_pool[IbmVethNumBufferPools];
     struct ibmveth_rx_q rx_queue;
+    int pool_config;
 
     /* adapter specific stats */
     u64 replenish_task_cycles;
@@ -134,7 +139,7 @@
     spinlock_t stats_lock;
 };
 
-struct ibmveth_buf_desc_fields {	
+struct ibmveth_buf_desc_fields {
     u32 valid : 1;
     u32 toggle : 1;
     u32 reserved : 6;
@@ -143,7 +148,7 @@
 };
 
 union ibmveth_buf_desc {
-    u64 desc;	
+    u64 desc;
     struct ibmveth_buf_desc_fields fields;
 };
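
With the defaults above, only the 512-byte and 2 KiB pools start out active, and the reworked ibmveth_change_mtu accepts a new MTU only when new_mtu + IBMVETH_BUFF_OH fits the buffer size of some active pool. A small standalone sketch of that check against the default tables (constants copied from this header, helper name illustrative):

#include <stdio.h>

#define IBMVETH_BUFF_OH 22	/* 14-byte ethernet header + 8-byte opaque handle */
#define NUM_POOLS 5

/* defaults from ibmveth.h */
static int pool_size[]   = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 };
static int pool_active[] = { 1, 1, 0, 0, 0 };

/* first active pool whose buffers can hold the MTU plus overhead, or -1 */
static int pool_for_mtu(int mtu)
{
	int need = mtu + IBMVETH_BUFF_OH;
	int i;

	for (i = 0; i < NUM_POOLS; i++)
		if (pool_active[i] && need < pool_size[i])
			return i;
	return -1;
}

int main(void)
{
	printf("MTU 1500 -> pool %d\n", pool_for_mtu(1500));	/* 1: the 2 KiB pool */
	printf("MTU 9000 -> pool %d\n", pool_for_mtu(9000));	/* -1 until a larger pool is activated via sysfs */
	return 0;
}
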
 
diff --git a/drivers/net/ixgb/Makefile b/drivers/net/ixgb/Makefile
index 7c7aff1..a8a2d3d 100644
--- a/drivers/net/ixgb/Makefile
+++ b/drivers/net/ixgb/Makefile
@@ -1,7 +1,7 @@
 ################################################################################
 #
 # 
-# Copyright(c) 1999 - 2002 Intel Corporation. All rights reserved.
+# Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
 # 
 # This program is free software; you can redistribute it and/or modify it 
 # under the terms of the GNU General Public License as published by the Free 
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index c83271b..a83ef28 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   
-  Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+  Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
   
   This program is free software; you can redistribute it and/or modify it 
   under the terms of the GNU General Public License as published by the Free 
@@ -84,7 +84,12 @@
 #define IXGB_DBG(args...)
 #endif
 
-#define IXGB_ERR(args...) printk(KERN_ERR "ixgb: " args)
+#define PFX "ixgb: "
+#define DPRINTK(nlevel, klevel, fmt, args...) \
+	(void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
+	printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
+		__FUNCTION__ , ## args))
+
 
 /* TX/RX descriptor defines */
 #define DEFAULT_TXD	 256
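
DPRINTK above compiles down to a test of the NETIF_MSG_* class bit against adapter->msg_enable before calling printk, so message classes can be switched per adapter. A minimal sketch of the gating, assuming only the generic netif_msg_init() helper; the function below is illustrative, not part of the driver:

#include <linux/kernel.h>
#include <linux/netdevice.h>	/* NETIF_MSG_* bits, netif_msg_init() */

/* Illustrative: the real test lives inside the DPRINTK macro above. */
static void demo_msg_gating(void)
{
	u32 msg_enable = netif_msg_init(3, 3);	/* debug=3 -> DRV | PROBE | LINK */

	if (NETIF_MSG_PROBE & msg_enable)
		printk(KERN_ERR "probe-class messages are printed\n");
	if (!(NETIF_MSG_TX_ERR & msg_enable))
		printk(KERN_ERR "tx_err-class messages stay suppressed\n");
}

The module's new debug parameter feeds the first argument, so debug=16 enables every class while the default of 3 keeps only the driver, probe and link messages, matching the parameter description added further down.
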
@@ -175,6 +180,7 @@
 	uint64_t hw_csum_tx_good;
 	uint64_t hw_csum_tx_error;
 	uint32_t tx_int_delay;
+	uint32_t tx_timeout_count;
 	boolean_t tx_int_delay_enable;
 	boolean_t detect_tx_hung;
 
@@ -192,7 +198,9 @@
 
 	/* structs defined in ixgb_hw.h */
 	struct ixgb_hw hw;
+	u16 msg_enable;
 	struct ixgb_hw_stats stats;
+	uint32_t alloc_rx_buff_failed;
 #ifdef CONFIG_PCI_MSI
 	boolean_t have_msi;
 #endif
diff --git a/drivers/net/ixgb/ixgb_ee.c b/drivers/net/ixgb/ixgb_ee.c
index 661a46b..8357c55 100644
--- a/drivers/net/ixgb/ixgb_ee.c
+++ b/drivers/net/ixgb/ixgb_ee.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   
-  Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+  Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
   
   This program is free software; you can redistribute it and/or modify it 
   under the terms of the GNU General Public License as published by the Free 
diff --git a/drivers/net/ixgb/ixgb_ee.h b/drivers/net/ixgb/ixgb_ee.h
index 5190aa8..bf6fa22 100644
--- a/drivers/net/ixgb/ixgb_ee.h
+++ b/drivers/net/ixgb/ixgb_ee.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   
-  Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+  Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
   
   This program is free software; you can redistribute it and/or modify it 
   under the terms of the GNU General Public License as published by the Free 
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c
index d38ade5..cf19b89 100644
--- a/drivers/net/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ixgb/ixgb_ethtool.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   
-  Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+  Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
   
   This program is free software; you can redistribute it and/or modify it 
   under the terms of the GNU General Public License as published by the Free 
@@ -44,6 +44,8 @@
 extern void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
 extern void ixgb_update_stats(struct ixgb_adapter *adapter);
 
+#define IXGB_ALL_RAR_ENTRIES 16
+
 struct ixgb_stats {
 	char stat_string[ETH_GSTRING_LEN];
 	int sizeof_stat;
@@ -76,6 +78,7 @@
 	{"tx_heartbeat_errors", IXGB_STAT(net_stats.tx_heartbeat_errors)},
 	{"tx_window_errors", IXGB_STAT(net_stats.tx_window_errors)},
 	{"tx_deferred_ok", IXGB_STAT(stats.dc)},
+	{"tx_timeout_count", IXGB_STAT(tx_timeout_count) },
 	{"rx_long_length_errors", IXGB_STAT(stats.roc)},
 	{"rx_short_length_errors", IXGB_STAT(stats.ruc)},
 #ifdef NETIF_F_TSO
@@ -117,6 +120,16 @@
 	return 0;
 }
 
+static void ixgb_set_speed_duplex(struct net_device *netdev)
+{
+	struct ixgb_adapter *adapter = netdev_priv(netdev);
+	/* be optimistic about our link, since we were up before */
+	adapter->link_speed = 10000;
+	adapter->link_duplex = FULL_DUPLEX;
+	netif_carrier_on(netdev);
+	netif_wake_queue(netdev);
+}
+
 static int
 ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
 {
@@ -130,12 +143,7 @@
 		ixgb_down(adapter, TRUE);
 		ixgb_reset(adapter);
 		ixgb_up(adapter);
-		/* be optimistic about our link, since we were up before */
-		adapter->link_speed = 10000;
-		adapter->link_duplex = FULL_DUPLEX;
-		netif_carrier_on(netdev);
-		netif_wake_queue(netdev);
-		
+		ixgb_set_speed_duplex(netdev);
 	} else
 		ixgb_reset(adapter);
 
@@ -183,11 +191,7 @@
 	if(netif_running(adapter->netdev)) {
 		ixgb_down(adapter, TRUE);
 		ixgb_up(adapter);
-		/* be optimistic about our link, since we were up before */
-		adapter->link_speed = 10000;
-		adapter->link_duplex = FULL_DUPLEX;
-		netif_carrier_on(netdev);
-		netif_wake_queue(netdev);
+		ixgb_set_speed_duplex(netdev);
 	} else
 		ixgb_reset(adapter);
 		
@@ -212,11 +216,7 @@
 	if(netif_running(netdev)) {
 		ixgb_down(adapter,TRUE);
 		ixgb_up(adapter);
-		/* be optimistic about our link, since we were up before */
-		adapter->link_speed = 10000;
-		adapter->link_duplex = FULL_DUPLEX;
-		netif_carrier_on(netdev);
-		netif_wake_queue(netdev);
+		ixgb_set_speed_duplex(netdev);
 	} else
 		ixgb_reset(adapter);
 	return 0;
@@ -251,6 +251,19 @@
 } 
 #endif /* NETIF_F_TSO */
 
+static uint32_t
+ixgb_get_msglevel(struct net_device *netdev)
+{
+	struct ixgb_adapter *adapter = netdev_priv(netdev);
+	return adapter->msg_enable;
+}
+
+static void
+ixgb_set_msglevel(struct net_device *netdev, uint32_t data)
+{
+	struct ixgb_adapter *adapter = netdev_priv(netdev);
+	adapter->msg_enable = data;
+}
 #define IXGB_GET_STAT(_A_, _R_) _A_->stats._R_
 
 static int 
@@ -303,7 +316,7 @@
 	*reg++ = IXGB_READ_REG(hw, RXCSUM);	/*  20 */
 
 	/* there are 16 RAR entries in hardware, we only use 3 */
-	for(i = 0; i < 16; i++) {
+	for(i = 0; i < IXGB_ALL_RAR_ENTRIES; i++) {
 		*reg++ = IXGB_READ_REG_ARRAY(hw, RAL, (i << 1)); /*21,...,51 */
 		*reg++ = IXGB_READ_REG_ARRAY(hw, RAH, (i << 1)); /*22,...,52 */
 	}
@@ -593,11 +606,7 @@
 		adapter->tx_ring = tx_new;
 		if((err = ixgb_up(adapter)))
 			return err;
-		/* be optimistic about our link, since we were up before */
-		adapter->link_speed = 10000;
-		adapter->link_duplex = FULL_DUPLEX;
-		netif_carrier_on(netdev);
-		netif_wake_queue(netdev);
+		ixgb_set_speed_duplex(netdev);
 	}
 
 	return 0;
@@ -714,6 +723,8 @@
 	.set_tx_csum = ixgb_set_tx_csum,
 	.get_sg	= ethtool_op_get_sg,
 	.set_sg	= ethtool_op_set_sg,
+	.get_msglevel = ixgb_get_msglevel,
+	.set_msglevel = ixgb_set_msglevel,
 #ifdef NETIF_F_TSO
 	.get_tso = ethtool_op_get_tso,
 	.set_tso = ixgb_set_tso,
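
The new get_msglevel/set_msglevel hooks here (and the TSO hooks both here and in forcedeth) are reached from user space through the SIOCETHTOOL ioctl. A hedged user-space sketch, assuming the exported ethtool headers; the interface name and the message-level value are just examples:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int ethtool_set(int fd, const char *ifname, unsigned int cmd,
		       unsigned int data)
{
	struct ethtool_value ev = { .cmd = cmd, .data = data };
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&ev;
	return ioctl(fd, SIOCETHTOOL, &ifr);
}

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	/* ETHTOOL_STSO ends up in ixgb_set_tso()/nv_set_tso() above */
	if (ethtool_set(fd, "eth0", ETHTOOL_STSO, 1))
		perror("ETHTOOL_STSO");
	/* 0x7 = driver|probe|link classes, cf. the NETIF_MSG_* bits */
	if (ethtool_set(fd, "eth0", ETHTOOL_SMSGLVL, 0x7))
		perror("ETHTOOL_SMSGLVL");
	return 0;
}
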
diff --git a/drivers/net/ixgb/ixgb_hw.c b/drivers/net/ixgb/ixgb_hw.c
index 620cad4..f7fa10e 100644
--- a/drivers/net/ixgb/ixgb_hw.c
+++ b/drivers/net/ixgb/ixgb_hw.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   
-  Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+  Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
   
   This program is free software; you can redistribute it and/or modify it 
   under the terms of the GNU General Public License as published by the Free 
diff --git a/drivers/net/ixgb/ixgb_hw.h b/drivers/net/ixgb/ixgb_hw.h
index 382c630..cb45689 100644
--- a/drivers/net/ixgb/ixgb_hw.h
+++ b/drivers/net/ixgb/ixgb_hw.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   
-  Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+  Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
   
   This program is free software; you can redistribute it and/or modify it 
   under the terms of the GNU General Public License as published by the Free 
@@ -57,6 +57,7 @@
 typedef enum {
 	ixgb_media_type_unknown = 0,
 	ixgb_media_type_fiber = 1,
+	ixgb_media_type_copper = 2,
 	ixgb_num_media_types
 } ixgb_media_type;
 
diff --git a/drivers/net/ixgb/ixgb_ids.h b/drivers/net/ixgb/ixgb_ids.h
index aee207e..40a085f 100644
--- a/drivers/net/ixgb/ixgb_ids.h
+++ b/drivers/net/ixgb/ixgb_ids.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   
-  Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+  Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
   
   This program is free software; you can redistribute it and/or modify it 
   under the terms of the GNU General Public License as published by the Free 
@@ -43,6 +43,8 @@
 #define IXGB_SUBDEVICE_ID_A11F      0xA11F   
 #define IXGB_SUBDEVICE_ID_A01F      0xA01F   
 
-#endif /* #ifndef _IXGB_IDS_H_ */
+#define IXGB_DEVICE_ID_82597EX_CX4   0x109E
+#define IXGB_SUBDEVICE_ID_A00C  0xA00C
 
+#endif /* #ifndef _IXGB_IDS_H_ */
 /* End of File */
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index cfd67d8..57006fb 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   
-  Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+  Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
   
   This program is free software; you can redistribute it and/or modify it 
   under the terms of the GNU General Public License as published by the Free 
@@ -28,22 +28,6 @@
 
 #include "ixgb.h"
 
-/* Change Log
- * 1.0.96 04/19/05
- * - Make needlessly global code static -- bunk@stusta.de
- * - ethtool cleanup -- shemminger@osdl.org
- * - Support for MODULE_VERSION -- linville@tuxdriver.com
- * - add skb_header_cloned check to the tso path -- herbert@apana.org.au
- * 1.0.88 01/05/05
- * - include fix to the condition that determines when to quit NAPI - Robert Olsson
- * - use netif_poll_{disable/enable} to synchronize between NAPI and i/f up/down
- * 1.0.84 10/26/04
- * - reset buffer_info->dma in Tx resource cleanup logic
- * 1.0.83 10/12/04
- * - sparse cleanup - shemminger@osdl.org
- * - fix tx resource cleanup logic
- */
-
 char ixgb_driver_name[] = "ixgb";
 static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
 
@@ -52,9 +36,9 @@
 #else
 #define DRIVERNAPI "-NAPI"
 #endif
-#define DRV_VERSION		"1.0.100-k2"DRIVERNAPI
+#define DRV_VERSION		"1.0.109-k2"DRIVERNAPI
 char ixgb_driver_version[] = DRV_VERSION;
-static char ixgb_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
+static char ixgb_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
 
 /* ixgb_pci_tbl - PCI Device ID Table
  *
@@ -67,6 +51,8 @@
 static struct pci_device_id ixgb_pci_tbl[] = {
 	{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX,
 	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+	{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_CX4,
+	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
 	{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_SR,
 	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
 	{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_LR,  
@@ -148,6 +134,11 @@
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
 
+#define DEFAULT_DEBUG_LEVEL_SHIFT 3
+static int debug = DEFAULT_DEBUG_LEVEL_SHIFT;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
 /* some defines for controlling descriptor fetches in h/w */
 #define RXDCTL_WTHRESH_DEFAULT 16	/* chip writes back at this many or RXT0 */
 #define RXDCTL_PTHRESH_DEFAULT 0		/* chip considers prefetch below
@@ -196,7 +187,7 @@
  * @adapter: board private structure
  **/
 
-static inline void
+static void
 ixgb_irq_disable(struct ixgb_adapter *adapter)
 {
 	atomic_inc(&adapter->irq_sem);
@@ -210,7 +201,7 @@
  * @adapter: board private structure
  **/
 
-static inline void
+static void
 ixgb_irq_enable(struct ixgb_adapter *adapter)
 {
 	if(atomic_dec_and_test(&adapter->irq_sem)) {
@@ -231,6 +222,7 @@
 
 	/* hardware has been reset, we need to reload some things */
 
+	ixgb_rar_set(hw, netdev->dev_addr, 0);
 	ixgb_set_multi(netdev);
 
 	ixgb_restore_vlan(adapter);
@@ -240,6 +232,9 @@
 	ixgb_configure_rx(adapter);
 	ixgb_alloc_rx_buffers(adapter);
 
+	/* disable interrupts and get the hardware into a known state */
+	IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff);
+
 #ifdef CONFIG_PCI_MSI
 	{
 	boolean_t pcix = (IXGB_READ_REG(&adapter->hw, STATUS) & 
@@ -249,7 +244,7 @@
 	if (!pcix)
 	   adapter->have_msi = FALSE;
 	else if((err = pci_enable_msi(adapter->pdev))) {
-		printk (KERN_ERR
+		DPRINTK(PROBE, ERR,
 		 "Unable to allocate MSI interrupt Error: %d\n", err);
 		adapter->have_msi = FALSE;
 		/* proceed to try to request regular interrupt */
@@ -259,11 +254,11 @@
 #endif
 	if((err = request_irq(adapter->pdev->irq, &ixgb_intr,
 				  SA_SHIRQ | SA_SAMPLE_RANDOM,
-				  netdev->name, netdev)))
+			          netdev->name, netdev))) {
+		DPRINTK(PROBE, ERR,
+		 "Unable to allocate interrupt Error: %d\n", err);
 		return err;
-
-	/* disable interrupts and get the hardware into a known state */
-	IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff);
+	}
 
 	if((hw->max_frame_size != max_frame) ||
 		(hw->max_frame_size !=
@@ -285,11 +280,12 @@
 	}
 
 	mod_timer(&adapter->watchdog_timer, jiffies);
-	ixgb_irq_enable(adapter);
 
 #ifdef CONFIG_IXGB_NAPI
 	netif_poll_enable(netdev);
 #endif
+	ixgb_irq_enable(adapter);
+
 	return 0;
 }
 
@@ -326,7 +322,7 @@
 
 	ixgb_adapter_stop(&adapter->hw);
 	if(!ixgb_init_hw(&adapter->hw))
-		IXGB_DBG("ixgb_init_hw failed.\n");
+		DPRINTK(PROBE, ERR, "ixgb_init_hw failed.\n");
 }
 
 /**
@@ -363,7 +359,8 @@
 	} else {
 		if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) ||
 		   (err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))) {
-			IXGB_ERR("No usable DMA configuration, aborting\n");
+			printk(KERN_ERR
+			 "ixgb: No usable DMA configuration, aborting\n");
 			goto err_dma_mask;
 		}
 		pci_using_dac = 0;
@@ -388,6 +385,7 @@
 	adapter->netdev = netdev;
 	adapter->pdev = pdev;
 	adapter->hw.back = adapter;
+	adapter->msg_enable = netif_msg_init(debug, DEFAULT_DEBUG_LEVEL_SHIFT);
 
 	mmio_start = pci_resource_start(pdev, BAR_0);
 	mmio_len = pci_resource_len(pdev, BAR_0);
@@ -416,7 +414,7 @@
 	netdev->change_mtu = &ixgb_change_mtu;
 	ixgb_set_ethtool_ops(netdev);
 	netdev->tx_timeout = &ixgb_tx_timeout;
-	netdev->watchdog_timeo = HZ;
+	netdev->watchdog_timeo = 5 * HZ;
 #ifdef CONFIG_IXGB_NAPI
 	netdev->poll = &ixgb_clean;
 	netdev->weight = 64;
@@ -428,6 +426,7 @@
 	netdev->poll_controller = ixgb_netpoll;
 #endif
 
+	strcpy(netdev->name, pci_name(pdev));
 	netdev->mem_start = mmio_start;
 	netdev->mem_end = mmio_start + mmio_len;
 	netdev->base_addr = adapter->hw.io_base;
@@ -449,6 +448,9 @@
 #ifdef NETIF_F_TSO
 	netdev->features |= NETIF_F_TSO;
 #endif
+#ifdef NETIF_F_LLTX
+	netdev->features |= NETIF_F_LLTX;
+#endif
 
 	if(pci_using_dac)
 		netdev->features |= NETIF_F_HIGHDMA;
@@ -456,7 +458,7 @@
 	/* make sure the EEPROM is good */
 
 	if(!ixgb_validate_eeprom_checksum(&adapter->hw)) {
-		printk(KERN_ERR "The EEPROM Checksum Is Not Valid\n");
+		DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
 		err = -EIO;
 		goto err_eeprom;
 	}
@@ -465,6 +467,7 @@
 	memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
 
 	if(!is_valid_ether_addr(netdev->perm_addr)) {
+		DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
 		err = -EIO;
 		goto err_eeprom;
 	}
@@ -478,6 +481,7 @@
 	INIT_WORK(&adapter->tx_timeout_task,
 		  (void (*)(void *))ixgb_tx_timeout_task, netdev);
 
+	strcpy(netdev->name, "eth%d");
 	if((err = register_netdev(netdev)))
 		goto err_register;
 
@@ -486,8 +490,7 @@
 	netif_carrier_off(netdev);
 	netif_stop_queue(netdev);
 
-	printk(KERN_INFO "%s: Intel(R) PRO/10GbE Network Connection\n",
-		   netdev->name);
+	DPRINTK(PROBE, INFO, "Intel(R) PRO/10GbE Network Connection\n");
 	ixgb_check_options(adapter);
 	/* reset the hardware with the new settings */
 
@@ -557,17 +560,17 @@
 	hw->subsystem_vendor_id = pdev->subsystem_vendor;
 	hw->subsystem_id = pdev->subsystem_device;
 
-	adapter->rx_buffer_len = IXGB_RXBUFFER_2048;
-
 	hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
+	adapter->rx_buffer_len = hw->max_frame_size;
 
 	if((hw->device_id == IXGB_DEVICE_ID_82597EX)
-	   ||(hw->device_id == IXGB_DEVICE_ID_82597EX_LR)
-	   ||(hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
+	   || (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4)
+	   || (hw->device_id == IXGB_DEVICE_ID_82597EX_LR)
+	   || (hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
 			hw->mac_type = ixgb_82597;
 	else {
 		/* should never have loaded on this device */
-		printk(KERN_ERR "ixgb: unsupported device id\n");
+		DPRINTK(PROBE, ERR, "unsupported device id\n");
 	}
 
 	/* enable flow control to be programmed */
@@ -665,6 +668,8 @@
 	size = sizeof(struct ixgb_buffer) * txdr->count;
 	txdr->buffer_info = vmalloc(size);
 	if(!txdr->buffer_info) {
+		DPRINTK(PROBE, ERR,
+		 "Unable to allocate transmit descriptor ring memory\n");
 		return -ENOMEM;
 	}
 	memset(txdr->buffer_info, 0, size);
@@ -677,6 +682,8 @@
 	txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
 	if(!txdr->desc) {
 		vfree(txdr->buffer_info);
+		DPRINTK(PROBE, ERR,
+		 "Unable to allocate transmit descriptor memory\n");
 		return -ENOMEM;
 	}
 	memset(txdr->desc, 0, txdr->size);
@@ -750,6 +757,8 @@
 	size = sizeof(struct ixgb_buffer) * rxdr->count;
 	rxdr->buffer_info = vmalloc(size);
 	if(!rxdr->buffer_info) {
+		DPRINTK(PROBE, ERR,
+		 "Unable to allocate receive descriptor ring\n");
 		return -ENOMEM;
 	}
 	memset(rxdr->buffer_info, 0, size);
@@ -763,6 +772,8 @@
 
 	if(!rxdr->desc) {
 		vfree(rxdr->buffer_info);
+		DPRINTK(PROBE, ERR,
+		 "Unable to allocate receive descriptors\n");
 		return -ENOMEM;
 	}
 	memset(rxdr->desc, 0, rxdr->size);
@@ -794,21 +805,14 @@
 
 	rctl |= IXGB_RCTL_SECRC;
 
-	switch (adapter->rx_buffer_len) {
-	case IXGB_RXBUFFER_2048:
-	default:
+	if (adapter->rx_buffer_len <= IXGB_RXBUFFER_2048)
 		rctl |= IXGB_RCTL_BSIZE_2048;
-		break;
-	case IXGB_RXBUFFER_4096:
+	else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_4096)
 		rctl |= IXGB_RCTL_BSIZE_4096;
-		break;
-	case IXGB_RXBUFFER_8192:
+	else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_8192)
 		rctl |= IXGB_RCTL_BSIZE_8192;
-		break;
-	case IXGB_RXBUFFER_16384:
+	else if (adapter->rx_buffer_len <= IXGB_RXBUFFER_16384)
 		rctl |= IXGB_RCTL_BSIZE_16384;
-		break;
-	}
 
 	IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
 }
@@ -898,22 +902,25 @@
 	adapter->tx_ring.desc = NULL;
 }
 
-static inline void
+static void
 ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
 					struct ixgb_buffer *buffer_info)
 {
 	struct pci_dev *pdev = adapter->pdev;
-	if(buffer_info->dma) {
-		pci_unmap_page(pdev,
-			   buffer_info->dma,
-			   buffer_info->length,
-			   PCI_DMA_TODEVICE);
-		buffer_info->dma = 0;
-	}
-	if(buffer_info->skb) {
+
+	if (buffer_info->dma)
+		pci_unmap_page(pdev, buffer_info->dma, buffer_info->length,
+		               PCI_DMA_TODEVICE);
+
+	if (buffer_info->skb)
 		dev_kfree_skb_any(buffer_info->skb);
-		buffer_info->skb = NULL;
-	}
+
+	buffer_info->skb = NULL;
+	buffer_info->dma = 0;
+	buffer_info->time_stamp = 0;
+	/* these fields must always be initialized in tx
+	 * buffer_info->length = 0;
+	 * buffer_info->next_to_watch = 0; */
 }
 
 /**
@@ -1112,8 +1119,8 @@
 
 	if(adapter->hw.link_up) {
 		if(!netif_carrier_ok(netdev)) {
-			printk(KERN_INFO "ixgb: %s NIC Link is Up %d Mbps %s\n",
-				   netdev->name, 10000, "Full Duplex");
+			DPRINTK(LINK, INFO,
+			        "NIC Link is Up 10000 Mbps Full Duplex\n");
 			adapter->link_speed = 10000;
 			adapter->link_duplex = FULL_DUPLEX;
 			netif_carrier_on(netdev);
@@ -1123,9 +1130,7 @@
 		if(netif_carrier_ok(netdev)) {
 			adapter->link_speed = 0;
 			adapter->link_duplex = 0;
-			printk(KERN_INFO
-				   "ixgb: %s NIC Link is Down\n",
-				   netdev->name);
+			DPRINTK(LINK, INFO, "NIC Link is Down\n");
 			netif_carrier_off(netdev);
 			netif_stop_queue(netdev);
 
@@ -1158,7 +1163,7 @@
 #define IXGB_TX_FLAGS_VLAN		0x00000002
 #define IXGB_TX_FLAGS_TSO		0x00000004
 
-static inline int
+static int
 ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
 {
 #ifdef NETIF_F_TSO
@@ -1220,7 +1225,7 @@
 	return 0;
 }
 
-static inline boolean_t
+static boolean_t
 ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
 {
 	struct ixgb_context_desc *context_desc;
@@ -1258,7 +1263,7 @@
 #define IXGB_MAX_TXD_PWR	14
 #define IXGB_MAX_DATA_PER_TXD	(1<<IXGB_MAX_TXD_PWR)
 
-static inline int
+static int
 ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
 	    unsigned int first)
 {
@@ -1284,6 +1289,7 @@
 				size,
 				PCI_DMA_TODEVICE);
 		buffer_info->time_stamp = jiffies;
+		buffer_info->next_to_watch = 0;
 
 		len -= size;
 		offset += size;
@@ -1309,6 +1315,7 @@
 					size,
 					PCI_DMA_TODEVICE);
 			buffer_info->time_stamp = jiffies;
+			buffer_info->next_to_watch = 0;
 
 			len -= size;
 			offset += size;
@@ -1323,7 +1330,7 @@
 	return count;
 }
 
-static inline void
+static void
 ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags)
 {
 	struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
@@ -1395,13 +1402,26 @@
 		return 0;
 	}
 
+#ifdef NETIF_F_LLTX
+	local_irq_save(flags);
+	if (!spin_trylock(&adapter->tx_lock)) {
+		/* Collision - tell upper layer to requeue */
+		local_irq_restore(flags);
+		return NETDEV_TX_LOCKED;
+	}
+#else
 	spin_lock_irqsave(&adapter->tx_lock, flags);
+#endif
+
 	if(unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < DESC_NEEDED)) {
 		netif_stop_queue(netdev);
 		spin_unlock_irqrestore(&adapter->tx_lock, flags);
-		return 1;
+		return NETDEV_TX_BUSY;
 	}
+
+#ifndef NETIF_F_LLTX
 	spin_unlock_irqrestore(&adapter->tx_lock, flags);
+#endif
 
 	if(adapter->vlgrp && vlan_tx_tag_present(skb)) {
 		tx_flags |= IXGB_TX_FLAGS_VLAN;
@@ -1413,10 +1433,13 @@
 	tso = ixgb_tso(adapter, skb);
 	if (tso < 0) {
 		dev_kfree_skb_any(skb);
+#ifdef NETIF_F_LLTX
+		spin_unlock_irqrestore(&adapter->tx_lock, flags);
+#endif
 		return NETDEV_TX_OK;
 	}
 
-	if (tso)
+	if (likely(tso))
 		tx_flags |= IXGB_TX_FLAGS_TSO;
 	else if(ixgb_tx_csum(adapter, skb))
 		tx_flags |= IXGB_TX_FLAGS_CSUM;
@@ -1426,7 +1449,15 @@
 
 	netdev->trans_start = jiffies;
 
-	return 0;
+#ifdef NETIF_F_LLTX
+	/* Make sure there is space in the ring for the next send. */
+	if(unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < DESC_NEEDED))
+		netif_stop_queue(netdev);
+
+	spin_unlock_irqrestore(&adapter->tx_lock, flags);
+
+#endif
+	return NETDEV_TX_OK;
 }
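
The NETIF_F_LLTX changes above shift serialization into the driver: the transmit routine trylocks its own tx_lock, returns NETDEV_TX_LOCKED so the core requeues on contention, and stops the queue itself before unlocking when the ring has no room for another frame. A condensed sketch of that pattern; the adapter struct, the DESC_NEEDED value and the two ring helpers below are placeholders, not ixgb symbols:

#include <linux/netdevice.h>
#include <linux/spinlock.h>

#define DEMO_DESC_NEEDED 16			/* placeholder bound */

struct demo_adapter {				/* placeholder private data */
	spinlock_t tx_lock;
};

static int demo_ring_unused(struct demo_adapter *adapter)
{
	return 256;				/* stub: the real driver counts free descriptors */
}

static void demo_queue_skb(struct demo_adapter *adapter, struct sk_buff *skb)
{
	/* stub: map buffers, fill descriptors, bump the tail register */
}

static int demo_lltx_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct demo_adapter *adapter = netdev_priv(netdev);
	unsigned long flags;

	local_irq_save(flags);
	if (!spin_trylock(&adapter->tx_lock)) {
		/* collided with the Tx-clean path: let the stack requeue */
		local_irq_restore(flags);
		return NETDEV_TX_LOCKED;
	}

	if (demo_ring_unused(adapter) < DEMO_DESC_NEEDED) {
		netif_stop_queue(netdev);
		spin_unlock_irqrestore(&adapter->tx_lock, flags);
		return NETDEV_TX_BUSY;
	}

	demo_queue_skb(adapter, skb);

	/* stop the queue ourselves if the next frame would not fit */
	if (demo_ring_unused(adapter) < DEMO_DESC_NEEDED)
		netif_stop_queue(netdev);
	spin_unlock_irqrestore(&adapter->tx_lock, flags);
	return NETDEV_TX_OK;
}

The matching ixgb_clean_tx_irq hunk further down takes the same lock only when the queue is actually stopped, which is why its wake-up path was reshuffled.
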
 
 /**
@@ -1448,6 +1479,7 @@
 {
 	struct ixgb_adapter *adapter = netdev_priv(netdev);
 
+	adapter->tx_timeout_count++;
 	ixgb_down(adapter, TRUE);
 	ixgb_up(adapter);
 }
@@ -1486,28 +1518,15 @@
 
 	if((max_frame < IXGB_MIN_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH)
 	   || (max_frame > IXGB_MAX_JUMBO_FRAME_SIZE + ENET_FCS_LENGTH)) {
-		IXGB_ERR("Invalid MTU setting\n");
+		DPRINTK(PROBE, ERR, "Invalid MTU setting %d\n", new_mtu);
 		return -EINVAL;
 	}
 
-	if((max_frame <= IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH)
-	   || (max_frame <= IXGB_RXBUFFER_2048)) {
-		adapter->rx_buffer_len = IXGB_RXBUFFER_2048;
-
-	} else if(max_frame <= IXGB_RXBUFFER_4096) {
-		adapter->rx_buffer_len = IXGB_RXBUFFER_4096;
-
-	} else if(max_frame <= IXGB_RXBUFFER_8192) {
-		adapter->rx_buffer_len = IXGB_RXBUFFER_8192;
-
-	} else {
-		adapter->rx_buffer_len = IXGB_RXBUFFER_16384;
-	}
+	adapter->rx_buffer_len = max_frame;
 
 	netdev->mtu = new_mtu;
 
-	if(old_max_frame != max_frame && netif_running(netdev)) {
-
+	if ((old_max_frame != max_frame) && netif_running(netdev)) {
 		ixgb_down(adapter, TRUE);
 		ixgb_up(adapter);
 	}
@@ -1765,23 +1784,43 @@
 
 	tx_ring->next_to_clean = i;
 
-	spin_lock(&adapter->tx_lock);
-	if(cleaned && netif_queue_stopped(netdev) && netif_carrier_ok(netdev) &&
-	   (IXGB_DESC_UNUSED(tx_ring) > IXGB_TX_QUEUE_WAKE)) {
-
-		netif_wake_queue(netdev);
+	if (unlikely(netif_queue_stopped(netdev))) {
+		spin_lock(&adapter->tx_lock);
+		if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev) &&
+		    (IXGB_DESC_UNUSED(tx_ring) > IXGB_TX_QUEUE_WAKE))
+			netif_wake_queue(netdev);
+		spin_unlock(&adapter->tx_lock);
 	}
-	spin_unlock(&adapter->tx_lock);
 
 	if(adapter->detect_tx_hung) {
 		/* detect a transmit hang in hardware, this serializes the
 		 * check with the clearing of time_stamp and movement of i */
 		adapter->detect_tx_hung = FALSE;
-		if(tx_ring->buffer_info[i].dma &&
-		   time_after(jiffies, tx_ring->buffer_info[i].time_stamp + HZ)
+		if (tx_ring->buffer_info[eop].dma &&
+		   time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + HZ)
 		   && !(IXGB_READ_REG(&adapter->hw, STATUS) &
-			IXGB_STATUS_TXOFF))
+		        IXGB_STATUS_TXOFF)) {
+			/* detected Tx unit hang */
+			DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
+					"  TDH                  <%x>\n"
+					"  TDT                  <%x>\n"
+					"  next_to_use          <%x>\n"
+					"  next_to_clean        <%x>\n"
+					"buffer_info[next_to_clean]\n"
+					"  time_stamp           <%lx>\n"
+					"  next_to_watch        <%x>\n"
+					"  jiffies              <%lx>\n"
+					"  next_to_watch.status <%x>\n",
+				IXGB_READ_REG(&adapter->hw, TDH),
+				IXGB_READ_REG(&adapter->hw, TDT),
+				tx_ring->next_to_use,
+				tx_ring->next_to_clean,
+				tx_ring->buffer_info[eop].time_stamp,
+				eop,
+				jiffies,
+				eop_desc->status);
 			netif_stop_queue(netdev);
+		}
 	}
 
 	return cleaned;
@@ -1794,7 +1833,7 @@
  * @sk_buff: socket buffer with received data
  **/
 
-static inline void
+static void
 ixgb_rx_checksum(struct ixgb_adapter *adapter,
 		 struct ixgb_rx_desc *rx_desc,
 		 struct sk_buff *skb)
@@ -1858,6 +1897,7 @@
 #endif
 		status = rx_desc->status;
 		skb = buffer_info->skb;
+		buffer_info->skb = NULL;
 
 		prefetch(skb->data);
 
@@ -1902,6 +1942,26 @@
 			goto rxdesc_done;
 		}
 
+		/* code added for copybreak; this should improve
+		 * performance for small packets with large amounts
+		 * of reassembly being done in the stack */
+#define IXGB_CB_LENGTH 256
+		if (length < IXGB_CB_LENGTH) {
+			struct sk_buff *new_skb =
+			    dev_alloc_skb(length + NET_IP_ALIGN);
+			if (new_skb) {
+				skb_reserve(new_skb, NET_IP_ALIGN);
+				new_skb->dev = netdev;
+				memcpy(new_skb->data - NET_IP_ALIGN,
+				       skb->data - NET_IP_ALIGN,
+				       length + NET_IP_ALIGN);
+				/* save the skb in buffer_info as good */
+				buffer_info->skb = skb;
+				skb = new_skb;
+			}
+		}
+		/* end copybreak code */
+
 		/* Good Receive */
 		skb_put(skb, length);
 
@@ -1931,7 +1991,6 @@
 rxdesc_done:
 		/* clean up descriptor, might be written over by hw */
 		rx_desc->status = 0;
-		buffer_info->skb = NULL;
 
 		/* use prefetched values */
 		rx_desc = next_rxd;
@@ -1971,12 +2030,18 @@
 
 	/* leave three descriptors unused */
 	while(--cleancount > 2) {
-		rx_desc = IXGB_RX_DESC(*rx_ring, i);
+		/* recycle! it's good for you */
+		if (!(skb = buffer_info->skb))
+			skb = dev_alloc_skb(adapter->rx_buffer_len
+			                    + NET_IP_ALIGN);
+		else {
+			skb_trim(skb, 0);
+			goto map_skb;
+		}
 
-		skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN);
-
-		if(unlikely(!skb)) {
+		if (unlikely(!skb)) {
 			/* Better luck next round */
+			adapter->alloc_rx_buff_failed++;
 			break;
 		}
 
@@ -1990,33 +2055,36 @@
 
 		buffer_info->skb = skb;
 		buffer_info->length = adapter->rx_buffer_len;
-		buffer_info->dma =
-			pci_map_single(pdev,
-				   skb->data,
-				   adapter->rx_buffer_len,
-				   PCI_DMA_FROMDEVICE);
+map_skb:
+		buffer_info->dma = pci_map_single(pdev,
+		                                  skb->data,
+		                                  adapter->rx_buffer_len,
+		                                  PCI_DMA_FROMDEVICE);
 
+		rx_desc = IXGB_RX_DESC(*rx_ring, i);
 		rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
 		/* guarantee DD bit not set now before h/w gets descriptor
 		 * this is the rest of the workaround for h/w double 
 		 * writeback. */
 		rx_desc->status = 0;
 
-		if((i & ~(num_group_tail_writes- 1)) == i) {
-			/* Force memory writes to complete before letting h/w
-			 * know there are new descriptors to fetch.  (Only
-			 * applicable for weak-ordered memory model archs,
-			 * such as IA-64). */
-			wmb();
-
-			IXGB_WRITE_REG(&adapter->hw, RDT, i);
-		}
 
 		if(++i == rx_ring->count) i = 0;
 		buffer_info = &rx_ring->buffer_info[i];
 	}
 
-	rx_ring->next_to_use = i;
+	if (likely(rx_ring->next_to_use != i)) {
+		rx_ring->next_to_use = i;
+		if (unlikely(i-- == 0))
+			i = (rx_ring->count - 1);
+
+		/* Force memory writes to complete before letting h/w
+		 * know there are new descriptors to fetch.  (Only
+		 * applicable for weak-ordered memory model archs, such
+		 * as IA-64). */
+		wmb();
+		IXGB_WRITE_REG(&adapter->hw, RDT, i);
+	}
 }
 
 /**
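
The reworked ixgb_alloc_rx_buffers() above now writes the RDT tail register once per pass rather than every few descriptors, and it backs the index up by one so the tail never points at the slot the driver will fill next. A small sketch of just that wraparound arithmetic, with the ring size as a stand-in constant:

#include <stdio.h>

#define RING_COUNT 256			/* stand-in for rx_ring->count */

/* Tail value to hand the hardware after advancing next_to_use to i:
 * one descriptor behind, wrapping at the ring boundary. */
static unsigned int rdt_for(unsigned int i)
{
	return (i == 0) ? RING_COUNT - 1 : i - 1;
}

int main(void)
{
	printf("%u %u\n", rdt_for(0), rdt_for(10));	/* prints "255 9" */
	return 0;
}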
diff --git a/drivers/net/ixgb/ixgb_osdep.h b/drivers/net/ixgb/ixgb_osdep.h
index dba2048..ee982fe 100644
--- a/drivers/net/ixgb/ixgb_osdep.h
+++ b/drivers/net/ixgb/ixgb_osdep.h
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   
-  Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+  Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
   
   This program is free software; you can redistribute it and/or modify it 
   under the terms of the GNU General Public License as published by the Free 
diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c
index 8a83dfd..39fbed2 100644
--- a/drivers/net/ixgb/ixgb_param.c
+++ b/drivers/net/ixgb/ixgb_param.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   
-  Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
+  Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
   
   This program is free software; you can redistribute it and/or modify it 
   under the terms of the GNU General Public License as published by the Free 
@@ -76,7 +76,7 @@
  *  - 2 - Tx only, generate PAUSE frames but ignore them on receive
  *  - 3 - Full Flow Control Support
  *
- * Default Value: Read flow control settings from the EEPROM
+ * Default Value: 2 - Tx only (silicon bug avoidance)
  */
 
 IXGB_PARAM(FlowControl, "Flow Control setting");
@@ -137,7 +137,7 @@
  *
  * Valid Range: 1 - 65535 
  *
- * Default Value:  256 (0x100)
+ * Default Value:  65535 (0xffff) (we'll send an xon if we recover)
  */
 
 IXGB_PARAM(FCReqTimeout, "Flow Control Request Timeout");
@@ -165,8 +165,6 @@
 
 #define XSUMRX_DEFAULT		 OPTION_ENABLED
 
-#define FLOW_CONTROL_FULL	   ixgb_fc_full
-#define FLOW_CONTROL_DEFAULT  FLOW_CONTROL_FULL
 #define DEFAULT_FCRTL	  		0x28000
 #define DEFAULT_FCRTH			0x30000
 #define MIN_FCRTL			      0
@@ -174,9 +172,9 @@
 #define MIN_FCRTH			      8
 #define MAX_FCRTH			0x3FFF0
 
-#define DEFAULT_FCPAUSE		  	0x100	/* this may be too long */
 #define MIN_FCPAUSE			      1
 #define MAX_FCPAUSE			 0xffff
+#define DEFAULT_FCPAUSE		  	 0xFFFF /* this may be too long */
 
 struct ixgb_option {
 	enum { enable_option, range_option, list_option } type;
@@ -336,7 +334,7 @@
 			.type = list_option,
 			.name = "Flow Control",
 			.err  = "reading default settings from EEPROM",
-			.def  = ixgb_fc_full,
+			.def  = ixgb_fc_tx_pause,
 			.arg  = { .l = { .nr = LIST_LEN(fc_list),
 					 .p = fc_list }}
 		};
@@ -365,8 +363,8 @@
 		} else {
 			adapter->hw.fc.high_water = opt.def;
 		}
-		if(!(adapter->hw.fc.type & ixgb_fc_rx_pause) )
-			printk (KERN_INFO 
+		if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) )
+			printk (KERN_INFO
 				"Ignoring RxFCHighThresh when no RxFC\n");
 	}
 	{ /* Receive Flow Control Low Threshold */
@@ -385,8 +383,8 @@
 		} else {
 			adapter->hw.fc.low_water = opt.def;
 		}
-		if(!(adapter->hw.fc.type & ixgb_fc_rx_pause) )
-			printk (KERN_INFO 
+		if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) )
+			printk (KERN_INFO
 				"Ignoring RxFCLowThresh when no RxFC\n");
 	}
 	{ /* Flow Control Pause Time Request*/
@@ -406,12 +404,12 @@
 		} else {
 			adapter->hw.fc.pause_time = opt.def;
 		}
-		if(!(adapter->hw.fc.type & ixgb_fc_rx_pause) )
-			printk (KERN_INFO 
+		if (!(adapter->hw.fc.type & ixgb_fc_tx_pause) )
+			printk (KERN_INFO
 				"Ignoring FCReqTimeout when no RxFC\n");
 	}
 	/* high low and spacing check for rx flow control thresholds */
-	if (adapter->hw.fc.type & ixgb_fc_rx_pause) {
+	if (adapter->hw.fc.type & ixgb_fc_tx_pause) {
 		/* high must be greater than low */
 		if (adapter->hw.fc.high_water < (adapter->hw.fc.low_water + 8)) {
 			/* set defaults */
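
The cross-check at the end of this hunk requires the receive flow-control high-water mark to sit at least 8 above the low-water mark, on top of the per-value bounds defined earlier in the file. A compact sketch of that rule, using only the bounds visible above (FCRTL's upper bound is elided here):

#include <stdbool.h>
#include <stdint.h>

#define MIN_FCRTH      8		/* from the #defines above */
#define MAX_FCRTH 0x3FFF0

/* The driver's sanity rule: high-water in range and at least 8 above
 * the low-water mark (the FCRTL bounds check is omitted in this sketch). */
static bool fc_thresholds_ok(uint32_t low, uint32_t high)
{
	return high >= MIN_FCRTH && high <= MAX_FCRTH && high >= low + 8;
}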
diff --git a/drivers/net/myri10ge/Makefile b/drivers/net/myri10ge/Makefile
new file mode 100644
index 0000000..5df8916
--- /dev/null
+++ b/drivers/net/myri10ge/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Myricom Myri-10G ethernet driver
+#
+
+obj-$(CONFIG_MYRI10GE) += myri10ge.o
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
new file mode 100644
index 0000000..e1feb58
--- /dev/null
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -0,0 +1,2869 @@
+/*************************************************************************
+ * myri10ge.c: Myricom Myri-10G Ethernet driver.
+ *
+ * Copyright (C) 2005, 2006 Myricom, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Myricom, Inc. nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *
+ * If the eeprom on your board is not recent enough, you will need to get a
+ * newer firmware image at:
+ *   http://www.myri.com/scs/download-Myri10GE.html
+ *
+ * Contact Information:
+ *   <help@myri.com>
+ *   Myricom, Inc., 325N Santa Anita Avenue, Arcadia, CA 91006
+ *************************************************************************/
+
+#include <linux/tcp.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/inet.h>
+#include <linux/in.h>
+#include <linux/ethtool.h>
+#include <linux/firmware.h>
+#include <linux/delay.h>
+#include <linux/version.h>
+#include <linux/timer.h>
+#include <linux/vmalloc.h>
+#include <linux/crc32.h>
+#include <linux/moduleparam.h>
+#include <linux/io.h>
+#include <net/checksum.h>
+#include <asm/byteorder.h>
+#include <asm/io.h>
+#include <asm/processor.h>
+#ifdef CONFIG_MTRR
+#include <asm/mtrr.h>
+#endif
+
+#include "myri10ge_mcp.h"
+#include "myri10ge_mcp_gen_header.h"
+
+#define MYRI10GE_VERSION_STR "1.0.0"
+
+MODULE_DESCRIPTION("Myricom 10G driver (10GbE)");
+MODULE_AUTHOR("Maintainer: help@myri.com");
+MODULE_VERSION(MYRI10GE_VERSION_STR);
+MODULE_LICENSE("Dual BSD/GPL");
+
+#define MYRI10GE_MAX_ETHER_MTU 9014
+
+#define MYRI10GE_ETH_STOPPED 0
+#define MYRI10GE_ETH_STOPPING 1
+#define MYRI10GE_ETH_STARTING 2
+#define MYRI10GE_ETH_RUNNING 3
+#define MYRI10GE_ETH_OPEN_FAILED 4
+
+#define MYRI10GE_EEPROM_STRINGS_SIZE 256
+#define MYRI10GE_MAX_SEND_DESC_TSO ((65536 / 2048) * 2)
+
+#define MYRI10GE_NO_CONFIRM_DATA 0xffffffff
+#define MYRI10GE_NO_RESPONSE_RESULT 0xffffffff
+
+struct myri10ge_rx_buffer_state {
+	struct sk_buff *skb;
+	 DECLARE_PCI_UNMAP_ADDR(bus)
+	 DECLARE_PCI_UNMAP_LEN(len)
+};
+
+struct myri10ge_tx_buffer_state {
+	struct sk_buff *skb;
+	int last;
+	 DECLARE_PCI_UNMAP_ADDR(bus)
+	 DECLARE_PCI_UNMAP_LEN(len)
+};
+
+struct myri10ge_cmd {
+	u32 data0;
+	u32 data1;
+	u32 data2;
+};
+
+struct myri10ge_rx_buf {
+	struct mcp_kreq_ether_recv __iomem *lanai;	/* lanai ptr for recv ring */
+	u8 __iomem *wc_fifo;	/* w/c rx dma addr fifo address */
+	struct mcp_kreq_ether_recv *shadow;	/* host shadow of recv ring */
+	struct myri10ge_rx_buffer_state *info;
+	int cnt;
+	int alloc_fail;
+	int mask;		/* number of rx slots -1 */
+};
+
+struct myri10ge_tx_buf {
+	struct mcp_kreq_ether_send __iomem *lanai;	/* lanai ptr for sendq */
+	u8 __iomem *wc_fifo;	/* w/c send fifo address */
+	struct mcp_kreq_ether_send *req_list;	/* host shadow of sendq */
+	char *req_bytes;
+	struct myri10ge_tx_buffer_state *info;
+	int mask;		/* number of transmit slots -1  */
+	int boundary;		/* boundary transmits cannot cross */
+	int req ____cacheline_aligned;	/* transmit slots submitted     */
+	int pkt_start;		/* packets started */
+	int done ____cacheline_aligned;	/* transmit slots completed     */
+	int pkt_done;		/* packets completed */
+};
+
+struct myri10ge_rx_done {
+	struct mcp_slot *entry;
+	dma_addr_t bus;
+	int cnt;
+	int idx;
+};
+
+struct myri10ge_priv {
+	int running;		/* running?             */
+	int csum_flag;		/* rx_csums?            */
+	struct myri10ge_tx_buf tx;	/* transmit ring        */
+	struct myri10ge_rx_buf rx_small;
+	struct myri10ge_rx_buf rx_big;
+	struct myri10ge_rx_done rx_done;
+	int small_bytes;
+	struct net_device *dev;
+	struct net_device_stats stats;
+	u8 __iomem *sram;
+	int sram_size;
+	unsigned long board_span;
+	unsigned long iomem_base;
+	u32 __iomem *irq_claim;
+	u32 __iomem *irq_deassert;
+	char *mac_addr_string;
+	struct mcp_cmd_response *cmd;
+	dma_addr_t cmd_bus;
+	struct mcp_irq_data *fw_stats;
+	dma_addr_t fw_stats_bus;
+	struct pci_dev *pdev;
+	int msi_enabled;
+	unsigned int link_state;
+	unsigned int rdma_tags_available;
+	int intr_coal_delay;
+	u32 __iomem *intr_coal_delay_ptr;
+	int mtrr;
+	int wake_queue;
+	int stop_queue;
+	int down_cnt;
+	wait_queue_head_t down_wq;
+	struct work_struct watchdog_work;
+	struct timer_list watchdog_timer;
+	int watchdog_tx_done;
+	int watchdog_resets;
+	int tx_linearized;
+	int pause;
+	char *fw_name;
+	char eeprom_strings[MYRI10GE_EEPROM_STRINGS_SIZE];
+	char fw_version[128];
+	u8 mac_addr[6];		/* eeprom mac address */
+	unsigned long serial_number;
+	int vendor_specific_offset;
+	u32 devctl;
+	u16 msi_flags;
+	u32 pm_state[16];
+	u32 read_dma;
+	u32 write_dma;
+	u32 read_write_dma;
+};
+
+static char *myri10ge_fw_unaligned = "myri10ge_ethp_z8e.dat";
+static char *myri10ge_fw_aligned = "myri10ge_eth_z8e.dat";
+
+static char *myri10ge_fw_name = NULL;
+module_param(myri10ge_fw_name, charp, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(myri10ge_fw_name, "Firmware image name\n");
+
+static int myri10ge_ecrc_enable = 1;
+module_param(myri10ge_ecrc_enable, int, S_IRUGO);
+MODULE_PARM_DESC(myri10ge_ecrc_enable, "Enable Extended CRC on PCI-E\n");
+
+static int myri10ge_max_intr_slots = 1024;
+module_param(myri10ge_max_intr_slots, int, S_IRUGO);
+MODULE_PARM_DESC(myri10ge_max_intr_slots, "Interrupt queue slots\n");
+
+static int myri10ge_small_bytes = -1;	/* -1 == auto */
+module_param(myri10ge_small_bytes, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(myri10ge_small_bytes, "Threshold of small packets\n");
+
+static int myri10ge_msi = 1;	/* enable msi by default */
+module_param(myri10ge_msi, int, S_IRUGO);
+MODULE_PARM_DESC(myri10ge_msi, "Enable Message Signalled Interrupts\n");
+
+static int myri10ge_intr_coal_delay = 25;
+module_param(myri10ge_intr_coal_delay, int, S_IRUGO);
+MODULE_PARM_DESC(myri10ge_intr_coal_delay, "Interrupt coalescing delay\n");
+
+static int myri10ge_flow_control = 1;
+module_param(myri10ge_flow_control, int, S_IRUGO);
+MODULE_PARM_DESC(myri10ge_flow_control, "Pause parameter\n");
+
+static int myri10ge_deassert_wait = 1;
+module_param(myri10ge_deassert_wait, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(myri10ge_deassert_wait,
+		 "Wait when deasserting legacy interrupts\n");
+
+static int myri10ge_force_firmware = 0;
+module_param(myri10ge_force_firmware, int, S_IRUGO);
+MODULE_PARM_DESC(myri10ge_force_firmware,
+		 "Force firmware to assume aligned completions\n");
+
+static int myri10ge_skb_cross_4k = 0;
+module_param(myri10ge_skb_cross_4k, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(myri10ge_skb_cross_4k,
+		 "Can a small skb cross a 4KB boundary?\n");
+
+static int myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN;
+module_param(myri10ge_initial_mtu, int, S_IRUGO);
+MODULE_PARM_DESC(myri10ge_initial_mtu, "Initial MTU\n");
+
+static int myri10ge_napi_weight = 64;
+module_param(myri10ge_napi_weight, int, S_IRUGO);
+MODULE_PARM_DESC(myri10ge_napi_weight, "Set NAPI weight\n");
+
+static int myri10ge_watchdog_timeout = 1;
+module_param(myri10ge_watchdog_timeout, int, S_IRUGO);
+MODULE_PARM_DESC(myri10ge_watchdog_timeout, "Set watchdog timeout\n");
+
+static int myri10ge_max_irq_loops = 1048576;
+module_param(myri10ge_max_irq_loops, int, S_IRUGO);
+MODULE_PARM_DESC(myri10ge_max_irq_loops,
+		 "Set stuck legacy IRQ detection threshold\n");
+
+#define MYRI10GE_FW_OFFSET 1024*1024
+#define MYRI10GE_HIGHPART_TO_U32(X) \
+(sizeof (X) == 8) ? ((u32)((u64)(X) >> 32)) : (0)
+#define MYRI10GE_LOWPART_TO_U32(X) ((u32)(X))
+
+#define myri10ge_pio_copy(to,from,size) __iowrite64_copy(to,from,size/8)
+
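
MYRI10GE_HIGHPART_TO_U32 and MYRI10GE_LOWPART_TO_U32 above split a DMA address into the two 32-bit halves the firmware expects, with the high half collapsing to zero when dma_addr_t is only 4 bytes wide. A user-space sketch of the same split, with a made-up 64-bit address standing in for a real bus address:

#include <inttypes.h>
#include <stdio.h>

/* Same idea as MYRI10GE_HIGHPART_TO_U32 / MYRI10GE_LOWPART_TO_U32:
 * the high word only exists when the address type is 8 bytes wide. */
#define HIGHPART_TO_U32(X) ((sizeof(X) == 8) ? (uint32_t)((uint64_t)(X) >> 32) : 0)
#define LOWPART_TO_U32(X)  ((uint32_t)(X))

int main(void)
{
	uint64_t bus = 0x12345678abcdef00ULL;	/* made-up DMA address */

	printf("high=%08" PRIx32 " low=%08" PRIx32 "\n",
	       HIGHPART_TO_U32(bus), LOWPART_TO_U32(bus));
	return 0;
}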
+static int
+myri10ge_send_cmd(struct myri10ge_priv *mgp, u32 cmd,
+		  struct myri10ge_cmd *data, int atomic)
+{
+	struct mcp_cmd *buf;
+	char buf_bytes[sizeof(*buf) + 8];
+	struct mcp_cmd_response *response = mgp->cmd;
+	char __iomem *cmd_addr = mgp->sram + MXGEFW_CMD_OFFSET;
+	u32 dma_low, dma_high, result, value;
+	int sleep_total = 0;
+
+	/* ensure buf is aligned to 8 bytes */
+	buf = (struct mcp_cmd *)ALIGN((unsigned long)buf_bytes, 8);
+
+	buf->data0 = htonl(data->data0);
+	buf->data1 = htonl(data->data1);
+	buf->data2 = htonl(data->data2);
+	buf->cmd = htonl(cmd);
+	dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
+	dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);
+
+	buf->response_addr.low = htonl(dma_low);
+	buf->response_addr.high = htonl(dma_high);
+	response->result = MYRI10GE_NO_RESPONSE_RESULT;
+	mb();
+	myri10ge_pio_copy(cmd_addr, buf, sizeof(*buf));
+
+	/* wait up to 15ms. Longest command is the DMA benchmark,
+	 * which is capped at 5ms, but runs from a timeout handler
+	 * that runs every 7.8ms. So a 15ms timeout leaves us with
+	 * a 2.2ms margin
+	 */
+	if (atomic) {
+		/* if atomic is set, do not sleep,
+		 * and try to get the completion quickly
+		 * (1ms will be enough for those commands) */
+		for (sleep_total = 0;
+		     sleep_total < 1000
+		     && response->result == MYRI10GE_NO_RESPONSE_RESULT;
+		     sleep_total += 10)
+			udelay(10);
+	} else {
+		/* use msleep for most commands */
+		for (sleep_total = 0;
+		     sleep_total < 15
+		     && response->result == MYRI10GE_NO_RESPONSE_RESULT;
+		     sleep_total++)
+			msleep(1);
+	}
+
+	result = ntohl(response->result);
+	value = ntohl(response->data);
+	if (result != MYRI10GE_NO_RESPONSE_RESULT) {
+		if (result == 0) {
+			data->data0 = value;
+			return 0;
+		} else {
+			dev_err(&mgp->pdev->dev,
+				"command %d failed, result = %d\n",
+				cmd, result);
+			return -ENXIO;
+		}
+	}
+
+	dev_err(&mgp->pdev->dev, "command %d timed out, result = %d\n",
+		cmd, result);
+	return -EAGAIN;
+}
+
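
myri10ge_send_cmd() above is a synchronous command/response exchange: the request is PIO-copied into SRAM and the driver then polls a response word that the firmware DMAs back, busy-waiting for atomic callers and sleeping otherwise. A user-space sketch of the non-atomic polling shape, with usleep() standing in for msleep() and the sentinel mirroring MYRI10GE_NO_RESPONSE_RESULT:

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

#define NO_RESPONSE 0xffffffffu		/* MYRI10GE_NO_RESPONSE_RESULT analogue */

/* Poll *resp until the firmware overwrites the sentinel, giving up after
 * roughly 'ms' milliseconds; the atomic branch above busy-waits with
 * udelay() instead of sleeping. */
static bool wait_for_response(volatile uint32_t *resp, int ms)
{
	for (int i = 0; i < ms && *resp == NO_RESPONSE; i++)
		usleep(1000);		/* stands in for msleep(1) */
	return *resp != NO_RESPONSE;
}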
+/*
+ * The eeprom strings on the lanaiX have the format
+ * SN=x\0
+ * MAC=x:x:x:x:x:x\0
+ * PT:ddd mmm xx xx:xx:xx xx\0
+ * PV:ddd mmm xx xx:xx:xx xx\0
+ */
+static int myri10ge_read_mac_addr(struct myri10ge_priv *mgp)
+{
+	char *ptr, *limit;
+	int i;
+
+	ptr = mgp->eeprom_strings;
+	limit = mgp->eeprom_strings + MYRI10GE_EEPROM_STRINGS_SIZE;
+
+	while (*ptr != '\0' && ptr < limit) {
+		if (memcmp(ptr, "MAC=", 4) == 0) {
+			ptr += 4;
+			mgp->mac_addr_string = ptr;
+			for (i = 0; i < 6; i++) {
+				if ((ptr + 2) > limit)
+					goto abort;
+				mgp->mac_addr[i] =
+				    simple_strtoul(ptr, &ptr, 16);
+				ptr += 1;
+			}
+		}
+		if (memcmp((const void *)ptr, "SN=", 3) == 0) {
+			ptr += 3;
+			mgp->serial_number = simple_strtoul(ptr, &ptr, 10);
+		}
+		while (ptr < limit && *ptr++) ;
+	}
+
+	return 0;
+
+abort:
+	dev_err(&mgp->pdev->dev, "failed to parse eeprom_strings\n");
+	return -ENXIO;
+}
+
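
myri10ge_read_mac_addr() above walks the NUL-separated EEPROM strings looking for the MAC= record described in the comment. A user-space sketch of just that field parse, with strtoul standing in for simple_strtoul and the surrounding string walk omitted:

#include <stdlib.h>
#include <string.h>

/* Parse a "MAC=xx:xx:xx:xx:xx:xx" record into six bytes.  Returns 0 on
 * success, -1 if the record prefix is missing. */
static int parse_mac(const char *s, unsigned char mac[6])
{
	if (strncmp(s, "MAC=", 4) != 0)
		return -1;
	s += 4;
	for (int i = 0; i < 6; i++) {
		char *end;

		mac[i] = (unsigned char)strtoul(s, &end, 16);
		s = end + 1;		/* step past ':' (or the final NUL) */
	}
	return 0;
}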
+/*
+ * Enable or disable periodic RDMAs from the host to make certain
+ * chipsets resend dropped PCIe messages
+ */
+
+static void myri10ge_dummy_rdma(struct myri10ge_priv *mgp, int enable)
+{
+	char __iomem *submit;
+	u32 buf[16];
+	u32 dma_low, dma_high;
+	int i;
+
+	/* clear confirmation addr */
+	mgp->cmd->data = 0;
+	mb();
+
+	/* send a rdma command to the PCIe engine, and wait for the
+	 * response in the confirmation address.  The firmware should
+	 * write a -1 there to indicate it is alive and well
+	 */
+	dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
+	dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);
+
+	buf[0] = htonl(dma_high);	/* confirm addr MSW */
+	buf[1] = htonl(dma_low);	/* confirm addr LSW */
+	buf[2] = htonl(MYRI10GE_NO_CONFIRM_DATA);	/* confirm data */
+	buf[3] = htonl(dma_high);	/* dummy addr MSW */
+	buf[4] = htonl(dma_low);	/* dummy addr LSW */
+	buf[5] = htonl(enable);	/* enable? */
+
+	submit = mgp->sram + 0xfc01c0;
+
+	myri10ge_pio_copy(submit, &buf, sizeof(buf));
+	for (i = 0; mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 20; i++)
+		msleep(1);
+	if (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA)
+		dev_err(&mgp->pdev->dev, "dummy rdma %s failed\n",
+			(enable ? "enable" : "disable"));
+}
+
+static int
+myri10ge_validate_firmware(struct myri10ge_priv *mgp,
+			   struct mcp_gen_header *hdr)
+{
+	struct device *dev = &mgp->pdev->dev;
+	int major, minor;
+
+	/* check firmware type */
+	if (ntohl(hdr->mcp_type) != MCP_TYPE_ETH) {
+		dev_err(dev, "Bad firmware type: 0x%x\n", ntohl(hdr->mcp_type));
+		return -EINVAL;
+	}
+
+	/* save firmware version for ethtool */
+	strncpy(mgp->fw_version, hdr->version, sizeof(mgp->fw_version));
+
+	sscanf(mgp->fw_version, "%d.%d", &major, &minor);
+
+	if (!(major == MXGEFW_VERSION_MAJOR && minor == MXGEFW_VERSION_MINOR)) {
+		dev_err(dev, "Found firmware version %s\n", mgp->fw_version);
+		dev_err(dev, "Driver needs %d.%d\n", MXGEFW_VERSION_MAJOR,
+			MXGEFW_VERSION_MINOR);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int myri10ge_load_hotplug_firmware(struct myri10ge_priv *mgp, u32 * size)
+{
+	unsigned crc, reread_crc;
+	const struct firmware *fw;
+	struct device *dev = &mgp->pdev->dev;
+	struct mcp_gen_header *hdr;
+	size_t hdr_offset;
+	int status;
+
+	if ((status = request_firmware(&fw, mgp->fw_name, dev)) < 0) {
+		dev_err(dev, "Unable to load %s firmware image via hotplug\n",
+			mgp->fw_name);
+		status = -EINVAL;
+		goto abort_with_nothing;
+	}
+
+	/* check size */
+
+	if (fw->size >= mgp->sram_size - MYRI10GE_FW_OFFSET ||
+	    fw->size < MCP_HEADER_PTR_OFFSET + 4) {
+		dev_err(dev, "Firmware size invalid:%d\n", (int)fw->size);
+		status = -EINVAL;
+		goto abort_with_fw;
+	}
+
+	/* check id */
+	hdr_offset = ntohl(*(u32 *) (fw->data + MCP_HEADER_PTR_OFFSET));
+	if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > fw->size) {
+		dev_err(dev, "Bad firmware file\n");
+		status = -EINVAL;
+		goto abort_with_fw;
+	}
+	hdr = (void *)(fw->data + hdr_offset);
+
+	status = myri10ge_validate_firmware(mgp, hdr);
+	if (status != 0)
+		goto abort_with_fw;
+
+	crc = crc32(~0, fw->data, fw->size);
+	if (mgp->tx.boundary == 2048) {
+		/* Avoid PCI burst on chipset with unaligned completions. */
+		int i;
+		__iomem u32 *ptr = (__iomem u32 *) (mgp->sram +
+						    MYRI10GE_FW_OFFSET);
+		for (i = 0; i < fw->size / 4; i++) {
+			__raw_writel(((u32 *) fw->data)[i], ptr + i);
+			wmb();
+		}
+	} else {
+		myri10ge_pio_copy(mgp->sram + MYRI10GE_FW_OFFSET, fw->data,
+				  fw->size);
+	}
+	/* corruption checking is good for parity recovery and buggy chipsets */
+	memcpy_fromio(fw->data, mgp->sram + MYRI10GE_FW_OFFSET, fw->size);
+	reread_crc = crc32(~0, fw->data, fw->size);
+	if (crc != reread_crc) {
+		dev_err(dev, "CRC failed(fw-len=%u), got 0x%x (expect 0x%x)\n",
+			(unsigned)fw->size, reread_crc, crc);
+		status = -EIO;
+		goto abort_with_fw;
+	}
+	*size = (u32) fw->size;
+
+abort_with_fw:
+	release_firmware(fw);
+
+abort_with_nothing:
+	return status;
+}
+
+static int myri10ge_adopt_running_firmware(struct myri10ge_priv *mgp)
+{
+	struct mcp_gen_header *hdr;
+	struct device *dev = &mgp->pdev->dev;
+	const size_t bytes = sizeof(struct mcp_gen_header);
+	size_t hdr_offset;
+	int status;
+
+	/* find running firmware header */
+	hdr_offset = ntohl(__raw_readl(mgp->sram + MCP_HEADER_PTR_OFFSET));
+
+	if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > mgp->sram_size) {
+		dev_err(dev, "Running firmware has bad header offset (%d)\n",
+			(int)hdr_offset);
+		return -EIO;
+	}
+
+	/* copy header of running firmware from SRAM to host memory to
+	 * validate firmware */
+	hdr = kmalloc(bytes, GFP_KERNEL);
+	if (hdr == NULL) {
+		dev_err(dev, "could not malloc firmware hdr\n");
+		return -ENOMEM;
+	}
+	memcpy_fromio(hdr, mgp->sram + hdr_offset, bytes);
+	status = myri10ge_validate_firmware(mgp, hdr);
+	kfree(hdr);
+	return status;
+}
+
+static int myri10ge_load_firmware(struct myri10ge_priv *mgp)
+{
+	char __iomem *submit;
+	u32 buf[16];
+	u32 dma_low, dma_high, size;
+	int status, i;
+
+	size = 0;
+	status = myri10ge_load_hotplug_firmware(mgp, &size);
+	if (status) {
+		dev_warn(&mgp->pdev->dev, "hotplug firmware loading failed\n");
+
+		/* Do not attempt to adopt firmware if there
+		 * was a bad crc */
+		if (status == -EIO)
+			return status;
+
+		status = myri10ge_adopt_running_firmware(mgp);
+		if (status != 0) {
+			dev_err(&mgp->pdev->dev,
+				"failed to adopt running firmware\n");
+			return status;
+		}
+		dev_info(&mgp->pdev->dev,
+			 "Successfully adopted running firmware\n");
+		if (mgp->tx.boundary == 4096) {
+			dev_warn(&mgp->pdev->dev,
+				 "Using firmware currently running on NIC"
+				 ".  For optimal\n");
+			dev_warn(&mgp->pdev->dev,
+				 "performance consider loading optimized "
+				 "firmware\n");
+			dev_warn(&mgp->pdev->dev, "via hotplug\n");
+		}
+
+		mgp->fw_name = "adopted";
+		mgp->tx.boundary = 2048;
+		return status;
+	}
+
+	/* clear confirmation addr */
+	mgp->cmd->data = 0;
+	mb();
+
+	/* send a reload command to the bootstrap MCP, and wait for the
+	 *  response in the confirmation address.  The firmware should
+	 * write a -1 there to indicate it is alive and well
+	 */
+	dma_low = MYRI10GE_LOWPART_TO_U32(mgp->cmd_bus);
+	dma_high = MYRI10GE_HIGHPART_TO_U32(mgp->cmd_bus);
+
+	buf[0] = htonl(dma_high);	/* confirm addr MSW */
+	buf[1] = htonl(dma_low);	/* confirm addr LSW */
+	buf[2] = htonl(MYRI10GE_NO_CONFIRM_DATA);	/* confirm data */
+
+	/* FIX: All newest firmware should un-protect the bottom of
+	 * the sram before handoff. However, the very first interfaces
+	 * do not. Therefore the handoff copy must skip the first 8 bytes
+	 */
+	buf[3] = htonl(MYRI10GE_FW_OFFSET + 8);	/* where the code starts */
+	buf[4] = htonl(size - 8);	/* length of code */
+	buf[5] = htonl(8);	/* where to copy to */
+	buf[6] = htonl(0);	/* where to jump to */
+
+	submit = mgp->sram + 0xfc0000;
+
+	myri10ge_pio_copy(submit, &buf, sizeof(buf));
+	mb();
+	msleep(1);
+	mb();
+	i = 0;
+	while (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA && i < 20) {
+		msleep(1);
+		i++;
+	}
+	if (mgp->cmd->data != MYRI10GE_NO_CONFIRM_DATA) {
+		dev_err(&mgp->pdev->dev, "handoff failed\n");
+		return -ENXIO;
+	}
+	dev_info(&mgp->pdev->dev, "handoff confirmed\n");
+	myri10ge_dummy_rdma(mgp, mgp->tx.boundary != 4096);
+
+	return 0;
+}
+
+static int myri10ge_update_mac_address(struct myri10ge_priv *mgp, u8 * addr)
+{
+	struct myri10ge_cmd cmd;
+	int status;
+
+	cmd.data0 = ((addr[0] << 24) | (addr[1] << 16)
+		     | (addr[2] << 8) | addr[3]);
+
+	cmd.data1 = ((addr[4] << 8) | (addr[5]));
+
+	status = myri10ge_send_cmd(mgp, MXGEFW_SET_MAC_ADDRESS, &cmd, 0);
+	return status;
+}
+
+static int myri10ge_change_pause(struct myri10ge_priv *mgp, int pause)
+{
+	struct myri10ge_cmd cmd;
+	int status, ctl;
+
+	ctl = pause ? MXGEFW_ENABLE_FLOW_CONTROL : MXGEFW_DISABLE_FLOW_CONTROL;
+	status = myri10ge_send_cmd(mgp, ctl, &cmd, 0);
+
+	if (status) {
+		printk(KERN_ERR
+		       "myri10ge: %s: Failed to set flow control mode\n",
+		       mgp->dev->name);
+		return status;
+	}
+	mgp->pause = pause;
+	return 0;
+}
+
+static void
+myri10ge_change_promisc(struct myri10ge_priv *mgp, int promisc, int atomic)
+{
+	struct myri10ge_cmd cmd;
+	int status, ctl;
+
+	ctl = promisc ? MXGEFW_ENABLE_PROMISC : MXGEFW_DISABLE_PROMISC;
+	status = myri10ge_send_cmd(mgp, ctl, &cmd, atomic);
+	if (status)
+		printk(KERN_ERR "myri10ge: %s: Failed to set promisc mode\n",
+		       mgp->dev->name);
+}
+
+static int myri10ge_reset(struct myri10ge_priv *mgp)
+{
+	struct myri10ge_cmd cmd;
+	int status;
+	size_t bytes;
+	u32 len;
+
+	/* try to send a reset command to the card to see if it
+	 * is alive */
+	memset(&cmd, 0, sizeof(cmd));
+	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_RESET, &cmd, 0);
+	if (status != 0) {
+		dev_err(&mgp->pdev->dev, "failed reset\n");
+		return -ENXIO;
+	}
+
+	/* Now exchange information about interrupts  */
+
+	bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry);
+	memset(mgp->rx_done.entry, 0, bytes);
+	cmd.data0 = (u32) bytes;
+	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd, 0);
+	cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->rx_done.bus);
+	cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->rx_done.bus);
+	status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_INTRQ_DMA, &cmd, 0);
+
+	status |=
+	    myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_IRQ_ACK_OFFSET, &cmd, 0);
+	mgp->irq_claim = (__iomem u32 *) (mgp->sram + cmd.data0);
+	if (!mgp->msi_enabled) {
+		status |= myri10ge_send_cmd
+		    (mgp, MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET, &cmd, 0);
+		mgp->irq_deassert = (__iomem u32 *) (mgp->sram + cmd.data0);
+
+	}
+	status |= myri10ge_send_cmd
+	    (mgp, MXGEFW_CMD_GET_INTR_COAL_DELAY_OFFSET, &cmd, 0);
+	mgp->intr_coal_delay_ptr = (__iomem u32 *) (mgp->sram + cmd.data0);
+	if (status != 0) {
+		dev_err(&mgp->pdev->dev, "failed set interrupt parameters\n");
+		return status;
+	}
+	__raw_writel(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);
+
+	/* Run a small DMA test.
+	 * The magic multipliers to the length tell the firmware
+	 * to do DMA read, write, or read+write tests.  The
+	 * results are returned in cmd.data0.  The upper 16
+	 * bits or the return is the number of transfers completed.
+	 * The lower 16 bits is the time in 0.5us ticks that the
+	 * transfers took to complete.
+	 */
+
+	len = mgp->tx.boundary;
+
+	cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->rx_done.bus);
+	cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->rx_done.bus);
+	cmd.data2 = len * 0x10000;
+	status = myri10ge_send_cmd(mgp, MXGEFW_DMA_TEST, &cmd, 0);
+	if (status == 0)
+		mgp->read_dma = ((cmd.data0 >> 16) * len * 2) /
+		    (cmd.data0 & 0xffff);
+	else
+		dev_warn(&mgp->pdev->dev, "DMA read benchmark failed: %d\n",
+			 status);
+	cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->rx_done.bus);
+	cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->rx_done.bus);
+	cmd.data2 = len * 0x1;
+	status = myri10ge_send_cmd(mgp, MXGEFW_DMA_TEST, &cmd, 0);
+	if (status == 0)
+		mgp->write_dma = ((cmd.data0 >> 16) * len * 2) /
+		    (cmd.data0 & 0xffff);
+	else
+		dev_warn(&mgp->pdev->dev, "DMA write benchmark failed: %d\n",
+			 status);
+
+	cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->rx_done.bus);
+	cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->rx_done.bus);
+	cmd.data2 = len * 0x10001;
+	status = myri10ge_send_cmd(mgp, MXGEFW_DMA_TEST, &cmd, 0);
+	if (status == 0)
+		mgp->read_write_dma = ((cmd.data0 >> 16) * len * 2 * 2) /
+		    (cmd.data0 & 0xffff);
+	else
+		dev_warn(&mgp->pdev->dev,
+			 "DMA read/write benchmark failed: %d\n", status);
+
+	memset(mgp->rx_done.entry, 0, bytes);
+
+	/* reset mcp/driver shared state back to 0 */
+	mgp->tx.req = 0;
+	mgp->tx.done = 0;
+	mgp->tx.pkt_start = 0;
+	mgp->tx.pkt_done = 0;
+	mgp->rx_big.cnt = 0;
+	mgp->rx_small.cnt = 0;
+	mgp->rx_done.idx = 0;
+	mgp->rx_done.cnt = 0;
+	status = myri10ge_update_mac_address(mgp, mgp->dev->dev_addr);
+	myri10ge_change_promisc(mgp, 0, 0);
+	myri10ge_change_pause(mgp, mgp->pause);
+	return status;
+}
+
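
The DMA test in myri10ge_reset() packs its result into cmd.data0: transfers completed in the upper 16 bits and elapsed time in 0.5us ticks in the lower 16, which is why the driver computes (transfers * len * 2) / ticks (and doubles it again for the read+write case). A small helper showing that conversion, which lands in the units the *_dma_bw_MBs ethtool counters report:

#include <stdint.h>

/* data0: transfers completed in the top 16 bits, time in 0.5us ticks in
 * the bottom 16; len is the per-transfer size in bytes.  The result is
 * bytes per microsecond, i.e. roughly MB/s. */
static uint32_t dma_bw_mbs(uint32_t data0, uint32_t len)
{
	uint32_t transfers = data0 >> 16;
	uint32_t half_us_ticks = data0 & 0xffff;

	if (half_us_ticks == 0)
		return 0;
	return (transfers * len * 2) / half_us_ticks;
}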
+static inline void
+myri10ge_submit_8rx(struct mcp_kreq_ether_recv __iomem * dst,
+		    struct mcp_kreq_ether_recv *src)
+{
+	u32 low;
+
+	low = src->addr_low;
+	src->addr_low = DMA_32BIT_MASK;
+	myri10ge_pio_copy(dst, src, 8 * sizeof(*src));
+	mb();
+	src->addr_low = low;
+	__raw_writel(low, &dst->addr_low);
+	mb();
+}
+
+/*
+ * Set of routines to get a new receive buffer.  Any buffer which
+ * crosses a 4KB boundary must start on a 4KB boundary due to PCIe
+ * wdma restrictions. We also try to align any smaller allocation to
+ * at least a 16 byte boundary for efficiency.  We assume the linux
+ * memory allocator works by powers of 2, and will not return memory
+ * smaller than 2KB which crosses a 4KB boundary.  If it does, we fall
+ * back to allocating 2x as much space as required.
+ *
+ * We intend to replace large (>4KB) skb allocations by using
+ * pages directly and building a fraglist in the near future.
+ */
+
+static inline struct sk_buff *myri10ge_alloc_big(int bytes)
+{
+	struct sk_buff *skb;
+	unsigned long data, roundup;
+
+	skb = dev_alloc_skb(bytes + 4096 + MXGEFW_PAD);
+	if (skb == NULL)
+		return NULL;
+
+	/* Correct skb->truesize so that socket buffer
+	 * accounting is not confused by the rounding we must
+	 * do to satisfy alignment constraints.
+	 */
+	skb->truesize -= 4096;
+
+	data = (unsigned long)(skb->data);
+	roundup = (-data) & (4095);
+	skb_reserve(skb, roundup);
+	return skb;
+}
+
+/* Allocate 2x as much space as required and use whichever portion
+ * does not cross a 4KB boundary */
+static inline struct sk_buff *myri10ge_alloc_small_safe(unsigned int bytes)
+{
+	struct sk_buff *skb;
+	unsigned long data, boundary;
+
+	skb = dev_alloc_skb(2 * (bytes + MXGEFW_PAD) - 1);
+	if (unlikely(skb == NULL))
+		return NULL;
+
+	/* Correct skb->truesize so that socket buffer
+	 * accounting is not confused by the rounding we must
+	 * do to satisfy alignment constraints.
+	 */
+	skb->truesize -= bytes + MXGEFW_PAD;
+
+	data = (unsigned long)(skb->data);
+	boundary = (data + 4095UL) & ~4095UL;
+	if ((boundary - data) >= (bytes + MXGEFW_PAD))
+		return skb;
+
+	skb_reserve(skb, boundary - data);
+	return skb;
+}
+
+/* Allocate just enough space, and verify that the allocated
+ * space does not cross a 4KB boundary */
+static inline struct sk_buff *myri10ge_alloc_small(int bytes)
+{
+	struct sk_buff *skb;
+	unsigned long roundup, data, end;
+
+	skb = dev_alloc_skb(bytes + 16 + MXGEFW_PAD);
+	if (unlikely(skb == NULL))
+		return NULL;
+
+	/* Round allocated buffer to 16 byte boundary */
+	data = (unsigned long)(skb->data);
+	roundup = (-data) & 15UL;
+	skb_reserve(skb, roundup);
+	/* Verify that the data buffer does not cross a page boundary */
+	data = (unsigned long)(skb->data);
+	end = data + bytes + MXGEFW_PAD - 1;
+	if (unlikely(((end >> 12) != (data >> 12)) && (data & 4095UL))) {
+		printk(KERN_NOTICE
+		       "myri10ge_alloc_small: small skb crossed 4KB boundary\n");
+		myri10ge_skb_cross_4k = 1;
+		dev_kfree_skb_any(skb);
+		skb = myri10ge_alloc_small_safe(bytes);
+	}
+	return skb;
+}
+
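
myri10ge_alloc_small() above rounds the skb data pointer up to a 16-byte boundary and then checks whether the usable region would cross a 4KB page, falling back to myri10ge_alloc_small_safe() when it would. The two pieces of pointer arithmetic, pulled out into a stand-alone sketch:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Bytes needed to round addr up to the next 16-byte boundary
 * (the skb_reserve() amount in myri10ge_alloc_small). */
static uintptr_t pad_to_16(uintptr_t addr)
{
	return (-addr) & 15;
}

/* True if [addr, addr + len) crosses a 4KB boundary without starting on
 * one -- the condition that triggers the "safe" double-sized allocation. */
static bool crosses_4k(uintptr_t addr, size_t len)
{
	uintptr_t end = addr + len - 1;

	return ((end >> 12) != (addr >> 12)) && (addr & 4095);
}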
+static inline int
+myri10ge_getbuf(struct myri10ge_rx_buf *rx, struct pci_dev *pdev, int bytes,
+		int idx)
+{
+	struct sk_buff *skb;
+	dma_addr_t bus;
+	int len, retval = 0;
+
+	bytes += VLAN_HLEN;	/* account for 802.1q vlan tag */
+
+	if ((bytes + MXGEFW_PAD) > (4096 - 16) /* linux overhead */ )
+		skb = myri10ge_alloc_big(bytes);
+	else if (myri10ge_skb_cross_4k)
+		skb = myri10ge_alloc_small_safe(bytes);
+	else
+		skb = myri10ge_alloc_small(bytes);
+
+	if (unlikely(skb == NULL)) {
+		rx->alloc_fail++;
+		retval = -ENOBUFS;
+		goto done;
+	}
+
+	/* set len so that it only covers the area we
+	 * need mapped for DMA */
+	len = bytes + MXGEFW_PAD;
+
+	bus = pci_map_single(pdev, skb->data, len, PCI_DMA_FROMDEVICE);
+	rx->info[idx].skb = skb;
+	pci_unmap_addr_set(&rx->info[idx], bus, bus);
+	pci_unmap_len_set(&rx->info[idx], len, len);
+	rx->shadow[idx].addr_low = htonl(MYRI10GE_LOWPART_TO_U32(bus));
+	rx->shadow[idx].addr_high = htonl(MYRI10GE_HIGHPART_TO_U32(bus));
+
+done:
+	/* copy 8 descriptors (64-bytes) to the mcp at a time */
+	if ((idx & 7) == 7) {
+		if (rx->wc_fifo == NULL)
+			myri10ge_submit_8rx(&rx->lanai[idx - 7],
+					    &rx->shadow[idx - 7]);
+		else {
+			mb();
+			myri10ge_pio_copy(rx->wc_fifo,
+					  &rx->shadow[idx - 7], 64);
+		}
+	}
+	return retval;
+}
+
+static inline void myri10ge_vlan_ip_csum(struct sk_buff *skb, u16 hw_csum)
+{
+	struct vlan_hdr *vh = (struct vlan_hdr *)(skb->data);
+
+	if ((skb->protocol == ntohs(ETH_P_8021Q)) &&
+	    (vh->h_vlan_encapsulated_proto == htons(ETH_P_IP) ||
+	     vh->h_vlan_encapsulated_proto == htons(ETH_P_IPV6))) {
+		skb->csum = hw_csum;
+		skb->ip_summed = CHECKSUM_HW;
+	}
+}
+
+static inline unsigned long
+myri10ge_rx_done(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
+		 int bytes, int len, int csum)
+{
+	dma_addr_t bus;
+	struct sk_buff *skb;
+	int idx, unmap_len;
+
+	idx = rx->cnt & rx->mask;
+	rx->cnt++;
+
+	/* save a pointer to the received skb */
+	skb = rx->info[idx].skb;
+	bus = pci_unmap_addr(&rx->info[idx], bus);
+	unmap_len = pci_unmap_len(&rx->info[idx], len);
+
+	/* try to replace the received skb */
+	if (myri10ge_getbuf(rx, mgp->pdev, bytes, idx)) {
+		/* drop the frame -- the old skbuf is re-cycled */
+		mgp->stats.rx_dropped += 1;
+		return 0;
+	}
+
+	/* unmap the recvd skb */
+	pci_unmap_single(mgp->pdev, bus, unmap_len, PCI_DMA_FROMDEVICE);
+
+	/* mcp implicitly skips the first MXGEFW_PAD bytes so that the packet is properly
+	 * aligned */
+	skb_reserve(skb, MXGEFW_PAD);
+
+	/* set the length of the frame */
+	skb_put(skb, len);
+
+	skb->protocol = eth_type_trans(skb, mgp->dev);
+	skb->dev = mgp->dev;
+	if (mgp->csum_flag) {
+		if ((skb->protocol == ntohs(ETH_P_IP)) ||
+		    (skb->protocol == ntohs(ETH_P_IPV6))) {
+			skb->csum = ntohs((u16) csum);
+			skb->ip_summed = CHECKSUM_HW;
+		} else
+			myri10ge_vlan_ip_csum(skb, ntohs((u16) csum));
+	}
+
+	netif_receive_skb(skb);
+	mgp->dev->last_rx = jiffies;
+	return 1;
+}
+
+static inline void myri10ge_tx_done(struct myri10ge_priv *mgp, int mcp_index)
+{
+	struct pci_dev *pdev = mgp->pdev;
+	struct myri10ge_tx_buf *tx = &mgp->tx;
+	struct sk_buff *skb;
+	int idx, len;
+	int limit = 0;
+
+	while (tx->pkt_done != mcp_index) {
+		idx = tx->done & tx->mask;
+		skb = tx->info[idx].skb;
+
+		/* Mark as free */
+		tx->info[idx].skb = NULL;
+		if (tx->info[idx].last) {
+			tx->pkt_done++;
+			tx->info[idx].last = 0;
+		}
+		tx->done++;
+		len = pci_unmap_len(&tx->info[idx], len);
+		pci_unmap_len_set(&tx->info[idx], len, 0);
+		if (skb) {
+			mgp->stats.tx_bytes += skb->len;
+			mgp->stats.tx_packets++;
+			dev_kfree_skb_irq(skb);
+			if (len)
+				pci_unmap_single(pdev,
+						 pci_unmap_addr(&tx->info[idx],
+								bus), len,
+						 PCI_DMA_TODEVICE);
+		} else {
+			if (len)
+				pci_unmap_page(pdev,
+					       pci_unmap_addr(&tx->info[idx],
+							      bus), len,
+					       PCI_DMA_TODEVICE);
+		}
+
+		/* limit potential for livelock by only handling
+		 * 2 full tx rings per call */
+		if (unlikely(++limit > 2 * tx->mask))
+			break;
+	}
+	/* start the queue if we've stopped it */
+	if (netif_queue_stopped(mgp->dev)
+	    && tx->req - tx->done < (tx->mask >> 1)) {
+		mgp->wake_queue++;
+		netif_wake_queue(mgp->dev);
+	}
+}
+
+static inline void myri10ge_clean_rx_done(struct myri10ge_priv *mgp, int *limit)
+{
+	struct myri10ge_rx_done *rx_done = &mgp->rx_done;
+	unsigned long rx_bytes = 0;
+	unsigned long rx_packets = 0;
+	unsigned long rx_ok;
+
+	int idx = rx_done->idx;
+	int cnt = rx_done->cnt;
+	u16 length;
+	u16 checksum;
+
+	while (rx_done->entry[idx].length != 0 && *limit != 0) {
+		length = ntohs(rx_done->entry[idx].length);
+		rx_done->entry[idx].length = 0;
+		checksum = ntohs(rx_done->entry[idx].checksum);
+		if (length <= mgp->small_bytes)
+			rx_ok = myri10ge_rx_done(mgp, &mgp->rx_small,
+						 mgp->small_bytes,
+						 length, checksum);
+		else
+			rx_ok = myri10ge_rx_done(mgp, &mgp->rx_big,
+						 mgp->dev->mtu + ETH_HLEN,
+						 length, checksum);
+		rx_packets += rx_ok;
+		rx_bytes += rx_ok * (unsigned long)length;
+		cnt++;
+		idx = cnt & (myri10ge_max_intr_slots - 1);
+
+		/* limit potential for livelock by only handling a
+		 * limited number of frames. */
+		(*limit)--;
+	}
+	rx_done->idx = idx;
+	rx_done->cnt = cnt;
+	mgp->stats.rx_packets += rx_packets;
+	mgp->stats.rx_bytes += rx_bytes;
+}
+
+static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp)
+{
+	struct mcp_irq_data *stats = mgp->fw_stats;
+
+	if (unlikely(stats->stats_updated)) {
+		if (mgp->link_state != stats->link_up) {
+			mgp->link_state = stats->link_up;
+			if (mgp->link_state) {
+				printk(KERN_INFO "myri10ge: %s: link up\n",
+				       mgp->dev->name);
+				netif_carrier_on(mgp->dev);
+			} else {
+				printk(KERN_INFO "myri10ge: %s: link down\n",
+				       mgp->dev->name);
+				netif_carrier_off(mgp->dev);
+			}
+		}
+		if (mgp->rdma_tags_available !=
+		    ntohl(mgp->fw_stats->rdma_tags_available)) {
+			mgp->rdma_tags_available =
+			    ntohl(mgp->fw_stats->rdma_tags_available);
+			printk(KERN_WARNING "myri10ge: %s: RDMA timed out! "
+			       "%d tags left\n", mgp->dev->name,
+			       mgp->rdma_tags_available);
+		}
+		mgp->down_cnt += stats->link_down;
+		if (stats->link_down)
+			wake_up(&mgp->down_wq);
+	}
+}
+
+static int myri10ge_poll(struct net_device *netdev, int *budget)
+{
+	struct myri10ge_priv *mgp = netdev_priv(netdev);
+	struct myri10ge_rx_done *rx_done = &mgp->rx_done;
+	int limit, orig_limit, work_done;
+
+	/* process as many rx events as NAPI will allow */
+	limit = min(*budget, netdev->quota);
+	orig_limit = limit;
+	myri10ge_clean_rx_done(mgp, &limit);
+	work_done = orig_limit - limit;
+	*budget -= work_done;
+	netdev->quota -= work_done;
+
+	if (rx_done->entry[rx_done->idx].length == 0 || !netif_running(netdev)) {
+		netif_rx_complete(netdev);
+		__raw_writel(htonl(3), mgp->irq_claim);
+		return 0;
+	}
+	return 1;
+}
+
+static irqreturn_t myri10ge_intr(int irq, void *arg, struct pt_regs *regs)
+{
+	struct myri10ge_priv *mgp = arg;
+	struct mcp_irq_data *stats = mgp->fw_stats;
+	struct myri10ge_tx_buf *tx = &mgp->tx;
+	u32 send_done_count;
+	int i;
+
+	/* make sure it is our IRQ, and that the DMA has finished */
+	if (unlikely(!stats->valid))
+		return (IRQ_NONE);
+
+	/* low bit indicates receives are present, so schedule
+	 * napi poll handler */
+	if (stats->valid & 1)
+		netif_rx_schedule(mgp->dev);
+
+	if (!mgp->msi_enabled) {
+		__raw_writel(0, mgp->irq_deassert);
+		if (!myri10ge_deassert_wait)
+			stats->valid = 0;
+		mb();
+	} else
+		stats->valid = 0;
+
+	/* Wait for IRQ line to go low, if using INTx */
+	i = 0;
+	while (1) {
+		i++;
+		/* check for transmit completes and receives */
+		send_done_count = ntohl(stats->send_done_count);
+		if (send_done_count != tx->pkt_done)
+			myri10ge_tx_done(mgp, (int)send_done_count);
+		if (unlikely(i > myri10ge_max_irq_loops)) {
+			printk(KERN_WARNING "myri10ge: %s: irq stuck?\n",
+			       mgp->dev->name);
+			stats->valid = 0;
+			schedule_work(&mgp->watchdog_work);
+		}
+		if (likely(stats->valid == 0))
+			break;
+		cpu_relax();
+		barrier();
+	}
+
+	myri10ge_check_statblock(mgp);
+
+	__raw_writel(htonl(3), mgp->irq_claim + 1);
+	return (IRQ_HANDLED);
+}
+
+static int
+myri10ge_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
+{
+	cmd->autoneg = AUTONEG_DISABLE;
+	cmd->speed = SPEED_10000;
+	cmd->duplex = DUPLEX_FULL;
+	return 0;
+}
+
+static void
+myri10ge_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info)
+{
+	struct myri10ge_priv *mgp = netdev_priv(netdev);
+
+	strlcpy(info->driver, "myri10ge", sizeof(info->driver));
+	strlcpy(info->version, MYRI10GE_VERSION_STR, sizeof(info->version));
+	strlcpy(info->fw_version, mgp->fw_version, sizeof(info->fw_version));
+	strlcpy(info->bus_info, pci_name(mgp->pdev), sizeof(info->bus_info));
+}
+
+static int
+myri10ge_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal)
+{
+	struct myri10ge_priv *mgp = netdev_priv(netdev);
+	coal->rx_coalesce_usecs = mgp->intr_coal_delay;
+	return 0;
+}
+
+static int
+myri10ge_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal)
+{
+	struct myri10ge_priv *mgp = netdev_priv(netdev);
+
+	mgp->intr_coal_delay = coal->rx_coalesce_usecs;
+	__raw_writel(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr);
+	return 0;
+}
+
+static void
+myri10ge_get_pauseparam(struct net_device *netdev,
+			struct ethtool_pauseparam *pause)
+{
+	struct myri10ge_priv *mgp = netdev_priv(netdev);
+
+	pause->autoneg = 0;
+	pause->rx_pause = mgp->pause;
+	pause->tx_pause = mgp->pause;
+}
+
+static int
+myri10ge_set_pauseparam(struct net_device *netdev,
+			struct ethtool_pauseparam *pause)
+{
+	struct myri10ge_priv *mgp = netdev_priv(netdev);
+
+	if (pause->tx_pause != mgp->pause)
+		return myri10ge_change_pause(mgp, pause->tx_pause);
+	if (pause->rx_pause != mgp->pause)
+		return myri10ge_change_pause(mgp, pause->tx_pause);
+	if (pause->autoneg != 0)
+		return -EINVAL;
+	return 0;
+}
+
+static void
+myri10ge_get_ringparam(struct net_device *netdev,
+		       struct ethtool_ringparam *ring)
+{
+	struct myri10ge_priv *mgp = netdev_priv(netdev);
+
+	ring->rx_mini_max_pending = mgp->rx_small.mask + 1;
+	ring->rx_max_pending = mgp->rx_big.mask + 1;
+	ring->rx_jumbo_max_pending = 0;
+	ring->tx_max_pending = mgp->rx_small.mask + 1;
+	ring->rx_mini_pending = ring->rx_mini_max_pending;
+	ring->rx_pending = ring->rx_max_pending;
+	ring->rx_jumbo_pending = ring->rx_jumbo_max_pending;
+	ring->tx_pending = ring->tx_max_pending;
+}
+
+static u32 myri10ge_get_rx_csum(struct net_device *netdev)
+{
+	struct myri10ge_priv *mgp = netdev_priv(netdev);
+	if (mgp->csum_flag)
+		return 1;
+	else
+		return 0;
+}
+
+static int myri10ge_set_rx_csum(struct net_device *netdev, u32 csum_enabled)
+{
+	struct myri10ge_priv *mgp = netdev_priv(netdev);
+	if (csum_enabled)
+		mgp->csum_flag = MXGEFW_FLAGS_CKSUM;
+	else
+		mgp->csum_flag = 0;
+	return 0;
+}
+
+static const char myri10ge_gstrings_stats[][ETH_GSTRING_LEN] = {
+	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
+	"tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
+	"rx_length_errors", "rx_over_errors", "rx_crc_errors",
+	"rx_frame_errors", "rx_fifo_errors", "rx_missed_errors",
+	"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
+	"tx_heartbeat_errors", "tx_window_errors",
+	/* device-specific stats */
+	"read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs",
+	"serial_number", "tx_pkt_start", "tx_pkt_done",
+	"tx_req", "tx_done", "rx_small_cnt", "rx_big_cnt",
+	"wake_queue", "stop_queue", "watchdog_resets", "tx_linearized",
+	"link_up", "dropped_link_overflow", "dropped_link_error_or_filtered",
+	"dropped_runt", "dropped_overrun", "dropped_no_small_buffer",
+	"dropped_no_big_buffer"
+};
+
+#define MYRI10GE_NET_STATS_LEN      21
+#define MYRI10GE_STATS_LEN  sizeof(myri10ge_gstrings_stats) / ETH_GSTRING_LEN
+
+static void
+myri10ge_get_strings(struct net_device *netdev, u32 stringset, u8 * data)
+{
+	switch (stringset) {
+	case ETH_SS_STATS:
+		memcpy(data, *myri10ge_gstrings_stats,
+		       sizeof(myri10ge_gstrings_stats));
+		break;
+	}
+}
+
+static int myri10ge_get_stats_count(struct net_device *netdev)
+{
+	return MYRI10GE_STATS_LEN;
+}
+
+static void
+myri10ge_get_ethtool_stats(struct net_device *netdev,
+			   struct ethtool_stats *stats, u64 * data)
+{
+	struct myri10ge_priv *mgp = netdev_priv(netdev);
+	int i;
+
+	for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++)
+		data[i] = ((unsigned long *)&mgp->stats)[i];
+
+	data[i++] = (unsigned int)mgp->read_dma;
+	data[i++] = (unsigned int)mgp->write_dma;
+	data[i++] = (unsigned int)mgp->read_write_dma;
+	data[i++] = (unsigned int)mgp->serial_number;
+	data[i++] = (unsigned int)mgp->tx.pkt_start;
+	data[i++] = (unsigned int)mgp->tx.pkt_done;
+	data[i++] = (unsigned int)mgp->tx.req;
+	data[i++] = (unsigned int)mgp->tx.done;
+	data[i++] = (unsigned int)mgp->rx_small.cnt;
+	data[i++] = (unsigned int)mgp->rx_big.cnt;
+	data[i++] = (unsigned int)mgp->wake_queue;
+	data[i++] = (unsigned int)mgp->stop_queue;
+	data[i++] = (unsigned int)mgp->watchdog_resets;
+	data[i++] = (unsigned int)mgp->tx_linearized;
+	data[i++] = (unsigned int)ntohl(mgp->fw_stats->link_up);
+	data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_link_overflow);
+	data[i++] =
+	    (unsigned int)ntohl(mgp->fw_stats->dropped_link_error_or_filtered);
+	data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_runt);
+	data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_overrun);
+	data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_no_small_buffer);
+	data[i++] = (unsigned int)ntohl(mgp->fw_stats->dropped_no_big_buffer);
+}
+
+static struct ethtool_ops myri10ge_ethtool_ops = {
+	.get_settings = myri10ge_get_settings,
+	.get_drvinfo = myri10ge_get_drvinfo,
+	.get_coalesce = myri10ge_get_coalesce,
+	.set_coalesce = myri10ge_set_coalesce,
+	.get_pauseparam = myri10ge_get_pauseparam,
+	.set_pauseparam = myri10ge_set_pauseparam,
+	.get_ringparam = myri10ge_get_ringparam,
+	.get_rx_csum = myri10ge_get_rx_csum,
+	.set_rx_csum = myri10ge_set_rx_csum,
+	.get_tx_csum = ethtool_op_get_tx_csum,
+	.set_tx_csum = ethtool_op_set_tx_hw_csum,
+	.get_sg = ethtool_op_get_sg,
+	.set_sg = ethtool_op_set_sg,
+#ifdef NETIF_F_TSO
+	.get_tso = ethtool_op_get_tso,
+	.set_tso = ethtool_op_set_tso,
+#endif
+	.get_strings = myri10ge_get_strings,
+	.get_stats_count = myri10ge_get_stats_count,
+	.get_ethtool_stats = myri10ge_get_ethtool_stats
+};
+
+static int myri10ge_allocate_rings(struct net_device *dev)
+{
+	struct myri10ge_priv *mgp;
+	struct myri10ge_cmd cmd;
+	int tx_ring_size, rx_ring_size;
+	int tx_ring_entries, rx_ring_entries;
+	int i, status;
+	size_t bytes;
+
+	mgp = netdev_priv(dev);
+
+	/* get ring sizes */
+
+	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_RING_SIZE, &cmd, 0);
+	tx_ring_size = cmd.data0;
+	status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd, 0);
+	rx_ring_size = cmd.data0;
+
+	tx_ring_entries = tx_ring_size / sizeof(struct mcp_kreq_ether_send);
+	rx_ring_entries = rx_ring_size / sizeof(struct mcp_dma_addr);
+	mgp->tx.mask = tx_ring_entries - 1;
+	mgp->rx_small.mask = mgp->rx_big.mask = rx_ring_entries - 1;
+
+	/* allocate the host shadow rings */
+
+	bytes = 8 + (MYRI10GE_MAX_SEND_DESC_TSO + 4)
+	    * sizeof(*mgp->tx.req_list);
+	mgp->tx.req_bytes = kzalloc(bytes, GFP_KERNEL);
+	if (mgp->tx.req_bytes == NULL)
+		goto abort_with_nothing;
+
+	/* ensure req_list entries are aligned to 8 bytes */
+	mgp->tx.req_list = (struct mcp_kreq_ether_send *)
+	    ALIGN((unsigned long)mgp->tx.req_bytes, 8);
+
+	bytes = rx_ring_entries * sizeof(*mgp->rx_small.shadow);
+	mgp->rx_small.shadow = kzalloc(bytes, GFP_KERNEL);
+	if (mgp->rx_small.shadow == NULL)
+		goto abort_with_tx_req_bytes;
+
+	bytes = rx_ring_entries * sizeof(*mgp->rx_big.shadow);
+	mgp->rx_big.shadow = kzalloc(bytes, GFP_KERNEL);
+	if (mgp->rx_big.shadow == NULL)
+		goto abort_with_rx_small_shadow;
+
+	/* allocate the host info rings */
+
+	bytes = tx_ring_entries * sizeof(*mgp->tx.info);
+	mgp->tx.info = kzalloc(bytes, GFP_KERNEL);
+	if (mgp->tx.info == NULL)
+		goto abort_with_rx_big_shadow;
+
+	bytes = rx_ring_entries * sizeof(*mgp->rx_small.info);
+	mgp->rx_small.info = kzalloc(bytes, GFP_KERNEL);
+	if (mgp->rx_small.info == NULL)
+		goto abort_with_tx_info;
+
+	bytes = rx_ring_entries * sizeof(*mgp->rx_big.info);
+	mgp->rx_big.info = kzalloc(bytes, GFP_KERNEL);
+	if (mgp->rx_big.info == NULL)
+		goto abort_with_rx_small_info;
+
+	/* Fill the receive rings */
+
+	for (i = 0; i <= mgp->rx_small.mask; i++) {
+		status = myri10ge_getbuf(&mgp->rx_small, mgp->pdev,
+					 mgp->small_bytes, i);
+		if (status) {
+			printk(KERN_ERR
+			       "myri10ge: %s: alloced only %d small bufs\n",
+			       dev->name, i);
+			goto abort_with_rx_small_ring;
+		}
+	}
+
+	for (i = 0; i <= mgp->rx_big.mask; i++) {
+		status =
+		    myri10ge_getbuf(&mgp->rx_big, mgp->pdev,
+				    dev->mtu + ETH_HLEN, i);
+		if (status) {
+			printk(KERN_ERR
+			       "myri10ge: %s: alloced only %d big bufs\n",
+			       dev->name, i);
+			goto abort_with_rx_big_ring;
+		}
+	}
+
+	return 0;
+
+abort_with_rx_big_ring:
+	for (i = 0; i <= mgp->rx_big.mask; i++) {
+		if (mgp->rx_big.info[i].skb != NULL)
+			dev_kfree_skb_any(mgp->rx_big.info[i].skb);
+		if (pci_unmap_len(&mgp->rx_big.info[i], len))
+			pci_unmap_single(mgp->pdev,
+					 pci_unmap_addr(&mgp->rx_big.info[i],
+							bus),
+					 pci_unmap_len(&mgp->rx_big.info[i],
+						       len),
+					 PCI_DMA_FROMDEVICE);
+	}
+
+abort_with_rx_small_ring:
+	for (i = 0; i <= mgp->rx_small.mask; i++) {
+		if (mgp->rx_small.info[i].skb != NULL)
+			dev_kfree_skb_any(mgp->rx_small.info[i].skb);
+		if (pci_unmap_len(&mgp->rx_small.info[i], len))
+			pci_unmap_single(mgp->pdev,
+					 pci_unmap_addr(&mgp->rx_small.info[i],
+							bus),
+					 pci_unmap_len(&mgp->rx_small.info[i],
+						       len),
+					 PCI_DMA_FROMDEVICE);
+	}
+	kfree(mgp->rx_big.info);
+
+abort_with_rx_small_info:
+	kfree(mgp->rx_small.info);
+
+abort_with_tx_info:
+	kfree(mgp->tx.info);
+
+abort_with_rx_big_shadow:
+	kfree(mgp->rx_big.shadow);
+
+abort_with_rx_small_shadow:
+	kfree(mgp->rx_small.shadow);
+
+abort_with_tx_req_bytes:
+	kfree(mgp->tx.req_bytes);
+	mgp->tx.req_bytes = NULL;
+	mgp->tx.req_list = NULL;
+
+abort_with_nothing:
+	return status;
+}
+
+static void myri10ge_free_rings(struct net_device *dev)
+{
+	struct myri10ge_priv *mgp;
+	struct sk_buff *skb;
+	struct myri10ge_tx_buf *tx;
+	int i, len, idx;
+
+	mgp = netdev_priv(dev);
+
+	for (i = 0; i <= mgp->rx_big.mask; i++) {
+		if (mgp->rx_big.info[i].skb != NULL)
+			dev_kfree_skb_any(mgp->rx_big.info[i].skb);
+		if (pci_unmap_len(&mgp->rx_big.info[i], len))
+			pci_unmap_single(mgp->pdev,
+					 pci_unmap_addr(&mgp->rx_big.info[i],
+							bus),
+					 pci_unmap_len(&mgp->rx_big.info[i],
+						       len),
+					 PCI_DMA_FROMDEVICE);
+	}
+
+	for (i = 0; i <= mgp->rx_small.mask; i++) {
+		if (mgp->rx_small.info[i].skb != NULL)
+			dev_kfree_skb_any(mgp->rx_small.info[i].skb);
+		if (pci_unmap_len(&mgp->rx_small.info[i], len))
+			pci_unmap_single(mgp->pdev,
+					 pci_unmap_addr(&mgp->rx_small.info[i],
+							bus),
+					 pci_unmap_len(&mgp->rx_small.info[i],
+						       len),
+					 PCI_DMA_FROMDEVICE);
+	}
+
+	tx = &mgp->tx;
+	while (tx->done != tx->req) {
+		idx = tx->done & tx->mask;
+		skb = tx->info[idx].skb;
+
+		/* Mark as free */
+		tx->info[idx].skb = NULL;
+		tx->done++;
+		len = pci_unmap_len(&tx->info[idx], len);
+		pci_unmap_len_set(&tx->info[idx], len, 0);
+		if (skb) {
+			mgp->stats.tx_dropped++;
+			dev_kfree_skb_any(skb);
+			if (len)
+				pci_unmap_single(mgp->pdev,
+						 pci_unmap_addr(&tx->info[idx],
+								bus), len,
+						 PCI_DMA_TODEVICE);
+		} else {
+			if (len)
+				pci_unmap_page(mgp->pdev,
+					       pci_unmap_addr(&tx->info[idx],
+							      bus), len,
+					       PCI_DMA_TODEVICE);
+		}
+	}
+	kfree(mgp->rx_big.info);
+
+	kfree(mgp->rx_small.info);
+
+	kfree(mgp->tx.info);
+
+	kfree(mgp->rx_big.shadow);
+
+	kfree(mgp->rx_small.shadow);
+
+	kfree(mgp->tx.req_bytes);
+	mgp->tx.req_bytes = NULL;
+	mgp->tx.req_list = NULL;
+}
+
+static int myri10ge_open(struct net_device *dev)
+{
+	struct myri10ge_priv *mgp;
+	struct myri10ge_cmd cmd;
+	int status, big_pow2;
+
+	mgp = netdev_priv(dev);
+
+	if (mgp->running != MYRI10GE_ETH_STOPPED)
+		return -EBUSY;
+
+	mgp->running = MYRI10GE_ETH_STARTING;
+	status = myri10ge_reset(mgp);
+	if (status != 0) {
+		printk(KERN_ERR "myri10ge: %s: failed reset\n", dev->name);
+		mgp->running = MYRI10GE_ETH_STOPPED;
+		return -ENXIO;
+	}
+
+	/* decide what small buffer size to use.  For good TCP rx
+	 * performance, it is important to not receive 1514 byte
+	 * frames into jumbo buffers, as it confuses the socket buffer
+	 * accounting code, leading to drops and erratic performance.
+	 */
+
+	if (dev->mtu <= ETH_DATA_LEN)
+		mgp->small_bytes = 128;	/* enough for a TCP header */
+	else
+		mgp->small_bytes = ETH_FRAME_LEN;	/* enough for an ETH_DATA_LEN frame */
+
+	/* Override the small buffer size? */
+	if (myri10ge_small_bytes > 0)
+		mgp->small_bytes = myri10ge_small_bytes;
+
+	/* If the user sets an obscenely small MTU, adjust the small
+	 * bytes down to nearly nothing */
+	if (mgp->small_bytes >= (dev->mtu + ETH_HLEN))
+		mgp->small_bytes = 64;
+
+	/* get the lanai pointers to the send and receive rings */
+
+	status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SEND_OFFSET, &cmd, 0);
+	mgp->tx.lanai =
+	    (struct mcp_kreq_ether_send __iomem *)(mgp->sram + cmd.data0);
+
+	status |=
+	    myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_SMALL_RX_OFFSET, &cmd, 0);
+	mgp->rx_small.lanai =
+	    (struct mcp_kreq_ether_recv __iomem *)(mgp->sram + cmd.data0);
+
+	status |= myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_BIG_RX_OFFSET, &cmd, 0);
+	mgp->rx_big.lanai =
+	    (struct mcp_kreq_ether_recv __iomem *)(mgp->sram + cmd.data0);
+
+	if (status != 0) {
+		printk(KERN_ERR
+		       "myri10ge: %s: failed to get ring sizes or locations\n",
+		       dev->name);
+		mgp->running = MYRI10GE_ETH_STOPPED;
+		return -ENXIO;
+	}
+
+	if (mgp->mtrr >= 0) {
+		mgp->tx.wc_fifo = (u8 __iomem *) mgp->sram + 0x200000;
+		mgp->rx_small.wc_fifo = (u8 __iomem *) mgp->sram + 0x300000;
+		mgp->rx_big.wc_fifo = (u8 __iomem *) mgp->sram + 0x340000;
+	} else {
+		mgp->tx.wc_fifo = NULL;
+		mgp->rx_small.wc_fifo = NULL;
+		mgp->rx_big.wc_fifo = NULL;
+	}
+
+	status = myri10ge_allocate_rings(dev);
+	if (status != 0)
+		goto abort_with_nothing;
+
+	/* Firmware needs the big buff size as a power of 2.  Lie and
+	 * tell him the buffer is larger, because we only use 1
+	 * buffer/pkt, and the mtu will prevent overruns.
+	 */
+	big_pow2 = dev->mtu + ETH_HLEN + MXGEFW_PAD;
+	while ((big_pow2 & (big_pow2 - 1)) != 0)
+		big_pow2++;
+
+	/* now give firmware buffers sizes, and MTU */
+	cmd.data0 = dev->mtu + ETH_HLEN + VLAN_HLEN;
+	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_MTU, &cmd, 0);
+	cmd.data0 = mgp->small_bytes;
+	status |=
+	    myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_SMALL_BUFFER_SIZE, &cmd, 0);
+	cmd.data0 = big_pow2;
+	status |=
+	    myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_BIG_BUFFER_SIZE, &cmd, 0);
+	if (status) {
+		printk(KERN_ERR "myri10ge: %s: Couldn't set buffer sizes\n",
+		       dev->name);
+		goto abort_with_rings;
+	}
+
+	cmd.data0 = MYRI10GE_LOWPART_TO_U32(mgp->fw_stats_bus);
+	cmd.data1 = MYRI10GE_HIGHPART_TO_U32(mgp->fw_stats_bus);
+	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_STATS_DMA, &cmd, 0);
+	if (status) {
+		printk(KERN_ERR "myri10ge: %s: Couldn't set stats DMA\n",
+		       dev->name);
+		goto abort_with_rings;
+	}
+
+	mgp->link_state = -1;
+	mgp->rdma_tags_available = 15;
+
+	netif_poll_enable(mgp->dev);	/* must happen prior to any irq */
+
+	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_UP, &cmd, 0);
+	if (status) {
+		printk(KERN_ERR "myri10ge: %s: Couldn't bring up link\n",
+		       dev->name);
+		goto abort_with_rings;
+	}
+
+	mgp->wake_queue = 0;
+	mgp->stop_queue = 0;
+	mgp->running = MYRI10GE_ETH_RUNNING;
+	mgp->watchdog_timer.expires = jiffies + myri10ge_watchdog_timeout * HZ;
+	add_timer(&mgp->watchdog_timer);
+	netif_wake_queue(dev);
+	return 0;
+
+abort_with_rings:
+	myri10ge_free_rings(dev);
+
+abort_with_nothing:
+	mgp->running = MYRI10GE_ETH_STOPPED;
+	return -ENOMEM;
+}
+
+static int myri10ge_close(struct net_device *dev)
+{
+	struct myri10ge_priv *mgp;
+	struct myri10ge_cmd cmd;
+	int status, old_down_cnt;
+
+	mgp = netdev_priv(dev);
+
+	if (mgp->running != MYRI10GE_ETH_RUNNING)
+		return 0;
+
+	if (mgp->tx.req_bytes == NULL)
+		return 0;
+
+	del_timer_sync(&mgp->watchdog_timer);
+	mgp->running = MYRI10GE_ETH_STOPPING;
+	netif_poll_disable(mgp->dev);
+	netif_carrier_off(dev);
+	netif_stop_queue(dev);
+	old_down_cnt = mgp->down_cnt;
+	mb();
+	status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_DOWN, &cmd, 0);
+	if (status)
+		printk(KERN_ERR "myri10ge: %s: Couldn't bring down link\n",
+		       dev->name);
+
+	wait_event_timeout(mgp->down_wq, old_down_cnt != mgp->down_cnt, HZ);
+	if (old_down_cnt == mgp->down_cnt)
+		printk(KERN_ERR "myri10ge: %s never got down irq\n", dev->name);
+
+	netif_tx_disable(dev);
+
+	myri10ge_free_rings(dev);
+
+	mgp->running = MYRI10GE_ETH_STOPPED;
+	return 0;
+}
+
+/* copy an array of struct mcp_kreq_ether_send's to the mcp.  Copy
+ * backwards one at a time and handle ring wraps */
+
+static inline void
+myri10ge_submit_req_backwards(struct myri10ge_tx_buf *tx,
+			      struct mcp_kreq_ether_send *src, int cnt)
+{
+	int idx, starting_slot;
+	starting_slot = tx->req;
+	while (cnt > 1) {
+		cnt--;
+		idx = (starting_slot + cnt) & tx->mask;
+		myri10ge_pio_copy(&tx->lanai[idx], &src[cnt], sizeof(*src));
+		mb();
+	}
+}
+
+/*
+ * copy an array of struct mcp_kreq_ether_send's to the mcp.  Copy
+ * at most 32 bytes at a time, so as to avoid involving the software
+ * pio handler in the nic.   We re-write the first segment's flags
+ * to mark them valid only after writing the entire chain.
+ */
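+/*
+ * Each struct mcp_kreq_ether_send is 16 bytes (see myri10ge_mcp.h), so
+ * copying two requests per loop iteration keeps every PIO burst at the
+ * 32-byte limit mentioned above.
+ */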
+
+static inline void
+myri10ge_submit_req(struct myri10ge_tx_buf *tx, struct mcp_kreq_ether_send *src,
+		    int cnt)
+{
+	int idx, i;
+	struct mcp_kreq_ether_send __iomem *dstp, *dst;
+	struct mcp_kreq_ether_send *srcp;
+	u8 last_flags;
+
+	idx = tx->req & tx->mask;
+
+	last_flags = src->flags;
+	src->flags = 0;
+	mb();
+	dst = dstp = &tx->lanai[idx];
+	srcp = src;
+
+	if ((idx + cnt) < tx->mask) {
+		for (i = 0; i < (cnt - 1); i += 2) {
+			myri10ge_pio_copy(dstp, srcp, 2 * sizeof(*src));
+			mb();	/* force write every 32 bytes */
+			srcp += 2;
+			dstp += 2;
+		}
+	} else {
+		/* submit all but the first request, and ensure
+		 * that it is submitted below */
+		myri10ge_submit_req_backwards(tx, src, cnt);
+		i = 0;
+	}
+	if (i < cnt) {
+		/* submit the first request */
+		myri10ge_pio_copy(dstp, srcp, sizeof(*src));
+		mb();		/* barrier before setting valid flag */
+	}
+
+	/* re-write the last 32-bits with the valid flags */
+	src->flags = last_flags;
+	__raw_writel(*((u32 *) src + 3), (u32 __iomem *) dst + 3);
+	tx->req += cnt;
+	mb();
+}
+
+static inline void
+myri10ge_submit_req_wc(struct myri10ge_tx_buf *tx,
+		       struct mcp_kreq_ether_send *src, int cnt)
+{
+	tx->req += cnt;
+	mb();
+	while (cnt >= 4) {
+		myri10ge_pio_copy(tx->wc_fifo, src, 64);
+		mb();
+		src += 4;
+		cnt -= 4;
+	}
+	if (cnt > 0) {
+		/* pad it to 64 bytes.  The src is 64 bytes bigger than it
+		 * needs to be so that we don't overrun it */
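+		/* The (cnt << 18) offset presumably selects an alias of the
+		 * write-combining FIFO that tells the firmware how many
+		 * requests this final 64-byte burst actually carries. */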
+		myri10ge_pio_copy(tx->wc_fifo + (cnt << 18), src, 64);
+		mb();
+	}
+}
+
+/*
+ * Transmit a packet.  We need to split the packet so that a single
+ * segment does not cross myri10ge->tx.boundary, which makes segment
+ * counting tricky.  Rather than trying to count segments up front, we
+ * simply give up if there are not currently enough free send descriptors
+ * to hold a reasonably fragmented packet.  If we run out of descriptors
+ * while preparing a packet for DMA, we just linearize the skb and try
+ * again.
+ */
+
+static int myri10ge_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct myri10ge_priv *mgp = netdev_priv(dev);
+	struct mcp_kreq_ether_send *req;
+	struct myri10ge_tx_buf *tx = &mgp->tx;
+	struct skb_frag_struct *frag;
+	dma_addr_t bus;
+	u32 low, high_swapped;
+	unsigned int len;
+	int idx, last_idx, avail, frag_cnt, frag_idx, count, mss, max_segments;
+	u16 pseudo_hdr_offset, cksum_offset;
+	int cum_len, seglen, boundary, rdma_count;
+	u8 flags, odd_flag;
+
+again:
+	req = tx->req_list;
+	avail = tx->mask - 1 - (tx->req - tx->done);
+
+	mss = 0;
+	max_segments = MXGEFW_MAX_SEND_DESC;
+
+#ifdef NETIF_F_TSO
+	if (skb->len > (dev->mtu + ETH_HLEN)) {
+		mss = skb_shinfo(skb)->tso_size;
+		if (mss != 0)
+			max_segments = MYRI10GE_MAX_SEND_DESC_TSO;
+	}
+#endif				/*NETIF_F_TSO */
+
+	if ((unlikely(avail < max_segments))) {
+		/* we are out of transmit resources */
+		mgp->stop_queue++;
+		netif_stop_queue(dev);
+		return 1;
+	}
+
+	/* Setup checksum offloading, if needed */
+	cksum_offset = 0;
+	pseudo_hdr_offset = 0;
+	odd_flag = 0;
+	flags = (MXGEFW_FLAGS_NO_TSO | MXGEFW_FLAGS_FIRST);
+	if (likely(skb->ip_summed == CHECKSUM_HW)) {
+		cksum_offset = (skb->h.raw - skb->data);
+		pseudo_hdr_offset = (skb->h.raw + skb->csum) - skb->data;
+		/* If the headers are excessively large, then we must
+		 * fall back to a software checksum */
+		if (unlikely(cksum_offset > 255 || pseudo_hdr_offset > 127)) {
+			if (skb_checksum_help(skb, 0))
+				goto drop;
+			cksum_offset = 0;
+			pseudo_hdr_offset = 0;
+		} else {
+			pseudo_hdr_offset = htons(pseudo_hdr_offset);
+			odd_flag = MXGEFW_FLAGS_ALIGN_ODD;
+			flags |= MXGEFW_FLAGS_CKSUM;
+		}
+	}
+
+	cum_len = 0;
+
+#ifdef NETIF_F_TSO
+	if (mss) {		/* TSO */
+		/* this removes any CKSUM flag from before */
+		flags = (MXGEFW_FLAGS_TSO_HDR | MXGEFW_FLAGS_FIRST);
+
+		/* negative cum_len signifies to the
+		 * send loop that we are still in the
+		 * header portion of the TSO packet.
+		 * TSO header must be at most 134 bytes long */
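+		/* e.g. for a 14-byte Ethernet header, a 20-byte IP header
+		 * and a 20-byte TCP header (doff == 5), cum_len starts at
+		 * -(34 + 20) = -54 and turns non-negative once the header
+		 * bytes have been consumed by the send loop below */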
+		cum_len = -((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
+
+		/* for TSO, pseudo_hdr_offset holds mss.
+		 * The firmware figures out where to put
+		 * the checksum by parsing the header. */
+		pseudo_hdr_offset = htons(mss);
+	} else
+#endif				/*NETIF_F_TSO */
+		/* Mark small packets, and pad out tiny packets */
+	if (skb->len <= MXGEFW_SEND_SMALL_SIZE) {
+		flags |= MXGEFW_FLAGS_SMALL;
+
+		/* pad frames to at least ETH_ZLEN bytes */
+		if (unlikely(skb->len < ETH_ZLEN)) {
+			skb = skb_padto(skb, ETH_ZLEN);
+			if (skb == NULL) {
+				/* The packet is gone, so we must
+				 * return 0 */
+				mgp->stats.tx_dropped += 1;
+				return 0;
+			}
+			/* adjust the len to account for the zero pad
+			 * so that the nic can know how long it is */
+			skb->len = ETH_ZLEN;
+		}
+	}
+
+	/* map the skb for DMA */
+	len = skb->len - skb->data_len;
+	idx = tx->req & tx->mask;
+	tx->info[idx].skb = skb;
+	bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE);
+	pci_unmap_addr_set(&tx->info[idx], bus, bus);
+	pci_unmap_len_set(&tx->info[idx], len, len);
+
+	frag_cnt = skb_shinfo(skb)->nr_frags;
+	frag_idx = 0;
+	count = 0;
+	rdma_count = 0;
+
+	/* "rdma_count" is the number of RDMAs belonging to the
+	 * current packet BEFORE the current send request. For
+	 * non-TSO packets, this is equal to "count".
+	 * For TSO packets, rdma_count needs to be reset
+	 * to 0 after a segment cut.
+	 *
+	 * The rdma_count field of the send request is
+	 * the number of RDMAs of the packet starting at
+	 * that request. For TSO send requests with one or more cuts
+	 * in the middle, this is the number of RDMAs starting
+	 * after the last cut in the request. All previous
+	 * segments before the last cut implicitly have 1 RDMA.
+	 *
+	 * Since the number of RDMAs is not known beforehand,
+	 * it must be filled-in retroactively - after each
+	 * segmentation cut or at the end of the entire packet.
+	 */
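+	/* For example, a non-TSO packet split into three send requests
+	 * leaves req pointing one past the last request and rdma_count == 3
+	 * after the loop, so (req - rdma_count)->rdma_count = rdma_count
+	 * retroactively stamps 3 into the first request. */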
+
+	while (1) {
+		/* Break the SKB or Fragment up into pieces which
+		 * do not cross mgp->tx.boundary */
+		low = MYRI10GE_LOWPART_TO_U32(bus);
+		high_swapped = htonl(MYRI10GE_HIGHPART_TO_U32(bus));
+		while (len) {
+			u8 flags_next;
+			int cum_len_next;
+
+			if (unlikely(count == max_segments))
+				goto abort_linearize;
+
+			boundary = (low + tx->boundary) & ~(tx->boundary - 1);
+			seglen = boundary - low;
+			if (seglen > len)
+				seglen = len;
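+			/* e.g. with tx->boundary == 2048 and low == 0x1f00,
+			 * the computation above yields boundary == 0x2000 and
+			 * seglen == 0x100 (possibly clamped further to len) */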
+			flags_next = flags & ~MXGEFW_FLAGS_FIRST;
+			cum_len_next = cum_len + seglen;
+#ifdef NETIF_F_TSO
+			if (mss) {	/* TSO */
+				(req - rdma_count)->rdma_count = rdma_count + 1;
+
+				if (likely(cum_len >= 0)) {	/* payload */
+					int next_is_first, chop;
+
+					chop = (cum_len_next > mss);
+					cum_len_next = cum_len_next % mss;
+					next_is_first = (cum_len_next == 0);
+					flags |= chop * MXGEFW_FLAGS_TSO_CHOP;
+					flags_next |= next_is_first *
+					    MXGEFW_FLAGS_FIRST;
+					rdma_count |= -(chop | next_is_first);
+					rdma_count += chop & !next_is_first;
+				} else if (likely(cum_len_next >= 0)) {	/* header ends */
+					int small;
+
+					rdma_count = -1;
+					cum_len_next = 0;
+					seglen = -cum_len;
+					small = (mss <= MXGEFW_SEND_SMALL_SIZE);
+					flags_next = MXGEFW_FLAGS_TSO_PLD |
+					    MXGEFW_FLAGS_FIRST |
+					    (small * MXGEFW_FLAGS_SMALL);
+				}
+			}
+#endif				/* NETIF_F_TSO */
+			req->addr_high = high_swapped;
+			req->addr_low = htonl(low);
+			req->pseudo_hdr_offset = pseudo_hdr_offset;
+			req->pad = 0;	/* complete solid 16-byte block; does this matter? */
+			req->rdma_count = 1;
+			req->length = htons(seglen);
+			req->cksum_offset = cksum_offset;
+			req->flags = flags | ((cum_len & 1) * odd_flag);
+
+			low += seglen;
+			len -= seglen;
+			cum_len = cum_len_next;
+			flags = flags_next;
+			req++;
+			count++;
+			rdma_count++;
+			if (unlikely(cksum_offset > seglen))
+				cksum_offset -= seglen;
+			else
+				cksum_offset = 0;
+		}
+		if (frag_idx == frag_cnt)
+			break;
+
+		/* map next fragment for DMA */
+		idx = (count + tx->req) & tx->mask;
+		frag = &skb_shinfo(skb)->frags[frag_idx];
+		frag_idx++;
+		len = frag->size;
+		bus = pci_map_page(mgp->pdev, frag->page, frag->page_offset,
+				   len, PCI_DMA_TODEVICE);
+		pci_unmap_addr_set(&tx->info[idx], bus, bus);
+		pci_unmap_len_set(&tx->info[idx], len, len);
+	}
+
+	(req - rdma_count)->rdma_count = rdma_count;
+#ifdef NETIF_F_TSO
+	if (mss)
+		do {
+			req--;
+			req->flags |= MXGEFW_FLAGS_TSO_LAST;
+		} while (!(req->flags & (MXGEFW_FLAGS_TSO_CHOP |
+					 MXGEFW_FLAGS_FIRST)));
+#endif
+	idx = ((count - 1) + tx->req) & tx->mask;
+	tx->info[idx].last = 1;
+	if (tx->wc_fifo == NULL)
+		myri10ge_submit_req(tx, tx->req_list, count);
+	else
+		myri10ge_submit_req_wc(tx, tx->req_list, count);
+	tx->pkt_start++;
+	if ((avail - count) < MXGEFW_MAX_SEND_DESC) {
+		mgp->stop_queue++;
+		netif_stop_queue(dev);
+	}
+	dev->trans_start = jiffies;
+	return 0;
+
+abort_linearize:
+	/* Free any DMA resources we've allocated and clear out the skb
+	 * slot so as to not trip up assertions, and to avoid a
+	 * double-free if linearizing fails */
+
+	last_idx = (idx + 1) & tx->mask;
+	idx = tx->req & tx->mask;
+	tx->info[idx].skb = NULL;
+	do {
+		len = pci_unmap_len(&tx->info[idx], len);
+		if (len) {
+			if (tx->info[idx].skb != NULL)
+				pci_unmap_single(mgp->pdev,
+						 pci_unmap_addr(&tx->info[idx],
+								bus), len,
+						 PCI_DMA_TODEVICE);
+			else
+				pci_unmap_page(mgp->pdev,
+					       pci_unmap_addr(&tx->info[idx],
+							      bus), len,
+					       PCI_DMA_TODEVICE);
+			pci_unmap_len_set(&tx->info[idx], len, 0);
+			tx->info[idx].skb = NULL;
+		}
+		idx = (idx + 1) & tx->mask;
+	} while (idx != last_idx);
+	if (skb_shinfo(skb)->tso_size) {
+		printk(KERN_ERR
+		       "myri10ge: %s: TSO but wanted to linearize?!?!?\n",
+		       mgp->dev->name);
+		goto drop;
+	}
+
+	if (skb_linearize(skb, GFP_ATOMIC))
+		goto drop;
+
+	mgp->tx_linearized++;
+	goto again;
+
+drop:
+	dev_kfree_skb_any(skb);
+	mgp->stats.tx_dropped += 1;
+	return 0;
+
+}
+
+static struct net_device_stats *myri10ge_get_stats(struct net_device *dev)
+{
+	struct myri10ge_priv *mgp = netdev_priv(dev);
+	return &mgp->stats;
+}
+
+static void myri10ge_set_multicast_list(struct net_device *dev)
+{
+	/* can be called from atomic contexts,
+	 * pass 1 to force atomicity in myri10ge_send_cmd() */
+	myri10ge_change_promisc(netdev_priv(dev), dev->flags & IFF_PROMISC, 1);
+}
+
+static int myri10ge_set_mac_address(struct net_device *dev, void *addr)
+{
+	struct sockaddr *sa = addr;
+	struct myri10ge_priv *mgp = netdev_priv(dev);
+	int status;
+
+	if (!is_valid_ether_addr(sa->sa_data))
+		return -EADDRNOTAVAIL;
+
+	status = myri10ge_update_mac_address(mgp, sa->sa_data);
+	if (status != 0) {
+		printk(KERN_ERR
+		       "myri10ge: %s: changing mac address failed with %d\n",
+		       dev->name, status);
+		return status;
+	}
+
+	/* change the dev structure */
+	memcpy(dev->dev_addr, sa->sa_data, 6);
+	return 0;
+}
+
+static int myri10ge_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct myri10ge_priv *mgp = netdev_priv(dev);
+	int error = 0;
+
+	if ((new_mtu < 68) || (ETH_HLEN + new_mtu > MYRI10GE_MAX_ETHER_MTU)) {
+		printk(KERN_ERR "myri10ge: %s: new mtu (%d) is not valid\n",
+		       dev->name, new_mtu);
+		return -EINVAL;
+	}
+	printk(KERN_INFO "%s: changing mtu from %d to %d\n",
+	       dev->name, dev->mtu, new_mtu);
+	if (mgp->running) {
+		/* if we change the mtu on an active device, we must
+		 * reset the device so the firmware sees the change */
+		myri10ge_close(dev);
+		dev->mtu = new_mtu;
+		myri10ge_open(dev);
+	} else
+		dev->mtu = new_mtu;
+
+	return error;
+}
+
+/*
+ * Enable ECRC to align PCI-E Completion packets on an 8-byte boundary.
+ * Only do it if the bridge is a root port since we don't want to disturb
+ * any other device, except if forced with myri10ge_ecrc_enable > 1.
+ */
+
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_PCIE	0x005d
+
+static void myri10ge_enable_ecrc(struct myri10ge_priv *mgp)
+{
+	struct pci_dev *bridge = mgp->pdev->bus->self;
+	struct device *dev = &mgp->pdev->dev;
+	unsigned cap;
+	unsigned err_cap;
+	u16 val;
+	u8 ext_type;
+	int ret;
+
+	if (!myri10ge_ecrc_enable || !bridge)
+		return;
+
+	/* check that the bridge is a root port */
+	cap = pci_find_capability(bridge, PCI_CAP_ID_EXP);
+	pci_read_config_word(bridge, cap + PCI_CAP_FLAGS, &val);
+	ext_type = (val & PCI_EXP_FLAGS_TYPE) >> 4;
+	if (ext_type != PCI_EXP_TYPE_ROOT_PORT) {
+		if (myri10ge_ecrc_enable > 1) {
+			struct pci_dev *old_bridge = bridge;
+
+			/* Walk the hierarchy up to the root port
+			 * where ECRC has to be enabled */
+			do {
+				bridge = bridge->bus->self;
+				if (!bridge) {
+					dev_err(dev,
+						"Failed to find root port"
+						" to force ECRC\n");
+					return;
+				}
+				cap =
+				    pci_find_capability(bridge, PCI_CAP_ID_EXP);
+				pci_read_config_word(bridge,
+						     cap + PCI_CAP_FLAGS, &val);
+				ext_type = (val & PCI_EXP_FLAGS_TYPE) >> 4;
+			} while (ext_type != PCI_EXP_TYPE_ROOT_PORT);
+
+			dev_info(dev,
+				 "Forcing ECRC on non-root port %s"
+				 " (enabling on root port %s)\n",
+				 pci_name(old_bridge), pci_name(bridge));
+		} else {
+			dev_err(dev,
+				"Not enabling ECRC on non-root port %s\n",
+				pci_name(bridge));
+			return;
+		}
+	}
+
+	cap = pci_find_ext_capability(bridge, PCI_EXT_CAP_ID_ERR);
+	/* nvidia ext cap is not always linked in ext cap chain */
+	if (!cap
+	    && bridge->vendor == PCI_VENDOR_ID_NVIDIA
+	    && bridge->device == PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_PCIE)
+		cap = 0x160;
+
+	if (!cap)
+		return;
+
+	ret = pci_read_config_dword(bridge, cap + PCI_ERR_CAP, &err_cap);
+	if (ret) {
+		dev_err(dev, "failed reading ext-conf-space of %s\n",
+			pci_name(bridge));
+		dev_err(dev, "\t pci=nommconf in use? "
+			"or buggy/incomplete/absent ACPI MCFG attr?\n");
+		return;
+	}
+	if (!(err_cap & PCI_ERR_CAP_ECRC_GENC))
+		return;
+
+	err_cap |= PCI_ERR_CAP_ECRC_GENE;
+	pci_write_config_dword(bridge, cap + PCI_ERR_CAP, err_cap);
+	dev_info(dev, "Enabled ECRC on upstream bridge %s\n", pci_name(bridge));
+	mgp->tx.boundary = 4096;
+	mgp->fw_name = myri10ge_fw_aligned;
+}
+
+/*
+ * The Lanai Z8E PCI-E interface achieves higher Read-DMA throughput
+ * when the PCI-E Completion packets are aligned on an 8-byte
+ * boundary.  Some PCI-E chip sets always align Completion packets; on
+ * the ones that do not, the alignment can be enforced by enabling
+ * ECRC generation (if supported).
+ *
+ * When PCI-E Completion packets are not aligned, it is actually more
+ * efficient to limit Read-DMA transactions to 2KB, rather than 4KB.
+ *
+ * If the driver can neither enable ECRC nor verify that it has
+ * already been enabled, then it must use a firmware image which works
+ * around unaligned completion packets (myri10ge_ethp_z8e.dat), and it
+ * should also ensure that it never gives the device a Read-DMA which is
+ * larger than 2KB by setting the tx.boundary to 2KB.  If ECRC is
+ * enabled, then the driver should use the aligned (myri10ge_eth_z8e.dat)
+ * firmware image, and set tx.boundary to 4KB.
+ */
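+/* In short, tx.boundary and fw_name must stay consistent: 4096 with the
+ * aligned firmware when completions are known to be aligned, 2048 with
+ * the unaligned firmware otherwise (see myri10ge_select_firmware() below). */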
+
+#define PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE	0x0132
+
+static void myri10ge_select_firmware(struct myri10ge_priv *mgp)
+{
+	struct pci_dev *bridge = mgp->pdev->bus->self;
+
+	mgp->tx.boundary = 2048;
+	mgp->fw_name = myri10ge_fw_unaligned;
+
+	if (myri10ge_force_firmware == 0) {
+		myri10ge_enable_ecrc(mgp);
+
+		/* Check to see if the upstream bridge is known to
+		 * provide aligned completions */
+		if (bridge
+		    /* ServerWorks HT2000/HT1000 */
+		    && bridge->vendor == PCI_VENDOR_ID_SERVERWORKS
+		    && bridge->device ==
+		    PCI_DEVICE_ID_SERVERWORKS_HT2000_PCIE) {
+			dev_info(&mgp->pdev->dev,
+				 "Assuming aligned completions (0x%x:0x%x)\n",
+				 bridge->vendor, bridge->device);
+			mgp->tx.boundary = 4096;
+			mgp->fw_name = myri10ge_fw_aligned;
+		}
+	} else {
+		if (myri10ge_force_firmware == 1) {
+			dev_info(&mgp->pdev->dev,
+				 "Assuming aligned completions (forced)\n");
+			mgp->tx.boundary = 4096;
+			mgp->fw_name = myri10ge_fw_aligned;
+		} else {
+			dev_info(&mgp->pdev->dev,
+				 "Assuming unaligned completions (forced)\n");
+			mgp->tx.boundary = 2048;
+			mgp->fw_name = myri10ge_fw_unaligned;
+		}
+	}
+	if (myri10ge_fw_name != NULL) {
+		dev_info(&mgp->pdev->dev, "overriding firmware to %s\n",
+			 myri10ge_fw_name);
+		mgp->fw_name = myri10ge_fw_name;
+	}
+}
+
+static void myri10ge_save_state(struct myri10ge_priv *mgp)
+{
+	struct pci_dev *pdev = mgp->pdev;
+	int cap;
+
+	pci_save_state(pdev);
+	/* now save PCIe and MSI state that Linux will not
+	 * save for us */
+	cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+	pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL, &mgp->devctl);
+	cap = pci_find_capability(pdev, PCI_CAP_ID_MSI);
+	pci_read_config_word(pdev, cap + PCI_MSI_FLAGS, &mgp->msi_flags);
+}
+
+static void myri10ge_restore_state(struct myri10ge_priv *mgp)
+{
+	struct pci_dev *pdev = mgp->pdev;
+	int cap;
+
+	/* restore PCIe and MSI state that Linux will not */
+	cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+	pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL, mgp->devctl);
+	cap = pci_find_capability(pdev, PCI_CAP_ID_MSI);
+	pci_write_config_word(pdev, cap + PCI_MSI_FLAGS, mgp->msi_flags);
+
+	pci_restore_state(pdev);
+}
+
+#ifdef CONFIG_PM
+
+static int myri10ge_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct myri10ge_priv *mgp;
+	struct net_device *netdev;
+
+	mgp = pci_get_drvdata(pdev);
+	if (mgp == NULL)
+		return -EINVAL;
+	netdev = mgp->dev;
+
+	netif_device_detach(netdev);
+	if (netif_running(netdev)) {
+		printk(KERN_INFO "myri10ge: closing %s\n", netdev->name);
+		rtnl_lock();
+		myri10ge_close(netdev);
+		rtnl_unlock();
+	}
+	myri10ge_dummy_rdma(mgp, 0);
+	free_irq(pdev->irq, mgp);
+	myri10ge_save_state(mgp);
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+	return 0;
+}
+
+static int myri10ge_resume(struct pci_dev *pdev)
+{
+	struct myri10ge_priv *mgp;
+	struct net_device *netdev;
+	int status;
+	u16 vendor;
+
+	mgp = pci_get_drvdata(pdev);
+	if (mgp == NULL)
+		return -EINVAL;
+	netdev = mgp->dev;
+	pci_set_power_state(pdev, 0);	/* zeros conf space as a side effect */
+	msleep(5);		/* give card time to respond */
+	pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor);
+	if (vendor == 0xffff) {
+		printk(KERN_ERR "myri10ge: %s: device disappeared!\n",
+		       mgp->dev->name);
+		return -EIO;
+	}
+	myri10ge_restore_state(mgp);
+	pci_enable_device(pdev);
+	pci_set_master(pdev);
+
+	status = request_irq(pdev->irq, myri10ge_intr, SA_SHIRQ,
+			     netdev->name, mgp);
+	if (status != 0) {
+		dev_err(&pdev->dev, "failed to allocate IRQ\n");
+		goto abort_with_msi;
+	}
+
+	myri10ge_reset(mgp);
+	myri10ge_dummy_rdma(mgp, mgp->tx.boundary != 4096);
+
+	/* Save configuration space to be restored if the
+	 * nic resets due to a parity error */
+	myri10ge_save_state(mgp);
+
+	if (netif_running(netdev)) {
+		rtnl_lock();
+		myri10ge_open(netdev);
+		rtnl_unlock();
+	}
+	netif_device_attach(netdev);
+
+	return 0;
+
+abort_with_msi:
+	return -EIO;
+
+}
+
+#endif				/* CONFIG_PM */
+
+static u32 myri10ge_read_reboot(struct myri10ge_priv *mgp)
+{
+	struct pci_dev *pdev = mgp->pdev;
+	int vs = mgp->vendor_specific_offset;
+	u32 reboot;
+
+	/* enter read32 mode */
+	pci_write_config_byte(pdev, vs + 0x10, 0x3);
+
+	/* read REBOOT_STATUS (0xfffffff0) */
+	pci_write_config_dword(pdev, vs + 0x18, 0xfffffff0);
+	pci_read_config_dword(pdev, vs + 0x14, &reboot);
+	return reboot;
+}
+
+/*
+ * This watchdog is used to check whether the board has suffered
+ * from a parity error and needs to be recovered.
+ */
+static void myri10ge_watchdog(void *arg)
+{
+	struct myri10ge_priv *mgp = arg;
+	u32 reboot;
+	int status;
+	u16 cmd, vendor;
+
+	mgp->watchdog_resets++;
+	pci_read_config_word(mgp->pdev, PCI_COMMAND, &cmd);
+	if ((cmd & PCI_COMMAND_MASTER) == 0) {
+		/* Bus master DMA disabled?  Check to see
+		 * if the card rebooted due to a parity error.
+		 * For now, just report it */
+		reboot = myri10ge_read_reboot(mgp);
+		printk(KERN_ERR
+		       "myri10ge: %s: NIC rebooted (0x%x), resetting\n",
+		       mgp->dev->name, reboot);
+		/*
+		 * A rebooted nic will come back with config space as
+		 * it was after power was applied to PCIe bus.
+		 * Attempt to restore config space which was saved
+		 * when the driver was loaded, or the last time the
+		 * nic was resumed from power saving mode.
+		 */
+		myri10ge_restore_state(mgp);
+	} else {
+		/* if we get back -1's from our slot, perhaps somebody
+		 * powered off our card.  Don't try to reset it in
+		 * this case */
+		if (cmd == 0xffff) {
+			pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor);
+			if (vendor == 0xffff) {
+				printk(KERN_ERR
+				       "myri10ge: %s: device disappeared!\n",
+				       mgp->dev->name);
+				return;
+			}
+		}
+		/* Perhaps it is a software error.  Try to reset */
+
+		printk(KERN_ERR "myri10ge: %s: device timeout, resetting\n",
+		       mgp->dev->name);
+		printk(KERN_INFO "myri10ge: %s: %d %d %d %d %d\n",
+		       mgp->dev->name, mgp->tx.req, mgp->tx.done,
+		       mgp->tx.pkt_start, mgp->tx.pkt_done,
+		       (int)ntohl(mgp->fw_stats->send_done_count));
+		msleep(2000);
+		printk(KERN_INFO "myri10ge: %s: %d %d %d %d %d\n",
+		       mgp->dev->name, mgp->tx.req, mgp->tx.done,
+		       mgp->tx.pkt_start, mgp->tx.pkt_done,
+		       (int)ntohl(mgp->fw_stats->send_done_count));
+	}
+	rtnl_lock();
+	myri10ge_close(mgp->dev);
+	status = myri10ge_load_firmware(mgp);
+	if (status != 0)
+		printk(KERN_ERR "myri10ge: %s: failed to load firmware\n",
+		       mgp->dev->name);
+	else
+		myri10ge_open(mgp->dev);
+	rtnl_unlock();
+}
+
+/*
+ * We use our own timer routine rather than relying upon
+ * netdev->tx_timeout because we have a very large hardware transmit
+ * queue.  Due to the large queue, the netdev->tx_timeout function
+ * cannot detect a NIC with a parity error in a timely fashion if the
+ * NIC is lightly loaded.
+ */
+static void myri10ge_watchdog_timer(unsigned long arg)
+{
+	struct myri10ge_priv *mgp;
+
+	mgp = (struct myri10ge_priv *)arg;
+	if (mgp->tx.req != mgp->tx.done &&
+	    mgp->tx.done == mgp->watchdog_tx_done)
+		/* nic seems like it might be stuck.. */
+		schedule_work(&mgp->watchdog_work);
+	else
+		/* rearm timer */
+		mod_timer(&mgp->watchdog_timer,
+			  jiffies + myri10ge_watchdog_timeout * HZ);
+
+	mgp->watchdog_tx_done = mgp->tx.done;
+}
+
+static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct net_device *netdev;
+	struct myri10ge_priv *mgp;
+	struct device *dev = &pdev->dev;
+	size_t bytes;
+	int i;
+	int status = -ENXIO;
+	int cap;
+	int dac_enabled;
+	u16 val;
+
+	netdev = alloc_etherdev(sizeof(*mgp));
+	if (netdev == NULL) {
+		dev_err(dev, "Could not allocate ethernet device\n");
+		return -ENOMEM;
+	}
+
+	mgp = netdev_priv(netdev);
+	memset(mgp, 0, sizeof(*mgp));
+	mgp->dev = netdev;
+	mgp->pdev = pdev;
+	mgp->csum_flag = MXGEFW_FLAGS_CKSUM;
+	mgp->pause = myri10ge_flow_control;
+	mgp->intr_coal_delay = myri10ge_intr_coal_delay;
+	init_waitqueue_head(&mgp->down_wq);
+
+	if (pci_enable_device(pdev)) {
+		dev_err(&pdev->dev, "pci_enable_device call failed\n");
+		status = -ENODEV;
+		goto abort_with_netdev;
+	}
+	myri10ge_select_firmware(mgp);
+
+	/* Find the vendor-specific cap so we can check
+	 * the reboot register later on */
+	mgp->vendor_specific_offset
+	    = pci_find_capability(pdev, PCI_CAP_ID_VNDR);
+
+	/* Set our max read request to 4KB */
+	cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+	if (cap < 64) {
+		dev_err(&pdev->dev, "Bad PCI_CAP_ID_EXP location %d\n", cap);
+		goto abort_with_netdev;
+	}
+	status = pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &val);
+	if (status != 0) {
+		dev_err(&pdev->dev, "Error %d reading PCI_EXP_DEVCTL\n",
+			status);
+		goto abort_with_netdev;
+	}
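+	/* The READRQ field occupies bits 14:12 of DEVCTL; the value 5
+	 * encodes a maximum read request size of 128 << 5 = 4096 bytes */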
+	val = (val & ~PCI_EXP_DEVCTL_READRQ) | (5 << 12);
+	status = pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, val);
+	if (status != 0) {
+		dev_err(&pdev->dev, "Error %d writing PCI_EXP_DEVCTL\n",
+			status);
+		goto abort_with_netdev;
+	}
+
+	pci_set_master(pdev);
+	dac_enabled = 1;
+	status = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
+	if (status != 0) {
+		dac_enabled = 0;
+		dev_err(&pdev->dev,
+			"64-bit pci address mask was refused, trying 32-bit");
+		status = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+	}
+	if (status != 0) {
+		dev_err(&pdev->dev, "Error %d setting DMA mask\n", status);
+		goto abort_with_netdev;
+	}
+	mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd),
+				      &mgp->cmd_bus, GFP_KERNEL);
+	if (mgp->cmd == NULL)
+		goto abort_with_netdev;
+
+	mgp->fw_stats = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->fw_stats),
+					   &mgp->fw_stats_bus, GFP_KERNEL);
+	if (mgp->fw_stats == NULL)
+		goto abort_with_cmd;
+
+	mgp->board_span = pci_resource_len(pdev, 0);
+	mgp->iomem_base = pci_resource_start(pdev, 0);
+	mgp->mtrr = -1;
+#ifdef CONFIG_MTRR
+	mgp->mtrr = mtrr_add(mgp->iomem_base, mgp->board_span,
+			     MTRR_TYPE_WRCOMB, 1);
+#endif
+	/* Hack: we need to get rid of these magic numbers */
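+	/* i.e. 2 MB of SRAM minus what are presumably two 48 KB and one
+	 * 32 KB reserved regions and a 0x100-byte pad: 1,965,824 bytes */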
+	mgp->sram_size =
+	    2 * 1024 * 1024 - (2 * (48 * 1024) + (32 * 1024)) - 0x100;
+	if (mgp->sram_size > mgp->board_span) {
+		dev_err(&pdev->dev, "board span %ld bytes too small\n",
+			mgp->board_span);
+		goto abort_with_wc;
+	}
+	mgp->sram = ioremap(mgp->iomem_base, mgp->board_span);
+	if (mgp->sram == NULL) {
+		dev_err(&pdev->dev, "ioremap failed for %ld bytes at 0x%lx\n",
+			mgp->board_span, mgp->iomem_base);
+		status = -ENXIO;
+		goto abort_with_wc;
+	}
+	memcpy_fromio(mgp->eeprom_strings,
+		      mgp->sram + mgp->sram_size - MYRI10GE_EEPROM_STRINGS_SIZE,
+		      MYRI10GE_EEPROM_STRINGS_SIZE);
+	memset(mgp->eeprom_strings + MYRI10GE_EEPROM_STRINGS_SIZE - 2, 0, 2);
+	status = myri10ge_read_mac_addr(mgp);
+	if (status)
+		goto abort_with_ioremap;
+
+	for (i = 0; i < ETH_ALEN; i++)
+		netdev->dev_addr[i] = mgp->mac_addr[i];
+
+	/* allocate rx done ring */
+	bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry);
+	mgp->rx_done.entry = dma_alloc_coherent(&pdev->dev, bytes,
+						&mgp->rx_done.bus, GFP_KERNEL);
+	if (mgp->rx_done.entry == NULL)
+		goto abort_with_ioremap;
+	memset(mgp->rx_done.entry, 0, bytes);
+
+	status = myri10ge_load_firmware(mgp);
+	if (status != 0) {
+		dev_err(&pdev->dev, "failed to load firmware\n");
+		goto abort_with_rx_done;
+	}
+
+	status = myri10ge_reset(mgp);
+	if (status != 0) {
+		dev_err(&pdev->dev, "failed reset\n");
+		goto abort_with_firmware;
+	}
+
+	if (myri10ge_msi) {
+		status = pci_enable_msi(pdev);
+		if (status != 0)
+			dev_err(&pdev->dev,
+				"Error %d setting up MSI; falling back to xPIC\n",
+				status);
+		else
+			mgp->msi_enabled = 1;
+	}
+
+	status = request_irq(pdev->irq, myri10ge_intr, SA_SHIRQ,
+			     netdev->name, mgp);
+	if (status != 0) {
+		dev_err(&pdev->dev, "failed to allocate IRQ\n");
+		goto abort_with_firmware;
+	}
+
+	pci_set_drvdata(pdev, mgp);
+	if ((myri10ge_initial_mtu + ETH_HLEN) > MYRI10GE_MAX_ETHER_MTU)
+		myri10ge_initial_mtu = MYRI10GE_MAX_ETHER_MTU - ETH_HLEN;
+	if ((myri10ge_initial_mtu + ETH_HLEN) < 68)
+		myri10ge_initial_mtu = 68;
+	netdev->mtu = myri10ge_initial_mtu;
+	netdev->open = myri10ge_open;
+	netdev->stop = myri10ge_close;
+	netdev->hard_start_xmit = myri10ge_xmit;
+	netdev->get_stats = myri10ge_get_stats;
+	netdev->base_addr = mgp->iomem_base;
+	netdev->irq = pdev->irq;
+	netdev->change_mtu = myri10ge_change_mtu;
+	netdev->set_multicast_list = myri10ge_set_multicast_list;
+	netdev->set_mac_address = myri10ge_set_mac_address;
+	netdev->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_TSO;
+	if (dac_enabled)
+		netdev->features |= NETIF_F_HIGHDMA;
+	netdev->poll = myri10ge_poll;
+	netdev->weight = myri10ge_napi_weight;
+
+	/* Save configuration space to be restored if the
+	 * nic resets due to a parity error */
+	myri10ge_save_state(mgp);
+	/* Restore state immediately since pci_save_msi_state disables MSI */
+	myri10ge_restore_state(mgp);
+
+	/* Setup the watchdog timer */
+	setup_timer(&mgp->watchdog_timer, myri10ge_watchdog_timer,
+		    (unsigned long)mgp);
+
+	SET_ETHTOOL_OPS(netdev, &myri10ge_ethtool_ops);
+	INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog, mgp);
+	status = register_netdev(netdev);
+	if (status != 0) {
+		dev_err(&pdev->dev, "register_netdev failed: %d\n", status);
+		goto abort_with_irq;
+	}
+
+	printk(KERN_INFO "myri10ge: %s: %s IRQ %d, tx bndry %d, fw %s, WC %s\n",
+	       netdev->name, (mgp->msi_enabled ? "MSI" : "xPIC"),
+	       pdev->irq, mgp->tx.boundary, mgp->fw_name,
+	       (mgp->mtrr >= 0 ? "Enabled" : "Disabled"));
+
+	return 0;
+
+abort_with_irq:
+	free_irq(pdev->irq, mgp);
+	if (mgp->msi_enabled)
+		pci_disable_msi(pdev);
+
+abort_with_firmware:
+	myri10ge_dummy_rdma(mgp, 0);
+
+abort_with_rx_done:
+	bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry);
+	dma_free_coherent(&pdev->dev, bytes,
+			  mgp->rx_done.entry, mgp->rx_done.bus);
+
+abort_with_ioremap:
+	iounmap(mgp->sram);
+
+abort_with_wc:
+#ifdef CONFIG_MTRR
+	if (mgp->mtrr >= 0)
+		mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span);
+#endif
+	dma_free_coherent(&pdev->dev, sizeof(*mgp->fw_stats),
+			  mgp->fw_stats, mgp->fw_stats_bus);
+
+abort_with_cmd:
+	dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
+			  mgp->cmd, mgp->cmd_bus);
+
+abort_with_netdev:
+
+	free_netdev(netdev);
+	return status;
+}
+
+/*
+ * myri10ge_remove
+ *
+ * Does what is necessary to shut down one Myrinet device. Called
+ *   once for each Myrinet card by the kernel when the module is
+ *   unloaded.
+ */
+static void myri10ge_remove(struct pci_dev *pdev)
+{
+	struct myri10ge_priv *mgp;
+	struct net_device *netdev;
+	size_t bytes;
+
+	mgp = pci_get_drvdata(pdev);
+	if (mgp == NULL)
+		return;
+
+	flush_scheduled_work();
+	netdev = mgp->dev;
+	unregister_netdev(netdev);
+	free_irq(pdev->irq, mgp);
+	if (mgp->msi_enabled)
+		pci_disable_msi(pdev);
+
+	myri10ge_dummy_rdma(mgp, 0);
+
+	bytes = myri10ge_max_intr_slots * sizeof(*mgp->rx_done.entry);
+	dma_free_coherent(&pdev->dev, bytes,
+			  mgp->rx_done.entry, mgp->rx_done.bus);
+
+	iounmap(mgp->sram);
+
+#ifdef CONFIG_MTRR
+	if (mgp->mtrr >= 0)
+		mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span);
+#endif
+	dma_free_coherent(&pdev->dev, sizeof(*mgp->fw_stats),
+			  mgp->fw_stats, mgp->fw_stats_bus);
+
+	dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
+			  mgp->cmd, mgp->cmd_bus);
+
+	free_netdev(netdev);
+	pci_set_drvdata(pdev, NULL);
+}
+
+#define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E 	0x0008
+
+static struct pci_device_id myri10ge_pci_tbl[] = {
+	{PCI_DEVICE(PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E)},
+	{0},
+};
+
+static struct pci_driver myri10ge_driver = {
+	.name = "myri10ge",
+	.probe = myri10ge_probe,
+	.remove = myri10ge_remove,
+	.id_table = myri10ge_pci_tbl,
+#ifdef CONFIG_PM
+	.suspend = myri10ge_suspend,
+	.resume = myri10ge_resume,
+#endif
+};
+
+static __init int myri10ge_init_module(void)
+{
+	printk(KERN_INFO "%s: Version %s\n", myri10ge_driver.name,
+	       MYRI10GE_VERSION_STR);
+	return pci_register_driver(&myri10ge_driver);
+}
+
+module_init(myri10ge_init_module);
+
+static __exit void myri10ge_cleanup_module(void)
+{
+	pci_unregister_driver(&myri10ge_driver);
+}
+
+module_exit(myri10ge_cleanup_module);
diff --git a/drivers/net/myri10ge/myri10ge_mcp.h b/drivers/net/myri10ge/myri10ge_mcp.h
new file mode 100644
index 0000000..0a6cae6
--- /dev/null
+++ b/drivers/net/myri10ge/myri10ge_mcp.h
@@ -0,0 +1,205 @@
+#ifndef __MYRI10GE_MCP_H__
+#define __MYRI10GE_MCP_H__
+
+#define MXGEFW_VERSION_MAJOR	1
+#define MXGEFW_VERSION_MINOR	4
+
+/* 8 Bytes */
+struct mcp_dma_addr {
+	u32 high;
+	u32 low;
+};
+
+/* 4 Bytes */
+struct mcp_slot {
+	u16 checksum;
+	u16 length;
+};
+
+/* 64 Bytes */
+struct mcp_cmd {
+	u32 cmd;
+	u32 data0;		/* will be low portion if data > 32 bits */
+	/* 8 */
+	u32 data1;		/* will be high portion if data > 32 bits */
+	u32 data2;		/* currently unused.. */
+	/* 16 */
+	struct mcp_dma_addr response_addr;
+	/* 24 */
+	u8 pad[40];
+};
+
+/* 8 Bytes */
+struct mcp_cmd_response {
+	u32 data;
+	u32 result;
+};
+
+/*
+ * flags used in mcp_kreq_ether_send_t:
+ *
+ * The SMALL flag is only needed in the first segment. It is set
+ * for packets whose total length is less than or equal to 512 bytes.
+ *
+ * The CKSUM flag must be set in all segments.
+ *
+ * The PADDED flag is set if the packet needs to be padded, and it
+ * must be set for all segments.
+ *
+ * The MXGEFW_FLAGS_ALIGN_ODD flag must be set if the cumulative
+ * length of all previous segments was odd.
+ */
+
+#define MXGEFW_FLAGS_SMALL      0x1
+#define MXGEFW_FLAGS_TSO_HDR    0x1
+#define MXGEFW_FLAGS_FIRST      0x2
+#define MXGEFW_FLAGS_ALIGN_ODD  0x4
+#define MXGEFW_FLAGS_CKSUM      0x8
+#define MXGEFW_FLAGS_TSO_LAST   0x8
+#define MXGEFW_FLAGS_NO_TSO     0x10
+#define MXGEFW_FLAGS_TSO_CHOP   0x10
+#define MXGEFW_FLAGS_TSO_PLD    0x20
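+/* Note that several flag values are overloaded: SMALL/TSO_HDR,
+ * CKSUM/TSO_LAST and NO_TSO/TSO_CHOP share bit values, with the TSO_*
+ * meanings applying to send requests that are part of a TSO chain. */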
+
+#define MXGEFW_SEND_SMALL_SIZE  1520
+#define MXGEFW_MAX_MTU          9400
+
+union mcp_pso_or_cumlen {
+	u16 pseudo_hdr_offset;
+	u16 cum_len;
+};
+
+#define	MXGEFW_MAX_SEND_DESC 12
+#define MXGEFW_PAD	    2
+
+/* 16 Bytes */
+struct mcp_kreq_ether_send {
+	u32 addr_high;
+	u32 addr_low;
+	u16 pseudo_hdr_offset;
+	u16 length;
+	u8 pad;
+	u8 rdma_count;
+	u8 cksum_offset;	/* where to start computing cksum */
+	u8 flags;		/* as defined above */
+};
+
+/* 8 Bytes */
+struct mcp_kreq_ether_recv {
+	u32 addr_high;
+	u32 addr_low;
+};
+
+/* Commands */
+
+#define MXGEFW_CMD_OFFSET 0xf80000
+
+enum myri10ge_mcp_cmd_type {
+	MXGEFW_CMD_NONE = 0,
+	/* Reset the mcp, it is left in a safe state, waiting
+	 * for the driver to set all its parameters */
+	MXGEFW_CMD_RESET,
+
+	/* get the version number of the current firmware
+	 * (this may also be available in the eeprom strings) */
+	MXGEFW_GET_MCP_VERSION,
+
+	/* Parameters which must be set by the driver before it can
+	 * issue MXGEFW_CMD_ETHERNET_UP. They persist until the next
+	 * MXGEFW_CMD_RESET is issued */
+
+	MXGEFW_CMD_SET_INTRQ_DMA,
+	MXGEFW_CMD_SET_BIG_BUFFER_SIZE,	/* in bytes, power of 2 */
+	MXGEFW_CMD_SET_SMALL_BUFFER_SIZE,	/* in bytes */
+
+	/* Parameters which refer to lanai SRAM addresses where the
+	 * driver must issue PIO writes for various things */
+
+	MXGEFW_CMD_GET_SEND_OFFSET,
+	MXGEFW_CMD_GET_SMALL_RX_OFFSET,
+	MXGEFW_CMD_GET_BIG_RX_OFFSET,
+	MXGEFW_CMD_GET_IRQ_ACK_OFFSET,
+	MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET,
+
+	/* Parameters which refer to rings stored on the MCP,
+	 * and whose size is controlled by the mcp */
+
+	MXGEFW_CMD_GET_SEND_RING_SIZE,	/* in bytes */
+	MXGEFW_CMD_GET_RX_RING_SIZE,	/* in bytes */
+
+	/* Parameters which refer to rings stored in the host,
+	 * and whose size is controlled by the host.  Note that
+	 * all must be physically contiguous and must contain
+	 * a power of 2 number of entries.  */
+
+	MXGEFW_CMD_SET_INTRQ_SIZE,	/* in bytes */
+
+	/* command to bring ethernet interface up.  Above parameters
+	 * (plus mtu & mac address) must have been exchanged prior
+	 * to issuing this command  */
+	MXGEFW_CMD_ETHERNET_UP,
+
+	/* command to bring ethernet interface down.  No further sends
+	 * or receives may be processed until an MXGEFW_CMD_ETHERNET_UP
+	 * is issued, and all interrupt queues must be flushed prior
+	 * to ack'ing this command */
+
+	MXGEFW_CMD_ETHERNET_DOWN,
+
+	/* commands the driver may issue live, without resetting
+	 * the nic.  Note that increasing the mtu "live" should
+	 * only be done if the driver has already supplied buffers
+	 * sufficiently large to handle the new mtu.  Decreasing
+	 * the mtu live is safe */
+
+	MXGEFW_CMD_SET_MTU,
+	MXGEFW_CMD_GET_INTR_COAL_DELAY_OFFSET,	/* in microseconds */
+	MXGEFW_CMD_SET_STATS_INTERVAL,	/* in microseconds */
+	MXGEFW_CMD_SET_STATS_DMA,
+
+	MXGEFW_ENABLE_PROMISC,
+	MXGEFW_DISABLE_PROMISC,
+	MXGEFW_SET_MAC_ADDRESS,
+
+	MXGEFW_ENABLE_FLOW_CONTROL,
+	MXGEFW_DISABLE_FLOW_CONTROL,
+
+	/* do a DMA test
+	 * data0,data1 = DMA address
+	 * data2       = RDMA length (MSH), WDMA length (LSH)
+	 * command return data = repetitions (MSH), 0.5-ms ticks (LSH)
+	 */
+	MXGEFW_DMA_TEST
+};
+
+enum myri10ge_mcp_cmd_status {
+	MXGEFW_CMD_OK = 0,
+	MXGEFW_CMD_UNKNOWN,
+	MXGEFW_CMD_ERROR_RANGE,
+	MXGEFW_CMD_ERROR_BUSY,
+	MXGEFW_CMD_ERROR_EMPTY,
+	MXGEFW_CMD_ERROR_CLOSED,
+	MXGEFW_CMD_ERROR_HASH_ERROR,
+	MXGEFW_CMD_ERROR_BAD_PORT,
+	MXGEFW_CMD_ERROR_RESOURCES
+};
+
+/* 40 Bytes */
+struct mcp_irq_data {
+	u32 send_done_count;
+
+	u32 link_up;
+	u32 dropped_link_overflow;
+	u32 dropped_link_error_or_filtered;
+	u32 dropped_runt;
+	u32 dropped_overrun;
+	u32 dropped_no_small_buffer;
+	u32 dropped_no_big_buffer;
+	u32 rdma_tags_available;
+
+	u8 tx_stopped;
+	u8 link_down;
+	u8 stats_updated;
+	u8 valid;
+};
+
+#endif				/* __MYRI10GE_MCP_H__ */
diff --git a/drivers/net/myri10ge/myri10ge_mcp_gen_header.h b/drivers/net/myri10ge/myri10ge_mcp_gen_header.h
new file mode 100644
index 0000000..487f779
--- /dev/null
+++ b/drivers/net/myri10ge/myri10ge_mcp_gen_header.h
@@ -0,0 +1,58 @@
+#ifndef __MYRI10GE_MCP_GEN_HEADER_H__
+#define __MYRI10GE_MCP_GEN_HEADER_H__
+
+/* This file defines a standard header used as a first entry point to
+ * exchange information between the firmware (MCP) and the driver.  The
+ * header structure can be anywhere in the mcp. It will usually be in
+ * the .data section, because some fields need to be initialized at
+ * compile time.
+ * The 32bit word at offset MCP_HEADER_PTR_OFFSET in the mcp must
+ * contain the location of the header.
+ *
+ * Typically a MCP will start with the following:
+ * .text
+ * .space 52    ! to help catch MEMORY_INT errors
+ * bt start     ! jump to real code
+ * nop
+ * .long _gen_mcp_header
+ *
+ * The source will have a definition like:
+ *
+ * mcp_gen_header_t gen_mcp_header = {
+ * .header_length = sizeof(mcp_gen_header_t),
+ * .mcp_type = MCP_TYPE_XXX,
+ * .version = "something $Id: mcp_gen_header.h,v 1.2 2006/05/13 10:04:35 bgoglin Exp $",
+ * .mcp_globals = (unsigned)&Globals
+ * };
+ */
+
+#define MCP_HEADER_PTR_OFFSET  0x3c
+
+#define MCP_TYPE_MX 0x4d582020	/* "MX  " */
+#define MCP_TYPE_PCIE 0x70636965	/* "PCIE" pcie-only MCP */
+#define MCP_TYPE_ETH 0x45544820	/* "ETH " */
+#define MCP_TYPE_MCP0 0x4d435030	/* "MCP0" */
+
+struct mcp_gen_header {
+	/* the first 4 fields are filled at compile time */
+	unsigned header_length;
+	unsigned mcp_type;
+	char version[128];
+	unsigned mcp_globals;	/* pointer to mcp-type specific structure */
+
+	/* filled by the MCP at run-time */
+	unsigned sram_size;
+	unsigned string_specs;	/* either the original STRING_SPECS or a superset */
+	unsigned string_specs_len;
+
+	/* Fields above this comment are guaranteed to be present.
+	 *
+	 * Fields below this comment are extensions added in later versions
+	 * of this struct; drivers should compare the header_length against
+	 * offsetof(field) to check whether a given MCP implements them.
+	 *
+	 * Never remove any field.  Keep everything naturally aligned.
+	 */
+};
+
+#endif				/* __MYRI10GE_MCP_GEN_HEADER_H__ */
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 66e74f7..bf58db2 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -107,7 +107,7 @@
 
 	if(!configured) {
 		printk("netconsole: not configured, aborting\n");
-		return -EINVAL;
+		return 0;
 	}
 
 	if(netpoll_setup(&np))
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c
index 4260c21..a8f6bfc 100644
--- a/drivers/net/pcmcia/nmclan_cs.c
+++ b/drivers/net/pcmcia/nmclan_cs.c
@@ -1204,7 +1204,7 @@
 
 	dev->last_rx = jiffies;
 	lp->linux_stats.rx_packets++;
-	lp->linux_stats.rx_bytes += skb->len;
+	lp->linux_stats.rx_bytes += pkt_len;
 	outb(0xFF, ioaddr + AM2150_RCV_NEXT); /* skip to next frame */
 	continue;
       } else {
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index d090df4..661bfe5 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -12,7 +12,7 @@
     Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net
 
     pcnet_cs.c 1.153 2003/11/09 18:53:09
-    
+
     The network driver code is based on Donald Becker's NE2000 code:
 
     Written 1992,1993 by Donald Becker.
@@ -146,7 +146,7 @@
 #define MII_PHYID_REG2		0x03
 
 static hw_info_t hw_info[] = {
-    { /* Accton EN2212 */ 0x0ff0, 0x00, 0x00, 0xe8, DELAY_OUTPUT }, 
+    { /* Accton EN2212 */ 0x0ff0, 0x00, 0x00, 0xe8, DELAY_OUTPUT },
     { /* Allied Telesis LA-PCM */ 0x0ff0, 0x00, 0x00, 0xf4, 0 },
     { /* APEX MultiCard */ 0x03f4, 0x00, 0x20, 0xe5, 0 },
     { /* ASANTE FriendlyNet */ 0x4910, 0x00, 0x00, 0x94,
@@ -193,7 +193,7 @@
     { /* NE2000 Compatible */ 0x0ff0, 0x00, 0xa0, 0x0c, 0 },
     { /* Network General Sniffer */ 0x0ff0, 0x00, 0x00, 0x65,
       HAS_MISC_REG | HAS_IBM_MISC },
-    { /* Panasonic VEL211 */ 0x0ff0, 0x00, 0x80, 0x45, 
+    { /* Panasonic VEL211 */ 0x0ff0, 0x00, 0x80, 0x45,
       HAS_MISC_REG | HAS_IBM_MISC },
     { /* PreMax PE-200 */ 0x07f0, 0x00, 0x20, 0xe0, 0 },
     { /* RPTI EP400 */ 0x0110, 0x00, 0x40, 0x95, 0 },
@@ -330,7 +330,7 @@
 	for (j = 0; j < 6; j++)
 	    dev->dev_addr[j] = readb(base + (j<<1));
     }
-    
+
     iounmap(virt);
     j = pcmcia_release_window(link->win);
     if (j != CS_SUCCESS)
@@ -490,7 +490,7 @@
 	if (link->io.NumPorts2 > 0) {
 	    /* for master/slave multifunction cards */
 	    link->io.Attributes2 = IO_DATA_PATH_WIDTH_8;
-	    link->irq.Attributes = 
+	    link->irq.Attributes =
 		IRQ_TYPE_DYNAMIC_SHARING|IRQ_FIRST_SHARED;
 	}
     } else {
@@ -543,19 +543,19 @@
 	manfid = le16_to_cpu(buf[0]);
 	prodid = le16_to_cpu(buf[1]);
     }
-    
+
     tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
     tuple.Attributes = 0;
     CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
     while (last_ret == CS_SUCCESS) {
 	cistpl_cftable_entry_t *cfg = &(parse.cftable_entry);
 	cistpl_io_t *io = &(parse.cftable_entry.io);
-	
+
 	if (pcmcia_get_tuple_data(link, &tuple) != 0 ||
 			pcmcia_parse_tuple(link, &tuple, &parse) != 0 ||
 			cfg->index == 0 || cfg->io.nwin == 0)
 		goto next_entry;
-	
+
 	link->conf.ConfigIndex = cfg->index;
 	/* For multifunction cards, by convention, we configure the
 	   network function with window 0, and serial with window 1 */
@@ -584,7 +584,7 @@
     }
 
     CS_CHECK(RequestIRQ, pcmcia_request_irq(link, &link->irq));
-    
+
     if (link->io.NumPorts2 == 8) {
 	link->conf.Attributes |= CONF_ENABLE_SPKR;
 	link->conf.Status = CCSR_AUDIO_ENA;
@@ -592,7 +592,7 @@
     if ((manfid == MANFID_IBM) &&
 	(prodid == PRODID_IBM_HOME_AND_AWAY))
 	link->conf.ConfigIndex |= 0x10;
-    
+
     CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link, &link->conf));
     dev->irq = link->irq.AssignedIRQ;
     dev->base_addr = link->io.BasePort1;
@@ -614,7 +614,7 @@
 	hw_info = get_ax88190(link);
     if (hw_info == NULL)
 	hw_info = get_hwired(link);
-    
+
     if (hw_info == NULL) {
 	printk(KERN_NOTICE "pcnet_cs: unable to read hardware net"
 	       " address for io base %#3lx\n", dev->base_addr);
@@ -631,7 +631,7 @@
 	info->flags &= ~USE_BIG_BUF;
     if (!use_big_buf)
 	info->flags &= ~USE_BIG_BUF;
-    
+
     if (info->flags & USE_BIG_BUF) {
 	start_pg = SOCKET_START_PG;
 	stop_pg = SOCKET_STOP_PG;
@@ -929,7 +929,7 @@
     kio_addr_t nic_base = dev->base_addr;
     pcnet_dev_t *info = PRIV(dev);
     u_char tmp;
-    
+
     if (info->flags & HAS_MISC_REG) {
 	tmp = inb_p(nic_base + PCNET_MISC) & ~3;
 	if (dev->if_port == 2)
@@ -1022,7 +1022,7 @@
 
     ei_close(dev);
     free_irq(dev->irq, dev);
-    
+
     link->open--;
     netif_stop_queue(dev);
     del_timer_sync(&info->watchdog);
@@ -1054,12 +1054,12 @@
 	udelay(100);
     }
     outb_p(ENISR_RESET, nic_base + EN0_ISR); /* Ack intr. */
-    
+
     if (i == 100)
 	printk(KERN_ERR "%s: pcnet_reset_8390() did not complete.\n",
 	       dev->name);
     set_misc_reg(dev);
-    
+
 } /* pcnet_reset_8390 */
 
 /*====================================================================*/
@@ -1233,7 +1233,7 @@
 	       dev->name, ei_status.dmaing, ei_status.irqlock);
 	return;
     }
-    
+
     ei_status.dmaing |= 0x01;
     outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base + PCNET_CMD);
     outb_p(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO);
@@ -1458,7 +1458,7 @@
     void __iomem *xfer_start = ei_status.mem + (TX_PAGES<<8)
 				+ (ring_page << 8)
 				- (ei_status.rx_start_page << 8);
-    
+
     copyin(hdr, xfer_start, sizeof(struct e8390_pkt_hdr));
     /* Fix for big endian systems */
     hdr->count = le16_to_cpu(hdr->count);
@@ -1473,7 +1473,7 @@
     unsigned long offset = (TX_PAGES<<8) + ring_offset
 				- (ei_status.rx_start_page << 8);
     char *buf = skb->data;
-    
+
     if (offset + count > ei_status.priv) {
 	/* We must wrap the input move. */
 	int semi_count = ei_status.priv - offset;
@@ -1541,7 +1541,7 @@
 	info->base = NULL; link->win = NULL;
 	goto failed;
     }
-    
+
     ei_status.mem = info->base + offset;
     ei_status.priv = req.Size;
     dev->mem_start = (u_long)ei_status.mem;
@@ -1768,6 +1768,8 @@
 	PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "NE2K.cis"),
 	PCMCIA_DEVICE_CIS_PROD_ID12("PMX   ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "PE-200.cis"),
 	PCMCIA_DEVICE_CIS_PROD_ID12("TAMARACK", "Ethernet", 0xcf434fba, 0x00b2e941, "tamarack.cis"),
+	PCMCIA_DEVICE_PROD_ID123("Fast Ethernet", "CF Size PC Card", "1.0",
+		0xb4be14e3, 0x43ac239b, 0x0877b627),
 	PCMCIA_DEVICE_NULL
 };
 MODULE_DEVICE_TABLE(pcmcia, pcnet_ids);
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index fa39b94..cda3e53 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -45,5 +45,11 @@
 	---help---
 	  Currently supports the cis8204
 
+config SMSC_PHY
+	tristate "Drivers for SMSC PHYs"
+	depends on PHYLIB
+	---help---
+	  Currently supports the LAN83C185 PHY
+
 endmenu
 
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index e4116a5..d961413 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -8,3 +8,4 @@
 obj-$(CONFIG_CICADA_PHY)	+= cicada.o
 obj-$(CONFIG_LXT_PHY)		+= lxt.o
 obj-$(CONFIG_QSEMI_PHY)		+= qsemi.o
+obj-$(CONFIG_SMSC_PHY)		+= smsc.o
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
new file mode 100644
index 0000000..25e31fb
--- /dev/null
+++ b/drivers/net/phy/smsc.c
@@ -0,0 +1,101 @@
+/*
+ * drivers/net/phy/smsc.c
+ *
+ * Driver for SMSC PHYs
+ *
+ * Author: Herbert Valerio Riedel
+ *
+ * Copyright (c) 2006 Herbert Valerio Riedel <hvr@gnu.org>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+#include <linux/netdevice.h>
+
+#define MII_LAN83C185_ISF 29 /* Interrupt Source Flags */
+#define MII_LAN83C185_IM  30 /* Interrupt Mask */
+
+#define MII_LAN83C185_ISF_INT1 (1<<1) /* Auto-Negotiation Page Received */
+#define MII_LAN83C185_ISF_INT2 (1<<2) /* Parallel Detection Fault */
+#define MII_LAN83C185_ISF_INT3 (1<<3) /* Auto-Negotiation LP Ack */
+#define MII_LAN83C185_ISF_INT4 (1<<4) /* Link Down */
+#define MII_LAN83C185_ISF_INT5 (1<<5) /* Remote Fault Detected */
+#define MII_LAN83C185_ISF_INT6 (1<<6) /* Auto-Negotiation complete */
+#define MII_LAN83C185_ISF_INT7 (1<<7) /* ENERGYON */
+
+#define MII_LAN83C185_ISF_INT_ALL (0x0e)
+
+#define MII_LAN83C185_ISF_INT_PHYLIB_EVENTS \
+	(MII_LAN83C185_ISF_INT6 | MII_LAN83C185_ISF_INT4)
+
+
+static int lan83c185_config_intr(struct phy_device *phydev)
+{
+	int rc = phy_write (phydev, MII_LAN83C185_IM,
+			((PHY_INTERRUPT_ENABLED == phydev->interrupts)
+			? MII_LAN83C185_ISF_INT_PHYLIB_EVENTS
+			: 0));
+
+	return rc < 0 ? rc : 0;
+}
+
+static int lan83c185_ack_interrupt(struct phy_device *phydev)
+{
+	int rc = phy_read (phydev, MII_LAN83C185_ISF);
+
+	return rc < 0 ? rc : 0;
+}
+
+static int lan83c185_config_init(struct phy_device *phydev)
+{
+	return lan83c185_ack_interrupt (phydev);
+}
+
+
+static struct phy_driver lan83c185_driver = {
+	.phy_id		= 0x0007c0a0, /* OUI=0x00800f, Model#=0x0a */
+	.phy_id_mask	= 0xfffffff0,
+	.name		= "SMSC LAN83C185",
+
+	.features	= (PHY_BASIC_FEATURES | SUPPORTED_Pause
+				| SUPPORTED_Asym_Pause),
+	.flags		= PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG,
+
+	/* basic functions */
+	.config_aneg	= genphy_config_aneg,
+	.read_status	= genphy_read_status,
+	.config_init	= lan83c185_config_init,
+
+	/* IRQ related */
+	.ack_interrupt	= lan83c185_ack_interrupt,
+	.config_intr	= lan83c185_config_intr,
+
+	.driver		= { .owner = THIS_MODULE, }
+};
+
+static int __init smsc_init(void)
+{
+	return phy_driver_register (&lan83c185_driver);
+}
+
+static void __exit smsc_exit(void)
+{
+	phy_driver_unregister (&lan83c185_driver);
+}
+
+MODULE_DESCRIPTION("SMSC PHY driver");
+MODULE_AUTHOR("Herbert Valerio Riedel");
+MODULE_LICENSE("GPL");
+
+module_init(smsc_init);
+module_exit(smsc_exit);
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index 475dc93..0d101a1 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -861,6 +861,9 @@
 		 * give dev_queue_xmit something it can free.
 		 */
 		skb2 = skb_clone(skb, GFP_ATOMIC);
+
+		if (skb2 == NULL)
+			goto abort;
 	}
 
 	ph = (struct pppoe_hdr *) skb_push(skb2, sizeof(struct pppoe_hdr));
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 0ad3310..9945cc6 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -184,6 +184,7 @@
 
 static struct pci_device_id rtl8169_pci_tbl[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8169), },
+	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8129), },
 	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK,	0x4300), },
 	{ PCI_DEVICE(0x16ec,			0x0116), },
 	{ PCI_VENDOR_ID_LINKSYS,		0x1032, PCI_ANY_ID, 0x0024, },
diff --git a/drivers/net/s2io-regs.h b/drivers/net/s2io-regs.h
index 00179bc..0ef5258 100644
--- a/drivers/net/s2io-regs.h
+++ b/drivers/net/s2io-regs.h
@@ -167,6 +167,7 @@
 	u8 unused4[0x08];
 
 	u64 gpio_int_reg;
+#define GPIO_INT_REG_DP_ERR_INT                BIT(0)
 #define GPIO_INT_REG_LINK_DOWN                 BIT(1)
 #define GPIO_INT_REG_LINK_UP                   BIT(2)
 	u64 gpio_int_mask;
@@ -187,7 +188,7 @@
 /* PIC Control registers */
 	u64 pic_control;
 #define PIC_CNTL_RX_ALARM_MAP_1                BIT(0)
-#define PIC_CNTL_SHARED_SPLITS(n)              vBIT(n,11,4)
+#define PIC_CNTL_SHARED_SPLITS(n)              vBIT(n,11,5)
 
 	u64 swapper_ctrl;
 #define SWAPPER_CTRL_PIF_R_FE                  BIT(0)
@@ -267,6 +268,21 @@
 
 	/* General Configuration */
 	u64 mdio_control;
+#define MDIO_MMD_INDX_ADDR(val)		vBIT(val, 0, 16)
+#define MDIO_MMD_DEV_ADDR(val)		vBIT(val, 19, 5)
+#define MDIO_MMD_PMA_DEV_ADDR		0x1
+#define MDIO_MMD_PMD_DEV_ADDR		0x1
+#define MDIO_MMD_WIS_DEV_ADDR		0x2
+#define MDIO_MMD_PCS_DEV_ADDR		0x3
+#define MDIO_MMD_PHYXS_DEV_ADDR		0x4
+#define MDIO_MMS_PRT_ADDR(val)		vBIT(val, 27, 5)
+#define MDIO_CTRL_START_TRANS(val)	vBIT(val, 56, 4)
+#define MDIO_OP(val)			vBIT(val, 60, 2)
+#define MDIO_OP_ADDR_TRANS		0x0
+#define MDIO_OP_WRITE_TRANS		0x1
+#define MDIO_OP_READ_POST_INC_TRANS	0x2
+#define MDIO_OP_READ_TRANS		0x3
+#define MDIO_MDIO_DATA(val)		vBIT(val, 32, 16)
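+/* These fields are presumably OR'd together into a single 64-bit write to
+ * mdio_control to form an MDIO transaction: the register index, MMD device
+ * address, port, opcode, start code and (for writes) the data value. */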
 
 	u64 dtx_control;
 
@@ -284,9 +300,13 @@
 	u64 gpio_control;
 #define GPIO_CTRL_GPIO_0		BIT(8)
 	u64 misc_control;
+#define EXT_REQ_EN			BIT(1)
 #define MISC_LINK_STABILITY_PRD(val)   vBIT(val,29,3)
 
-	u8 unused7_1[0x240 - 0x208];
+	u8 unused7_1[0x230 - 0x208];
+
+	u64 pic_control2;
+	u64 ini_dperr_ctrl;
 
 	u64 wreq_split_mask;
 #define	WREQ_SPLIT_MASK_SET_MASK(val)	vBIT(val, 52, 12)
@@ -493,6 +513,7 @@
 #define PRC_CTRL_NO_SNOOP_DESC                 BIT(22)
 #define PRC_CTRL_NO_SNOOP_BUFF                 BIT(23)
 #define PRC_CTRL_BIMODAL_INTERRUPT             BIT(37)
+#define PRC_CTRL_GROUP_READS                   BIT(38)
 #define PRC_CTRL_RXD_BACKOFF_INTERVAL(val)     vBIT(val,40,24)
 
 	u64 prc_alarm_action;
@@ -541,7 +562,12 @@
 #define RX_PA_CFG_IGNORE_LLC_CTRL          BIT(3)
 #define RX_PA_CFG_IGNORE_L2_ERR            BIT(6)
 
-	u8 unused12[0x700 - 0x1D8];
+	u64 unused_11_1;
+
+	u64 ring_bump_counter1;
+	u64 ring_bump_counter2;
+
+	u8 unused12[0x700 - 0x1F0];
 
 	u64 rxdma_debug_ctrl;
 
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 79208f4..cac9fdd 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -26,15 +26,22 @@
  *
  * The module loadable parameters that are supported by the driver and a brief
  * explanation of all the variables.
+ *
  * rx_ring_num : This can be used to program the number of receive rings used
  * in the driver.
- * rx_ring_sz: This defines the number of descriptors each ring can have. This
- * is also an array of size 8.
+ * rx_ring_sz: This defines the number of receive blocks each ring can have.
+ *     This is also an array of size 8.
  * rx_ring_mode: This defines the operation mode of all 8 rings. The valid
  *		values are 1, 2 and 3.
  * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
  * tx_fifo_len: This too is an array of 8. Each element defines the number of
  * Tx descriptors that can be associated with each corresponding FIFO.
+ * intr_type: This defines the type of interrupt. The values can be 0 (INTA),
+ *     1 (MSI) or 2 (MSI-X). The default value is 0 (INTA).
+ * lro: Specifies whether to enable Large Receive Offload (LRO) or not.
+ *     Possible values are '1' to enable and '0' to disable. Default is '0'.
+ * lro_max_pkts: This parameter defines the maximum number of packets that can
+ *     be aggregated into a single large packet.
  ************************************************************************/
 
 #include <linux/config.h>
@@ -70,7 +77,7 @@
 #include "s2io.h"
 #include "s2io-regs.h"
 
-#define DRV_VERSION "2.0.11.2"
+#define DRV_VERSION "2.0.14.2"
 
 /* S2io Driver name & version. */
 static char s2io_driver_name[] = "Neterion";
@@ -106,18 +113,14 @@
 #define LOW	2
 static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
 {
-	int level = 0;
 	mac_info_t *mac_control;
 
 	mac_control = &sp->mac_control;
-	if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16) {
-		level = LOW;
-		if (rxb_size <= rxd_count[sp->rxd_mode]) {
-			level = PANIC;
-		}
-	}
-
-	return level;
+	if (rxb_size <= rxd_count[sp->rxd_mode])
+		return PANIC;
+	else if ((mac_control->rings[ring].pkt_cnt - rxb_size) > 16)
+		return  LOW;
+	return 0;
 }
 
 /* Ethtool related variables and Macros. */
@@ -136,7 +139,11 @@
 	{"tmac_mcst_frms"},
 	{"tmac_bcst_frms"},
 	{"tmac_pause_ctrl_frms"},
+	{"tmac_ttl_octets"},
+	{"tmac_ucst_frms"},
+	{"tmac_nucst_frms"},
 	{"tmac_any_err_frms"},
+	{"tmac_ttl_less_fb_octets"},
 	{"tmac_vld_ip_octets"},
 	{"tmac_vld_ip"},
 	{"tmac_drop_ip"},
@@ -151,13 +158,27 @@
 	{"rmac_vld_mcst_frms"},
 	{"rmac_vld_bcst_frms"},
 	{"rmac_in_rng_len_err_frms"},
+	{"rmac_out_rng_len_err_frms"},
 	{"rmac_long_frms"},
 	{"rmac_pause_ctrl_frms"},
+	{"rmac_unsup_ctrl_frms"},
+	{"rmac_ttl_octets"},
+	{"rmac_accepted_ucst_frms"},
+	{"rmac_accepted_nucst_frms"},
 	{"rmac_discarded_frms"},
+	{"rmac_drop_events"},
+	{"rmac_ttl_less_fb_octets"},
+	{"rmac_ttl_frms"},
 	{"rmac_usized_frms"},
 	{"rmac_osized_frms"},
 	{"rmac_frag_frms"},
 	{"rmac_jabber_frms"},
+	{"rmac_ttl_64_frms"},
+	{"rmac_ttl_65_127_frms"},
+	{"rmac_ttl_128_255_frms"},
+	{"rmac_ttl_256_511_frms"},
+	{"rmac_ttl_512_1023_frms"},
+	{"rmac_ttl_1024_1518_frms"},
 	{"rmac_ip"},
 	{"rmac_ip_octets"},
 	{"rmac_hdr_err_ip"},
@@ -166,12 +187,82 @@
 	{"rmac_tcp"},
 	{"rmac_udp"},
 	{"rmac_err_drp_udp"},
+	{"rmac_xgmii_err_sym"},
+	{"rmac_frms_q0"},
+	{"rmac_frms_q1"},
+	{"rmac_frms_q2"},
+	{"rmac_frms_q3"},
+	{"rmac_frms_q4"},
+	{"rmac_frms_q5"},
+	{"rmac_frms_q6"},
+	{"rmac_frms_q7"},
+	{"rmac_full_q0"},
+	{"rmac_full_q1"},
+	{"rmac_full_q2"},
+	{"rmac_full_q3"},
+	{"rmac_full_q4"},
+	{"rmac_full_q5"},
+	{"rmac_full_q6"},
+	{"rmac_full_q7"},
 	{"rmac_pause_cnt"},
+	{"rmac_xgmii_data_err_cnt"},
+	{"rmac_xgmii_ctrl_err_cnt"},
 	{"rmac_accepted_ip"},
 	{"rmac_err_tcp"},
+	{"rd_req_cnt"},
+	{"new_rd_req_cnt"},
+	{"new_rd_req_rtry_cnt"},
+	{"rd_rtry_cnt"},
+	{"wr_rtry_rd_ack_cnt"},
+	{"wr_req_cnt"},
+	{"new_wr_req_cnt"},
+	{"new_wr_req_rtry_cnt"},
+	{"wr_rtry_cnt"},
+	{"wr_disc_cnt"},
+	{"rd_rtry_wr_ack_cnt"},
+	{"txp_wr_cnt"},
+	{"txd_rd_cnt"},
+	{"txd_wr_cnt"},
+	{"rxd_rd_cnt"},
+	{"rxd_wr_cnt"},
+	{"txf_rd_cnt"},
+	{"rxf_wr_cnt"},
+	{"rmac_ttl_1519_4095_frms"},
+	{"rmac_ttl_4096_8191_frms"},
+	{"rmac_ttl_8192_max_frms"},
+	{"rmac_ttl_gt_max_frms"},
+	{"rmac_osized_alt_frms"},
+	{"rmac_jabber_alt_frms"},
+	{"rmac_gt_max_alt_frms"},
+	{"rmac_vlan_frms"},
+	{"rmac_len_discard"},
+	{"rmac_fcs_discard"},
+	{"rmac_pf_discard"},
+	{"rmac_da_discard"},
+	{"rmac_red_discard"},
+	{"rmac_rts_discard"},
+	{"rmac_ingm_full_discard"},
+	{"link_fault_cnt"},
 	{"\n DRIVER STATISTICS"},
 	{"single_bit_ecc_errs"},
 	{"double_bit_ecc_errs"},
+	{"parity_err_cnt"},
+	{"serious_err_cnt"},
+	{"soft_reset_cnt"},
+	{"fifo_full_cnt"},
+	{"ring_full_cnt"},
+	("alarm_transceiver_temp_high"),
+	("alarm_transceiver_temp_low"),
+	("alarm_laser_bias_current_high"),
+	("alarm_laser_bias_current_low"),
+	("alarm_laser_output_power_high"),
+	("alarm_laser_output_power_low"),
+	("warn_transceiver_temp_high"),
+	("warn_transceiver_temp_low"),
+	("warn_laser_bias_current_high"),
+	("warn_laser_bias_current_low"),
+	("warn_laser_output_power_high"),
+	("warn_laser_output_power_low"),
 	("lro_aggregated_pkts"),
 	("lro_flush_both_count"),
 	("lro_out_of_sequence_pkts"),
@@ -220,9 +311,7 @@
  * the XAUI.
  */
 
-#define SWITCH_SIGN	0xA5A5A5A5A5A5A5A5ULL
 #define	END_SIGN	0x0
-
 static const u64 herc_act_dtx_cfg[] = {
 	/* Set address */
 	0x8000051536750000ULL, 0x80000515367500E0ULL,
@@ -244,37 +333,19 @@
 	END_SIGN
 };
 
-static const u64 xena_mdio_cfg[] = {
-	/* Reset PMA PLL */
-	0xC001010000000000ULL, 0xC0010100000000E0ULL,
-	0xC0010100008000E4ULL,
-	/* Remove Reset from PMA PLL */
-	0xC001010000000000ULL, 0xC0010100000000E0ULL,
-	0xC0010100000000E4ULL,
-	END_SIGN
-};
-
 static const u64 xena_dtx_cfg[] = {
+	/* Set address */
 	0x8000051500000000ULL, 0x80000515000000E0ULL,
-	0x80000515D93500E4ULL, 0x8001051500000000ULL,
-	0x80010515000000E0ULL, 0x80010515001E00E4ULL,
+	/* Write data */
+	0x80000515D9350004ULL, 0x80000515D93500E4ULL,
+	/* Set address */
+	0x8001051500000000ULL, 0x80010515000000E0ULL,
+	/* Write data */
+	0x80010515001E0004ULL, 0x80010515001E00E4ULL,
+	/* Set address */
 	0x8002051500000000ULL, 0x80020515000000E0ULL,
-	0x80020515F21000E4ULL,
-	/* Set PADLOOPBACKN */
-	0x8002051500000000ULL, 0x80020515000000E0ULL,
-	0x80020515B20000E4ULL, 0x8003051500000000ULL,
-	0x80030515000000E0ULL, 0x80030515B20000E4ULL,
-	0x8004051500000000ULL, 0x80040515000000E0ULL,
-	0x80040515B20000E4ULL, 0x8005051500000000ULL,
-	0x80050515000000E0ULL, 0x80050515B20000E4ULL,
-	SWITCH_SIGN,
-	/* Remove PADLOOPBACKN */
-	0x8002051500000000ULL, 0x80020515000000E0ULL,
-	0x80020515F20000E4ULL, 0x8003051500000000ULL,
-	0x80030515000000E0ULL, 0x80030515F20000E4ULL,
-	0x8004051500000000ULL, 0x80040515000000E0ULL,
-	0x80040515F20000E4ULL, 0x8005051500000000ULL,
-	0x80050515000000E0ULL, 0x80050515F20000E4ULL,
+	/* Write data */
+	0x80020515F2100004ULL, 0x80020515F21000E4ULL,
 	END_SIGN
 };
 
@@ -303,15 +374,15 @@
 /* Module Loadable parameters. */
 static unsigned int tx_fifo_num = 1;
 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
-    {[0 ...(MAX_TX_FIFOS - 1)] = 0 };
+    {DEFAULT_FIFO_0_LEN, [1 ...(MAX_TX_FIFOS - 1)] = DEFAULT_FIFO_1_7_LEN};
 static unsigned int rx_ring_num = 1;
 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
-    {[0 ...(MAX_RX_RINGS - 1)] = 0 };
+    {[0 ...(MAX_RX_RINGS - 1)] = SMALL_BLK_CNT};
 static unsigned int rts_frm_len[MAX_RX_RINGS] =
     {[0 ...(MAX_RX_RINGS - 1)] = 0 };
 static unsigned int rx_ring_mode = 1;
 static unsigned int use_continuous_tx_intrs = 1;
-static unsigned int rmac_pause_time = 65535;
+static unsigned int rmac_pause_time = 0x100;
 static unsigned int mc_pause_threshold_q0q3 = 187;
 static unsigned int mc_pause_threshold_q4q7 = 187;
 static unsigned int shared_splits;
@@ -549,11 +620,6 @@
 					rx_blocks->block_dma_addr +
 					(rxd_size[nic->rxd_mode] * l);
 			}
-
-			mac_control->rings[i].rx_blocks[j].block_virt_addr =
-				tmp_v_addr;
-			mac_control->rings[i].rx_blocks[j].block_dma_addr =
-				tmp_p_addr;
 		}
 		/* Interlinking all Rx Blocks */
 		for (j = 0; j < blk_cnt; j++) {
@@ -772,7 +838,21 @@
 	return mode;
 }
 
+#define NEC_VENID   0x1033
+#define NEC_DEVID   0x0125
+static int s2io_on_nec_bridge(struct pci_dev *s2io_pdev)
+{
+	struct pci_dev *tdev = NULL;
+	while ((tdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) {
+		if ((tdev->vendor == NEC_VENID) && (tdev->device == NEC_DEVID)){
+			if (tdev->bus == s2io_pdev->bus->parent)
+				return 1;
+		}
+	}
+	return 0;
+}
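One note on the helper above: pci_find_device() is not reference-counted and is unsafe against hotplug, and it was later deprecated in favour of pci_get_device(). A possible equivalent, shown only as a sketch and not as part of this patch:

/*
 * Sketch: the same bridge check using the refcounted pci_get_device().
 * pci_get_device() releases the reference on the device passed in and takes
 * a reference on the device it returns, so an early exit must drop it.
 */
static int s2io_on_nec_bridge_refcounted(struct pci_dev *s2io_pdev)
{
	struct pci_dev *tdev = NULL;

	while ((tdev = pci_get_device(NEC_VENID, NEC_DEVID, tdev)) != NULL) {
		if (tdev->bus == s2io_pdev->bus->parent) {
			pci_dev_put(tdev);
			return 1;
		}
	}
	return 0;
}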
 
+static int bus_speed[8] = {33, 133, 133, 200, 266, 133, 200, 266};
 /**
  * s2io_print_pci_mode -
  */
@@ -789,6 +869,14 @@
 	if ( val64 & PCI_MODE_UNKNOWN_MODE)
 		return -1;	/* Unknown PCI mode */
 
+	config->bus_speed = bus_speed[mode];
+
+	if (s2io_on_nec_bridge(nic->pdev)) {
+		DBG_PRINT(ERR_DBG, "%s: Device is on PCI-E bus\n",
+							nic->dev->name);
+		return mode;
+	}
+
 	if (val64 & PCI_MODE_32_BITS) {
 		DBG_PRINT(ERR_DBG, "%s: Device is on 32 bit ", nic->dev->name);
 	} else {
@@ -798,35 +886,27 @@
 	switch(mode) {
 		case PCI_MODE_PCI_33:
 			DBG_PRINT(ERR_DBG, "33MHz PCI bus\n");
-			config->bus_speed = 33;
 			break;
 		case PCI_MODE_PCI_66:
 			DBG_PRINT(ERR_DBG, "66MHz PCI bus\n");
-			config->bus_speed = 133;
 			break;
 		case PCI_MODE_PCIX_M1_66:
 			DBG_PRINT(ERR_DBG, "66MHz PCIX(M1) bus\n");
-			config->bus_speed = 133; /* Herc doubles the clock rate */
 			break;
 		case PCI_MODE_PCIX_M1_100:
 			DBG_PRINT(ERR_DBG, "100MHz PCIX(M1) bus\n");
-			config->bus_speed = 200;
 			break;
 		case PCI_MODE_PCIX_M1_133:
 			DBG_PRINT(ERR_DBG, "133MHz PCIX(M1) bus\n");
-			config->bus_speed = 266;
 			break;
 		case PCI_MODE_PCIX_M2_66:
 			DBG_PRINT(ERR_DBG, "133MHz PCIX(M2) bus\n");
-			config->bus_speed = 133;
 			break;
 		case PCI_MODE_PCIX_M2_100:
 			DBG_PRINT(ERR_DBG, "200MHz PCIX(M2) bus\n");
-			config->bus_speed = 200;
 			break;
 		case PCI_MODE_PCIX_M2_133:
 			DBG_PRINT(ERR_DBG, "266MHz PCIX(M2) bus\n");
-			config->bus_speed = 266;
 			break;
 		default:
 			return -1;	/* Unsupported bus speed */
@@ -854,7 +934,7 @@
 	int i, j;
 	mac_info_t *mac_control;
 	struct config_param *config;
-	int mdio_cnt = 0, dtx_cnt = 0;
+	int dtx_cnt = 0;
 	unsigned long long mem_share;
 	int mem_size;
 
@@ -901,20 +981,6 @@
 	val64 = dev->mtu;
 	writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
 
-	/*
-	 * Configuring the XAUI Interface of Xena.
-	 * ***************************************
-	 * To Configure the Xena's XAUI, one has to write a series
-	 * of 64 bit values into two registers in a particular
-	 * sequence. Hence a macro 'SWITCH_SIGN' has been defined
-	 * which will be defined in the array of configuration values
-	 * (xena_dtx_cfg & xena_mdio_cfg) at appropriate places
-	 * to switch writing from one regsiter to another. We continue
-	 * writing these values until we encounter the 'END_SIGN' macro.
-	 * For example, After making a series of 21 writes into
-	 * dtx_control register the 'SWITCH_SIGN' appears and hence we
-	 * start writing into mdio_control until we encounter END_SIGN.
-	 */
 	if (nic->device_type & XFRAME_II_DEVICE) {
 		while (herc_act_dtx_cfg[dtx_cnt] != END_SIGN) {
 			SPECIAL_REG_WRITE(herc_act_dtx_cfg[dtx_cnt],
@@ -924,35 +990,11 @@
 			dtx_cnt++;
 		}
 	} else {
-		while (1) {
-		      dtx_cfg:
-			while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
-				if (xena_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
-					dtx_cnt++;
-					goto mdio_cfg;
-				}
-				SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
-						  &bar0->dtx_control, UF);
-				val64 = readq(&bar0->dtx_control);
-				dtx_cnt++;
-			}
-		      mdio_cfg:
-			while (xena_mdio_cfg[mdio_cnt] != END_SIGN) {
-				if (xena_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
-					mdio_cnt++;
-					goto dtx_cfg;
-				}
-				SPECIAL_REG_WRITE(xena_mdio_cfg[mdio_cnt],
-						  &bar0->mdio_control, UF);
-				val64 = readq(&bar0->mdio_control);
-				mdio_cnt++;
-			}
-			if ((xena_dtx_cfg[dtx_cnt] == END_SIGN) &&
-			    (xena_mdio_cfg[mdio_cnt] == END_SIGN)) {
-				break;
-			} else {
-				goto dtx_cfg;
-			}
+		while (xena_dtx_cfg[dtx_cnt] != END_SIGN) {
+			SPECIAL_REG_WRITE(xena_dtx_cfg[dtx_cnt],
+					  &bar0->dtx_control, UF);
+			val64 = readq(&bar0->dtx_control);
+			dtx_cnt++;
 		}
 	}
 
@@ -994,11 +1036,6 @@
 		}
 	}
 
-	/* Enable Tx FIFO partition 0. */
-	val64 = readq(&bar0->tx_fifo_partition_0);
-	val64 |= BIT(0);	/* To enable the FIFO partition. */
-	writeq(val64, &bar0->tx_fifo_partition_0);
-
 	/*
 	 * Disable 4 PCCs for Xena1, 2 and 3 as per H/W bug
 	 * SXE-008 TRANSMIT DMA ARBITRATION ISSUE.
@@ -1177,6 +1214,11 @@
 		break;
 	}
 
+	/* Enable Tx FIFO partition 0. */
+	val64 = readq(&bar0->tx_fifo_partition_0);
+	val64 |= (TX_FIFO_PARTITION_EN);
+	writeq(val64, &bar0->tx_fifo_partition_0);
+
 	/* Filling the Rx round robin registers as per the
 	 * number of Rings and steering based on QoS.
          */
@@ -1545,19 +1587,26 @@
 	val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
 	writeq(val64, &bar0->pic_control);
 
+	if (nic->config.bus_speed == 266) {
+		writeq(TXREQTO_VAL(0x7f) | TXREQTO_EN, &bar0->txreqtimeout);
+		writeq(0x0, &bar0->read_retry_delay);
+		writeq(0x0, &bar0->write_retry_delay);
+	}
+
 	/*
 	 * Programming the Herc to split every write transaction
 	 * that does not start on an ADB to reduce disconnects.
 	 */
 	if (nic->device_type == XFRAME_II_DEVICE) {
-		val64 = WREQ_SPLIT_MASK_SET_MASK(255);
-		writeq(val64, &bar0->wreq_split_mask);
-	}
-
-	/* Setting Link stability period to 64 ms */ 
-	if (nic->device_type == XFRAME_II_DEVICE) {
-		val64 = MISC_LINK_STABILITY_PRD(3);
+		val64 = EXT_REQ_EN | MISC_LINK_STABILITY_PRD(3);
 		writeq(val64, &bar0->misc_control);
+		val64 = readq(&bar0->pic_control2);
+		val64 &= ~(BIT(13)|BIT(14)|BIT(15));
+		writeq(val64, &bar0->pic_control2);
+	}
+	if (strstr(nic->product_name, "CX4")) {
+		val64 = TMAC_AVG_IPG(0x17);
+		writeq(val64, &bar0->tmac_avg_ipg);
 	}
 
 	return SUCCESS;
@@ -1948,6 +1997,10 @@
 			val64 |= PRC_CTRL_RC_ENABLED;
 		else
 			val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
+		if (nic->device_type == XFRAME_II_DEVICE)
+			val64 |= PRC_CTRL_GROUP_READS;
+		val64 &= ~PRC_CTRL_RXD_BACKOFF_INTERVAL(0xFFFFFF);
+		val64 |= PRC_CTRL_RXD_BACKOFF_INTERVAL(0x1000);
 		writeq(val64, &bar0->prc_ctrl_n[i]);
 	}
 
@@ -2018,6 +2071,13 @@
 	val64 |= ADAPTER_EOI_TX_ON;
 	writeq(val64, &bar0->adapter_control);
 
+	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
+		/*
+		 * Don't see link state interrupts initially on some switches,
+		 * so directly scheduling the link state task here.
+		 */
+		schedule_work(&nic->set_link_task);
+	}
 	/* SXE-002: Initialize link and activity LED */
 	subid = nic->pdev->subsystem_device;
 	if (((subid & 0xFF) >= 0x07) &&
@@ -2029,12 +2089,6 @@
 		writeq(val64, (void __iomem *)bar0 + 0x2700);
 	}
 
-	/*
-	 * Don't see link state interrupts on certain switches, so
-	 * directly scheduling a link state task from here.
-	 */
-	schedule_work(&nic->set_link_task);
-
 	return SUCCESS;
 }
 /**
@@ -2134,7 +2188,7 @@
 {
 	XENA_dev_config_t __iomem *bar0 = nic->bar0;
 	register u64 val64 = 0;
-	u16 interruptible, i;
+	u16 interruptible;
 	mac_info_t *mac_control;
 	struct config_param *config;
 
@@ -2147,12 +2201,10 @@
 	interruptible |= TX_MAC_INTR | RX_MAC_INTR;
 	en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
 
-	/*  Disable PRCs */
-	for (i = 0; i < config->rx_ring_num; i++) {
-		val64 = readq(&bar0->prc_ctrl_n[i]);
-		val64 &= ~((u64) PRC_CTRL_RC_ENABLED);
-		writeq(val64, &bar0->prc_ctrl_n[i]);
-	}
+	/* Clearing Adapter_En bit of ADAPTER_CONTROL Register */
+	val64 = readq(&bar0->adapter_control);
+	val64 &= ~(ADAPTER_CNTL_EN);
+	writeq(val64, &bar0->adapter_control);
 }
 
 static int fill_rxd_3buf(nic_t *nic, RxD_t *rxdp, struct sk_buff *skb)
@@ -2231,13 +2283,12 @@
 	alloc_cnt = mac_control->rings[ring_no].pkt_cnt -
 	    atomic_read(&nic->rx_bufs_left[ring_no]);
 
+	block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index;
+	off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
 	while (alloc_tab < alloc_cnt) {
 		block_no = mac_control->rings[ring_no].rx_curr_put_info.
 		    block_index;
-		block_no1 = mac_control->rings[ring_no].rx_curr_get_info.
-		    block_index;
 		off = mac_control->rings[ring_no].rx_curr_put_info.offset;
-		off1 = mac_control->rings[ring_no].rx_curr_get_info.offset;
 
 		rxdp = mac_control->rings[ring_no].
 				rx_blocks[block_no].rxds[off].virt_addr;
@@ -2307,9 +2358,9 @@
 			memset(rxdp, 0, sizeof(RxD1_t));
 			skb_reserve(skb, NET_IP_ALIGN);
 			((RxD1_t*)rxdp)->Buffer0_ptr = pci_map_single
-			    (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
-			rxdp->Control_2 &= (~MASK_BUFFER0_SIZE_1);
-			rxdp->Control_2 |= SET_BUFFER0_SIZE_1(size);
+			    (nic->pdev, skb->data, size - NET_IP_ALIGN,
+				PCI_DMA_FROMDEVICE);
+			rxdp->Control_2 = SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN);
 
 		} else if (nic->rxd_mode >= RXD_MODE_3A) {
 			/*
@@ -2516,7 +2567,7 @@
 	mac_info_t *mac_control;
 	struct config_param *config;
 	XENA_dev_config_t __iomem *bar0 = nic->bar0;
-	u64 val64;
+	u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
 	int i;
 
 	atomic_inc(&nic->isr_cnt);
@@ -2528,8 +2579,8 @@
 		nic->pkts_to_process = dev->quota;
 	org_pkts_to_process = nic->pkts_to_process;
 
-	val64 = readq(&bar0->rx_traffic_int);
 	writeq(val64, &bar0->rx_traffic_int);
+	val64 = readl(&bar0->rx_traffic_int);
 
 	for (i = 0; i < config->rx_ring_num; i++) {
 		rx_intr_handler(&mac_control->rings[i]);
@@ -2554,7 +2605,8 @@
 		}
 	}
 	/* Re enable the Rx interrupts. */
-	en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
+	writeq(0x0, &bar0->rx_traffic_mask);
+	val64 = readl(&bar0->rx_traffic_mask);
 	atomic_dec(&nic->isr_cnt);
 	return 0;
 
@@ -2666,6 +2718,7 @@
 					 ((RxD3_t*)rxdp)->Buffer2_ptr,
 					 dev->mtu, PCI_DMA_FROMDEVICE);
 		}
+		prefetch(skb->data);
 		rx_osm_handler(ring_data, rxdp);
 		get_info.offset++;
 		ring_data->rx_curr_get_info.offset = get_info.offset;
@@ -2737,6 +2790,10 @@
 		if (txdlp->Control_1 & TXD_T_CODE) {
 			unsigned long long err;
 			err = txdlp->Control_1 & TXD_T_CODE;
+			if (err & 0x1) {
+				nic->mac_control.stats_info->sw_stat.
+						parity_err_cnt++;
+			}
 			if ((err >> 48) == 0xA) {
 				DBG_PRINT(TX_DBG, "TxD returned due \
 to loss of link\n");
@@ -2760,7 +2817,8 @@
 		dev_kfree_skb_irq(skb);
 
 		get_info.offset++;
-		get_info.offset %= get_info.fifo_len + 1;
+		if (get_info.offset == get_info.fifo_len + 1)
+			get_info.offset = 0;
 		txdlp = (TxD_t *) fifo_data->list_info
 		    [get_info.offset].list_virt_addr;
 		fifo_data->tx_curr_get_info.offset =
@@ -2774,6 +2832,256 @@
 }
 
 /**
+ *  s2io_mdio_write - Function to write into the MDIO registers
+ *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
+ *  @addr     : address value
+ *  @value    : data value
+ *  @dev      : pointer to net_device structure
+ *  Description:
+ *  This function is used to write values into the MDIO registers.
+ *  Return value:
+ *  NONE
+ */
+static void s2io_mdio_write(u32 mmd_type, u64 addr, u16 value, struct net_device *dev)
+{
+	u64 val64 = 0x0;
+	nic_t *sp = dev->priv;
+	XENA_dev_config_t *bar0 = (XENA_dev_config_t *)sp->bar0;
+
+	/* Address transaction */
+	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
+			| MDIO_MMD_DEV_ADDR(mmd_type)
+			| MDIO_MMS_PRT_ADDR(0x0);
+	writeq(val64, &bar0->mdio_control);
+	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
+	writeq(val64, &bar0->mdio_control);
+	udelay(100);
+
+	/* Data transaction */
+	val64 = 0x0;
+	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
+			| MDIO_MMD_DEV_ADDR(mmd_type)
+			| MDIO_MMS_PRT_ADDR(0x0)
+			| MDIO_MDIO_DATA(value)
+			| MDIO_OP(MDIO_OP_WRITE_TRANS);
+	writeq(val64, &bar0->mdio_control);
+	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
+	writeq(val64, &bar0->mdio_control);
+	udelay(100);
+
+	/* Read transaction issued on the same register after the write */
+	val64 = 0x0;
+	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
+			| MDIO_MMD_DEV_ADDR(mmd_type)
+			| MDIO_MMS_PRT_ADDR(0x0)
+			| MDIO_OP(MDIO_OP_READ_TRANS);
+	writeq(val64, &bar0->mdio_control);
+	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
+	writeq(val64, &bar0->mdio_control);
+	udelay(100);
+
+}
+
+/**
+ *  s2io_mdio_read - Function to read from the MDIO registers
+ *  @mmd_type : MMD type value (PMA/PMD/WIS/PCS/PHYXS)
+ *  @addr     : address value
+ *  @dev      : pointer to net_device structure
+ *  Description:
+ *  This function is used to read values from the MDIO registers.
+ *  Return value:
+ *  NONE
+ */
+static u64 s2io_mdio_read(u32 mmd_type, u64 addr, struct net_device *dev)
+{
+	u64 val64 = 0x0;
+	u64 rval64 = 0x0;
+	nic_t *sp = dev->priv;
+	XENA_dev_config_t *bar0 = (XENA_dev_config_t *)sp->bar0;
+
+	/* address transaction */
+	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
+			| MDIO_MMD_DEV_ADDR(mmd_type)
+			| MDIO_MMS_PRT_ADDR(0x0);
+	writeq(val64, &bar0->mdio_control);
+	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
+	writeq(val64, &bar0->mdio_control);
+	udelay(100);
+
+	/* Data transaction */
+	val64 = 0x0;
+	val64 = val64 | MDIO_MMD_INDX_ADDR(addr)
+			| MDIO_MMD_DEV_ADDR(mmd_type)
+			| MDIO_MMS_PRT_ADDR(0x0)
+			| MDIO_OP(MDIO_OP_READ_TRANS);
+	writeq(val64, &bar0->mdio_control);
+	val64 = val64 | MDIO_CTRL_START_TRANS(0xE);
+	writeq(val64, &bar0->mdio_control);
+	udelay(100);
+
+	/* Read the value from regs */
+	rval64 = readq(&bar0->mdio_control);
+	rval64 = rval64 & 0xFFFF0000;
+	rval64 = rval64 >> 16;
+	return rval64;
+}
+/**
+ *  s2io_chk_xpak_counter - Function to check the status of the xpak counters
+ *  @counter      : counter value to be updated
+ *  @regs_stat    : XPAK alarm status word holding a 2-bit state per counter
+ *  @index        : index of the counter's 2-bit state within @regs_stat
+ *  @flag         : flag to indicate the status
+ *  @type         : counter type
+ *  Description:
+ *  This function is used to check and update the XPAK counter values.
+ *  Return value:
+ *  NONE
+ */
+
+static void s2io_chk_xpak_counter(u64 *counter, u64 * regs_stat, u32 index, u16 flag, u16 type)
+{
+	u64 mask = 0x3;
+	u64 val64;
+	int i;
+	for(i = 0; i <index; i++)
+		mask = mask << 0x2;
+
+	if(flag > 0)
+	{
+		*counter = *counter + 1;
+		val64 = *regs_stat & mask;
+		val64 = val64 >> (index * 0x2);
+		val64 = val64 + 1;
+		if(val64 == 3)
+		{
+			switch(type)
+			{
+			case 1:
+				DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
+					  "service. Excessive temperatures may "
+					  "result in premature transceiver "
+					  "failure \n");
+			break;
+			case 2:
+				DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
+					  "service. Excessive bias currents may "
+					  "indicate imminent laser diode "
+					  "failure \n");
+			break;
+			case 3:
+				DBG_PRINT(ERR_DBG, "Take Xframe NIC out of "
+					  "service. Excessive laser output "
+					  "power may saturate far-end "
+					  "receiver\n");
+			break;
+			default:
+				DBG_PRINT(ERR_DBG, "Incorrect XPAK Alarm "
+					  "type \n");
+			}
+			val64 = 0x0;
+		}
+		val64 = val64 << (index * 0x2);
+		*regs_stat = (*regs_stat & (~mask)) | (val64);
+
+	} else {
+		*regs_stat = *regs_stat & (~mask);
+	}
+}
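s2io_chk_xpak_counter() keeps a 2-bit "consecutive alarm" state for each counter packed into xpak_regs_stat and warns once an alarm has been seen on three polls in a row. A stand-alone model of that bookkeeping (illustration only, not driver code):

#include <stdio.h>
#include <stdint.h>

/* Userspace model of the 2-bit per-alarm state packed into xpak_regs_stat. */
static void chk_xpak(uint64_t *counter, uint64_t *regs_stat, unsigned index, int flag)
{
	uint64_t mask = 0x3ULL << (index * 2);

	if (flag) {
		uint64_t state = ((*regs_stat & mask) >> (index * 2)) + 1;

		(*counter)++;
		if (state == 3) {
			printf("alarm at index %u persisted for 3 polls\n", index);
			state = 0;		/* warn once, then restart */
		}
		*regs_stat = (*regs_stat & ~mask) | (state << (index * 2));
	} else {
		*regs_stat &= ~mask;		/* alarm cleared: reset state */
	}
}

int main(void)
{
	uint64_t cnt = 0, regs = 0;
	int i;

	for (i = 0; i < 4; i++)
		chk_xpak(&cnt, &regs, 0, 1);	/* alarm asserted on 4 polls */
	printf("counter=%llu regs_stat=0x%llx\n",
	       (unsigned long long)cnt, (unsigned long long)regs);
	return 0;
}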
+
+/**
+ *  s2io_updt_xpak_counter - Function to update the xpak counters
+ *  @dev         : pointer to net_device struct
+ *  Description:
+ *  This function is used to update the XPAK counter values.
+ *  Return value:
+ *  NONE
+ */
+static void s2io_updt_xpak_counter(struct net_device *dev)
+{
+	u16 flag  = 0x0;
+	u16 type  = 0x0;
+	u16 val16 = 0x0;
+	u64 val64 = 0x0;
+	u64 addr  = 0x0;
+
+	nic_t *sp = dev->priv;
+	StatInfo_t *stat_info = sp->mac_control.stats_info;
+
+	/* Check the communication with the MDIO slave */
+	addr = 0x0000;
+	val64 = 0x0;
+	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
+	if((val64 == 0xFFFF) || (val64 == 0x0000))
+	{
+		DBG_PRINT(ERR_DBG, "ERR: MDIO slave access failed - "
+			  "Returned %llx\n", (unsigned long long)val64);
+		return;
+	}
+
+	/* Check for the expected value of 0x2040 at PMA address 0x0000 */
+	if(val64 != 0x2040)
+	{
+		DBG_PRINT(ERR_DBG, "Incorrect value at PMA address 0x0000 - ");
+		DBG_PRINT(ERR_DBG, "Returned: %llx- Expected: 0x2040\n",
+			  (unsigned long long)val64);
+		return;
+	}
+
+	/* Loading the DOM register to MDIO register */
+	addr = 0xA100;
+	s2io_mdio_write(MDIO_MMD_PMA_DEV_ADDR, addr, val16, dev);
+	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
+
+	/* Reading the Alarm flags */
+	addr = 0xA070;
+	val64 = 0x0;
+	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
+
+	flag = CHECKBIT(val64, 0x7);
+	type = 1;
+	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_transceiver_temp_high,
+				&stat_info->xpak_stat.xpak_regs_stat,
+				0x0, flag, type);
+
+	if(CHECKBIT(val64, 0x6))
+		stat_info->xpak_stat.alarm_transceiver_temp_low++;
+
+	flag = CHECKBIT(val64, 0x3);
+	type = 2;
+	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_bias_current_high,
+				&stat_info->xpak_stat.xpak_regs_stat,
+				0x2, flag, type);
+
+	if(CHECKBIT(val64, 0x2))
+		stat_info->xpak_stat.alarm_laser_bias_current_low++;
+
+	flag = CHECKBIT(val64, 0x1);
+	type = 3;
+	s2io_chk_xpak_counter(&stat_info->xpak_stat.alarm_laser_output_power_high,
+				&stat_info->xpak_stat.xpak_regs_stat,
+				0x4, flag, type);
+
+	if(CHECKBIT(val64, 0x0))
+		stat_info->xpak_stat.alarm_laser_output_power_low++;
+
+	/* Reading the Warning flags */
+	addr = 0xA074;
+	val64 = 0x0;
+	val64 = s2io_mdio_read(MDIO_MMD_PMA_DEV_ADDR, addr, dev);
+
+	if(CHECKBIT(val64, 0x7))
+		stat_info->xpak_stat.warn_transceiver_temp_high++;
+
+	if(CHECKBIT(val64, 0x6))
+		stat_info->xpak_stat.warn_transceiver_temp_low++;
+
+	if(CHECKBIT(val64, 0x3))
+		stat_info->xpak_stat.warn_laser_bias_current_high++;
+
+	if(CHECKBIT(val64, 0x2))
+		stat_info->xpak_stat.warn_laser_bias_current_low++;
+
+	if(CHECKBIT(val64, 0x1))
+		stat_info->xpak_stat.warn_laser_output_power_high++;
+
+	if(CHECKBIT(val64, 0x0))
+		stat_info->xpak_stat.warn_laser_output_power_low++;
+}
+
+/**
  *  alarm_intr_handler - Alarm Interrupt handler
  *  @nic: device private variable
  *  Description: If the interrupt was neither because of Rx packet or Tx
@@ -2790,6 +3098,18 @@
 	struct net_device *dev = (struct net_device *) nic->dev;
 	XENA_dev_config_t __iomem *bar0 = nic->bar0;
 	register u64 val64 = 0, err_reg = 0;
+	u64 cnt;
+	int i;
+	nic->mac_control.stats_info->sw_stat.ring_full_cnt = 0;
+	/* Handling the XPAK counters update */
+	if(nic->mac_control.stats_info->xpak_stat.xpak_timer_count < 72000) {
+		/* waiting for an hour */
+		nic->mac_control.stats_info->xpak_stat.xpak_timer_count++;
+	} else {
+		s2io_updt_xpak_counter(dev);
+		/* reset the count to zero */
+		nic->mac_control.stats_info->xpak_stat.xpak_timer_count = 0;
+	}
 
 	/* Handling link status change error Intr */
 	if (s2io_link_fault_indication(nic) == MAC_RMAC_ERR_TIMER) {
@@ -2816,6 +3136,8 @@
 					     MC_ERR_REG_MIRI_ECC_DB_ERR_1)) {
 					netif_stop_queue(dev);
 					schedule_work(&nic->rst_timer_task);
+					nic->mac_control.stats_info->sw_stat.
+							soft_reset_cnt++;
 				}
 			}
 		} else {
@@ -2827,11 +3149,13 @@
 	/* In case of a serious error, the device will be Reset. */
 	val64 = readq(&bar0->serr_source);
 	if (val64 & SERR_SOURCE_ANY) {
+		nic->mac_control.stats_info->sw_stat.serious_err_cnt++;
 		DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
 		DBG_PRINT(ERR_DBG, "serious error %llx!!\n", 
 			  (unsigned long long)val64);
 		netif_stop_queue(dev);
 		schedule_work(&nic->rst_timer_task);
+		nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
 	}
 
 	/*
@@ -2849,6 +3173,35 @@
 		ac = readq(&bar0->adapter_control);
 		schedule_work(&nic->set_link_task);
 	}
+	/* Check for data parity error */
+	val64 = readq(&bar0->pic_int_status);
+	if (val64 & PIC_INT_GPIO) {
+		val64 = readq(&bar0->gpio_int_reg);
+		if (val64 & GPIO_INT_REG_DP_ERR_INT) {
+			nic->mac_control.stats_info->sw_stat.parity_err_cnt++;
+			schedule_work(&nic->rst_timer_task);
+			nic->mac_control.stats_info->sw_stat.soft_reset_cnt++;
+		}
+	}
+
+	/* Check for ring full counter */
+	if (nic->device_type & XFRAME_II_DEVICE) {
+		val64 = readq(&bar0->ring_bump_counter1);
+		for (i=0; i<4; i++) {
+			cnt = ( val64 & vBIT(0xFFFF,(i*16),16));
+			cnt >>= 64 - ((i+1)*16);
+			nic->mac_control.stats_info->sw_stat.ring_full_cnt
+				+= cnt;
+		}
+
+		val64 = readq(&bar0->ring_bump_counter2);
+		for (i=0; i<4; i++) {
+			cnt = ( val64 & vBIT(0xFFFF,(i*16),16));
+			cnt >>= 64 - ((i+1)*16);
+			nic->mac_control.stats_info->sw_stat.ring_full_cnt
+				+= cnt;
+		}
+	}
 
 	/* Other type of interrupts are not being handled now,  TODO */
 }
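The ring-bump loop above pulls four 16-bit counts out of each 64-bit counter register using the driver's MSB-first vBIT() mask. A small stand-alone check of that extraction (vBIT() is restated here under the assumed MSB-first convention):

#include <assert.h>
#include <stdint.h>

/* Illustration only: assumes s2io's MSB-first bit numbering for vBIT(). */
#define vBIT(val, loc, sz)	(((uint64_t)(val)) << (64 - (loc) - (sz)))

int main(void)
{
	uint64_t val64 = 0x0001000200030004ULL;	/* packed counts 1, 2, 3, 4 */
	uint64_t sum = 0;
	int i;

	for (i = 0; i < 4; i++) {
		uint64_t cnt = val64 & vBIT(0xFFFF, i * 16, 16);

		cnt >>= 64 - ((i + 1) * 16);
		sum += cnt;
	}
	assert(sum == 10);
	return 0;
}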
@@ -2864,23 +3217,26 @@
  *   SUCCESS on success and FAILURE on failure.
  */
 
-static int wait_for_cmd_complete(nic_t * sp)
+static int wait_for_cmd_complete(void *addr, u64 busy_bit)
 {
-	XENA_dev_config_t __iomem *bar0 = sp->bar0;
 	int ret = FAILURE, cnt = 0;
 	u64 val64;
 
 	while (TRUE) {
-		val64 = readq(&bar0->rmac_addr_cmd_mem);
-		if (!(val64 & RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
+		val64 = readq(addr);
+		if (!(val64 & busy_bit)) {
 			ret = SUCCESS;
 			break;
 		}
-		msleep(50);
+
+		if(in_interrupt())
+			mdelay(50);
+		else
+			msleep(50);
+
 		if (cnt++ > 10)
 			break;
 	}
-
 	return ret;
 }
 
@@ -2919,6 +3275,9 @@
 	 * PCI write to sw_reset register is done by this time.
 	 */
 	msleep(250);
+	if (strstr(sp->product_name, "CX4")) {
+		msleep(750);
+	}
 
 	/* Restore the PCI state saved during initialization. */
 	pci_restore_state(sp->pdev);
@@ -3137,7 +3496,7 @@
 	u64 val64;
 	int i;
 
-	for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
+	for (i=0; i< nic->avail_msix_vectors; i++) {
 		writeq(nic->msix_info[i].addr, &bar0->xmsi_address);
 		writeq(nic->msix_info[i].data, &bar0->xmsi_data);
 		val64 = (BIT(7) | BIT(15) | vBIT(i, 26, 6));
@@ -3156,7 +3515,7 @@
 	int i;
 
 	/* Store and display */
-	for (i=0; i< MAX_REQUESTED_MSI_X; i++) {
+	for (i=0; i< nic->avail_msix_vectors; i++) {
 		val64 = (BIT(15) | vBIT(i, 26, 6));
 		writeq(val64, &bar0->xmsi_access);
 		if (wait_for_msix_trans(nic, i)) {
@@ -3284,15 +3643,24 @@
 		writeq(tx_mat, &bar0->tx_mat0_n[7]);
 	}
 
+	nic->avail_msix_vectors = 0;
 	ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X);
+	/* Fail init on error, or if we get fewer vectors than the minimum required */
+	if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) {
+		nic->avail_msix_vectors = ret;
+		ret = pci_enable_msix(nic->pdev, nic->entries, ret);
+	}
 	if (ret) {
 		DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name);
 		kfree(nic->entries);
 		kfree(nic->s2io_entries);
 		nic->entries = NULL;
 		nic->s2io_entries = NULL;
+		nic->avail_msix_vectors = 0;
 		return -ENOMEM;
 	}
+	if (!nic->avail_msix_vectors)
+		nic->avail_msix_vectors = MAX_REQUESTED_MSI_X;
 
 	/*
 	 * To enable MSI-X, MSI also needs to be enabled, due to a bug
@@ -3325,8 +3693,6 @@
 {
 	nic_t *sp = dev->priv;
 	int err = 0;
-	int i;
-	u16 msi_control; /* Temp variable */
 
 	/*
 	 * Make sure you have link off by default every time
@@ -3336,11 +3702,14 @@
 	sp->last_link_state = 0;
 
 	/* Initialize H/W and enable interrupts */
-	if (s2io_card_up(sp)) {
+	err = s2io_card_up(sp);
+	if (err) {
 		DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
 			  dev->name);
-		err = -ENODEV;
-		goto hw_init_failed;
+		if (err == -ENODEV)
+			goto hw_init_failed;
+		else
+			goto hw_enable_failed;
 	}
 
 	/* Store the values of the MSIX table in the nic_t structure */
@@ -3357,6 +3726,8 @@
 		}
 	}
 	if (sp->intr_type == MSI_X) {
+		int i;
+
 		for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) {
 			if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) {
 				sprintf(sp->desc1, "%s:MSI-X-%d-TX",
@@ -3409,24 +3780,26 @@
 isr_registration_failed:
 	del_timer_sync(&sp->alarm_timer);
 	if (sp->intr_type == MSI_X) {
-		if (sp->device_type == XFRAME_II_DEVICE) {
-			for (i=1; (sp->s2io_entries[i].in_use == 
+		int i;
+		u16 msi_control; /* Temp variable */
+
+		for (i=1; (sp->s2io_entries[i].in_use == 
 				MSIX_REGISTERED_SUCCESS); i++) {
-				int vector = sp->entries[i].vector;
-				void *arg = sp->s2io_entries[i].arg;
+			int vector = sp->entries[i].vector;
+			void *arg = sp->s2io_entries[i].arg;
 
-				free_irq(vector, arg);
-			}
-			pci_disable_msix(sp->pdev);
-
-			/* Temp */
-			pci_read_config_word(sp->pdev, 0x42, &msi_control);
-			msi_control &= 0xFFFE; /* Disable MSI */
-			pci_write_config_word(sp->pdev, 0x42, msi_control);
+			free_irq(vector, arg);
 		}
+		pci_disable_msix(sp->pdev);
+
+		/* Temp */
+		pci_read_config_word(sp->pdev, 0x42, &msi_control);
+		msi_control &= 0xFFFE; /* Disable MSI */
+		pci_write_config_word(sp->pdev, 0x42, msi_control);
 	}
 	else if (sp->intr_type == MSI)
 		pci_disable_msi(sp->pdev);
+hw_enable_failed:
 	s2io_reset(sp);
 hw_init_failed:
 	if (sp->intr_type == MSI_X) {
@@ -3454,35 +3827,12 @@
 static int s2io_close(struct net_device *dev)
 {
 	nic_t *sp = dev->priv;
-	int i;
-	u16 msi_control;
 
 	flush_scheduled_work();
 	netif_stop_queue(dev);
 	/* Reset card, kill tasklet and free Tx and Rx buffers. */
-	s2io_card_down(sp);
+	s2io_card_down(sp, 1);
 
-	if (sp->intr_type == MSI_X) {
-		if (sp->device_type == XFRAME_II_DEVICE) {
-			for (i=1; (sp->s2io_entries[i].in_use == 
-					MSIX_REGISTERED_SUCCESS); i++) {
-				int vector = sp->entries[i].vector;
-				void *arg = sp->s2io_entries[i].arg;
-
-				free_irq(vector, arg);
-			}
-			pci_read_config_word(sp->pdev, 0x42, &msi_control);
-			msi_control &= 0xFFFE; /* Disable MSI */
-			pci_write_config_word(sp->pdev, 0x42, msi_control);
-
-			pci_disable_msix(sp->pdev);
-		}
-	}
-	else {
-		free_irq(sp->pdev->irq, dev);
-		if (sp->intr_type == MSI)
-			pci_disable_msi(sp->pdev);
-	}	
 	sp->device_close_flag = TRUE;	/* Device is shut down. */
 	return 0;
 }
@@ -3545,7 +3895,8 @@
 
 	queue_len = mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
 	/* Avoid "put" pointer going beyond "get" pointer */
-	if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) {
+	if (txdp->Host_Control ||
+		   ((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
 		DBG_PRINT(TX_DBG, "Error in xmit, No free TXDs.\n");
 		netif_stop_queue(dev);
 		dev_kfree_skb(skb);
@@ -3655,11 +4006,13 @@
 	mmiowb();
 
 	put_off++;
-	put_off %= mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1;
+	if (put_off == mac_control->fifos[queue].tx_curr_put_info.fifo_len + 1)
+		put_off = 0;
 	mac_control->fifos[queue].tx_curr_put_info.offset = put_off;
 
 	/* Avoid "put" pointer going beyond "get" pointer */
-	if (((put_off + 1) % queue_len) == get_off) {
+	if (((put_off+1) == queue_len ? 0 : (put_off+1)) == get_off) {
+		sp->mac_control.stats_info->sw_stat.fifo_full_cnt++;
 		DBG_PRINT(TX_DBG,
 			  "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
 			  put_off, get_off);
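Both the transmit put and get offsets are now wrapped with an explicit compare-and-reset instead of the '%' they used before. For an index that only ever advances by one, the two forms are equivalent while the new one avoids a divide in the hot path; a quick stand-alone check:

#include <assert.h>

static unsigned wrap_mod(unsigned off, unsigned len)
{
	return (off + 1) % len;			/* old form */
}

static unsigned wrap_cmp(unsigned off, unsigned len)
{
	off++;
	if (off == len)				/* new form */
		off = 0;
	return off;
}

int main(void)
{
	unsigned len = 8, off;

	for (off = 0; off < len; off++)
		assert(wrap_mod(off, len) == wrap_cmp(off, len));
	return 0;
}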
@@ -3795,7 +4148,6 @@
 	atomic_dec(&sp->isr_cnt);
 	return IRQ_HANDLED;
 }
-
 static void s2io_txpic_intr_handle(nic_t *sp)
 {
 	XENA_dev_config_t __iomem *bar0 = sp->bar0;
@@ -3806,41 +4158,56 @@
 		val64 = readq(&bar0->gpio_int_reg);
 		if ((val64 & GPIO_INT_REG_LINK_DOWN) &&
 		    (val64 & GPIO_INT_REG_LINK_UP)) {
+			/*
+			 * This is an unstable state, so clear both the up and
+			 * down interrupts and let the adapter re-evaluate the
+			 * link state.
+			 */
 			val64 |=  GPIO_INT_REG_LINK_DOWN;
 			val64 |= GPIO_INT_REG_LINK_UP;
 			writeq(val64, &bar0->gpio_int_reg);
-			goto masking;
+			val64 = readq(&bar0->gpio_int_mask);
+			val64 &= ~(GPIO_INT_MASK_LINK_UP |
+				   GPIO_INT_MASK_LINK_DOWN);
+			writeq(val64, &bar0->gpio_int_mask);
 		}
+		else if (val64 & GPIO_INT_REG_LINK_UP) {
+			val64 = readq(&bar0->adapter_status);
+			if (verify_xena_quiescence(sp, val64,
+						   sp->device_enabled_once)) {
+				/* Enable Adapter */
+				val64 = readq(&bar0->adapter_control);
+				val64 |= ADAPTER_CNTL_EN;
+				writeq(val64, &bar0->adapter_control);
+				val64 |= ADAPTER_LED_ON;
+				writeq(val64, &bar0->adapter_control);
+				if (!sp->device_enabled_once)
+					sp->device_enabled_once = 1;
 
-		if (((sp->last_link_state == LINK_UP) &&
-			(val64 & GPIO_INT_REG_LINK_DOWN)) ||
-		((sp->last_link_state == LINK_DOWN) &&
-		(val64 & GPIO_INT_REG_LINK_UP))) {
-			val64 = readq(&bar0->gpio_int_mask);
-			val64 |=  GPIO_INT_MASK_LINK_DOWN;
-			val64 |= GPIO_INT_MASK_LINK_UP;
-			writeq(val64, &bar0->gpio_int_mask);
-			s2io_set_link((unsigned long)sp);
-		}
-masking:
-		if (sp->last_link_state == LINK_UP) {
-			/*enable down interrupt */
-			val64 = readq(&bar0->gpio_int_mask);
-			/* unmasks link down intr */
-			val64 &=  ~GPIO_INT_MASK_LINK_DOWN;
-			/* masks link up intr */
-			val64 |= GPIO_INT_MASK_LINK_UP;
-			writeq(val64, &bar0->gpio_int_mask);
-		} else {
-			/*enable UP Interrupt */
-			val64 = readq(&bar0->gpio_int_mask);
-			/* unmasks link up interrupt */
-			val64 &= ~GPIO_INT_MASK_LINK_UP;
-			/* masks link down interrupt */
-			val64 |=  GPIO_INT_MASK_LINK_DOWN;
-			writeq(val64, &bar0->gpio_int_mask);
+				s2io_link(sp, LINK_UP);
+				/*
+				 * unmask link down interrupt and mask link-up
+				 * intr
+				 */
+				val64 = readq(&bar0->gpio_int_mask);
+				val64 &= ~GPIO_INT_MASK_LINK_DOWN;
+				val64 |= GPIO_INT_MASK_LINK_UP;
+				writeq(val64, &bar0->gpio_int_mask);
+
+			}
+		} else if (val64 & GPIO_INT_REG_LINK_DOWN) {
+			val64 = readq(&bar0->adapter_status);
+			if (verify_xena_quiescence(sp, val64,
+						   sp->device_enabled_once)) {
+				s2io_link(sp, LINK_DOWN);
+				/* Link is down, so unmask the link-up interrupt */
+				val64 = readq(&bar0->gpio_int_mask);
+				val64 &= ~GPIO_INT_MASK_LINK_UP;
+				val64 |= GPIO_INT_MASK_LINK_DOWN;
+				writeq(val64, &bar0->gpio_int_mask);
+			}
 		}
 	}
+	val64 = readq(&bar0->gpio_int_mask);
 }
 
 /**
@@ -3863,7 +4230,7 @@
 	nic_t *sp = dev->priv;
 	XENA_dev_config_t __iomem *bar0 = sp->bar0;
 	int i;
-	u64 reason = 0, val64;
+	u64 reason = 0, val64, org_mask;
 	mac_info_t *mac_control;
 	struct config_param *config;
 
@@ -3887,43 +4254,41 @@
 		return IRQ_NONE;
 	}
 
+	val64 = 0xFFFFFFFFFFFFFFFFULL;
+	/* Store current mask before masking all interrupts */
+	org_mask = readq(&bar0->general_int_mask);
+	writeq(val64, &bar0->general_int_mask);
+
 #ifdef CONFIG_S2IO_NAPI
 	if (reason & GEN_INTR_RXTRAFFIC) {
 		if (netif_rx_schedule_prep(dev)) {
-			en_dis_able_nic_intrs(sp, RX_TRAFFIC_INTR,
-					      DISABLE_INTRS);
+			writeq(val64, &bar0->rx_traffic_mask);
 			__netif_rx_schedule(dev);
 		}
 	}
 #else
-	/* If Intr is because of Rx Traffic */
-	if (reason & GEN_INTR_RXTRAFFIC) {
-		/*
-		 * rx_traffic_int reg is an R1 register, writing all 1's
-		 * will ensure that the actual interrupt causing bit get's
-		 * cleared and hence a read can be avoided.
-		 */
-		val64 = 0xFFFFFFFFFFFFFFFFULL;
-		writeq(val64, &bar0->rx_traffic_int);
-		for (i = 0; i < config->rx_ring_num; i++) {
-			rx_intr_handler(&mac_control->rings[i]);
-		}
+	/*
+	 * The Rx handler is called by default, without checking for the
+	 * cause of the interrupt.
+	 * rx_traffic_int reg is an R1 register, so writing all 1's
+	 * ensures that the actual interrupt-causing bit gets
+	 * cleared and hence a read can be avoided.
+	 */
+	writeq(val64, &bar0->rx_traffic_int);
+	for (i = 0; i < config->rx_ring_num; i++) {
+		rx_intr_handler(&mac_control->rings[i]);
 	}
 #endif
 
-	/* If Intr is because of Tx Traffic */
-	if (reason & GEN_INTR_TXTRAFFIC) {
-		/*
-		 * tx_traffic_int reg is an R1 register, writing all 1's
-		 * will ensure that the actual interrupt causing bit get's
-		 * cleared and hence a read can be avoided.
-		 */
-		val64 = 0xFFFFFFFFFFFFFFFFULL;
-		writeq(val64, &bar0->tx_traffic_int);
+	/*
+	 * tx_traffic_int reg is an R1 register, so writing all 1's
+	 * ensures that the actual interrupt-causing bit gets
+	 * cleared and hence a read can be avoided.
+	 */
+	writeq(val64, &bar0->tx_traffic_int);
 
-		for (i = 0; i < config->tx_fifo_num; i++)
-			tx_intr_handler(&mac_control->fifos[i]);
-	}
+	for (i = 0; i < config->tx_fifo_num; i++)
+		tx_intr_handler(&mac_control->fifos[i]);
 
 	if (reason & GEN_INTR_TXPIC)
 		s2io_txpic_intr_handle(sp);
@@ -3949,6 +4314,7 @@
 					DBG_PRINT(ERR_DBG, " in ISR!!\n");
 					clear_bit(0, (&sp->tasklet_status));
 					atomic_dec(&sp->isr_cnt);
+					writeq(org_mask, &bar0->general_int_mask);
 					return IRQ_HANDLED;
 				}
 				clear_bit(0, (&sp->tasklet_status));
@@ -3964,7 +4330,7 @@
 		}
 	}
 #endif
-
+	writeq(org_mask, &bar0->general_int_mask);
 	atomic_dec(&sp->isr_cnt);
 	return IRQ_HANDLED;
 }
@@ -4067,7 +4433,8 @@
 		    RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
 		writeq(val64, &bar0->rmac_addr_cmd_mem);
 		/* Wait till command completes */
-		wait_for_cmd_complete(sp);
+		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
+				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING);
 
 		sp->m_cast_flg = 1;
 		sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
@@ -4082,7 +4449,8 @@
 		    RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
 		writeq(val64, &bar0->rmac_addr_cmd_mem);
 		/* Wait till command completes */
-		wait_for_cmd_complete(sp);
+		wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
+				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING);
 
 		sp->m_cast_flg = 0;
 		sp->all_multi_pos = 0;
@@ -4147,7 +4515,8 @@
 			writeq(val64, &bar0->rmac_addr_cmd_mem);
 
 			/* Wait for command completes */
-			if (wait_for_cmd_complete(sp)) {
+			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
+				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
 				DBG_PRINT(ERR_DBG, "%s: Adding ",
 					  dev->name);
 				DBG_PRINT(ERR_DBG, "Multicasts failed\n");
@@ -4177,7 +4546,8 @@
 			writeq(val64, &bar0->rmac_addr_cmd_mem);
 
 			/* Wait for command completes */
-			if (wait_for_cmd_complete(sp)) {
+			if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
+				      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
 				DBG_PRINT(ERR_DBG, "%s: Adding ",
 					  dev->name);
 				DBG_PRINT(ERR_DBG, "Multicasts failed\n");
@@ -4222,7 +4592,8 @@
 	    RMAC_ADDR_CMD_MEM_OFFSET(0);
 	writeq(val64, &bar0->rmac_addr_cmd_mem);
 	/* Wait till command completes */
-	if (wait_for_cmd_complete(sp)) {
+	if (wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
+		      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
 		DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
 		return FAILURE;
 	}
@@ -4619,6 +4990,44 @@
 	}
 	return ret;
 }
+static void s2io_vpd_read(nic_t *nic)
+{
+	u8 vpd_data[256],data;
+	int i=0, cnt, fail = 0;
+	int vpd_addr = 0x80;
+
+	if (nic->device_type == XFRAME_II_DEVICE) {
+		strcpy(nic->product_name, "Xframe II 10GbE network adapter");
+		vpd_addr = 0x80;
+	}
+	else {
+		strcpy(nic->product_name, "Xframe I 10GbE network adapter");
+		vpd_addr = 0x50;
+	}
+
+	for (i = 0; i < 256; i +=4 ) {
+		pci_write_config_byte(nic->pdev, (vpd_addr + 2), i);
+		pci_read_config_byte(nic->pdev,  (vpd_addr + 2), &data);
+		pci_write_config_byte(nic->pdev, (vpd_addr + 3), 0);
+		for (cnt = 0; cnt <5; cnt++) {
+			msleep(2);
+			pci_read_config_byte(nic->pdev, (vpd_addr + 3), &data);
+			if (data == 0x80)
+				break;
+		}
+		if (cnt >= 5) {
+			DBG_PRINT(ERR_DBG, "Read of VPD data failed\n");
+			fail = 1;
+			break;
+		}
+		pci_read_config_dword(nic->pdev,  (vpd_addr + 4),
+				      (u32 *)&vpd_data[i]);
+	}
+	if ((!fail) && (vpd_data[1] < VPD_PRODUCT_NAME_LEN)) {
+		memset(nic->product_name, 0, vpd_data[1]);
+		memcpy(nic->product_name, &vpd_data[3], vpd_data[1]);
+	}
+}
 
 /**
  *  s2io_ethtool_geeprom  - reads the value stored in the Eeprom.
@@ -4931,8 +5340,10 @@
 	u64 val64;
 
 	val64 = readq(&bar0->adapter_status);
-	if (val64 & ADAPTER_STATUS_RMAC_LOCAL_FAULT)
+	if(!(LINK_IS_UP(val64)))
 		*data = 1;
+	else
+		*data = 0;
 
 	return 0;
 }
@@ -5112,7 +5523,6 @@
 	int i = 0;
 	nic_t *sp = dev->priv;
 	StatInfo_t *stat_info = sp->mac_control.stats_info;
-	u64 tmp;
 
 	s2io_updt_stats(sp);
 	tmp_stats[i++] =
@@ -5129,9 +5539,19 @@
 		(u64)le32_to_cpu(stat_info->tmac_bcst_frms_oflow) << 32 |
 		le32_to_cpu(stat_info->tmac_bcst_frms);
 	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_pause_ctrl_frms);
+        tmp_stats[i++] =
+                (u64)le32_to_cpu(stat_info->tmac_ttl_octets_oflow) << 32 |
+                le32_to_cpu(stat_info->tmac_ttl_octets);
+	tmp_stats[i++] =
+                (u64)le32_to_cpu(stat_info->tmac_ucst_frms_oflow) << 32 |
+                le32_to_cpu(stat_info->tmac_ucst_frms);
+	tmp_stats[i++] =
+                (u64)le32_to_cpu(stat_info->tmac_nucst_frms_oflow) << 32 |
+                le32_to_cpu(stat_info->tmac_nucst_frms);
 	tmp_stats[i++] =
 		(u64)le32_to_cpu(stat_info->tmac_any_err_frms_oflow) << 32 |
 		le32_to_cpu(stat_info->tmac_any_err_frms);
+        tmp_stats[i++] = le64_to_cpu(stat_info->tmac_ttl_less_fb_octets);
 	tmp_stats[i++] = le64_to_cpu(stat_info->tmac_vld_ip_octets);
 	tmp_stats[i++] =
 		(u64)le32_to_cpu(stat_info->tmac_vld_ip_oflow) << 32 |
@@ -5163,11 +5583,27 @@
 		(u64)le32_to_cpu(stat_info->rmac_vld_bcst_frms_oflow) << 32 |
 		le32_to_cpu(stat_info->rmac_vld_bcst_frms);
 	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_in_rng_len_err_frms);
+	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_out_rng_len_err_frms);
 	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_long_frms);
 	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_pause_ctrl_frms);
+	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_unsup_ctrl_frms);
+        tmp_stats[i++] =
+                (u64)le32_to_cpu(stat_info->rmac_ttl_octets_oflow) << 32 |
+		le32_to_cpu(stat_info->rmac_ttl_octets);
+        tmp_stats[i++] =
+                (u64)le32_to_cpu(stat_info->rmac_accepted_ucst_frms_oflow)
+		<< 32 | le32_to_cpu(stat_info->rmac_accepted_ucst_frms);
+	tmp_stats[i++] =
+                (u64)le32_to_cpu(stat_info->rmac_accepted_nucst_frms_oflow)
+                 << 32 | le32_to_cpu(stat_info->rmac_accepted_nucst_frms);
 	tmp_stats[i++] =
 		(u64)le32_to_cpu(stat_info->rmac_discarded_frms_oflow) << 32 |
 		le32_to_cpu(stat_info->rmac_discarded_frms);
+        tmp_stats[i++] =
+                (u64)le32_to_cpu(stat_info->rmac_drop_events_oflow)
+                 << 32 | le32_to_cpu(stat_info->rmac_drop_events);
+        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_less_fb_octets);
+        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_frms);
 	tmp_stats[i++] =
 		(u64)le32_to_cpu(stat_info->rmac_usized_frms_oflow) << 32 |
 		le32_to_cpu(stat_info->rmac_usized_frms);
@@ -5180,40 +5616,129 @@
 	tmp_stats[i++] =
 		(u64)le32_to_cpu(stat_info->rmac_jabber_frms_oflow) << 32 |
 		le32_to_cpu(stat_info->rmac_jabber_frms);
-	tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
+	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_64_frms);
+        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_65_127_frms);
+        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_128_255_frms);
+        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_256_511_frms);
+        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_512_1023_frms);
+        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1024_1518_frms);
+	tmp_stats[i++] =
+		(u64)le32_to_cpu(stat_info->rmac_ip_oflow) << 32 |
 		le32_to_cpu(stat_info->rmac_ip);
 	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ip_octets);
 	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_hdr_err_ip);
-	tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
+	tmp_stats[i++] =
+		(u64)le32_to_cpu(stat_info->rmac_drop_ip_oflow) << 32 |
 		le32_to_cpu(stat_info->rmac_drop_ip);
-	tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
+	tmp_stats[i++] =
+		(u64)le32_to_cpu(stat_info->rmac_icmp_oflow) << 32 |
 		le32_to_cpu(stat_info->rmac_icmp);
 	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_tcp);
-	tmp_stats[i++] = (u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
+	tmp_stats[i++] =
+		(u64)le32_to_cpu(stat_info->rmac_udp_oflow) << 32 |
 		le32_to_cpu(stat_info->rmac_udp);
 	tmp_stats[i++] =
 		(u64)le32_to_cpu(stat_info->rmac_err_drp_udp_oflow) << 32 |
 		le32_to_cpu(stat_info->rmac_err_drp_udp);
+	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_err_sym);
+        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q0);
+        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q1);
+        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q2);
+        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q3);
+        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q4);
+        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q5);
+        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q6);
+        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_frms_q7);
+        tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q0);
+        tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q1);
+        tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q2);
+        tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q3);
+        tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q4);
+        tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q5);
+        tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q6);
+        tmp_stats[i++] = le16_to_cpu(stat_info->rmac_full_q7);
 	tmp_stats[i++] =
 		(u64)le32_to_cpu(stat_info->rmac_pause_cnt_oflow) << 32 |
 		le32_to_cpu(stat_info->rmac_pause_cnt);
+	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_data_err_cnt);
+        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_xgmii_ctrl_err_cnt);
 	tmp_stats[i++] =
 		(u64)le32_to_cpu(stat_info->rmac_accepted_ip_oflow) << 32 |
 		le32_to_cpu(stat_info->rmac_accepted_ip);
 	tmp_stats[i++] = le32_to_cpu(stat_info->rmac_err_tcp);
+	tmp_stats[i++] = le32_to_cpu(stat_info->rd_req_cnt);
+	tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_cnt);
+	tmp_stats[i++] = le32_to_cpu(stat_info->new_rd_req_rtry_cnt);
+	tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_cnt);
+	tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_rd_ack_cnt);
+	tmp_stats[i++] = le32_to_cpu(stat_info->wr_req_cnt);
+	tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_cnt);
+	tmp_stats[i++] = le32_to_cpu(stat_info->new_wr_req_rtry_cnt);
+	tmp_stats[i++] = le32_to_cpu(stat_info->wr_rtry_cnt);
+	tmp_stats[i++] = le32_to_cpu(stat_info->wr_disc_cnt);
+	tmp_stats[i++] = le32_to_cpu(stat_info->rd_rtry_wr_ack_cnt);
+	tmp_stats[i++] = le32_to_cpu(stat_info->txp_wr_cnt);
+	tmp_stats[i++] = le32_to_cpu(stat_info->txd_rd_cnt);
+	tmp_stats[i++] = le32_to_cpu(stat_info->txd_wr_cnt);
+	tmp_stats[i++] = le32_to_cpu(stat_info->rxd_rd_cnt);
+	tmp_stats[i++] = le32_to_cpu(stat_info->rxd_wr_cnt);
+	tmp_stats[i++] = le32_to_cpu(stat_info->txf_rd_cnt);
+	tmp_stats[i++] = le32_to_cpu(stat_info->rxf_wr_cnt);
+	tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_1519_4095_frms);
+        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_4096_8191_frms);
+        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_8192_max_frms);
+        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_ttl_gt_max_frms);
+        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_osized_alt_frms);
+        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_jabber_alt_frms);
+        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_gt_max_alt_frms);
+        tmp_stats[i++] = le64_to_cpu(stat_info->rmac_vlan_frms);
+        tmp_stats[i++] = le32_to_cpu(stat_info->rmac_len_discard);
+        tmp_stats[i++] = le32_to_cpu(stat_info->rmac_fcs_discard);
+        tmp_stats[i++] = le32_to_cpu(stat_info->rmac_pf_discard);
+        tmp_stats[i++] = le32_to_cpu(stat_info->rmac_da_discard);
+        tmp_stats[i++] = le32_to_cpu(stat_info->rmac_red_discard);
+        tmp_stats[i++] = le32_to_cpu(stat_info->rmac_rts_discard);
+        tmp_stats[i++] = le32_to_cpu(stat_info->rmac_ingm_full_discard);
+        tmp_stats[i++] = le32_to_cpu(stat_info->link_fault_cnt);
 	tmp_stats[i++] = 0;
 	tmp_stats[i++] = stat_info->sw_stat.single_ecc_errs;
 	tmp_stats[i++] = stat_info->sw_stat.double_ecc_errs;
+	tmp_stats[i++] = stat_info->sw_stat.parity_err_cnt;
+	tmp_stats[i++] = stat_info->sw_stat.serious_err_cnt;
+	tmp_stats[i++] = stat_info->sw_stat.soft_reset_cnt;
+	tmp_stats[i++] = stat_info->sw_stat.fifo_full_cnt;
+	tmp_stats[i++] = stat_info->sw_stat.ring_full_cnt;
+	tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_high;
+	tmp_stats[i++] = stat_info->xpak_stat.alarm_transceiver_temp_low;
+	tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_high;
+	tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_bias_current_low;
+	tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_high;
+	tmp_stats[i++] = stat_info->xpak_stat.alarm_laser_output_power_low;
+	tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_high;
+	tmp_stats[i++] = stat_info->xpak_stat.warn_transceiver_temp_low;
+	tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_high;
+	tmp_stats[i++] = stat_info->xpak_stat.warn_laser_bias_current_low;
+	tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_high;
+	tmp_stats[i++] = stat_info->xpak_stat.warn_laser_output_power_low;
 	tmp_stats[i++] = stat_info->sw_stat.clubbed_frms_cnt;
 	tmp_stats[i++] = stat_info->sw_stat.sending_both;
 	tmp_stats[i++] = stat_info->sw_stat.outof_sequence_pkts;
 	tmp_stats[i++] = stat_info->sw_stat.flush_max_pkts;
-	tmp = 0;
 	if (stat_info->sw_stat.num_aggregations) {
-		tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
-		do_div(tmp, stat_info->sw_stat.num_aggregations);
+		u64 tmp = stat_info->sw_stat.sum_avg_pkts_aggregated;
+		int count = 0;
+		/* 
+		 * Since 64-bit divide does not work on all platforms,
+		 * do repeated subtraction.
+		 */
+		while (tmp >= stat_info->sw_stat.num_aggregations) {
+			tmp -= stat_info->sw_stat.num_aggregations;
+			count++;
+		}
+		tmp_stats[i++] = count;
 	}
-	tmp_stats[i++] = tmp;
+	else
+		tmp_stats[i++] = 0;
 }
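The replacement above computes the average packets-per-aggregation by repeated subtraction, as its comment notes, so that no 64-bit divide is emitted on platforms where that is a problem. A stand-alone model of what the loop computes:

#include <assert.h>
#include <stdint.h>

/* The loop yields sum / num (integer division) without a 64-bit divide. */
int main(void)
{
	uint64_t sum = 1000003, num = 17, count = 0;

	while (sum >= num) {
		sum -= num;
		count++;
	}
	assert(count == 1000003ULL / 17);
	return 0;
}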
 
 static int s2io_ethtool_get_regs_len(struct net_device *dev)
@@ -5351,7 +5876,7 @@
 
 	dev->mtu = new_mtu;
 	if (netif_running(dev)) {
-		s2io_card_down(sp);
+		s2io_card_down(sp, 0);
 		netif_stop_queue(dev);
 		if (s2io_card_up(sp)) {
 			DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
@@ -5489,12 +6014,172 @@
 	clear_bit(0, &(nic->link_state));
 }
 
-static void s2io_card_down(nic_t * sp)
+static int set_rxd_buffer_pointer(nic_t *sp, RxD_t *rxdp, buffAdd_t *ba,
+			   struct sk_buff **skb, u64 *temp0, u64 *temp1,
+			   u64 *temp2, int size)
+{
+	struct net_device *dev = sp->dev;
+	struct sk_buff *frag_list;
+
+	if ((sp->rxd_mode == RXD_MODE_1) && (rxdp->Host_Control == 0)) {
+		/* allocate skb */
+		if (*skb) {
+			DBG_PRINT(INFO_DBG, "SKB is not NULL\n");
+			/*
+			 * As the Rx frames are not going to be processed,
+			 * reuse the same mapped address for the RxD
+			 * buffer pointer.
+			 */
+			((RxD1_t*)rxdp)->Buffer0_ptr = *temp0;
+		} else {
+			*skb = dev_alloc_skb(size);
+			if (!(*skb)) {
+				DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
+				DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
+				return -ENOMEM ;
+			}
+			/* Store the mapped addr in a temp variable
+			 * so that it can be reused for the next rxd whose
+			 * Host_Control is NULL.
+			 */
+			((RxD1_t*)rxdp)->Buffer0_ptr = *temp0 =
+				pci_map_single( sp->pdev, (*skb)->data,
+					size - NET_IP_ALIGN,
+					PCI_DMA_FROMDEVICE);
+			rxdp->Host_Control = (unsigned long) (*skb);
+		}
+	} else if ((sp->rxd_mode == RXD_MODE_3B) && (rxdp->Host_Control == 0)) {
+		/* Two buffer Mode */
+		if (*skb) {
+			((RxD3_t*)rxdp)->Buffer2_ptr = *temp2;
+			((RxD3_t*)rxdp)->Buffer0_ptr = *temp0;
+			((RxD3_t*)rxdp)->Buffer1_ptr = *temp1;
+		} else {
+			*skb = dev_alloc_skb(size);
+			((RxD3_t*)rxdp)->Buffer2_ptr = *temp2 =
+				pci_map_single(sp->pdev, (*skb)->data,
+					       dev->mtu + 4,
+					       PCI_DMA_FROMDEVICE);
+			((RxD3_t*)rxdp)->Buffer0_ptr = *temp0 =
+				pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
+						PCI_DMA_FROMDEVICE);
+			rxdp->Host_Control = (unsigned long) (*skb);
+
+			/* Buffer-1 will be dummy buffer not used */
+			((RxD3_t*)rxdp)->Buffer1_ptr = *temp1 =
+				pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
+					       PCI_DMA_FROMDEVICE);
+		}
+	} else if ((rxdp->Host_Control == 0)) {
+		/* Three buffer mode */
+		if (*skb) {
+			((RxD3_t*)rxdp)->Buffer0_ptr = *temp0;
+			((RxD3_t*)rxdp)->Buffer1_ptr = *temp1;
+			((RxD3_t*)rxdp)->Buffer2_ptr = *temp2;
+		} else {
+			*skb = dev_alloc_skb(size);
+
+			((RxD3_t*)rxdp)->Buffer0_ptr = *temp0 =
+				pci_map_single(sp->pdev, ba->ba_0, BUF0_LEN,
+					       PCI_DMA_FROMDEVICE);
+			/* Buffer-1 receives L3/L4 headers */
+			((RxD3_t*)rxdp)->Buffer1_ptr = *temp1 =
+				pci_map_single( sp->pdev, (*skb)->data,
+						l3l4hdr_size + 4,
+						PCI_DMA_FROMDEVICE);
+			/*
+			 * skb_shinfo(skb)->frag_list will have L4
+			 * data payload
+			 */
+			skb_shinfo(*skb)->frag_list = dev_alloc_skb(dev->mtu +
+								   ALIGN_SIZE);
+			if (skb_shinfo(*skb)->frag_list == NULL) {
+				DBG_PRINT(ERR_DBG, "%s: dev_alloc_skb \
+					  failed\n ", dev->name);
+				return -ENOMEM ;
+			}
+			frag_list = skb_shinfo(*skb)->frag_list;
+			frag_list->next = NULL;
+			/*
+			 * Buffer-2 receives L4 data payload
+			 */
+			((RxD3_t*)rxdp)->Buffer2_ptr = *temp2 =
+				pci_map_single( sp->pdev, frag_list->data,
+						dev->mtu, PCI_DMA_FROMDEVICE);
+		}
+	}
+	return 0;
+}
+static void set_rxd_buffer_size(nic_t *sp, RxD_t *rxdp, int size)
+{
+	struct net_device *dev = sp->dev;
+	if (sp->rxd_mode == RXD_MODE_1) {
+		rxdp->Control_2 = SET_BUFFER0_SIZE_1( size - NET_IP_ALIGN);
+	} else if (sp->rxd_mode == RXD_MODE_3B) {
+		rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
+		rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1);
+		rxdp->Control_2 |= SET_BUFFER2_SIZE_3( dev->mtu + 4);
+	} else {
+		rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN);
+		rxdp->Control_2 |= SET_BUFFER1_SIZE_3(l3l4hdr_size + 4);
+		rxdp->Control_2 |= SET_BUFFER2_SIZE_3(dev->mtu);
+	}
+}
+
+static  int rxd_owner_bit_reset(nic_t *sp)
+{
+	int i, j, k, blk_cnt = 0, size;
+	mac_info_t * mac_control = &sp->mac_control;
+	struct config_param *config = &sp->config;
+	struct net_device *dev = sp->dev;
+	RxD_t *rxdp = NULL;
+	struct sk_buff *skb = NULL;
+	buffAdd_t *ba = NULL;
+	u64 temp0_64 = 0, temp1_64 = 0, temp2_64 = 0;
+
+	/* Calculate the size based on ring mode */
+	size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
+		HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
+	if (sp->rxd_mode == RXD_MODE_1)
+		size += NET_IP_ALIGN;
+	else if (sp->rxd_mode == RXD_MODE_3B)
+		size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4;
+	else
+		size = l3l4hdr_size + ALIGN_SIZE + BUF0_LEN + 4;
+
+	for (i = 0; i < config->rx_ring_num; i++) {
+		blk_cnt = config->rx_cfg[i].num_rxd /
+			(rxd_count[sp->rxd_mode] +1);
+
+		for (j = 0; j < blk_cnt; j++) {
+			for (k = 0; k < rxd_count[sp->rxd_mode]; k++) {
+				rxdp = mac_control->rings[i].
+					rx_blocks[j].rxds[k].virt_addr;
+				if(sp->rxd_mode >= RXD_MODE_3A)
+					ba = &mac_control->rings[i].ba[j][k];
+				set_rxd_buffer_pointer(sp, rxdp, ba,
+						       &skb,(u64 *)&temp0_64,
+						       (u64 *)&temp1_64,
+						       (u64 *)&temp2_64, size);
+
+				set_rxd_buffer_size(sp, rxdp, size);
+				wmb();
+				/* flip the Ownership bit to Hardware */
+				rxdp->Control_1 |= RXD_OWN_XENA;
+			}
+		}
+	}
+	return 0;
+}
+
+static void s2io_card_down(nic_t * sp, int flag)
 {
 	int cnt = 0;
 	XENA_dev_config_t __iomem *bar0 = sp->bar0;
 	unsigned long flags;
 	register u64 val64 = 0;
+	struct net_device *dev = sp->dev;
 
 	del_timer_sync(&sp->alarm_timer);
 	/* If s2io_set_link task is executing, wait till it completes. */
@@ -5505,12 +6190,51 @@
 
 	/* disable Tx and Rx traffic on the NIC */
 	stop_nic(sp);
+	if (flag) {
+		if (sp->intr_type == MSI_X) {
+			int i;
+			u16 msi_control;
+
+			for (i=1; (sp->s2io_entries[i].in_use ==
+				MSIX_REGISTERED_SUCCESS); i++) {
+				int vector = sp->entries[i].vector;
+				void *arg = sp->s2io_entries[i].arg;
+
+				free_irq(vector, arg);
+			}
+			pci_read_config_word(sp->pdev, 0x42, &msi_control);
+			msi_control &= 0xFFFE; /* Disable MSI */
+			pci_write_config_word(sp->pdev, 0x42, msi_control);
+			pci_disable_msix(sp->pdev);
+		} else {
+			free_irq(sp->pdev->irq, dev);
+			if (sp->intr_type == MSI)
+				pci_disable_msi(sp->pdev);
+		}
+	}
+	/* Waiting till all Interrupt handlers are complete */
+	cnt = 0;
+	do {
+		msleep(10);
+		if (!atomic_read(&sp->isr_cnt))
+			break;
+		cnt++;
+	} while(cnt < 5);
 
 	/* Kill tasklet. */
 	tasklet_kill(&sp->task);
 
 	/* Check if the device is Quiescent and then Reset the NIC */
 	do {
+		/* As per the HW requirement we need to replenish the
+		 * receive buffers to avoid a ring bump. Since there is
+		 * no intention of processing the Rx frame at this point,
+		 * we just set the ownership bit of the rxd in each Rx
+		 * ring to HW and set the appropriate buffer size
+		 * based on the ring mode.
+		 */
+		rxd_owner_bit_reset(sp);
+
 		val64 = readq(&bar0->adapter_status);
 		if (verify_xena_quiescence(sp, val64, sp->device_enabled_once)) {
 			break;
@@ -5528,15 +6252,6 @@
 	} while (1);
 	s2io_reset(sp);
 
-	/* Waiting till all Interrupt handlers are complete */
-	cnt = 0;
-	do {
-		msleep(10);
-		if (!atomic_read(&sp->isr_cnt))
-			break;
-		cnt++;
-	} while(cnt < 5);
-
 	spin_lock_irqsave(&sp->tx_lock, flags);
 	/* Free all Tx buffers */
 	free_tx_buffers(sp);
@@ -5637,7 +6352,7 @@
 	struct net_device *dev = (struct net_device *) data;
 	nic_t *sp = dev->priv;
 
-	s2io_card_down(sp);
+	s2io_card_down(sp, 0);
 	if (s2io_card_up(sp)) {
 		DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
 			  dev->name);
@@ -5667,6 +6382,7 @@
 
 	if (netif_carrier_ok(dev)) {
 		schedule_work(&sp->rst_timer_task);
+		sp->mac_control.stats_info->sw_stat.soft_reset_cnt++;
 	}
 }
 
@@ -5695,18 +6411,33 @@
 		((unsigned long) rxdp->Host_Control);
 	int ring_no = ring_data->ring_no;
 	u16 l3_csum, l4_csum;
+	unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
 	lro_t *lro;
 
 	skb->dev = dev;
-	if (rxdp->Control_1 & RXD_T_CODE) {
-		unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
-		DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
-			  dev->name, err);
-		dev_kfree_skb(skb);
-		sp->stats.rx_crc_errors++;
-		atomic_dec(&sp->rx_bufs_left[ring_no]);
-		rxdp->Host_Control = 0;
-		return 0;
+
+	if (err) {
+		/* Check for parity error */
+		if (err & 0x1) {
+			sp->mac_control.stats_info->sw_stat.parity_err_cnt++;
+		}
+
+		/*
+		 * Drop the packet if the transfer code is bad. The exception is
+		 * 0x5, which could be due to an unsupported IPv6 extension
+		 * header. In that case we let the stack handle the packet.
+		 * Note that since the hardware checksum will be incorrect,
+		 * the stack has to validate it itself.
+		 */
+		if (err && ((err >> 48) != 0x5)) {
+			DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
+				dev->name, err);
+			sp->stats.rx_crc_errors++;
+			dev_kfree_skb(skb);
+			atomic_dec(&sp->rx_bufs_left[ring_no]);
+			rxdp->Host_Control = 0;
+			return 0;
+		}
 	}
 
 	/* Updating statistics */
@@ -5792,6 +6523,9 @@
 						clear_lro_session(lro);
 						goto send_up;
 					case 0: /* sessions exceeded */
+					case -1: /* non-TCP or not
+						  * L2 aggregatable
+						  */
 					case 5: /*
 						 * First pkt in session not
 						 * L3/L4 aggregatable
@@ -5918,13 +6652,6 @@
 	pci_write_config_word(sp->pdev, PCI_COMMAND,
 			      (pci_cmd | PCI_COMMAND_PARITY));
 	pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
-
-	/* Forcibly disabling relaxed ordering capability of the card. */
-	pcix_cmd &= 0xfffd;
-	pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
-			      pcix_cmd);
-	pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
-			     &(pcix_cmd));
 }
 
 MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@neterion.com>");
@@ -5954,6 +6681,55 @@
 module_param(lro, int, 0);
 module_param(lro_max_pkts, int, 0);
 
+static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type)
+{
+	if ( tx_fifo_num > 8) {
+		DBG_PRINT(ERR_DBG, "s2io: Requested number of Tx fifos not "
+			 "supported\n");
+		DBG_PRINT(ERR_DBG, "s2io: Default to 8 Tx fifos\n");
+		tx_fifo_num = 8;
+	}
+	if ( rx_ring_num > 8) {
+		DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not "
+			 "supported\n");
+		DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n");
+		rx_ring_num = 8;
+	}
+#ifdef CONFIG_S2IO_NAPI
+	if (*dev_intr_type != INTA) {
+		DBG_PRINT(ERR_DBG, "s2io: NAPI cannot be enabled when "
+			  "MSI/MSI-X is enabled. Defaulting to INTA\n");
+		*dev_intr_type = INTA;
+	}
+#endif
+#ifndef CONFIG_PCI_MSI
+	if (*dev_intr_type != INTA) {
+		DBG_PRINT(ERR_DBG, "s2io: This kernel does not support "
+			  "MSI/MSI-X. Defaulting to INTA\n");
+		*dev_intr_type = INTA;
+	}
+#else
+	if (*dev_intr_type > MSI_X) {
+		DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. "
+			  "Defaulting to INTA\n");
+		*dev_intr_type = INTA;
+	}
+#endif
+	if ((*dev_intr_type == MSI_X) &&
+			((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
+			(pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
+		DBG_PRINT(ERR_DBG, "s2io: Xframe I does not support MSI-X. "
+			  "Defaulting to INTA\n");
+		*dev_intr_type = INTA;
+	}
+	if (rx_ring_mode > 3) {
+		DBG_PRINT(ERR_DBG, "s2io: Requested ring mode not supported\n");
+		DBG_PRINT(ERR_DBG, "s2io: Defaulting to 3-buffer mode\n");
+		rx_ring_mode = 3;
+	}
+	return SUCCESS;
+}
+
 /**
  *  s2io_init_nic - Initialization of the adapter .
  *  @pdev : structure containing the PCI related information of the device.
@@ -5984,15 +6760,8 @@
 	int mode;
 	u8 dev_intr_type = intr_type;
 
-#ifdef CONFIG_S2IO_NAPI
-	if (dev_intr_type != INTA) {
-		DBG_PRINT(ERR_DBG, "NAPI cannot be enabled when MSI/MSI-X \
-is enabled. Defaulting to INTA\n");
-		dev_intr_type = INTA;
-	}
-	else
-		DBG_PRINT(ERR_DBG, "NAPI support has been enabled\n");
-#endif
+	if ((ret = s2io_verify_parm(pdev, &dev_intr_type)))
+		return ret;
 
 	if ((ret = pci_enable_device(pdev))) {
 		DBG_PRINT(ERR_DBG,
@@ -6017,14 +6786,6 @@
 		pci_disable_device(pdev);
 		return -ENOMEM;
 	}
-
-	if ((dev_intr_type == MSI_X) && 
-			((pdev->device != PCI_DEVICE_ID_HERC_WIN) &&
-			(pdev->device != PCI_DEVICE_ID_HERC_UNI))) {
-		DBG_PRINT(ERR_DBG, "Xframe I does not support MSI_X. \
-Defaulting to INTA\n");
-		dev_intr_type = INTA;
-	}
 	if (dev_intr_type != MSI_X) {
 		if (pci_request_regions(pdev, s2io_driver_name)) {
 			DBG_PRINT(ERR_DBG, "Request Regions failed\n"),
@@ -6100,8 +6861,6 @@
 	config = &sp->config;
 
 	/* Tx side parameters. */
-	if (tx_fifo_len[0] == 0)
-		tx_fifo_len[0] = DEFAULT_FIFO_LEN; /* Default value. */
 	config->tx_fifo_num = tx_fifo_num;
 	for (i = 0; i < MAX_TX_FIFOS; i++) {
 		config->tx_cfg[i].fifo_len = tx_fifo_len[i];
@@ -6125,8 +6884,6 @@
 	config->max_txds = MAX_SKB_FRAGS + 2;
 
 	/* Rx side parameters. */
-	if (rx_ring_sz[0] == 0)
-		rx_ring_sz[0] = SMALL_BLK_CNT; /* Default value. */
 	config->rx_ring_num = rx_ring_num;
 	for (i = 0; i < MAX_RX_RINGS; i++) {
 		config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
@@ -6267,8 +7024,8 @@
 	val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
 	    RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
 	writeq(val64, &bar0->rmac_addr_cmd_mem);
-	wait_for_cmd_complete(sp);
-
+	wait_for_cmd_complete(&bar0->rmac_addr_cmd_mem,
+		      RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING);
 	tmp64 = readq(&bar0->rmac_addr_data0_mem);
 	mac_down = (u32) tmp64;
 	mac_up = (u32) (tmp64 >> 32);
@@ -6322,82 +7079,63 @@
 		ret = -ENODEV;
 		goto register_failed;
 	}
-
-	if (sp->device_type & XFRAME_II_DEVICE) {
-		DBG_PRINT(ERR_DBG, "%s: Neterion Xframe II 10GbE adapter ",
-			  dev->name);
-		DBG_PRINT(ERR_DBG, "(rev %d), Version %s",
+	s2io_vpd_read(sp);
+	DBG_PRINT(ERR_DBG, "%s: Neterion %s", dev->name, sp->product_name);
+	DBG_PRINT(ERR_DBG, "(rev %d), Driver version %s\n",
 				get_xena_rev_id(sp->pdev),
 				s2io_driver_version);
-		switch(sp->intr_type) {
-			case INTA:
-				DBG_PRINT(ERR_DBG, ", Intr type INTA");
-				break;
-			case MSI:
-				DBG_PRINT(ERR_DBG, ", Intr type MSI");
-				break;
-			case MSI_X:
-				DBG_PRINT(ERR_DBG, ", Intr type MSI-X");
-				break;
-		}
-
-		DBG_PRINT(ERR_DBG, "\nCopyright(c) 2002-2005 Neterion Inc.\n");
-		DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
+	DBG_PRINT(ERR_DBG, "Copyright(c) 2002-2005 Neterion Inc.\n");
+	DBG_PRINT(ERR_DBG, "%s: MAC ADDR: "
+			  "%02x:%02x:%02x:%02x:%02x:%02x\n", dev->name,
 			  sp->def_mac_addr[0].mac_addr[0],
 			  sp->def_mac_addr[0].mac_addr[1],
 			  sp->def_mac_addr[0].mac_addr[2],
 			  sp->def_mac_addr[0].mac_addr[3],
 			  sp->def_mac_addr[0].mac_addr[4],
 			  sp->def_mac_addr[0].mac_addr[5]);
+	if (sp->device_type & XFRAME_II_DEVICE) {
 		mode = s2io_print_pci_mode(sp);
 		if (mode < 0) {
-			DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode ");
+			DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n");
 			ret = -EBADSLT;
+			unregister_netdev(dev);
 			goto set_swap_failed;
 		}
-	} else {
-		DBG_PRINT(ERR_DBG, "%s: Neterion Xframe I 10GbE adapter ",
-			  dev->name);
-		DBG_PRINT(ERR_DBG, "(rev %d), Version %s",
-					get_xena_rev_id(sp->pdev),
-					s2io_driver_version);
-		switch(sp->intr_type) {
-			case INTA:
-				DBG_PRINT(ERR_DBG, ", Intr type INTA");
-				break;
-			case MSI:
-				DBG_PRINT(ERR_DBG, ", Intr type MSI");
-				break;
-			case MSI_X:
-				DBG_PRINT(ERR_DBG, ", Intr type MSI-X");
-				break;
-		}
-		DBG_PRINT(ERR_DBG, "\nCopyright(c) 2002-2005 Neterion Inc.\n");
-		DBG_PRINT(ERR_DBG, "MAC ADDR: %02x:%02x:%02x:%02x:%02x:%02x\n",
-			  sp->def_mac_addr[0].mac_addr[0],
-			  sp->def_mac_addr[0].mac_addr[1],
-			  sp->def_mac_addr[0].mac_addr[2],
-			  sp->def_mac_addr[0].mac_addr[3],
-			  sp->def_mac_addr[0].mac_addr[4],
-			  sp->def_mac_addr[0].mac_addr[5]);
 	}
-	if (sp->rxd_mode == RXD_MODE_3B)
-		DBG_PRINT(ERR_DBG, "%s: 2-Buffer mode support has been "
-			  "enabled\n",dev->name);
-	if (sp->rxd_mode == RXD_MODE_3A)
-		DBG_PRINT(ERR_DBG, "%s: 3-Buffer mode support has been "
-			  "enabled\n",dev->name);
-
+	switch(sp->rxd_mode) {
+		case RXD_MODE_1:
+		    DBG_PRINT(ERR_DBG, "%s: 1-Buffer receive mode enabled\n",
+						dev->name);
+		    break;
+		case RXD_MODE_3B:
+		    DBG_PRINT(ERR_DBG, "%s: 2-Buffer receive mode enabled\n",
+						dev->name);
+		    break;
+		case RXD_MODE_3A:
+		    DBG_PRINT(ERR_DBG, "%s: 3-Buffer receive mode enabled\n",
+						dev->name);
+		    break;
+	}
+#ifdef CONFIG_S2IO_NAPI
+	DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name);
+#endif
+	switch(sp->intr_type) {
+		case INTA:
+		    DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name);
+		    break;
+		case MSI:
+		    DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI\n", dev->name);
+		    break;
+		case MSI_X:
+		    DBG_PRINT(ERR_DBG, "%s: Interrupt type MSI-X\n", dev->name);
+		    break;
+	}
 	if (sp->lro)
 		DBG_PRINT(ERR_DBG, "%s: Large receive offload enabled\n",
-			dev->name);
+			  dev->name);
 
 	/* Initialize device name */
-	strcpy(sp->name, dev->name);
-	if (sp->device_type & XFRAME_II_DEVICE)
-		strcat(sp->name, ": Neterion Xframe II 10GbE adapter");
-	else
-		strcat(sp->name, ": Neterion Xframe I 10GbE adapter");
+	sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
 
 	/* Initialize bimodal Interrupts */
 	sp->config.bimodal = bimodal;
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 0a0b5b2..3203732 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -31,6 +31,8 @@
 #define SUCCESS 0
 #define FAILURE -1
 
+#define CHECKBIT(value, nbit) (value & (1 << nbit))
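+/*
+ * Illustrative example (editorial note, not from the original source):
+ * CHECKBIT(0x5, 2) is non-zero because bit 2 of 0x5 (binary 101) is set,
+ * while CHECKBIT(0x5, 1) evaluates to 0.
+ */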
+
 /* Maximum time to flicker LED when asked to identify NIC using ethtool */
 #define MAX_FLICKER_TIME	60000 /* 60 Secs */
 
@@ -78,6 +80,11 @@
 typedef struct {
 	unsigned long long single_ecc_errs;
 	unsigned long long double_ecc_errs;
+	unsigned long long parity_err_cnt;
+	unsigned long long serious_err_cnt;
+	unsigned long long soft_reset_cnt;
+	unsigned long long fifo_full_cnt;
+	unsigned long long ring_full_cnt;
 	/* LRO statistics */
 	unsigned long long clubbed_frms_cnt;
 	unsigned long long sending_both;
@@ -87,6 +94,25 @@
 	unsigned long long num_aggregations;
 } swStat_t;
 
+/* Xpak related alarms and warnings */
+typedef struct {
+	u64 alarm_transceiver_temp_high;
+	u64 alarm_transceiver_temp_low;
+	u64 alarm_laser_bias_current_high;
+	u64 alarm_laser_bias_current_low;
+	u64 alarm_laser_output_power_high;
+	u64 alarm_laser_output_power_low;
+	u64 warn_transceiver_temp_high;
+	u64 warn_transceiver_temp_low;
+	u64 warn_laser_bias_current_high;
+	u64 warn_laser_bias_current_low;
+	u64 warn_laser_output_power_high;
+	u64 warn_laser_output_power_low;
+	u64 xpak_regs_stat;
+	u32 xpak_timer_count;
+} xpakStat_t;
+
 /* The statistics block of Xena */
 typedef struct stat_block {
 /* Tx MAC statistics counters. */
@@ -263,7 +289,9 @@
 	u32 rmac_accepted_ip_oflow;
 	u32 reserved_14;
 	u32 link_fault_cnt;
+	u8  buffer[20];
 	swStat_t sw_stat;
+	xpakStat_t xpak_stat;
 } StatInfo_t;
 
 /*
@@ -659,7 +687,8 @@
 } usr_addr_t;
 
 /* Default Tunable parameters of the NIC. */
-#define DEFAULT_FIFO_LEN 4096
+#define DEFAULT_FIFO_0_LEN 4096
+#define DEFAULT_FIFO_1_7_LEN 512
 #define SMALL_BLK_CNT	30
 #define LARGE_BLK_CNT	100
 
@@ -732,7 +761,7 @@
 	int device_close_flag;
 	int device_enabled_once;
 
-	char name[50];
+	char name[60];
 	struct tasklet_struct task;
 	volatile unsigned long tasklet_status;
 
@@ -803,6 +832,8 @@
 	char desc1[35];
 	char desc2[35];
 
+	int avail_msix_vectors; /* No. of MSI-X vectors granted by system */
+
 	struct msix_info_st msix_info[0x3f];
 
 #define XFRAME_I_DEVICE		1
@@ -824,6 +855,8 @@
 	spinlock_t	rx_lock;
 	atomic_t	isr_cnt;
 	u64 *ufo_in_band_v;
+#define VPD_PRODUCT_NAME_LEN 50
+	u8  product_name[VPD_PRODUCT_NAME_LEN];
 };
 
 #define RESET_ERROR 1;
@@ -848,28 +881,32 @@
 	writel((u32) (val), addr);
 	writel((u32) (val >> 32), (addr + 4));
 }
+#endif
 
-/* In 32 bit modes, some registers have to be written in a
- * particular order to expect correct hardware operation. The
- * macro SPECIAL_REG_WRITE is used to perform such ordered
- * writes. Defines UF (Upper First) and LF (Lower First) will
- * be used to specify the required write order.
+/*
+ * Some registers have to be written in a particular order to
+ * expect correct hardware operation. The macro SPECIAL_REG_WRITE
+ * is used to perform such ordered writes. Defines UF (Upper First)
+ * and LF (Lower First) will be used to specify the required write order.
  */
 #define UF	1
 #define LF	2
 static inline void SPECIAL_REG_WRITE(u64 val, void __iomem *addr, int order)
 {
+	u32 ret;
+
 	if (order == LF) {
 		writel((u32) (val), addr);
+		ret = readl(addr);
 		writel((u32) (val >> 32), (addr + 4));
+		ret = readl(addr + 4);
 	} else {
 		writel((u32) (val >> 32), (addr + 4));
+		ret = readl(addr + 4);
 		writel((u32) (val), addr);
+		ret = readl(addr);
 	}
 }
-#else
-#define SPECIAL_REG_WRITE(val, addr, dummy) writeq(val, addr)
-#endif
 
 /*  Interrupt related values of Xena */
 
@@ -965,7 +1002,7 @@
 static struct ethtool_ops netdev_ethtool_ops;
 static void s2io_set_link(unsigned long data);
 static int s2io_set_swapper(nic_t * sp);
-static void s2io_card_down(nic_t *nic);
+static void s2io_card_down(nic_t *nic, int flag);
 static int s2io_card_up(nic_t *nic);
 static int get_xena_rev_id(struct pci_dev *pdev);
 static void restore_xmsi_data(nic_t *nic);
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index f5a3bf4..d058741 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -1,6 +1,6 @@
 /* sis900.c: A SiS 900/7016 PCI Fast Ethernet driver for Linux.
    Copyright 1999 Silicon Integrated System Corporation 
-   Revision:	1.08.09 Sep. 19 2005
+   Revision:	1.08.10 Apr. 2 2006
    
    Modified from the driver which is originally written by Donald Becker.
    
@@ -17,9 +17,10 @@
    SiS 7014 Single Chip 100BASE-TX/10BASE-T Physical Layer Solution,
    preliminary Rev. 1.0 Jan. 18, 1998
 
+   Rev 1.08.10 Apr.  2 2006 Daniele Venzano add vlan (jumbo packets) support
    Rev 1.08.09 Sep. 19 2005 Daniele Venzano add Wake on LAN support
    Rev 1.08.08 Jan. 22 2005 Daniele Venzano use netif_msg for debugging messages
-   Rev 1.08.07 Nov.  2 2003 Daniele Venzano <webvenza@libero.it> add suspend/resume support
+   Rev 1.08.07 Nov.  2 2003 Daniele Venzano <venza@brownhat.org> add suspend/resume support
    Rev 1.08.06 Sep. 24 2002 Mufasa Yang bug fix for Tx timeout & add SiS963 support
    Rev 1.08.05 Jun.  6 2002 Mufasa Yang bug fix for read_eeprom & Tx descriptor over-boundary
    Rev 1.08.04 Apr. 25 2002 Mufasa Yang <mufasa@sis.com.tw> added SiS962 support
@@ -77,7 +78,7 @@
 #include "sis900.h"
 
 #define SIS900_MODULE_NAME "sis900"
-#define SIS900_DRV_VERSION "v1.08.09 Sep. 19 2005"
+#define SIS900_DRV_VERSION "v1.08.10 Apr. 2 2006"
 
 static char version[] __devinitdata =
 KERN_INFO "sis900.c: " SIS900_DRV_VERSION "\n";
@@ -1402,6 +1403,11 @@
 		rx_flags |= RxATX;
 	}
 
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+	/* Can accept Jumbo packet */
+	rx_flags |= RxAJAB;
+#endif
+
 	outl (tx_flags, ioaddr + txcfg);
 	outl (rx_flags, ioaddr + rxcfg);
 }
@@ -1714,18 +1720,26 @@
 
 	while (rx_status & OWN) {
 		unsigned int rx_size;
+		unsigned int data_size;
 
 		if (--rx_work_limit < 0)
 			break;
 
-		rx_size = (rx_status & DSIZE) - CRC_SIZE;
+		data_size = rx_status & DSIZE;
+		rx_size = data_size - CRC_SIZE;
+
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+		/* ``TOOLONG'' flag means a jumbo packet was received. */
+		if ((rx_status & TOOLONG) && data_size <= MAX_FRAME_SIZE)
+			rx_status &= (~ ((unsigned int)TOOLONG));
+#endif
 
 		if (rx_status & (ABORT|OVERRUN|TOOLONG|RUNT|RXISERR|CRCERR|FAERR)) {
 			/* corrupted packet received */
 			if (netif_msg_rx_err(sis_priv))
 				printk(KERN_DEBUG "%s: Corrupted packet "
-				       "received, buffer status = 0x%8.8x.\n",
-				       net_dev->name, rx_status);
+				       "received, buffer status = 0x%8.8x/%d.\n",
+				       net_dev->name, rx_status, data_size);
 			sis_priv->stats.rx_errors++;
 			if (rx_status & OVERRUN)
 				sis_priv->stats.rx_over_errors++;
diff --git a/drivers/net/sis900.h b/drivers/net/sis900.h
index 5032394..4834e3a 100644
--- a/drivers/net/sis900.h
+++ b/drivers/net/sis900.h
@@ -310,8 +310,14 @@
 #define CRC_SIZE                4
 #define MAC_HEADER_SIZE         14
 
-#define TX_BUF_SIZE     1536
-#define RX_BUF_SIZE     1536
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#define MAX_FRAME_SIZE  (1518 + 4)
+#else
+#define MAX_FRAME_SIZE  1518
+#endif /* CONFIG_VLAN_802_1Q */
+
+#define TX_BUF_SIZE     (MAX_FRAME_SIZE+18)
+#define RX_BUF_SIZE     (MAX_FRAME_SIZE+18)
 
 #define NUM_TX_DESC     16      	/* Number of Tx descriptor registers. */
 #define NUM_RX_DESC     16       	/* Number of Rx descriptor registers. */
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 5ca5a1b..536dd1c 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -44,12 +44,13 @@
 #include "skge.h"
 
 #define DRV_NAME		"skge"
-#define DRV_VERSION		"1.5"
+#define DRV_VERSION		"1.6"
 #define PFX			DRV_NAME " "
 
 #define DEFAULT_TX_RING_SIZE	128
 #define DEFAULT_RX_RING_SIZE	512
 #define MAX_TX_RING_SIZE	1024
+#define TX_LOW_WATER		(MAX_SKB_FRAGS + 1)
 #define MAX_RX_RING_SIZE	4096
 #define RX_COPY_THRESHOLD	128
 #define RX_BUF_SIZE		1536
@@ -401,7 +402,7 @@
 	int err;
 
 	if (p->rx_pending == 0 || p->rx_pending > MAX_RX_RING_SIZE ||
-	    p->tx_pending < MAX_SKB_FRAGS+1 || p->tx_pending > MAX_TX_RING_SIZE)
+	    p->tx_pending < TX_LOW_WATER || p->tx_pending > MAX_TX_RING_SIZE)
 		return -EINVAL;
 
 	skge->rx_ring.count = p->rx_pending;
@@ -603,7 +604,7 @@
 	struct skge_hw *hw = skge->hw;
 	int port = skge->port;
 
-	spin_lock_bh(&hw->phy_lock);
+	mutex_lock(&hw->phy_mutex);
 	if (hw->chip_id == CHIP_ID_GENESIS) {
 		switch (mode) {
 		case LED_MODE_OFF:
@@ -663,7 +664,7 @@
 				     PHY_M_LED_MO_RX(MO_LED_ON));
 		}
 	}
-	spin_unlock_bh(&hw->phy_lock);
+	mutex_unlock(&hw->phy_mutex);
 }
 
 /* blink LED's for finding board */
@@ -2038,7 +2039,7 @@
 	netif_stop_queue(skge->netdev);
 	netif_carrier_off(skge->netdev);
 
-	spin_lock_bh(&hw->phy_lock);
+	mutex_lock(&hw->phy_mutex);
 	if (hw->chip_id == CHIP_ID_GENESIS) {
 		genesis_reset(hw, port);
 		genesis_mac_init(hw, port);
@@ -2046,7 +2047,7 @@
 		yukon_reset(hw, port);
 		yukon_init(hw, port);
 	}
-	spin_unlock_bh(&hw->phy_lock);
+	mutex_unlock(&hw->phy_mutex);
 }
 
 /* Basic MII support */
@@ -2067,12 +2068,12 @@
 		/* fallthru */
 	case SIOCGMIIREG: {
 		u16 val = 0;
-		spin_lock_bh(&hw->phy_lock);
+		mutex_lock(&hw->phy_mutex);
 		if (hw->chip_id == CHIP_ID_GENESIS)
 			err = __xm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val);
 		else
 			err = __gm_phy_read(hw, skge->port, data->reg_num & 0x1f, &val);
-		spin_unlock_bh(&hw->phy_lock);
+		mutex_unlock(&hw->phy_mutex);
 		data->val_out = val;
 		break;
 	}
@@ -2081,14 +2082,14 @@
 		if (!capable(CAP_NET_ADMIN))
 			return -EPERM;
 
-		spin_lock_bh(&hw->phy_lock);
+		mutex_lock(&hw->phy_mutex);
 		if (hw->chip_id == CHIP_ID_GENESIS)
 			err = xm_phy_write(hw, skge->port, data->reg_num & 0x1f,
 				   data->val_in);
 		else
 			err = gm_phy_write(hw, skge->port, data->reg_num & 0x1f,
 				   data->val_in);
-		spin_unlock_bh(&hw->phy_lock);
+		mutex_unlock(&hw->phy_mutex);
 		break;
 	}
 	return err;
@@ -2191,12 +2192,12 @@
 		goto free_rx_ring;
 
 	/* Initialize MAC */
-	spin_lock_bh(&hw->phy_lock);
+	mutex_lock(&hw->phy_mutex);
 	if (hw->chip_id == CHIP_ID_GENESIS)
 		genesis_mac_init(hw, port);
 	else
 		yukon_mac_init(hw, port);
-	spin_unlock_bh(&hw->phy_lock);
+	mutex_unlock(&hw->phy_mutex);
 
 	/* Configure RAMbuffers */
 	chunk = hw->ram_size / ((hw->ports + 1)*2);
@@ -2302,21 +2303,20 @@
 {
 	struct skge_port *skge = netdev_priv(dev);
 	struct skge_hw *hw = skge->hw;
-	struct skge_ring *ring = &skge->tx_ring;
 	struct skge_element *e;
 	struct skge_tx_desc *td;
 	int i;
 	u32 control, len;
 	u64 map;
+	unsigned long flags;
 
 	skb = skb_padto(skb, ETH_ZLEN);
 	if (!skb)
 		return NETDEV_TX_OK;
 
-	if (!spin_trylock(&skge->tx_lock)) {
+	if (!spin_trylock_irqsave(&skge->tx_lock, flags))
 		/* Collision - tell upper layer to requeue */
 		return NETDEV_TX_LOCKED;
-	}
 
 	if (unlikely(skge_avail(&skge->tx_ring) < skb_shinfo(skb)->nr_frags + 1)) {
 		if (!netif_queue_stopped(dev)) {
@@ -2325,12 +2325,13 @@
 			printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
 			       dev->name);
 		}
-		spin_unlock(&skge->tx_lock);
+		spin_unlock_irqrestore(&skge->tx_lock, flags);
 		return NETDEV_TX_BUSY;
 	}
 
-	e = ring->to_use;
+	e = skge->tx_ring.to_use;
 	td = e->desc;
+	BUG_ON(td->control & BMU_OWN);
 	e->skb = skb;
 	len = skb_headlen(skb);
 	map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
@@ -2371,8 +2372,10 @@
 					   frag->size, PCI_DMA_TODEVICE);
 
 			e = e->next;
-			e->skb = NULL;
+			e->skb = skb;
 			tf = e->desc;
+			BUG_ON(tf->control & BMU_OWN);
+
 			tf->dma_lo = map;
 			tf->dma_hi = (u64) map >> 32;
 			pci_unmap_addr_set(e, mapaddr, map);
@@ -2389,56 +2392,68 @@
 
 	skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START);
 
-	if (netif_msg_tx_queued(skge))
+	if (unlikely(netif_msg_tx_queued(skge)))
 		printk(KERN_DEBUG "%s: tx queued, slot %td, len %d\n",
-		       dev->name, e - ring->start, skb->len);
+		       dev->name, e - skge->tx_ring.start, skb->len);
 
-	ring->to_use = e->next;
-	if (skge_avail(&skge->tx_ring) <= MAX_SKB_FRAGS + 1) {
+	skge->tx_ring.to_use = e->next;
+	if (skge_avail(&skge->tx_ring) <= TX_LOW_WATER) {
 		pr_debug("%s: transmit queue full\n", dev->name);
 		netif_stop_queue(dev);
 	}
 
-	mmiowb();
-	spin_unlock(&skge->tx_lock);
+	spin_unlock_irqrestore(&skge->tx_lock, flags);
 
 	dev->trans_start = jiffies;
 
 	return NETDEV_TX_OK;
 }
 
-static void skge_tx_complete(struct skge_port *skge, struct skge_element *last)
+
+/* Free resources associated with this ring element */
+static void skge_tx_free(struct skge_port *skge, struct skge_element *e,
+			 u32 control)
 {
 	struct pci_dev *pdev = skge->hw->pdev;
-	struct skge_element *e;
 
-	for (e = skge->tx_ring.to_clean; e != last; e = e->next) {
-		struct sk_buff *skb = e->skb;
-		int i;
+	BUG_ON(!e->skb);
 
-		e->skb = NULL;
+	/* skb header vs. fragment */
+	if (control & BMU_STF)
 		pci_unmap_single(pdev, pci_unmap_addr(e, mapaddr),
-				 skb_headlen(skb), PCI_DMA_TODEVICE);
+				 pci_unmap_len(e, maplen),
+				 PCI_DMA_TODEVICE);
+	else
+		pci_unmap_page(pdev, pci_unmap_addr(e, mapaddr),
+			       pci_unmap_len(e, maplen),
+			       PCI_DMA_TODEVICE);
 
-		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
-			e = e->next;
-			pci_unmap_page(pdev, pci_unmap_addr(e, mapaddr),
-				       skb_shinfo(skb)->frags[i].size,
-				       PCI_DMA_TODEVICE);
-		}
+	if (control & BMU_EOF) {
+		if (unlikely(netif_msg_tx_done(skge)))
+			printk(KERN_DEBUG PFX "%s: tx done slot %td\n",
+			       skge->netdev->name, e - skge->tx_ring.start);
 
-		dev_kfree_skb(skb);
+		dev_kfree_skb_any(e->skb);
 	}
-	skge->tx_ring.to_clean = e;
+	e->skb = NULL;
 }
 
+/* Free all buffers in transmit ring */
 static void skge_tx_clean(struct skge_port *skge)
 {
+	struct skge_element *e;
+	unsigned long flags;
 
-	spin_lock_bh(&skge->tx_lock);
-	skge_tx_complete(skge, skge->tx_ring.to_use);
+	spin_lock_irqsave(&skge->tx_lock, flags);
+	for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
+		struct skge_tx_desc *td = e->desc;
+		skge_tx_free(skge, e, td->control);
+		td->control = 0;
+	}
+
+	skge->tx_ring.to_clean = e;
 	netif_wake_queue(skge->netdev);
-	spin_unlock_bh(&skge->tx_lock);
+	spin_unlock_irqrestore(&skge->tx_lock, flags);
 }
 
 static void skge_tx_timeout(struct net_device *dev)
@@ -2664,32 +2679,28 @@
 	return NULL;
 }
 
-static void skge_tx_done(struct skge_port *skge)
+/* Free all buffers in Tx ring which are no longer owned by device */
+static void skge_txirq(struct net_device *dev)
 {
+	struct skge_port *skge = netdev_priv(dev);
 	struct skge_ring *ring = &skge->tx_ring;
-	struct skge_element *e, *last;
+	struct skge_element *e;
+
+	rmb();
 
 	spin_lock(&skge->tx_lock);
-	last = ring->to_clean;
 	for (e = ring->to_clean; e != ring->to_use; e = e->next) {
 		struct skge_tx_desc *td = e->desc;
 
 		if (td->control & BMU_OWN)
 			break;
 
-		if (td->control & BMU_EOF) {
-			last = e->next;
-			if (unlikely(netif_msg_tx_done(skge)))
-				printk(KERN_DEBUG PFX "%s: tx done slot %td\n",
-				       skge->netdev->name, e - ring->start);
-		}
+		skge_tx_free(skge, e, td->control);
 	}
+	skge->tx_ring.to_clean = e;
 
-	skge_tx_complete(skge, last);
-
-	skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
-
-	if (skge_avail(&skge->tx_ring) > MAX_SKB_FRAGS + 1)
+	if (netif_queue_stopped(skge->netdev)
+	    && skge_avail(&skge->tx_ring) > TX_LOW_WATER)
 		netif_wake_queue(skge->netdev);
 
 	spin_unlock(&skge->tx_lock);
@@ -2704,8 +2715,6 @@
 	int to_do = min(dev->quota, *budget);
 	int work_done = 0;
 
-	skge_tx_done(skge);
-
 	for (e = ring->to_clean; prefetch(e->next), work_done < to_do; e = e->next) {
 		struct skge_rx_desc *rd = e->desc;
 		struct sk_buff *skb;
@@ -2737,10 +2746,12 @@
 		return 1; /* not done */
 
 	netif_rx_complete(dev);
-	mmiowb();
 
-  	hw->intr_mask |= skge->port == 0 ? (IS_R1_F|IS_XA1_F) : (IS_R2_F|IS_XA2_F);
+	spin_lock_irq(&hw->hw_lock);
+	hw->intr_mask |= rxirqmask[skge->port];
   	skge_write32(hw, B0_IMSK, hw->intr_mask);
+	mmiowb();
+	spin_unlock_irq(&hw->hw_lock);
 
 	return 0;
 }
@@ -2847,16 +2858,16 @@
 }
 
 /*
- * Interrupt from PHY are handled in tasklet (soft irq)
+ * Interrupts from the PHY are handled in a work queue
  * because accessing phy registers requires spin wait which might
  * cause excess interrupt latency.
  */
-static void skge_extirq(unsigned long data)
+static void skge_extirq(void *arg)
 {
-	struct skge_hw *hw = (struct skge_hw *) data;
+	struct skge_hw *hw = arg;
 	int port;
 
-	spin_lock(&hw->phy_lock);
+	mutex_lock(&hw->phy_mutex);
 	for (port = 0; port < hw->ports; port++) {
 		struct net_device *dev = hw->dev[port];
 		struct skge_port *skge = netdev_priv(dev);
@@ -2868,10 +2879,12 @@
 				bcom_phy_intr(skge);
 		}
 	}
-	spin_unlock(&hw->phy_lock);
+	mutex_unlock(&hw->phy_mutex);
 
+	spin_lock_irq(&hw->hw_lock);
 	hw->intr_mask |= IS_EXT_REG;
 	skge_write32(hw, B0_IMSK, hw->intr_mask);
+	spin_unlock_irq(&hw->hw_lock);
 }
 
 static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
@@ -2884,54 +2897,68 @@
 	if (status == 0)
 		return IRQ_NONE;
 
+	spin_lock(&hw->hw_lock);
+	status &= hw->intr_mask;
 	if (status & IS_EXT_REG) {
 		hw->intr_mask &= ~IS_EXT_REG;
-		tasklet_schedule(&hw->ext_tasklet);
+		schedule_work(&hw->phy_work);
 	}
 
-	if (status & (IS_R1_F|IS_XA1_F)) {
+	if (status & IS_XA1_F) {
+		skge_write8(hw, Q_ADDR(Q_XA1, Q_CSR), CSR_IRQ_CL_F);
+		skge_txirq(hw->dev[0]);
+	}
+
+	if (status & IS_R1_F) {
 		skge_write8(hw, Q_ADDR(Q_R1, Q_CSR), CSR_IRQ_CL_F);
-		hw->intr_mask &= ~(IS_R1_F|IS_XA1_F);
+		hw->intr_mask &= ~IS_R1_F;
 		netif_rx_schedule(hw->dev[0]);
 	}
 
-	if (status & (IS_R2_F|IS_XA2_F)) {
-		skge_write8(hw, Q_ADDR(Q_R2, Q_CSR), CSR_IRQ_CL_F);
-		hw->intr_mask &= ~(IS_R2_F|IS_XA2_F);
-		netif_rx_schedule(hw->dev[1]);
-	}
-
-	if (likely((status & hw->intr_mask) == 0))
-		return IRQ_HANDLED;
-
-	if (status & IS_PA_TO_RX1) {
-		struct skge_port *skge = netdev_priv(hw->dev[0]);
-		++skge->net_stats.rx_over_errors;
-		skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX1);
-	}
-
-	if (status & IS_PA_TO_RX2) {
-		struct skge_port *skge = netdev_priv(hw->dev[1]);
-		++skge->net_stats.rx_over_errors;
-		skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX2);
-	}
-
 	if (status & IS_PA_TO_TX1)
 		skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX1);
 
-	if (status & IS_PA_TO_TX2)
-		skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX2);
+	if (status & IS_PA_TO_RX1) {
+		struct skge_port *skge = netdev_priv(hw->dev[0]);
+
+		++skge->net_stats.rx_over_errors;
+		skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX1);
+	}
+
 
 	if (status & IS_MAC1)
 		skge_mac_intr(hw, 0);
 
-	if (status & IS_MAC2)
-		skge_mac_intr(hw, 1);
+	if (hw->dev[1]) {
+		if (status & IS_XA2_F) {
+			skge_write8(hw, Q_ADDR(Q_XA2, Q_CSR), CSR_IRQ_CL_F);
+			skge_txirq(hw->dev[1]);
+		}
+
+		if (status & IS_R2_F) {
+			skge_write8(hw, Q_ADDR(Q_R2, Q_CSR), CSR_IRQ_CL_F);
+			hw->intr_mask &= ~IS_R2_F;
+			netif_rx_schedule(hw->dev[1]);
+		}
+
+		if (status & IS_PA_TO_RX2) {
+			struct skge_port *skge = netdev_priv(hw->dev[1]);
+			++skge->net_stats.rx_over_errors;
+			skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX2);
+		}
+
+		if (status & IS_PA_TO_TX2)
+			skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX2);
+
+		if (status & IS_MAC2)
+			skge_mac_intr(hw, 1);
+	}
 
 	if (status & IS_HW_ERR)
 		skge_error_irq(hw);
 
 	skge_write32(hw, B0_IMSK, hw->intr_mask);
+	spin_unlock(&hw->hw_lock);
 
 	return IRQ_HANDLED;
 }
@@ -2957,7 +2984,7 @@
 	if (!is_valid_ether_addr(addr->sa_data))
 		return -EADDRNOTAVAIL;
 
-	spin_lock_bh(&hw->phy_lock);
+	mutex_lock(&hw->phy_mutex);
 	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
 	memcpy_toio(hw->regs + B2_MAC_1 + port*8,
 		    dev->dev_addr, ETH_ALEN);
@@ -2970,7 +2997,7 @@
 		gma_set_addr(hw, port, GM_SRC_ADDR_1L, dev->dev_addr);
 		gma_set_addr(hw, port, GM_SRC_ADDR_2L, dev->dev_addr);
 	}
-	spin_unlock_bh(&hw->phy_lock);
+	mutex_unlock(&hw->phy_mutex);
 
 	return 0;
 }
@@ -3082,6 +3109,7 @@
 	else
 		hw->ram_size = t8 * 4096;
 
+	spin_lock_init(&hw->hw_lock);
 	hw->intr_mask = IS_HW_ERR | IS_EXT_REG | IS_PORT_1;
 	if (hw->ports > 1)
 		hw->intr_mask |= IS_PORT_2;
@@ -3150,14 +3178,14 @@
 
 	skge_write32(hw, B0_IMSK, hw->intr_mask);
 
-	spin_lock_bh(&hw->phy_lock);
+	mutex_lock(&hw->phy_mutex);
 	for (i = 0; i < hw->ports; i++) {
 		if (hw->chip_id == CHIP_ID_GENESIS)
 			genesis_reset(hw, i);
 		else
 			yukon_reset(hw, i);
 	}
-	spin_unlock_bh(&hw->phy_lock);
+	mutex_unlock(&hw->phy_mutex);
 
 	return 0;
 }
@@ -3305,8 +3333,8 @@
 	}
 
 	hw->pdev = pdev;
-	spin_lock_init(&hw->phy_lock);
-	tasklet_init(&hw->ext_tasklet, skge_extirq, (unsigned long) hw);
+	mutex_init(&hw->phy_mutex);
+	INIT_WORK(&hw->phy_work, skge_extirq, hw);
 
 	hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
 	if (!hw->regs) {
@@ -3334,6 +3362,14 @@
 	if ((dev = skge_devinit(hw, 0, using_dac)) == NULL)
 		goto err_out_led_off;
 
+	if (!is_valid_ether_addr(dev->dev_addr)) {
+		printk(KERN_ERR PFX "%s: bad (zero?) ethernet address in rom\n",
+		       pci_name(pdev));
+		err = -EIO;
+		goto err_out_free_netdev;
+	}
+
 	err = register_netdev(dev);
 	if (err) {
 		printk(KERN_ERR PFX "%s: cannot register net device\n",
@@ -3388,11 +3424,15 @@
 	dev0 = hw->dev[0];
 	unregister_netdev(dev0);
 
+	spin_lock_irq(&hw->hw_lock);
+	hw->intr_mask = 0;
 	skge_write32(hw, B0_IMSK, 0);
+	spin_unlock_irq(&hw->hw_lock);
+
 	skge_write16(hw, B0_LED, LED_STAT_OFF);
 	skge_write8(hw, B0_CTST, CS_RST_SET);
 
-	tasklet_kill(&hw->ext_tasklet);
+	flush_scheduled_work();
 
 	free_irq(pdev->irq, hw);
 	pci_release_regions(pdev);
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index 1f1ce88..ed19ff4 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -2388,6 +2388,7 @@
 struct skge_hw {
 	void __iomem  	     *regs;
 	struct pci_dev	     *pdev;
+	spinlock_t	     hw_lock;
 	u32		     intr_mask;
 	struct net_device    *dev[2];
 
@@ -2399,9 +2400,8 @@
 	u32	     	     ram_size;
 	u32	     	     ram_offset;
 	u16		     phy_addr;
-
-	struct tasklet_struct ext_tasklet;
-	spinlock_t	     phy_lock;
+	struct work_struct   phy_work;
+	struct mutex	     phy_mutex;
 };
 
 enum {
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
new file mode 100644
index 0000000..bdd8702
--- /dev/null
+++ b/drivers/net/smc911x.c
@@ -0,0 +1,2307 @@
+/*
+ * smc911x.c
+ * This is a driver for SMSC's LAN911{5,6,7,8} single-chip Ethernet devices.
+ *
+ * Copyright (C) 2005 Sensoria Corp
+ *	   Derived from the unified SMC91x driver by Nicolas Pitre
+ *	   and the smsc911x.c reference driver by SMSC
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ * Arguments:
+ *	 watchdog  = TX watchdog timeout
+ *	 tx_fifo_kb = Size of TX FIFO in KB
+ *
+ * History:
+ *	  04/16/05	Dustin McIntire		 Initial version
+ */
+static const char version[] =
+	 "smc911x.c: v1.0 04-16-2005 by Dustin McIntire <dustin@sensoria.com>\n";
+
+/* Debugging options */
+#define ENABLE_SMC_DEBUG_RX		0
+#define ENABLE_SMC_DEBUG_TX		0
+#define ENABLE_SMC_DEBUG_DMA		0
+#define ENABLE_SMC_DEBUG_PKTS		0
+#define ENABLE_SMC_DEBUG_MISC		0
+#define ENABLE_SMC_DEBUG_FUNC		0
+
+#define SMC_DEBUG_RX		((ENABLE_SMC_DEBUG_RX	? 1 : 0) << 0)
+#define SMC_DEBUG_TX		((ENABLE_SMC_DEBUG_TX	? 1 : 0) << 1)
+#define SMC_DEBUG_DMA		((ENABLE_SMC_DEBUG_DMA	? 1 : 0) << 2)
+#define SMC_DEBUG_PKTS		((ENABLE_SMC_DEBUG_PKTS ? 1 : 0) << 3)
+#define SMC_DEBUG_MISC		((ENABLE_SMC_DEBUG_MISC ? 1 : 0) << 4)
+#define SMC_DEBUG_FUNC		((ENABLE_SMC_DEBUG_FUNC ? 1 : 0) << 5)
+
+#ifndef SMC_DEBUG
+#define SMC_DEBUG	 ( SMC_DEBUG_RX	  | \
+			   SMC_DEBUG_TX	  | \
+			   SMC_DEBUG_DMA  | \
+			   SMC_DEBUG_PKTS | \
+			   SMC_DEBUG_MISC | \
+			   SMC_DEBUG_FUNC   \
+			 )
+#endif
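+/*
+ * Illustrative note (editorial, not in the original source): with
+ * ENABLE_SMC_DEBUG_RX and ENABLE_SMC_DEBUG_TX set to 1 and the rest
+ * left at 0, SMC_DEBUG evaluates to (1 << 0) | (1 << 1) = 0x3, so only
+ * DBG(SMC_DEBUG_RX, ...) and DBG(SMC_DEBUG_TX, ...) messages print.
+ */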
+
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/crc32.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/workqueue.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#include "smc911x.h"
+
+/*
+ * Transmit timeout, default 5 seconds.
+ */
+static int watchdog = 5000;
+module_param(watchdog, int, 0400);
+MODULE_PARM_DESC(watchdog, "transmit timeout in milliseconds");
+
+static int tx_fifo_kb=8;
+module_param(tx_fifo_kb, int, 0400);
+MODULE_PARM_DESC(tx_fifo_kb,"transmit FIFO size in KB (1<x<15)(default=8)");
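+/*
+ * Illustrative usage (editorial note): the module parameters above can be
+ * set at load time, e.g. "modprobe smc911x watchdog=10000 tx_fifo_kb=4".
+ */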
+
+MODULE_LICENSE("GPL");
+
+/*
+ * The internal workings of the driver.  If you are changing anything
+ * here with the SMC stuff, you should have the datasheet and know
+ * what you are doing.
+ */
+#define CARDNAME "smc911x"
+
+/*
+ * Use power-down feature of the chip
+ */
+#define POWER_DOWN		 1
+
+
+/* store this information for the driver.. */
+struct smc911x_local {
+	/*
+	 * If I have to wait until the DMA is finished and ready to reload a
+	 * packet, I will store the skbuff here. Then, the DMA will send it
+	 * out and free it.
+	 */
+	struct sk_buff *pending_tx_skb;
+
+	/*
+	 * these are things that the kernel wants me to keep, so users
+	 * can find out semi-useless statistics of how well the card is
+	 * performing
+	 */
+	struct net_device_stats stats;
+
+	/* version/revision of the SMC911x chip */
+	u16 version;
+	u16 revision;
+
+	/* FIFO sizes */
+	int tx_fifo_kb;
+	int tx_fifo_size;
+	int rx_fifo_size;
+	int afc_cfg;
+
+	/* Contains the current active receive/phy mode */
+	int ctl_rfduplx;
+	int ctl_rspeed;
+
+	u32 msg_enable;
+	u32 phy_type;
+	struct mii_if_info mii;
+
+	/* work queue */
+	struct work_struct phy_configure;
+	int work_pending;
+
+	int tx_throttle;
+	spinlock_t lock;
+
+#ifdef SMC_USE_DMA
+	/* DMA needs the physical address of the chip */
+	u_long physaddr;
+	int rxdma;
+	int txdma;
+	int rxdma_active;
+	int txdma_active;
+	struct sk_buff *current_rx_skb;
+	struct sk_buff *current_tx_skb;
+	struct device *dev;
+#endif
+};
+
+#if SMC_DEBUG > 0
+#define DBG(n, args...)				 \
+	do {					 \
+		if (SMC_DEBUG & (n))		 \
+			printk(args);		 \
+	} while (0)
+
+#define PRINTK(args...)   printk(args)
+#else
+#define DBG(n, args...)   do { } while (0)
+#define PRINTK(args...)   printk(KERN_DEBUG args)
+#endif
+
+#if SMC_DEBUG_PKTS > 0
+static void PRINT_PKT(u_char *buf, int length)
+{
+	int i;
+	int remainder;
+	int lines;
+
+	lines = length / 16;
+	remainder = length % 16;
+
+	for (i = 0; i < lines ; i ++) {
+		int cur;
+		for (cur = 0; cur < 8; cur++) {
+			u_char a, b;
+			a = *buf++;
+			b = *buf++;
+			printk("%02x%02x ", a, b);
+		}
+		printk("\n");
+	}
+	for (i = 0; i < remainder/2 ; i++) {
+		u_char a, b;
+		a = *buf++;
+		b = *buf++;
+		printk("%02x%02x ", a, b);
+	}
+	printk("\n");
+}
+#else
+#define PRINT_PKT(x...)  do { } while (0)
+#endif
+
+
+/* this enables an interrupt in the interrupt mask register */
+#define SMC_ENABLE_INT(x) do {				\
+	unsigned int  __mask;				\
+	unsigned long __flags;				\
+	spin_lock_irqsave(&lp->lock, __flags);		\
+	__mask = SMC_GET_INT_EN();			\
+	__mask |= (x);					\
+	SMC_SET_INT_EN(__mask);				\
+	spin_unlock_irqrestore(&lp->lock, __flags);	\
+} while (0)
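+/*
+ * Usage sketch (editorial illustration, not in the original source):
+ * SMC_ENABLE_INT(INT_EN_TDFA_EN_) ORs that bit into the current
+ * interrupt-enable mask under lp->lock, as smc911x_enable() does below.
+ */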
+
+/* this disables an interrupt from the interrupt mask register */
+#define SMC_DISABLE_INT(x) do {				\
+	unsigned int  __mask;				\
+	unsigned long __flags;				\
+	spin_lock_irqsave(&lp->lock, __flags);		\
+	__mask = SMC_GET_INT_EN();			\
+	__mask &= ~(x);					\
+	SMC_SET_INT_EN(__mask);				\
+	spin_unlock_irqrestore(&lp->lock, __flags);	\
+} while (0)
+
+/*
+ * this does a soft reset on the device
+ */
+static void smc911x_reset(struct net_device *dev)
+{
+	unsigned long ioaddr = dev->base_addr;
+	struct smc911x_local *lp = netdev_priv(dev);
+	unsigned int reg, timeout=0, resets=1;
+	unsigned long flags;
+
+	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+
+	/*	 Take out of PM setting first */
+	if ((SMC_GET_PMT_CTRL() & PMT_CTRL_READY_) == 0) {
+		/* Write to the bytetest will take out of powerdown */
+		SMC_SET_BYTE_TEST(0);
+		timeout=10;
+		do {
+			udelay(10);
+			reg = SMC_GET_PMT_CTRL() & PMT_CTRL_READY_;
+		} while ( timeout-- && !reg);
+		if (timeout == 0) {
+			PRINTK("%s: smc911x_reset timeout waiting for PM restore\n", dev->name);
+			return;
+		}
+	}
+
+	/* Disable all interrupts */
+	spin_lock_irqsave(&lp->lock, flags);
+	SMC_SET_INT_EN(0);
+	spin_unlock_irqrestore(&lp->lock, flags);
+
+	while (resets--) {
+		SMC_SET_HW_CFG(HW_CFG_SRST_);
+		timeout=10;
+		do {
+			udelay(10);
+			reg = SMC_GET_HW_CFG();
+			/* If chip indicates reset timeout then try again */
+			if (reg & HW_CFG_SRST_TO_) {
+				PRINTK("%s: chip reset timeout, retrying...\n", dev->name);
+				resets++;
+				break;
+			}
+		} while ( timeout-- && (reg & HW_CFG_SRST_));
+	}
+	if (timeout == 0) {
+		PRINTK("%s: smc911x_reset timeout waiting for reset\n", dev->name);
+		return;
+	}
+
+	/* make sure EEPROM has finished loading before setting GPIO_CFG */
+	timeout=1000;
+	while ( timeout-- && (SMC_GET_E2P_CMD() & E2P_CMD_EPC_BUSY_)) {
+		udelay(10);
+	}
+	if (timeout == 0){
+		PRINTK("%s: smc911x_reset timeout waiting for EEPROM busy\n", dev->name);
+		return;
+	}
+
+	/* Initialize interrupts */
+	SMC_SET_INT_EN(0);
+	SMC_ACK_INT(-1);
+
+	/* Reset the FIFO level and flow control settings */
+	SMC_SET_HW_CFG((lp->tx_fifo_kb & 0xF) << 16);
+/* TODO: Figure out what the appropriate pause time is */
+	SMC_SET_FLOW(FLOW_FCPT_ | FLOW_FCEN_);
+	SMC_SET_AFC_CFG(lp->afc_cfg);
+
+	/* Set to LED outputs */
+	SMC_SET_GPIO_CFG(0x70070000);
+
+	/*
+	 * Deassert IRQ for 1*10us for edge type interrupts
+	 * and drive IRQ pin push-pull
+	 */
+	SMC_SET_IRQ_CFG( (1 << 24) | INT_CFG_IRQ_EN_ | INT_CFG_IRQ_TYPE_ );
+
+	/* clear anything saved */
+	if (lp->pending_tx_skb != NULL) {
+		dev_kfree_skb (lp->pending_tx_skb);
+		lp->pending_tx_skb = NULL;
+		lp->stats.tx_errors++;
+		lp->stats.tx_aborted_errors++;
+	}
+}
+
+/*
+ * Enable Interrupts, Receive, and Transmit
+ */
+static void smc911x_enable(struct net_device *dev)
+{
+	unsigned long ioaddr = dev->base_addr;
+	struct smc911x_local *lp = netdev_priv(dev);
+	unsigned mask, cfg, cr;
+	unsigned long flags;
+
+	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+
+	SMC_SET_MAC_ADDR(dev->dev_addr);
+
+	/* Enable TX */
+	cfg = SMC_GET_HW_CFG();
+	cfg &= HW_CFG_TX_FIF_SZ_ | 0xFFF;
+	cfg |= HW_CFG_SF_;
+	SMC_SET_HW_CFG(cfg);
+	SMC_SET_FIFO_TDA(0xFF);
+	/* Update TX stats on every 64 packets sent or every 1 sec */
+	SMC_SET_FIFO_TSL(64);
+	SMC_SET_GPT_CFG(GPT_CFG_TIMER_EN_ | 10000);
+
+	spin_lock_irqsave(&lp->lock, flags);
+	SMC_GET_MAC_CR(cr);
+	cr |= MAC_CR_TXEN_ | MAC_CR_HBDIS_;
+	SMC_SET_MAC_CR(cr);
+	SMC_SET_TX_CFG(TX_CFG_TX_ON_);
+	spin_unlock_irqrestore(&lp->lock, flags);
+
+	/* Add 2 byte padding to start of packets */
+	SMC_SET_RX_CFG((2<<8) & RX_CFG_RXDOFF_);
+
+	/* Turn on receiver and enable RX */
+	if (cr & MAC_CR_RXEN_)
+		DBG(SMC_DEBUG_RX, "%s: Receiver already enabled\n", dev->name);
+
+	spin_lock_irqsave(&lp->lock, flags);
+	SMC_SET_MAC_CR( cr | MAC_CR_RXEN_ );
+	spin_unlock_irqrestore(&lp->lock, flags);
+
+	/* Interrupt on every received packet */
+	SMC_SET_FIFO_RSA(0x01);
+	SMC_SET_FIFO_RSL(0x00);
+
+	/* now, enable interrupts */
+	mask = INT_EN_TDFA_EN_ | INT_EN_TSFL_EN_ | INT_EN_RSFL_EN_ |
+		INT_EN_GPT_INT_EN_ | INT_EN_RXDFH_INT_EN_ | INT_EN_RXE_EN_ |
+		INT_EN_PHY_INT_EN_;
+	if (IS_REV_A(lp->revision))
+		mask |= INT_EN_RDFL_EN_;
+	else
+		mask |= INT_EN_RDFO_EN_;
+	SMC_ENABLE_INT(mask);
+}
+
+/*
+ * this puts the device in an inactive state
+ */
+static void smc911x_shutdown(struct net_device *dev)
+{
+	unsigned long ioaddr = dev->base_addr;
+	struct smc911x_local *lp = netdev_priv(dev);
+	unsigned cr;
+	unsigned long flags;
+
+	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", CARDNAME, __FUNCTION__);
+
+	/* Disable IRQ's */
+	SMC_SET_INT_EN(0);
+
+	/* Turn off Rx and TX */
+	spin_lock_irqsave(&lp->lock, flags);
+	SMC_GET_MAC_CR(cr);
+	cr &= ~(MAC_CR_TXEN_ | MAC_CR_RXEN_ | MAC_CR_HBDIS_);
+	SMC_SET_MAC_CR(cr);
+	SMC_SET_TX_CFG(TX_CFG_STOP_TX_);
+	spin_unlock_irqrestore(&lp->lock, flags);
+}
+
+static inline void smc911x_drop_pkt(struct net_device *dev)
+{
+	unsigned long ioaddr = dev->base_addr;
+	unsigned int fifo_count, timeout, reg;
+
+	DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n", CARDNAME, __FUNCTION__);
+	fifo_count = SMC_GET_RX_FIFO_INF() & 0xFFFF;
+	if (fifo_count <= 4) {
+		/* Manually dump the packet data */
+		while (fifo_count--)
+			SMC_GET_RX_FIFO();
+	} else	 {
+		/* Fast forward through the bad packet */
+		SMC_SET_RX_DP_CTRL(RX_DP_CTRL_FFWD_BUSY_);
+		timeout=50;
+		do {
+			udelay(10);
+			reg = SMC_GET_RX_DP_CTRL() & RX_DP_CTRL_FFWD_BUSY_;
+		} while ( timeout-- && reg);
+		if (timeout == 0) {
+			PRINTK("%s: timeout waiting for RX fast forward\n", dev->name);
+		}
+	}
+}
+
+/*
+ * This is the procedure to handle the receipt of a packet.
+ * It should be called after checking for packet presence in
+ * the RX status FIFO.	 It must be called with the spin lock
+ * already held.
+ */
+static inline void	 smc911x_rcv(struct net_device *dev)
+{
+	struct smc911x_local *lp = netdev_priv(dev);
+	unsigned long ioaddr = dev->base_addr;
+	unsigned int pkt_len, status;
+	struct sk_buff *skb;
+	unsigned char *data;
+
+	DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n",
+		dev->name, __FUNCTION__);
+	status = SMC_GET_RX_STS_FIFO();
+	DBG(SMC_DEBUG_RX, "%s: Rx pkt len %d status 0x%08x \n",
+		dev->name, (status & 0x3fff0000) >> 16, status & 0xc000ffff);
+	pkt_len = (status & RX_STS_PKT_LEN_) >> 16;
+	if (status & RX_STS_ES_) {
+		/* Deal with a bad packet */
+		lp->stats.rx_errors++;
+		if (status & RX_STS_CRC_ERR_)
+			lp->stats.rx_crc_errors++;
+		else {
+			if (status & RX_STS_LEN_ERR_)
+				lp->stats.rx_length_errors++;
+			if (status & RX_STS_MCAST_)
+				lp->stats.multicast++;
+		}
+		/* Remove the bad packet data from the RX FIFO */
+		smc911x_drop_pkt(dev);
+	} else {
+		/* Receive a valid packet */
+		/* Alloc a buffer with extra room for DMA alignment */
+		skb=dev_alloc_skb(pkt_len+32);
+		if (unlikely(skb == NULL)) {
+			PRINTK( "%s: Low memory, rcvd packet dropped.\n",
+				dev->name);
+			lp->stats.rx_dropped++;
+			smc911x_drop_pkt(dev);
+			return;
+		}
+		/* Align IP header to 32 bits.
+		 * Note that the device is configured to add a 2
+		 * byte padding to the packet start, so we really
+		 * want to write to the original data pointer */
+		data = skb->data;
+		skb_reserve(skb, 2);
+		skb_put(skb,pkt_len-4);
+#ifdef SMC_USE_DMA
+		{
+		unsigned int fifo;
+		/* Lower the FIFO threshold if possible */
+		fifo = SMC_GET_FIFO_INT();
+		if (fifo & 0xFF) fifo--;
+		DBG(SMC_DEBUG_RX, "%s: Setting RX stat FIFO threshold to %d\n",
+			dev->name, fifo & 0xff);
+		SMC_SET_FIFO_INT(fifo);
+		/* Setup RX DMA */
+		SMC_SET_RX_CFG(RX_CFG_RX_END_ALGN16_ | ((2<<8) & RX_CFG_RXDOFF_));
+		lp->rxdma_active = 1;
+		lp->current_rx_skb = skb;
+		SMC_PULL_DATA(data, (pkt_len+2+15) & ~15);
+		/* Packet processing deferred to DMA RX interrupt */
+		}
+#else
+		SMC_SET_RX_CFG(RX_CFG_RX_END_ALGN4_ | ((2<<8) & RX_CFG_RXDOFF_));
+		SMC_PULL_DATA(data, pkt_len+2+3);
+
+		DBG(SMC_DEBUG_PKTS, "%s: Received packet\n", dev->name);
+		PRINT_PKT(data, ((pkt_len - 4) <= 64) ? pkt_len - 4 : 64);
+		dev->last_rx = jiffies;
+		skb->dev = dev;
+		skb->protocol = eth_type_trans(skb, dev);
+		netif_rx(skb);
+		lp->stats.rx_packets++;
+		lp->stats.rx_bytes += pkt_len-4;
+#endif
+	}
+}
+
+/*
+ * This is called to actually send a packet to the chip.
+ */
+static void smc911x_hardware_send_pkt(struct net_device *dev)
+{
+	struct smc911x_local *lp = netdev_priv(dev);
+	unsigned long ioaddr = dev->base_addr;
+	struct sk_buff *skb;
+	unsigned int cmdA, cmdB, len;
+	unsigned char *buf;
+	unsigned long flags;
+
+	DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n", dev->name, __FUNCTION__);
+	BUG_ON(lp->pending_tx_skb == NULL);
+
+	skb = lp->pending_tx_skb;
+	lp->pending_tx_skb = NULL;
+
+	/* cmdA [25:24] data alignment [20:16] start offset [10:0] buffer length */
+	/* cmdB [31:16] pkt tag [10:0] length */
+#ifdef SMC_USE_DMA
+	/* 16 byte buffer alignment mode */
+	buf = (char*)((u32)(skb->data) & ~0xF);
+	len = (skb->len + 0xF + ((u32)skb->data & 0xF)) & ~0xF;
+	cmdA = (1<<24) | (((u32)skb->data & 0xF)<<16) |
+			TX_CMD_A_INT_FIRST_SEG_ | TX_CMD_A_INT_LAST_SEG_ |
+			skb->len;
+#else
+	buf = (char*)((u32)skb->data & ~0x3);
+	len = (skb->len + 3 + ((u32)skb->data & 3)) & ~0x3;
+	cmdA = (((u32)skb->data & 0x3) << 16) |
+			TX_CMD_A_INT_FIRST_SEG_ | TX_CMD_A_INT_LAST_SEG_ |
+			skb->len;
+#endif
+	/* tag is packet length so we can use this in stats update later */
+	cmdB = (skb->len  << 16) | (skb->len & 0x7FF);
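+	/*
+	 * Worked example (editorial note, not in the original source): a
+	 * 60-byte frame gives cmdB = (60 << 16) | 60 = 0x003C003C, i.e. the
+	 * tag in the upper half mirrors the length in the lower half.
+	 */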
+
+	DBG(SMC_DEBUG_TX, "%s: TX PKT LENGTH 0x%04x (%d) BUF 0x%p CMDA 0x%08x CMDB 0x%08x\n",
+		 dev->name, len, len, buf, cmdA, cmdB);
+	SMC_SET_TX_FIFO(cmdA);
+	SMC_SET_TX_FIFO(cmdB);
+
+	DBG(SMC_DEBUG_PKTS, "%s: Transmitted packet\n", dev->name);
+	PRINT_PKT(buf, len <= 64 ? len : 64);
+
+	/* Send pkt via PIO or DMA */
+#ifdef SMC_USE_DMA
+	lp->current_tx_skb = skb;
+	SMC_PUSH_DATA(buf, len);
+	/* DMA complete IRQ will free buffer and set jiffies */
+#else
+	SMC_PUSH_DATA(buf, len);
+	dev->trans_start = jiffies;
+	dev_kfree_skb(skb);
+#endif
+	spin_lock_irqsave(&lp->lock, flags);
+	if (!lp->tx_throttle) {
+		netif_wake_queue(dev);
+	}
+	spin_unlock_irqrestore(&lp->lock, flags);
+	SMC_ENABLE_INT(INT_EN_TDFA_EN_ | INT_EN_TSFL_EN_);
+}
+
+/*
+ * Since I am not sure if I will have enough room in the chip's ram
+ * to store the packet, I call this routine which either sends it
+ * now, or sets the card to generate an interrupt when it is ready
+ * for the packet.
+ */
+static int smc911x_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct smc911x_local *lp = netdev_priv(dev);
+	unsigned long ioaddr = dev->base_addr;
+	unsigned int free;
+	unsigned long flags;
+
+	DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n",
+		dev->name, __FUNCTION__);
+
+	BUG_ON(lp->pending_tx_skb != NULL);
+
+	free = SMC_GET_TX_FIFO_INF() & TX_FIFO_INF_TDFREE_;
+	DBG(SMC_DEBUG_TX, "%s: TX free space %d\n", dev->name, free);
+
+	/* Turn off the flow when running out of space in FIFO */
+	if (free <= SMC911X_TX_FIFO_LOW_THRESHOLD) {
+		DBG(SMC_DEBUG_TX, "%s: Disabling data flow due to low FIFO space (%d)\n",
+			dev->name, free);
+		spin_lock_irqsave(&lp->lock, flags);
+		/* Reenable when at least 1 packet of size MTU present */
+		SMC_SET_FIFO_TDA((SMC911X_TX_FIFO_LOW_THRESHOLD)/64);
+		lp->tx_throttle = 1;
+		netif_stop_queue(dev);
+		spin_unlock_irqrestore(&lp->lock, flags);
+	}
+
+	/* Drop packets when we run out of space in TX FIFO
+	 * Account for overhead required for:
+	 *
+	 *	  Tx command words			 8 bytes
+	 *	  Start offset				 15 bytes
+	 *	  End padding				 15 bytes
+	 */
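+	/*
+	 * Worked example (editorial note, not in the original source): a
+	 * full 1514-byte Ethernet frame needs at least 1514 + 8 + 15 + 15 =
+	 * 1552 bytes free in the TX data FIFO to pass the check below.
+	 */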
+	if (unlikely(free < (skb->len + 8 + 15 + 15))) {
+		printk("%s: No Tx free space %d < %d\n",
+			dev->name, free, skb->len);
+		lp->pending_tx_skb = NULL;
+		lp->stats.tx_errors++;
+		lp->stats.tx_dropped++;
+		dev_kfree_skb(skb);
+		return 0;
+	}
+
+#ifdef SMC_USE_DMA
+	{
+		/* If the DMA is already running then defer this packet Tx until
+		 * the DMA IRQ starts it
+		 */
+		spin_lock_irqsave(&lp->lock, flags);
+		if (lp->txdma_active) {
+			DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: Tx DMA running, deferring packet\n", dev->name);
+			lp->pending_tx_skb = skb;
+			netif_stop_queue(dev);
+			spin_unlock_irqrestore(&lp->lock, flags);
+			return 0;
+		} else {
+			DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: Activating Tx DMA\n", dev->name);
+			lp->txdma_active = 1;
+		}
+		spin_unlock_irqrestore(&lp->lock, flags);
+	}
+#endif
+	lp->pending_tx_skb = skb;
+	smc911x_hardware_send_pkt(dev);
+
+	return 0;
+}
+
+/*
+ * This handles a TX status interrupt, which is only called when:
+ * - a TX error occurred, or
+ * - TX of a packet completed.
+ */
+static void smc911x_tx(struct net_device *dev)
+{
+	unsigned long ioaddr = dev->base_addr;
+	struct smc911x_local *lp = netdev_priv(dev);
+	unsigned int tx_status;
+
+	DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n",
+		dev->name, __FUNCTION__);
+
+	/* Collect the TX status */
+	while (((SMC_GET_TX_FIFO_INF() & TX_FIFO_INF_TSUSED_) >> 16) != 0) {
+		DBG(SMC_DEBUG_TX, "%s: Tx stat FIFO used 0x%04x\n",
+			dev->name,
+			(SMC_GET_TX_FIFO_INF() & TX_FIFO_INF_TSUSED_) >> 16);
+		tx_status = SMC_GET_TX_STS_FIFO();
+		lp->stats.tx_packets++;
+		lp->stats.tx_bytes+=tx_status>>16;
+		DBG(SMC_DEBUG_TX, "%s: Tx FIFO tag 0x%04x status 0x%04x\n",
+			dev->name, (tx_status & 0xffff0000) >> 16,
+			tx_status & 0x0000ffff);
+		/* count Tx errors, but ignore lost carrier errors when in
+		 * full-duplex mode */
+		if ((tx_status & TX_STS_ES_) && !(lp->ctl_rfduplx &&
+		    !(tx_status & 0x00000306))) {
+			lp->stats.tx_errors++;
+		}
+		if (tx_status & TX_STS_MANY_COLL_) {
+			lp->stats.collisions+=16;
+			lp->stats.tx_aborted_errors++;
+		} else {
+			lp->stats.collisions+=(tx_status & TX_STS_COLL_CNT_) >> 3;
+		}
+		/* carrier error only has meaning for half-duplex communication */
+		if ((tx_status & (TX_STS_LOC_ | TX_STS_NO_CARR_)) &&
+		    !lp->ctl_rfduplx) {
+			lp->stats.tx_carrier_errors++;
+		}
+		if (tx_status & TX_STS_LATE_COLL_) {
+			lp->stats.collisions++;
+			lp->stats.tx_aborted_errors++;
+		}
+	}
+}
+
+
+/*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/
+/*
+ * Reads a register from the MII Management serial interface
+ */
+
+static int smc911x_phy_read(struct net_device *dev, int phyaddr, int phyreg)
+{
+	unsigned long ioaddr = dev->base_addr;
+	unsigned int phydata;
+
+	SMC_GET_MII(phyreg, phyaddr, phydata);
+
+	DBG(SMC_DEBUG_MISC, "%s: phyaddr=0x%x, phyreg=0x%02x, phydata=0x%04x\n",
+		__FUNCTION__, phyaddr, phyreg, phydata);
+	return phydata;
+}
+
+
+/*
+ * Writes a register to the MII Management serial interface
+ */
+static void smc911x_phy_write(struct net_device *dev, int phyaddr, int phyreg,
+			int phydata)
+{
+	unsigned long ioaddr = dev->base_addr;
+
+	DBG(SMC_DEBUG_MISC, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n",
+		__FUNCTION__, phyaddr, phyreg, phydata);
+
+	SMC_SET_MII(phyreg, phyaddr, phydata);
+}
+
+/*
+ * Finds and reports the PHY address (115 and 117 have external
+ * PHY interface 118 has internal only
+ */
+static void smc911x_phy_detect(struct net_device *dev)
+{
+	unsigned long ioaddr = dev->base_addr;
+	struct smc911x_local *lp = netdev_priv(dev);
+	int phyaddr;
+	unsigned int cfg, id1, id2;
+
+	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+
+	lp->phy_type = 0;
+
+	/*
+	 * Scan all 32 PHY addresses if necessary, starting at
+	 * PHY#1 to PHY#31, and then PHY#0 last.
+	 */
+	switch(lp->version) {
+		case 0x115:
+		case 0x117:
+			cfg = SMC_GET_HW_CFG();
+			if (cfg & HW_CFG_EXT_PHY_DET_) {
+				cfg &= ~HW_CFG_PHY_CLK_SEL_;
+				cfg |= HW_CFG_PHY_CLK_SEL_CLK_DIS_;
+				SMC_SET_HW_CFG(cfg);
+				udelay(10); /* Wait for clocks to stop */
+
+				cfg |= HW_CFG_EXT_PHY_EN_;
+				SMC_SET_HW_CFG(cfg);
+				udelay(10); /* Wait for clocks to stop */
+
+				cfg &= ~HW_CFG_PHY_CLK_SEL_;
+				cfg |= HW_CFG_PHY_CLK_SEL_EXT_PHY_;
+				SMC_SET_HW_CFG(cfg);
+				udelay(10); /* Wait for clocks to stop */
+
+				cfg |= HW_CFG_SMI_SEL_;
+				SMC_SET_HW_CFG(cfg);
+
+				for (phyaddr = 1; phyaddr < 32; ++phyaddr) {
+
+					/* Read the PHY identifiers */
+					SMC_GET_PHY_ID1(phyaddr & 31, id1);
+					SMC_GET_PHY_ID2(phyaddr & 31, id2);
+
+					/* Make sure it is a valid identifier */
+					if (id1 != 0x0000 && id1 != 0xffff &&
+					    id1 != 0x8000 && id2 != 0x0000 &&
+					    id2 != 0xffff && id2 != 0x8000) {
+						/* Save the PHY's address */
+						lp->mii.phy_id = phyaddr & 31;
+						lp->phy_type = id1 << 16 | id2;
+						break;
+					}
+				}
+				/* Use the external PHY if a valid one was
+				 * found; otherwise fall back to the
+				 * internal PHY below.
+				 */
+				if (lp->phy_type != 0)
+					break;
+			}
+			/* fall through, use the internal PHY */
+		default:
+			/* Internal media only */
+			SMC_GET_PHY_ID1(1, id1);
+			SMC_GET_PHY_ID2(1, id2);
+			/* Save the PHY's address */
+			lp->mii.phy_id = 1;
+			lp->phy_type = id1 << 16 | id2;
+	}
+
+	DBG(SMC_DEBUG_MISC, "%s: phy_id1=0x%x, phy_id2=0x%x phyaddr=%d\n",
+		dev->name, id1, id2, lp->mii.phy_id);
+}
+
+/*
+ * Sets the PHY to a configuration as determined by the user.
+ * Called with spin_lock held.
+ */
+static int smc911x_phy_fixed(struct net_device *dev)
+{
+	struct smc911x_local *lp = netdev_priv(dev);
+	unsigned long ioaddr = dev->base_addr;
+	int phyaddr = lp->mii.phy_id;
+	int bmcr;
+
+	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+
+	/* Enter Link Disable state */
+	SMC_GET_PHY_BMCR(phyaddr, bmcr);
+	bmcr |= BMCR_PDOWN;
+	SMC_SET_PHY_BMCR(phyaddr, bmcr);
+
+	/*
+	 * Set our fixed capabilities
+	 * Disable auto-negotiation
+	 */
+	bmcr &= ~BMCR_ANENABLE;
+	if (lp->ctl_rfduplx)
+		bmcr |= BMCR_FULLDPLX;
+
+	if (lp->ctl_rspeed == 100)
+		bmcr |= BMCR_SPEED100;
+
+	/* Write our capabilities to the phy control register */
+	SMC_SET_PHY_BMCR(phyaddr, bmcr);
+
+	/* Re-Configure the Receive/Phy Control register */
+	bmcr &= ~BMCR_PDOWN;
+	SMC_SET_PHY_BMCR(phyaddr, bmcr);
+
+	return 1;
+}
+
+/*
+ * smc911x_phy_reset - reset the phy
+ * @dev: net device
+ * @phy: phy address
+ *
+ * Issue a software reset for the specified PHY and
+ * wait up to 100ms for the reset to complete.	 We should
+ * not access the PHY for 50ms after issuing the reset.
+ *
+ * The time to wait appears to be dependent on the PHY.
+ *
+ */
+static int smc911x_phy_reset(struct net_device *dev, int phy)
+{
+	struct smc911x_local *lp = netdev_priv(dev);
+	unsigned long ioaddr = dev->base_addr;
+	int timeout;
+	unsigned long flags;
+	unsigned int reg;
+
+	DBG(SMC_DEBUG_FUNC, "%s: --> %s()\n", dev->name, __FUNCTION__);
+
+	spin_lock_irqsave(&lp->lock, flags);
+	reg = SMC_GET_PMT_CTRL();
+	reg &= ~0xfffff030;
+	reg |= PMT_CTRL_PHY_RST_;
+	SMC_SET_PMT_CTRL(reg);
+	spin_unlock_irqrestore(&lp->lock, flags);
+	for (timeout = 2; timeout; timeout--) {
+		msleep(50);
+		spin_lock_irqsave(&lp->lock, flags);
+		reg = SMC_GET_PMT_CTRL();
+		spin_unlock_irqrestore(&lp->lock, flags);
+		if (!(reg & PMT_CTRL_PHY_RST_)) {
+			/* Extra delay required because the PHY may not
+			 * have completed its reset by the time
+			 * PMT_CTRL_PHY_RST_ is cleared. 256us should
+			 * suffice, but use 500us to be safe.
+			 */
+			udelay(500);
+			break;
+		}
+	}
+
+	return reg & PMT_CTRL_PHY_RST_;
+}
+
+/*
+ * smc911x_phy_powerdown - powerdown phy
+ * @dev: net device
+ * @phy: phy address
+ *
+ * Power down the specified PHY
+ */
+static void smc911x_phy_powerdown(struct net_device *dev, int phy)
+{
+	unsigned long ioaddr = dev->base_addr;
+	unsigned int bmcr;
+
+	/* Enter Link Disable state */
+	SMC_GET_PHY_BMCR(phy, bmcr);
+	bmcr |= BMCR_PDOWN;
+	SMC_SET_PHY_BMCR(phy, bmcr);
+}
+
+/*
+ * smc911x_phy_check_media - check the media status and adjust BMCR
+ * @dev: net device
+ * @init: set true for initialisation
+ *
+ * Select duplex mode depending on negotiation state.	This
+ * also updates our carrier state.
+ */
+static void smc911x_phy_check_media(struct net_device *dev, int init)
+{
+	struct smc911x_local *lp = netdev_priv(dev);
+	unsigned long ioaddr = dev->base_addr;
+	int phyaddr = lp->mii.phy_id;
+	unsigned int bmcr, cr;
+
+	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+
+	if (mii_check_media(&lp->mii, netif_msg_link(lp), init)) {
+		/* duplex state has changed */
+		SMC_GET_PHY_BMCR(phyaddr, bmcr);
+		SMC_GET_MAC_CR(cr);
+		if (lp->mii.full_duplex) {
+			DBG(SMC_DEBUG_MISC, "%s: Configuring for full-duplex mode\n", dev->name);
+			bmcr |= BMCR_FULLDPLX;
+			cr |= MAC_CR_RCVOWN_;
+		} else {
+			DBG(SMC_DEBUG_MISC, "%s: Configuring for half-duplex mode\n", dev->name);
+			bmcr &= ~BMCR_FULLDPLX;
+			cr &= ~MAC_CR_RCVOWN_;
+		}
+		SMC_SET_PHY_BMCR(phyaddr, bmcr);
+		SMC_SET_MAC_CR(cr);
+	}
+}
+
+/*
+ * Configures the specified PHY through the MII management interface
+ * using Autonegotiation.
+ * Calls smc911x_phy_fixed() if the user has requested a certain config.
+ * If RPC ANEG bit is set, the media selection is dependent purely on
+ * the selection by the MII (either in the MII BMCR reg or the result
+ * of autonegotiation.)  If the RPC ANEG bit is cleared, the selection
+ * is controlled by the RPC SPEED and RPC DPLX bits.
+ */
+static void smc911x_phy_configure(void *data)
+{
+	struct net_device *dev = data;
+	struct smc911x_local *lp = netdev_priv(dev);
+	unsigned long ioaddr = dev->base_addr;
+	int phyaddr = lp->mii.phy_id;
+	int my_phy_caps; /* My PHY capabilities */
+	int my_ad_caps; /* My Advertised capabilities */
+	int status;
+	unsigned long flags;
+
+	DBG(SMC_DEBUG_FUNC, "%s: --> %s()\n", dev->name, __FUNCTION__);
+
+	/*
+	 * We should not be called if phy_type is zero.
+	 */
+	if (lp->phy_type == 0)
+		goto smc911x_phy_configure_exit_nolock;
+
+	if (smc911x_phy_reset(dev, phyaddr)) {
+		printk("%s: PHY reset timed out\n", dev->name);
+		goto smc911x_phy_configure_exit_nolock;
+	}
+	spin_lock_irqsave(&lp->lock, flags);
+
+	/*
+	 * Enable PHY Interrupts (for register 18)
+	 * Interrupts listed here are enabled
+	 */
+	SMC_SET_PHY_INT_MASK(phyaddr, PHY_INT_MASK_ENERGY_ON_ |
+		 PHY_INT_MASK_ANEG_COMP_ | PHY_INT_MASK_REMOTE_FAULT_ |
+		 PHY_INT_MASK_LINK_DOWN_);
+
+	/* If the user requested no auto neg, then go set his request */
+	if (lp->mii.force_media) {
+		smc911x_phy_fixed(dev);
+		goto smc911x_phy_configure_exit;
+	}
+
+	/* Copy our capabilities from MII_BMSR to MII_ADVERTISE */
+	SMC_GET_PHY_BMSR(phyaddr, my_phy_caps);
+	if (!(my_phy_caps & BMSR_ANEGCAPABLE)) {
+		printk(KERN_INFO "Auto negotiation NOT supported\n");
+		smc911x_phy_fixed(dev);
+		goto smc911x_phy_configure_exit;
+	}
+
+	/* CSMA capable w/ both pauses */
+	my_ad_caps = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
+
+	if (my_phy_caps & BMSR_100BASE4)
+		my_ad_caps |= ADVERTISE_100BASE4;
+	if (my_phy_caps & BMSR_100FULL)
+		my_ad_caps |= ADVERTISE_100FULL;
+	if (my_phy_caps & BMSR_100HALF)
+		my_ad_caps |= ADVERTISE_100HALF;
+	if (my_phy_caps & BMSR_10FULL)
+		my_ad_caps |= ADVERTISE_10FULL;
+	if (my_phy_caps & BMSR_10HALF)
+		my_ad_caps |= ADVERTISE_10HALF;
+
+	/* Disable capabilities not selected by our user */
+	if (lp->ctl_rspeed != 100)
+		my_ad_caps &= ~(ADVERTISE_100BASE4|ADVERTISE_100FULL|ADVERTISE_100HALF);
+
+	 if (!lp->ctl_rfduplx)
+		my_ad_caps &= ~(ADVERTISE_100FULL|ADVERTISE_10FULL);
+
+	/* Update our Auto-Neg Advertisement Register */
+	SMC_SET_PHY_MII_ADV(phyaddr, my_ad_caps);
+	lp->mii.advertising = my_ad_caps;
+
+	/*
+	 * Read the register back.	 Without this, it appears that when
+	 * auto-negotiation is restarted, sometimes it isn't ready and
+	 * the link does not come up.
+	 */
+	udelay(10);
+	SMC_GET_PHY_MII_ADV(phyaddr, status);
+
+	DBG(SMC_DEBUG_MISC, "%s: phy caps=0x%04x\n", dev->name, my_phy_caps);
+	DBG(SMC_DEBUG_MISC, "%s: phy advertised caps=0x%04x\n", dev->name, my_ad_caps);
+
+	/* Restart auto-negotiation process in order to advertise my caps */
+	SMC_SET_PHY_BMCR(phyaddr, BMCR_ANENABLE | BMCR_ANRESTART);
+
+	smc911x_phy_check_media(dev, 1);
+
+smc911x_phy_configure_exit:
+	spin_unlock_irqrestore(&lp->lock, flags);
+smc911x_phy_configure_exit_nolock:
+	lp->work_pending = 0;
+}
+
+/*
+ * smc911x_phy_interrupt
+ *
+ * Purpose:  Handle interrupts relating to PHY register 18. This is
+ *	 called from the "hard" interrupt handler under our private spinlock.
+ */
+static void smc911x_phy_interrupt(struct net_device *dev)
+{
+	struct smc911x_local *lp = netdev_priv(dev);
+	unsigned long ioaddr = dev->base_addr;
+	int phyaddr = lp->mii.phy_id;
+	int status;
+
+	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+
+	if (lp->phy_type == 0)
+		return;
+
+	smc911x_phy_check_media(dev, 0);
+	/* read to clear status bits */
+	SMC_GET_PHY_INT_SRC(phyaddr,status);
+	DBG(SMC_DEBUG_MISC, "%s: PHY interrupt status 0x%04x\n",
+		dev->name, status & 0xffff);
+	DBG(SMC_DEBUG_MISC, "%s: AFC_CFG 0x%08x\n",
+		dev->name, SMC_GET_AFC_CFG());
+}
+
+/*--- END PHY CONTROL AND CONFIGURATION-------------------------------------*/
+
+/*
+ * This is the main routine of the driver, to handle the device when
+ * it needs some attention.
+ */
+static irqreturn_t smc911x_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct net_device *dev = dev_id;
+	unsigned long ioaddr = dev->base_addr;
+	struct smc911x_local *lp = netdev_priv(dev);
+	unsigned int status, mask, timeout;
+	unsigned int rx_overrun=0, cr, pkts;
+	unsigned long flags;
+
+	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+
+	spin_lock_irqsave(&lp->lock, flags);
+
+	/* Spurious interrupt check */
+	if ((SMC_GET_IRQ_CFG() & (INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) !=
+		(INT_CFG_IRQ_INT_ | INT_CFG_IRQ_EN_)) {
+		spin_unlock_irqrestore(&lp->lock, flags);
+		return IRQ_NONE;
+	}
+
+	mask = SMC_GET_INT_EN();
+	SMC_SET_INT_EN(0);
+
+	/* set a timeout value, so I don't stay here forever */
+	timeout = 8;
+
+
+	do {
+		status = SMC_GET_INT();
+
+		DBG(SMC_DEBUG_MISC, "%s: INT 0x%08x MASK 0x%08x OUTSIDE MASK 0x%08x\n",
+			dev->name, status, mask, status & ~mask);
+
+		status &= mask;
+		if (!status)
+			break;
+
+		/* Handle SW interrupt condition */
+		if (status & INT_STS_SW_INT_) {
+			SMC_ACK_INT(INT_STS_SW_INT_);
+			mask &= ~INT_EN_SW_INT_EN_;
+		}
+		/* Handle various error conditions */
+		if (status & INT_STS_RXE_) {
+			SMC_ACK_INT(INT_STS_RXE_);
+			lp->stats.rx_errors++;
+		}
+		if (status & INT_STS_RXDFH_INT_) {
+			SMC_ACK_INT(INT_STS_RXDFH_INT_);
+			lp->stats.rx_dropped+=SMC_GET_RX_DROP();
+		 }
+		/* Undocumented interrupt-what is the right thing to do here? */
+		if (status & INT_STS_RXDF_INT_) {
+			SMC_ACK_INT(INT_STS_RXDF_INT_);
+		}
+
+		/* Rx Data FIFO exceeds set level */
+		if (status & INT_STS_RDFL_) {
+			if (IS_REV_A(lp->revision)) {
+				rx_overrun=1;
+				SMC_GET_MAC_CR(cr);
+				cr &= ~MAC_CR_RXEN_;
+				SMC_SET_MAC_CR(cr);
+				DBG(SMC_DEBUG_RX, "%s: RX overrun\n", dev->name);
+				lp->stats.rx_errors++;
+				lp->stats.rx_fifo_errors++;
+			}
+			SMC_ACK_INT(INT_STS_RDFL_);
+		}
+		if (status & INT_STS_RDFO_) {
+			if (!IS_REV_A(lp->revision)) {
+				SMC_GET_MAC_CR(cr);
+				cr &= ~MAC_CR_RXEN_;
+				SMC_SET_MAC_CR(cr);
+				rx_overrun=1;
+				DBG(SMC_DEBUG_RX, "%s: RX overrun\n", dev->name);
+				lp->stats.rx_errors++;
+				lp->stats.rx_fifo_errors++;
+			}
+			SMC_ACK_INT(INT_STS_RDFO_);
+		}
+		/* Handle receive condition */
+		if ((status & INT_STS_RSFL_) || rx_overrun) {
+			unsigned int fifo;
+			DBG(SMC_DEBUG_RX, "%s: RX irq\n", dev->name);
+			fifo = SMC_GET_RX_FIFO_INF();
+			pkts = (fifo & RX_FIFO_INF_RXSUSED_) >> 16;
+			DBG(SMC_DEBUG_RX, "%s: Rx FIFO pkts %d, bytes %d\n",
+				dev->name, pkts, fifo & 0xFFFF );
+			if (pkts != 0) {
+#ifdef SMC_USE_DMA
+				unsigned int fifo;
+				if (lp->rxdma_active){
+					DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA,
+						"%s: RX DMA active\n", dev->name);
+					/* The DMA is already running so up the IRQ threshold */
+					fifo = SMC_GET_FIFO_INT() & ~0xFF;
+					fifo |= pkts & 0xFF;
+					DBG(SMC_DEBUG_RX,
+						"%s: Setting RX stat FIFO threshold to %d\n",
+						dev->name, fifo & 0xff);
+					SMC_SET_FIFO_INT(fifo);
+				} else
+#endif
+				smc911x_rcv(dev);
+			}
+			SMC_ACK_INT(INT_STS_RSFL_);
+		}
+		/* Handle transmit FIFO available */
+		if (status & INT_STS_TDFA_) {
+			DBG(SMC_DEBUG_TX, "%s: TX data FIFO space available irq\n", dev->name);
+			SMC_SET_FIFO_TDA(0xFF);
+			lp->tx_throttle = 0;
+#ifdef SMC_USE_DMA
+			if (!lp->txdma_active)
+#endif
+				netif_wake_queue(dev);
+			SMC_ACK_INT(INT_STS_TDFA_);
+		}
+		/* Handle transmit done condition */
+#if 1
+		if (status & (INT_STS_TSFL_ | INT_STS_GPT_INT_)) {
+			DBG(SMC_DEBUG_TX | SMC_DEBUG_MISC,
+				"%s: Tx stat FIFO limit (%d) /GPT irq\n",
+				dev->name, (SMC_GET_FIFO_INT() & 0x00ff0000) >> 16);
+			smc911x_tx(dev);
+			SMC_SET_GPT_CFG(GPT_CFG_TIMER_EN_ | 10000);
+			SMC_ACK_INT(INT_STS_TSFL_);
+			SMC_ACK_INT(INT_STS_TSFL_ | INT_STS_GPT_INT_);
+		}
+#else
+		if (status & INT_STS_TSFL_) {
+			DBG(SMC_DEBUG_TX, "%s: TX status FIFO limit (%d) irq\n",
+				dev->name,
+				(SMC_GET_FIFO_INT() & 0x00ff0000) >> 16);
+			smc911x_tx(dev);
+			SMC_ACK_INT(INT_STS_TSFL_);
+		}
+
+		if (status & INT_STS_GPT_INT_) {
+			DBG(SMC_DEBUG_RX, "%s: IRQ_CFG 0x%08x FIFO_INT 0x%08x RX_CFG 0x%08x\n",
+				dev->name,
+				SMC_GET_IRQ_CFG(),
+				SMC_GET_FIFO_INT(),
+				SMC_GET_RX_CFG());
+			DBG(SMC_DEBUG_RX, "%s: Rx Stat FIFO Used 0x%02x "
+				"Data FIFO Used 0x%04x Stat FIFO 0x%08x\n",
+				dev->name,
+				(SMC_GET_RX_FIFO_INF() & 0x00ff0000) >> 16,
+				SMC_GET_RX_FIFO_INF() & 0xffff,
+				SMC_GET_RX_STS_FIFO_PEEK());
+			SMC_SET_GPT_CFG(GPT_CFG_TIMER_EN_ | 10000);
+			SMC_ACK_INT(INT_STS_GPT_INT_);
+		}
+#endif
+
+		/* Handle PHY interrupt condition */
+		if (status & INT_STS_PHY_INT_) {
+			DBG(SMC_DEBUG_MISC, "%s: PHY irq\n", dev->name);
+			smc911x_phy_interrupt(dev);
+			SMC_ACK_INT(INT_STS_PHY_INT_);
+		}
+	} while (--timeout);
+
+	/* restore mask state */
+	SMC_SET_INT_EN(mask);
+
+	DBG(SMC_DEBUG_MISC, "%s: Interrupt done (%d loops)\n",
+		dev->name, 8-timeout);
+
+	spin_unlock_irqrestore(&lp->lock, flags);
+
+	return IRQ_HANDLED;
+}
+
+#ifdef SMC_USE_DMA
+static void
+smc911x_tx_dma_irq(int dma, void *data, struct pt_regs *regs)
+{
+	struct net_device *dev = (struct net_device *)data;
+	struct smc911x_local *lp = netdev_priv(dev);
+	struct sk_buff *skb = lp->current_tx_skb;
+	unsigned long flags;
+
+	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+
+	DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: TX DMA irq handler\n", dev->name);
+	/* Clear the DMA interrupt sources */
+	SMC_DMA_ACK_IRQ(dev, dma);
+	BUG_ON(skb == NULL);
+	dma_unmap_single(NULL, tx_dmabuf, tx_dmalen, DMA_TO_DEVICE);
+	dev->trans_start = jiffies;
+	dev_kfree_skb_irq(skb);
+	lp->current_tx_skb = NULL;
+	if (lp->pending_tx_skb != NULL)
+		smc911x_hardware_send_pkt(dev);
+	else {
+		DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA,
+			"%s: No pending Tx packets. DMA disabled\n", dev->name);
+		spin_lock_irqsave(&lp->lock, flags);
+		lp->txdma_active = 0;
+		if (!lp->tx_throttle) {
+			netif_wake_queue(dev);
+		}
+		spin_unlock_irqrestore(&lp->lock, flags);
+	}
+
+	DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA,
+		"%s: TX DMA irq completed\n", dev->name);
+}
+static void
+smc911x_rx_dma_irq(int dma, void *data, struct pt_regs *regs)
+{
+	struct net_device *dev = (struct net_device *)data;
+	unsigned long ioaddr = dev->base_addr;
+	struct smc911x_local *lp = netdev_priv(dev);
+	struct sk_buff *skb = lp->current_rx_skb;
+	unsigned long flags;
+	unsigned int pkts;
+
+	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+	DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, "%s: RX DMA irq handler\n", dev->name);
+	/* Clear the DMA interrupt sources */
+	SMC_DMA_ACK_IRQ(dev, dma);
+	dma_unmap_single(NULL, rx_dmabuf, rx_dmalen, DMA_FROM_DEVICE);
+	BUG_ON(skb == NULL);
+	lp->current_rx_skb = NULL;
+	PRINT_PKT(skb->data, skb->len);
+	dev->last_rx = jiffies;
+	skb->dev = dev;
+	skb->protocol = eth_type_trans(skb, dev);
+	netif_rx(skb);
+	lp->stats.rx_packets++;
+	lp->stats.rx_bytes += skb->len;
+
+	spin_lock_irqsave(&lp->lock, flags);
+	pkts = (SMC_GET_RX_FIFO_INF() & RX_FIFO_INF_RXSUSED_) >> 16;
+	if (pkts != 0) {
+		smc911x_rcv(dev);
+	} else {
+		lp->rxdma_active = 0;
+	}
+	spin_unlock_irqrestore(&lp->lock, flags);
+	DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA,
+		"%s: RX DMA irq completed. DMA RX FIFO PKTS %d\n",
+		dev->name, pkts);
+}
+#endif	 /* SMC_USE_DMA */
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling receive - used by netconsole and other diagnostic tools
+ * to allow network i/o with interrupts disabled.
+ */
+static void smc911x_poll_controller(struct net_device *dev)
+{
+	disable_irq(dev->irq);
+	smc911x_interrupt(dev->irq, dev, NULL);
+	enable_irq(dev->irq);
+}
+#endif
+
+/* Our watchdog timed out. Called by the networking layer */
+static void smc911x_timeout(struct net_device *dev)
+{
+	struct smc911x_local *lp = netdev_priv(dev);
+	unsigned long ioaddr = dev->base_addr;
+	int status, mask;
+	unsigned long flags;
+
+	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+
+	spin_lock_irqsave(&lp->lock, flags);
+	status = SMC_GET_INT();
+	mask = SMC_GET_INT_EN();
+	spin_unlock_irqrestore(&lp->lock, flags);
+	DBG(SMC_DEBUG_MISC, "%s: INT 0x%02x MASK 0x%02x \n",
+		dev->name, status, mask);
+
+	/* Dump the current TX FIFO contents and restart */
+	mask = SMC_GET_TX_CFG();
+	SMC_SET_TX_CFG(mask | TX_CFG_TXS_DUMP_ | TX_CFG_TXD_DUMP_);
+	/*
+	 * Reconfiguring the PHY doesn't seem like a bad idea here, but
+	 * smc911x_phy_configure() calls msleep() which calls schedule_timeout()
+	 * which calls schedule().	 Hence we use a work queue.
+	 */
+	if (lp->phy_type != 0) {
+		if (schedule_work(&lp->phy_configure)) {
+			lp->work_pending = 1;
+		}
+	}
+
+	/* We can accept TX packets again */
+	dev->trans_start = jiffies;
+	netif_wake_queue(dev);
+}
+
+/*
+ * This routine will, depending on the values passed to it,
+ * either make it accept multicast packets, go into
+ * promiscuous mode (for TCPDUMP and cousins) or accept
+ * a select set of multicast packets
+ */
+static void smc911x_set_multicast_list(struct net_device *dev)
+{
+	struct smc911x_local *lp = netdev_priv(dev);
+	unsigned long ioaddr = dev->base_addr;
+	unsigned int multicast_table[2];
+	unsigned int mcr, update_multicast = 0;
+	unsigned long flags;
+	/* table for flipping the order of 5 bits */
+	static const unsigned char invert5[] =
+		{0x00, 0x10, 0x08, 0x18, 0x04, 0x14, 0x0C, 0x1C,
+		 0x02, 0x12, 0x0A, 0x1A, 0x06, 0x16, 0x0E, 0x1E,
+		 0x01, 0x11, 0x09, 0x19, 0x05, 0x15, 0x0D, 0x1D,
+		 0x03, 0x13, 0x0B, 0x1B, 0x07, 0x17, 0x0F, 0x1F};
+
+
+	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+
+	spin_lock_irqsave(&lp->lock, flags);
+	SMC_GET_MAC_CR(mcr);
+	spin_unlock_irqrestore(&lp->lock, flags);
+
+	if (dev->flags & IFF_PROMISC) {
+
+		DBG(SMC_DEBUG_MISC, "%s: RCR_PRMS\n", dev->name);
+		mcr |= MAC_CR_PRMS_;
+	}
+	/*
+	 * Here, I am setting this to accept all multicast packets.
+	 * I don't need to zero the multicast table, because the flag is
+	 * checked before the table is used.
+	 */
+	else if (dev->flags & IFF_ALLMULTI || dev->mc_count > 16) {
+		DBG(SMC_DEBUG_MISC, "%s: RCR_ALMUL\n", dev->name);
+		mcr |= MAC_CR_MCPAS_;
+	}
+
+	/*
+	 * This sets the internal hardware table to filter out unwanted
+	 * multicast packets before they take up memory.
+	 *
+	 * The SMC chip uses a hash table where the high 6 bits of the CRC of
+	 * address are the offset into the table.	If that bit is 1, then the
+	 * multicast packet is accepted.  Otherwise, it's dropped silently.
+	 *
+	 * To use the 6 bits as an offset into the table, the high 1 bit is
+	 * the number of the 32 bit register, while the low 5 bits are the bit
+	 * within that register.
+	 */
+	else if (dev->mc_count)  {
+		int i;
+		struct dev_mc_list *cur_addr;
+
+		/* Set the Hash Perfect filtering mode */
+		mcr |= MAC_CR_HPFILT_;
+
+		/* start with a table of all zeros: reject all */
+		memset(multicast_table, 0, sizeof(multicast_table));
+
+		cur_addr = dev->mc_list;
+		for (i = 0; i < dev->mc_count; i++, cur_addr = cur_addr->next) {
+			int position;
+
+			/* do we have a pointer here? */
+			if (!cur_addr)
+				break;
+			/* make sure this is a multicast address -
+				shouldn't this be a given if we have it here ? */
+			if (!(*cur_addr->dmi_addr & 1))
+				 continue;
+
+			/* only use the low order bits */
+			position = crc32_le(~0, cur_addr->dmi_addr, 6) & 0x3f;
+
+			/* do some messy swapping to put the bit in the right spot */
+			multicast_table[invert5[position&0x1F]&0x1] |=
+				(1<<invert5[(position>>1)&0x1F]);
+		}
+
+		/* be sure I get rid of flags I might have set */
+		mcr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_);
+
+		/* now, the table can be loaded into the chipset */
+		update_multicast = 1;
+	} else	 {
+		DBG(SMC_DEBUG_MISC, "%s: ~(MAC_CR_PRMS_|MAC_CR_MCPAS_)\n",
+			dev->name);
+		mcr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_);
+
+		/*
+		 * since I'm disabling all multicast entirely, I need to
+		 * clear the multicast list
+		 */
+		memset(multicast_table, 0, sizeof(multicast_table));
+		update_multicast = 1;
+	}
+
+	spin_lock_irqsave(&lp->lock, flags);
+	SMC_SET_MAC_CR(mcr);
+	if (update_multicast) {
+		DBG(SMC_DEBUG_MISC,
+			"%s: update mcast hash table 0x%08x 0x%08x\n",
+			dev->name, multicast_table[0], multicast_table[1]);
+		SMC_SET_HASHL(multicast_table[0]);
+		SMC_SET_HASHH(multicast_table[1]);
+	}
+	spin_unlock_irqrestore(&lp->lock, flags);
+}
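
The bit placement into the two 32-bit hash registers is easier to see in isolation. Below is a standalone sketch that reuses the invert5 table and the exact index-to-bit expression from smc911x_set_multicast_list() above; it assumes the 6-bit index has already been derived with crc32_le() as in that loop.

#include <stdint.h>
#include <stdio.h>

/* Same 5-bit reversal table as in smc911x_set_multicast_list(). */
static const unsigned char invert5[] =
	{0x00, 0x10, 0x08, 0x18, 0x04, 0x14, 0x0C, 0x1C,
	 0x02, 0x12, 0x0A, 0x1A, 0x06, 0x16, 0x0E, 0x1E,
	 0x01, 0x11, 0x09, 0x19, 0x05, 0x15, 0x0D, 0x1D,
	 0x03, 0x13, 0x0B, 0x1B, 0x07, 0x17, 0x0F, 0x1F};

int main(void)
{
	uint32_t multicast_table[2] = { 0, 0 };
	int position;

	/* In the driver, "position" is crc32_le(~0, dmi_addr, 6) & 0x3f.
	 * Walk every possible 6-bit index and show where the driver's
	 * expression places it; multicast_table[0] is loaded into HASHL
	 * and multicast_table[1] into HASHH. */
	for (position = 0; position < 64; position++) {
		int reg = invert5[position & 0x1F] & 0x1;
		int bit = invert5[(position >> 1) & 0x1F];

		multicast_table[reg] |= 1u << bit;
		printf("index %2d -> %s bit %2d\n",
			position, reg ? "HASHH" : "HASHL", bit);
	}
	printf("HASHL=0x%08x HASHH=0x%08x\n",
		(unsigned)multicast_table[0], (unsigned)multicast_table[1]);
	return 0;
}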
+
+
+/*
+ * Open and Initialize the board
+ *
+ * Set up everything, reset the card, etc..
+ */
+static int
+smc911x_open(struct net_device *dev)
+{
+	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+
+	/*
+	 * Check that the address is valid.  If it's not, refuse
+	 * to bring the device up.  The user must specify an
+	 * address using "ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx".
+	 */
+	if (!is_valid_ether_addr(dev->dev_addr)) {
+		PRINTK("%s: no valid ethernet hw addr\n", __FUNCTION__);
+		return -EINVAL;
+	}
+
+	/* reset the hardware */
+	smc911x_reset(dev);
+
+	/* Configure the PHY, initialize the link state */
+	smc911x_phy_configure(dev);
+
+	/* Turn on Tx + Rx */
+	smc911x_enable(dev);
+
+	netif_start_queue(dev);
+
+	return 0;
+}
+
+/*
+ * smc911x_close
+ *
+ * this makes the board clean up everything that it can
+ * and not talk to the outside world.	 Caused by
+ * an 'ifconfig ethX down'
+ */
+static int smc911x_close(struct net_device *dev)
+{
+	struct smc911x_local *lp = netdev_priv(dev);
+
+	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+
+	netif_stop_queue(dev);
+	netif_carrier_off(dev);
+
+	/* clear everything */
+	smc911x_shutdown(dev);
+
+	if (lp->phy_type != 0) {
+		/* We need to ensure that no calls to
+		 * smc911x_phy_configure are pending.
+		 *
+		 * flush_scheduled_work() cannot be called because we
+		 * are running with the netlink semaphore held (from
+		 * devinet_ioctl()) and the pending work queue
+		 * contains linkwatch_event() (scheduled by
+		 * netif_carrier_off() above). linkwatch_event() also
+		 * wants the netlink semaphore.
+		 */
+		while (lp->work_pending)
+			schedule();
+		smc911x_phy_powerdown(dev, lp->mii.phy_id);
+	}
+
+	if (lp->pending_tx_skb) {
+		dev_kfree_skb(lp->pending_tx_skb);
+		lp->pending_tx_skb = NULL;
+	}
+
+	return 0;
+}
+
+/*
+ * Get the current statistics.
+ * This may be called with the card open or closed.
+ */
+static struct net_device_stats *smc911x_query_statistics(struct net_device *dev)
+{
+	struct smc911x_local *lp = netdev_priv(dev);
+	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+
+	return &lp->stats;
+}
+
+/*
+ * Ethtool support
+ */
+static int
+smc911x_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct smc911x_local *lp = netdev_priv(dev);
+	unsigned long ioaddr = dev->base_addr;
+	int ret, status;
+	unsigned long flags;
+
+	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+	cmd->maxtxpkt = 1;
+	cmd->maxrxpkt = 1;
+
+	if (lp->phy_type != 0) {
+		spin_lock_irqsave(&lp->lock, flags);
+		ret = mii_ethtool_gset(&lp->mii, cmd);
+		spin_unlock_irqrestore(&lp->lock, flags);
+	} else {
+		cmd->supported = SUPPORTED_10baseT_Half |
+				SUPPORTED_10baseT_Full |
+				SUPPORTED_TP | SUPPORTED_AUI;
+
+		if (lp->ctl_rspeed == 10)
+			cmd->speed = SPEED_10;
+		else if (lp->ctl_rspeed == 100)
+			cmd->speed = SPEED_100;
+
+		cmd->autoneg = AUTONEG_DISABLE;
+		if (lp->mii.phy_id==1)
+			cmd->transceiver = XCVR_INTERNAL;
+		else
+			cmd->transceiver = XCVR_EXTERNAL;
+		cmd->port = 0;
+		SMC_GET_PHY_SPECIAL(lp->mii.phy_id, status);
+		cmd->duplex =
+			(status & (PHY_SPECIAL_SPD_10FULL_ | PHY_SPECIAL_SPD_100FULL_)) ?
+				DUPLEX_FULL : DUPLEX_HALF;
+		ret = 0;
+	}
+
+	return ret;
+}
+
+static int
+smc911x_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct smc911x_local *lp = netdev_priv(dev);
+	int ret;
+	unsigned long flags;
+
+	if (lp->phy_type != 0) {
+		spin_lock_irqsave(&lp->lock, flags);
+		ret = mii_ethtool_sset(&lp->mii, cmd);
+		spin_unlock_irqrestore(&lp->lock, flags);
+	} else {
+		if (cmd->autoneg != AUTONEG_DISABLE ||
+			cmd->speed != SPEED_10 ||
+			(cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL) ||
+			(cmd->port != PORT_TP && cmd->port != PORT_AUI))
+			return -EINVAL;
+
+		lp->ctl_rfduplx = cmd->duplex == DUPLEX_FULL;
+
+		ret = 0;
+	}
+
+	return ret;
+}
+
+static void
+smc911x_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+	strncpy(info->driver, CARDNAME, sizeof(info->driver));
+	strncpy(info->version, version, sizeof(info->version));
+	strncpy(info->bus_info, dev->class_dev.dev->bus_id, sizeof(info->bus_info));
+}
+
+static int smc911x_ethtool_nwayreset(struct net_device *dev)
+{
+	struct smc911x_local *lp = netdev_priv(dev);
+	int ret = -EINVAL;
+	unsigned long flags;
+
+	if (lp->phy_type != 0) {
+		spin_lock_irqsave(&lp->lock, flags);
+		ret = mii_nway_restart(&lp->mii);
+		spin_unlock_irqrestore(&lp->lock, flags);
+	}
+
+	return ret;
+}
+
+static u32 smc911x_ethtool_getmsglevel(struct net_device *dev)
+{
+	struct smc911x_local *lp = netdev_priv(dev);
+	return lp->msg_enable;
+}
+
+static void smc911x_ethtool_setmsglevel(struct net_device *dev, u32 level)
+{
+	struct smc911x_local *lp = netdev_priv(dev);
+	lp->msg_enable = level;
+}
+
+static int smc911x_ethtool_getregslen(struct net_device *dev)
+{
+	/* System regs + MAC regs + PHY regs */
+	return (((E2P_CMD - ID_REV)/4 + 1) +
+			(WUCSR - MAC_CR)+1 + 32) * sizeof(u32);
+}
+
+static void smc911x_ethtool_getregs(struct net_device *dev,
+										 struct ethtool_regs* regs, void *buf)
+{
+	unsigned long ioaddr = dev->base_addr;
+	struct smc911x_local *lp = netdev_priv(dev);
+	unsigned long flags;
+	u32 reg,i,j=0;
+	u32 *data = (u32*)buf;
+
+	regs->version = lp->version;
+	for(i=ID_REV;i<=E2P_CMD;i+=4) {
+		data[j++] = SMC_inl(ioaddr,i);
+	}
+	for(i=MAC_CR;i<=WUCSR;i++) {
+		spin_lock_irqsave(&lp->lock, flags);
+		SMC_GET_MAC_CSR(i, reg);
+		spin_unlock_irqrestore(&lp->lock, flags);
+		data[j++] = reg;
+	}
+	for(i=0;i<=31;i++) {
+		spin_lock_irqsave(&lp->lock, flags);
+		SMC_GET_MII(i, lp->mii.phy_id, reg);
+		spin_unlock_irqrestore(&lp->lock, flags);
+		data[j++] = reg & 0xFFFF;
+	}
+}
+
+static int smc911x_ethtool_wait_eeprom_ready(struct net_device *dev)
+{
+	unsigned long ioaddr = dev->base_addr;
+	unsigned int timeout;
+	int e2p_cmd;
+
+	e2p_cmd = SMC_GET_E2P_CMD();
+	for(timeout=10;(e2p_cmd & E2P_CMD_EPC_BUSY_) && timeout; timeout--) {
+		if (e2p_cmd & E2P_CMD_EPC_TIMEOUT_) {
+			PRINTK("%s: %s timeout waiting for EEPROM to respond\n",
+				dev->name, __FUNCTION__);
+			return -EFAULT;
+		}
+		mdelay(1);
+		e2p_cmd = SMC_GET_E2P_CMD();
+	}
+	if (timeout == 0) {
+		PRINTK("%s: %s timeout waiting for EEPROM CMD not busy\n",
+			dev->name, __FUNCTION__);
+		return -ETIMEDOUT;
+	}
+	return 0;
+}
+
+static inline int smc911x_ethtool_write_eeprom_cmd(struct net_device *dev,
+													int cmd, int addr)
+{
+	unsigned long ioaddr = dev->base_addr;
+	int ret;
+
+	if ((ret = smc911x_ethtool_wait_eeprom_ready(dev))!=0)
+		return ret;
+	SMC_SET_E2P_CMD(E2P_CMD_EPC_BUSY_ |
+		((cmd) & (0x7<<28)) |
+		((addr) & 0xFF));
+	return 0;
+}
+
+static inline int smc911x_ethtool_read_eeprom_byte(struct net_device *dev,
+													u8 *data)
+{
+	unsigned long ioaddr = dev->base_addr;
+	int ret;
+
+	if ((ret = smc911x_ethtool_wait_eeprom_ready(dev))!=0)
+		return ret;
+	*data = SMC_GET_E2P_DATA();
+	return 0;
+}
+
+static inline int smc911x_ethtool_write_eeprom_byte(struct net_device *dev,
+													 u8 data)
+{
+	unsigned long ioaddr = dev->base_addr;
+	int ret;
+
+	if ((ret = smc911x_ethtool_wait_eeprom_ready(dev))!=0)
+		return ret;
+	SMC_SET_E2P_DATA(data);
+	return 0;
+}
+
+static int smc911x_ethtool_geteeprom(struct net_device *dev,
+									  struct ethtool_eeprom *eeprom, u8 *data)
+{
+	u8 eebuf[SMC911X_EEPROM_LEN];
+	int i, ret;
+
+	for(i=0;i<SMC911X_EEPROM_LEN;i++) {
+		if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_READ_, i ))!=0)
+			return ret;
+		if ((ret=smc911x_ethtool_read_eeprom_byte(dev, &eebuf[i]))!=0)
+			return ret;
+	}
+	memcpy(data, eebuf+eeprom->offset, eeprom->len);
+	return 0;
+}
+
+static int smc911x_ethtool_seteeprom(struct net_device *dev,
+									   struct ethtool_eeprom *eeprom, u8 *data)
+{
+	int i, ret;
+
+	/* Enable erase */
+	if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_EWEN_, 0 ))!=0)
+		return ret;
+	for(i=eeprom->offset;i<(eeprom->offset+eeprom->len);i++) {
+		/* erase byte */
+		if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_ERASE_, i ))!=0)
+			return ret;
+		/* write byte */
+		if ((ret=smc911x_ethtool_write_eeprom_byte(dev, *data))!=0)
+			 return ret;
+		if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_WRITE_, i ))!=0)
+			return ret;
+	}
+	return 0;
+}
+
+static int smc911x_ethtool_geteeprom_len(struct net_device *dev)
+{
+	 return SMC911X_EEPROM_LEN;
+}
+
+static struct ethtool_ops smc911x_ethtool_ops = {
+	.get_settings	 = smc911x_ethtool_getsettings,
+	.set_settings	 = smc911x_ethtool_setsettings,
+	.get_drvinfo	 = smc911x_ethtool_getdrvinfo,
+	.get_msglevel	 = smc911x_ethtool_getmsglevel,
+	.set_msglevel	 = smc911x_ethtool_setmsglevel,
+	.nway_reset = smc911x_ethtool_nwayreset,
+	.get_link	 = ethtool_op_get_link,
+	.get_regs_len	 = smc911x_ethtool_getregslen,
+	.get_regs	 = smc911x_ethtool_getregs,
+	.get_eeprom_len = smc911x_ethtool_geteeprom_len,
+	.get_eeprom = smc911x_ethtool_geteeprom,
+	.set_eeprom = smc911x_ethtool_seteeprom,
+};
+
+/*
+ * smc911x_findirq
+ *
+ * This routine has a simple purpose -- make the SMC chip generate an
+ * interrupt, so an auto-detect routine can detect it and find the IRQ.
+ */
+static int __init smc911x_findirq(unsigned long ioaddr)
+{
+	int timeout = 20;
+	unsigned long cookie;
+
+	DBG(SMC_DEBUG_FUNC, "--> %s\n", __FUNCTION__);
+
+	cookie = probe_irq_on();
+
+	/*
+	 * Force a SW interrupt
+	 */
+
+	SMC_SET_INT_EN(INT_EN_SW_INT_EN_);
+
+	/*
+	 * Wait until we are sure the interrupt has been generated
+	 */
+	do {
+		int int_status;
+		udelay(10);
+		int_status = SMC_GET_INT_EN();
+		if (int_status & INT_EN_SW_INT_EN_)
+			 break;		/* got the interrupt */
+	} while (--timeout);
+
+	/*
+	 * there is really nothing that I can do here if timeout fails,
+	 * as probe_irq_off() will return 0 anyway, which is what I
+	 * want in this case.	 Plus, the clean up is needed in both
+	 * cases.
+	 */
+
+	/* and disable all interrupts again */
+	SMC_SET_INT_EN(0);
+
+	/* and return what I found */
+	return probe_irq_off(cookie);
+}
+
+/*
+ * Function: smc911x_probe(unsigned long ioaddr)
+ *
+ * Purpose:
+ *	 Tests to see if a given ioaddr points to an SMC911x chip.
+ *	 Returns a 0 on success
+ *
+ * Algorithm:
+ *	 (1) see if the endian word is OK
+ *	 (2) see if I recognize the chip ID in the appropriate register
+ *
+ * Here I do typical initialization tasks.
+ *
+ * o  Initialize the structure if needed
+ * o  print out my vanity message if not done so already
+ * o  print out what type of hardware is detected
+ * o  print out the ethernet address
+ * o  find the IRQ
+ * o  set up my private data
+ * o  configure the dev structure with my subroutines
+ * o  actually GRAB the irq.
+ * o  GRAB the region
+ */
+static int __init smc911x_probe(struct net_device *dev, unsigned long ioaddr)
+{
+	struct smc911x_local *lp = netdev_priv(dev);
+	int i, retval;
+	unsigned int val, chip_id, revision;
+	const char *version_string;
+
+	DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__);
+
+	/* First, see if the endian word is recognized */
+	val = SMC_GET_BYTE_TEST();
+	DBG(SMC_DEBUG_MISC, "%s: endian probe returned 0x%04x\n", CARDNAME, val);
+	if (val != 0x87654321) {
+		printk(KERN_ERR "Invalid chip endian 0x%08x\n", val);
+		retval = -ENODEV;
+		goto err_out;
+	}
+
+	/*
+	 * check if the revision register is something that I
+	 * recognize.	These might need to be added to later,
+	 * as future revisions could be added.
+	 */
+	chip_id = SMC_GET_PN();
+	DBG(SMC_DEBUG_MISC, "%s: id probe returned 0x%04x\n", CARDNAME, chip_id);
+	for(i=0;chip_ids[i].id != 0; i++) {
+		if (chip_ids[i].id == chip_id) break;
+	}
+	if (!chip_ids[i].id) {
+		printk(KERN_ERR "Unknown chip ID %04x\n", chip_id);
+		retval = -ENODEV;
+		goto err_out;
+	}
+	version_string = chip_ids[i].name;
+
+	revision = SMC_GET_REV();
+	DBG(SMC_DEBUG_MISC, "%s: revision = 0x%04x\n", CARDNAME, revision);
+
+	/* At this point I'll assume that the chip is an SMC911x. */
+	DBG(SMC_DEBUG_MISC, "%s: Found a %s\n", CARDNAME, chip_ids[i].name);
+
+	/* Validate the TX FIFO size requested */
+	if ((tx_fifo_kb < 2) || (tx_fifo_kb > 14)) {
+		printk(KERN_ERR "Invalid TX FIFO size requested %d\n", tx_fifo_kb);
+		retval = -EINVAL;
+		goto err_out;
+	}
+
+	/* fill in some of the fields */
+	dev->base_addr = ioaddr;
+	lp->version = chip_ids[i].id;
+	lp->revision = revision;
+	lp->tx_fifo_kb = tx_fifo_kb;
+	/* Reverse calculate the RX FIFO size from the TX */
+	lp->tx_fifo_size=(lp->tx_fifo_kb<<10) - 512;
+	lp->rx_fifo_size= ((0x4000 - 512 - lp->tx_fifo_size) / 16) * 15;
+
+	/* Set the automatic flow control values */
+	switch(lp->tx_fifo_kb) {
+		/*
+		 *	 AFC_HI is about ((Rx Data Fifo Size)*2/3)/64
+		 *	 AFC_LO is AFC_HI/2
+		 *	 BACK_DUR is about 5uS*(AFC_LO) rounded down
+		 */
+		case 2:/* 13440 Rx Data Fifo Size */
+			lp->afc_cfg=0x008C46AF;break;
+		case 3:/* 12480 Rx Data Fifo Size */
+			lp->afc_cfg=0x0082419F;break;
+		case 4:/* 11520 Rx Data Fifo Size */
+			lp->afc_cfg=0x00783C9F;break;
+		case 5:/* 10560 Rx Data Fifo Size */
+			lp->afc_cfg=0x006E374F;break;
+		case 6:/* 9600 Rx Data Fifo Size */
+			lp->afc_cfg=0x0064328F;break;
+		case 7:/* 8640 Rx Data Fifo Size */
+			lp->afc_cfg=0x005A2D7F;break;
+		case 8:/* 7680 Rx Data Fifo Size */
+			lp->afc_cfg=0x0050287F;break;
+		case 9:/* 6720 Rx Data Fifo Size */
+			lp->afc_cfg=0x0046236F;break;
+		case 10:/* 5760 Rx Data Fifo Size */
+			lp->afc_cfg=0x003C1E6F;break;
+		case 11:/* 4800 Rx Data Fifo Size */
+			lp->afc_cfg=0x0032195F;break;
+		/*
+		 *	 AFC_HI is ~1520 bytes less than RX Data Fifo Size
+		 *	 AFC_LO is AFC_HI/2
+		 *	 BACK_DUR is about 5uS*(AFC_LO) rounded down
+		 */
+		case 12:/* 3840 Rx Data Fifo Size */
+			lp->afc_cfg=0x0024124F;break;
+		case 13:/* 2880 Rx Data Fifo Size */
+			lp->afc_cfg=0x0015073F;break;
+		case 14:/* 1920 Rx Data Fifo Size */
+			lp->afc_cfg=0x0006032F;break;
+		default:
+			PRINTK("%s: ERROR -- no AFC_CFG setting found\n",
+				dev->name);
+			break;
+	}
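
The table above can be cross-checked against the formulas stated in its comments. Below is a standalone sketch of that arithmetic, assuming the FIFO split computed a few lines earlier and AFC_HI ≈ (rx_fifo_size * 2/3) / 64 with AFC_LO = AFC_HI / 2 (valid for the 2..11 KB cases; BACK_DUR and the low FC* control bits come straight from the table and are not derived here).

#include <stdio.h>

/* Recompute the Tx/Rx FIFO split and the AFC_HI/AFC_LO bytes of the
 * afc_cfg table above for tx_fifo_kb = 2..11.  The printed AFC values
 * match the upper two bytes of the corresponding table entries. */
int main(void)
{
	int tx_fifo_kb;

	for (tx_fifo_kb = 2; tx_fifo_kb <= 11; tx_fifo_kb++) {
		int tx_fifo_size = (tx_fifo_kb << 10) - 512;
		int rx_fifo_size = ((0x4000 - 512 - tx_fifo_size) / 16) * 15;
		int afc_hi = (rx_fifo_size * 2 / 3) / 64;
		int afc_lo = afc_hi / 2;

		printf("tx_fifo_kb=%2d tx=%5d rx=%5d AFC_HI=0x%02x AFC_LO=0x%02x\n",
			tx_fifo_kb, tx_fifo_size, rx_fifo_size,
			afc_hi, afc_lo);
	}
	return 0;
}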
+
+	DBG(SMC_DEBUG_MISC | SMC_DEBUG_TX | SMC_DEBUG_RX,
+		"%s: tx_fifo %d rx_fifo %d afc_cfg 0x%08x\n", CARDNAME,
+		lp->tx_fifo_size, lp->rx_fifo_size, lp->afc_cfg);
+
+	spin_lock_init(&lp->lock);
+
+	/* Get the MAC address */
+	SMC_GET_MAC_ADDR(dev->dev_addr);
+
+	/* now, reset the chip, and put it into a known state */
+	smc911x_reset(dev);
+
+	/*
+	 * If dev->irq is 0, then the device has to be banged on to see
+	 * what the IRQ is.
+	 *
+	 * Specifying an IRQ is done with the assumption that the user knows
+	 * what (s)he is doing.  No checking is done!!!!
+	 */
+	if (dev->irq < 1) {
+		int trials;
+
+		trials = 3;
+		while (trials--) {
+			dev->irq = smc911x_findirq(ioaddr);
+			if (dev->irq)
+				break;
+			/* kick the card and try again */
+			smc911x_reset(dev);
+		}
+	}
+	if (dev->irq == 0) {
+		printk("%s: Couldn't autodetect your IRQ. Use irq=xx.\n",
+			dev->name);
+		retval = -ENODEV;
+		goto err_out;
+	}
+	dev->irq = irq_canonicalize(dev->irq);
+
+	/* Fill in the fields of the device structure with ethernet values. */
+	ether_setup(dev);
+
+	dev->open = smc911x_open;
+	dev->stop = smc911x_close;
+	dev->hard_start_xmit = smc911x_hard_start_xmit;
+	dev->tx_timeout = smc911x_timeout;
+	dev->watchdog_timeo = msecs_to_jiffies(watchdog);
+	dev->get_stats = smc911x_query_statistics;
+	dev->set_multicast_list = smc911x_set_multicast_list;
+	dev->ethtool_ops = &smc911x_ethtool_ops;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	dev->poll_controller = smc911x_poll_controller;
+#endif
+
+	INIT_WORK(&lp->phy_configure, smc911x_phy_configure, dev);
+	lp->mii.phy_id_mask = 0x1f;
+	lp->mii.reg_num_mask = 0x1f;
+	lp->mii.force_media = 0;
+	lp->mii.full_duplex = 0;
+	lp->mii.dev = dev;
+	lp->mii.mdio_read = smc911x_phy_read;
+	lp->mii.mdio_write = smc911x_phy_write;
+
+	/*
+	 * Locate the phy, if any.
+	 */
+	smc911x_phy_detect(dev);
+
+	/* Set default parameters */
+	lp->msg_enable = NETIF_MSG_LINK;
+	lp->ctl_rfduplx = 1;
+	lp->ctl_rspeed = 100;
+
+	/* Grab the IRQ */
+	retval = request_irq(dev->irq, &smc911x_interrupt, SA_SHIRQ, dev->name, dev);
+	if (retval)
+		goto err_out;
+
+	set_irq_type(dev->irq, IRQT_FALLING);
+
+#ifdef SMC_USE_DMA
+	lp->rxdma = SMC_DMA_REQUEST(dev, smc911x_rx_dma_irq);
+	lp->txdma = SMC_DMA_REQUEST(dev, smc911x_tx_dma_irq);
+	lp->rxdma_active = 0;
+	lp->txdma_active = 0;
+	dev->dma = lp->rxdma;
+#endif
+
+	retval = register_netdev(dev);
+	if (retval == 0) {
+		/* now, print out the card info, in a short format.. */
+		printk("%s: %s (rev %d) at %#lx IRQ %d",
+			dev->name, version_string, lp->revision,
+			dev->base_addr, dev->irq);
+
+#ifdef SMC_USE_DMA
+		if (lp->rxdma != -1)
+			printk(" RXDMA %d ", lp->rxdma);
+
+		if (lp->txdma != -1)
+			printk("TXDMA %d", lp->txdma);
+#endif
+		printk("\n");
+		if (!is_valid_ether_addr(dev->dev_addr)) {
+			printk("%s: Invalid ethernet MAC address. Please "
+					"set using ifconfig\n", dev->name);
+		} else {
+			/* Print the Ethernet address */
+			printk("%s: Ethernet addr: ", dev->name);
+			for (i = 0; i < 5; i++)
+				printk("%2.2x:", dev->dev_addr[i]);
+			printk("%2.2x\n", dev->dev_addr[5]);
+		}
+
+		if (lp->phy_type == 0) {
+			PRINTK("%s: No PHY found\n", dev->name);
+		} else if ((lp->phy_type & ~0xff) == LAN911X_INTERNAL_PHY_ID) {
+			PRINTK("%s: LAN911x Internal PHY\n", dev->name);
+		} else {
+			PRINTK("%s: External PHY 0x%08x\n", dev->name, lp->phy_type);
+		}
+	}
+
+err_out:
+#ifdef SMC_USE_DMA
+	if (retval) {
+		if (lp->rxdma != -1) {
+			SMC_DMA_FREE(dev, lp->rxdma);
+		}
+		if (lp->txdma != -1) {
+			SMC_DMA_FREE(dev, lp->txdma);
+		}
+	}
+#endif
+	return retval;
+}
+
+/*
+ * smc911x_drv_probe(struct platform_device *pdev)
+ *
+ *	  Output:
+ *	 0 --> there is a device
+ *	 anything else, error
+ */
+static int smc911x_drv_probe(struct platform_device *pdev)
+{
+	struct net_device *ndev;
+	struct resource *res;
+	unsigned int *addr;
+	int ret;
+
+	DBG(SMC_DEBUG_FUNC, "--> %s\n",  __FUNCTION__);
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	/*
+	 * Request the regions.
+	 */
+	if (!request_mem_region(res->start, SMC911X_IO_EXTENT, CARDNAME)) {
+		 ret = -EBUSY;
+		 goto out;
+	}
+
+	ndev = alloc_etherdev(sizeof(struct smc911x_local));
+	if (!ndev) {
+		printk("%s: could not allocate device.\n", CARDNAME);
+		ret = -ENOMEM;
+		goto release_1;
+	}
+	SET_MODULE_OWNER(ndev);
+	SET_NETDEV_DEV(ndev, &pdev->dev);
+
+	ndev->dma = (unsigned char)-1;
+	ndev->irq = platform_get_irq(pdev, 0);
+
+	addr = ioremap(res->start, SMC911X_IO_EXTENT);
+	if (!addr) {
+		ret = -ENOMEM;
+		goto release_both;
+	}
+
+	platform_set_drvdata(pdev, ndev);
+	ret = smc911x_probe(ndev, (unsigned long)addr);
+	if (ret != 0) {
+		platform_set_drvdata(pdev, NULL);
+		iounmap(addr);
+release_both:
+		free_netdev(ndev);
+release_1:
+		release_mem_region(res->start, SMC911X_IO_EXTENT);
+out:
+		printk("%s: not found (%d).\n", CARDNAME, ret);
+	}
+#ifdef SMC_USE_DMA
+	else {
+		struct smc911x_local *lp = netdev_priv(ndev);
+		lp->physaddr = res->start;
+		lp->dev = &pdev->dev;
+	}
+#endif
+
+	return ret;
+}
+
+static int smc911x_drv_remove(struct platform_device *pdev)
+{
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct resource *res;
+
+	DBG(SMC_DEBUG_FUNC, "--> %s\n", __FUNCTION__);
+	platform_set_drvdata(pdev, NULL);
+
+	unregister_netdev(ndev);
+
+	free_irq(ndev->irq, ndev);
+
+#ifdef SMC_USE_DMA
+	{
+		struct smc911x_local *lp = netdev_priv(ndev);
+		if (lp->rxdma != -1) {
+			SMC_DMA_FREE(ndev, lp->rxdma);
+		}
+		if (lp->txdma != -1) {
+			SMC_DMA_FREE(ndev, lp->txdma);
+		}
+	}
+#endif
+	iounmap((void *)ndev->base_addr);
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	release_mem_region(res->start, SMC911X_IO_EXTENT);
+
+	free_netdev(ndev);
+	return 0;
+}
+
+static int smc911x_drv_suspend(struct platform_device *dev, pm_message_t state)
+{
+	struct net_device *ndev = platform_get_drvdata(dev);
+
+	DBG(SMC_DEBUG_FUNC, "--> %s\n", __FUNCTION__);
+	if (ndev) {
+		unsigned long ioaddr = ndev->base_addr;
+
+		if (netif_running(ndev)) {
+			netif_device_detach(ndev);
+			smc911x_shutdown(ndev);
+#if POWER_DOWN
+			/* Set D2 - Energy detect only setting */
+			SMC_SET_PMT_CTRL(2<<12);
+#endif
+		}
+	}
+	return 0;
+}
+
+static int smc911x_drv_resume(struct platform_device *dev)
+{
+	struct net_device *ndev = platform_get_drvdata(dev);
+
+	DBG(SMC_DEBUG_FUNC, "--> %s\n", __FUNCTION__);
+	if (ndev) {
+		struct smc911x_local *lp = netdev_priv(ndev);
+
+		if (netif_running(ndev)) {
+			smc911x_reset(ndev);
+			smc911x_enable(ndev);
+			if (lp->phy_type != 0)
+				smc911x_phy_configure(ndev);
+			netif_device_attach(ndev);
+		}
+	}
+	return 0;
+}
+
+static struct platform_driver smc911x_driver = {
+	.probe		 = smc911x_drv_probe,
+	.remove	 = smc911x_drv_remove,
+	.suspend	 = smc911x_drv_suspend,
+	.resume	 = smc911x_drv_resume,
+	.driver	 = {
+		.name	 = CARDNAME,
+	},
+};
+
+static int __init smc911x_init(void)
+{
+	return platform_driver_register(&smc911x_driver);
+}
+
+static void __exit smc911x_cleanup(void)
+{
+	platform_driver_unregister(&smc911x_driver);
+}
+
+module_init(smc911x_init);
+module_exit(smc911x_cleanup);
diff --git a/drivers/net/smc911x.h b/drivers/net/smc911x.h
new file mode 100644
index 0000000..962a710
--- /dev/null
+++ b/drivers/net/smc911x.h
@@ -0,0 +1,835 @@
+/*------------------------------------------------------------------------
+ . smc911x.h - macros for SMSC's LAN911{5,6,7,8} single-chip Ethernet device.
+ .
+ . Copyright (C) 2005 Sensoria Corp.
+ . Derived from the unified SMC91x driver by Nicolas Pitre
+ .
+ . This program is free software; you can redistribute it and/or modify
+ . it under the terms of the GNU General Public License as published by
+ . the Free Software Foundation; either version 2 of the License, or
+ . (at your option) any later version.
+ .
+ . This program is distributed in the hope that it will be useful,
+ . but WITHOUT ANY WARRANTY; without even the implied warranty of
+ . MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ . GNU General Public License for more details.
+ .
+ . You should have received a copy of the GNU General Public License
+ . along with this program; if not, write to the Free Software
+ . Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ .
+ . Information contained in this file was obtained from the LAN9118
+ . manual from SMC.  To get a copy, if you really want one, you can find
+ . information under www.smsc.com.
+ .
+ . Authors
+ .	 Dustin McIntire		 <dustin@sensoria.com>
+ .
+ ---------------------------------------------------------------------------*/
+#ifndef _SMC911X_H_
+#define _SMC911X_H_
+
+/*
+ * Use the DMA feature on PXA chips
+ */
+#ifdef CONFIG_ARCH_PXA
+  #define SMC_USE_PXA_DMA	1
+  #define SMC_USE_16BIT		0
+  #define SMC_USE_32BIT		1
+#endif
+
+
+/*
+ * Define the bus width specific IO macros
+ */
+
+#if	SMC_USE_16BIT
+#define SMC_inb(a, r)			 readb((a) + (r))
+#define SMC_inw(a, r)			 readw((a) + (r))
+#define SMC_inl(a, r)			 ((SMC_inw(a, r) & 0xFFFF)+(SMC_inw(a+2, r)<<16))
+#define SMC_outb(v, a, r)		 writeb(v, (a) + (r))
+#define SMC_outw(v, a, r)		 writew(v, (a) + (r))
+#define SMC_outl(v, a, r) 			 \
+	do{					 \
+		 writel(v & 0xFFFF, (a) + (r));	 \
+		 writel(v >> 16, (a) + (r) + 2); \
+	 } while (0)
+#define SMC_insl(a, r, p, l)	 readsw((short*)((a) + (r)), p, l*2)
+#define SMC_outsl(a, r, p, l)	 writesw((short*)((a) + (r)), p, l*2)
+
+#elif	SMC_USE_32BIT
+#define SMC_inb(a, r)		 readb((a) + (r))
+#define SMC_inw(a, r)		 readw((a) + (r))
+#define SMC_inl(a, r)		 readl((a) + (r))
+#define SMC_outb(v, a, r)	 writeb(v, (a) + (r))
+#define SMC_outl(v, a, r)	 writel(v, (a) + (r))
+#define SMC_insl(a, r, p, l)	 readsl((int*)((a) + (r)), p, l)
+#define SMC_outsl(a, r, p, l)	 writesl((int*)((a) + (r)), p, l)
+
+#endif /* SMC_USE_16BIT */
+
+
+
+#if	 SMC_USE_PXA_DMA
+#define SMC_USE_DMA
+
+/*
+ * Define the request and free functions
+ * These are unfortunately architecture specific as no generic allocation
+ * mechanism exists
+ */
+#define SMC_DMA_REQUEST(dev, handler) \
+	 pxa_request_dma(dev->name, DMA_PRIO_LOW, handler, dev)
+
+#define SMC_DMA_FREE(dev, dma) \
+	 pxa_free_dma(dma)
+
+#define SMC_DMA_ACK_IRQ(dev, dma)					\
+{									\
+	if (DCSR(dma) & DCSR_BUSERR) {					\
+		printk("%s: DMA %d bus error!\n", dev->name, dma);	\
+	}								\
+	DCSR(dma) = DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR;		\
+}
+
+/*
+ * Use a DMA for RX and TX packets.
+ */
+#include <linux/dma-mapping.h>
+#include <asm/dma.h>
+#include <asm/arch/pxa-regs.h>
+
+static dma_addr_t rx_dmabuf, tx_dmabuf;
+static int rx_dmalen, tx_dmalen;
+
+#ifdef SMC_insl
+#undef SMC_insl
+#define SMC_insl(a, r, p, l) \
+	smc_pxa_dma_insl(lp->dev, a, lp->physaddr, r, lp->rxdma, p, l)
+
+static inline void
+smc_pxa_dma_insl(struct device *dev, u_long ioaddr, u_long physaddr,
+		int reg, int dma, u_char *buf, int len)
+{
+	/* 64 bit alignment is required for memory to memory DMA */
+	if ((long)buf & 4) {
+		*((u32 *)buf) = SMC_inl(ioaddr, reg);
+		buf += 4;
+		len--;
+	}
+
+	len *= 4;
+	rx_dmabuf = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
+	rx_dmalen = len;
+	DCSR(dma) = DCSR_NODESC;
+	DTADR(dma) = rx_dmabuf;
+	DSADR(dma) = physaddr + reg;
+	DCMD(dma) = (DCMD_INCTRGADDR | DCMD_BURST32 |
+		DCMD_WIDTH4 | DCMD_ENDIRQEN | (DCMD_LENGTH & rx_dmalen));
+	DCSR(dma) = DCSR_NODESC | DCSR_RUN;
+}
+#endif
+
+#ifdef SMC_insw
+#undef SMC_insw
+#define SMC_insw(a, r, p, l) \
+	smc_pxa_dma_insw(lp->dev, a, lp->physaddr, r, lp->rxdma, p, l)
+
+static inline void
+smc_pxa_dma_insw(struct device *dev, u_long ioaddr, u_long physaddr,
+		int reg, int dma, u_char *buf, int len)
+{
+	/* 64 bit alignment is required for memory to memory DMA */
+	while ((long)buf & 6) {
+		*((u16 *)buf) = SMC_inw(ioaddr, reg);
+		buf += 2;
+		len--;
+	}
+
+	len *= 2;
+	rx_dmabuf = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
+	rx_dmalen = len;
+	DCSR(dma) = DCSR_NODESC;
+	DTADR(dma) = rx_dmabuf;
+	DSADR(dma) = physaddr + reg;
+	DCMD(dma) = (DCMD_INCTRGADDR | DCMD_BURST32 |
+		DCMD_WIDTH2 | DCMD_ENDIRQEN | (DCMD_LENGTH & rx_dmalen));
+	DCSR(dma) = DCSR_NODESC | DCSR_RUN;
+}
+#endif
+
+#ifdef SMC_outsl
+#undef SMC_outsl
+#define SMC_outsl(a, r, p, l) \
+	 smc_pxa_dma_outsl(lp->dev, a, lp->physaddr, r, lp->txdma, p, l)
+
+static inline void
+smc_pxa_dma_outsl(struct device *dev, u_long ioaddr, u_long physaddr,
+		int reg, int dma, u_char *buf, int len)
+{
+	/* 64 bit alignment is required for memory to memory DMA */
+	if ((long)buf & 4) {
+		SMC_outl(*((u32 *)buf), ioaddr, reg);
+		buf += 4;
+		len--;
+	}
+
+	len *= 4;
+	tx_dmabuf = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
+	tx_dmalen = len;
+	DCSR(dma) = DCSR_NODESC;
+	DSADR(dma) = tx_dmabuf;
+	DTADR(dma) = physaddr + reg;
+	DCMD(dma) = (DCMD_INCSRCADDR | DCMD_BURST32 |
+		DCMD_WIDTH4 | DCMD_ENDIRQEN | (DCMD_LENGTH & tx_dmalen));
+	DCSR(dma) = DCSR_NODESC | DCSR_RUN;
+}
+#endif
+
+#ifdef SMC_outsw
+#undef SMC_outsw
+#define SMC_outsw(a, r, p, l) \
+	smc_pxa_dma_outsw(lp->dev, a, lp->physaddr, r, lp->txdma, p, l)
+
+static inline void
+smc_pxa_dma_outsw(struct device *dev, u_long ioaddr, u_long physaddr,
+		  int reg, int dma, u_char *buf, int len)
+{
+	/* 64 bit alignment is required for memory to memory DMA */
+	while ((long)buf & 6) {
+		SMC_outw(*((u16 *)buf), ioaddr, reg);
+		buf += 2;
+		len--;
+	}
+
+	len *= 2;
+	tx_dmabuf = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
+	tx_dmalen = len;
+	DCSR(dma) = DCSR_NODESC;
+	DSADR(dma) = tx_dmabuf;
+	DTADR(dma) = physaddr + reg;
+	DCMD(dma) = (DCMD_INCSRCADDR | DCMD_BURST32 |
+		DCMD_WIDTH2 | DCMD_ENDIRQEN | (DCMD_LENGTH & tx_dmalen));
+	DCSR(dma) = DCSR_NODESC | DCSR_RUN;
+}
+#endif
+
+#endif	 /* SMC_USE_PXA_DMA */
+
+
+/* Chip Parameters and Register Definitions */
+
+#define SMC911X_TX_FIFO_LOW_THRESHOLD	(1536*2)
+
+#define SMC911X_IO_EXTENT	 0x100
+
+#define SMC911X_EEPROM_LEN	 7
+
+/* Below are the register offsets and bit definitions
+ * of the Lan911x memory space
+ */
+#define RX_DATA_FIFO		 (0x00)
+
+#define TX_DATA_FIFO		 (0x20)
+#define	TX_CMD_A_INT_ON_COMP_		(0x80000000)
+#define	TX_CMD_A_INT_BUF_END_ALGN_	(0x03000000)
+#define	TX_CMD_A_INT_4_BYTE_ALGN_	(0x00000000)
+#define	TX_CMD_A_INT_16_BYTE_ALGN_	(0x01000000)
+#define	TX_CMD_A_INT_32_BYTE_ALGN_	(0x02000000)
+#define	TX_CMD_A_INT_DATA_OFFSET_	(0x001F0000)
+#define	TX_CMD_A_INT_FIRST_SEG_		(0x00002000)
+#define	TX_CMD_A_INT_LAST_SEG_		(0x00001000)
+#define	TX_CMD_A_BUF_SIZE_		(0x000007FF)
+#define	TX_CMD_B_PKT_TAG_		(0xFFFF0000)
+#define	TX_CMD_B_ADD_CRC_DISABLE_	(0x00002000)
+#define	TX_CMD_B_DISABLE_PADDING_	(0x00001000)
+#define	TX_CMD_B_PKT_BYTE_LENGTH_	(0x000007FF)
+
+#define RX_STATUS_FIFO		(0x40)
+#define	RX_STS_PKT_LEN_			(0x3FFF0000)
+#define	RX_STS_ES_			(0x00008000)
+#define	RX_STS_BCST_			(0x00002000)
+#define	RX_STS_LEN_ERR_			(0x00001000)
+#define	RX_STS_RUNT_ERR_		(0x00000800)
+#define	RX_STS_MCAST_			(0x00000400)
+#define	RX_STS_TOO_LONG_		(0x00000080)
+#define	RX_STS_COLL_			(0x00000040)
+#define	RX_STS_ETH_TYPE_		(0x00000020)
+#define	RX_STS_WDOG_TMT_		(0x00000010)
+#define	RX_STS_MII_ERR_			(0x00000008)
+#define	RX_STS_DRIBBLING_		(0x00000004)
+#define	RX_STS_CRC_ERR_			(0x00000002)
+#define RX_STATUS_FIFO_PEEK 	(0x44)
+#define TX_STATUS_FIFO		(0x48)
+#define	TX_STS_TAG_			(0xFFFF0000)
+#define	TX_STS_ES_			(0x00008000)
+#define	TX_STS_LOC_			(0x00000800)
+#define	TX_STS_NO_CARR_			(0x00000400)
+#define	TX_STS_LATE_COLL_		(0x00000200)
+#define	TX_STS_MANY_COLL_		(0x00000100)
+#define	TX_STS_COLL_CNT_		(0x00000078)
+#define	TX_STS_MANY_DEFER_		(0x00000004)
+#define	TX_STS_UNDERRUN_		(0x00000002)
+#define	TX_STS_DEFERRED_		(0x00000001)
+#define TX_STATUS_FIFO_PEEK	(0x4C)
+#define ID_REV			(0x50)
+#define	ID_REV_CHIP_ID_			(0xFFFF0000)  /* RO */
+#define	ID_REV_REV_ID_			(0x0000FFFF)  /* RO */
+
+#define INT_CFG			(0x54)
+#define	INT_CFG_INT_DEAS_		(0xFF000000)  /* R/W */
+#define	INT_CFG_INT_DEAS_CLR_		(0x00004000)
+#define	INT_CFG_INT_DEAS_STS_		(0x00002000)
+#define	INT_CFG_IRQ_INT_		(0x00001000)  /* RO */
+#define	INT_CFG_IRQ_EN_			(0x00000100)  /* R/W */
+#define	INT_CFG_IRQ_POL_		(0x00000010)  /* R/W Not Affected by SW Reset */
+#define	INT_CFG_IRQ_TYPE_		(0x00000001)  /* R/W Not Affected by SW Reset */
+
+#define INT_STS			(0x58)
+#define	INT_STS_SW_INT_			(0x80000000)  /* R/WC */
+#define	INT_STS_TXSTOP_INT_		(0x02000000)  /* R/WC */
+#define	INT_STS_RXSTOP_INT_		(0x01000000)  /* R/WC */
+#define	INT_STS_RXDFH_INT_		(0x00800000)  /* R/WC */
+#define	INT_STS_RXDF_INT_		(0x00400000)  /* R/WC */
+#define	INT_STS_TX_IOC_			(0x00200000)  /* R/WC */
+#define	INT_STS_RXD_INT_		(0x00100000)  /* R/WC */
+#define	INT_STS_GPT_INT_		(0x00080000)  /* R/WC */
+#define	INT_STS_PHY_INT_		(0x00040000)  /* RO */
+#define	INT_STS_PME_INT_		(0x00020000)  /* R/WC */
+#define	INT_STS_TXSO_			(0x00010000)  /* R/WC */
+#define	INT_STS_RWT_			(0x00008000)  /* R/WC */
+#define	INT_STS_RXE_			(0x00004000)  /* R/WC */
+#define	INT_STS_TXE_			(0x00002000)  /* R/WC */
+//#define	INT_STS_ERX_		(0x00001000)  /* R/WC */
+#define	INT_STS_TDFU_			(0x00000800)  /* R/WC */
+#define	INT_STS_TDFO_			(0x00000400)  /* R/WC */
+#define	INT_STS_TDFA_			(0x00000200)  /* R/WC */
+#define	INT_STS_TSFF_			(0x00000100)  /* R/WC */
+#define	INT_STS_TSFL_			(0x00000080)  /* R/WC */
+//#define	INT_STS_RXDF_		(0x00000040)  /* R/WC */
+#define	INT_STS_RDFO_			(0x00000040)  /* R/WC */
+#define	INT_STS_RDFL_			(0x00000020)  /* R/WC */
+#define	INT_STS_RSFF_			(0x00000010)  /* R/WC */
+#define	INT_STS_RSFL_			(0x00000008)  /* R/WC */
+#define	INT_STS_GPIO2_INT_		(0x00000004)  /* R/WC */
+#define	INT_STS_GPIO1_INT_		(0x00000002)  /* R/WC */
+#define	INT_STS_GPIO0_INT_		(0x00000001)  /* R/WC */
+
+#define INT_EN			(0x5C)
+#define	INT_EN_SW_INT_EN_		(0x80000000)  /* R/W */
+#define	INT_EN_TXSTOP_INT_EN_		(0x02000000)  /* R/W */
+#define	INT_EN_RXSTOP_INT_EN_		(0x01000000)  /* R/W */
+#define	INT_EN_RXDFH_INT_EN_		(0x00800000)  /* R/W */
+//#define	INT_EN_RXDF_INT_EN_		(0x00400000)  /* R/W */
+#define	INT_EN_TIOC_INT_EN_		(0x00200000)  /* R/W */
+#define	INT_EN_RXD_INT_EN_		(0x00100000)  /* R/W */
+#define	INT_EN_GPT_INT_EN_		(0x00080000)  /* R/W */
+#define	INT_EN_PHY_INT_EN_		(0x00040000)  /* R/W */
+#define	INT_EN_PME_INT_EN_		(0x00020000)  /* R/W */
+#define	INT_EN_TXSO_EN_			(0x00010000)  /* R/W */
+#define	INT_EN_RWT_EN_			(0x00008000)  /* R/W */
+#define	INT_EN_RXE_EN_			(0x00004000)  /* R/W */
+#define	INT_EN_TXE_EN_			(0x00002000)  /* R/W */
+//#define	INT_EN_ERX_EN_			(0x00001000)  /* R/W */
+#define	INT_EN_TDFU_EN_			(0x00000800)  /* R/W */
+#define	INT_EN_TDFO_EN_			(0x00000400)  /* R/W */
+#define	INT_EN_TDFA_EN_			(0x00000200)  /* R/W */
+#define	INT_EN_TSFF_EN_			(0x00000100)  /* R/W */
+#define	INT_EN_TSFL_EN_			(0x00000080)  /* R/W */
+//#define	INT_EN_RXDF_EN_			(0x00000040)  /* R/W */
+#define	INT_EN_RDFO_EN_			(0x00000040)  /* R/W */
+#define	INT_EN_RDFL_EN_			(0x00000020)  /* R/W */
+#define	INT_EN_RSFF_EN_			(0x00000010)  /* R/W */
+#define	INT_EN_RSFL_EN_			(0x00000008)  /* R/W */
+#define	INT_EN_GPIO2_INT_		(0x00000004)  /* R/W */
+#define	INT_EN_GPIO1_INT_		(0x00000002)  /* R/W */
+#define	INT_EN_GPIO0_INT_		(0x00000001)  /* R/W */
+
+#define BYTE_TEST		(0x64)
+#define FIFO_INT		(0x68)
+#define	FIFO_INT_TX_AVAIL_LEVEL_	(0xFF000000)  /* R/W */
+#define	FIFO_INT_TX_STS_LEVEL_		(0x00FF0000)  /* R/W */
+#define	FIFO_INT_RX_AVAIL_LEVEL_	(0x0000FF00)  /* R/W */
+#define	FIFO_INT_RX_STS_LEVEL_		(0x000000FF)  /* R/W */
+
+#define RX_CFG			(0x6C)
+#define	RX_CFG_RX_END_ALGN_		(0xC0000000)  /* R/W */
+#define		RX_CFG_RX_END_ALGN4_		(0x00000000)  /* R/W */
+#define		RX_CFG_RX_END_ALGN16_		(0x40000000)  /* R/W */
+#define		RX_CFG_RX_END_ALGN32_		(0x80000000)  /* R/W */
+#define	RX_CFG_RX_DMA_CNT_		(0x0FFF0000)  /* R/W */
+#define	RX_CFG_RX_DUMP_			(0x00008000)  /* R/W */
+#define	RX_CFG_RXDOFF_			(0x00001F00)  /* R/W */
+//#define	RX_CFG_RXBAD_			(0x00000001)  /* R/W */
+
+#define TX_CFG			(0x70)
+//#define	TX_CFG_TX_DMA_LVL_		(0xE0000000)	 /* R/W */
+//#define	TX_CFG_TX_DMA_CNT_		(0x0FFF0000)	 /* R/W Self Clearing */
+#define	TX_CFG_TXS_DUMP_		(0x00008000)  /* Self Clearing */
+#define	TX_CFG_TXD_DUMP_		(0x00004000)  /* Self Clearing */
+#define	TX_CFG_TXSAO_			(0x00000004)  /* R/W */
+#define	TX_CFG_TX_ON_			(0x00000002)  /* R/W */
+#define	TX_CFG_STOP_TX_			(0x00000001)  /* Self Clearing */
+
+#define HW_CFG			(0x74)
+#define	HW_CFG_TTM_			(0x00200000)  /* R/W */
+#define	HW_CFG_SF_			(0x00100000)  /* R/W */
+#define	HW_CFG_TX_FIF_SZ_		(0x000F0000)  /* R/W */
+#define	HW_CFG_TR_			(0x00003000)  /* R/W */
+#define	HW_CFG_PHY_CLK_SEL_		(0x00000060)  /* R/W */
+#define		 HW_CFG_PHY_CLK_SEL_INT_PHY_ 	(0x00000000) /* R/W */
+#define		 HW_CFG_PHY_CLK_SEL_EXT_PHY_ 	(0x00000020) /* R/W */
+#define		 HW_CFG_PHY_CLK_SEL_CLK_DIS_ 	(0x00000040) /* R/W */
+#define	HW_CFG_SMI_SEL_			(0x00000010)  /* R/W */
+#define	HW_CFG_EXT_PHY_DET_		(0x00000008)  /* RO */
+#define	HW_CFG_EXT_PHY_EN_		(0x00000004)  /* R/W */
+#define	HW_CFG_32_16_BIT_MODE_		(0x00000004)  /* RO */
+#define	HW_CFG_SRST_TO_			(0x00000002)  /* RO */
+#define	HW_CFG_SRST_			(0x00000001)  /* Self Clearing */
+
+#define RX_DP_CTRL		(0x78)
+#define	RX_DP_CTRL_RX_FFWD_		(0x80000000)  /* R/W */
+#define	RX_DP_CTRL_FFWD_BUSY_		(0x80000000)  /* RO */
+
+#define RX_FIFO_INF		(0x7C)
+#define	 RX_FIFO_INF_RXSUSED_		(0x00FF0000)  /* RO */
+#define	 RX_FIFO_INF_RXDUSED_		(0x0000FFFF)  /* RO */
+
+#define TX_FIFO_INF		(0x80)
+#define	TX_FIFO_INF_TSUSED_		(0x00FF0000)  /* RO */
+#define	TX_FIFO_INF_TDFREE_		(0x0000FFFF)  /* RO */
+
+#define PMT_CTRL		(0x84)
+#define	PMT_CTRL_PM_MODE_		(0x00003000)  /* Self Clearing */
+#define	PMT_CTRL_PHY_RST_		(0x00000400)  /* Self Clearing */
+#define	PMT_CTRL_WOL_EN_		(0x00000200)  /* R/W */
+#define	PMT_CTRL_ED_EN_			(0x00000100)  /* R/W */
+#define	PMT_CTRL_PME_TYPE_		(0x00000040)  /* R/W Not Affected by SW Reset */
+#define	PMT_CTRL_WUPS_			(0x00000030)  /* R/WC */
+#define		PMT_CTRL_WUPS_NOWAKE_		(0x00000000)  /* R/WC */
+#define		PMT_CTRL_WUPS_ED_		(0x00000010)  /* R/WC */
+#define		PMT_CTRL_WUPS_WOL_		(0x00000020)  /* R/WC */
+#define		PMT_CTRL_WUPS_MULTI_		(0x00000030)  /* R/WC */
+#define	PMT_CTRL_PME_IND_		(0x00000008)  /* R/W */
+#define	PMT_CTRL_PME_POL_		(0x00000004)  /* R/W */
+#define	PMT_CTRL_PME_EN_		(0x00000002)  /* R/W Not Affected by SW Reset */
+#define	PMT_CTRL_READY_			(0x00000001)  /* RO */
+
+#define GPIO_CFG		(0x88)
+#define	GPIO_CFG_LED3_EN_		(0x40000000)  /* R/W */
+#define	GPIO_CFG_LED2_EN_		(0x20000000)  /* R/W */
+#define	GPIO_CFG_LED1_EN_		(0x10000000)  /* R/W */
+#define	GPIO_CFG_GPIO2_INT_POL_		(0x04000000)  /* R/W */
+#define	GPIO_CFG_GPIO1_INT_POL_		(0x02000000)  /* R/W */
+#define	GPIO_CFG_GPIO0_INT_POL_		(0x01000000)  /* R/W */
+#define	GPIO_CFG_EEPR_EN_		(0x00700000)  /* R/W */
+#define	GPIO_CFG_GPIOBUF2_		(0x00040000)  /* R/W */
+#define	GPIO_CFG_GPIOBUF1_		(0x00020000)  /* R/W */
+#define	GPIO_CFG_GPIOBUF0_		(0x00010000)  /* R/W */
+#define	GPIO_CFG_GPIODIR2_		(0x00000400)  /* R/W */
+#define	GPIO_CFG_GPIODIR1_		(0x00000200)  /* R/W */
+#define	GPIO_CFG_GPIODIR0_		(0x00000100)  /* R/W */
+#define	GPIO_CFG_GPIOD4_		(0x00000010)  /* R/W */
+#define	GPIO_CFG_GPIOD3_		(0x00000008)  /* R/W */
+#define	GPIO_CFG_GPIOD2_		(0x00000004)  /* R/W */
+#define	GPIO_CFG_GPIOD1_		(0x00000002)  /* R/W */
+#define	GPIO_CFG_GPIOD0_		(0x00000001)  /* R/W */
+
+#define GPT_CFG			(0x8C)
+#define	GPT_CFG_TIMER_EN_		(0x20000000)  /* R/W */
+#define	GPT_CFG_GPT_LOAD_		(0x0000FFFF)  /* R/W */
+
+#define GPT_CNT			(0x90)
+#define	GPT_CNT_GPT_CNT_		(0x0000FFFF)  /* RO */
+
+#define ENDIAN			(0x98)
+#define FREE_RUN		(0x9C)
+#define RX_DROP			(0xA0)
+#define MAC_CSR_CMD		(0xA4)
+#define	 MAC_CSR_CMD_CSR_BUSY_		(0x80000000)  /* Self Clearing */
+#define	 MAC_CSR_CMD_R_NOT_W_		(0x40000000)  /* R/W */
+#define	 MAC_CSR_CMD_CSR_ADDR_		(0x000000FF)  /* R/W */
+
+#define MAC_CSR_DATA		(0xA8)
+#define AFC_CFG			(0xAC)
+#define		AFC_CFG_AFC_HI_			(0x00FF0000)  /* R/W */
+#define		AFC_CFG_AFC_LO_			(0x0000FF00)  /* R/W */
+#define		AFC_CFG_BACK_DUR_		(0x000000F0)  /* R/W */
+#define		AFC_CFG_FCMULT_			(0x00000008)  /* R/W */
+#define		AFC_CFG_FCBRD_			(0x00000004)  /* R/W */
+#define		AFC_CFG_FCADD_			(0x00000002)  /* R/W */
+#define		AFC_CFG_FCANY_			(0x00000001)  /* R/W */
+
+#define E2P_CMD			(0xB0)
+#define	E2P_CMD_EPC_BUSY_		(0x80000000)  /* Self Clearing */
+#define	E2P_CMD_EPC_CMD_			(0x70000000)  /* R/W */
+#define		E2P_CMD_EPC_CMD_READ_		(0x00000000)  /* R/W */
+#define		E2P_CMD_EPC_CMD_EWDS_		(0x10000000)  /* R/W */
+#define		E2P_CMD_EPC_CMD_EWEN_		(0x20000000)  /* R/W */
+#define		E2P_CMD_EPC_CMD_WRITE_		(0x30000000)  /* R/W */
+#define		E2P_CMD_EPC_CMD_WRAL_		(0x40000000)  /* R/W */
+#define		E2P_CMD_EPC_CMD_ERASE_		(0x50000000)  /* R/W */
+#define		E2P_CMD_EPC_CMD_ERAL_		(0x60000000)  /* R/W */
+#define		E2P_CMD_EPC_CMD_RELOAD_		(0x70000000)  /* R/W */
+#define	E2P_CMD_EPC_TIMEOUT_		(0x00000200)  /* RO */
+#define	E2P_CMD_MAC_ADDR_LOADED_	(0x00000100)  /* RO */
+#define	E2P_CMD_EPC_ADDR_		(0x000000FF)  /* R/W */
+
+#define E2P_DATA		(0xB4)
+#define	E2P_DATA_EEPROM_DATA_		(0x000000FF)  /* R/W */
+/* end of LAN register offsets and bit definitions */
+
+/*
+ ****************************************************************************
+ ****************************************************************************
+ * MAC Control and Status Register (Indirect Address)
+ * Offset (through the MAC_CSR CMD and DATA port)
+ ****************************************************************************
+ ****************************************************************************
+ *
+ */
+#define MAC_CR			(0x01)  /* R/W */
+
+/* MAC_CR - MAC Control Register */
+#define MAC_CR_RXALL_			(0x80000000)
+// TODO: delete this bit? It is not described in the data sheet.
+#define MAC_CR_HBDIS_			(0x10000000)
+#define MAC_CR_RCVOWN_			(0x00800000)
+#define MAC_CR_LOOPBK_			(0x00200000)
+#define MAC_CR_FDPX_			(0x00100000)
+#define MAC_CR_MCPAS_			(0x00080000)
+#define MAC_CR_PRMS_			(0x00040000)
+#define MAC_CR_INVFILT_			(0x00020000)
+#define MAC_CR_PASSBAD_			(0x00010000)
+#define MAC_CR_HFILT_			(0x00008000)
+#define MAC_CR_HPFILT_			(0x00002000)
+#define MAC_CR_LCOLL_			(0x00001000)
+#define MAC_CR_BCAST_			(0x00000800)
+#define MAC_CR_DISRTY_			(0x00000400)
+#define MAC_CR_PADSTR_			(0x00000100)
+#define MAC_CR_BOLMT_MASK_		(0x000000C0)
+#define MAC_CR_DFCHK_			(0x00000020)
+#define MAC_CR_TXEN_			(0x00000008)
+#define MAC_CR_RXEN_			(0x00000004)
+
+#define ADDRH			(0x02)	  /* R/W mask 0x0000FFFFUL */
+#define ADDRL			(0x03)	  /* R/W mask 0xFFFFFFFFUL */
+#define HASHH			(0x04)	  /* R/W */
+#define HASHL			(0x05)	  /* R/W */
+
+#define MII_ACC			(0x06)	  /* R/W */
+#define MII_ACC_PHY_ADDR_		(0x0000F800)
+#define MII_ACC_MIIRINDA_		(0x000007C0)
+#define MII_ACC_MII_WRITE_		(0x00000002)
+#define MII_ACC_MII_BUSY_		(0x00000001)
+
+#define MII_DATA		(0x07)	  /* R/W mask 0x0000FFFFUL */
+
+#define FLOW			(0x08)	  /* R/W */
+#define FLOW_FCPT_			(0xFFFF0000)
+#define FLOW_FCPASS_			(0x00000004)
+#define FLOW_FCEN_			(0x00000002)
+#define FLOW_FCBSY_			(0x00000001)
+
+#define VLAN1			(0x09)	  /* R/W mask 0x0000FFFFUL */
+#define VLAN1_VTI1_			(0x0000ffff)
+
+#define VLAN2			(0x0A)	  /* R/W mask 0x0000FFFFUL */
+#define VLAN2_VTI2_			(0x0000ffff)
+
+#define WUFF			(0x0B)	  /* WO */
+
+#define WUCSR			(0x0C)	  /* R/W */
+#define WUCSR_GUE_			(0x00000200)
+#define WUCSR_WUFR_			(0x00000040)
+#define WUCSR_MPR_			(0x00000020)
+#define WUCSR_WAKE_EN_			(0x00000004)
+#define WUCSR_MPEN_			(0x00000002)
+
+/*
+ ****************************************************************************
+ * Chip Specific MII Defines
+ ****************************************************************************
+ *
+ * Phy register offsets and bit definitions
+ *
+ */
+
+#define PHY_MODE_CTRL_STS	((u32)17)	/* Mode Control/Status Register */
+//#define MODE_CTRL_STS_FASTRIP_	  ((u16)0x4000)
+#define MODE_CTRL_STS_EDPWRDOWN_	 ((u16)0x2000)
+//#define MODE_CTRL_STS_LOWSQEN_	   ((u16)0x0800)
+//#define MODE_CTRL_STS_MDPREBP_	   ((u16)0x0400)
+//#define MODE_CTRL_STS_FARLOOPBACK_  ((u16)0x0200)
+//#define MODE_CTRL_STS_FASTEST_	   ((u16)0x0100)
+//#define MODE_CTRL_STS_REFCLKEN_	   ((u16)0x0010)
+//#define MODE_CTRL_STS_PHYADBP_	   ((u16)0x0008)
+//#define MODE_CTRL_STS_FORCE_G_LINK_ ((u16)0x0004)
+#define MODE_CTRL_STS_ENERGYON_	 	((u16)0x0002)
+
+#define PHY_INT_SRC			((u32)29)
+#define PHY_INT_SRC_ENERGY_ON_			((u16)0x0080)
+#define PHY_INT_SRC_ANEG_COMP_			((u16)0x0040)
+#define PHY_INT_SRC_REMOTE_FAULT_		((u16)0x0020)
+#define PHY_INT_SRC_LINK_DOWN_			((u16)0x0010)
+#define PHY_INT_SRC_ANEG_LP_ACK_		((u16)0x0008)
+#define PHY_INT_SRC_PAR_DET_FAULT_		((u16)0x0004)
+#define PHY_INT_SRC_ANEG_PGRX_			((u16)0x0002)
+
+#define PHY_INT_MASK			((u32)30)
+#define PHY_INT_MASK_ENERGY_ON_			((u16)0x0080)
+#define PHY_INT_MASK_ANEG_COMP_			((u16)0x0040)
+#define PHY_INT_MASK_REMOTE_FAULT_		((u16)0x0020)
+#define PHY_INT_MASK_LINK_DOWN_			((u16)0x0010)
+#define PHY_INT_MASK_ANEG_LP_ACK_		((u16)0x0008)
+#define PHY_INT_MASK_PAR_DET_FAULT_		((u16)0x0004)
+#define PHY_INT_MASK_ANEG_PGRX_			((u16)0x0002)
+
+#define PHY_SPECIAL			((u32)31)
+#define PHY_SPECIAL_ANEG_DONE_			((u16)0x1000)
+#define PHY_SPECIAL_RES_			((u16)0x0040)
+#define PHY_SPECIAL_RES_MASK_			((u16)0x0FE1)
+#define PHY_SPECIAL_SPD_			((u16)0x001C)
+#define PHY_SPECIAL_SPD_10HALF_			((u16)0x0004)
+#define PHY_SPECIAL_SPD_10FULL_			((u16)0x0014)
+#define PHY_SPECIAL_SPD_100HALF_		((u16)0x0008)
+#define PHY_SPECIAL_SPD_100FULL_		((u16)0x0018)
+
+#define LAN911X_INTERNAL_PHY_ID		(0x0007C000)
+
+/* Chip ID values */
+#define CHIP_9115	0x115
+#define CHIP_9116	0x116
+#define CHIP_9117	0x117
+#define CHIP_9118	0x118
+
+struct chip_id {
+	u16 id;
+	char *name;
+};
+
+static const struct chip_id chip_ids[] =  {
+	{ CHIP_9115, "LAN9115" },
+	{ CHIP_9116, "LAN9116" },
+	{ CHIP_9117, "LAN9117" },
+	{ CHIP_9118, "LAN9118" },
+	{ 0, NULL },
+};
+
+#define IS_REV_A(x)	((x & 0xFFFF)==0)
+
+/*
+ * Macros to abstract register access according to the data bus
+ * capabilities.  Please use those and not the in/out primitives.
+ */
+/* FIFO read/write macros */
+#define SMC_PUSH_DATA(p, l)	SMC_outsl( ioaddr, TX_DATA_FIFO, p, (l) >> 2 )
+#define SMC_PULL_DATA(p, l)	SMC_insl ( ioaddr, RX_DATA_FIFO, p, (l) >> 2 )
+#define SMC_SET_TX_FIFO(x) 	SMC_outl( x, ioaddr, TX_DATA_FIFO )
+#define SMC_GET_RX_FIFO()	SMC_inl( ioaddr, RX_DATA_FIFO )
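+
+/*
+ * Usage sketch for the FIFO macros above (illustrative only: as in the
+ * driver code, a local 'ioaddr' mapped to the chip registers must be in
+ * scope; 'buf' and 'len' stand for a hypothetical packet buffer and its
+ * length in bytes -- the macros themselves convert the byte count to
+ * 32-bit words):
+ *
+ *	SMC_PUSH_DATA(buf, len);	copies 'len' bytes into the TX data FIFO
+ *	SMC_PULL_DATA(buf, len);	copies 'len' bytes out of the RX data FIFO
+ */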
+
+
+/* I/O mapped register read/write macros */
+#define SMC_GET_TX_STS_FIFO()		SMC_inl( ioaddr, TX_STATUS_FIFO )
+#define SMC_GET_RX_STS_FIFO()		SMC_inl( ioaddr, RX_STATUS_FIFO )
+#define SMC_GET_RX_STS_FIFO_PEEK()	SMC_inl( ioaddr, RX_STATUS_FIFO_PEEK )
+#define SMC_GET_PN()			(SMC_inl( ioaddr, ID_REV ) >> 16)
+#define SMC_GET_REV()			(SMC_inl( ioaddr, ID_REV ) & 0xFFFF)
+#define SMC_GET_IRQ_CFG()		SMC_inl( ioaddr, INT_CFG )
+#define SMC_SET_IRQ_CFG(x)		SMC_outl( x, ioaddr, INT_CFG )
+#define SMC_GET_INT()			SMC_inl( ioaddr, INT_STS )
+#define SMC_ACK_INT(x)			SMC_outl( x, ioaddr, INT_STS )
+#define SMC_GET_INT_EN()		SMC_inl( ioaddr, INT_EN )
+#define SMC_SET_INT_EN(x)		SMC_outl( x, ioaddr, INT_EN )
+#define SMC_GET_BYTE_TEST()		SMC_inl( ioaddr, BYTE_TEST )
+#define SMC_SET_BYTE_TEST(x)		SMC_outl( x, ioaddr, BYTE_TEST )
+#define SMC_GET_FIFO_INT()		SMC_inl( ioaddr, FIFO_INT )
+#define SMC_SET_FIFO_INT(x)		SMC_outl( x, ioaddr, FIFO_INT )
+#define SMC_SET_FIFO_TDA(x)					\
+	do {							\
+		unsigned long __flags;				\
+		int __mask;					\
+		local_irq_save(__flags);			\
+		__mask = SMC_GET_FIFO_INT() & ~(0xFF<<24);	\
+		SMC_SET_FIFO_INT( __mask | (x)<<24 );		\
+		local_irq_restore(__flags);			\
+	} while (0)
+#define SMC_SET_FIFO_TSL(x)					\
+	do {							\
+		unsigned long __flags;				\
+		int __mask;					\
+		local_irq_save(__flags);			\
+		__mask = SMC_GET_FIFO_INT() & ~(0xFF<<16);	\
+		SMC_SET_FIFO_INT( __mask | (((x) & 0xFF)<<16));	\
+		local_irq_restore(__flags);			\
+	} while (0)
+#define SMC_SET_FIFO_RSA(x)					\
+	do {							\
+		unsigned long __flags;				\
+		int __mask;					\
+		local_irq_save(__flags);			\
+		__mask = SMC_GET_FIFO_INT() & ~(0xFF<<8);	\
+		SMC_SET_FIFO_INT( __mask | (((x) & 0xFF)<<8));	\
+		local_irq_restore(__flags);			\
+	} while (0)
+#define SMC_SET_FIFO_RSL(x)					\
+	do {							\
+		unsigned long __flags;				\
+		int __mask;					\
+		local_irq_save(__flags);			\
+		__mask = SMC_GET_FIFO_INT() & ~0xFF;		\
+		SMC_SET_FIFO_INT( __mask | ((x) & 0xFF));	\
+		local_irq_restore(__flags);			\
+	} while (0)
+#define SMC_GET_RX_CFG()		SMC_inl( ioaddr, RX_CFG )
+#define SMC_SET_RX_CFG(x)		SMC_outl( x, ioaddr, RX_CFG )
+#define SMC_GET_TX_CFG()		SMC_inl( ioaddr, TX_CFG )
+#define SMC_SET_TX_CFG(x)		SMC_outl( x, ioaddr, TX_CFG )
+#define SMC_GET_HW_CFG()		SMC_inl( ioaddr, HW_CFG )
+#define SMC_SET_HW_CFG(x)		SMC_outl( x, ioaddr, HW_CFG )
+#define SMC_GET_RX_DP_CTRL()		SMC_inl( ioaddr, RX_DP_CTRL )
+#define SMC_SET_RX_DP_CTRL(x)		SMC_outl( x, ioaddr, RX_DP_CTRL )
+#define SMC_GET_PMT_CTRL()		SMC_inl( ioaddr, PMT_CTRL )
+#define SMC_SET_PMT_CTRL(x)		SMC_outl( x, ioaddr, PMT_CTRL )
+#define SMC_GET_GPIO_CFG()		SMC_inl( ioaddr, GPIO_CFG )
+#define SMC_SET_GPIO_CFG(x)		SMC_outl( x, ioaddr, GPIO_CFG )
+#define SMC_GET_RX_FIFO_INF()		SMC_inl( ioaddr, RX_FIFO_INF )
+#define SMC_SET_RX_FIFO_INF(x)		SMC_outl( x, ioaddr, RX_FIFO_INF )
+#define SMC_GET_TX_FIFO_INF()		SMC_inl( ioaddr, TX_FIFO_INF )
+#define SMC_SET_TX_FIFO_INF(x)		SMC_outl( x, ioaddr, TX_FIFO_INF )
+#define SMC_GET_GPT_CFG()		SMC_inl( ioaddr, GPT_CFG )
+#define SMC_SET_GPT_CFG(x)		SMC_outl( x, ioaddr, GPT_CFG )
+#define SMC_GET_RX_DROP()		SMC_inl( ioaddr, RX_DROP )
+#define SMC_SET_RX_DROP(x)		SMC_outl( x, ioaddr, RX_DROP )
+#define SMC_GET_MAC_CMD()		SMC_inl( ioaddr, MAC_CSR_CMD )
+#define SMC_SET_MAC_CMD(x)		SMC_outl( x, ioaddr, MAC_CSR_CMD )
+#define SMC_GET_MAC_DATA()		SMC_inl( ioaddr, MAC_CSR_DATA )
+#define SMC_SET_MAC_DATA(x)		SMC_outl( x, ioaddr, MAC_CSR_DATA )
+#define SMC_GET_AFC_CFG()		SMC_inl( ioaddr, AFC_CFG )
+#define SMC_SET_AFC_CFG(x)		SMC_outl( x, ioaddr, AFC_CFG )
+#define SMC_GET_E2P_CMD()		SMC_inl( ioaddr, E2P_CMD )
+#define SMC_SET_E2P_CMD(x)		SMC_outl( x, ioaddr, E2P_CMD )
+#define SMC_GET_E2P_DATA()		SMC_inl( ioaddr, E2P_DATA )
+#define SMC_SET_E2P_DATA(x)		SMC_outl( x, ioaddr, E2P_DATA )
+
+/* MAC register read/write macros */
+#define SMC_GET_MAC_CSR(a,v)						\
+	do {								\
+		while (SMC_GET_MAC_CMD() & MAC_CSR_CMD_CSR_BUSY_);	\
+		SMC_SET_MAC_CMD(MAC_CSR_CMD_CSR_BUSY_ |			\
+			MAC_CSR_CMD_R_NOT_W_ | (a) );			\
+		while (SMC_GET_MAC_CMD() & MAC_CSR_CMD_CSR_BUSY_);	\
+		v = SMC_GET_MAC_DATA();					\
+	} while (0)
+#define SMC_SET_MAC_CSR(a,v)						\
+	do {								\
+		while (SMC_GET_MAC_CMD() & MAC_CSR_CMD_CSR_BUSY_);	\
+		SMC_SET_MAC_DATA(v);					\
+		SMC_SET_MAC_CMD(MAC_CSR_CMD_CSR_BUSY_ | (a) );		\
+		while (SMC_GET_MAC_CMD() & MAC_CSR_CMD_CSR_BUSY_);	\
+	} while (0)
+#define SMC_GET_MAC_CR(x)	SMC_GET_MAC_CSR( MAC_CR, x )
+#define SMC_SET_MAC_CR(x)	SMC_SET_MAC_CSR( MAC_CR, x )
+#define SMC_GET_ADDRH(x)	SMC_GET_MAC_CSR( ADDRH, x )
+#define SMC_SET_ADDRH(x)	SMC_SET_MAC_CSR( ADDRH, x )
+#define SMC_GET_ADDRL(x)	SMC_GET_MAC_CSR( ADDRL, x )
+#define SMC_SET_ADDRL(x)	SMC_SET_MAC_CSR( ADDRL, x )
+#define SMC_GET_HASHH(x)	SMC_GET_MAC_CSR( HASHH, x )
+#define SMC_SET_HASHH(x)	SMC_SET_MAC_CSR( HASHH, x )
+#define SMC_GET_HASHL(x)	SMC_GET_MAC_CSR( HASHL, x )
+#define SMC_SET_HASHL(x)	SMC_SET_MAC_CSR( HASHL, x )
+#define SMC_GET_MII_ACC(x)	SMC_GET_MAC_CSR( MII_ACC, x )
+#define SMC_SET_MII_ACC(x)	SMC_SET_MAC_CSR( MII_ACC, x )
+#define SMC_GET_MII_DATA(x)	SMC_GET_MAC_CSR( MII_DATA, x )
+#define SMC_SET_MII_DATA(x)	SMC_SET_MAC_CSR( MII_DATA, x )
+#define SMC_GET_FLOW(x)		SMC_GET_MAC_CSR( FLOW, x )
+#define SMC_SET_FLOW(x)		SMC_SET_MAC_CSR( FLOW, x )
+#define SMC_GET_VLAN1(x)	SMC_GET_MAC_CSR( VLAN1, x )
+#define SMC_SET_VLAN1(x)	SMC_SET_MAC_CSR( VLAN1, x )
+#define SMC_GET_VLAN2(x)	SMC_GET_MAC_CSR( VLAN2, x )
+#define SMC_SET_VLAN2(x)	SMC_SET_MAC_CSR( VLAN2, x )
+#define SMC_SET_WUFF(x)		SMC_SET_MAC_CSR( WUFF, x )
+#define SMC_GET_WUCSR(x)	SMC_GET_MAC_CSR( WUCSR, x )
+#define SMC_SET_WUCSR(x)	SMC_SET_MAC_CSR( WUCSR, x )
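+
+/*
+ * Usage sketch for the MAC CSR accessors (illustrative only; 'ioaddr'
+ * is assumed to be in scope as in the driver code).  The GET/SET macros
+ * hide the indirect protocol: spin until CSR_BUSY clears, write the
+ * command word with the register index, spin again, then transfer the
+ * value through MAC_CSR_DATA.  For example, to enable the transmitter
+ * and receiver:
+ *
+ *	unsigned int cr;
+ *	SMC_GET_MAC_CR(cr);
+ *	cr |= MAC_CR_TXEN_ | MAC_CR_RXEN_;
+ *	SMC_SET_MAC_CR(cr);
+ */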
+
+/* PHY register read/write macros */
+#define SMC_GET_MII(a,phy,v)					\
+	do {							\
+		u32 __v;					\
+		do {						\
+			SMC_GET_MII_ACC(__v);			\
+		} while ( __v & MII_ACC_MII_BUSY_ );		\
+		SMC_SET_MII_ACC( ((phy)<<11) | ((a)<<6) |	\
+			MII_ACC_MII_BUSY_);			\
+		do {						\
+			SMC_GET_MII_ACC(__v);			\
+		} while ( __v & MII_ACC_MII_BUSY_ );		\
+		SMC_GET_MII_DATA(v);				\
+	} while (0)
+#define SMC_SET_MII(a,phy,v)					\
+	do {							\
+		u32 __v;					\
+		do {						\
+			SMC_GET_MII_ACC(__v);			\
+		} while ( __v & MII_ACC_MII_BUSY_ );		\
+		SMC_SET_MII_DATA(v);				\
+		SMC_SET_MII_ACC( ((phy)<<11) | ((a)<<6) |	\
+			MII_ACC_MII_BUSY_	 |		\
+			MII_ACC_MII_WRITE_  );			\
+		do {						\
+			SMC_GET_MII_ACC(__v);			\
+		} while ( __v & MII_ACC_MII_BUSY_ );		\
+	} while (0)
+#define SMC_GET_PHY_BMCR(phy,x)		SMC_GET_MII( MII_BMCR, phy, x )
+#define SMC_SET_PHY_BMCR(phy,x)		SMC_SET_MII( MII_BMCR, phy, x )
+#define SMC_GET_PHY_BMSR(phy,x)		SMC_GET_MII( MII_BMSR, phy, x )
+#define SMC_GET_PHY_ID1(phy,x)		SMC_GET_MII( MII_PHYSID1, phy, x )
+#define SMC_GET_PHY_ID2(phy,x)		SMC_GET_MII( MII_PHYSID2, phy, x )
+#define SMC_GET_PHY_MII_ADV(phy,x)	SMC_GET_MII( MII_ADVERTISE, phy, x )
+#define SMC_SET_PHY_MII_ADV(phy,x)	SMC_SET_MII( MII_ADVERTISE, phy, x )
+#define SMC_GET_PHY_MII_LPA(phy,x)	SMC_GET_MII( MII_LPA, phy, x )
+#define SMC_SET_PHY_MII_LPA(phy,x)	SMC_SET_MII( MII_LPA, phy, x )
+#define SMC_GET_PHY_CTRL_STS(phy,x)	SMC_GET_MII( PHY_MODE_CTRL_STS, phy, x )
+#define SMC_SET_PHY_CTRL_STS(phy,x)	SMC_SET_MII( PHY_MODE_CTRL_STS, phy, x )
+#define SMC_GET_PHY_INT_SRC(phy,x)	SMC_GET_MII( PHY_INT_SRC, phy, x )
+#define SMC_SET_PHY_INT_SRC(phy,x)	SMC_SET_MII( PHY_INT_SRC, phy, x )
+#define SMC_GET_PHY_INT_MASK(phy,x)	SMC_GET_MII( PHY_INT_MASK, phy, x )
+#define SMC_SET_PHY_INT_MASK(phy,x)	SMC_SET_MII( PHY_INT_MASK, phy, x )
+#define SMC_GET_PHY_SPECIAL(phy,x)	SMC_GET_MII( PHY_SPECIAL, phy, x )
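+
+/*
+ * Usage sketch for the MII accessors (illustrative only; 'ioaddr' must
+ * be in scope and 'phy' is a hypothetical PHY address as found by the
+ * driver's probe).  Each transaction spins on MII_ACC_MII_BUSY_ before
+ * and after touching MII_ACC/MII_DATA.  For example, to restart
+ * autonegotiation via the standard BMCR register:
+ *
+ *	unsigned int bmcr;
+ *	SMC_GET_PHY_BMCR(phy, bmcr);
+ *	bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
+ *	SMC_SET_PHY_BMCR(phy, bmcr);
+ */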
+
+
+
+/* Misc read/write macros */
+
+#ifndef SMC_GET_MAC_ADDR
+#define SMC_GET_MAC_ADDR(addr)					\
+	do {							\
+		unsigned int __v;				\
+								\
+		SMC_GET_MAC_CSR(ADDRL, __v);			\
+		addr[0] = __v; addr[1] = __v >> 8;		\
+		addr[2] = __v >> 16; addr[3] = __v >> 24;	\
+		SMC_GET_MAC_CSR(ADDRH, __v);			\
+		addr[4] = __v; addr[5] = __v >> 8;		\
+	} while (0)
+#endif
+
+#define SMC_SET_MAC_ADDR(addr)					\
+	do {							\
+		 SMC_SET_MAC_CSR(ADDRL,				\
+				 addr[0] |			\
+				(addr[1] << 8) |		\
+				(addr[2] << 16) |		\
+				(addr[3] << 24));		\
+		 SMC_SET_MAC_CSR(ADDRH, addr[4]|(addr[5] << 8));\
+	} while (0)
+
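+/*
+ * Usage sketch (illustrative only): reading the station address into a
+ * six byte array, e.g. during probe, and writing it back:
+ *
+ *	unsigned char mac[6];
+ *	SMC_GET_MAC_ADDR(mac);
+ *	SMC_SET_MAC_ADDR(mac);
+ */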
+
+/* Issue an EEPROM command: 'cmd' is one of E2P_CMD_EPC_CMD_*, 'addr' the
+ * EEPROM byte address (within E2P_CMD_EPC_ADDR_).  Wait for any previous
+ * command to finish, kick off the new one, then wait for completion. */
+#define SMC_WRITE_EEPROM_CMD(cmd, addr)					\
+	do {								\
+		while (SMC_GET_E2P_CMD() & E2P_CMD_EPC_BUSY_);		\
+		SMC_SET_E2P_CMD(E2P_CMD_EPC_BUSY_ | (cmd) | (addr));	\
+		while (SMC_GET_E2P_CMD() & E2P_CMD_EPC_BUSY_);		\
+	} while (0)
+
+#endif	 /* _SMC911X_H_ */
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index e1be1af..f72a4f5 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -129,6 +129,24 @@
 #define SMC_insb(a, r, p, l)	readsb((a) + (r), p, (l))
 #define SMC_outsb(a, r, p, l)	writesb((a) + (r), p, (l))
 
+#elif	defined(CONFIG_MACH_LOGICPD_PXA270)
+
+#define SMC_CAN_USE_8BIT	0
+#define SMC_CAN_USE_16BIT	1
+#define SMC_CAN_USE_32BIT	0
+#define SMC_IO_SHIFT		0
+#define SMC_NOWAIT		1
+#define SMC_USE_PXA_DMA		1
+
+#define SMC_inb(a, r)		readb((a) + (r))
+#define SMC_inw(a, r)		readw((a) + (r))
+#define SMC_inl(a, r)		readl((a) + (r))
+#define SMC_outb(v, a, r)	writeb(v, (a) + (r))
+#define SMC_outw(v, a, r)	writew(v, (a) + (r))
+#define SMC_outl(v, a, r)	writel(v, (a) + (r))
+#define SMC_insw(a, r, p, l)	readsw((a) + (r), p, l)
+#define SMC_outsw(a, r, p, l)	writesw((a) + (r), p, l)
+
 #elif	defined(CONFIG_ARCH_INNOKOM) || \
 	defined(CONFIG_MACH_MAINSTONE) || \
 	defined(CONFIG_ARCH_PXA_IDP) || \
diff --git a/drivers/net/sungem_phy.c b/drivers/net/sungem_phy.c
index b2ddd5e..9282b4b 100644
--- a/drivers/net/sungem_phy.c
+++ b/drivers/net/sungem_phy.c
@@ -345,9 +345,9 @@
 
 static int bcm5461_enable_fiber(struct mii_phy* phy)
 {
-        phy_write(phy, MII_NCONFIG, 0xfc0c);
-        phy_write(phy, MII_BMCR, 0x4140);
-        phy_write(phy, MII_NCONFIG, 0xfc0b);
+	phy_write(phy, MII_NCONFIG, 0xfc0c);
+	phy_write(phy, MII_BMCR, 0x4140);
+	phy_write(phy, MII_NCONFIG, 0xfc0b);
 	phy_write(phy, MII_BMCR, 0x0140);
 
 	return 0;
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index e3dd144..5f743b9 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -227,12 +227,12 @@
 	SROMC0InfoLeaf		= 27,
 	MediaBlockMask		= 0x3f,
 	MediaCustomCSRs		= (1 << 6),
-	
+
 	/* PCIPM bits */
 	PM_Sleep		= (1 << 31),
 	PM_Snooze		= (1 << 30),
 	PM_Mask			= PM_Sleep | PM_Snooze,
-	
+
 	/* SIAStatus bits */
 	NWayState		= (1 << 14) | (1 << 13) | (1 << 12),
 	NWayRestart		= (1 << 12),
@@ -858,7 +858,7 @@
 			return;
 		cpu_relax();
 	}
-	
+
 	printk(KERN_WARNING "%s: timeout expired stopping DMA\n", de->dev->name);
 }
 
@@ -931,7 +931,7 @@
 		macmode |= FullDuplex;
 	else
 		macmode &= ~FullDuplex;
-	
+
 	if (netif_msg_link(de)) {
 		printk(KERN_INFO "%s: set link %s\n"
 		       KERN_INFO "%s:    mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n"
@@ -966,9 +966,9 @@
 	u32 status = dr32(SIAStatus);
 	unsigned int carrier;
 	unsigned long flags;
-	
+
 	carrier = (status & NetCxnErr) ? 0 : 1;
-		
+
 	if (carrier) {
 		if (de->media_type != DE_MEDIA_AUI && (status & LinkFailStatus))
 			goto no_link_yet;
@@ -985,7 +985,7 @@
 		return;
 	}
 
-	de_link_down(de);	
+	de_link_down(de);
 
 	if (de->media_lock)
 		return;
@@ -1039,7 +1039,7 @@
 			return 0;
 		break;
 	}
-	
+
 	return 1;
 }
 
@@ -1050,9 +1050,9 @@
 	u32 status = dr32(SIAStatus);
 	unsigned int carrier;
 	unsigned long flags;
-	
+
 	carrier = (status & NetCxnErr) ? 0 : 1;
-		
+
 	if (carrier) {
 		if ((de->media_type == DE_MEDIA_TP_AUTO ||
 		     de->media_type == DE_MEDIA_TP ||
@@ -1072,7 +1072,7 @@
 		return;
 	}
 
-	de_link_down(de);	
+	de_link_down(de);
 
 	/* if media type locked, don't switch media */
 	if (de->media_lock)
@@ -1124,7 +1124,7 @@
 		u32 next_states[] = { DE_MEDIA_AUI, DE_MEDIA_BNC, DE_MEDIA_TP_AUTO };
 		de_next_media(de, next_states, ARRAY_SIZE(next_states));
 	}
-	
+
 set_media:
 	spin_lock_irqsave(&de->lock, flags);
 	de_stop_rxtx(de);
@@ -1148,7 +1148,7 @@
 		mod_timer(&de->media_timer, jiffies + DE_TIMER_LINK);
 		return;
 	}
-	
+
 	BUG_ON(!(status & LinkFail));
 
 	if (netif_carrier_ok(de->dev)) {
@@ -1227,7 +1227,7 @@
 	int rc;
 
 	de_adapter_wake(de);
-	
+
 	macmode = dr32(MacMode) & ~MacModeClear;
 
 	rc = de_reset_mac(de);
@@ -1413,7 +1413,7 @@
 	netif_stop_queue(dev);
 	netif_carrier_off(dev);
 	spin_unlock_irqrestore(&de->lock, flags);
-	
+
 	free_irq(dev->irq, dev);
 
 	de_free_rings(de);
@@ -1441,7 +1441,7 @@
 
 	spin_unlock_irq(&de->lock);
 	enable_irq(dev->irq);
-		
+
 	/* Update the error counts. */
 	__de_get_stats(de);
 
@@ -1451,7 +1451,7 @@
 	de_init_rings(de);
 
 	de_init_hw(de);
-	
+
 	netif_wake_queue(dev);
 }
 
@@ -1459,7 +1459,7 @@
 {
 	int i;
 	u32 *rbuf = (u32 *)buf;
-	
+
 	/* read all CSRs */
 	for (i = 0; i < DE_NUM_REGS; i++)
 		rbuf[i] = dr32(i * 8);
@@ -1474,7 +1474,7 @@
 	ecmd->transceiver = XCVR_INTERNAL;
 	ecmd->phy_address = 0;
 	ecmd->advertising = de->media_advertise;
-	
+
 	switch (de->media_type) {
 	case DE_MEDIA_AUI:
 		ecmd->port = PORT_AUI;
@@ -1489,7 +1489,7 @@
 		ecmd->speed = SPEED_10;
 		break;
 	}
-	
+
 	if (dr32(MacMode) & FullDuplex)
 		ecmd->duplex = DUPLEX_FULL;
 	else
@@ -1529,7 +1529,7 @@
 	if (ecmd->autoneg == AUTONEG_ENABLE &&
 	    (!(ecmd->advertising & ADVERTISED_Autoneg)))
 		return -EINVAL;
-	
+
 	switch (ecmd->port) {
 	case PORT_AUI:
 		new_media = DE_MEDIA_AUI;
@@ -1554,22 +1554,22 @@
 			return -EINVAL;
 		break;
 	}
-	
+
 	media_lock = (ecmd->autoneg == AUTONEG_ENABLE) ? 0 : 1;
-	
+
 	if ((new_media == de->media_type) &&
 	    (media_lock == de->media_lock) &&
 	    (ecmd->advertising == de->media_advertise))
 		return 0; /* nothing to change */
-	    
+
 	de_link_down(de);
 	de_stop_rxtx(de);
-	
+
 	de->media_type = new_media;
 	de->media_lock = media_lock;
 	de->media_advertise = ecmd->advertising;
 	de_set_media(de);
-	
+
 	return 0;
 }
 
@@ -1817,7 +1817,7 @@
 	case 0x0204:  de->media_type = DE_MEDIA_TP_FD; break;
 	default: de->media_type = DE_MEDIA_TP_AUTO; break;
 	}
-	
+
 	if (netif_msg_probe(de))
 		printk(KERN_INFO "de%d: SROM leaf offset %u, default media %s\n",
 		       de->board_idx, ofs,
@@ -1886,7 +1886,7 @@
 				       de->media[idx].csr13,
 				       de->media[idx].csr14,
 				       de->media[idx].csr15);
-				       
+
 		} else if (netif_msg_probe(de))
 			printk("\n");
 
@@ -2118,7 +2118,7 @@
 
 		spin_unlock_irq(&de->lock);
 		enable_irq(dev->irq);
-		
+
 		/* Update the error counts. */
 		__de_get_stats(de);
 
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index f560941..da8bd0d 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -41,11 +41,11 @@
     Digital Semiconductor   SROM   Specification.    The  driver   currently
     recognises the following chips:
 
-        DC21040  (no SROM) 
-	DC21041[A]  
-	DC21140[A] 
-	DC21142 
-	DC21143 
+        DC21040  (no SROM)
+	DC21041[A]
+	DC21140[A]
+	DC21142
+	DC21143
 
     So far the driver is known to work with the following cards:
 
@@ -55,7 +55,7 @@
 	SMC8432
 	SMC9332 (w/new SROM)
 	ZNYX31[45]
-	ZNYX346 10/100 4 port (can act as a 10/100 bridge!) 
+	ZNYX346 10/100 4 port (can act as a 10/100 bridge!)
 
     The driver has been tested on a relatively busy network using the DE425,
     DE434, DE435 and DE500 cards and benchmarked with 'ttcp': it transferred
@@ -106,7 +106,7 @@
     loading by:
 
                    insmod de4x5 io=0xghh           where g = bus number
-		                                        hh = device number   
+		                                        hh = device number
 
        NB: autoprobing for modules is now supported by default. You may just
            use:
@@ -120,11 +120,11 @@
     4) if you are wanting to add a new  card, goto 5. Otherwise, recompile a
     kernel with the de4x5 configuration turned off and reboot.
     5) insmod de4x5 [io=0xghh]
-    6) run the net startup bits for your new eth?? interface(s) manually 
-    (usually /etc/rc.inet[12] at boot time). 
+    6) run the net startup bits for your new eth?? interface(s) manually
+    (usually /etc/rc.inet[12] at boot time).
     7) enjoy!
 
-    To unload a module, turn off the associated interface(s) 
+    To unload a module, turn off the associated interface(s)
     'ifconfig eth?? down' then 'rmmod de4x5'.
 
     Automedia detection is included so that in  principal you can disconnect
@@ -135,7 +135,7 @@
     By  default,  the driver will  now   autodetect any  DECchip based card.
     Should you have a need to restrict the driver to DIGITAL only cards, you
     can compile with a  DEC_ONLY define, or if  loading as a module, use the
-    'dec_only=1'  parameter. 
+    'dec_only=1'  parameter.
 
     I've changed the timing routines to  use the kernel timer and scheduling
     functions  so that the  hangs  and other assorted problems that occurred
@@ -204,7 +204,7 @@
     following parameters are allowed:
 
             fdx        for full duplex
-	    autosense  to set the media/speed; with the following 
+	    autosense  to set the media/speed; with the following
 	               sub-parameters:
 		       TP, TP_NW, BNC, AUI, BNC_AUI, 100Mb, 10Mb, AUTO
 
@@ -235,14 +235,14 @@
     this  automatically  or include  #define DE4X5_FORCE_EISA  on or  before
     line 1040 in the driver.
 
-    TO DO: 
+    TO DO:
     ------
 
     Revision History
     ----------------
 
     Version   Date        Description
-  
+
       0.1     17-Nov-94   Initial writing. ALPHA code release.
       0.2     13-Jan-95   Added PCI support for DE435's.
       0.21    19-Jan-95   Added auto media detection.
@@ -251,7 +251,7 @@
 			  Add request/release_region code.
 			  Add loadable modules support for PCI.
 			  Clean up loadable modules support.
-      0.23    28-Feb-95   Added DC21041 and DC21140 support. 
+      0.23    28-Feb-95   Added DC21041 and DC21140 support.
                           Fix missed frame counter value and initialisation.
 			  Fixed EISA probe.
       0.24    11-Apr-95   Change delay routine to use <linux/udelay>.
@@ -280,7 +280,7 @@
                           Add kernel timer code (h/w is too flaky).
 			  Add MII based PHY autosense.
 			  Add new multicasting code.
-			  Add new autosense algorithms for media/mode 
+			  Add new autosense algorithms for media/mode
 			  selection using kernel scheduling/timing.
 			  Re-formatted.
 			  Made changes suggested by <jeff@router.patch.net>:
@@ -307,10 +307,10 @@
                           Add Accton to the list of broken cards.
 			  Fix TX under-run bug for non DC21140 chips.
 			  Fix boot command probe bug in alloc_device() as
-			   reported by <koen.gadeyne@barco.com> and 
+			   reported by <koen.gadeyne@barco.com> and
 			   <orava@nether.tky.hut.fi>.
 			  Add cache locks to prevent a race condition as
-			   reported by <csd@microplex.com> and 
+			   reported by <csd@microplex.com> and
 			   <baba@beckman.uiuc.edu>.
 			  Upgraded alloc_device() code.
       0.431  28-Jun-96    Fix potential bug in queue_pkt() from discussion
@@ -322,7 +322,7 @@
                            with a loopback packet.
       0.442   9-Sep-96    Include AUI in dc21041 media printout. Bug reported
                            by <bhat@mundook.cs.mu.OZ.AU>
-      0.45    8-Dec-96    Include endian functions for PPC use, from work 
+      0.45    8-Dec-96    Include endian functions for PPC use, from work
                            by <cort@cs.nmt.edu> and <g.thomas@opengroup.org>.
       0.451  28-Dec-96    Added fix to allow autoprobe for modules after
                            suggestion from <mjacob@feral.com>.
@@ -346,14 +346,14 @@
 			   <paubert@iram.es>.
       0.52   26-Apr-97    Some changes may not credit the right people -
                            a disk crash meant I lost some mail.
-			  Change RX interrupt routine to drop rather than 
-			   defer packets to avoid hang reported by 
+			  Change RX interrupt routine to drop rather than
+			   defer packets to avoid hang reported by
 			   <g.thomas@opengroup.org>.
 			  Fix srom_exec() to return for COMPACT and type 1
 			   infoblocks.
 			  Added DC21142 and DC21143 functions.
 			  Added byte counters from <phil@tazenda.demon.co.uk>
-			  Added SA_INTERRUPT temporary fix from 
+			  Added SA_INTERRUPT temporary fix from
 			   <mjacob@feral.com>.
       0.53   12-Nov-97    Fix the *_probe() to include 'eth??' name during
                            module load: bug reported by
@@ -363,10 +363,10 @@
 			  Make above search independent of BIOS device scan
 			   direction.
 			  Completed DC2114[23] autosense functions.
-      0.531  21-Dec-97    Fix DE500-XA 100Mb/s bug reported by 
+      0.531  21-Dec-97    Fix DE500-XA 100Mb/s bug reported by
                            <robin@intercore.com
 			  Fix type1_infoblock() bug introduced in 0.53, from
-			   problem reports by 
+			   problem reports by
 			   <parmee@postecss.ncrfran.france.ncr.com> and
 			   <jo@ice.dillingen.baynet.de>.
 			  Added argument list to set up each board from either
@@ -374,7 +374,7 @@
 			  Added generic MII PHY functionality to deal with
 			   newer PHY chips.
 			  Fix the mess in 2.1.67.
-      0.532   5-Jan-98    Fix bug in mii_get_phy() reported by 
+      0.532   5-Jan-98    Fix bug in mii_get_phy() reported by
                            <redhat@cococo.net>.
                           Fix bug in pci_probe() for 64 bit systems reported
 			   by <belliott@accessone.com>.
@@ -398,7 +398,7 @@
 			   version. I hope nothing is broken...
           		  Add TX done interrupt modification from suggestion
 			   by <Austin.Donnelly@cl.cam.ac.uk>.
-			  Fix is_anc_capable() bug reported by 
+			  Fix is_anc_capable() bug reported by
 			   <Austin.Donnelly@cl.cam.ac.uk>.
 			  Fix type[13]_infoblock() bug: during MII search, PHY
 			   lp->rst not run because lp->ibn not initialised -
@@ -413,7 +413,7 @@
 			  Add an_exception() for old ZYNX346 and fix compile
 			   warning on PPC & SPARC, from <ecd@skynet.be>.
 			  Fix lastPCI to correctly work with compiled in
-			   kernels and modules from bug report by 
+			   kernels and modules from bug report by
 			   <Zlatko.Calusic@CARNet.hr> et al.
       0.542  15-Sep-98    Fix dc2114x_autoconf() to stop multiple messages
                            when media is unconnected.
@@ -425,7 +425,7 @@
       0.544   8-May-99    Fix for buggy SROM in Motorola embedded boards using
                            a 21143 by <mmporter@home.com>.
 			  Change PCI/EISA bus probing order.
-      0.545  28-Nov-99    Further Moto SROM bug fix from 
+      0.545  28-Nov-99    Further Moto SROM bug fix from
                            <mporter@eng.mcd.mot.com>
                           Remove double checking for DEBUG_RX in de4x5_dbg_rx()
 			   from report by <geert@linux-m68k.org>
@@ -434,8 +434,8 @@
                            variable 'pb', on a non de4x5 PCI device, in this
                            case a PCI bridge (DEC chip 21152). The value of
                            'pb' is now only initialized if a de4x5 chip is
-                           present. 
-                           <france@handhelds.org>  
+                           present.
+                           <france@handhelds.org>
       0.547  08-Nov-01    Use library crc32 functions by <Matt_Domsch@dell.com>
       0.548  30-Aug-03    Big 2.6 cleanup. Ported to PCI/EISA probing and
                            generic DMA APIs. Fixed DE425 support on Alpha.
@@ -584,7 +584,7 @@
 
 /*
 ** Allow per adapter set up. For modules this is simply a command line
-** parameter, e.g.: 
+** parameter, e.g.:
 ** insmod de4x5 args='eth1:fdx autosense=BNC eth0:autosense=100Mb'.
 **
 ** For a compiled in driver, place e.g.
@@ -655,7 +655,7 @@
 ** Memory Alignment. Each descriptor is 4 longwords long. To force a
 ** particular alignment on the TX descriptor, adjust DESC_SKIP_LEN and
 ** DESC_ALIGN. ALIGN aligns the start address of the private memory area
-** and hence the RX descriptor ring's first entry. 
+** and hence the RX descriptor ring's first entry.
 */
 #define DE4X5_ALIGN4      ((u_long)4 - 1)     /* 1 longword align */
 #define DE4X5_ALIGN8      ((u_long)8 - 1)     /* 2 longword align */
@@ -1081,8 +1081,8 @@
     mdelay(2);                           /* Wait for 2ms */\
 }
 
-
-static int __devinit 
+
+static int __devinit
 de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
 {
     char name[DE4X5_NAME_LENGTH + 1];
@@ -1102,12 +1102,12 @@
     mdelay(10);
 
     RESET_DE4X5;
-    
+
     if ((inl(DE4X5_STS) & (STS_TS | STS_RS)) != 0) {
 	return -ENXIO;                       /* Hardware could not reset */
     }
-    
-    /* 
+
+    /*
     ** Now find out what kind of DC21040/DC21041/DC21140 board we have.
     */
     lp->useSROM = FALSE;
@@ -1116,21 +1116,21 @@
     } else {
 	EISA_signature(name, gendev);
     }
-    
+
     if (*name == '\0') {                     /* Not found a board signature */
 	return -ENXIO;
     }
-    
+
     dev->base_addr = iobase;
     printk ("%s: %s at 0x%04lx", gendev->bus_id, name, iobase);
-    
+
     printk(", h/w address ");
     status = get_hw_addr(dev);
     for (i = 0; i < ETH_ALEN - 1; i++) {     /* get the ethernet addr. */
 	printk("%2.2x:", dev->dev_addr[i]);
     }
     printk("%2.2x,\n", dev->dev_addr[i]);
-    
+
     if (status != 0) {
 	printk("      which has an Ethernet PROM CRC error.\n");
 	return -ENXIO;
@@ -1171,10 +1171,10 @@
 	}
 
 	lp->tx_ring = lp->rx_ring + NUM_RX_DESC;
-	    
+
 	/*
 	** Set up the RX descriptor ring (Intels)
-	** Allocate contiguous receive buffers, long word aligned (Alphas) 
+	** Allocate contiguous receive buffers, long word aligned (Alphas)
 	*/
 #if !defined(__alpha__) && !defined(__powerpc__) && !defined(__sparc_v9__) && !defined(DE4X5_DO_MEMCPY)
 	for (i=0; i<NUM_RX_DESC; i++) {
@@ -1210,7 +1210,7 @@
 
 	lp->rxRingSize = NUM_RX_DESC;
 	lp->txRingSize = NUM_TX_DESC;
-	    
+
 	/* Write the end of list marker to the descriptor lists */
 	lp->rx_ring[lp->rxRingSize - 1].des1 |= cpu_to_le32(RD_RER);
 	lp->tx_ring[lp->txRingSize - 1].des1 |= cpu_to_le32(TD_TER);
@@ -1219,7 +1219,7 @@
 	outl(lp->dma_rings, DE4X5_RRBA);
 	outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
 	     DE4X5_TRBA);
-	    
+
 	/* Initialise the IRQ mask and Enable/Disable */
 	lp->irq_mask = IMR_RIM | IMR_TIM | IMR_TUM | IMR_UNM;
 	lp->irq_en   = IMR_NIM | IMR_AIM;
@@ -1252,7 +1252,7 @@
 	if ((lp->chipset != DC21040) && (lp->chipset != DC21041)) {
 	    mii_get_phy(dev);
 	}
-	
+
 #ifndef __sparc_v9__
 	printk("      and requires IRQ%d (provided by %s).\n", dev->irq,
 #else
@@ -1260,11 +1260,11 @@
 #endif
 	       ((lp->bus == PCI) ? "PCI BIOS" : "EISA CNFG"));
     }
-    
+
     if (de4x5_debug & DEBUG_VERSION) {
 	printk(version);
     }
-    
+
     /* The DE4X5-specific entries in the device structure. */
     SET_MODULE_OWNER(dev);
     SET_NETDEV_DEV(dev, gendev);
@@ -1274,23 +1274,23 @@
     dev->get_stats = &de4x5_get_stats;
     dev->set_multicast_list = &set_multicast_list;
     dev->do_ioctl = &de4x5_ioctl;
-    
+
     dev->mem_start = 0;
-    
+
     /* Fill in the generic fields of the device structure. */
     if ((status = register_netdev (dev))) {
 	    dma_free_coherent (gendev, lp->dma_size,
 			       lp->rx_ring, lp->dma_rings);
 	    return status;
     }
-    
+
     /* Let the adapter sleep to save power */
     yawn(dev, SLEEP);
-    
+
     return status;
 }
 
-
+
 static int
 de4x5_open(struct net_device *dev)
 {
@@ -1312,15 +1312,15 @@
     */
     yawn(dev, WAKEUP);
 
-    /* 
-    ** Re-initialize the DE4X5... 
+    /*
+    ** Re-initialize the DE4X5...
     */
     status = de4x5_init(dev);
     spin_lock_init(&lp->lock);
     lp->state = OPEN;
     de4x5_dbg_open(dev);
-    
-    if (request_irq(dev->irq, (void *)de4x5_interrupt, SA_SHIRQ, 
+
+    if (request_irq(dev->irq, (void *)de4x5_interrupt, SA_SHIRQ,
 		                                     lp->adapter_name, dev)) {
 	printk("de4x5_open(): Requested IRQ%d is busy - attemping FAST/SHARE...", dev->irq);
 	if (request_irq(dev->irq, de4x5_interrupt, SA_INTERRUPT | SA_SHIRQ,
@@ -1340,11 +1340,11 @@
 
     lp->interrupt = UNMASK_INTERRUPTS;
     dev->trans_start = jiffies;
-    
+
     START_DE4X5;
-	
+
     de4x5_setup_intr(dev);
-    
+
     if (de4x5_debug & DEBUG_OPEN) {
 	printk("\tsts:  0x%08x\n", inl(DE4X5_STS));
 	printk("\tbmr:  0x%08x\n", inl(DE4X5_BMR));
@@ -1355,7 +1355,7 @@
 	printk("\tstrr: 0x%08x\n", inl(DE4X5_STRR));
 	printk("\tsigr: 0x%08x\n", inl(DE4X5_SIGR));
     }
-    
+
     return status;
 }
 
@@ -1369,15 +1369,15 @@
 */
 static int
 de4x5_init(struct net_device *dev)
-{  
+{
     /* Lock out other processes whilst setting up the hardware */
     netif_stop_queue(dev);
-    
+
     de4x5_sw_reset(dev);
-    
+
     /* Autoconfigure the connected port */
     autoconf_media(dev);
-    
+
     return 0;
 }
 
@@ -1388,7 +1388,7 @@
     u_long iobase = dev->base_addr;
     int i, j, status = 0;
     s32 bmr, omr;
-    
+
     /* Select the MII or SRL port now and RESET the MAC */
     if (!lp->useSROM) {
 	if (lp->phy[lp->active].id != 0) {
@@ -1399,7 +1399,7 @@
 	de4x5_switch_mac_port(dev);
     }
 
-    /* 
+    /*
     ** Set the programmable burst length to 8 longwords for all the DC21140
     ** Fasternet chips and 4 longwords for all others: DMA errors result
     ** without these values. Cache align 16 long.
@@ -1416,23 +1416,23 @@
     outl(lp->dma_rings, DE4X5_RRBA);
     outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
 	 DE4X5_TRBA);
-    
+
     lp->rx_new = lp->rx_old = 0;
     lp->tx_new = lp->tx_old = 0;
-    
+
     for (i = 0; i < lp->rxRingSize; i++) {
 	lp->rx_ring[i].status = cpu_to_le32(R_OWN);
     }
-    
+
     for (i = 0; i < lp->txRingSize; i++) {
 	lp->tx_ring[i].status = cpu_to_le32(0);
     }
-    
+
     barrier();
 
     /* Build the setup frame depending on filtering mode */
     SetMulticastFilter(dev);
-    
+
     load_packet(dev, lp->setup_frame, PERFECT_F|TD_SET|SETUP_FRAME_LEN, (struct sk_buff *)1);
     outl(omr|OMR_ST, DE4X5_OMR);
 
@@ -1445,18 +1445,18 @@
     outl(omr, DE4X5_OMR);                        /* Stop everything! */
 
     if (j == 0) {
-	printk("%s: Setup frame timed out, status %08x\n", dev->name, 
+	printk("%s: Setup frame timed out, status %08x\n", dev->name,
 	       inl(DE4X5_STS));
 	status = -EIO;
     }
-    
+
     lp->tx_new = (++lp->tx_new) % lp->txRingSize;
     lp->tx_old = lp->tx_new;
 
     return status;
 }
 
-/* 
+/*
 ** Writes a socket buffer address to the next available transmit descriptor.
 */
 static int
@@ -1469,9 +1469,9 @@
 
     netif_stop_queue(dev);
     if (lp->tx_enable == NO) {                   /* Cannot send for now */
-	return -1;                                
+	return -1;
     }
-    
+
     /*
     ** Clean out the TX ring asynchronously to interrupts - sometimes the
     ** interrupts are lost by delayed descriptor status updates relative to
@@ -1482,7 +1482,7 @@
     spin_unlock_irqrestore(&lp->lock, flags);
 
     /* Test if cache is already locked - requeue skb if so */
-    if (test_and_set_bit(0, (void *)&lp->cache.lock) && !lp->interrupt) 
+    if (test_and_set_bit(0, (void *)&lp->cache.lock) && !lp->interrupt)
 	return -1;
 
     /* Transmit descriptor ring full or stale skb */
@@ -1509,10 +1509,10 @@
 	    load_packet(dev, skb->data, TD_IC | TD_LS | TD_FS | skb->len, skb);
  	    lp->stats.tx_bytes += skb->len;
 	    outl(POLL_DEMAND, DE4X5_TPD);/* Start the TX */
-		
+
 	    lp->tx_new = (++lp->tx_new) % lp->txRingSize;
 	    dev->trans_start = jiffies;
-		    
+
 	    if (TX_BUFFS_AVAIL) {
 		netif_start_queue(dev);         /* Another pkt may be queued */
 	    }
@@ -1521,15 +1521,15 @@
 	}
 	if (skb) de4x5_putb_cache(dev, skb);
     }
-    
+
     lp->cache.lock = 0;
 
     return status;
 }
 
 /*
-** The DE4X5 interrupt handler. 
-** 
+** The DE4X5 interrupt handler.
+**
 ** I/O Read/Writes through intermediate PCI bridges are never 'posted',
 ** so that the asserted interrupt always has some real data to work with -
 ** if these I/O accesses are ever changed to memory accesses, ensure the
@@ -1546,7 +1546,7 @@
     s32 imr, omr, sts, limit;
     u_long iobase;
     unsigned int handled = 0;
-    
+
     if (dev == NULL) {
 	printk ("de4x5_interrupt(): irq %d for unknown device.\n", irq);
 	return IRQ_NONE;
@@ -1554,35 +1554,35 @@
     lp = netdev_priv(dev);
     spin_lock(&lp->lock);
     iobase = dev->base_addr;
-	
+
     DISABLE_IRQs;                        /* Ensure non re-entrancy */
 
     if (test_and_set_bit(MASK_INTERRUPTS, (void*) &lp->interrupt))
 	printk("%s: Re-entering the interrupt handler.\n", dev->name);
 
     synchronize_irq(dev->irq);
-	
+
     for (limit=0; limit<8; limit++) {
 	sts = inl(DE4X5_STS);            /* Read IRQ status */
 	outl(sts, DE4X5_STS);            /* Reset the board interrupts */
-	    
+
 	if (!(sts & lp->irq_mask)) break;/* All done */
 	handled = 1;
-	    
+
 	if (sts & (STS_RI | STS_RU))     /* Rx interrupt (packet[s] arrived) */
 	  de4x5_rx(dev);
-	    
+
 	if (sts & (STS_TI | STS_TU))     /* Tx interrupt (packet sent) */
-	  de4x5_tx(dev); 
-	    
+	  de4x5_tx(dev);
+
 	if (sts & STS_LNF) {             /* TP Link has failed */
 	    lp->irq_mask &= ~IMR_LFM;
 	}
-	    
+
 	if (sts & STS_UNF) {             /* Transmit underrun */
 	    de4x5_txur(dev);
 	}
-	    
+
 	if (sts & STS_SE) {              /* Bus Error */
 	    STOP_DE4X5;
 	    printk("%s: Fatal bus error occurred, sts=%#8x, device stopped.\n",
@@ -1603,7 +1603,7 @@
     lp->interrupt = UNMASK_INTERRUPTS;
     ENABLE_IRQs;
     spin_unlock(&lp->lock);
-    
+
     return IRQ_RETVAL(handled);
 }
 
@@ -1614,11 +1614,11 @@
     u_long iobase = dev->base_addr;
     int entry;
     s32 status;
-    
+
     for (entry=lp->rx_new; (s32)le32_to_cpu(lp->rx_ring[entry].status)>=0;
 	                                                    entry=lp->rx_new) {
 	status = (s32)le32_to_cpu(lp->rx_ring[entry].status);
-	
+
 	if (lp->rx_ovf) {
 	    if (inl(DE4X5_MFC) & MFC_FOCM) {
 		de4x5_rx_ovfc(dev);
@@ -1629,7 +1629,7 @@
 	if (status & RD_FS) {                 /* Remember the start of frame */
 	    lp->rx_old = entry;
 	}
-	
+
 	if (status & RD_LS) {                 /* Valid frame status */
 	    if (lp->tx_enable) lp->linkOK++;
 	    if (status & RD_ES) {	      /* There was an error. */
@@ -1646,9 +1646,9 @@
 		struct sk_buff *skb;
 		short pkt_len = (short)(le32_to_cpu(lp->rx_ring[entry].status)
 					                            >> 16) - 4;
-		
+
 		if ((skb = de4x5_alloc_rx_buff(dev, entry, pkt_len)) == NULL) {
-		    printk("%s: Insufficient memory; nuking packet.\n", 
+		    printk("%s: Insufficient memory; nuking packet.\n",
 			                                            dev->name);
 		    lp->stats.rx_dropped++;
 		} else {
@@ -1658,14 +1658,14 @@
 		    skb->protocol=eth_type_trans(skb,dev);
 		    de4x5_local_stats(dev, skb->data, pkt_len);
 		    netif_rx(skb);
-		    
+
 		    /* Update stats */
 		    dev->last_rx = jiffies;
 		    lp->stats.rx_packets++;
  		    lp->stats.rx_bytes += pkt_len;
 		}
 	    }
-	    
+
 	    /* Change buffer ownership for this frame, back to the adapter */
 	    for (;lp->rx_old!=entry;lp->rx_old=(++lp->rx_old)%lp->rxRingSize) {
 		lp->rx_ring[lp->rx_old].status = cpu_to_le32(R_OWN);
@@ -1674,13 +1674,13 @@
 	    lp->rx_ring[entry].status = cpu_to_le32(R_OWN);
 	    barrier();
 	}
-	
+
 	/*
 	** Update entry information
 	*/
 	lp->rx_new = (++lp->rx_new) % lp->rxRingSize;
     }
-    
+
     return 0;
 }
 
@@ -1705,20 +1705,20 @@
     u_long iobase = dev->base_addr;
     int entry;
     s32 status;
-    
+
     for (entry = lp->tx_old; entry != lp->tx_new; entry = lp->tx_old) {
 	status = (s32)le32_to_cpu(lp->tx_ring[entry].status);
 	if (status < 0) {                     /* Buffer not sent yet */
 	    break;
 	} else if (status != 0x7fffffff) {    /* Not setup frame */
 	    if (status & TD_ES) {             /* An error happened */
-		lp->stats.tx_errors++; 
+		lp->stats.tx_errors++;
 		if (status & TD_NC) lp->stats.tx_carrier_errors++;
 		if (status & TD_LC) lp->stats.tx_window_errors++;
 		if (status & TD_UF) lp->stats.tx_fifo_errors++;
 		if (status & TD_EC) lp->pktStats.excessive_collisions++;
 		if (status & TD_DE) lp->stats.tx_aborted_errors++;
-	    
+
 		if (TX_PKT_PENDING) {
 		    outl(POLL_DEMAND, DE4X5_TPD);/* Restart a stalled TX */
 		}
@@ -1727,14 +1727,14 @@
 		if (lp->tx_enable) lp->linkOK++;
 	    }
 	    /* Update the collision counter */
-	    lp->stats.collisions += ((status & TD_EC) ? 16 : 
+	    lp->stats.collisions += ((status & TD_EC) ? 16 :
 				                      ((status & TD_CC) >> 3));
 
 	    /* Free the buffer. */
 	    if (lp->tx_skb[entry] != NULL)
 	    	de4x5_free_tx_buff(lp, entry);
 	}
-	
+
 	/* Update all the pointers */
 	lp->tx_old = (++lp->tx_old) % lp->txRingSize;
     }
@@ -1746,7 +1746,7 @@
 	else
 	    netif_start_queue(dev);
     }
-	
+
     return 0;
 }
 
@@ -1755,9 +1755,9 @@
 {
     struct de4x5_private *lp = netdev_priv(dev);
     int next_tick = DE4X5_AUTOSENSE_MS;
-    
+
     disable_ast(dev);
-    
+
     if (lp->useSROM) {
 	next_tick = srom_autoconf(dev);
     } else if (lp->chipset == DC21140) {
@@ -1769,7 +1769,7 @@
     }
     lp->linkOK = 0;
     enable_ast(dev, next_tick);
-    
+
     return 0;
 }
 
@@ -1792,11 +1792,11 @@
 	}
 	outl(omr | OMR_ST | OMR_SR, DE4X5_OMR);
     }
-    
+
     return 0;
 }
 
-static int 
+static int
 de4x5_rx_ovfc(struct net_device *dev)
 {
     struct de4x5_private *lp = netdev_priv(dev);
@@ -1813,7 +1813,7 @@
     }
 
     outl(omr, DE4X5_OMR);
-    
+
     return 0;
 }
 
@@ -1823,22 +1823,22 @@
     struct de4x5_private *lp = netdev_priv(dev);
     u_long iobase = dev->base_addr;
     s32 imr, omr;
-    
+
     disable_ast(dev);
 
     netif_stop_queue(dev);
-    
+
     if (de4x5_debug & DEBUG_CLOSE) {
 	printk("%s: Shutting down ethercard, status was %8.8x.\n",
 	       dev->name, inl(DE4X5_STS));
     }
-    
-    /* 
+
+    /*
     ** We stop the DE4X5 here... mask interrupts and stop TX & RX
     */
     DISABLE_IRQs;
     STOP_DE4X5;
-    
+
     /* Free the associated irq */
     free_irq(dev->irq, dev);
     lp->state = CLOSED;
@@ -1846,10 +1846,10 @@
     /* Free any socket buffers */
     de4x5_free_rx_buffs(dev);
     de4x5_free_tx_buffs(dev);
-    
+
     /* Put the adapter to sleep to save power */
     yawn(dev, SLEEP);
-    
+
     return 0;
 }
 
@@ -1858,9 +1858,9 @@
 {
     struct de4x5_private *lp = netdev_priv(dev);
     u_long iobase = dev->base_addr;
-    
+
     lp->stats.rx_missed_errors = (int)(inl(DE4X5_MFC) & (MFC_OVFL | MFC_CNTR));
-    
+
     return &lp->stats;
 }
 
@@ -1886,7 +1886,7 @@
 	       (*(s16 *)&buf[4] == *(s16 *)&dev->dev_addr[4])) {
         lp->pktStats.unicast++;
     }
-		
+
     lp->pktStats.bins[0]++;       /* Duplicates stats.rx_packets */
     if (lp->pktStats.bins[0] == 0) { /* Reset counters */
         memset((char *)&lp->pktStats, 0, sizeof(lp->pktStats));
@@ -1937,11 +1937,11 @@
 	    omr = inl(DE4X5_OMR);
 	    omr |= OMR_PR;
 	    outl(omr, DE4X5_OMR);
-	} else { 
+	} else {
 	    SetMulticastFilter(dev);
-	    load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET | 
+	    load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
 			                                SETUP_FRAME_LEN, (struct sk_buff *)1);
-	    
+
 	    lp->tx_new = (++lp->tx_new) % lp->txRingSize;
 	    outl(POLL_DEMAND, DE4X5_TPD);       /* Start the TX */
 	    dev->trans_start = jiffies;
@@ -1969,20 +1969,20 @@
     omr = inl(DE4X5_OMR);
     omr &= ~(OMR_PR | OMR_PM);
     pa = build_setup_frame(dev, ALL);        /* Build the basic frame */
-    
+
     if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 14)) {
 	omr |= OMR_PM;                       /* Pass all multicasts */
     } else if (lp->setup_f == HASH_PERF) {   /* Hash Filtering */
 	for (i=0;i<dev->mc_count;i++) {      /* for each address in the list */
 	    addrs=dmi->dmi_addr;
 	    dmi=dmi->next;
-	    if ((*addrs & 0x01) == 1) {      /* multicast address? */ 
+	    if ((*addrs & 0x01) == 1) {      /* multicast address? */
 		crc = ether_crc_le(ETH_ALEN, addrs);
 		hashcode = crc & HASH_BITS;  /* hashcode is 9 LSb of CRC */
-		
+
 		byte = hashcode >> 3;        /* bit[3-8] -> byte in filter */
 		bit = 1 << (hashcode & 0x07);/* bit[0-2] -> bit in byte */
-		
+
 		byte <<= 1;                  /* calc offset into setup frame */
 		if (byte & 0x02) {
 		    byte -= 1;
@@ -1994,14 +1994,14 @@
 	for (j=0; j<dev->mc_count; j++) {
 	    addrs=dmi->dmi_addr;
 	    dmi=dmi->next;
-	    for (i=0; i<ETH_ALEN; i++) { 
+	    for (i=0; i<ETH_ALEN; i++) {
 		*(pa + (i&1)) = *addrs++;
 		if (i & 0x01) pa += 4;
 	    }
 	}
     }
     outl(omr, DE4X5_OMR);
-    
+
     return;
 }
 
@@ -2031,18 +2031,18 @@
 		status = -EBUSY;
 		goto release_reg_1;
 	}
-	
+
 	if (!(dev = alloc_etherdev (sizeof (struct de4x5_private)))) {
 		status = -ENOMEM;
 		goto release_reg_2;
 	}
 	lp = netdev_priv(dev);
-	
+
 	cfid = (u32) inl(PCI_CFID);
 	lp->cfrv = (u_short) inl(PCI_CFRV);
 	device = (cfid >> 8) & 0x00ffff00;
 	vendor = (u_short) cfid;
-	    
+
 	/* Read the EISA Configuration Registers */
 	regval = inb(EISA_REG0) & (ER0_INTL | ER0_INTT);
 #ifdef CONFIG_ALPHA
@@ -2050,7 +2050,7 @@
 	 * care about the EISA configuration, and thus doesn't
 	 * configure the PLX bridge properly. Oh well... Simply mimic
 	 * the EISA config file to sort it out. */
-	
+
 	/* EISA REG1: Assert DecChip 21040 HW Reset */
 	outb (ER1_IAM | 1, EISA_REG1);
 	mdelay (1);
@@ -2061,12 +2061,12 @@
 
 	/* EISA REG3: R/W Burst Transfer Enable */
 	outb (ER3_BWE | ER3_BRE, EISA_REG3);
-	
+
 	/* 32_bit slave/master, Preempt Time=23 bclks, Unlatched Interrupt */
 	outb (ER0_BSW | ER0_BMW | ER0_EPT | regval, EISA_REG0);
 #endif
 	irq = de4x5_irq[(regval >> 1) & 0x03];
-	
+
 	if (is_DC2114x) {
 	    device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143);
 	}
@@ -2077,7 +2077,7 @@
 	outl(PCI_COMMAND_IO | PCI_COMMAND_MASTER, PCI_CFCS);
 	outl(0x00006000, PCI_CFLT);
 	outl(iobase, PCI_CBIO);
-	    
+
 	DevicePresent(dev, EISA_APROM);
 
 	dev->irq = irq;
@@ -2102,7 +2102,7 @@
 
 	dev = device->driver_data;
 	iobase = dev->base_addr;
-	
+
 	unregister_netdev (dev);
 	free_netdev (dev);
 	release_region (iobase + DE4X5_EISA_IO_PORTS, DE4X5_EISA_TOTAL_SIZE);
@@ -2131,11 +2131,11 @@
 
 /*
 ** This function searches the current bus (which is >0) for a DECchip with an
-** SROM, so that in multiport cards that have one SROM shared between multiple 
+** SROM, so that in multiport cards that have one SROM shared between multiple
 ** DECchips, we can find the base SROM irrespective of the BIOS scan direction.
 ** For single port cards this is a time waster...
 */
-static void __devinit 
+static void __devinit
 srom_search(struct net_device *dev, struct pci_dev *pdev)
 {
     u_char pb;
@@ -2163,7 +2163,7 @@
 	/* Set the device number information */
 	lp->device = PCI_SLOT(this_dev->devfn);
 	lp->bus_num = pb;
-	    
+
 	/* Set the chipset information */
 	if (is_DC2114x) {
 	    device = ((cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143);
@@ -2176,7 +2176,7 @@
 	/* Fetch the IRQ to be used */
 	irq = this_dev->irq;
 	if ((irq == 0) || (irq == 0xff) || ((int)irq == -1)) continue;
-	    
+
 	/* Check if I/O accesses are enabled */
 	pci_read_config_word(this_dev, PCI_COMMAND, &status);
 	if (!(status & PCI_COMMAND_IO)) continue;
@@ -2254,7 +2254,7 @@
 	lp = netdev_priv(dev);
 	lp->bus = PCI;
 	lp->bus_num = 0;
-	
+
 	/* Search for an SROM on this bus */
 	if (lp->bus_num != pb) {
 	    lp->bus_num = pb;
@@ -2267,7 +2267,7 @@
 	/* Set the device number information */
 	lp->device = dev_num;
 	lp->bus_num = pb;
-	
+
 	/* Set the chipset information */
 	if (is_DC2114x) {
 	    device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143);
@@ -2283,7 +2283,7 @@
 		error = -ENODEV;
 		goto free_dev;
 	}
-	    
+
 	/* Check if I/O accesses and Bus Mastering are enabled */
 	pci_read_config_word(pdev, PCI_COMMAND, &status);
 #ifdef __powerpc__
@@ -2322,7 +2322,7 @@
 	}
 
 	dev->irq = irq;
-	
+
 	if ((error = de4x5_hw_init(dev, iobase, &pdev->dev))) {
 		goto release;
 	}
@@ -2377,7 +2377,7 @@
 ** Auto configure the media here rather than setting the port at compile
 ** time. This routine is called by de4x5_init() and when a loss of media is
 ** detected (excessive collisions, loss of carrier, no carrier or link fail
-** [TP] or no recent receive activity) to check whether the user has been 
+** [TP] or no recent receive activity) to check whether the user has been
 ** sneaky and changed the port on us.
 */
 static int
@@ -2405,7 +2405,7 @@
     }
 
     enable_ast(dev, next_tick);
-    
+
     return (lp->media);
 }
 
@@ -2428,7 +2428,7 @@
     u_long iobase = dev->base_addr;
     int next_tick = DE4X5_AUTOSENSE_MS;
     s32 imr;
-    
+
     switch (lp->media) {
     case INIT:
 	DISABLE_IRQs;
@@ -2447,36 +2447,36 @@
 	lp->local_state = 0;
 	next_tick = dc21040_autoconf(dev);
 	break;
-	
+
     case TP:
-	next_tick = dc21040_state(dev, 0x8f01, 0xffff, 0x0000, 3000, BNC_AUI, 
+	next_tick = dc21040_state(dev, 0x8f01, 0xffff, 0x0000, 3000, BNC_AUI,
 		                                         TP_SUSPECT, test_tp);
 	break;
-	
+
     case TP_SUSPECT:
 	next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21040_autoconf);
 	break;
-	
+
     case BNC:
     case AUI:
     case BNC_AUI:
-	next_tick = dc21040_state(dev, 0x8f09, 0x0705, 0x0006, 3000, EXT_SIA, 
+	next_tick = dc21040_state(dev, 0x8f09, 0x0705, 0x0006, 3000, EXT_SIA,
 		                                  BNC_AUI_SUSPECT, ping_media);
 	break;
-	
+
     case BNC_AUI_SUSPECT:
 	next_tick = de4x5_suspect_state(dev, 1000, BNC_AUI, ping_media, dc21040_autoconf);
 	break;
-	
+
     case EXT_SIA:
-	next_tick = dc21040_state(dev, 0x3041, 0x0000, 0x0006, 3000, 
+	next_tick = dc21040_state(dev, 0x3041, 0x0000, 0x0006, 3000,
 		                              NC, EXT_SIA_SUSPECT, ping_media);
 	break;
-	
+
     case EXT_SIA_SUSPECT:
 	next_tick = de4x5_suspect_state(dev, 1000, EXT_SIA, ping_media, dc21040_autoconf);
 	break;
-	
+
     case NC:
 	/* default to TP for all */
 	reset_init_sia(dev, 0x8f01, 0xffff, 0x0000);
@@ -2488,13 +2488,13 @@
 	lp->tx_enable = NO;
 	break;
     }
-    
+
     return next_tick;
 }
 
 static int
 dc21040_state(struct net_device *dev, int csr13, int csr14, int csr15, int timeout,
-	      int next_state, int suspect_state, 
+	      int next_state, int suspect_state,
 	      int (*fn)(struct net_device *, int))
 {
     struct de4x5_private *lp = netdev_priv(dev);
@@ -2507,7 +2507,7 @@
 	lp->local_state++;
 	next_tick = 500;
 	break;
-	    
+
     case 1:
 	if (!lp->tx_enable) {
 	    linkBad = fn(dev, timeout);
@@ -2527,7 +2527,7 @@
 	}
 	break;
     }
-    
+
     return next_tick;
 }
 
@@ -2582,7 +2582,7 @@
     u_long iobase = dev->base_addr;
     s32 sts, irqs, irq_mask, imr, omr;
     int next_tick = DE4X5_AUTOSENSE_MS;
-    
+
     switch (lp->media) {
     case INIT:
 	DISABLE_IRQs;
@@ -2603,7 +2603,7 @@
 	lp->local_state = 0;
 	next_tick = dc21041_autoconf(dev);
 	break;
-	
+
     case TP_NW:
 	if (lp->timeout < 0) {
 	    omr = inl(DE4X5_OMR);/* Set up full duplex for the autonegotiate */
@@ -2623,7 +2623,7 @@
 	    next_tick = dc21041_autoconf(dev);
 	}
 	break;
-	
+
     case ANS:
 	if (!lp->tx_enable) {
 	    irqs = STS_LNP;
@@ -2645,11 +2645,11 @@
 	    next_tick = 3000;
 	}
 	break;
-	
+
     case ANS_SUSPECT:
 	next_tick = de4x5_suspect_state(dev, 1000, ANS, test_tp, dc21041_autoconf);
 	break;
-	
+
     case TP:
 	if (!lp->tx_enable) {
 	    if (lp->timeout < 0) {
@@ -2679,11 +2679,11 @@
 	    next_tick = 3000;
 	}
 	break;
-	
+
     case TP_SUSPECT:
 	next_tick = de4x5_suspect_state(dev, 1000, TP, test_tp, dc21041_autoconf);
 	break;
-	
+
     case AUI:
 	if (!lp->tx_enable) {
 	    if (lp->timeout < 0) {
@@ -2709,11 +2709,11 @@
 	    next_tick = 3000;
 	}
 	break;
-	
+
     case AUI_SUSPECT:
 	next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc21041_autoconf);
 	break;
-	
+
     case BNC:
 	switch (lp->local_state) {
 	case 0:
@@ -2731,7 +2731,7 @@
 		next_tick = dc21041_autoconf(dev);
 	    }
 	    break;
-	    
+
 	case 1:
 	    if (!lp->tx_enable) {
 		if ((sts = ping_media(dev, 3000)) < 0) {
@@ -2751,11 +2751,11 @@
 	    break;
 	}
 	break;
-	
+
     case BNC_SUSPECT:
 	next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc21041_autoconf);
 	break;
-	
+
     case NC:
 	omr = inl(DE4X5_OMR);    /* Set up full duplex for the autonegotiate */
 	outl(omr | OMR_FDX, DE4X5_OMR);
@@ -2768,7 +2768,7 @@
 	lp->tx_enable = NO;
 	break;
     }
-    
+
     return next_tick;
 }
 
@@ -2784,9 +2784,9 @@
     int ana, anlpa, cap, cr, slnk, sr;
     int next_tick = DE4X5_AUTOSENSE_MS;
     u_long imr, omr, iobase = dev->base_addr;
-    
+
     switch(lp->media) {
-    case INIT: 
+    case INIT:
         if (lp->timeout < 0) {
 	    DISABLE_IRQs;
 	    lp->tx_enable = FALSE;
@@ -2813,7 +2813,7 @@
 		    lp->media = _100Mb;
 		} else if (lp->autosense == _10Mb) {
 		    lp->media = _10Mb;
-		} else if ((lp->autosense == AUTO) && 
+		} else if ((lp->autosense == AUTO) &&
 			            ((sr=is_anc_capable(dev)) & MII_SR_ANC)) {
 		    ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA);
 		    ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM);
@@ -2831,7 +2831,7 @@
 	    next_tick = dc21140m_autoconf(dev);
 	}
 	break;
-	
+
     case ANS:
 	switch (lp->local_state) {
 	case 0:
@@ -2851,7 +2851,7 @@
 		next_tick = dc21140m_autoconf(dev);
 	    }
 	    break;
-	    
+
 	case 1:
 	    if ((sr=test_mii_reg(dev, MII_SR, MII_SR_ASSC, TRUE, 2000)) < 0) {
 		next_tick = sr & ~TIMER_CB;
@@ -2862,7 +2862,7 @@
 		    lp->tmp = MII_SR_ASSC;
 		    anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII);
 		    ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
-		    if (!(anlpa & MII_ANLPA_RF) && 
+		    if (!(anlpa & MII_ANLPA_RF) &&
 			 (cap = anlpa & MII_ANLPA_TAF & ana)) {
 			if (cap & MII_ANA_100M) {
 			    lp->fdx = ((ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) ? TRUE : FALSE);
@@ -2879,10 +2879,10 @@
 	    break;
 	}
 	break;
-	
+
     case SPD_DET:                              /* Choose 10Mb/s or 100Mb/s */
         if (lp->timeout < 0) {
-	    lp->tmp = (lp->phy[lp->active].id ? MII_SR_LKS : 
+	    lp->tmp = (lp->phy[lp->active].id ? MII_SR_LKS :
 		                                  (~gep_rd(dev) & GEP_LNP));
 	    SET_100Mb_PDET;
 	}
@@ -2899,7 +2899,7 @@
 	    next_tick = dc21140m_autoconf(dev);
 	}
 	break;
-	
+
     case _100Mb:                               /* Set 100Mb/s */
         next_tick = 3000;
 	if (!lp->tx_enable) {
@@ -2933,7 +2933,7 @@
 	    }
 	}
 	break;
-	
+
     case NC:
         if (lp->media != lp->c_media) {
 	    de4x5_dbg_media(dev);
@@ -2943,7 +2943,7 @@
 	lp->tx_enable = FALSE;
 	break;
     }
-    
+
     return next_tick;
 }
 
@@ -3002,7 +3002,7 @@
 		lp->media = AUI;
 	    } else {
 		lp->media = SPD_DET;
-		if ((lp->infoblock_media == ANS) && 
+		if ((lp->infoblock_media == ANS) &&
 		                    ((sr=is_anc_capable(dev)) & MII_SR_ANC)) {
 		    ana = (((sr >> 6) & MII_ANA_TAF) | MII_ANA_CSMA);
 		    ana &= (lp->fdx ? ~0 : ~MII_ANA_FDAM);
@@ -3014,7 +3014,7 @@
 	    next_tick = dc2114x_autoconf(dev);
         }
 	break;
-	
+
     case ANS:
 	switch (lp->local_state) {
 	case 0:
@@ -3034,7 +3034,7 @@
 		next_tick = dc2114x_autoconf(dev);
 	    }
 	    break;
-	    
+
 	case 1:
 	    if ((sr=test_mii_reg(dev, MII_SR, MII_SR_ASSC, TRUE, 2000)) < 0) {
 		next_tick = sr & ~TIMER_CB;
@@ -3045,7 +3045,7 @@
 		    lp->tmp = MII_SR_ASSC;
 		    anlpa = mii_rd(MII_ANLPA, lp->phy[lp->active].addr, DE4X5_MII);
 		    ana = mii_rd(MII_ANA, lp->phy[lp->active].addr, DE4X5_MII);
-		    if (!(anlpa & MII_ANLPA_RF) && 
+		    if (!(anlpa & MII_ANLPA_RF) &&
 			 (cap = anlpa & MII_ANLPA_TAF & ana)) {
 			if (cap & MII_ANA_100M) {
 			    lp->fdx = ((ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) ? TRUE : FALSE);
@@ -3087,11 +3087,11 @@
 	    next_tick = 3000;
 	}
 	break;
-	
+
     case AUI_SUSPECT:
 	next_tick = de4x5_suspect_state(dev, 1000, AUI, ping_media, dc2114x_autoconf);
 	break;
-	
+
     case BNC:
 	switch (lp->local_state) {
 	case 0:
@@ -3109,7 +3109,7 @@
 		next_tick = dc2114x_autoconf(dev);
 	    }
 	    break;
-	    
+
 	case 1:
 	    if (!lp->tx_enable) {
 		if ((sts = ping_media(dev, 3000)) < 0) {
@@ -3130,11 +3130,11 @@
 	    break;
 	}
 	break;
-	
+
     case BNC_SUSPECT:
 	next_tick = de4x5_suspect_state(dev, 1000, BNC, ping_media, dc2114x_autoconf);
 	break;
-	
+
     case SPD_DET:                              /* Choose 10Mb/s or 100Mb/s */
 	  if (srom_map_media(dev) < 0) {
 	      lp->tcount++;
@@ -3161,7 +3161,7 @@
 	      next_tick = dc2114x_autoconf(dev);
 	  } else if (((lp->media == _100Mb) && is_100_up(dev)) ||
 		     (((lp->media == _10Mb) || (lp->media == TP) ||
-		       (lp->media == BNC)   || (lp->media == AUI)) && 
+		       (lp->media == BNC)   || (lp->media == AUI)) &&
 		      is_10_up(dev))) {
 	      next_tick = dc2114x_autoconf(dev);
 	  } else {
@@ -3169,7 +3169,7 @@
 	      lp->media = INIT;
 	  }
 	  break;
-	
+
     case _10Mb:
         next_tick = 3000;
 	if (!lp->tx_enable) {
@@ -3208,7 +3208,7 @@
 	lp->media = INIT;
 	break;
     }
-    
+
     return next_tick;
 }
 
@@ -3231,7 +3231,7 @@
     struct de4x5_private *lp = netdev_priv(dev);
 
     lp->fdx = 0;
-    if (lp->infoblock_media == lp->media) 
+    if (lp->infoblock_media == lp->media)
       return 0;
 
     switch(lp->infoblock_media) {
@@ -3270,7 +3270,7 @@
       case SROM_100BASEFF:
 	if (!lp->params.fdx) return -1;
 	lp->fdx = TRUE;
-      case SROM_100BASEF: 
+      case SROM_100BASEF:
 	if (lp->params.fdx && !lp->fdx) return -1;
 	lp->media = _100Mb;
 	break;
@@ -3280,8 +3280,8 @@
 	lp->fdx = lp->params.fdx;
 	break;
 
-      default: 
-	printk("%s: Bad media code [%d] detected in SROM!\n", dev->name, 
+      default:
+	printk("%s: Bad media code [%d] detected in SROM!\n", dev->name,
 	                                                  lp->infoblock_media);
 	return -1;
 	break;
@@ -3359,7 +3359,7 @@
     struct de4x5_private *lp = netdev_priv(dev);
     u_long iobase = dev->base_addr;
     s32 sts, csr12;
-    
+
     if (lp->timeout < 0) {
 	lp->timeout = msec/100;
 	if (!lp->useSROM) {      /* Already done if by SROM, else dc2104[01] */
@@ -3372,22 +3372,22 @@
 	/* clear all pending interrupts */
 	sts = inl(DE4X5_STS);
 	outl(sts, DE4X5_STS);
-	
+
 	/* clear csr12 NRA and SRA bits */
 	if ((lp->chipset == DC21041) || lp->useSROM) {
 	    csr12 = inl(DE4X5_SISR);
 	    outl(csr12, DE4X5_SISR);
 	}
     }
-    
+
     sts = inl(DE4X5_STS) & ~TIMER_CB;
-    
+
     if (!(sts & irqs) && --lp->timeout) {
 	sts = 100 | TIMER_CB;
     } else {
 	lp->timeout = -1;
     }
-    
+
     return sts;
 }
 
@@ -3397,11 +3397,11 @@
     struct de4x5_private *lp = netdev_priv(dev);
     u_long iobase = dev->base_addr;
     int sisr;
-    
+
     if (lp->timeout < 0) {
 	lp->timeout = msec/100;
     }
-    
+
     sisr = (inl(DE4X5_SISR) & ~TIMER_CB) & (SISR_LKF | SISR_NCR);
 
     if (sisr && --lp->timeout) {
@@ -3409,7 +3409,7 @@
     } else {
 	lp->timeout = -1;
     }
-    
+
     return sisr;
 }
 
@@ -3436,7 +3436,7 @@
 	    lp->timeout = msec/SAMPLE_INTERVAL;
 	}
     }
-    
+
     if (lp->phy[lp->active].id || lp->useSROM) {
 	gep = is_100_up(dev) | is_spd_100(dev);
     } else {
@@ -3447,7 +3447,7 @@
     } else {
 	lp->timeout = -1;
     }
-    
+
     return gep;
 }
 
@@ -3459,13 +3459,13 @@
     if (lp->timeout < 0) {
 	lp->timeout = 1;
     }
-    
+
     if (lp->timeout--) {
 	return TIMER_CB;
     } else {
 	lp->timeout = -1;
     }
-    
+
     return 0;
 }
 
@@ -3479,21 +3479,21 @@
     struct de4x5_private *lp = netdev_priv(dev);
     int test;
     u_long iobase = dev->base_addr;
-    
+
     if (lp->timeout < 0) {
 	lp->timeout = msec/100;
     }
-    
+
     if (pol) pol = ~0;
     reg = mii_rd((u_char)reg, lp->phy[lp->active].addr, DE4X5_MII) & mask;
     test = (reg ^ pol) & mask;
-    
+
     if (test && --lp->timeout) {
 	reg = 100 | TIMER_CB;
     } else {
 	lp->timeout = -1;
     }
-    
+
     return reg;
 }
 
@@ -3503,7 +3503,7 @@
     struct de4x5_private *lp = netdev_priv(dev);
     u_long iobase = dev->base_addr;
     int spd;
-    
+
     if (lp->useMII) {
 	spd = mii_rd(lp->phy[lp->active].spd.reg, lp->phy[lp->active].addr, DE4X5_MII);
 	spd = ~(spd ^ lp->phy[lp->active].spd.value);
@@ -3517,7 +3517,7 @@
 	spd = (lp->asBitValid & (lp->asPolarity ^ (gep_rd(dev) & lp->asBit))) |
 	          (lp->linkOK & ~lp->asBitValid);
     }
-    
+
     return spd;
 }
 
@@ -3526,7 +3526,7 @@
 {
     struct de4x5_private *lp = netdev_priv(dev);
     u_long iobase = dev->base_addr;
-    
+
     if (lp->useMII) {
 	/* Double read for sticky bits & temporary drops */
 	mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
@@ -3547,7 +3547,7 @@
 {
     struct de4x5_private *lp = netdev_priv(dev);
     u_long iobase = dev->base_addr;
-    
+
     if (lp->useMII) {
 	/* Double read for sticky bits & temporary drops */
 	mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII);
@@ -3570,7 +3570,7 @@
 {
     struct de4x5_private *lp = netdev_priv(dev);
     u_long iobase = dev->base_addr;
-    
+
     if (lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) {
 	return (mii_rd(MII_SR, lp->phy[lp->active].addr, DE4X5_MII));
     } else if ((lp->chipset & ~0x00ff) == DC2114x) {
@@ -3590,24 +3590,24 @@
     struct de4x5_private *lp = netdev_priv(dev);
     u_long iobase = dev->base_addr;
     int sisr;
-    
+
     if (lp->timeout < 0) {
 	lp->timeout = msec/100;
-	
+
 	lp->tmp = lp->tx_new;                /* Remember the ring position */
 	load_packet(dev, lp->frame, TD_LS | TD_FS | sizeof(lp->frame), (struct sk_buff *)1);
 	lp->tx_new = (++lp->tx_new) % lp->txRingSize;
 	outl(POLL_DEMAND, DE4X5_TPD);
     }
-    
+
     sisr = inl(DE4X5_SISR);
 
-    if ((!(sisr & SISR_NCR)) && 
-	((s32)le32_to_cpu(lp->tx_ring[lp->tmp].status) < 0) && 
+    if ((!(sisr & SISR_NCR)) &&
+	((s32)le32_to_cpu(lp->tx_ring[lp->tmp].status) < 0) &&
 	 (--lp->timeout)) {
 	sisr = 100 | TIMER_CB;
     } else {
-	if ((!(sisr & SISR_NCR)) && 
+	if ((!(sisr & SISR_NCR)) &&
 	    !(le32_to_cpu(lp->tx_ring[lp->tmp].status) & (T_OWN | TD_ES)) &&
 	    lp->timeout) {
 	    sisr = 0;
@@ -3616,7 +3616,7 @@
 	}
 	lp->timeout = -1;
     }
-    
+
     return sisr;
 }
 
@@ -3668,7 +3668,7 @@
     } else {                                           /* Linear buffer */
 	memcpy(skb_put(p,len),lp->rx_bufs + lp->rx_old * RX_BUFF_SZ,len);
     }
-		    
+
     return p;
 #endif
 }
@@ -3751,23 +3751,23 @@
 	outl(lp->dma_rings, DE4X5_RRBA);
 	outl(lp->dma_rings + NUM_RX_DESC * sizeof(struct de4x5_desc),
 	     DE4X5_TRBA);
-    
+
 	lp->rx_new = lp->rx_old = 0;
 	lp->tx_new = lp->tx_old = 0;
-    
+
 	for (i = 0; i < lp->rxRingSize; i++) {
 	    lp->rx_ring[i].status = cpu_to_le32(R_OWN);
 	}
-    
+
 	for (i = 0; i < lp->txRingSize; i++) {
 	    lp->tx_ring[i].status = cpu_to_le32(0);
 	}
-    
+
 	barrier();
 	lp->cache.save_cnt--;
 	START_DE4X5;
     }
-        
+
     return;
 }
 
@@ -3792,7 +3792,7 @@
 	    gep_wr(lp->cache.gepc, dev);
 	    gep_wr(lp->cache.gep, dev);
 	} else {
-	    reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14, 
+	    reset_init_sia(dev, lp->cache.csr13, lp->cache.csr14,
 			                                      lp->cache.csr15);
 	}
 	break;
@@ -3854,25 +3854,25 @@
     struct de4x5_private *lp = netdev_priv(dev);
     u_long iobase = dev->base_addr;
     s32 sts, ans;
-    
+
     if (lp->timeout < 0) {
 	lp->timeout = msec/100;
 	outl(irq_mask, DE4X5_IMR);
-	
+
 	/* clear all pending interrupts */
 	sts = inl(DE4X5_STS);
 	outl(sts, DE4X5_STS);
     }
-    
+
     ans = inl(DE4X5_SISR) & SISR_ANS;
     sts = inl(DE4X5_STS) & ~TIMER_CB;
-    
+
     if (!(sts & irqs) && (ans ^ ANS_NWOK) && --lp->timeout) {
 	sts = 100 | TIMER_CB;
     } else {
 	lp->timeout = -1;
     }
-    
+
     return sts;
 }
 
@@ -3882,7 +3882,7 @@
     struct de4x5_private *lp = netdev_priv(dev);
     u_long iobase = dev->base_addr;
     s32 imr, sts;
-    
+
     if (inl(DE4X5_OMR) & OMR_SR) {   /* Only unmask if TX/RX is enabled */
 	imr = 0;
 	UNMASK_IRQs;
@@ -3890,7 +3890,7 @@
 	outl(sts, DE4X5_STS);
 	ENABLE_IRQs;
     }
-    
+
     return;
 }
 
@@ -3936,17 +3936,17 @@
 {
     int i;
     char *buf = frame;
-    
+
     for (i=0; i<ETH_ALEN; i++) {             /* Use this source address */
 	*buf++ = dev->dev_addr[i];
     }
     for (i=0; i<ETH_ALEN; i++) {             /* Use this destination address */
 	*buf++ = dev->dev_addr[i];
     }
-    
+
     *buf++ = 0;                              /* Packet length (2 bytes) */
     *buf++ = 1;
-    
+
     return;
 }
 
@@ -3978,7 +3978,7 @@
 PCI_signature(char *name, struct de4x5_private *lp)
 {
     int i, status = 0, siglen = sizeof(de4x5_signatures)/sizeof(c_char *);
-    
+
     if (lp->chipset == DC21040) {
 	strcpy(name, "DE434/5");
 	return status;
@@ -4007,7 +4007,7 @@
     } else if ((lp->chipset & ~0x00ff) == DC2114x) {
 	lp->useSROM = TRUE;
     }
-    
+
     return status;
 }
 
@@ -4024,7 +4024,7 @@
 {
     int i, j=0;
     struct de4x5_private *lp = netdev_priv(dev);
-    
+
     if (lp->chipset == DC21040) {
 	if (lp->bus == EISA) {
 	    enet_addr_rst(aprom_addr); /* Reset Ethernet Address ROM Pointer */
@@ -4049,7 +4049,7 @@
 	}
 	de4x5_dbg_srom((struct de4x5_srom *)&lp->srom);
     }
-    
+
     return;
 }
 
@@ -4071,11 +4071,11 @@
     short sigLength=0;
     s8 data;
     int i, j;
-    
+
     dev.llsig.a = ETH_PROM_SIG;
     dev.llsig.b = ETH_PROM_SIG;
     sigLength = sizeof(u32) << 1;
-    
+
     for (i=0,j=0;j<sigLength && i<PROBE_LENGTH+sigLength-1;i++) {
 	data = inb(aprom_addr);
 	if (dev.Sig[j] == data) {    /* track signature */
@@ -4088,7 +4088,7 @@
 	    }
 	}
     }
-    
+
     return;
 }
 
@@ -4111,7 +4111,7 @@
     for (i=0,k=0,j=0;j<3;j++) {
 	k <<= 1;
 	if (k > 0xffff) k-=0xffff;
-	
+
 	if (lp->bus == PCI) {
 	    if (lp->chipset == DC21040) {
 		while ((tmp = inl(DE4X5_APROM)) < 0);
@@ -4133,11 +4133,11 @@
 	    k += (u_short) ((tmp = inb(EISA_APROM)) << 8);
 	    dev->dev_addr[i++] = (u_char) tmp;
 	}
-	
+
 	if (k > 0xffff) k-=0xffff;
     }
     if (k == 0xffff) k=0;
-    
+
     if (lp->bus == PCI) {
 	if (lp->chipset == DC21040) {
 	    while ((tmp = inl(DE4X5_APROM)) < 0);
@@ -4156,7 +4156,7 @@
     srom_repair(dev, broken);
 
 #ifdef CONFIG_PPC_MULTIPLATFORM
-    /* 
+    /*
     ** If the address starts with 00 a0, we have to bit-reverse
     ** each byte of the address.
     */
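To make the comment above concrete, here is a minimal, self-contained sketch of the per-byte bit reversal it describes; the helper name and loop are hypothetical illustrations, not the driver's actual code, and would be applied to each byte of the station address.

/* Hypothetical illustration of the bit reversal described above. */
static unsigned char bitrev_byte(unsigned char b)
{
	unsigned char r = 0;
	int i;

	for (i = 0; i < 8; i++) {	/* bit i of b becomes bit (7 - i) of r */
		r = (r << 1) | (b & 1);
		b >>= 1;
	}
	return r;
}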
@@ -4245,7 +4245,7 @@
 
     for (tmp=0,i=0; i<ETH_ALEN; i++) tmp += (u_char)dev->dev_addr[i];
     if ((tmp == 0) || (tmp == 0x5fa)) {
-	if ((lp->chipset == last.chipset) && 
+	if ((lp->chipset == last.chipset) &&
 	    (lp->bus_num == last.bus) && (lp->bus_num > 0)) {
 	    for (i=0; i<ETH_ALEN; i++) dev->dev_addr[i] = last.addr[i];
 	    for (i=ETH_ALEN-1; i>2; --i) {
@@ -4275,7 +4275,7 @@
 static int
 an_exception(struct de4x5_private *lp)
 {
-    if ((*(u_short *)lp->srom.sub_vendor_id == 0x00c0) && 
+    if ((*(u_short *)lp->srom.sub_vendor_id == 0x00c0) &&
 	(*(u_short *)lp->srom.sub_system_id == 0x95e0)) {
 	return -1;
     }
@@ -4290,11 +4290,11 @@
 srom_rd(u_long addr, u_char offset)
 {
     sendto_srom(SROM_RD | SROM_SR, addr);
-    
+
     srom_latch(SROM_RD | SROM_SR | DT_CS, addr);
     srom_command(SROM_RD | SROM_SR | DT_IN | DT_CS, addr);
     srom_address(SROM_RD | SROM_SR | DT_CS, addr, offset);
-    
+
     return srom_data(SROM_RD | SROM_SR | DT_CS, addr);
 }
 
@@ -4304,7 +4304,7 @@
     sendto_srom(command, addr);
     sendto_srom(command | DT_CLK, addr);
     sendto_srom(command, addr);
-    
+
     return;
 }
 
@@ -4314,7 +4314,7 @@
     srom_latch(command, addr);
     srom_latch(command, addr);
     srom_latch((command & 0x0000ff00) | DT_CS, addr);
-    
+
     return;
 }
 
@@ -4322,15 +4322,15 @@
 srom_address(u_int command, u_long addr, u_char offset)
 {
     int i, a;
-    
+
     a = offset << 2;
     for (i=0; i<6; i++, a <<= 1) {
 	srom_latch(command | ((a & 0x80) ? DT_IN : 0), addr);
     }
     udelay(1);
-    
+
     i = (getfrom_srom(addr) >> 3) & 0x01;
-    
+
     return;
 }
 
@@ -4340,17 +4340,17 @@
     int i;
     short word = 0;
     s32 tmp;
-    
+
     for (i=0; i<16; i++) {
 	sendto_srom(command  | DT_CLK, addr);
 	tmp = getfrom_srom(addr);
 	sendto_srom(command, addr);
-	
+
 	word = (word << 1) | ((tmp >> 3) & 0x01);
     }
-    
+
     sendto_srom(command & 0x0000ff00, addr);
-    
+
     return word;
 }
 
@@ -4359,13 +4359,13 @@
 srom_busy(u_int command, u_long addr)
 {
    sendto_srom((command & 0x0000ff00) | DT_CS, addr);
-   
+
    while (!((getfrom_srom(addr) >> 3) & 0x01)) {
        mdelay(1);
    }
-   
+
    sendto_srom(command & 0x0000ff00, addr);
-   
+
    return;
 }
 */
@@ -4375,7 +4375,7 @@
 {
     outl(command, addr);
     udelay(1);
-    
+
     return;
 }
 
@@ -4383,10 +4383,10 @@
 getfrom_srom(u_long addr)
 {
     s32 tmp;
-    
+
     tmp = inl(addr);
     udelay(1);
-    
+
     return tmp;
 }
 
@@ -4403,7 +4403,7 @@
     }
     if (i == INFOLEAF_SIZE) {
 	lp->useSROM = FALSE;
-	printk("%s: Cannot find correct chipset for SROM decoding!\n", 
+	printk("%s: Cannot find correct chipset for SROM decoding!\n",
 	                                                          dev->name);
 	return -ENXIO;
     }
@@ -4420,7 +4420,7 @@
 	}
 	if (i == 0) {
 	    lp->useSROM = FALSE;
-	    printk("%s: Cannot find correct PCI device [%d] for SROM decoding!\n", 
+	    printk("%s: Cannot find correct PCI device [%d] for SROM decoding!\n",
 	                                               dev->name, lp->device);
 	    return -ENXIO;
 	}
@@ -4494,9 +4494,9 @@
     if (((lp->ibn != 1) && (lp->ibn != 3) && (lp->ibn != 5)) || !count) return;
 
     if (lp->chipset != DC21140) RESET_SIA;
- 
+
     while (count--) {
-	gep_wr(((lp->chipset==DC21140) && (lp->ibn!=5) ? 
+	gep_wr(((lp->chipset==DC21140) && (lp->ibn!=5) ?
 		                                   *p++ : TWIDDLE(w++)), dev);
 	mdelay(2);                          /* 2ms per action */
     }
@@ -4514,13 +4514,13 @@
 ** unless I implement the DC21041 SROM functions. There's no need
 ** since the existing code will be satisfactory for all boards.
 */
-static int 
+static int
 dc21041_infoleaf(struct net_device *dev)
 {
     return DE4X5_AUTOSENSE_MS;
 }
 
-static int 
+static int
 dc21140_infoleaf(struct net_device *dev)
 {
     struct de4x5_private *lp = netdev_priv(dev);
@@ -4558,7 +4558,7 @@
     return next_tick & ~TIMER_CB;
 }
 
-static int 
+static int
 dc21142_infoleaf(struct net_device *dev)
 {
     struct de4x5_private *lp = netdev_priv(dev);
@@ -4593,7 +4593,7 @@
     return next_tick & ~TIMER_CB;
 }
 
-static int 
+static int
 dc21143_infoleaf(struct net_device *dev)
 {
     struct de4x5_private *lp = netdev_priv(dev);
@@ -4631,7 +4631,7 @@
 ** The compact infoblock is only designed for DC21140[A] chips, so
 ** we'll reuse the dc21140m_autoconf function. Non MII media only.
 */
-static int 
+static int
 compact_infoblock(struct net_device *dev, u_char count, u_char *p)
 {
     struct de4x5_private *lp = netdev_priv(dev);
@@ -4671,7 +4671,7 @@
 /*
 ** This block describes non MII media for the DC21140[A] only.
 */
-static int 
+static int
 type0_infoblock(struct net_device *dev, u_char count, u_char *p)
 {
     struct de4x5_private *lp = netdev_priv(dev);
@@ -4711,7 +4711,7 @@
 
 /* These functions are under construction! */
 
-static int 
+static int
 type1_infoblock(struct net_device *dev, u_char count, u_char *p)
 {
     struct de4x5_private *lp = netdev_priv(dev);
@@ -4750,7 +4750,7 @@
     return dc21140m_autoconf(dev);
 }
 
-static int 
+static int
 type2_infoblock(struct net_device *dev, u_char count, u_char *p)
 {
     struct de4x5_private *lp = netdev_priv(dev);
@@ -4791,7 +4791,7 @@
     return dc2114x_autoconf(dev);
 }
 
-static int 
+static int
 type3_infoblock(struct net_device *dev, u_char count, u_char *p)
 {
     struct de4x5_private *lp = netdev_priv(dev);
@@ -4833,7 +4833,7 @@
     return dc2114x_autoconf(dev);
 }
 
-static int 
+static int
 type4_infoblock(struct net_device *dev, u_char count, u_char *p)
 {
     struct de4x5_private *lp = netdev_priv(dev);
@@ -4878,7 +4878,7 @@
 ** This block type provides information for resetting external devices
 ** (chips) through the General Purpose Register.
 */
-static int 
+static int
 type5_infoblock(struct net_device *dev, u_char count, u_char *p)
 {
     struct de4x5_private *lp = netdev_priv(dev);
@@ -4916,7 +4916,7 @@
     mii_address(phyaddr, ioaddr);          /* PHY address to be accessed     */
     mii_address(phyreg, ioaddr);           /* PHY Register to read           */
     mii_ta(MII_STRD, ioaddr);              /* Turn around time - 2 MDC       */
-    
+
     return mii_rdata(ioaddr);              /* Read data                      */
 }
 
@@ -4931,7 +4931,7 @@
     mii_ta(MII_STWR, ioaddr);              /* Turn around time - 2 MDC       */
     data = mii_swap(data, 16);             /* Swap data bit ordering         */
     mii_wdata(data, 16, ioaddr);           /* Write data                     */
-    
+
     return;
 }
 
@@ -4940,12 +4940,12 @@
 {
     int i;
     s32 tmp = 0;
-    
+
     for (i=0; i<16; i++) {
 	tmp <<= 1;
 	tmp |= getfrom_mii(MII_MRD | MII_RD, ioaddr);
     }
-    
+
     return tmp;
 }
 
@@ -4953,12 +4953,12 @@
 mii_wdata(int data, int len, u_long ioaddr)
 {
     int i;
-    
+
     for (i=0; i<len; i++) {
 	sendto_mii(MII_MWR | MII_WR, data, ioaddr);
 	data >>= 1;
     }
-    
+
     return;
 }
 
@@ -4966,13 +4966,13 @@
 mii_address(u_char addr, u_long ioaddr)
 {
     int i;
-    
+
     addr = mii_swap(addr, 5);
     for (i=0; i<5; i++) {
 	sendto_mii(MII_MWR | MII_WR, addr, ioaddr);
 	addr >>= 1;
     }
-    
+
     return;
 }
 
@@ -4980,12 +4980,12 @@
 mii_ta(u_long rw, u_long ioaddr)
 {
     if (rw == MII_STWR) {
-	sendto_mii(MII_MWR | MII_WR, 1, ioaddr);  
-	sendto_mii(MII_MWR | MII_WR, 0, ioaddr);  
+	sendto_mii(MII_MWR | MII_WR, 1, ioaddr);
+	sendto_mii(MII_MWR | MII_WR, 0, ioaddr);
     } else {
 	getfrom_mii(MII_MRD | MII_RD, ioaddr);        /* Tri-state MDIO */
     }
-    
+
     return;
 }
 
@@ -4993,13 +4993,13 @@
 mii_swap(int data, int len)
 {
     int i, tmp = 0;
-    
+
     for (i=0; i<len; i++) {
 	tmp <<= 1;
 	tmp |= (data & 1);
 	data >>= 1;
     }
-    
+
     return tmp;
 }
 
@@ -5007,13 +5007,13 @@
 sendto_mii(u32 command, int data, u_long ioaddr)
 {
     u32 j;
-    
+
     j = (data & 1) << 17;
     outl(command | j, ioaddr);
     udelay(1);
     outl(command | MII_MDC | j, ioaddr);
     udelay(1);
-    
+
     return;
 }
 
@@ -5024,7 +5024,7 @@
     udelay(1);
     outl(command | MII_MDC, ioaddr);
     udelay(1);
-    
+
     return ((inl(ioaddr) >> 19) & 1);
 }
 
@@ -5085,7 +5085,7 @@
     u_long iobase = dev->base_addr;
     int i, j, k, n, limit=sizeof(phy_info)/sizeof(struct phy_table);
     int id;
-    
+
     lp->active = 0;
     lp->useMII = TRUE;
 
@@ -5094,7 +5094,7 @@
 	lp->phy[lp->active].addr = i;
 	if (i==0) n++;                             /* Count cycles */
 	while (de4x5_reset_phy(dev)<0) udelay(100);/* Wait for reset */
-	id = mii_get_oui(i, DE4X5_MII); 
+	id = mii_get_oui(i, DE4X5_MII);
 	if ((id == 0) || (id == 65535)) continue;  /* Valid ID? */
 	for (j=0; j<limit; j++) {                  /* Search PHY table */
 	    if (id != phy_info[j].id) continue;    /* ID match? */
@@ -5133,7 +5133,7 @@
 	for (k=0; lp->phy[k].id && (k < DE4X5_MAX_PHY); k++) { /*For each PHY*/
 	    mii_wr(MII_CR_RST, MII_CR, lp->phy[k].addr, DE4X5_MII);
 	    while (mii_rd(MII_CR, lp->phy[k].addr, DE4X5_MII) & MII_CR_RST);
-	    
+
 	    de4x5_dbg_mii(dev, k);
 	}
     }
@@ -5148,12 +5148,12 @@
     struct de4x5_private *lp = netdev_priv(dev);
     int i;
     char *pa = lp->setup_frame;
-    
+
     /* Initialise the setup frame */
     if (mode == ALL) {
 	memset(lp->setup_frame, 0, SETUP_FRAME_LEN);
     }
-    
+
     if (lp->setup_f == HASH_PERF) {
 	for (pa=lp->setup_frame+IMPERF_PA_OFFSET, i=0; i<ETH_ALEN; i++) {
 	    *(pa + i) = dev->dev_addr[i];                 /* Host address */
@@ -5170,7 +5170,7 @@
 	    if (i & 0x01) pa += 4;
 	}
     }
-    
+
     return pa;                     /* Points to the next entry */
 }
 
@@ -5178,7 +5178,7 @@
 enable_ast(struct net_device *dev, u32 time_out)
 {
     timeout(dev, (void *)&de4x5_ast, (u_long)dev, time_out);
-    
+
     return;
 }
 
@@ -5186,9 +5186,9 @@
 disable_ast(struct net_device *dev)
 {
     struct de4x5_private *lp = netdev_priv(dev);
-    
+
     del_timer(&lp->timer);
-    
+
     return;
 }
 
@@ -5207,10 +5207,10 @@
     omr |= lp->infoblock_csr6;
     if (omr & OMR_PS) omr |= OMR_HBD;
     outl(omr, DE4X5_OMR);
-    
+
     /* Soft Reset */
     RESET_DE4X5;
-    
+
     /* Restore the GEP - especially for COMPACT and Type 0 Infoblocks */
     if (lp->chipset == DC21140) {
 	gep_wr(lp->cache.gepc, dev);
@@ -5263,21 +5263,21 @@
 {
     struct de4x5_private *lp = netdev_priv(dev);
     int dt;
-    
+
     /* First, cancel any pending timer events */
     del_timer(&lp->timer);
-    
+
     /* Convert msec to ticks */
     dt = (msec * HZ) / 1000;
     if (dt==0) dt=1;
-    
+
     /* Set up timer */
     init_timer(&lp->timer);
     lp->timer.expires = jiffies + dt;
     lp->timer.function = fn;
     lp->timer.data = data;
     add_timer(&lp->timer);
-    
+
     return;
 }
 
@@ -5375,7 +5375,7 @@
 {
     struct de4x5_private *lp = netdev_priv(dev);
     int i;
-    
+
     if (de4x5_debug & DEBUG_OPEN) {
 	printk("%s: de4x5 opening with irq %d\n",dev->name,dev->irq);
 	printk("\tphysical address: ");
@@ -5413,11 +5413,11 @@
 	    }
 	}
 	printk("...0x%8.8x\n", le32_to_cpu(lp->tx_ring[i].buf));
-	printk("Ring size: \nRX: %d\nTX: %d\n", 
-	       (short)lp->rxRingSize, 
-	       (short)lp->txRingSize); 
+	printk("Ring size: \nRX: %d\nTX: %d\n",
+	       (short)lp->rxRingSize,
+	       (short)lp->txRingSize);
     }
-    
+
     return;
 }
 
@@ -5426,7 +5426,7 @@
 {
     struct de4x5_private *lp = netdev_priv(dev);
     u_long iobase = dev->base_addr;
-    
+
     if (de4x5_debug & DEBUG_MII) {
 	printk("\nMII device address: %d\n", lp->phy[k].addr);
 	printk("MII CR:  %x\n",mii_rd(MII_CR,lp->phy[k].addr,DE4X5_MII));
@@ -5445,7 +5445,7 @@
 	    printk("MII 20:  %x\n",mii_rd(0x14,lp->phy[k].addr,DE4X5_MII));
 	}
     }
-    
+
     return;
 }
 
@@ -5453,17 +5453,17 @@
 de4x5_dbg_media(struct net_device *dev)
 {
     struct de4x5_private *lp = netdev_priv(dev);
-    
+
     if (lp->media != lp->c_media) {
 	if (de4x5_debug & DEBUG_MEDIA) {
 	    printk("%s: media is %s%s\n", dev->name,
 		   (lp->media == NC  ? "unconnected, link down or incompatible connection" :
 		    (lp->media == TP  ? "TP" :
 		     (lp->media == ANS ? "TP/Nway" :
-		      (lp->media == BNC ? "BNC" : 
-		       (lp->media == AUI ? "AUI" : 
-			(lp->media == BNC_AUI ? "BNC/AUI" : 
-			 (lp->media == EXT_SIA ? "EXT SIA" : 
+		      (lp->media == BNC ? "BNC" :
+		       (lp->media == AUI ? "AUI" :
+			(lp->media == BNC_AUI ? "BNC/AUI" :
+			 (lp->media == EXT_SIA ? "EXT SIA" :
 			  (lp->media == _100Mb  ? "100Mb/s" :
 			   (lp->media == _10Mb   ? "10Mb/s" :
 			    "???"
@@ -5471,7 +5471,7 @@
 	}
 	lp->c_media = lp->media;
     }
-    
+
     return;
 }
 
@@ -5554,7 +5554,7 @@
 	u32 lval[36];
     } tmp;
     u_long flags = 0;
-    
+
     switch(ioc->cmd) {
     case DE4X5_GET_HWADDR:           /* Get the hardware address */
 	ioc->len = ETH_ALEN;
@@ -5575,7 +5575,7 @@
 	}
 	build_setup_frame(dev, PHYS_ADDR_ONLY);
 	/* Set up the descriptor and give ownership to the card */
-	load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET | 
+	load_packet(dev, lp->setup_frame, TD_IC | PERFECT_F | TD_SET |
 		                                       SETUP_FRAME_LEN, (struct sk_buff *)1);
 	lp->tx_new = (++lp->tx_new) % lp->txRingSize;
 	outl(POLL_DEMAND, DE4X5_TPD);                /* Start the TX */
@@ -5617,8 +5617,8 @@
 	spin_lock_irqsave(&lp->lock, flags);
 	memcpy(&statbuf, &lp->pktStats, ioc->len);
 	spin_unlock_irqrestore(&lp->lock, flags);
-	if (copy_to_user(ioc->data, &statbuf, ioc->len)) 
-		return -EFAULT; 
+	if (copy_to_user(ioc->data, &statbuf, ioc->len))
+		return -EFAULT;
 	break;
     }
     case DE4X5_CLR_STATS:            /* Zero out the driver statistics */
@@ -5652,9 +5652,9 @@
 	ioc->len = j;
 	if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
 	break;
-	
+
 #define DE4X5_DUMP              0x0f /* Dump the DE4X5 Status */
-/*	
+/*
       case DE4X5_DUMP:
 	j = 0;
 	tmp.addr[j++] = dev->irq;
@@ -5664,7 +5664,7 @@
 	tmp.addr[j++] = lp->rxRingSize;
 	tmp.lval[j>>2] = (long)lp->rx_ring; j+=4;
 	tmp.lval[j>>2] = (long)lp->tx_ring; j+=4;
-	
+
 	for (i=0;i<lp->rxRingSize-1;i++){
 	    if (i < 3) {
 		tmp.lval[j>>2] = (long)&lp->rx_ring[i].status; j+=4;
@@ -5677,7 +5677,7 @@
 	    }
 	}
 	tmp.lval[j>>2] = (long)&lp->tx_ring[i].status; j+=4;
-	
+
 	for (i=0;i<lp->rxRingSize-1;i++){
 	    if (i < 3) {
 		tmp.lval[j>>2] = (s32)le32_to_cpu(lp->rx_ring[i].buf); j+=4;
@@ -5690,14 +5690,14 @@
 	    }
 	}
 	tmp.lval[j>>2] = (s32)le32_to_cpu(lp->tx_ring[i].buf); j+=4;
-	
+
 	for (i=0;i<lp->rxRingSize;i++){
 	    tmp.lval[j>>2] = le32_to_cpu(lp->rx_ring[i].status); j+=4;
 	}
 	for (i=0;i<lp->txRingSize;i++){
 	    tmp.lval[j>>2] = le32_to_cpu(lp->tx_ring[i].status); j+=4;
 	}
-	
+
 	tmp.lval[j>>2] = inl(DE4X5_BMR);  j+=4;
 	tmp.lval[j>>2] = inl(DE4X5_TPD);  j+=4;
 	tmp.lval[j>>2] = inl(DE4X5_RPD);  j+=4;
@@ -5706,18 +5706,18 @@
 	tmp.lval[j>>2] = inl(DE4X5_STS);  j+=4;
 	tmp.lval[j>>2] = inl(DE4X5_OMR);  j+=4;
 	tmp.lval[j>>2] = inl(DE4X5_IMR);  j+=4;
-	tmp.lval[j>>2] = lp->chipset; j+=4; 
+	tmp.lval[j>>2] = lp->chipset; j+=4;
 	if (lp->chipset == DC21140) {
 	    tmp.lval[j>>2] = gep_rd(dev);  j+=4;
 	} else {
 	    tmp.lval[j>>2] = inl(DE4X5_SISR); j+=4;
 	    tmp.lval[j>>2] = inl(DE4X5_SICR); j+=4;
 	    tmp.lval[j>>2] = inl(DE4X5_STRR); j+=4;
-	    tmp.lval[j>>2] = inl(DE4X5_SIGR); j+=4; 
+	    tmp.lval[j>>2] = inl(DE4X5_SIGR); j+=4;
 	}
-	tmp.lval[j>>2] = lp->phy[lp->active].id; j+=4; 
+	tmp.lval[j>>2] = lp->phy[lp->active].id; j+=4;
 	if (lp->phy[lp->active].id && (!lp->useSROM || lp->useMII)) {
-	    tmp.lval[j>>2] = lp->active; j+=4; 
+	    tmp.lval[j>>2] = lp->active; j+=4;
 	    tmp.lval[j>>2]=mii_rd(MII_CR,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
 	    tmp.lval[j>>2]=mii_rd(MII_SR,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
 	    tmp.lval[j>>2]=mii_rd(MII_ID0,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
@@ -5734,10 +5734,10 @@
 		tmp.lval[j>>2]=mii_rd(0x14,lp->phy[lp->active].addr,DE4X5_MII); j+=4;
 	    }
 	}
-	
+
 	tmp.addr[j++] = lp->txRingSize;
 	tmp.addr[j++] = netif_queue_stopped(dev);
-	
+
 	ioc->len = j;
 	if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
 	break;
@@ -5746,7 +5746,7 @@
     default:
 	return -EOPNOTSUPP;
     }
-    
+
     return status;
 }
 
diff --git a/drivers/net/tulip/de4x5.h b/drivers/net/tulip/de4x5.h
index ad37a40..57226e5 100644
--- a/drivers/net/tulip/de4x5.h
+++ b/drivers/net/tulip/de4x5.h
@@ -38,11 +38,11 @@
 /*
 ** EISA Register Address Map
 */
-#define EISA_ID      iobase+0x0c80   /* EISA ID Registers */ 
-#define EISA_ID0     iobase+0x0c80   /* EISA ID Register 0 */ 
-#define EISA_ID1     iobase+0x0c81   /* EISA ID Register 1 */ 
-#define EISA_ID2     iobase+0x0c82   /* EISA ID Register 2 */ 
-#define EISA_ID3     iobase+0x0c83   /* EISA ID Register 3 */ 
+#define EISA_ID      iobase+0x0c80   /* EISA ID Registers */
+#define EISA_ID0     iobase+0x0c80   /* EISA ID Register 0 */
+#define EISA_ID1     iobase+0x0c81   /* EISA ID Register 1 */
+#define EISA_ID2     iobase+0x0c82   /* EISA ID Register 2 */
+#define EISA_ID3     iobase+0x0c83   /* EISA ID Register 3 */
 #define EISA_CR      iobase+0x0c84   /* EISA Control Register */
 #define EISA_REG0    iobase+0x0c88   /* EISA Configuration Register 0 */
 #define EISA_REG1    iobase+0x0c89   /* EISA Configuration Register 1 */
@@ -1008,8 +1008,8 @@
 	unsigned char  __user *data;       /* Pointer to the data buffer */
 };
 
-/* 
-** Recognised commands for the driver 
+/*
+** Recognised commands for the driver
 */
 #define DE4X5_GET_HWADDR	0x01 /* Get the hardware address */
 #define DE4X5_SET_HWADDR	0x02 /* Set the hardware address */
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index 74e9075..ba5b112 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -50,7 +50,7 @@
     forget to unmap PCI mapped skbs.
 
     Alan Cox <alan@redhat.com>
-    Added new PCI identifiers provided by Clear Zhang at ALi 
+    Added new PCI identifiers provided by Clear Zhang at ALi
     for their 1563 ethernet device.
 
     TODO
diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c
index fbd9ab6..5ffbd5b 100644
--- a/drivers/net/tulip/eeprom.c
+++ b/drivers/net/tulip/eeprom.c
@@ -96,11 +96,11 @@
  * tulip_build_fake_mediatable - Build a fake mediatable entry.
  * @tp: Ptr to the tulip private data.
  *
- * Some cards like the 3x5 HSC cards (J3514A) do not have a standard 
+ * Some cards like the 3x5 HSC cards (J3514A) do not have a standard
  * srom and can not be handled under the fixup routine.  These cards
- * still need a valid mediatable entry for correct csr12 setup and 
+ * still need a valid mediatable entry for correct csr12 setup and
  * mii handling.
- * 
+ *
  * Since this is currently a parisc-linux specific function, the
  * #ifdef __hppa__ should completely optimize this function away for
  * non-parisc hardware.
@@ -140,7 +140,7 @@
 		tp->flags |= HAS_PHY_IRQ;
 		tp->csr12_shadow = -1;
 	}
-#endif 
+#endif
 }
 
 void __devinit tulip_parse_eeprom(struct net_device *dev)
diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c
index bb35581..da4f759 100644
--- a/drivers/net/tulip/interrupt.c
+++ b/drivers/net/tulip/interrupt.c
@@ -139,22 +139,22 @@
 		}
                /* Acknowledge current RX interrupt sources. */
                iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);
- 
- 
+
+
                /* If we own the next entry, it is a new packet. Send it up. */
                while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
                        s32 status = le32_to_cpu(tp->rx_ring[entry].status);
- 
- 
+
+
                        if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
                                break;
- 
+
                        if (tulip_debug > 5)
                                printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
                                       dev->name, entry, status);
                        if (--rx_work_limit < 0)
                                goto not_done;
- 
+
                        if ((status & 0x38008300) != 0x0300) {
                                if ((status & 0x38000300) != 0x0300) {
                                 /* Ingore earlier buffers. */
@@ -180,7 +180,7 @@
                                /* Omit the four octet CRC from the length. */
                                short pkt_len = ((status >> 16) & 0x7ff) - 4;
                                struct sk_buff *skb;
-  
+
 #ifndef final_version
                                if (pkt_len > 1518) {
                                        printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
@@ -213,7 +213,7 @@
                                } else {        /* Pass up the skb already on the Rx ring. */
                                        char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
                                                             pkt_len);
-  
+
 #ifndef final_version
                                        if (tp->rx_buffers[entry].mapping !=
                                            le32_to_cpu(tp->rx_ring[entry].buffer1)) {
@@ -225,17 +225,17 @@
                                                       skb->head, temp);
                                        }
 #endif
-  
+
                                        pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
                                                         PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
-  
+
                                        tp->rx_buffers[entry].skb = NULL;
                                        tp->rx_buffers[entry].mapping = 0;
                                }
                                skb->protocol = eth_type_trans(skb, dev);
-  
+
                                netif_receive_skb(skb);
- 
+
                                dev->last_rx = jiffies;
                                tp->stats.rx_packets++;
                                tp->stats.rx_bytes += pkt_len;
@@ -245,12 +245,12 @@
                        entry = (++tp->cur_rx) % RX_RING_SIZE;
                        if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
                                tulip_refill_rx(dev);
- 
+
                 }
- 
+
                /* New ack strategy... irq does not ack Rx any longer
                   hopefully this helps */
- 
+
                /* Really bad things can happen here... If new packet arrives
                 * and an irq arrives (tx or just due to occasionally unset
                 * mask), it will be acked by irq handler, but new thread
@@ -259,28 +259,28 @@
                 * tomorrow (night 011029). If it will not fail, we won
                 * finally: amount of IO did not increase at all. */
        } while ((ioread32(tp->base_addr + CSR5) & RxIntr));
- 
+
 done:
- 
+
  #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
-  
+
           /* We use this simplistic scheme for IM. It's proven by
              real life installations. We can have IM enabled
-            continuesly but this would cause unnecessary latency. 
-            Unfortunely we can't use all the NET_RX_* feedback here. 
-            This would turn on IM for devices that is not contributing 
-            to backlog congestion with unnecessary latency. 
-  
+            continuously but this would cause unnecessary latency.
+            Unfortunately we can't use all the NET_RX_* feedback here.
+            This would turn on IM for devices that are not contributing
+            to backlog congestion with unnecessary latency.
+
              We monitor the the device RX-ring and have:
-  
+
              HW Interrupt Mitigation either ON or OFF.
-  
-            ON:  More then 1 pkt received (per intr.) OR we are dropping 
+
+            ON:  More than 1 pkt received (per intr.) OR we are dropping
              OFF: Only 1 pkt received
-            
+
              Note. We only use min and max (0, 15) settings from mit_table */
-  
-  
+
+
           if( tp->flags &  HAS_INTR_MITIGATION) {
                  if( received > 1 ) {
                          if( ! tp->mit_on ) {
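A rough, self-contained sketch of the ON/OFF hysteresis described in the mitigation comment above (the "or we are dropping" case is omitted); hw_set_mitigation() is a hypothetical stand-in for the chip register write of a mit_table setting, not the driver's real helper.

static int mit_on;			/* mirrors tp->mit_on */

static void hw_set_mitigation(int on)	/* hypothetical stub for the CSR write */
{
	(void)on;			/* real code would program a mit_table value */
}

static void update_mitigation(int received)
{
	if (received > 1 && !mit_on) {		/* burst seen: enable mitigation */
		mit_on = 1;
		hw_set_mitigation(1);
	} else if (received <= 1 && mit_on) {	/* single packet: disable it */
		mit_on = 0;
		hw_set_mitigation(0);
	}
}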
@@ -297,20 +297,20 @@
           }
 
 #endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */
- 
+
          dev->quota -= received;
          *budget -= received;
- 
+
          tulip_refill_rx(dev);
-         
+
          /* If RX ring is not full we are out of memory. */
          if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) goto oom;
- 
+
          /* Remove us from polling list and enable RX intr. */
- 
+
          netif_rx_complete(dev);
          iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);
- 
+
          /* The last op happens after poll completion. Which means the following:
           * 1. it can race with disabling irqs in irq handler
           * 2. it can race with dise/enabling irqs in other poll threads
@@ -321,9 +321,9 @@
           * due to races in masking and due to too late acking of already
           * processed irqs. But it must not result in losing events.
           */
- 
+
          return 0;
- 
+
  not_done:
          if (!received) {
 
@@ -331,29 +331,29 @@
          }
          dev->quota -= received;
          *budget -= received;
- 
+
          if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
              tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
                  tulip_refill_rx(dev);
- 
+
          if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) goto oom;
- 
+
          return 1;
- 
- 
+
+
  oom:    /* Executed with RX ints disabled */
- 
-         
+
+
          /* Start timer, stop polling, but do not enable rx interrupts. */
          mod_timer(&tp->oom_timer, jiffies+1);
-       
+
          /* Think: timer_pending() was an explicit signature of bug.
           * Timer can be pending now but fired and completed
           * before we did netif_rx_complete(). See? We would lose it. */
- 
+
          /* remove ourselves from the polling list */
          netif_rx_complete(dev);
- 
+
          return 0;
 }
 
@@ -521,9 +521,9 @@
 	/* Let's see whether the interrupt really is for us */
 	csr5 = ioread32(ioaddr + CSR5);
 
-        if (tp->flags & HAS_PHY_IRQ) 
+        if (tp->flags & HAS_PHY_IRQ)
 	        handled = phy_interrupt (dev);
-    
+
 	if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
 		return IRQ_RETVAL(handled);
 
@@ -538,17 +538,17 @@
 			/* Mask RX intrs and add the device to poll list. */
 			iowrite32(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7);
 			netif_rx_schedule(dev);
-			
+
 			if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
                                break;
 		}
-		
+
                /* Acknowledge the interrupt sources we handle here ASAP
                   the poll function does Rx and RxNoBuf acking */
-		
+
 		iowrite32(csr5 & 0x0001ff3f, ioaddr + CSR5);
 
-#else 
+#else
 		/* Acknowledge all of the current interrupt sources ASAP. */
 		iowrite32(csr5 & 0x0001ffff, ioaddr + CSR5);
 
@@ -559,11 +559,11 @@
 		}
 
 #endif /*  CONFIG_TULIP_NAPI */
-		
+
 		if (tulip_debug > 4)
 			printk(KERN_DEBUG "%s: interrupt  csr5=%#8.8x new csr5=%#8.8x.\n",
 			       dev->name, csr5, ioread32(ioaddr + CSR5));
-		
+
 
 		if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
 			unsigned int dirty_tx;
@@ -737,17 +737,17 @@
 #ifdef CONFIG_TULIP_NAPI
 		if (rxd)
 			csr5 &= ~RxPollInt;
-	} while ((csr5 & (TxNoBuf | 
-			  TxDied | 
-			  TxIntr | 
+	} while ((csr5 & (TxNoBuf |
+			  TxDied |
+			  TxIntr |
 			  TimerInt |
 			  /* Abnormal intr. */
-			  RxDied | 
-			  TxFIFOUnderflow | 
-			  TxJabber | 
-			  TPLnkFail |  
+			  RxDied |
+			  TxFIFOUnderflow |
+			  TxJabber |
+			  TPLnkFail |
 			  SytemError )) != 0);
-#else 
+#else
 	} while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);
 
 	tulip_refill_rx(dev);
diff --git a/drivers/net/tulip/media.c b/drivers/net/tulip/media.c
index f53396f..e9bc2a9 100644
--- a/drivers/net/tulip/media.c
+++ b/drivers/net/tulip/media.c
@@ -140,7 +140,7 @@
 		spin_unlock_irqrestore(&tp->mii_lock, flags);
 		return;
 	}
-		
+
 	/* Establish sync by sending 32 logic ones. */
 	for (i = 32; i >= 0; i--) {
 		iowrite32(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
diff --git a/drivers/net/tulip/tulip.h b/drivers/net/tulip/tulip.h
index 05d2d96..d25020d 100644
--- a/drivers/net/tulip/tulip.h
+++ b/drivers/net/tulip/tulip.h
@@ -259,7 +259,7 @@
    There are no ill effects from too-large receive rings. */
 
 #define TX_RING_SIZE	32
-#define RX_RING_SIZE	128 
+#define RX_RING_SIZE	128
 #define MEDIA_MASK     31
 
 #define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer. */
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index c67c9125..b3cf11d 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -1224,7 +1224,7 @@
  *	Chips that have the MRM/reserved bit quirk and the burst quirk. That
  *	is the DM910X and the on chip ULi devices
  */
- 
+
 static int tulip_uli_dm_quirk(struct pci_dev *pdev)
 {
 	if (pdev->vendor == 0x1282 && pdev->device == 0x9102)
@@ -1297,7 +1297,7 @@
 	 */
 
 	/* 1. Intel Saturn. Switch to 8 long words burst, 8 long word cache
-	      aligned.  Aries might need this too. The Saturn errata are not 
+	      aligned.  Aries might need this too. The Saturn errata are not
 	      pretty reading but thankfully it's an old 486 chipset.
 
 	   2. The dreaded SiS496 486 chipset. Same workaround as Intel
@@ -1500,7 +1500,7 @@
                }
 #endif
 #ifdef CONFIG_MIPS_COBALT
-               if ((pdev->bus->number == 0) && 
+               if ((pdev->bus->number == 0) &&
                    ((PCI_SLOT(pdev->devfn) == 7) ||
                     (PCI_SLOT(pdev->devfn) == 12))) {
                        /* Cobalt MAC address in first EEPROM locations. */
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index 238e9c7..8b3a28f 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -9,7 +9,7 @@
     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     GNU General Public License for more details.
 
-    
+
 */
 
 #define DRV_NAME	"uli526x"
@@ -185,7 +185,7 @@
 
 	/* NIC SROM data */
 	unsigned char srom[128];
-	u8 init;	
+	u8 init;
 };
 
 enum uli526x_offsets {
@@ -258,7 +258,7 @@
 	struct uli526x_board_info *db;	/* board information structure */
 	struct net_device *dev;
 	int i, err;
-	
+
 	ULI526X_DBUG(0, "uli526x_init_one()", 0);
 
 	if (!printed_version++)
@@ -316,7 +316,7 @@
 		err = -ENOMEM;
 		goto err_out_nomem;
 	}
-	
+
 	db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
 	db->first_tx_desc_dma = db->desc_pool_dma_ptr;
 	db->buf_pool_start = db->buf_pool_ptr;
@@ -324,14 +324,14 @@
 
 	db->chip_id = ent->driver_data;
 	db->ioaddr = pci_resource_start(pdev, 0);
-	
+
 	db->pdev = pdev;
 	db->init = 1;
-	
+
 	dev->base_addr = db->ioaddr;
 	dev->irq = pdev->irq;
 	pci_set_drvdata(pdev, dev);
-	
+
 	/* Register some necessary functions */
 	dev->open = &uli526x_open;
 	dev->hard_start_xmit = &uli526x_start_xmit;
@@ -341,7 +341,7 @@
 	dev->ethtool_ops = &netdev_ethtool_ops;
 	spin_lock_init(&db->lock);
 
-		
+
 	/* read 64 word srom data */
 	for (i = 0; i < 64; i++)
 		((u16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db->ioaddr, i));
@@ -374,7 +374,7 @@
 		goto err_out_res;
 
 	printk(KERN_INFO "%s: ULi M%04lx at pci%s,",dev->name,ent->driver_data >> 16,pci_name(pdev));
-	
+
 	for (i = 0; i < 6; i++)
 		printk("%c%02x", i ? ':' : ' ', dev->dev_addr[i]);
 	printk(", irq %d.\n", dev->irq);
@@ -389,7 +389,7 @@
 	if(db->desc_pool_ptr)
 		pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
 			db->desc_pool_ptr, db->desc_pool_dma_ptr);
-			
+
 	if(db->buf_pool_ptr != NULL)
 		pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
 			db->buf_pool_ptr, db->buf_pool_dma_ptr);
@@ -433,7 +433,7 @@
 {
 	int ret;
 	struct uli526x_board_info *db = netdev_priv(dev);
-	
+
 	ULI526X_DBUG(0, "uli526x_open", 0);
 
 	ret = request_irq(dev->irq, &uli526x_interrupt, SA_SHIRQ, dev->name, dev);
@@ -454,7 +454,7 @@
 	/* CR6 operation mode decision */
 	db->cr6_data |= ULI526X_TXTH_256;
 	db->cr0_data = CR0_DEFAULT;
-	
+
 	/* Initialize ULI526X board */
 	uli526x_init(dev);
 
@@ -604,7 +604,7 @@
 	/* Restore CR7 to enable interrupt */
 	spin_unlock_irqrestore(&db->lock, flags);
 	outl(db->cr7_data, dev->base_addr + DCR7);
-	
+
 	/* free this SKB */
 	dev_kfree_skb(skb);
 
@@ -782,7 +782,7 @@
 	struct sk_buff *skb;
 	int rxlen;
 	u32 rdes0;
-	
+
 	rxptr = db->rx_ready_ptr;
 
 	while(db->rx_avail_cnt) {
@@ -821,7 +821,7 @@
 			if ( !(rdes0 & 0x8000) ||
 				((db->cr6_data & CR6_PM) && (rxlen>6)) ) {
 				skb = rxptr->rx_skb_ptr;
-		
+
 				/* Good packet, send to upper layer */
 				/* Shorst packet used new SKB */
 				if ( (rxlen < RX_COPY_SIZE) &&
@@ -841,7 +841,7 @@
 				dev->last_rx = jiffies;
 				db->stats.rx_packets++;
 				db->stats.rx_bytes += rxlen;
-				
+
 			} else {
 				/* Reuse SKB buffer when the packet is error */
 				ULI526X_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
@@ -911,7 +911,7 @@
 	                   SUPPORTED_100baseT_Full |
 	                   SUPPORTED_Autoneg |
 	                   SUPPORTED_MII);
-		
+
 	ecmd->advertising = (ADVERTISED_10baseT_Half |
 	                   ADVERTISED_10baseT_Full |
 	                   ADVERTISED_100baseT_Half |
@@ -924,13 +924,13 @@
 	ecmd->phy_address = db->phy_addr;
 
 	ecmd->transceiver = XCVR_EXTERNAL;
-		
+
 	ecmd->speed = 10;
 	ecmd->duplex = DUPLEX_HALF;
-	
+
 	if(db->op_mode==ULI526X_100MHF || db->op_mode==ULI526X_100MFD)
 	{
-		ecmd->speed = 100;               
+		ecmd->speed = 100;
 	}
 	if(db->op_mode==ULI526X_10MFD || db->op_mode==ULI526X_100MFD)
 	{
@@ -939,11 +939,11 @@
 	if(db->link_failed)
 	{
 		ecmd->speed = -1;
-		ecmd->duplex = -1;	
+		ecmd->duplex = -1;
 	}
-	
+
 	if (db->media_mode & ULI526X_AUTO)
-	{	
+	{
 		ecmd->autoneg = AUTONEG_ENABLE;
 	}
 }
@@ -964,15 +964,15 @@
 
 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) {
 	struct uli526x_board_info *np = netdev_priv(dev);
-	
+
 	ULi_ethtool_gset(np, cmd);
-	
+
 	return 0;
 }
 
 static u32 netdev_get_link(struct net_device *dev) {
 	struct uli526x_board_info *np = netdev_priv(dev);
-		
+
 	if(np->link_failed)
 		return 0;
 	else
@@ -1005,11 +1005,11 @@
 	struct uli526x_board_info *db = netdev_priv(dev);
  	unsigned long flags;
 	u8 TmpSpeed=10;
-	
+
 	//ULI526X_DBUG(0, "uli526x_timer()", 0);
 	spin_lock_irqsave(&db->lock, flags);
 
-	
+
 	/* Dynamic reset ULI526X : system error or transmit time-out */
 	tmp_cr8 = inl(db->ioaddr + DCR8);
 	if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
@@ -1021,9 +1021,9 @@
 	/* TX polling kick monitor */
 	if ( db->tx_packet_cnt &&
 	     time_after(jiffies, dev->trans_start + ULI526X_TX_KICK) ) {
-		outl(0x1, dev->base_addr + DCR1);   // Tx polling again 
+		outl(0x1, dev->base_addr + DCR1);   // Tx polling again
 
-		// TX Timeout 
+		// TX Timeout
 		if ( time_after(jiffies, dev->trans_start + ULI526X_TX_TIMEOUT) ) {
 			db->reset_TXtimeout++;
 			db->wait_reset = 1;
@@ -1073,7 +1073,7 @@
 				uli526x_sense_speed(db) )
 				db->link_failed = 1;
 			uli526x_process_mode(db);
-			
+
 			if(db->link_failed==0)
 			{
 				if(db->op_mode==ULI526X_100MHF || db->op_mode==ULI526X_100MFD)
@@ -1404,7 +1404,7 @@
 	phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
 
 	if ( (phy_mode & 0x24) == 0x24 ) {
-		
+
 		phy_mode = ((phy_read(db->ioaddr, db->phy_addr, 5, db->chip_id) & 0x01e0)<<7);
 		if(phy_mode&0x8000)
 			phy_mode = 0x8000;
@@ -1414,7 +1414,7 @@
 			phy_mode = 0x2000;
 		else
 			phy_mode = 0x1000;
-		
+
 		/* printk(DRV_NAME ": Phy_mode %x ",phy_mode); */
 		switch (phy_mode) {
 		case 0x1000: db->op_mode = ULI526X_10MHF; break;
@@ -1442,7 +1442,7 @@
 static void uli526x_set_phyxcer(struct uli526x_board_info *db)
 {
 	u16 phy_reg;
-	
+
 	/* Phyxcer capability setting */
 	phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0;
 
@@ -1457,7 +1457,7 @@
 		case ULI526X_100MHF: phy_reg |= 0x80; break;
 		case ULI526X_100MFD: phy_reg |= 0x100; break;
 		}
-		
+
 	}
 
   	/* Write new capability to Phyxcer Reg4 */
@@ -1556,7 +1556,7 @@
 	/* Write a word data to PHY controller */
 	for ( i = 0x8000; i > 0; i >>= 1)
 		phy_write_1bit(ioaddr, phy_data & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
-	
+
 }
 
 
@@ -1574,7 +1574,7 @@
 		return phy_readby_cr10(iobase, phy_addr, offset);
 	/* M5261/M5263 Chip */
 	ioaddr = iobase + DCR9;
-	
+
 	/* Send 33 synchronization clock to Phy controller */
 	for (i = 0; i < 35; i++)
 		phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
@@ -1610,7 +1610,7 @@
 static u16 phy_readby_cr10(unsigned long iobase, u8 phy_addr, u8 offset)
 {
 	unsigned long ioaddr,cr10_value;
-	
+
 	ioaddr = iobase + DCR10;
 	cr10_value = phy_addr;
 	cr10_value = (cr10_value<<5) + offset;
@@ -1629,7 +1629,7 @@
 static void phy_writeby_cr10(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data)
 {
 	unsigned long ioaddr,cr10_value;
-	
+
 	ioaddr = iobase + DCR10;
 	cr10_value = phy_addr;
 	cr10_value = (cr10_value<<5) + offset;
@@ -1659,7 +1659,7 @@
 static u16 phy_read_1bit(unsigned long ioaddr, u32 chip_id)
 {
 	u16 phy_data;
-	
+
 	outl(0x50000 , ioaddr);
 	udelay(1);
 	phy_data = ( inl(ioaddr) >> 19 ) & 0x1;
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index 136a70c..64ecf929 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -38,12 +38,12 @@
 			Copyright (C) 2001 Manfred Spraul
   	* ethtool support (jgarzik)
 	* Replace some MII-related magic numbers with constants (jgarzik)
-  
+
 	TODO:
 	* enable pci_power_off
 	* Wake-On-LAN
 */
-  
+
 #define DRV_NAME	"winbond-840"
 #define DRV_VERSION	"1.01-d"
 #define DRV_RELDATE	"Nov-17-2001"
@@ -57,7 +57,7 @@
 c-help-symbol: CONFIG_WINBOND_840
 c-help: This driver is for the Winbond W89c840 chip.  It also works with
 c-help: the TX9882 chip on the Compex RL100-ATX board.
-c-help: More specific information and updates are available from 
+c-help: More specific information and updates are available from
 c-help: http://www.scyld.com/network/drivers.html
 */
 
@@ -207,7 +207,7 @@
 
 */
 
-
+
 
 /*
   PCI probe table.
@@ -374,7 +374,7 @@
 static struct ethtool_ops netdev_ethtool_ops;
 static int  netdev_close(struct net_device *dev);
 
-
+
 
 static int __devinit w840_probe1 (struct pci_dev *pdev,
 				  const struct pci_device_id *ent)
@@ -434,7 +434,7 @@
 	np->mii_if.mdio_read = mdio_read;
 	np->mii_if.mdio_write = mdio_write;
 	np->base_addr = ioaddr;
-	
+
 	pci_set_drvdata(pdev, dev);
 
 	if (dev->mem_start)
@@ -510,7 +510,7 @@
 	return -ENODEV;
 }
 
-
+
 /* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.  These are
    often serial bit streams generated by the host processor.
    The example below is for the common 93c46 EEPROM, 64 16 bit words. */
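As a loose sketch of the serial bit-stream pattern mentioned above: command/address bits are clocked out one at a time and a 16-bit word is clocked back in. The pin helpers here are hypothetical stand-ins, not the driver's actual EEPROM/MDIO register accesses.

static void set_dio(int bit) { (void)bit; }	/* drive the data line (stub) */
static void clk_pulse(void) { }			/* raise then lower the clock (stub) */
static int  read_dio(void) { return 0; }	/* sample the data line (stub) */

static int serial_read16(int cmd, int cmd_len)
{
	int i, value = 0;

	for (i = cmd_len - 1; i >= 0; i--) {	/* shift the command/address out, MSB first */
		set_dio((cmd >> i) & 1);
		clk_pulse();
	}
	for (i = 0; i < 16; i++) {		/* then shift the 16 data bits back in */
		clk_pulse();
		value = (value << 1) | read_dio();
	}
	return value;
}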
@@ -660,7 +660,7 @@
 	return;
 }
 
-
+
 static int netdev_open(struct net_device *dev)
 {
 	struct netdev_private *np = netdev_priv(dev);
@@ -731,7 +731,7 @@
 				dev->name, np->phys[0]);
 		netif_carrier_on(dev);
 	}
-	
+
 	if ((np->mii & ~0xf) == MII_DAVICOM_DM9101) {
 		/* If the link partner doesn't support autonegotiation
 		 * the MII detects it's abilities with the "parallel detection".
@@ -761,7 +761,7 @@
 		result |= 0x20000000;
 	if (result != np->csr6 && debug)
 		printk(KERN_INFO "%s: Setting %dMBit-%s-duplex based on MII#%d\n",
-				 dev->name, fasteth ? 100 : 10, 
+				 dev->name, fasteth ? 100 : 10,
 			   	duplex ? "full" : "half", np->phys[0]);
 	return result;
 }
@@ -947,7 +947,7 @@
 	iowrite32(i, ioaddr + PCIBusCfg);
 
 	np->csr6 = 0;
-	/* 128 byte Tx threshold; 
+	/* 128 byte Tx threshold;
 		Transmit on; Receive on; */
 	update_csr6(dev, 0x00022002 | update_link(dev) | __set_rx_mode(dev));
 
@@ -1584,7 +1584,7 @@
 static void __devexit w840_remove1 (struct pci_dev *pdev)
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
-	
+
 	if (dev) {
 		struct netdev_private *np = netdev_priv(dev);
 		unregister_netdev(dev);
@@ -1640,7 +1640,7 @@
 
 		spin_unlock_wait(&dev->xmit_lock);
 		synchronize_irq(dev->irq);
-	
+
 		np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
 
 		/* no more hardware accesses behind this line. */
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index 5634410..63c2175 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -1,11 +1,11 @@
 /*
- * xircom_cb: A driver for the (tulip-like) Xircom Cardbus ethernet cards 
+ * xircom_cb: A driver for the (tulip-like) Xircom Cardbus ethernet cards
  *
  * This software is (C) by the respective authors, and licensed under the GPL
  * License.
  *
  * Written by Arjan van de Ven for Red Hat, Inc.
- * Based on work by Jeff Garzik, Doug Ledford and Donald Becker 
+ * Based on work by Jeff Garzik, Doug Ledford and Donald Becker
  *
  *  	This software may be used and distributed according to the terms
  *      of the GNU General Public License, incorporated herein by reference.
@@ -93,7 +93,7 @@
 
 	unsigned long io_port;
 	int open;
-	
+
 	/* transmit_used is the rotating counter that indicates which transmit
 	   descriptor has to be used next */
 	int transmit_used;
@@ -153,10 +153,10 @@
 MODULE_DEVICE_TABLE(pci, xircom_pci_table);
 
 static struct pci_driver xircom_ops = {
-	.name		= "xircom_cb", 
-	.id_table	= xircom_pci_table, 
-	.probe		= xircom_probe, 
-	.remove		= xircom_remove, 
+	.name		= "xircom_cb",
+	.id_table	= xircom_pci_table,
+	.probe		= xircom_probe,
+	.remove		= xircom_remove,
 	.suspend =NULL,
 	.resume =NULL
 };
@@ -174,7 +174,7 @@
 			buffer[i2++]='1';
 		else
 			buffer[i2++]='0';
-		if ((i&3)==0) 
+		if ((i&3)==0)
 			buffer[i2++]=' ';
 	}
 	printk("%s\n",buffer);
@@ -196,10 +196,10 @@
 
 /* xircom_probe is the code that gets called on device insertion.
    it sets up the hardware and registers the device to the networklayer.
-   
+
    TODO: Send 1 or 2 "dummy" packets here as the card seems to discard the
          first two packets that get send, and pump hates that.
-         
+
  */
 static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
@@ -209,7 +209,7 @@
 	unsigned long flags;
 	unsigned short tmp16;
 	enter("xircom_probe");
-	
+
 	/* First do the PCI initialisation */
 
 	if (pci_enable_device(pdev))
@@ -217,24 +217,24 @@
 
 	/* disable all powermanagement */
 	pci_write_config_dword(pdev, PCI_POWERMGMT, 0x0000);
-	
+
 	pci_set_master(pdev); /* Why isn't this done by pci_enable_device ?*/
 
-	/* clear PCI status, if any */ 
-	pci_read_config_word (pdev,PCI_STATUS, &tmp16); 
+	/* clear PCI status, if any */
+	pci_read_config_word (pdev,PCI_STATUS, &tmp16);
 	pci_write_config_word (pdev, PCI_STATUS,tmp16);
-	
+
 	pci_read_config_byte(pdev, PCI_REVISION_ID, &chip_rev);
-	
+
 	if (!request_region(pci_resource_start(pdev, 0), 128, "xircom_cb")) {
 		printk(KERN_ERR "xircom_probe: failed to allocate io-region\n");
 		return -ENODEV;
 	}
 
-	/* 
+	/*
 	   Before changing the hardware, allocate the memory.
 	   This way, we can fail gracefully if not enough memory
-	   is available. 
+	   is available.
 	 */
 	dev = alloc_etherdev(sizeof(struct xircom_private));
 	if (!dev) {
@@ -242,13 +242,13 @@
 		goto device_fail;
 	}
 	private = netdev_priv(dev);
-	
+
 	/* Allocate the send/receive buffers */
 	private->rx_buffer = pci_alloc_consistent(pdev,8192,&private->rx_dma_handle);
 	if (private->rx_buffer == NULL) {
  		printk(KERN_ERR "xircom_probe: no memory for rx buffer \n");
 		goto rx_buf_fail;
-	}	
+	}
 	private->tx_buffer = pci_alloc_consistent(pdev,8192,&private->tx_dma_handle);
 	if (private->tx_buffer == NULL) {
 		printk(KERN_ERR "xircom_probe: no memory for tx buffer \n");
@@ -265,11 +265,11 @@
 	spin_lock_init(&private->lock);
 	dev->irq = pdev->irq;
 	dev->base_addr = private->io_port;
-	
+
 	initialize_card(private);
 	read_mac_address(private);
 	setup_descriptors(private);
-	
+
 	dev->open = &xircom_open;
 	dev->hard_start_xmit = &xircom_start_xmit;
 	dev->stop = &xircom_close;
@@ -285,19 +285,19 @@
 		printk(KERN_ERR "xircom_probe: netdevice registration failed.\n");
 		goto reg_fail;
 	}
-		
+
 	printk(KERN_INFO "%s: Xircom cardbus revision %i at irq %i \n", dev->name, chip_rev, pdev->irq);
 	/* start the transmitter to get a heartbeat */
 	/* TODO: send 2 dummy packets here */
 	transceiver_voodoo(private);
-	
+
 	spin_lock_irqsave(&private->lock,flags);
 	activate_transmitter(private);
 	activate_receiver(private);
 	spin_unlock_irqrestore(&private->lock,flags);
-	
+
 	trigger_receive(private);
-	
+
 	leave("xircom_probe");
 	return 0;
 
@@ -332,7 +332,7 @@
 	free_netdev(dev);
 	pci_set_drvdata(pdev, NULL);
 	leave("xircom_remove");
-} 
+}
 
 static irqreturn_t xircom_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
 {
@@ -346,11 +346,11 @@
 	spin_lock(&card->lock);
 	status = inl(card->io_port+CSR5);
 
-#ifdef DEBUG	
+#ifdef DEBUG
 	print_binary(status);
 	printk("tx status 0x%08x 0x%08x \n",card->tx_buffer[0],card->tx_buffer[4]);
 	printk("rx status 0x%08x 0x%08x \n",card->rx_buffer[0],card->rx_buffer[4]);
-#endif	
+#endif
 	/* Handle shared irq and hotplug */
 	if (status == 0 || status == 0xffffffff) {
 		spin_unlock(&card->lock);
@@ -366,21 +366,21 @@
 			netif_carrier_on(dev);
 		else
 			netif_carrier_off(dev);
-		
+
 	}
 
-	/* Clear all remaining interrupts */	
+	/* Clear all remaining interrupts */
 	status |= 0xffffffff; /* FIXME: make this clear only the
 				        real existing bits */
 	outl(status,card->io_port+CSR5);
-	
 
-	for (i=0;i<NUMDESCRIPTORS;i++) 
+
+	for (i=0;i<NUMDESCRIPTORS;i++)
 		investigate_write_descriptor(dev,card,i,bufferoffsets[i]);
-	for (i=0;i<NUMDESCRIPTORS;i++) 
+	for (i=0;i<NUMDESCRIPTORS;i++)
 		investigate_read_descriptor(dev,card,i,bufferoffsets[i]);
 
-	
+
 	spin_unlock(&card->lock);
 	leave("xircom_interrupt");
 	return IRQ_HANDLED;
@@ -393,38 +393,38 @@
 	int nextdescriptor;
 	int desc;
 	enter("xircom_start_xmit");
-	
+
 	card = netdev_priv(dev);
 	spin_lock_irqsave(&card->lock,flags);
-	
+
 	/* First see if we can free some descriptors */
-	for (desc=0;desc<NUMDESCRIPTORS;desc++) 
+	for (desc=0;desc<NUMDESCRIPTORS;desc++)
 		investigate_write_descriptor(dev,card,desc,bufferoffsets[desc]);
-	
-	
+
+
 	nextdescriptor = (card->transmit_used +1) % (NUMDESCRIPTORS);
 	desc = card->transmit_used;
-	
+
 	/* only send the packet if the descriptor is free */
 	if (card->tx_buffer[4*desc]==0) {
 			/* Copy the packet data; zero the memory first as the card
 			   sometimes sends more than you ask it to. */
-			
+
 			memset(&card->tx_buffer[bufferoffsets[desc]/4],0,1536);
 			memcpy(&(card->tx_buffer[bufferoffsets[desc]/4]),skb->data,skb->len);
-	
-	
+
+
 			/* FIXME: The specification tells us that the length we send HAS to be a multiple of
 			   4 bytes. */
-			   
+
 			card->tx_buffer[4*desc+1] = skb->len;
 			if (desc == NUMDESCRIPTORS-1)
 				card->tx_buffer[4*desc+1] |= (1<<25);  /* bit 25: last descriptor of the ring */
 
 			card->tx_buffer[4*desc+1] |= 0xF0000000;
-						 /* 0xF0... means want interrupts*/ 
+						 /* 0xF0... means want interrupts*/
 			card->tx_skb[desc] = skb;
-			
+
 			wmb();
 			/* This gives the descriptor to the card */
 			card->tx_buffer[4*desc] = 0x80000000;
@@ -433,18 +433,18 @@
 				netif_stop_queue(dev);
 			}
 			card->transmit_used = nextdescriptor;
-			leave("xircom-start_xmit - sent");	
+			leave("xircom-start_xmit - sent");
 			spin_unlock_irqrestore(&card->lock,flags);
 			return 0;
 	}
-	
+
 
 
 	/* Uh oh... no free descriptor... drop the packet */
 	netif_stop_queue(dev);
 	spin_unlock_irqrestore(&card->lock,flags);
 	trigger_transmit(card);
-	
+
 	return -EIO;
 }
 
@@ -462,7 +462,7 @@
 		leave("xircom_open - No IRQ");
 		return retval;
 	}
-	
+
 	xircom_up(xp);
 	xp->open = 1;
 	leave("xircom_open");
@@ -473,31 +473,31 @@
 {
 	struct xircom_private *card;
 	unsigned long flags;
-	
+
 	enter("xircom_close");
 	card = netdev_priv(dev);
 	netif_stop_queue(dev); /* we don't want new packets */
 
-	
+
 	spin_lock_irqsave(&card->lock,flags);
-	
+
 	disable_all_interrupts(card);
-#if 0	
+#if 0
 	/* We can enable this again once we send dummy packets on ifconfig ethX up */
 	deactivate_receiver(card);
 	deactivate_transmitter(card);
-#endif	
+#endif
 	remove_descriptors(card);
-	
+
 	spin_unlock_irqrestore(&card->lock,flags);
-	
+
 	card->open = 0;
 	free_irq(dev->irq,dev);
-	
+
 	leave("xircom_close");
-	
+
 	return 0;
-	
+
 }
 
 
@@ -506,8 +506,8 @@
 {
         struct xircom_private *card = netdev_priv(dev);
         return &card->stats;
-} 
-                                                 
+}
+
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void xircom_poll_controller(struct net_device *dev)
@@ -540,7 +540,7 @@
 	outl(val, card->io_port + CSR0);
 
 
-	val = 0;		/* Value 0x00 is a safe and conservative value 
+	val = 0;		/* Value 0x00 is a safe and conservative value
 				   for the PCI configuration settings */
 	outl(val, card->io_port + CSR0);
 
@@ -617,23 +617,23 @@
 
 		/* Rx Descr2: address of the buffer
 		   we store the buffer at the 2nd half of the page */
-	
+
 		address = (unsigned long) card->rx_dma_handle;
 		card->rx_buffer[i*4 + 2] = cpu_to_le32(address + bufferoffsets[i]);
 		/* Rx Desc3: address of 2nd buffer -> 0 */
 		card->rx_buffer[i*4 + 3] = 0;
 	}
-	
+
 	wmb();
 	/* Write the receive descriptor ring address to the card */
 	address = (unsigned long) card->rx_dma_handle;
-	val = cpu_to_le32(address); 
+	val = cpu_to_le32(address);
 	outl(val, card->io_port + CSR3);	/* Receive descr list address */
 
 
 	/* transmit descriptors */
 	memset(card->tx_buffer, 0, 128);	/* clear the descriptors */
-	
+
 	for (i=0;i<NUMDESCRIPTORS;i++ ) {
 		/* Tx Descr0: Empty, we own it, no errors -> 0x00000000 */
 		card->tx_buffer[i*4 + 0] = 0x00000000;
@@ -641,7 +641,7 @@
 		card->tx_buffer[i*4 + 1] = 1536;
 		if (i==NUMDESCRIPTORS-1)
 			card->tx_buffer[i*4 + 1] |= (1 << 25); /* bit 25 is "last descriptor" */
-		
+
 		/* Tx Descr2: address of the buffer
 		   we store the buffer at the 2nd half of the page */
 		address = (unsigned long) card->tx_dma_handle;
@@ -748,7 +748,7 @@
 activate_receiver enables the receiver on the card.
 Before being allowed to activate the receiver, the receiver
 must be completely de-activated. To achieve this,
-this code actually disables the receiver first; then it waits for the 
+this code actually disables the receiver first; then it waits for the
 receiver to become inactive, then it activates the receiver and then
 it waits for the receiver to be active.
 
@@ -762,13 +762,13 @@
 
 
 	val = inl(card->io_port + CSR6);	/* Operation mode */
-	
+
 	/* If the "active" bit is set and the receiver is already
 	   active, no need to do the expensive thing */
 	if ((val&2) && (receive_active(card)))
 		return;
-	
-	
+
+
 	val = val & ~2;		/* disable the receiver */
 	outl(val, card->io_port + CSR6);
 
@@ -805,7 +805,7 @@
 
 /*
 deactivate_receiver disables the receiver on the card.
-To achieve this this code disables the receiver first; 
+To achieve this, this code disables the receiver first;
 then it waits for the receiver to become inactive.
 
 must be called with the lock held and interrupts disabled.
@@ -840,7 +840,7 @@
 activate_transmitter enables the transmitter on the card.
 Before being allowed to activate the transmitter, the transmitter
 must be completely de-activated. To achieve this,
-this code actually disables the transmitter first; then it waits for the 
+this code actually disables the transmitter first; then it waits for the
 transmitter to become inactive, then it activates the transmitter and then
 it waits for the transmitter to be active again.
 
@@ -856,7 +856,7 @@
 	val = inl(card->io_port + CSR6);	/* Operation mode */
 
 	/* If the "active" bit is set and the receiver is already
-	   active, no need to do the expensive thing */	 
+	   active, no need to do the expensive thing */
 	if ((val&(1<<13)) && (transmit_active(card)))
 		return;
 
@@ -896,7 +896,7 @@
 
 /*
 deactivate_transmitter disables the transmitter on the card.
-To achieve this this code disables the transmitter first; 
+To achieve this, this code disables the transmitter first;
 then it waits for the transmitter to become inactive.
 
 must be called with the lock held and interrupts disabled.
@@ -990,7 +990,7 @@
 {
 	unsigned int val;
 	enter("enable_all_interrupts");
-	
+
 	val = 0;				/* disable all interrupts */
 	outl(val, card->io_port + CSR7);
 
@@ -1031,8 +1031,8 @@
 	unsigned int val;
 	enter("enable_promisc");
 
-	val = inl(card->io_port + CSR6);	
-	val = val | (1 << 6);	
+	val = inl(card->io_port + CSR6);
+	val = val | (1 << 6);
 	outl(val, card->io_port + CSR6);
 
 	leave("enable_promisc");
@@ -1042,7 +1042,7 @@
 
 
 
-/* 
+/*
 link_status() checks the link status and will return 0 for no link, 10 for a 10mbit link and 100 for a 100mbit link.
 
 Must be called in locked state with interrupts disabled
@@ -1051,15 +1051,15 @@
 {
 	unsigned int val;
 	enter("link_status");
-	
+
 	val = inb(card->io_port + CSR12);
-	
+
 	if (!(val&(1<<2)))  /* bit 2 is 0 for a 10mbit link, 1 otherwise */
 		return 10;
 	if (!(val&(1<<1)))  /* bit 1 is 0 for a 100mbit link, 1 otherwise */
 		return 100;
-		
-	/* If we get here -> no link at all */	
+
+	/* If we get here -> no link at all */
 
 	leave("link_status");
 	return 0;
@@ -1071,7 +1071,7 @@
 
 /*
   read_mac_address() reads the MAC address from the NIC and stores it in the "dev" structure.
- 
+
  This function takes the spinlock itself and therefore must not be called with the lock held.
  */
 static void read_mac_address(struct xircom_private *card)
@@ -1081,7 +1081,7 @@
 	int i;
 
 	enter("read_mac_address");
-		
+
 	spin_lock_irqsave(&card->lock, flags);
 
 	outl(1 << 12, card->io_port + CSR9);	/* enable boot rom access */
@@ -1095,7 +1095,7 @@
 		outl(i + 3, card->io_port + CSR10);
 		data_count = inl(card->io_port + CSR9) & 0xff;
 		if ((tuple == 0x22) && (data_id == 0x04) && (data_count == 0x06)) {
-			/* 
+			/*
 			 * This is it.  We have the data we want.
 			 */
 			for (j = 0; j < 6; j++) {
@@ -1136,12 +1136,12 @@
 	spin_lock_irqsave(&card->lock, flags);
 
 	outl(0x0008, card->io_port + CSR15);
-        udelay(25);  
+        udelay(25);
         outl(0xa8050000, card->io_port + CSR15);
         udelay(25);
         outl(0xa00f0000, card->io_port + CSR15);
         udelay(25);
-        
+
         spin_unlock_irqrestore(&card->lock, flags);
 
 	netif_start_queue(card->dev);
@@ -1163,15 +1163,15 @@
 
 	spin_lock_irqsave(&card->lock, flags);
 
-	
+
 	enable_link_interrupt(card);
 	enable_transmit_interrupt(card);
 	enable_receive_interrupt(card);
 	enable_common_interrupts(card);
 	enable_promisc(card);
-	
+
 	/* The card may already have received packets; read them out now */
-	for (i=0;i<NUMDESCRIPTORS;i++) 
+	for (i=0;i<NUMDESCRIPTORS;i++)
 		investigate_read_descriptor(card->dev,card,i,bufferoffsets[i]);
 
 
@@ -1185,15 +1185,15 @@
 /* Bufferoffset is in BYTES */
 static void investigate_read_descriptor(struct net_device *dev,struct xircom_private *card, int descnr, unsigned int bufferoffset)
 {
-		int status;		
-		
+		int status;
+
 		enter("investigate_read_descriptor");
 		status = card->rx_buffer[4*descnr];
-		
+
 		if ((status > 0)) {	/* packet received */
-		
+
 			/* TODO: discard error packets */
-			
+
 			short pkt_len = ((status >> 16) & 0x7ff) - 4;	/* minus 4, we don't want the CRC */
 			struct sk_buff *skb;
 
@@ -1216,7 +1216,7 @@
 			dev->last_rx = jiffies;
 			card->stats.rx_packets++;
 			card->stats.rx_bytes += pkt_len;
-			
+
 		      out:
 			/* give the buffer back to the card */
 			card->rx_buffer[4*descnr] =  0x80000000;
@@ -1234,9 +1234,9 @@
 		int status;
 
 		enter("investigate_write_descriptor");
-		
+
 		status = card->tx_buffer[4*descnr];
-#if 0		
+#if 0
 		if (status & 0x8000) {	/* Major error */
 			printk(KERN_ERR "Major transmit error status %x \n", status);
 			card->tx_buffer[4*descnr] = 0;
@@ -1258,7 +1258,7 @@
 		}
 
 		leave("investigate_write_descriptor");
-		
+
 }
 
 
@@ -1271,8 +1271,8 @@
 static void __exit xircom_exit(void)
 {
 	pci_unregister_driver(&xircom_ops);
-} 
+}
 
-module_init(xircom_init) 
+module_init(xircom_init)
 module_exit(xircom_exit)
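/*
 * A reference sketch, not part of this patch: the descriptor handling
 * cleaned up above relies on a simple ownership convention.  A zero status
 * word (word 0 of each 4-word descriptor) marks a slot as free for the
 * driver, and writing 0x80000000 hands it to the card.  Names below are
 * illustrative only.
 */
#include <linux/types.h>

#define SKETCH_DESC_OWNED_BY_CARD	0x80000000

static int sketch_tx_slot_free(u32 *tx_ring, int desc)
{
	/* word 0 holds the status; zero means the card gave the slot back */
	return tx_ring[4 * desc] == 0;
}

static void sketch_tx_slot_submit(u32 *tx_ring, int desc)
{
	wmb();					/* data before ownership flip */
	tx_ring[4 * desc] = SKETCH_DESC_OWNED_BY_CARD;
}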
 
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index d9a774b..f1b2640 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -307,7 +307,7 @@
 #define TX_QUEUE_NO         4
 
 #define MAX_HW_MIB_COUNTER  32
-#define VELOCITY_MIN_MTU    (1514-14)
+#define VELOCITY_MIN_MTU    (64)
 #define VELOCITY_MAX_MTU    (9000)
 
 /*
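/*
 * A reference sketch, not part of this patch: lowering VELOCITY_MIN_MTU to
 * 64 lets the usual bounds check in the driver's change_mtu path accept
 * small MTUs instead of rejecting anything below 1500.  Only the bounds
 * test is shown here; the real handler in via-velocity.c does more work.
 */
static int sketch_velocity_mtu_ok(int new_mtu)
{
	return new_mtu >= VELOCITY_MIN_MTU && new_mtu <= VELOCITY_MAX_MTU;
}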
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index eba8e5c..f485a97 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -50,10 +50,6 @@
 static int pci_clock_freq = 33000000;
 #define CLOCK_BASE pci_clock_freq
 
-#define PCI_VENDOR_ID_GORAMO	0x10B5	/* uses PLX:9050 ID - this card	*/
-#define PCI_DEVICE_ID_PCI200SYN	0x9050	/* doesn't have its own ID	*/
-
-
 /*
  *      PLX PCI9052 local configuration and shared runtime registers.
  *      This structure can be used to access 9052 registers (memory mapped).
@@ -262,7 +258,7 @@
 	int i;
 	card_t *card = pci_get_drvdata(pdev);
 
-	for(i = 0; i < 2; i++)
+	for (i = 0; i < 2; i++)
 		if (card->ports[i].card) {
 			struct net_device *dev = port_to_dev(&card->ports[i]);
 			unregister_hdlc_device(dev);
@@ -385,6 +381,15 @@
 	       " %u RX packets rings\n", ramsize / 1024, ramphys,
 	       pdev->irq, card->tx_ring_buffers, card->rx_ring_buffers);
 
+	if (pdev->subsystem_device == PCI_DEVICE_ID_PLX_9050) {
+		printk(KERN_ERR "Detected PCI200SYN card with old "
+		       "configuration data.\n");
+		printk(KERN_ERR "See <http://www.kernel.org/pub/"
+		       "linux/utils/net/hdlc/pci200syn/> for update.\n");
+		printk(KERN_ERR "The card will stop working with"
+		       " future versions of Linux if not updated.\n");
+	}
+
 	if (card->tx_ring_buffers < 1) {
 		printk(KERN_ERR "pci200syn: RAM test failed\n");
 		pci200_pci_remove_one(pdev);
@@ -396,7 +401,7 @@
 	writew(readw(p) | 0x0040, p);
 
 	/* Allocate IRQ */
-	if(request_irq(pdev->irq, sca_intr, SA_SHIRQ, devname, card)) {
+	if (request_irq(pdev->irq, sca_intr, SA_SHIRQ, devname, card)) {
 		printk(KERN_WARNING "pci200syn: could not allocate IRQ%d.\n",
 		       pdev->irq);
 		pci200_pci_remove_one(pdev);
@@ -406,7 +411,7 @@
 
 	sca_init(card, 0);
 
-	for(i = 0; i < 2; i++) {
+	for (i = 0; i < 2; i++) {
 		port_t *port = &card->ports[i];
 		struct net_device *dev = port_to_dev(port);
 		hdlc_device *hdlc = dev_to_hdlc(dev);
@@ -425,7 +430,7 @@
 		hdlc->xmit = sca_xmit;
 		port->settings.clock_type = CLOCK_EXT;
 		port->card = card;
-		if(register_hdlc_device(dev)) {
+		if (register_hdlc_device(dev)) {
 			printk(KERN_ERR "pci200syn: unable to register hdlc "
 			       "device\n");
 			port->card = NULL;
@@ -445,8 +450,10 @@
 
 
 static struct pci_device_id pci200_pci_tbl[] __devinitdata = {
-	{ PCI_VENDOR_ID_GORAMO, PCI_DEVICE_ID_PCI200SYN, PCI_ANY_ID,
-	  PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_PLX,
+	  PCI_DEVICE_ID_PLX_9050, 0, 0, 0 },
+	{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_PLX,
+	  PCI_DEVICE_ID_PLX_PCI200SYN, 0, 0, 0 },
 	{ 0, }
 };
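/*
 * A reference sketch, not part of this patch: the positional initializers
 * above fill struct pci_device_id in the order vendor, device, subvendor,
 * subdevice, class, class_mask, driver_data.  Written out with designated
 * initializers for clarity:
 */
static struct pci_device_id sketch_pci200_tbl[] = {
	{
		.vendor    = PCI_VENDOR_ID_PLX,		/* generic PLX bridge ID    */
		.device    = PCI_DEVICE_ID_PLX_9050,
		.subvendor = PCI_VENDOR_ID_PLX,		/* subsystem IDs pick out   */
		.subdevice = PCI_DEVICE_ID_PLX_PCI200SYN,	/* the PCI200SYN board */
	},
	{ 0, }
};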
 
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 2329f94..8d107c6 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -164,7 +164,6 @@
 	return __pci_bus_find_cap(bus, devfn, hdr_type & 0x7f, cap);
 }
 
-#if 0
 /**
  * pci_find_ext_capability - Find an extended capability
  * @dev: PCI device to query
@@ -212,7 +211,7 @@
 
 	return 0;
 }
-#endif  /*  0  */
+EXPORT_SYMBOL_GPL(pci_find_ext_capability);
 
 /**
  * pci_find_parent_resource - return resource region of parent bus of given region
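/*
 * A reference sketch, not part of this patch: with the #if 0 removed and
 * the symbol exported, modules can walk the PCI Express extended capability
 * list.  Minimal usage, assuming the AER capability ID from
 * <linux/pci_regs.h>:
 */
static int sketch_device_has_aer(struct pci_dev *pdev)
{
	int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);

	/* non-zero is the config-space offset of the capability */
	return pos != 0;
}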
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index 48d3b3d..74b3124 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -1143,6 +1143,12 @@
 {
 	struct pcmcia_socket *s = pcmcia_get_socket(skt);
 
+	if (!s) {
+		printk(KERN_ERR "PCMCIA obtaining reference to socket %p " \
+			"failed, event 0x%x lost!\n", skt, event);
+		return -ENODEV;
+	}
+
 	ds_dbg(1, "ds_event(0x%06x, %d, 0x%p)\n",
 	       event, priority, skt);
 
diff --git a/drivers/rtc/rtc-m48t86.c b/drivers/rtc/rtc-m48t86.c
index f6e7ee0..8c0d1a6 100644
--- a/drivers/rtc/rtc-m48t86.c
+++ b/drivers/rtc/rtc-m48t86.c
@@ -48,33 +48,33 @@
 	struct platform_device *pdev = to_platform_device(dev);
 	struct m48t86_ops *ops = pdev->dev.platform_data;
 
-	reg = ops->readb(M48T86_REG_B);
+	reg = ops->readbyte(M48T86_REG_B);
 
 	if (reg & M48T86_REG_B_DM) {
 		/* data (binary) mode */
-		tm->tm_sec	= ops->readb(M48T86_REG_SEC);
-		tm->tm_min	= ops->readb(M48T86_REG_MIN);
-		tm->tm_hour	= ops->readb(M48T86_REG_HOUR) & 0x3F;
-		tm->tm_mday	= ops->readb(M48T86_REG_DOM);
+		tm->tm_sec	= ops->readbyte(M48T86_REG_SEC);
+		tm->tm_min	= ops->readbyte(M48T86_REG_MIN);
+		tm->tm_hour	= ops->readbyte(M48T86_REG_HOUR) & 0x3F;
+		tm->tm_mday	= ops->readbyte(M48T86_REG_DOM);
 		/* tm_mon is 0-11 */
-		tm->tm_mon	= ops->readb(M48T86_REG_MONTH) - 1;
-		tm->tm_year	= ops->readb(M48T86_REG_YEAR) + 100;
-		tm->tm_wday	= ops->readb(M48T86_REG_DOW);
+		tm->tm_mon	= ops->readbyte(M48T86_REG_MONTH) - 1;
+		tm->tm_year	= ops->readbyte(M48T86_REG_YEAR) + 100;
+		tm->tm_wday	= ops->readbyte(M48T86_REG_DOW);
 	} else {
 		/* bcd mode */
-		tm->tm_sec	= BCD2BIN(ops->readb(M48T86_REG_SEC));
-		tm->tm_min	= BCD2BIN(ops->readb(M48T86_REG_MIN));
-		tm->tm_hour	= BCD2BIN(ops->readb(M48T86_REG_HOUR) & 0x3F);
-		tm->tm_mday	= BCD2BIN(ops->readb(M48T86_REG_DOM));
+		tm->tm_sec	= BCD2BIN(ops->readbyte(M48T86_REG_SEC));
+		tm->tm_min	= BCD2BIN(ops->readbyte(M48T86_REG_MIN));
+		tm->tm_hour	= BCD2BIN(ops->readbyte(M48T86_REG_HOUR) & 0x3F);
+		tm->tm_mday	= BCD2BIN(ops->readbyte(M48T86_REG_DOM));
 		/* tm_mon is 0-11 */
-		tm->tm_mon	= BCD2BIN(ops->readb(M48T86_REG_MONTH)) - 1;
-		tm->tm_year	= BCD2BIN(ops->readb(M48T86_REG_YEAR)) + 100;
-		tm->tm_wday	= BCD2BIN(ops->readb(M48T86_REG_DOW));
+		tm->tm_mon	= BCD2BIN(ops->readbyte(M48T86_REG_MONTH)) - 1;
+		tm->tm_year	= BCD2BIN(ops->readbyte(M48T86_REG_YEAR)) + 100;
+		tm->tm_wday	= BCD2BIN(ops->readbyte(M48T86_REG_DOW));
 	}
 
 	/* correct the hour if the clock is in 12h mode */
 	if (!(reg & M48T86_REG_B_H24))
-		if (ops->readb(M48T86_REG_HOUR) & 0x80)
+		if (ops->readbyte(M48T86_REG_HOUR) & 0x80)
 			tm->tm_hour += 12;
 
 	return 0;
@@ -86,35 +86,35 @@
 	struct platform_device *pdev = to_platform_device(dev);
 	struct m48t86_ops *ops = pdev->dev.platform_data;
 
-	reg = ops->readb(M48T86_REG_B);
+	reg = ops->readbyte(M48T86_REG_B);
 
 	/* update flag and 24h mode */
 	reg |= M48T86_REG_B_SET | M48T86_REG_B_H24;
-	ops->writeb(reg, M48T86_REG_B);
+	ops->writebyte(reg, M48T86_REG_B);
 
 	if (reg & M48T86_REG_B_DM) {
 		/* data (binary) mode */
-		ops->writeb(tm->tm_sec, M48T86_REG_SEC);
-		ops->writeb(tm->tm_min, M48T86_REG_MIN);
-		ops->writeb(tm->tm_hour, M48T86_REG_HOUR);
-		ops->writeb(tm->tm_mday, M48T86_REG_DOM);
-		ops->writeb(tm->tm_mon + 1, M48T86_REG_MONTH);
-		ops->writeb(tm->tm_year % 100, M48T86_REG_YEAR);
-		ops->writeb(tm->tm_wday, M48T86_REG_DOW);
+		ops->writebyte(tm->tm_sec, M48T86_REG_SEC);
+		ops->writebyte(tm->tm_min, M48T86_REG_MIN);
+		ops->writebyte(tm->tm_hour, M48T86_REG_HOUR);
+		ops->writebyte(tm->tm_mday, M48T86_REG_DOM);
+		ops->writebyte(tm->tm_mon + 1, M48T86_REG_MONTH);
+		ops->writebyte(tm->tm_year % 100, M48T86_REG_YEAR);
+		ops->writebyte(tm->tm_wday, M48T86_REG_DOW);
 	} else {
 		/* bcd mode */
-		ops->writeb(BIN2BCD(tm->tm_sec), M48T86_REG_SEC);
-		ops->writeb(BIN2BCD(tm->tm_min), M48T86_REG_MIN);
-		ops->writeb(BIN2BCD(tm->tm_hour), M48T86_REG_HOUR);
-		ops->writeb(BIN2BCD(tm->tm_mday), M48T86_REG_DOM);
-		ops->writeb(BIN2BCD(tm->tm_mon + 1), M48T86_REG_MONTH);
-		ops->writeb(BIN2BCD(tm->tm_year % 100), M48T86_REG_YEAR);
-		ops->writeb(BIN2BCD(tm->tm_wday), M48T86_REG_DOW);
+		ops->writebyte(BIN2BCD(tm->tm_sec), M48T86_REG_SEC);
+		ops->writebyte(BIN2BCD(tm->tm_min), M48T86_REG_MIN);
+		ops->writebyte(BIN2BCD(tm->tm_hour), M48T86_REG_HOUR);
+		ops->writebyte(BIN2BCD(tm->tm_mday), M48T86_REG_DOM);
+		ops->writebyte(BIN2BCD(tm->tm_mon + 1), M48T86_REG_MONTH);
+		ops->writebyte(BIN2BCD(tm->tm_year % 100), M48T86_REG_YEAR);
+		ops->writebyte(BIN2BCD(tm->tm_wday), M48T86_REG_DOW);
 	}
 
 	/* update ended */
 	reg &= ~M48T86_REG_B_SET;
-	ops->writeb(reg, M48T86_REG_B);
+	ops->writebyte(reg, M48T86_REG_B);
 
 	return 0;
 }
@@ -125,12 +125,12 @@
 	struct platform_device *pdev = to_platform_device(dev);
 	struct m48t86_ops *ops = pdev->dev.platform_data;
 
-	reg = ops->readb(M48T86_REG_B);
+	reg = ops->readbyte(M48T86_REG_B);
 
 	seq_printf(seq, "mode\t\t: %s\n",
 		 (reg & M48T86_REG_B_DM) ? "binary" : "bcd");
 
-	reg = ops->readb(M48T86_REG_D);
+	reg = ops->readbyte(M48T86_REG_D);
 
 	seq_printf(seq, "battery\t\t: %s\n",
 		 (reg & M48T86_REG_D_VRT) ? "ok" : "exhausted");
@@ -157,7 +157,7 @@
 	platform_set_drvdata(dev, rtc);
 
 	/* read battery status */
-	reg = ops->readb(M48T86_REG_D);
+	reg = ops->readbyte(M48T86_REG_D);
 	dev_info(&dev->dev, "battery %s\n",
 		(reg & M48T86_REG_D_VRT) ? "ok" : "exhausted");
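/*
 * A reference sketch, not part of this patch: judging from the call sites
 * above, the renamed ops take (addr) for reads and (value, addr) for
 * writes.  What a board file might plug in -- the I/O details here are
 * purely illustrative placeholders:
 */
static unsigned char sketch_rtc_readbyte(unsigned long addr)
{
	/* board-specific: select RTC register 'addr', return its data byte */
	return 0;
}

static void sketch_rtc_writebyte(unsigned char value, unsigned long addr)
{
	/* board-specific: select RTC register 'addr', write 'value' to it */
}

static struct m48t86_ops sketch_rtc_ops = {
	.readbyte  = sketch_rtc_readbyte,
	.writebyte = sketch_rtc_writebyte,
};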
 
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 74a257b..e210f89 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -45,11 +45,11 @@
 	union {
 		__u8 fc;   	/* SPID function code */
 		struct path_state ps;	/* SNID path state */
-	} inf;
+	} __attribute__ ((packed)) inf;
 	union {
 		__u32 cpu_addr	: 16;	/* CPU address */
 		struct extended_cssid ext_cssid;
-	} pgid_high;
+	} __attribute__ ((packed)) pgid_high;
 	__u32 cpu_id	: 24;	/* CPU identification */
 	__u32 cpu_model : 16;	/* CPU model */
 	__u32 tod_high;		/* high word TOD clock */
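/*
 * A reference sketch, not part of this patch: the two
 * __attribute__ ((packed)) annotations keep the compiler from aligning and
 * padding the unions, so the structure continues to match the
 * hardware-defined path-group ID layout.  A generic illustration:
 */
struct sketch_unpacked {
	unsigned char tag;
	union {
		unsigned int word;	/* 4-byte alignment normally forces */
		unsigned char byte;	/* padding after 'tag'              */
	} u;
};

struct sketch_packed {
	unsigned char tag;
	union {
		unsigned int word;
		unsigned char byte;
	} __attribute__ ((packed)) u;	/* placed immediately after 'tag' */
};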
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 180b3bf..49ec562 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -749,7 +749,7 @@
 			/* Unit check but no sense data. Need basic sense. */
 			if (ccw_device_do_sense(cdev, irb) != 0)
 				goto call_handler_unsol;
-			memcpy(irb, &cdev->private->irb, sizeof(struct irb));
+			memcpy(&cdev->private->irb, irb, sizeof(struct irb));
 			cdev->private->state = DEV_STATE_W4SENSE;
 			cdev->private->intparm = 0;
 			return;
diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile
index 90d4d0e..6775a83 100644
--- a/drivers/s390/net/Makefile
+++ b/drivers/s390/net/Makefile
@@ -2,7 +2,7 @@
 # S/390 network devices
 #
 
-ctc-objs := ctcmain.o ctctty.o ctcdbug.o
+ctc-objs := ctcmain.o ctcdbug.o
 
 obj-$(CONFIG_IUCV) += iucv.o
 obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o
@@ -10,6 +10,7 @@
 obj-$(CONFIG_CTC) += ctc.o fsm.o cu3088.o
 obj-$(CONFIG_LCS) += lcs.o cu3088.o
 obj-$(CONFIG_CLAW) += claw.o cu3088.o
+obj-$(CONFIG_MPC) += ctcmpc.o fsm.o cu3088.o
 qeth-y := qeth_main.o qeth_mpc.o qeth_sys.o qeth_eddp.o 
 qeth-$(CONFIG_PROC_FS) += qeth_proc.o
 obj-$(CONFIG_QETH) += qeth.o
diff --git a/drivers/s390/net/ctcmain.c b/drivers/s390/net/ctcmain.c
index fe986af..20c8eb1 100644
--- a/drivers/s390/net/ctcmain.c
+++ b/drivers/s390/net/ctcmain.c
@@ -6,7 +6,7 @@
  * Fixes by : Jochen Röhrig (roehrig@de.ibm.com)
  *            Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *            Peter Tiedemann (ptiedem@de.ibm.com)
- * Driver Model stuff by : Cornelia Huck <huckc@de.ibm.com>
+ * Driver Model stuff by : Cornelia Huck <cornelia.huck@de.ibm.com>
  *
  * Documentation used:
  *  - Principles of Operation (IBM doc#: SA22-7201-06)
@@ -65,7 +65,6 @@
 
 #include <asm/idals.h>
 
-#include "ctctty.h"
 #include "fsm.h"
 #include "cu3088.h"
 
@@ -479,10 +478,7 @@
 		skb->dev = pskb->dev;
 		skb->protocol = pskb->protocol;
 		pskb->ip_summed = CHECKSUM_UNNECESSARY;
-		if (ch->protocol == CTC_PROTO_LINUX_TTY)
-			ctc_tty_netif_rx(skb);
-		else
-			netif_rx_ni(skb);
+		netif_rx_ni(skb);
 		/**
 		 * Successful rx; reset logflags
 		 */
@@ -557,8 +553,7 @@
 	DBF_TEXT(trace, 5, __FUNCTION__);
 	if (sense & SNS0_INTERVENTION_REQ) {
 		if (sense & 0x01) {
-			if (ch->protocol != CTC_PROTO_LINUX_TTY)
-				ctc_pr_debug("%s: Interface disc. or Sel. reset "
+			ctc_pr_debug("%s: Interface disc. or Sel. reset "
 					"(remote)\n", ch->id);
 			fsm_event(ch->fsm, CH_EVENT_UC_RCRESET, ch);
 		} else {
@@ -2034,7 +2029,6 @@
 dev_action_chup(fsm_instance * fi, int event, void *arg)
 {
 	struct net_device *dev = (struct net_device *) arg;
-	struct ctc_priv *privptr = dev->priv;
 
 	DBF_TEXT(trace, 3, __FUNCTION__);
 	switch (fsm_getstate(fi)) {
@@ -2049,8 +2043,6 @@
 				fsm_newstate(fi, DEV_STATE_RUNNING);
 				ctc_pr_info("%s: connected with remote side\n",
 					    dev->name);
-				if (privptr->protocol == CTC_PROTO_LINUX_TTY)
-					ctc_tty_setcarrier(dev, 1);
 				ctc_clear_busy(dev);
 			}
 			break;
@@ -2059,8 +2051,6 @@
 				fsm_newstate(fi, DEV_STATE_RUNNING);
 				ctc_pr_info("%s: connected with remote side\n",
 					    dev->name);
-				if (privptr->protocol == CTC_PROTO_LINUX_TTY)
-					ctc_tty_setcarrier(dev, 1);
 				ctc_clear_busy(dev);
 			}
 			break;
@@ -2086,14 +2076,10 @@
 static void
 dev_action_chdown(fsm_instance * fi, int event, void *arg)
 {
-	struct net_device *dev = (struct net_device *) arg;
-	struct ctc_priv *privptr = dev->priv;
 
 	DBF_TEXT(trace, 3, __FUNCTION__);
 	switch (fsm_getstate(fi)) {
 		case DEV_STATE_RUNNING:
-			if (privptr->protocol == CTC_PROTO_LINUX_TTY)
-				ctc_tty_setcarrier(dev, 0);
 			if (event == DEV_EVENT_TXDOWN)
 				fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
 			else
@@ -2397,8 +2383,6 @@
 	 */
 	if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) {
 		fsm_event(privptr->fsm, DEV_EVENT_START, dev);
-		if (privptr->protocol == CTC_PROTO_LINUX_TTY)
-			return -EBUSY;
 		dev_kfree_skb(skb);
 		privptr->stats.tx_dropped++;
 		privptr->stats.tx_errors++;
@@ -2608,20 +2592,13 @@
 	if (!dev)
 		return;
 	privptr = (struct ctc_priv *) dev->priv;
-	if (privptr->protocol != CTC_PROTO_LINUX_TTY)
-		unregister_netdev(dev);
-	else
-		ctc_tty_unregister_netdev(dev);
+	unregister_netdev(dev);
 }
 
 static int
 ctc_netdev_register(struct net_device * dev)
 {
-	struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
-	if (privptr->protocol != CTC_PROTO_LINUX_TTY)
-		return register_netdev(dev);
-	else
-		return ctc_tty_register_netdev(dev);
+	return register_netdev(dev);
 }
 
 static void
@@ -2667,7 +2644,9 @@
 	if (!priv)
 		return -ENODEV;
 	sscanf(buf, "%u", &value);
-	if ((value < 0) || (value > CTC_PROTO_MAX))
+	if (!((value == CTC_PROTO_S390)  ||
+	      (value == CTC_PROTO_LINUX) ||
+	      (value == CTC_PROTO_OS390)))
 		return -EINVAL;
 	priv->protocol = value;
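/*
 * A reference sketch, not part of this patch: with CTC_PROTO_LINUX_TTY
 * (value 2) removed, the old "value <= CTC_PROTO_MAX" range test would
 * still accept the dead value, hence the explicit whitelist above.  The
 * same check as a small helper:
 */
static int sketch_ctc_protocol_valid(unsigned int value)
{
	return value == CTC_PROTO_S390 ||
	       value == CTC_PROTO_LINUX ||
	       value == CTC_PROTO_OS390;
}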
 
@@ -2897,10 +2876,7 @@
 		goto out;
 	}
 
-	if (privptr->protocol == CTC_PROTO_LINUX_TTY)
-		strlcpy(dev->name, "ctctty%d", IFNAMSIZ);
-	else
-		strlcpy(dev->name, "ctc%d", IFNAMSIZ);
+	strlcpy(dev->name, "ctc%d", IFNAMSIZ);
 
 	for (direction = READ; direction <= WRITE; direction++) {
 		privptr->channel[direction] =
@@ -3046,7 +3022,6 @@
 {
 	DBF_TEXT(setup, 3, __FUNCTION__);
 	unregister_cu3088_discipline(&ctc_group_driver);
-	ctc_tty_cleanup();
 	ctc_unregister_dbf_views();
 	ctc_pr_info("CTC driver unloaded\n");
 }
@@ -3073,10 +3048,8 @@
 		ctc_pr_crit("ctc_init failed with ctc_register_dbf_views rc = %d\n", ret);
 		return ret;
 	}
-	ctc_tty_init();
 	ret = register_cu3088_discipline(&ctc_group_driver);
 	if (ret) {
-		ctc_tty_cleanup();
 		ctc_unregister_dbf_views();
 	}
 	return ret;
diff --git a/drivers/s390/net/ctcmain.h b/drivers/s390/net/ctcmain.h
index d2e835c..7f305d1 100644
--- a/drivers/s390/net/ctcmain.h
+++ b/drivers/s390/net/ctcmain.h
@@ -35,7 +35,9 @@
 #include <asm/ccwdev.h>
 #include <asm/ccwgroup.h>
 
-#include "ctctty.h"
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+
 #include "fsm.h"
 #include "cu3088.h"
 
@@ -50,9 +52,7 @@
 
 #define CTC_PROTO_S390          0
 #define CTC_PROTO_LINUX         1
-#define CTC_PROTO_LINUX_TTY     2
 #define CTC_PROTO_OS390         3
-#define CTC_PROTO_MAX           3
 
 #define CTC_BUFSIZE_LIMIT       65535
 #define CTC_BUFSIZE_DEFAULT     32768
@@ -257,15 +257,13 @@
 ctc_clear_busy(struct net_device * dev)
 {
 	clear_bit(0, &(((struct ctc_priv *) dev->priv)->tbusy));
-	if (((struct ctc_priv *)dev->priv)->protocol != CTC_PROTO_LINUX_TTY)
-		netif_wake_queue(dev);
+	netif_wake_queue(dev);
 }
 
 static __inline__ int
 ctc_test_and_set_busy(struct net_device * dev)
 {
-	if (((struct ctc_priv *)dev->priv)->protocol != CTC_PROTO_LINUX_TTY)
-		netif_stop_queue(dev);
+	netif_stop_queue(dev);
 	return test_and_set_bit(0, &((struct ctc_priv *) dev->priv)->tbusy);
 }
 
diff --git a/drivers/s390/net/ctctty.c b/drivers/s390/net/ctctty.c
deleted file mode 100644
index af54d1d..0000000
--- a/drivers/s390/net/ctctty.c
+++ /dev/null
@@ -1,1259 +0,0 @@
-/*
- * CTC / ESCON network driver, tty interface.
- *
- * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- */
-
-#include <linux/config.h>
-#include <linux/module.h>
-#include <linux/tty.h>
-#include <linux/tty_flip.h>
-#include <linux/serial_reg.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <asm/uaccess.h>
-#include <linux/devfs_fs_kernel.h>
-#include "ctctty.h"
-#include "ctcdbug.h"
-
-#define CTC_TTY_MAJOR       43
-#define CTC_TTY_MAX_DEVICES 64
-
-#define CTC_ASYNC_MAGIC          0x49344C01 /* for paranoia-checking        */
-#define CTC_ASYNC_INITIALIZED    0x80000000 /* port was initialized         */
-#define CTC_ASYNC_NORMAL_ACTIVE  0x20000000 /* Normal device active         */
-#define CTC_ASYNC_CLOSING        0x08000000 /* Serial port is closing       */
-#define CTC_ASYNC_CTS_FLOW       0x04000000 /* Do CTS flow control          */
-#define CTC_ASYNC_CHECK_CD       0x02000000 /* i.e., CLOCAL                 */
-#define CTC_ASYNC_HUP_NOTIFY         0x0001 /* Notify tty on hangups/closes */
-#define CTC_ASYNC_NETDEV_OPEN        0x0002 /* Underlying netdev is open    */
-#define CTC_ASYNC_TX_LINESTAT        0x0004 /* Must send line status        */
-#define CTC_ASYNC_SPLIT_TERMIOS      0x0008 /* Sep. termios for dialin/out  */
-#define CTC_TTY_XMIT_SIZE              1024 /* Default bufsize for write    */
-#define CTC_SERIAL_XMIT_MAX            4000 /* Maximum bufsize for write    */
-
-/* Private data (similar to async_struct in <linux/serial.h>) */
-typedef struct {
-  int			magic;
-  int			flags;		 /* defined in tty.h               */
-  int			mcr;		 /* Modem control register         */
-  int                   msr;             /* Modem status register          */
-  int                   lsr;             /* Line status register           */
-  int			line;
-  int			count;		 /* # of fd on device              */
-  int			blocked_open;	 /* # of blocked opens             */
-  struct net_device     *netdev;
-  struct sk_buff_head   tx_queue;        /* transmit queue                 */
-  struct sk_buff_head   rx_queue;        /* receive queue                  */
-  struct tty_struct 	*tty;            /* Pointer to corresponding tty   */
-  wait_queue_head_t	open_wait;
-  wait_queue_head_t	close_wait;
-  struct semaphore      write_sem;
-  struct tasklet_struct tasklet;
-  struct timer_list     stoptimer;
-} ctc_tty_info;
-
-/* Description of one CTC-tty */
-typedef struct {
-  struct tty_driver  *ctc_tty_device;		   /* tty-device             */
-  ctc_tty_info       info[CTC_TTY_MAX_DEVICES];	   /* Private data           */
-} ctc_tty_driver;
-
-static ctc_tty_driver *driver;
-
-/* Leave this unchanged unless you know what you do! */
-#define MODEM_PARANOIA_CHECK
-#define MODEM_DO_RESTART
-
-#define CTC_TTY_NAME "ctctty"
-
-static __u32 ctc_tty_magic = CTC_ASYNC_MAGIC;
-static int ctc_tty_shuttingdown = 0;
-
-static spinlock_t ctc_tty_lock;
-
-/* ctc_tty_try_read() is called from within ctc_tty_rcv_skb()
- * to stuff incoming data directly into a tty's flip-buffer. If the
- * flip buffer is full, the packet gets queued up.
- *
- * Return:
- *  1 = Success
- *  0 = Failure, data has to be buffered and later processed by
- *      ctc_tty_readmodem().
- */
-static int
-ctc_tty_try_read(ctc_tty_info * info, struct sk_buff *skb)
-{
-	int len;
-	struct tty_struct *tty;
-
-	DBF_TEXT(trace, 5, __FUNCTION__);
-	if ((tty = info->tty)) {
-		if (info->mcr & UART_MCR_RTS) {
-			len = skb->len;
-			tty_insert_flip_string(tty, skb->data, len);
-			tty_flip_buffer_push(tty);
-			kfree_skb(skb);
-			return 1;
-		}
-	}
-	return 0;
-}
-
-/* ctc_tty_readmodem() is called periodically from within timer-interrupt.
- * It tries getting received data from the receive queue an stuff it into
- * the tty's flip-buffer.
- */
-static int
-ctc_tty_readmodem(ctc_tty_info *info)
-{
-	int ret = 1;
-	struct tty_struct *tty;
-
-	DBF_TEXT(trace, 5, __FUNCTION__);
-	if ((tty = info->tty)) {
-		if (info->mcr & UART_MCR_RTS) {
-			struct sk_buff *skb;
-
-			if ((skb = skb_dequeue(&info->rx_queue))) {
-				int len = skb->len;
-				tty_insert_flip_string(tty, skb->data, len);
-				skb_pull(skb, len);
-				tty_flip_buffer_push(tty);
-				if (skb->len > 0)
-					skb_queue_head(&info->rx_queue, skb);
-				else {
-					kfree_skb(skb);
-					ret = !skb_queue_empty(&info->rx_queue);
-				}
-			}
-		}
-	}
-	return ret;
-}
-
-void
-ctc_tty_setcarrier(struct net_device *netdev, int on)
-{
-	int i;
-
-	DBF_TEXT(trace, 4, __FUNCTION__);
-	if ((!driver) || ctc_tty_shuttingdown)
-		return;
-	for (i = 0; i < CTC_TTY_MAX_DEVICES; i++)
-		if (driver->info[i].netdev == netdev) {
-			ctc_tty_info *info = &driver->info[i];
-			if (on)
-				info->msr |= UART_MSR_DCD;
-			else
-				info->msr &= ~UART_MSR_DCD;
-			if ((info->flags & CTC_ASYNC_CHECK_CD) && (!on))
-				tty_hangup(info->tty);
-		}
-}
-
-void
-ctc_tty_netif_rx(struct sk_buff *skb)
-{
-	int i;
-	ctc_tty_info *info = NULL;
-
-	DBF_TEXT(trace, 5, __FUNCTION__);
-	if (!skb)
-		return;
-	if ((!skb->dev) || (!driver) || ctc_tty_shuttingdown) {
-		dev_kfree_skb(skb);
-		return;
-	}
-	for (i = 0; i < CTC_TTY_MAX_DEVICES; i++)
-		if (driver->info[i].netdev == skb->dev) {
-			info = &driver->info[i];
-			break;
-		}
-	if (!info) {
-		dev_kfree_skb(skb);
-		return;
-	}
-	if (skb->len < 6) {
-		dev_kfree_skb(skb);
-		return;
-	}
-	if (memcmp(skb->data, &ctc_tty_magic, sizeof(__u32))) {
-		dev_kfree_skb(skb);
-		return;
-	}
-	skb_pull(skb, sizeof(__u32));
-
-	i = *((int *)skb->data);
-	skb_pull(skb, sizeof(info->mcr));
-	if (i & UART_MCR_RTS) {
-		info->msr |= UART_MSR_CTS;
-		if (info->flags & CTC_ASYNC_CTS_FLOW)
-			info->tty->hw_stopped = 0;
-	} else {
-		info->msr &= ~UART_MSR_CTS;
-		if (info->flags & CTC_ASYNC_CTS_FLOW)
-			info->tty->hw_stopped = 1;
-	}
-	if (i & UART_MCR_DTR)
-		info->msr |= UART_MSR_DSR;
-	else
-		info->msr &= ~UART_MSR_DSR;
-	if (skb->len <= 0) {
-		kfree_skb(skb);
-		return;
-	}
-	/* Try to deliver directly via tty-flip-buf if queue is empty */
-	if (skb_queue_empty(&info->rx_queue))
-		if (ctc_tty_try_read(info, skb))
-			return;
-	/* Direct deliver failed or queue wasn't empty.
-	 * Queue up for later dequeueing via timer-irq.
-	 */
-	skb_queue_tail(&info->rx_queue, skb);
-	/* Schedule dequeuing */
-	tasklet_schedule(&info->tasklet);
-}
-
-static int
-ctc_tty_tint(ctc_tty_info * info)
-{
-	struct sk_buff *skb = skb_dequeue(&info->tx_queue);
-	int stopped = (info->tty->hw_stopped || info->tty->stopped);
-	int wake = 1;
-	int rc;
-
-	DBF_TEXT(trace, 4, __FUNCTION__);
-	if (!info->netdev) {
-		if (skb)
-			kfree_skb(skb);
-		return 0;
-	}
-	if (info->flags & CTC_ASYNC_TX_LINESTAT) {
-		int skb_res = info->netdev->hard_header_len +
-			sizeof(info->mcr) + sizeof(__u32);
-		/* If we must update line status,
-		 * create an empty dummy skb and insert it.
-		 */
-		if (skb)
-			skb_queue_head(&info->tx_queue, skb);
-
-		skb = dev_alloc_skb(skb_res);
-		if (!skb) {
-			printk(KERN_WARNING
-			       "ctc_tty: Out of memory in %s%d tint\n",
-			       CTC_TTY_NAME, info->line);
-			return 1;
-		}
-		skb_reserve(skb, skb_res);
-		stopped = 0;
-		wake = 0;
-	}
-	if (!skb)
-		return 0;
-	if (stopped) {
-		skb_queue_head(&info->tx_queue, skb);
-		return 1;
-	}
-#if 0
-	if (skb->len > 0)
-		printk(KERN_DEBUG "tint: %d %02x\n", skb->len, *(skb->data));
-	else
-		printk(KERN_DEBUG "tint: %d STAT\n", skb->len);
-#endif
-	memcpy(skb_push(skb, sizeof(info->mcr)), &info->mcr, sizeof(info->mcr));
-	memcpy(skb_push(skb, sizeof(__u32)), &ctc_tty_magic, sizeof(__u32));
-	rc = info->netdev->hard_start_xmit(skb, info->netdev);
-	if (rc) {
-		skb_pull(skb, sizeof(info->mcr) + sizeof(__u32));
-		if (skb->len > 0)
-			skb_queue_head(&info->tx_queue, skb);
-		else
-			kfree_skb(skb);
-	} else {
-		struct tty_struct *tty = info->tty;
-
-		info->flags &= ~CTC_ASYNC_TX_LINESTAT;
-		if (tty) {
-			tty_wakeup(tty);
-		}
-	}
-	return (skb_queue_empty(&info->tx_queue) ? 0 : 1);
-}
-
-/************************************************************
- *
- * Modem-functions
- *
- * mostly "stolen" from original Linux-serial.c and friends.
- *
- ************************************************************/
-
-static inline int
-ctc_tty_paranoia_check(ctc_tty_info * info, char *name, const char *routine)
-{
-#ifdef MODEM_PARANOIA_CHECK
-	if (!info) {
-		printk(KERN_WARNING "ctc_tty: null info_struct for %s in %s\n",
-		       name, routine);
-		return 1;
-	}
-	if (info->magic != CTC_ASYNC_MAGIC) {
-		printk(KERN_WARNING "ctc_tty: bad magic for info struct %s in %s\n",
-		       name, routine);
-		return 1;
-	}
-#endif
-	return 0;
-}
-
-static void
-ctc_tty_inject(ctc_tty_info *info, char c)
-{
-	int skb_res;
-	struct sk_buff *skb;
-
-	DBF_TEXT(trace, 4, __FUNCTION__);
-	if (ctc_tty_shuttingdown)
-		return;
-	skb_res = info->netdev->hard_header_len + sizeof(info->mcr) +
-		sizeof(__u32) + 1;
-	skb = dev_alloc_skb(skb_res);
-	if (!skb) {
-		printk(KERN_WARNING
-		       "ctc_tty: Out of memory in %s%d tx_inject\n",
-		       CTC_TTY_NAME, info->line);
-		return;
-	}
-	skb_reserve(skb, skb_res);
-	*(skb_put(skb, 1)) = c;
-	skb_queue_head(&info->tx_queue, skb);
-	tasklet_schedule(&info->tasklet);
-}
-
-static void
-ctc_tty_transmit_status(ctc_tty_info *info)
-{
-	DBF_TEXT(trace, 5, __FUNCTION__);
-	if (ctc_tty_shuttingdown)
-		return;
-	info->flags |= CTC_ASYNC_TX_LINESTAT;
-	tasklet_schedule(&info->tasklet);
-}
-
-static void
-ctc_tty_change_speed(ctc_tty_info * info)
-{
-	unsigned int cflag;
-	unsigned int quot;
-	int i;
-
-	DBF_TEXT(trace, 3, __FUNCTION__);
-	if (!info->tty || !info->tty->termios)
-		return;
-	cflag = info->tty->termios->c_cflag;
-
-	quot = i = cflag & CBAUD;
-	if (i & CBAUDEX) {
-		i &= ~CBAUDEX;
-		if (i < 1 || i > 2)
-			info->tty->termios->c_cflag &= ~CBAUDEX;
-		else
-			i += 15;
-	}
-	if (quot) {
-		info->mcr |= UART_MCR_DTR;
-		info->mcr |= UART_MCR_RTS;
-		ctc_tty_transmit_status(info);
-	} else {
-		info->mcr &= ~UART_MCR_DTR;
-		info->mcr &= ~UART_MCR_RTS;
-		ctc_tty_transmit_status(info);
-		return;
-	}
-
-	/* CTS flow control flag and modem status interrupts */
-	if (cflag & CRTSCTS) {
-		info->flags |= CTC_ASYNC_CTS_FLOW;
-	} else
-		info->flags &= ~CTC_ASYNC_CTS_FLOW;
-	if (cflag & CLOCAL)
-		info->flags &= ~CTC_ASYNC_CHECK_CD;
-	else {
-		info->flags |= CTC_ASYNC_CHECK_CD;
-	}
-}
-
-static int
-ctc_tty_startup(ctc_tty_info * info)
-{
-	DBF_TEXT(trace, 3, __FUNCTION__);
-	if (info->flags & CTC_ASYNC_INITIALIZED)
-		return 0;
-#ifdef CTC_DEBUG_MODEM_OPEN
-	printk(KERN_DEBUG "starting up %s%d ...\n", CTC_TTY_NAME, info->line);
-#endif
-	/*
-	 * Now, initialize the UART
-	 */
-	info->mcr = UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2;
-	if (info->tty)
-		clear_bit(TTY_IO_ERROR, &info->tty->flags);
-	/*
-	 * and set the speed of the serial port
-	 */
-	ctc_tty_change_speed(info);
-
-	info->flags |= CTC_ASYNC_INITIALIZED;
-	if (!(info->flags & CTC_ASYNC_NETDEV_OPEN))
-		info->netdev->open(info->netdev);
-	info->flags |= CTC_ASYNC_NETDEV_OPEN;
-	return 0;
-}
-
-static void
-ctc_tty_stopdev(unsigned long data)
-{
-	ctc_tty_info *info = (ctc_tty_info *)data;
-
-	if ((!info) || (!info->netdev) ||
-	    (info->flags & CTC_ASYNC_INITIALIZED))
-		return;
-	info->netdev->stop(info->netdev);
-	info->flags &= ~CTC_ASYNC_NETDEV_OPEN;
-}
-
-/*
- * This routine will shutdown a serial port; interrupts are disabled, and
- * DTR is dropped if the hangup on close termio flag is on.
- */
-static void
-ctc_tty_shutdown(ctc_tty_info * info)
-{
-	DBF_TEXT(trace, 3, __FUNCTION__);
-	if (!(info->flags & CTC_ASYNC_INITIALIZED))
-		return;
-#ifdef CTC_DEBUG_MODEM_OPEN
-	printk(KERN_DEBUG "Shutting down %s%d ....\n", CTC_TTY_NAME, info->line);
-#endif
-	info->msr &= ~UART_MSR_RI;
-	if (!info->tty || (info->tty->termios->c_cflag & HUPCL))
-		info->mcr &= ~(UART_MCR_DTR | UART_MCR_RTS);
-	if (info->tty)
-		set_bit(TTY_IO_ERROR, &info->tty->flags);
-	mod_timer(&info->stoptimer, jiffies + (10 * HZ));
-	skb_queue_purge(&info->tx_queue);
-	skb_queue_purge(&info->rx_queue);
-	info->flags &= ~CTC_ASYNC_INITIALIZED;
-}
-
-/* ctc_tty_write() is the main send-routine. It is called from the upper
- * levels within the kernel to perform sending data. Depending on the
- * online-flag it either directs output to the at-command-interpreter or
- * to the lower level. Additional tasks done here:
- *  - If online, check for escape-sequence (+++)
- *  - If sending audio-data, call ctc_tty_DLEdown() to parse DLE-codes.
- *  - If receiving audio-data, call ctc_tty_end_vrx() to abort if needed.
- *  - If dialing, abort dial.
- */
-static int
-ctc_tty_write(struct tty_struct *tty, const u_char * buf, int count)
-{
-	int c;
-	int total = 0;
-	ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
-
-	DBF_TEXT(trace, 5, __FUNCTION__);
-	if (ctc_tty_shuttingdown)
-		goto ex;
-	if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_write"))
-		goto ex;
-	if (!tty)
-		goto ex;
-	if (!info->netdev) {
-		total = -ENODEV;
-		goto ex;
-	}
-	while (1) {
-		struct sk_buff *skb;
-		int skb_res;
-
-		c = (count < CTC_TTY_XMIT_SIZE) ? count : CTC_TTY_XMIT_SIZE;
-		if (c <= 0)
-			break;
-
-		skb_res = info->netdev->hard_header_len + sizeof(info->mcr) +
-			+ sizeof(__u32);
-		skb = dev_alloc_skb(skb_res + c);
-		if (!skb) {
-			printk(KERN_WARNING
-			       "ctc_tty: Out of memory in %s%d write\n",
-			       CTC_TTY_NAME, info->line);
-			break;
-		}
-		skb_reserve(skb, skb_res);
-		memcpy(skb_put(skb, c), buf, c);
-		skb_queue_tail(&info->tx_queue, skb);
-		buf += c;
-		total += c;
-		count -= c;
-	}
-	if (!skb_queue_empty(&info->tx_queue)) {
-		info->lsr &= ~UART_LSR_TEMT;
-		tasklet_schedule(&info->tasklet);
-	}
-ex:
-	DBF_TEXT(trace, 6, __FUNCTION__);
-	return total;
-}
-
-static int
-ctc_tty_write_room(struct tty_struct *tty)
-{
-	ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
-
-	if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_write_room"))
-		return 0;
-	return CTC_TTY_XMIT_SIZE;
-}
-
-static int
-ctc_tty_chars_in_buffer(struct tty_struct *tty)
-{
-	ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
-
-	if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_chars_in_buffer"))
-		return 0;
-	return 0;
-}
-
-static void
-ctc_tty_flush_buffer(struct tty_struct *tty)
-{
-	ctc_tty_info *info;
-	unsigned long flags;
-
-	DBF_TEXT(trace, 4, __FUNCTION__);
-	if (!tty)
-		goto ex;
-	spin_lock_irqsave(&ctc_tty_lock, flags);
-	info = (ctc_tty_info *) tty->driver_data;
-	if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_flush_buffer")) {
-		spin_unlock_irqrestore(&ctc_tty_lock, flags);
-		goto ex;
-	}
-	skb_queue_purge(&info->tx_queue);
-	info->lsr |= UART_LSR_TEMT;
-	spin_unlock_irqrestore(&ctc_tty_lock, flags);
-	wake_up_interruptible(&tty->write_wait);
-	tty_wakeup(tty);
-ex:
-	DBF_TEXT_(trace, 2, "ex: %s ", __FUNCTION__);
-	return;
-}
-
-static void
-ctc_tty_flush_chars(struct tty_struct *tty)
-{
-	ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
-
-	DBF_TEXT(trace, 4, __FUNCTION__);
-	if (ctc_tty_shuttingdown)
-		return;
-	if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_flush_chars"))
-		return;
-	if (tty->stopped || tty->hw_stopped || skb_queue_empty(&info->tx_queue))
-		return;
-	tasklet_schedule(&info->tasklet);
-}
-
-/*
- * ------------------------------------------------------------
- * ctc_tty_throttle()
- *
- * This routine is called by the upper-layer tty layer to signal that
- * incoming characters should be throttled.
- * ------------------------------------------------------------
- */
-static void
-ctc_tty_throttle(struct tty_struct *tty)
-{
-	ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
-
-	DBF_TEXT(trace, 4, __FUNCTION__);
-	if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_throttle"))
-		return;
-	info->mcr &= ~UART_MCR_RTS;
-	if (I_IXOFF(tty))
-		ctc_tty_inject(info, STOP_CHAR(tty));
-	ctc_tty_transmit_status(info);
-}
-
-static void
-ctc_tty_unthrottle(struct tty_struct *tty)
-{
-	ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
-
-	DBF_TEXT(trace, 4, __FUNCTION__);
-	if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_unthrottle"))
-		return;
-	info->mcr |= UART_MCR_RTS;
-	if (I_IXOFF(tty))
-		ctc_tty_inject(info, START_CHAR(tty));
-	ctc_tty_transmit_status(info);
-}
-
-/*
- * ------------------------------------------------------------
- * ctc_tty_ioctl() and friends
- * ------------------------------------------------------------
- */
-
-/*
- * ctc_tty_get_lsr_info - get line status register info
- *
- * Purpose: Let user call ioctl() to get info when the UART physically
- *          is emptied.  On bus types like RS485, the transmitter must
- *          release the bus after transmitting. This must be done when
- *          the transmit shift register is empty, not be done when the
- *          transmit holding register is empty.  This functionality
- *          allows RS485 driver to be written in user space.
- */
-static int
-ctc_tty_get_lsr_info(ctc_tty_info * info, uint __user *value)
-{
-	u_char status;
-	uint result;
-	ulong flags;
-
-	DBF_TEXT(trace, 4, __FUNCTION__);
-	spin_lock_irqsave(&ctc_tty_lock, flags);
-	status = info->lsr;
-	spin_unlock_irqrestore(&ctc_tty_lock, flags);
-	result = ((status & UART_LSR_TEMT) ? TIOCSER_TEMT : 0);
-	put_user(result, value);
-	return 0;
-}
-
-
-static int ctc_tty_tiocmget(struct tty_struct *tty, struct file *file)
-{
-	ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
-	u_char control,
-	 status;
-	uint result;
-	ulong flags;
-
-	DBF_TEXT(trace, 4, __FUNCTION__);
-	if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_ioctl"))
-		return -ENODEV;
-	if (tty->flags & (1 << TTY_IO_ERROR))
-		return -EIO;
-
-	control = info->mcr;
-	spin_lock_irqsave(&ctc_tty_lock, flags);
-	status = info->msr;
-	spin_unlock_irqrestore(&ctc_tty_lock, flags);
-	result = ((control & UART_MCR_RTS) ? TIOCM_RTS : 0)
-	    | ((control & UART_MCR_DTR) ? TIOCM_DTR : 0)
-	    | ((status & UART_MSR_DCD) ? TIOCM_CAR : 0)
-	    | ((status & UART_MSR_RI) ? TIOCM_RNG : 0)
-	    | ((status & UART_MSR_DSR) ? TIOCM_DSR : 0)
-	    | ((status & UART_MSR_CTS) ? TIOCM_CTS : 0);
-	return result;
-}
-
-static int
-ctc_tty_tiocmset(struct tty_struct *tty, struct file *file,
-		 unsigned int set, unsigned int clear)
-{
-	ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
-
-	DBF_TEXT(trace, 4, __FUNCTION__);
-	if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_ioctl"))
-		return -ENODEV;
-	if (tty->flags & (1 << TTY_IO_ERROR))
-		return -EIO;
-
-	if (set & TIOCM_RTS)
-		info->mcr |= UART_MCR_RTS;
-	if (set & TIOCM_DTR)
-		info->mcr |= UART_MCR_DTR;
-
-	if (clear & TIOCM_RTS)
-		info->mcr &= ~UART_MCR_RTS;
-	if (clear & TIOCM_DTR)
-		info->mcr &= ~UART_MCR_DTR;
-
-	if ((set | clear) & (TIOCM_RTS|TIOCM_DTR))
-		ctc_tty_transmit_status(info);
-	return 0;
-}
-
-static int
-ctc_tty_ioctl(struct tty_struct *tty, struct file *file,
-	       uint cmd, ulong arg)
-{
-	ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
-	int error;
-	int retval;
-
-	DBF_TEXT(trace, 4, __FUNCTION__);
-	if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_ioctl"))
-		return -ENODEV;
-	if (tty->flags & (1 << TTY_IO_ERROR))
-		return -EIO;
-	switch (cmd) {
-		case TCSBRK:   /* SVID version: non-zero arg --> no break */
-#ifdef CTC_DEBUG_MODEM_IOCTL
-			printk(KERN_DEBUG "%s%d ioctl TCSBRK\n", CTC_TTY_NAME, info->line);
-#endif
-			retval = tty_check_change(tty);
-			if (retval)
-				return retval;
-			tty_wait_until_sent(tty, 0);
-			return 0;
-		case TCSBRKP:  /* support for POSIX tcsendbreak() */
-#ifdef CTC_DEBUG_MODEM_IOCTL
-			printk(KERN_DEBUG "%s%d ioctl TCSBRKP\n", CTC_TTY_NAME, info->line);
-#endif
-			retval = tty_check_change(tty);
-			if (retval)
-				return retval;
-			tty_wait_until_sent(tty, 0);
-			return 0;
-		case TIOCGSOFTCAR:
-#ifdef CTC_DEBUG_MODEM_IOCTL
-			printk(KERN_DEBUG "%s%d ioctl TIOCGSOFTCAR\n", CTC_TTY_NAME,
-			       info->line);
-#endif
-			error = put_user(C_CLOCAL(tty) ? 1 : 0, (ulong __user *) arg);
-			return error;
-		case TIOCSSOFTCAR:
-#ifdef CTC_DEBUG_MODEM_IOCTL
-			printk(KERN_DEBUG "%s%d ioctl TIOCSSOFTCAR\n", CTC_TTY_NAME,
-			       info->line);
-#endif
-			error = get_user(arg, (ulong __user *) arg);
-			if (error)
-				return error;
-			tty->termios->c_cflag =
-			    ((tty->termios->c_cflag & ~CLOCAL) |
-			     (arg ? CLOCAL : 0));
-			return 0;
-		case TIOCSERGETLSR:	/* Get line status register */
-#ifdef CTC_DEBUG_MODEM_IOCTL
-			printk(KERN_DEBUG "%s%d ioctl TIOCSERGETLSR\n", CTC_TTY_NAME,
-			       info->line);
-#endif
-			if (access_ok(VERIFY_WRITE, (void __user *) arg, sizeof(uint)))
-				return ctc_tty_get_lsr_info(info, (uint __user *) arg);
-			else
-				return -EFAULT;
-		default:
-#ifdef CTC_DEBUG_MODEM_IOCTL
-			printk(KERN_DEBUG "UNKNOWN ioctl 0x%08x on %s%d\n", cmd,
-			       CTC_TTY_NAME, info->line);
-#endif
-			return -ENOIOCTLCMD;
-	}
-	return 0;
-}
-
-static void
-ctc_tty_set_termios(struct tty_struct *tty, struct termios *old_termios)
-{
-	ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
-	unsigned int cflag = tty->termios->c_cflag;
-
-	DBF_TEXT(trace, 4, __FUNCTION__);
-	ctc_tty_change_speed(info);
-
-	/* Handle transition to B0 */
-	if ((old_termios->c_cflag & CBAUD) && !(cflag & CBAUD)) {
-		info->mcr &= ~(UART_MCR_DTR|UART_MCR_RTS);
-		ctc_tty_transmit_status(info);
-	}
-
-	/* Handle transition from B0 to other */
-	if (!(old_termios->c_cflag & CBAUD) && (cflag & CBAUD)) {
-		info->mcr |= UART_MCR_DTR;
-		if (!(tty->termios->c_cflag & CRTSCTS) ||
-                    !test_bit(TTY_THROTTLED, &tty->flags)) {
-                        info->mcr |= UART_MCR_RTS;
-                }
-		ctc_tty_transmit_status(info);
-	}
-
-	/* Handle turning off CRTSCTS */
-	if ((old_termios->c_cflag & CRTSCTS) &&
-            !(tty->termios->c_cflag & CRTSCTS))
-                tty->hw_stopped = 0;
-}
-
-/*
- * ------------------------------------------------------------
- * ctc_tty_open() and friends
- * ------------------------------------------------------------
- */
-static int
-ctc_tty_block_til_ready(struct tty_struct *tty, struct file *filp, ctc_tty_info *info)
-{
-	DECLARE_WAITQUEUE(wait, NULL);
-	int do_clocal = 0;
-	unsigned long flags;
-	int retval;
-
-	DBF_TEXT(trace, 4, __FUNCTION__);
-	/*
-	 * If the device is in the middle of being closed, then block
-	 * until it's done, and then try again.
-	 */
-	if (tty_hung_up_p(filp) ||
-	    (info->flags & CTC_ASYNC_CLOSING)) {
-		if (info->flags & CTC_ASYNC_CLOSING)
-			wait_event(info->close_wait,
-				   !(info->flags & CTC_ASYNC_CLOSING));
-#ifdef MODEM_DO_RESTART
-		if (info->flags & CTC_ASYNC_HUP_NOTIFY)
-			return -EAGAIN;
-		else
-			return -ERESTARTSYS;
-#else
-		return -EAGAIN;
-#endif
-	}
-	/*
-	 * If non-blocking mode is set, then make the check up front
-	 * and then exit.
-	 */
-	if ((filp->f_flags & O_NONBLOCK) ||
-	    (tty->flags & (1 << TTY_IO_ERROR))) {
-		info->flags |= CTC_ASYNC_NORMAL_ACTIVE;
-		return 0;
-	}
-	if (tty->termios->c_cflag & CLOCAL)
-		do_clocal = 1;
-	/*
-	 * Block waiting for the carrier detect and the line to become
-	 * free (i.e., not in use by the callout).  While we are in
-	 * this loop, info->count is dropped by one, so that
-	 * ctc_tty_close() knows when to free things.  We restore it upon
-	 * exit, either normal or abnormal.
-	 */
-	retval = 0;
-	add_wait_queue(&info->open_wait, &wait);
-#ifdef CTC_DEBUG_MODEM_OPEN
-	printk(KERN_DEBUG "ctc_tty_block_til_ready before block: %s%d, count = %d\n",
-	       CTC_TTY_NAME, info->line, info->count);
-#endif
-	spin_lock_irqsave(&ctc_tty_lock, flags);
-	if (!(tty_hung_up_p(filp)))
-		info->count--;
-	spin_unlock_irqrestore(&ctc_tty_lock, flags);
-	info->blocked_open++;
-	while (1) {
-		set_current_state(TASK_INTERRUPTIBLE);
-		if (tty_hung_up_p(filp) ||
-		    !(info->flags & CTC_ASYNC_INITIALIZED)) {
-#ifdef MODEM_DO_RESTART
-			if (info->flags & CTC_ASYNC_HUP_NOTIFY)
-				retval = -EAGAIN;
-			else
-				retval = -ERESTARTSYS;
-#else
-			retval = -EAGAIN;
-#endif
-			break;
-		}
-		if (!(info->flags & CTC_ASYNC_CLOSING) &&
-		    (do_clocal || (info->msr & UART_MSR_DCD))) {
-			break;
-		}
-		if (signal_pending(current)) {
-			retval = -ERESTARTSYS;
-			break;
-		}
-#ifdef CTC_DEBUG_MODEM_OPEN
-		printk(KERN_DEBUG "ctc_tty_block_til_ready blocking: %s%d, count = %d\n",
-		       CTC_TTY_NAME, info->line, info->count);
-#endif
-		schedule();
-	}
-	current->state = TASK_RUNNING;
-	remove_wait_queue(&info->open_wait, &wait);
-	if (!tty_hung_up_p(filp))
-		info->count++;
-	info->blocked_open--;
-#ifdef CTC_DEBUG_MODEM_OPEN
-	printk(KERN_DEBUG "ctc_tty_block_til_ready after blocking: %s%d, count = %d\n",
-	       CTC_TTY_NAME, info->line, info->count);
-#endif
-	if (retval)
-		return retval;
-	info->flags |= CTC_ASYNC_NORMAL_ACTIVE;
-	return 0;
-}
-
-/*
- * This routine is called whenever a serial port is opened.  It
- * enables interrupts for a serial port, linking in its async structure into
- * the IRQ chain.   It also performs the serial-specific
- * initialization for the tty structure.
- */
-static int
-ctc_tty_open(struct tty_struct *tty, struct file *filp)
-{
-	ctc_tty_info *info;
-	unsigned long saveflags;
-	int retval,
-	 line;
-
-	DBF_TEXT(trace, 3, __FUNCTION__);
-	line = tty->index;
-	if (line < 0 || line > CTC_TTY_MAX_DEVICES)
-		return -ENODEV;
-	info = &driver->info[line];
-	if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_open"))
-		return -ENODEV;
-	if (!info->netdev)
-		return -ENODEV;
-#ifdef CTC_DEBUG_MODEM_OPEN
-	printk(KERN_DEBUG "ctc_tty_open %s, count = %d\n", tty->name,
-	       info->count);
-#endif
-	spin_lock_irqsave(&ctc_tty_lock, saveflags);
-	info->count++;
-	tty->driver_data = info;
-	info->tty = tty;
-	spin_unlock_irqrestore(&ctc_tty_lock, saveflags);
-	/*
-	 * Start up serial port
-	 */
-	retval = ctc_tty_startup(info);
-	if (retval) {
-#ifdef CTC_DEBUG_MODEM_OPEN
-		printk(KERN_DEBUG "ctc_tty_open return after startup\n");
-#endif
-		return retval;
-	}
-	retval = ctc_tty_block_til_ready(tty, filp, info);
-	if (retval) {
-#ifdef CTC_DEBUG_MODEM_OPEN
-		printk(KERN_DEBUG "ctc_tty_open return after ctc_tty_block_til_ready \n");
-#endif
-		return retval;
-	}
-#ifdef CTC_DEBUG_MODEM_OPEN
-	printk(KERN_DEBUG "ctc_tty_open %s successful...\n", tty->name);
-#endif
-	return 0;
-}
-
-static void
-ctc_tty_close(struct tty_struct *tty, struct file *filp)
-{
-	ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
-	ulong flags;
-	ulong timeout;
-	DBF_TEXT(trace, 3, __FUNCTION__);
-	if (!info || ctc_tty_paranoia_check(info, tty->name, "ctc_tty_close"))
-		return;
-	spin_lock_irqsave(&ctc_tty_lock, flags);
-	if (tty_hung_up_p(filp)) {
-		spin_unlock_irqrestore(&ctc_tty_lock, flags);
-#ifdef CTC_DEBUG_MODEM_OPEN
-		printk(KERN_DEBUG "ctc_tty_close return after tty_hung_up_p\n");
-#endif
-		return;
-	}
-	if ((tty->count == 1) && (info->count != 1)) {
-		/*
-		 * Uh, oh.  tty->count is 1, which means that the tty
-		 * structure will be freed.  Info->count should always
-		 * be one in these conditions.  If it's greater than
-		 * one, we've got real problems, since it means the
-		 * serial port won't be shutdown.
-		 */
-		printk(KERN_ERR "ctc_tty_close: bad port count; tty->count is 1, "
-		       "info->count is %d\n", info->count);
-		info->count = 1;
-	}
-	if (--info->count < 0) {
-		printk(KERN_ERR "ctc_tty_close: bad port count for %s%d: %d\n",
-		       CTC_TTY_NAME, info->line, info->count);
-		info->count = 0;
-	}
-	if (info->count) {
-		local_irq_restore(flags);
-#ifdef CTC_DEBUG_MODEM_OPEN
-		printk(KERN_DEBUG "ctc_tty_close after info->count != 0\n");
-#endif
-		return;
-	}
-	info->flags |= CTC_ASYNC_CLOSING;
-	tty->closing = 1;
-	/*
-	 * At this point we stop accepting input.  To do this, we
-	 * disable the receive line status interrupts, and tell the
-	 * interrupt driver to stop checking the data ready bit in the
-	 * line status register.
-	 */
-	if (info->flags & CTC_ASYNC_INITIALIZED) {
-		tty_wait_until_sent(tty, 30*HZ); /* 30 seconds timeout */
-		/*
-		 * Before we drop DTR, make sure the UART transmitter
-		 * has completely drained; this is especially
-		 * important if there is a transmit FIFO!
-		 */
-		timeout = jiffies + HZ;
-		while (!(info->lsr & UART_LSR_TEMT)) {
-			spin_unlock_irqrestore(&ctc_tty_lock, flags);
-			msleep(500);
-			spin_lock_irqsave(&ctc_tty_lock, flags);
-			if (time_after(jiffies,timeout))
-				break;
-		}
-	}
-	ctc_tty_shutdown(info);
-	if (tty->driver->flush_buffer) {
-		skb_queue_purge(&info->tx_queue);
-		info->lsr |= UART_LSR_TEMT;
-	}
-	tty_ldisc_flush(tty);
-	info->tty = 0;
-	tty->closing = 0;
-	if (info->blocked_open) {
-		msleep_interruptible(500);
-		wake_up_interruptible(&info->open_wait);
-	}
-	info->flags &= ~(CTC_ASYNC_NORMAL_ACTIVE | CTC_ASYNC_CLOSING);
-	wake_up_interruptible(&info->close_wait);
-	spin_unlock_irqrestore(&ctc_tty_lock, flags);
-#ifdef CTC_DEBUG_MODEM_OPEN
-	printk(KERN_DEBUG "ctc_tty_close normal exit\n");
-#endif
-}
-
-/*
- * ctc_tty_hangup() --- called by tty_hangup() when a hangup is signaled.
- */
-static void
-ctc_tty_hangup(struct tty_struct *tty)
-{
-	ctc_tty_info *info = (ctc_tty_info *)tty->driver_data;
-	unsigned long saveflags;
-	DBF_TEXT(trace, 3, __FUNCTION__);
-	if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_hangup"))
-		return;
-	ctc_tty_shutdown(info);
-	info->count = 0;
-	info->flags &= ~CTC_ASYNC_NORMAL_ACTIVE;
-	spin_lock_irqsave(&ctc_tty_lock, saveflags);
-	info->tty = 0;
-	spin_unlock_irqrestore(&ctc_tty_lock, saveflags);
-	wake_up_interruptible(&info->open_wait);
-}
-
-
-/*
- * For all online tty's, try sending data to
- * the lower levels.
- */
-static void
-ctc_tty_task(unsigned long arg)
-{
-	ctc_tty_info *info = (void *)arg;
-	unsigned long saveflags;
-	int again;
-
-	DBF_TEXT(trace, 3, __FUNCTION__);
-	spin_lock_irqsave(&ctc_tty_lock, saveflags);
-	if ((!ctc_tty_shuttingdown) && info) {
-		again = ctc_tty_tint(info);
-		if (!again)
-			info->lsr |= UART_LSR_TEMT;
-		again |= ctc_tty_readmodem(info);
-		if (again) {
-			tasklet_schedule(&info->tasklet);
-		}
-	}
-	spin_unlock_irqrestore(&ctc_tty_lock, saveflags);
-}
-
-static struct tty_operations ctc_ops = {
-	.open = ctc_tty_open,
-	.close = ctc_tty_close,
-	.write = ctc_tty_write,
-	.flush_chars = ctc_tty_flush_chars,
-	.write_room = ctc_tty_write_room,
-	.chars_in_buffer = ctc_tty_chars_in_buffer,
-	.flush_buffer = ctc_tty_flush_buffer,
-	.ioctl = ctc_tty_ioctl,
-	.throttle = ctc_tty_throttle,
-	.unthrottle = ctc_tty_unthrottle,
-	.set_termios = ctc_tty_set_termios,
-	.hangup = ctc_tty_hangup,
-	.tiocmget = ctc_tty_tiocmget,
-	.tiocmset = ctc_tty_tiocmset,
-};
-
-int
-ctc_tty_init(void)
-{
-	int i;
-	ctc_tty_info *info;
-	struct tty_driver *device;
-
-	DBF_TEXT(trace, 2, __FUNCTION__);
-	driver = kmalloc(sizeof(ctc_tty_driver), GFP_KERNEL);
-	if (driver == NULL) {
-		printk(KERN_WARNING "Out of memory in ctc_tty_modem_init\n");
-		return -ENOMEM;
-	}
-	memset(driver, 0, sizeof(ctc_tty_driver));
-	device = alloc_tty_driver(CTC_TTY_MAX_DEVICES);
-	if (!device) {
-		kfree(driver);
-		printk(KERN_WARNING "Out of memory in ctc_tty_modem_init\n");
-		return -ENOMEM;
-	}
-
-	device->devfs_name = "ctc/" CTC_TTY_NAME;
-	device->name = CTC_TTY_NAME;
-	device->major = CTC_TTY_MAJOR;
-	device->minor_start = 0;
-	device->type = TTY_DRIVER_TYPE_SERIAL;
-	device->subtype = SERIAL_TYPE_NORMAL;
-	device->init_termios = tty_std_termios;
-	device->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
-	device->flags = TTY_DRIVER_REAL_RAW;
-	device->driver_name = "ctc_tty",
-	tty_set_operations(device, &ctc_ops);
-	if (tty_register_driver(device)) {
-		printk(KERN_WARNING "ctc_tty: Couldn't register serial-device\n");
-		put_tty_driver(device);
-		kfree(driver);
-		return -1;
-	}
-	driver->ctc_tty_device = device;
-	for (i = 0; i < CTC_TTY_MAX_DEVICES; i++) {
-		info = &driver->info[i];
-		init_MUTEX(&info->write_sem);
-		tasklet_init(&info->tasklet, ctc_tty_task,
-				(unsigned long) info);
-		info->magic = CTC_ASYNC_MAGIC;
-		info->line = i;
-		info->tty = 0;
-		info->count = 0;
-		info->blocked_open = 0;
-		init_waitqueue_head(&info->open_wait);
-		init_waitqueue_head(&info->close_wait);
-		skb_queue_head_init(&info->tx_queue);
-		skb_queue_head_init(&info->rx_queue);
-		init_timer(&info->stoptimer);
-		info->stoptimer.function = ctc_tty_stopdev;
-		info->stoptimer.data = (unsigned long)info;
-		info->mcr = UART_MCR_RTS;
-	}
-	return 0;
-}
-
-int
-ctc_tty_register_netdev(struct net_device *dev) {
-	int ttynum;
-	char *err;
-	char *p;
-
-	DBF_TEXT(trace, 2, __FUNCTION__);
-	if ((!dev) || (!dev->name)) {
-		printk(KERN_WARNING
-		       "ctc_tty_register_netdev called "
-		       "with NULL dev or NULL dev-name\n");
-		return -1;
-	}
-
-	/*
-	 *	If the name is a format string the caller wants us to
-	 *	do a name allocation : format string must end with %d
-	 */
-	if (strchr(dev->name, '%'))
-	{
-		int err = dev_alloc_name(dev, dev->name);	// dev->name is changed by this
-		if (err < 0) {
-			printk(KERN_DEBUG "dev_alloc returned error %d\n", err);
-			return err;
-		}
-
-	}
-
-	for (p = dev->name; p && ((*p < '0') || (*p > '9')); p++);
-	ttynum = simple_strtoul(p, &err, 0);
-	if ((ttynum < 0) || (ttynum >= CTC_TTY_MAX_DEVICES) ||
-	    (err && *err)) {
-		printk(KERN_WARNING
-		       "ctc_tty_register_netdev called "
-		       "with number in name '%s'\n", dev->name);
-		return -1;
-	}
-	if (driver->info[ttynum].netdev) {
-		printk(KERN_WARNING
-		       "ctc_tty_register_netdev called "
-		       "for already registered device '%s'\n",
-		       dev->name);
-		return -1;
-	}
-	driver->info[ttynum].netdev = dev;
-	return 0;
-}
-
-void
-ctc_tty_unregister_netdev(struct net_device *dev) {
-	int i;
-	unsigned long saveflags;
-	ctc_tty_info *info = NULL;
-
-	DBF_TEXT(trace, 2, __FUNCTION__);
-	spin_lock_irqsave(&ctc_tty_lock, saveflags);
-	for (i = 0; i < CTC_TTY_MAX_DEVICES; i++)
-		if (driver->info[i].netdev == dev) {
-			info = &driver->info[i];
-			break;
-		}
-	if (info) {
-		info->netdev = NULL;
-		skb_queue_purge(&info->tx_queue);
-		skb_queue_purge(&info->rx_queue);
-	}
-	spin_unlock_irqrestore(&ctc_tty_lock, saveflags);
-}
-
-void
-ctc_tty_cleanup(void) {
-	unsigned long saveflags;
-
-	DBF_TEXT(trace, 2, __FUNCTION__);
-	spin_lock_irqsave(&ctc_tty_lock, saveflags);
-	ctc_tty_shuttingdown = 1;
-	spin_unlock_irqrestore(&ctc_tty_lock, saveflags);
-	tty_unregister_driver(driver->ctc_tty_device);
-	put_tty_driver(driver->ctc_tty_device);
-	kfree(driver);
-	driver = NULL;
-}
diff --git a/drivers/s390/net/ctctty.h b/drivers/s390/net/ctctty.h
deleted file mode 100644
index 7254dc0..0000000
--- a/drivers/s390/net/ctctty.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * CTC / ESCON network driver, tty interface.
- *
- * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#ifndef _CTCTTY_H_
-#define _CTCTTY_H_
-
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-
-extern int  ctc_tty_register_netdev(struct net_device *);
-extern void ctc_tty_unregister_netdev(struct net_device *);
-extern void ctc_tty_netif_rx(struct sk_buff *);
-extern int  ctc_tty_init(void);
-extern void ctc_tty_cleanup(void);
-extern void ctc_tty_setcarrier(struct net_device *, int);
-
-#endif
diff --git a/drivers/scsi/sata_sil24.c b/drivers/scsi/sata_sil24.c
index f7264fd..cb9082f 100644
--- a/drivers/scsi/sata_sil24.c
+++ b/drivers/scsi/sata_sil24.c
@@ -454,7 +454,7 @@
 	 */
 	msleep(10);
 
-	prb->ctrl = PRB_CTRL_SRST;
+	prb->ctrl = cpu_to_le16(PRB_CTRL_SRST);
 	prb->fis[1] = 0; /* no PM yet */
 
 	writel((u32)paddr, port + PORT_CMD_ACTIVATE);
@@ -551,9 +551,9 @@
 
 		if (qc->tf.protocol != ATA_PROT_ATAPI_NODATA) {
 			if (qc->tf.flags & ATA_TFLAG_WRITE)
-				prb->ctrl = PRB_CTRL_PACKET_WRITE;
+				prb->ctrl = cpu_to_le16(PRB_CTRL_PACKET_WRITE);
 			else
-				prb->ctrl = PRB_CTRL_PACKET_READ;
+				prb->ctrl = cpu_to_le16(PRB_CTRL_PACKET_READ);
 		} else
 			prb->ctrl = 0;
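The hunks above store the 16-bit PRB control flags through cpu_to_le16(), since the controller reads the ctrl field as little-endian regardless of host byte order. A minimal userspace sketch of the same idea, using glibc's htole16() in place of the kernel's cpu_to_le16(); the flag value is a placeholder, not the real PRB_CTRL_SRST definition:

/* Standalone sketch (userspace, not the driver): a 16-bit descriptor field
 * the device reads as little-endian must be converted from host byte order
 * before being stored.  FAKE_PRB_CTRL_SRST is a placeholder value. */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

#define FAKE_PRB_CTRL_SRST	0x0040		/* placeholder flag value */

int main(void)
{
	uint16_t cpu_val  = FAKE_PRB_CTRL_SRST;			/* host byte order */
	uint16_t wire_val = htole16(FAKE_PRB_CTRL_SRST);	/* device byte order */

	/* On little-endian hosts the two are identical; on big-endian hosts
	 * only wire_val has the in-memory layout the device expects. */
	printf("cpu=%#06x wire=%#06x\n", cpu_val, wire_val);
	return 0;
}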
 
diff --git a/fs/namei.c b/fs/namei.c
index 96723ae..d6e2ee2 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1080,8 +1080,8 @@
 	nd->flags = flags;
 	nd->depth = 0;
 
-	read_lock(&current->fs->lock);
 	if (*name=='/') {
+		read_lock(&current->fs->lock);
 		if (current->fs->altroot && !(nd->flags & LOOKUP_NOALT)) {
 			nd->mnt = mntget(current->fs->altrootmnt);
 			nd->dentry = dget(current->fs->altroot);
@@ -1092,33 +1092,35 @@
 		}
 		nd->mnt = mntget(current->fs->rootmnt);
 		nd->dentry = dget(current->fs->root);
+		read_unlock(&current->fs->lock);
 	} else if (dfd == AT_FDCWD) {
+		read_lock(&current->fs->lock);
 		nd->mnt = mntget(current->fs->pwdmnt);
 		nd->dentry = dget(current->fs->pwd);
+		read_unlock(&current->fs->lock);
 	} else {
 		struct dentry *dentry;
 
 		file = fget_light(dfd, &fput_needed);
 		retval = -EBADF;
 		if (!file)
-			goto unlock_fail;
+			goto out_fail;
 
 		dentry = file->f_dentry;
 
 		retval = -ENOTDIR;
 		if (!S_ISDIR(dentry->d_inode->i_mode))
-			goto fput_unlock_fail;
+			goto fput_fail;
 
 		retval = file_permission(file, MAY_EXEC);
 		if (retval)
-			goto fput_unlock_fail;
+			goto fput_fail;
 
 		nd->mnt = mntget(file->f_vfsmnt);
 		nd->dentry = dget(dentry);
 
 		fput_light(file, fput_needed);
 	}
-	read_unlock(&current->fs->lock);
 	current->total_link_count = 0;
 	retval = link_path_walk(name, nd);
 out:
@@ -1127,13 +1129,12 @@
 				nd->dentry->d_inode))
 		audit_inode(name, nd->dentry->d_inode, flags);
 	}
+out_fail:
 	return retval;
 
-fput_unlock_fail:
+fput_fail:
 	fput_light(file, fput_needed);
-unlock_fail:
-	read_unlock(&current->fs->lock);
-	return retval;
+	goto out_fail;
 }
 
 int fastcall path_lookup(const char *name, unsigned int flags,
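The rewrite above shrinks the current->fs->lock critical section to just the snapshot of root/pwd and drops it entirely on the fd-relative path, so fget_light() and file_permission() no longer run under the lock. A minimal sketch of that pattern, with illustrative names and a pthread rwlock standing in for the kernel lock:

/* Sketch only (not fs/namei.c): hold the reader lock just long enough to
 * copy the shared pointer, not across unrelated work such as fd lookup or
 * permission checks. */
#include <pthread.h>
#include <stdio.h>

struct fs_ctx {
	pthread_rwlock_t lock;
	const char *root, *pwd;
};

static const char *snapshot_pwd(struct fs_ctx *fs)
{
	const char *pwd;

	pthread_rwlock_rdlock(&fs->lock);	/* held only for the copy... */
	pwd = fs->pwd;
	pthread_rwlock_unlock(&fs->lock);	/* ...not across the whole lookup */
	return pwd;
}

int main(void)
{
	struct fs_ctx fs = { PTHREAD_RWLOCK_INITIALIZER, "/", "/home" };

	printf("cwd snapshot: %s\n", snapshot_pwd(&fs));
	return 0;
}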
diff --git a/include/asm-alpha/smp.h b/include/asm-alpha/smp.h
index 9950706..e143210 100644
--- a/include/asm-alpha/smp.h
+++ b/include/asm-alpha/smp.h
@@ -45,10 +45,8 @@
 #define hard_smp_processor_id()	__hard_smp_processor_id()
 #define raw_smp_processor_id()	(current_thread_info()->cpu)
 
-extern cpumask_t cpu_present_mask;
-extern cpumask_t cpu_online_map;
 extern int smp_num_cpus;
-#define cpu_possible_map	cpu_present_mask
+#define cpu_possible_map	cpu_present_map
 
 int smp_call_function_on_cpu(void (*func) (void *info), void *info,int retry, int wait, cpumask_t cpu);
 
diff --git a/include/asm-arm/arch-l7200/serial_l7200.h b/include/asm-arm/arch-l7200/serial_l7200.h
index 238c595..b1008a9 100644
--- a/include/asm-arm/arch-l7200/serial_l7200.h
+++ b/include/asm-arm/arch-l7200/serial_l7200.h
@@ -28,7 +28,7 @@
 #define UARTDR			0x00	/* Tx/Rx data */
 #define RXSTAT			0x04	/* Rx status */
 #define H_UBRLCR		0x08	/* mode register high */
-#define M_UBRLCR		0x0C	/* mode reg mid (MSB of buad)*/
+#define M_UBRLCR		0x0C	/* mode reg mid (MSB of baud)*/
 #define L_UBRLCR		0x10	/* mode reg low (LSB of baud)*/
 #define UARTCON			0x14	/* control register */
 #define UARTFLG			0x18	/* flag register */
diff --git a/include/asm-arm/arch-l7200/uncompress.h b/include/asm-arm/arch-l7200/uncompress.h
index 9fcd40a..04be2a0 100644
--- a/include/asm-arm/arch-l7200/uncompress.h
+++ b/include/asm-arm/arch-l7200/uncompress.h
@@ -6,7 +6,7 @@
  * Changelog:
  *  05-01-2000	SJH	Created
  *  05-13-2000	SJH	Filled in function bodies
- *  07-26-2000	SJH	Removed hard coded buad rate
+ *  07-26-2000	SJH	Removed hard coded baud rate
  */
 
 #include <asm/hardware.h>
diff --git a/include/asm-mips/addrspace.h b/include/asm-mips/addrspace.h
index 42520cc..1386af1 100644
--- a/include/asm-mips/addrspace.h
+++ b/include/asm-mips/addrspace.h
@@ -129,6 +129,7 @@
 #if defined (CONFIG_CPU_R4300)						\
     || defined (CONFIG_CPU_R4X00)					\
     || defined (CONFIG_CPU_R5000)					\
+    || defined (CONFIG_CPU_RM7000)					\
     || defined (CONFIG_CPU_NEVADA)					\
     || defined (CONFIG_CPU_TX49XX)					\
     || defined (CONFIG_CPU_MIPS64)
diff --git a/include/asm-mips/delay.h b/include/asm-mips/delay.h
index 64dd4515..928f30f 100644
--- a/include/asm-mips/delay.h
+++ b/include/asm-mips/delay.h
@@ -19,20 +19,22 @@
 {
 	if (sizeof(long) == 4)
 		__asm__ __volatile__ (
-		".set\tnoreorder\n"
-		"1:\tbnez\t%0,1b\n\t"
-		"subu\t%0,1\n\t"
-		".set\treorder"
+		"	.set	noreorder				\n"
+		"	.align	3					\n"
+		"1:	bnez	%0, 1b					\n"
+		"	subu	%0, 1					\n"
+		"	.set	reorder					\n"
 		: "=r" (loops)
 		: "0" (loops));
 	else if (sizeof(long) == 8)
 		__asm__ __volatile__ (
-		".set\tnoreorder\n"
-		"1:\tbnez\t%0,1b\n\t"
-		"dsubu\t%0,1\n\t"
-		".set\treorder"
-		:"=r" (loops)
-		:"0" (loops));
+		"	.set	noreorder				\n"
+		"	.align	3					\n"
+		"1:	bnez	%0, 1b					\n"
+		"	dsubu	%0, 1					\n"
+		"	.set	reorder					\n"
+		: "=r" (loops)
+		: "0" (loops));
 }
 
 
diff --git a/include/asm-mips/page.h b/include/asm-mips/page.h
index a1eab13..4035ec7 100644
--- a/include/asm-mips/page.h
+++ b/include/asm-mips/page.h
@@ -139,9 +139,11 @@
 
 #define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
 
+#ifndef CONFIG_SPARSEMEM
 #ifndef CONFIG_NEED_MULTIPLE_NODES
 #define pfn_valid(pfn)		((pfn) < max_mapnr)
 #endif
+#endif
 
 #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
 #define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
diff --git a/include/asm-mips/pgtable-32.h b/include/asm-mips/pgtable-32.h
index 4d6bc45..087c207 100644
--- a/include/asm-mips/pgtable-32.h
+++ b/include/asm-mips/pgtable-32.h
@@ -177,48 +177,67 @@
 	((swp_entry_t) { ((type) << 10) | ((offset) << 15) })
 
 /*
- * Bits 0, 1, 2, 9 and 10 are taken, split up the 27 bits of offset
- * into this range:
+ * Bits 0, 4, 8, and 9 are taken, split up 28 bits of offset into this range:
  */
-#define PTE_FILE_MAX_BITS	27
+#define PTE_FILE_MAX_BITS	28
 
-#define pte_to_pgoff(_pte) \
-	((((_pte).pte >> 3) & 0x3f ) + (((_pte).pte >> 11) << 8 ))
+#define pte_to_pgoff(_pte)	((((_pte).pte >> 1 ) & 0x07) | \
+				 (((_pte).pte >> 2 ) & 0x38) | \
+				 (((_pte).pte >> 10) <<  6 ))
 
-#define pgoff_to_pte(off) \
-	((pte_t) { (((off) & 0x3f) << 3) + (((off) >> 8) << 11) + _PAGE_FILE })
+#define pgoff_to_pte(off)	((pte_t) { (((off) & 0x07) << 1 ) | \
+					   (((off) & 0x38) << 2 ) | \
+					   (((off) >>  6 ) << 10) | \
+					   _PAGE_FILE })
 
 #else
 
 /* Swap entries must have VALID and GLOBAL bits cleared. */
-#define __swp_type(x)		(((x).val >> 8) & 0x1f)
-#define __swp_offset(x)		((x).val >> 13)
+#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
+#define __swp_type(x)		(((x).val >> 2) & 0x1f)
+#define __swp_offset(x) 	 ((x).val >> 7)
 #define __swp_entry(type,offset)	\
-		((swp_entry_t) { ((type) << 8) | ((offset) << 13) })
+		((swp_entry_t)  { ((type) << 2) | ((offset) << 7) })
+#else
+#define __swp_type(x)		(((x).val >> 8) & 0x1f)
+#define __swp_offset(x) 	 ((x).val >> 13)
+#define __swp_entry(type,offset)	\
+		((swp_entry_t)  { ((type) << 8) | ((offset) << 13) })
+#endif /* defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) */
 
+#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
 /*
- * Bits 0, 1, 2, 7 and 8 are taken, split up the 27 bits of offset
- * into this range:
+ * Bits 0 and 1 of pte_high are taken, use the rest for the page offset...
  */
-#define PTE_FILE_MAX_BITS	27
+#define PTE_FILE_MAX_BITS	30
 
-#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1)
-	/* fixme */
-#define pte_to_pgoff(_pte) (((_pte).pte_high >> 6) + ((_pte).pte_high & 0x3f))
-#define pgoff_to_pte(off) \
-	((pte_t){(((off) & 0x3f) + ((off) << 6) + _PAGE_FILE)})
+#define pte_to_pgoff(_pte)	((_pte).pte_high >> 2)
+#define pgoff_to_pte(off) 	((pte_t) { _PAGE_FILE, (off) << 2 })
 
 #else
-#define pte_to_pgoff(_pte) \
-	((((_pte).pte >> 3) & 0x1f ) + (((_pte).pte >> 9) << 6 ))
+/*
+ * Bits 0, 4, 6, and 7 are taken, split up 28 bits of offset into this range:
+ */
+#define PTE_FILE_MAX_BITS	28
 
-#define pgoff_to_pte(off) \
-	((pte_t) { (((off) & 0x1f) << 3) + (((off) >> 6) << 9) + _PAGE_FILE })
+#define pte_to_pgoff(_pte)	((((_pte).pte >> 1) & 0x7) | \
+				 (((_pte).pte >> 2) & 0x8) | \
+				 (((_pte).pte >> 8) <<  4))
+
+#define pgoff_to_pte(off)	((pte_t) { (((off) & 0x7) << 1) | \
+					   (((off) & 0x8) << 2) | \
+					   (((off) >>  4) << 8) | \
+					   _PAGE_FILE })
 #endif
 
 #endif
 
+#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_high })
+#define __swp_entry_to_pte(x)	((pte_t) { 0, (x).val })
+#else
 #define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
 #define __swp_entry_to_pte(x)	((pte_t) { (x).val })
+#endif
 
 #endif /* _ASM_PGTABLE_32_H */
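The new 32-bit file-PTE layouts pack the offset around the reserved bits instead of discarding them. A standalone sanity check (userspace, not kernel code) that the first 28-bit encoding above round-trips; _PAGE_FILE below is a placeholder value assumed to sit on one of the reserved bits 0, 4, 8 or 9:

/* Round-trip check for the 28-bit pgoff_to_pte()/pte_to_pgoff() pair copied
 * from the hunk above.  pte_to_pgoff() only reads bits 1-3, 5-7 and 10+,
 * so whichever reserved bit _PAGE_FILE really is, the offset survives. */
#include <assert.h>
#include <stdio.h>

#define _PAGE_FILE (1UL << 4)	/* placeholder; real value defined elsewhere */

static unsigned long pgoff_to_pte(unsigned long off)
{
	return ((off & 0x07) << 1) | ((off & 0x38) << 2) |
	       ((off >> 6) << 10) | _PAGE_FILE;
}

static unsigned long pte_to_pgoff(unsigned long pte)
{
	return ((pte >> 1) & 0x07) | ((pte >> 2) & 0x38) | ((pte >> 10) << 6);
}

int main(void)
{
	unsigned long off;

	for (off = 0; off < (1UL << 28); off += 12345)
		assert(pte_to_pgoff(pgoff_to_pte(off)) == off);
	puts("28-bit file offset encoding round-trips");
	return 0;
}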
diff --git a/include/asm-mips/pgtable-64.h b/include/asm-mips/pgtable-64.h
index 82166b2..2faf5c9 100644
--- a/include/asm-mips/pgtable-64.h
+++ b/include/asm-mips/pgtable-64.h
@@ -224,15 +224,12 @@
 #define __swp_entry_to_pte(x)	((pte_t) { (x).val })
 
 /*
- * Bits 0, 1, 2, 7 and 8 are taken, split up the 32 bits of offset
- * into this range:
+ * Bits 0, 4, 6, and 7 are taken. Let's leave bits 1, 2, 3, and 5 alone to
+ * make things easier, and only use the upper 56 bits for the page offset...
  */
-#define PTE_FILE_MAX_BITS	32
+#define PTE_FILE_MAX_BITS	56
 
-#define pte_to_pgoff(_pte) \
-	((((_pte).pte >> 3) & 0x1f ) + (((_pte).pte >> 9) << 6 ))
-
-#define pgoff_to_pte(off) \
-	((pte_t) { (((off) & 0x1f) << 3) + (((off) >> 6) << 9) + _PAGE_FILE })
+#define pte_to_pgoff(_pte)	((_pte).pte >> 8)
+#define pgoff_to_pte(off)	((pte_t) { ((off) << 8) | _PAGE_FILE })
 
 #endif /* _ASM_PGTABLE_64_H */
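Worked out, the 64-bit encoding above is a plain byte shift: assuming _PAGE_FILE sits in the low byte, as the comment implies, bits 0-7 hold the reserved/software bits and bits 8-63 hold the offset, so pte_to_pgoff(pgoff_to_pte(off)) == off for every offset below 2^56, which is exactly what PTE_FILE_MAX_BITS of 56 advertises.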
diff --git a/include/asm-mips/pgtable.h b/include/asm-mips/pgtable.h
index f80fe75..d0af2a3 100644
--- a/include/asm-mips/pgtable.h
+++ b/include/asm-mips/pgtable.h
@@ -353,8 +353,9 @@
 #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1)
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
-	pte.pte_low &= _PAGE_CHG_MASK;
-	pte.pte_low |= pgprot_val(newprot);
+	pte.pte_low  &= _PAGE_CHG_MASK;
+	pte.pte_high &= ~0x3f;
+	pte.pte_low  |= pgprot_val(newprot);
 	pte.pte_high |= pgprot_val(newprot) & 0x3f;
 	return pte;
 }
diff --git a/include/asm-mips/smp.h b/include/asm-mips/smp.h
index 75c6fe7c..e14e4b6 100644
--- a/include/asm-mips/smp.h
+++ b/include/asm-mips/smp.h
@@ -48,7 +48,6 @@
 #define SMP_CALL_FUNCTION	0x2
 
 extern cpumask_t phys_cpu_present_map;
-extern cpumask_t cpu_online_map;
 #define cpu_possible_map	phys_cpu_present_map
 
 extern cpumask_t cpu_callout_map;
@@ -86,9 +85,9 @@
 extern void plat_smp_setup(void);
 
 /*
- * Called after init_IRQ but before __cpu_up.
+ * Called in smp_prepare_cpus.
  */
-extern void prom_prepare_cpus(unsigned int max_cpus);
+extern void plat_prepare_cpus(unsigned int max_cpus);
 
 /*
  * Last chance for the board code to finish SMP initialization before
diff --git a/include/asm-mips/sparsemem.h b/include/asm-mips/sparsemem.h
new file mode 100644
index 0000000..795ac6c
--- /dev/null
+++ b/include/asm-mips/sparsemem.h
@@ -0,0 +1,14 @@
+#ifndef _MIPS_SPARSEMEM_H
+#define _MIPS_SPARSEMEM_H
+#ifdef CONFIG_SPARSEMEM
+
+/*
+ * SECTION_SIZE_BITS		2^N: how big each section will be
+ * MAX_PHYSMEM_BITS		2^N: how much memory we can have in that space
+ */
+#define SECTION_SIZE_BITS       28
+#define MAX_PHYSMEM_BITS        35
+
+#endif /* CONFIG_SPARSEMEM */
+#endif /* _MIPS_SPARSEMEM_H */
+
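As a worked example of the two constants above: SECTION_SIZE_BITS of 28 gives 2^28-byte (256 MB) sections, and MAX_PHYSMEM_BITS of 35 caps the physical address space at 2^35 bytes (32 GB), so a sparsemem layout built from these values needs at most 2^(35-28) = 128 sections.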
diff --git a/include/asm-s390/lowcore.h b/include/asm-s390/lowcore.h
index db0606c1..bea7279 100644
--- a/include/asm-s390/lowcore.h
+++ b/include/asm-s390/lowcore.h
@@ -98,8 +98,8 @@
 #define __LC_KERNEL_ASCE		0xD58
 #define __LC_USER_ASCE			0xD60
 #define __LC_PANIC_STACK                0xD68
-#define __LC_CPUID                      0xD90
-#define __LC_CPUADDR                    0xD98
+#define __LC_CPUID			0xD80
+#define __LC_CPUADDR			0xD88
 #define __LC_IPLDEV                     0xDB8
 #define __LC_JIFFY_TIMER		0xDC0
 #define __LC_CURRENT			0xDD8
diff --git a/include/asm-um/irqflags.h b/include/asm-um/irqflags.h
new file mode 100644
index 0000000..659b9ab
--- /dev/null
+++ b/include/asm-um/irqflags.h
@@ -0,0 +1,6 @@
+#ifndef __UM_IRQFLAGS_H
+#define __UM_IRQFLAGS_H
+
+/* Empty for now */
+
+#endif
diff --git a/include/asm-um/uaccess.h b/include/asm-um/uaccess.h
index bea5a01..16c734a 100644
--- a/include/asm-um/uaccess.h
+++ b/include/asm-um/uaccess.h
@@ -41,11 +41,11 @@
 
 #define __get_user(x, ptr) \
 ({ \
-	const __typeof__(ptr) __private_ptr = ptr;	\
+	const __typeof__(*(ptr)) __user *__private_ptr = (ptr);	\
 	__typeof__(x) __private_val;			\
 	int __private_ret = -EFAULT;			\
 	(x) = (__typeof__(*(__private_ptr)))0;				\
-	if (__copy_from_user((void *) &__private_val, (__private_ptr),	\
+	if (__copy_from_user((__force void *)&__private_val, (__private_ptr),\
 			     sizeof(*(__private_ptr))) == 0) {		\
 		(x) = (__typeof__(*(__private_ptr))) __private_val;	\
 		__private_ret = 0;					\
@@ -62,7 +62,7 @@
 
 #define __put_user(x, ptr) \
 ({ \
-        __typeof__(ptr) __private_ptr = ptr; \
+        __typeof__(*(ptr)) __user *__private_ptr = ptr; \
         __typeof__(*(__private_ptr)) __private_val; \
         int __private_ret = -EFAULT; \
         __private_val = (__typeof__(*(__private_ptr))) (x); \
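The key change in both macros above is spelling the saved pointer as __typeof__(*(ptr)) __user * rather than __typeof__(ptr). A userspace sketch of one reason this matters (the __user/__force annotations themselves are sparse-only and do not change generated code): with plain __typeof__(ptr) the local copy inherits an array type whenever the caller passes an array, and arrays cannot be assigned.

/* Sketch of the safer typeof spelling, using GNU typeof and statement
 * expressions as the kernel macros do. */
#include <stdio.h>

#define SAVE_PTR(p)	({ __typeof__(*(p)) *__q = (p); __q; })

int main(void)
{
	char buf[16] = "hello";

	/* __typeof__(buf) would be char[16], which cannot be assigned;
	 * __typeof__(*buf) * is char *, to which buf decays cleanly. */
	char *q = SAVE_PTR(buf);

	printf("%s\n", q);
	return 0;
}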
diff --git a/include/linux/m48t86.h b/include/linux/m48t86.h
index 9065199..915d6b4 100644
--- a/include/linux/m48t86.h
+++ b/include/linux/m48t86.h
@@ -11,6 +11,6 @@
 
 struct m48t86_ops
 {
-	void (*writeb)(unsigned char value, unsigned long addr);
-	unsigned char (*readb)(unsigned long addr);
+	void (*writebyte)(unsigned char value, unsigned long addr);
+	unsigned char (*readbyte)(unsigned long addr);
 };
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 3674035..2d83371 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -15,6 +15,7 @@
 #include <linux/seqlock.h>
 #include <linux/nodemask.h>
 #include <asm/atomic.h>
+#include <asm/page.h>
 
 /* Free memory management - zoned buddy allocator.  */
 #ifndef CONFIG_FORCE_MAX_ZONEORDER
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 3a6a4e3..6fd36cb 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -442,6 +442,7 @@
 struct pci_dev *pci_find_slot (unsigned int bus, unsigned int devfn);
 int pci_find_capability (struct pci_dev *dev, int cap);
 int pci_find_next_capability (struct pci_dev *dev, u8 pos, int cap);
+int pci_find_ext_capability (struct pci_dev *dev, int cap);
 struct pci_bus * pci_find_next_bus(const struct pci_bus *from);
 
 struct pci_dev *pci_get_device (unsigned int vendor, unsigned int device, struct pci_dev *from);
@@ -662,6 +663,7 @@
 static inline void pci_unregister_driver(struct pci_driver *drv) { }
 static inline int pci_find_capability (struct pci_dev *dev, int cap) {return 0; }
 static inline int pci_find_next_capability (struct pci_dev *dev, u8 post, int cap) { return 0; }
+static inline int pci_find_ext_capability (struct pci_dev *dev, int cap) {return 0; }
 static inline const struct pci_device_id *pci_match_device(const struct pci_device_id *ids, const struct pci_dev *dev) { return NULL; }
 
 /* Power management related routines */
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 590dc6d..c3fe769c 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -935,6 +935,7 @@
 #define PCI_DEVICE_ID_PLX_DJINN_ITOO	0x1151
 #define PCI_DEVICE_ID_PLX_R753		0x1152
 #define PCI_DEVICE_ID_PLX_OLITEC	0x1187
+#define PCI_DEVICE_ID_PLX_PCI200SYN	0x3196
 #define PCI_DEVICE_ID_PLX_9050		0x9050
 #define PCI_DEVICE_ID_PLX_9080		0x9080
 #define PCI_DEVICE_ID_PLX_GTEK_SERIAL2	0xa001
@@ -1182,6 +1183,14 @@
 #define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_1100         0x034E
 #define PCI_DEVICE_ID_NVIDIA_NVENET_14              0x0372
 #define PCI_DEVICE_ID_NVIDIA_NVENET_15              0x0373
+#define PCI_DEVICE_ID_NVIDIA_NVENET_16              0x03E5
+#define PCI_DEVICE_ID_NVIDIA_NVENET_17              0x03E6
+#define PCI_DEVICE_ID_NVIDIA_NVENET_18              0x03EE
+#define PCI_DEVICE_ID_NVIDIA_NVENET_19              0x03EF
+#define PCI_DEVICE_ID_NVIDIA_NVENET_20              0x0450
+#define PCI_DEVICE_ID_NVIDIA_NVENET_21              0x0451
+#define PCI_DEVICE_ID_NVIDIA_NVENET_22              0x0452
+#define PCI_DEVICE_ID_NVIDIA_NVENET_23              0x0453
 
 #define PCI_VENDOR_ID_IMS		0x10e0
 #define PCI_DEVICE_ID_IMS_TT128		0x9128
@@ -1827,6 +1836,7 @@
 
 #define PCI_VENDOR_ID_SAMSUNG		0x144d
 
+#define PCI_VENDOR_ID_MYRICOM		0x14c1
 
 #define PCI_VENDOR_ID_TITAN		0x14D2
 #define PCI_DEVICE_ID_TITAN_010L	0x8001
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index ad1c7af..f5d47bf 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -300,25 +300,20 @@
 	rtnl_lock();
 	if (strchr(dev->name, '%')) {
 		ret = dev_alloc_name(dev, dev->name);
-		if (ret < 0)
-			goto err1;
+		if (ret < 0) {
+			free_netdev(dev);
+			goto out;
+		}
 	}
 
 	ret = register_netdevice(dev);
 	if (ret)
-		goto err2;
+		goto out;
 
 	ret = br_sysfs_addbr(dev);
 	if (ret)
-		goto err3;
-	rtnl_unlock();
-	return 0;
-
- err3:
-	unregister_netdev(dev);
- err2:
-	free_netdev(dev);
- err1:
+		unregister_netdevice(dev);
+ out:
 	rtnl_unlock();
 	return ret;
 }
diff --git a/net/ethernet/Makefile b/net/ethernet/Makefile
index 69b74a9..7cef1d8 100644
--- a/net/ethernet/Makefile
+++ b/net/ethernet/Makefile
@@ -3,6 +3,5 @@
 #
 
 obj-y					+= eth.o
-obj-$(CONFIG_SYSCTL)			+= sysctl_net_ether.o
 obj-$(subst m,y,$(CONFIG_IPX))		+= pe2.o
 obj-$(subst m,y,$(CONFIG_ATALK))	+= pe2.o
diff --git a/net/ethernet/sysctl_net_ether.c b/net/ethernet/sysctl_net_ether.c
deleted file mode 100644
index 66b39fc..0000000
--- a/net/ethernet/sysctl_net_ether.c
+++ /dev/null
@@ -1,14 +0,0 @@
-/* -*- linux-c -*-
- * sysctl_net_ether.c: sysctl interface to net Ethernet subsystem.
- *
- * Begun April 1, 1996, Mike Shaver.
- * Added /proc/sys/net/ether directory entry (empty =) ). [MS]
- */
-
-#include <linux/mm.h>
-#include <linux/sysctl.h>
-#include <linux/if_ether.h>
-
-ctl_table ether_table[] = {
-	{0}
-};
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 743016b..f33c9dd 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -642,7 +642,7 @@
  * eventually). The difference is that pulled data not copied, but
  * immediately discarded.
  */
-static unsigned char *__pskb_trim_head(struct sk_buff *skb, int len)
+static void __pskb_trim_head(struct sk_buff *skb, int len)
 {
 	int i, k, eat;
 
@@ -667,7 +667,6 @@
 	skb->tail = skb->data;
 	skb->data_len -= len;
 	skb->len = skb->data_len;
-	return skb->tail;
 }
 
 int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
@@ -676,12 +675,11 @@
 	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
 		return -ENOMEM;
 
-	if (len <= skb_headlen(skb)) {
+	/* If len == headlen, we avoid __skb_pull to preserve alignment. */
+	if (unlikely(len < skb_headlen(skb)))
 		__skb_pull(skb, len);
-	} else {
-		if (__pskb_trim_head(skb, len-skb_headlen(skb)) == NULL)
-			return -ENOMEM;
-	}
+	else
+		__pskb_trim_head(skb, len - skb_headlen(skb));
 
 	TCP_SKB_CB(skb)->seq += len;
 	skb->ip_summed = CHECKSUM_HW;
diff --git a/net/irda/irlap.c b/net/irda/irlap.c
index 7029618..a165286 100644
--- a/net/irda/irlap.c
+++ b/net/irda/irlap.c
@@ -884,7 +884,8 @@
 	if (now) {
 		/* Send down empty frame to trigger speed change */
 		skb = dev_alloc_skb(0);
-		irlap_queue_xmit(self, skb);
+		if (skb)
+			irlap_queue_xmit(self, skb);
 	}
 }
 
diff --git a/net/sysctl_net.c b/net/sysctl_net.c
index 55538f6..58a1b6b 100644
--- a/net/sysctl_net.c
+++ b/net/sysctl_net.c
@@ -37,14 +37,6 @@
 		.mode		= 0555,
 		.child		= core_table,
 	},
-#ifdef CONFIG_NET
-	{
-		.ctl_name	= NET_ETHER,
-		.procname	= "ethernet",
-		.mode		= 0555,
-		.child		= ether_table,
-	},
-#endif
 #ifdef CONFIG_INET
 	{
 		.ctl_name	= NET_IPV4,
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 21dad41..90b4cdc 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -4422,6 +4422,7 @@
 
 	/* Set up any superblocks initialized prior to the policy load. */
 	printk(KERN_INFO "SELinux:  Setting up existing superblocks.\n");
+	spin_lock(&sb_lock);
 	spin_lock(&sb_security_lock);
 next_sb:
 	if (!list_empty(&superblock_security_head)) {
@@ -4430,19 +4431,20 @@
 				           struct superblock_security_struct,
 				           list);
 		struct super_block *sb = sbsec->sb;
-		spin_lock(&sb_lock);
 		sb->s_count++;
-		spin_unlock(&sb_lock);
 		spin_unlock(&sb_security_lock);
+		spin_unlock(&sb_lock);
 		down_read(&sb->s_umount);
 		if (sb->s_root)
 			superblock_doinit(sb, NULL);
 		drop_super(sb);
+		spin_lock(&sb_lock);
 		spin_lock(&sb_security_lock);
 		list_del_init(&sbsec->list);
 		goto next_sb;
 	}
 	spin_unlock(&sb_security_lock);
+	spin_unlock(&sb_lock);
 }
 
 /* SELinux requires early initialization in order to label