/spare/repo/libata-dev branch 'iomap-try3'
diff --git a/Documentation/networking/cxgb.txt b/Documentation/networking/cxgb.txt
new file mode 100644
index 0000000..7632463
--- /dev/null
+++ b/Documentation/networking/cxgb.txt
@@ -0,0 +1,352 @@
+                 Chelsio N210 10Gb Ethernet Network Controller
+
+                         Driver Release Notes for Linux
+
+                                 Version 2.1.1
+
+                                 June 20, 2005
+
+CONTENTS
+========
+ INTRODUCTION
+ FEATURES
+ PERFORMANCE
+ DRIVER MESSAGES
+ KNOWN ISSUES
+ SUPPORT
+
+
+INTRODUCTION
+============
+
+ This document describes the Linux driver for the Chelsio 10Gb Ethernet
+ Network Controller. This driver supports the Chelsio N210 NIC and is
+ backward compatible with the Chelsio N110 model 10Gb NICs.
+
+
+FEATURES
+========
+
+ Adaptive Interrupts (adaptive-rx)
+ ---------------------------------
+
+  This feature provides an adaptive algorithm that adjusts the interrupt
+  coalescing parameters, allowing the driver to dynamically adapt the latency
+  settings to achieve the highest performance during various types of network
+  load.
+
+  The interface used to control this feature is ethtool. Please see the
+  ethtool manpage for additional usage information.
+
+  By default, adaptive-rx is disabled.
+  To enable adaptive-rx:
+
+      ethtool -C <interface> adaptive-rx on
+
+  To disable adaptive-rx, use ethtool:
+
+      ethtool -C <interface> adaptive-rx off
+
+  After disabling adaptive-rx, the timer latency value will be set to 50us.
+  You may set the timer latency after disabling adaptive-rx:
+
+      ethtool -C <interface> rx-usecs <microseconds>
+
+  For example, to set the timer latency value to 100us on eth0:
+
+      ethtool -C eth0 rx-usecs 100
+
+  You may also provide a timer latency value while disabling adaptive-rx:
+
+      ethtool -C <interface> adaptive-rx off rx-usecs <microseconds>
+
+  If adaptive-rx is disabled and a timer latency value is specified, the timer
+  will be set to the specified value until changed by the user or until
+  adaptive-rx is enabled.
+
+  To view the status of the adaptive-rx and timer latency values:
+
+      ethtool -c <interface>
+
+
+ TCP Segmentation Offloading (TSO) Support
+ -----------------------------------------
+
+  This feature, also known as "large send", enables a system's protocol stack
+  to offload portions of outbound TCP processing to a network interface card,
+  thereby reducing system CPU utilization and enhancing performance.
+
+  The interface used to control this feature is ethtool version 1.8 or higher.
+  Please see the ethtool manpage for additional usage information.
+
+  By default, TSO is enabled.
+  To disable TSO:
+
+      ethtool -K <interface> tso off
+
+  To enable TSO:
+
+      ethtool -K <interface> tso on
+
+  To view the status of TSO:
+
+      ethtool -k <interface>
+
+
+PERFORMANCE
+===========
+
+ The following information is provided as an example of how to change system
+ parameters for "performance tuning" and what values to use. You may or may not
+ want to change these system parameters, depending on your server/workstation
+ application. Doing so is not warranted in any way by Chelsio Communications,
+ and is done at "YOUR OWN RISK". Chelsio will not be held responsible for loss
+ of data or damage to equipment.
+
+ Your distribution may have a different way of doing things, or you may prefer
+ a different method. These commands are shown only to provide an example of
+ what to do and are by no means definitive.
+
+ Making any of the following system changes will only last until you reboot
+ your system. You may want to write a script that runs at boot-up which
+ includes the optimal settings for your system.
+
+  Setting PCI Latency Timer:
+      setpci -d 1425:* 0x0c.l=0x0000F800
+
+  Disabling TCP timestamp:
+      sysctl -w net.ipv4.tcp_timestamps=0
+
+  Disabling SACK:
+      sysctl -w net.ipv4.tcp_sack=0
+
+  Setting large number of incoming connection requests:
+      sysctl -w net.ipv4.tcp_max_syn_backlog=3000
+
+  Setting maximum receive socket buffer size:
+      sysctl -w net.core.rmem_max=1024000
+
+  Setting maximum send socket buffer size:
+      sysctl -w net.core.wmem_max=1024000
+
+  Set smp_affinity (on a multiprocessor system) to a single CPU:
+      echo 1 > /proc/irq/<interrupt_number>/smp_affinity
+
+  Setting default receive socket buffer size:
+      sysctl -w net.core.rmem_default=524287
+
+  Setting default send socket buffer size:
+      sysctl -w net.core.wmem_default=524287
+
+  Setting maximum option memory buffers:
+      sysctl -w net.core.optmem_max=524287
+
+  Setting maximum backlog (# of unprocessed packets before kernel drops):
+      sysctl -w net.core.netdev_max_backlog=300000
+
+  Setting TCP read buffers (min/default/max):
+      sysctl -w net.ipv4.tcp_rmem="10000000 10000000 10000000"
+
+  Setting TCP write buffers (min/pressure/max):
+      sysctl -w net.ipv4.tcp_wmem="10000000 10000000 10000000"
+
+  Setting TCP buffer space (min/pressure/max):
+      sysctl -w net.ipv4.tcp_mem="10000000 10000000 10000000"
+
+  TCP window size for single connections:
+   The receive buffer (RX_WINDOW) size must be at least as large as the
+   Bandwidth-Delay Product of the communication link between the sender and
+   receiver. Due to the variations of RTT, you may want to increase the buffer
+   size up to 2 times the Bandwidth-Delay Product. Reference page 289 of
+   "TCP/IP Illustrated, Volume 1, The Protocols" by W. Richard Stevens.
+   At 10Gb speeds, use the following formula:
+       RX_WINDOW >= 1.25MBytes * RTT(in milliseconds)
+       Example for RTT with 100us: RX_WINDOW = (1,250,000 * 0.1) = 125,000
+   RX_WINDOW sizes of 256KB - 512KB should be sufficient.
+   Setting the min, max, and default receive buffer (RX_WINDOW) size:
+       sysctl -w net.ipv4.tcp_rmem="<min> <default> <max>"
+
+  TCP window size for multiple connections:
+   The receive buffer (RX_WINDOW) size may be calculated the same way as for
+   a single connection, but should be divided by the number of connections.
+   The smaller window prevents congestion and facilitates better pacing,
+   especially if/when MAC level flow control does not work well or when it is
+   not supported on the machine. Experimentation may be necessary to attain
+   the correct value. This method is provided as a starting point for the
+   correct receive buffer size.
+   Setting the min, max, and default receive buffer (RX_WINDOW) size is
+   performed in the same manner as for a single connection.
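+   As an illustration only (the RTT and connection count here are assumed
+   values, not measurements): with an RTT of 1ms shared by 8 connections,
+       per-connection RX_WINDOW = (1,250,000 * 1) / 8 ~= 156,250 bytes
+   which you would then round up and apply with the same tcp_rmem sysctl
+   command shown above for single connections.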
+
+
+DRIVER MESSAGES
+===============
+
+ The following messages are the most common messages logged by syslog. These
+ may be found in /var/log/messages.
+
+  Driver up:
+     Chelsio Network Driver - version 2.1.1
+
+  NIC detected:
+     eth#: Chelsio N210 1x10GBaseX NIC (rev #), PCIX 133MHz/64-bit
+
+  Link up:
+     eth#: link is up at 10 Gbps, full duplex
+
+  Link down:
+     eth#: link is down
+
+
+KNOWN ISSUES
+============
+
+ These issues have been identified during testing. The following information
+ is provided as a workaround for each problem. In some cases, the problem is
+ inherent to Linux or to a particular Linux distribution and/or hardware
+ platform.
+
+  1. Large number of TCP retransmits on a multiprocessor (SMP) system.
+
+      On a system with multiple CPUs, the interrupt (IRQ) for the network
+      controller may be bound to more than one CPU. This will cause TCP
+      retransmits if the packet data were to be split across different CPUs
+      and re-assembled in a different order than expected.
+
+      To eliminate the TCP retransmits, set smp_affinity on the particular
+      interrupt to a single CPU. You can locate the interrupt (IRQ) used on
+      the N110/N210 by using ifconfig:
+          ifconfig <dev_name> | grep Interrupt
+      Set the smp_affinity to a single CPU:
+          echo 1 > /proc/irq/<interrupt_number>/smp_affinity
+
+      It is highly suggested that you do not run the irqbalance daemon on your
+      system, as this will change any smp_affinity setting you have applied.
+      The irqbalance daemon runs on a 10 second interval and binds interrupts
+      to the least loaded CPU determined by the daemon. To disable this daemon:
+          chkconfig --level 2345 irqbalance off
+
+      By default, some Linux distributions enable the kernel irqbalance
+      feature, which performs the same function as the daemon. To disable
+      this feature, add the following option to the kernel line in your
+      bootloader configuration:
+          noirqbalance
+
+          Example using the Grub bootloader:
+              title Red Hat Enterprise Linux AS (2.4.21-27.ELsmp)
+              root (hd0,0)
+              kernel /vmlinuz-2.4.21-27.ELsmp ro root=/dev/hda3 noirqbalance
+              initrd /initrd-2.4.21-27.ELsmp.img
+
+  2. After running insmod, the driver is loaded and the incorrect network
+     interface is brought up without running ifup.
+
+      When using 2.4.x kernels, including RHEL kernels, the Linux kernel
+      invokes a script named "hotplug". This script is primarily used to
+      automatically bring up USB devices when they are plugged in; however,
+      the script also attempts to automatically bring up a network interface
+      after loading the kernel module. The hotplug script does this by scanning
+      the ifcfg-eth# config files in /etc/sysconfig/network-scripts, looking
+      for HWADDR=<mac_address>.
+
+      If the hotplug script does not find the HWADDR within any of the
+      ifcfg-eth# files, it will bring up the device with the next available
+      interface name. If this interface is already configured for a different
+      network card, your new interface will have an incorrect IP address and
+      network settings.
+
+      To solve this issue, you can add the HWADDR=<mac_address> key to the
+      interface config file of your network controller.
+
+      To disable this "hotplug" feature, you may add the driver (module name)
+      to the "blacklist" file located in /etc/hotplug. It has been noted that
+      this does not work for network devices because the net.agent script
+      does not use the blacklist file. Simply remove, or rename, the net.agent
+      script located in /etc/hotplug to disable this feature.
+
+  3. Transport Protocol (TP) hangs when running heavy multi-connection traffic
+     on an AMD Opteron system with HyperTransport PCI-X Tunnel chipset.
+
+      If your AMD Opteron system uses the AMD-8131 HyperTransport PCI-X Tunnel
+      chipset, you may experience the "133-MHz Mode Split Completion Data
+      Corruption" bug identified by AMD while using a 133MHz PCI-X card on the
+      PCI-X bus.
+
+      AMD states, "Under highly specific conditions, the AMD-8131 PCI-X Tunnel
+      can provide stale data via split completion cycles to a PCI-X card that
+      is operating at 133 MHz", causing data corruption.
+
+      AMD provides three workarounds for this problem; however, Chelsio
+      recommends the first option for best performance with this bug:
+
+        For 133Mhz secondary bus operation, limit the transaction length and
+        the number of outstanding transactions, via BIOS configuration
+        programming of the PCI-X card, to the following:
+
+           Data Length (bytes): 1k
+           Total allowed outstanding transactions: 2
+
+      Please refer to AMD 8131-HT/PCI-X Errata 26310 Rev 3.08 August 2004,
+      section 56, "133-MHz Mode Split Completion Data Corruption" for more
+      details on this bug and the workarounds suggested by AMD.
+
+      It may be possible to work outside AMD's recommended PCI-X settings; try
+      increasing the Data Length to 2k bytes for increased performance. If you
+      have issues with these settings, please revert to the "safe" settings
+      and duplicate the problem before submitting a bug or asking for support.
+
+      NOTE: The default setting on most systems is 8 outstanding transactions
+            and 2k bytes data length.
+
+  4. On multiprocessor systems, it has been noted that an application which
+     is handling 10Gb networking can switch between CPUs, causing degraded
+     and/or unstable performance.
+
+      If running on an SMP system and taking performance measurements, it
+      is suggested you either run the latest netperf-2.4.0+ or use a binding
+      tool such as Tim Hockin's procstate utilities (runon)
+      <http://www.hockin.org/~thockin/procstate/>.
+
+      Binding netserver and netperf (or other applications) to particular
+      CPUs can make a significant difference in performance measurements.
+      You may need to experiment with which CPU to bind the application to
+      in order to achieve the best performance for your system.
+
+      If you are developing an application designed for 10Gb networking,
+      please keep in mind that you may want to look at the kernel functions
+      sched_setaffinity & sched_getaffinity to bind your application.
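+
+      As an illustration only (this snippet is not part of the driver and
+      assumes a glibc that provides the cpu_set_t interface), a minimal C
+      sketch that pins the calling process to CPU 0 could look like:
+
+          #define _GNU_SOURCE
+          #include <sched.h>
+          #include <stdio.h>
+
+          int main(void)
+          {
+              cpu_set_t mask;
+
+              CPU_ZERO(&mask);       /* start with an empty CPU set */
+              CPU_SET(0, &mask);     /* allow only CPU 0            */
+
+              /* pid 0 means "the calling process" */
+              if (sched_setaffinity(0, sizeof(mask), &mask) != 0) {
+                  perror("sched_setaffinity");
+                  return 1;
+              }
+
+              /* ... run the 10Gb networking workload here ... */
+              return 0;
+          }
+
+      Choose the CPU number to match, or deliberately avoid, the CPU that
+      services the NIC interrupt (see the smp_affinity notes above).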
+
+      If you are just running user-space applications such as ftp, telnet,
+      etc., you may want to try the runon tool provided by Tim Hockin's
+      procstate utility. You could also try binding the interface to a
+      particular CPU: runon 0 ifup eth0
+
+
+SUPPORT
+=======
+
+ If you have problems with the software or hardware, please contact our
+ customer support team via email at support@chelsio.com or check our website
+ at http://www.chelsio.com
+
+===============================================================================
+
+ Chelsio Communications
+ 370 San Aleso Ave.
+ Suite 100
+ Sunnyvale, CA 94085
+ http://www.chelsio.com
+
+This program is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License, version 2, as
+published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License along
+with this program; if not, write to the Free Software Foundation, Inc.,
+59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+
+THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
+WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
+
+ Copyright (c) 2003-2005 Chelsio Communications. All rights reserved.
+
+===============================================================================
diff --git a/Documentation/sound/alsa/ALSA-Configuration.txt b/Documentation/sound/alsa/ALSA-Configuration.txt
index a18ecb9..5c49ba0 100644
--- a/Documentation/sound/alsa/ALSA-Configuration.txt
+++ b/Documentation/sound/alsa/ALSA-Configuration.txt
@@ -132,6 +132,7 @@
     mpu_irq	- IRQ # for MPU-401 UART (PnP setup)
     dma1	- first DMA # for AD1816A chip (PnP setup)
     dma2	- second DMA # for AD1816A chip (PnP setup)
+    clockfreq   - Clock frequency for AD1816A chip (default = 0, 33000Hz)
     
     Module supports up to 8 cards, autoprobe and PnP.
     
diff --git a/Documentation/sound/alsa/DocBook/writing-an-alsa-driver.tmpl b/Documentation/sound/alsa/DocBook/writing-an-alsa-driver.tmpl
index db0b7d2..0475478 100644
--- a/Documentation/sound/alsa/DocBook/writing-an-alsa-driver.tmpl
+++ b/Documentation/sound/alsa/DocBook/writing-an-alsa-driver.tmpl
@@ -3422,10 +3422,17 @@
 
       <para>
         The <structfield>iface</structfield> field specifies the type of
-      the control,
-      <constant>SNDRV_CTL_ELEM_IFACE_XXX</constant>. There are
-      <constant>MIXER</constant>, <constant>PCM</constant>,
-      <constant>CARD</constant>, etc.
+      the control, <constant>SNDRV_CTL_ELEM_IFACE_XXX</constant>, which
+      is usually <constant>MIXER</constant>.
+      Use <constant>CARD</constant> for global controls that are not
+      logically part of the mixer.
+      If the control is closely associated with some specific device on
+      the sound card, use <constant>HWDEP</constant>,
+      <constant>PCM</constant>, <constant>RAWMIDI</constant>,
+      <constant>TIMER</constant>, or <constant>SEQUENCER</constant>, and
+      specify the device number with the
+      <structfield>device</structfield> and
+      <structfield>subdevice</structfield> fields.
       </para>
 
       <para>
diff --git a/MAINTAINERS b/MAINTAINERS
index 564a03e..e8214fe 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2092,6 +2092,12 @@
 W:	http://www.simtec.co.uk/products/EB2410ITX/
 S:	Supported
 
+SIS 190 ETHERNET DRIVER
+P:	Francois Romieu
+M:	romieu@fr.zoreil.com
+L:	netdev@vger.kernel.org
+S:	Maintained
+
 SIS 5513 IDE CONTROLLER DRIVER
 P:	Lionel Bouton
 M:	Lionel.Bouton@inet6.fr
diff --git a/arch/i386/pci/common.c b/arch/i386/pci/common.c
index ade5bc5..c96bea1 100644
--- a/arch/i386/pci/common.c
+++ b/arch/i386/pci/common.c
@@ -165,7 +165,6 @@
 	if ((pci_probe & PCI_BIOS_SORT) && !(pci_probe & PCI_NO_SORT))
 		pcibios_sort();
 #endif
-	pci_assign_unassigned_resources();
 	return 0;
 }
 
diff --git a/arch/i386/pci/i386.c b/arch/i386/pci/i386.c
index 93a364c..3cc4809 100644
--- a/arch/i386/pci/i386.c
+++ b/arch/i386/pci/i386.c
@@ -170,43 +170,26 @@
 static int __init pcibios_assign_resources(void)
 {
 	struct pci_dev *dev = NULL;
-	int idx;
-	struct resource *r;
+	struct resource *r, *pr;
 
-	for_each_pci_dev(dev) {
-		int class = dev->class >> 8;
-
-		/* Don't touch classless devices and host bridges */
-		if (!class || class == PCI_CLASS_BRIDGE_HOST)
-			continue;
-
-		for(idx=0; idx<6; idx++) {
-			r = &dev->resource[idx];
-
-			/*
-			 *  Don't touch IDE controllers and I/O ports of video cards!
-			 */
-			if ((class == PCI_CLASS_STORAGE_IDE && idx < 4) ||
-			    (class == PCI_CLASS_DISPLAY_VGA && (r->flags & IORESOURCE_IO)))
-				continue;
-
-			/*
-			 *  We shall assign a new address to this resource, either because
-			 *  the BIOS forgot to do so or because we have decided the old
-			 *  address was unusable for some reason.
-			 */
-			if (!r->start && r->end)
-				pci_assign_resource(dev, idx);
-		}
-
-		if (pci_probe & PCI_ASSIGN_ROMS) {
+	if (!(pci_probe & PCI_ASSIGN_ROMS)) {
+		/* Try to use BIOS settings for ROMs, otherwise let
+		   pci_assign_unassigned_resources() allocate the new
+		   addresses. */
+		for_each_pci_dev(dev) {
 			r = &dev->resource[PCI_ROM_RESOURCE];
-			r->end -= r->start;
-			r->start = 0;
-			if (r->end)
-				pci_assign_resource(dev, PCI_ROM_RESOURCE);
+			if (!r->flags || !r->start)
+				continue;
+			pr = pci_find_parent_resource(dev, r);
+			if (!pr || request_resource(pr, r) < 0) {
+				r->end -= r->start;
+				r->start = 0;
+			}
 		}
 	}
+
+	pci_assign_unassigned_resources();
+
 	return 0;
 }
 
diff --git a/arch/ppc/syslib/m8xx_setup.c b/arch/ppc/syslib/m8xx_setup.c
index a3702cf..4c888da 100644
--- a/arch/ppc/syslib/m8xx_setup.c
+++ b/arch/ppc/syslib/m8xx_setup.c
@@ -57,7 +57,7 @@
 extern void m8xx_ide_init(void);
 
 extern unsigned long find_available_memory(void);
-extern void m8xx_cpm_reset();
+extern void m8xx_cpm_reset(void);
 extern void m8xx_wdt_handler_install(bd_t *bp);
 extern void rpxfb_alloc_pages(void);
 extern void cpm_interrupt_init(void);
@@ -266,8 +266,8 @@
 
 	bp = (bd_t *)__res;
 
-	seq_printf(m, "clock\t\t: %ldMHz\n"
-		   "bus clock\t: %ldMHz\n",
+	seq_printf(m, "clock\t\t: %uMHz\n"
+		   "bus clock\t: %uMHz\n",
 		   bp->bi_intfreq / 1000000,
 		   bp->bi_busfreq / 1000000);
 
diff --git a/drivers/char/drm/Kconfig b/drivers/char/drm/Kconfig
index 123417e..56ace9d 100644
--- a/drivers/char/drm/Kconfig
+++ b/drivers/char/drm/Kconfig
@@ -23,13 +23,6 @@
 	  Choose this option if you have a 3dfx Banshee or Voodoo3 (or later),
 	  graphics card.  If M is selected, the module will be called tdfx.
 
-config DRM_GAMMA
-	tristate "3dlabs GMX 2000"
-	depends on DRM && BROKEN
-	help
-	  This is the old gamma driver, please tell me if it might actually
-	  work.
-
 config DRM_R128
 	tristate "ATI Rage 128"
 	depends on DRM && PCI
@@ -82,7 +75,7 @@
 
 config DRM_MGA
 	tristate "Matrox g200/g400"
-	depends on DRM && AGP
+	depends on DRM
 	help
 	  Choose this option if you have a Matrox G200, G400 or G450 graphics
 	  card.  If M is selected, the module will be called mga.  AGP
@@ -103,3 +96,10 @@
 	  Choose this option if you have a Via unichrome or compatible video
 	  chipset. If M is selected the module will be called via.
 
+config DRM_SAVAGE
+	tristate "Savage video cards"
+	depends on DRM
+	help
+	  Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
+	  chipset. If M is selected the module will be called savage.
+
diff --git a/drivers/char/drm/Makefile b/drivers/char/drm/Makefile
index ddd9410..e41060c 100644
--- a/drivers/char/drm/Makefile
+++ b/drivers/char/drm/Makefile
@@ -8,16 +8,16 @@
 		drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
 		drm_sysfs.o
 
-gamma-objs  := gamma_drv.o gamma_dma.o
 tdfx-objs   := tdfx_drv.o
 r128-objs   := r128_drv.o r128_cce.o r128_state.o r128_irq.o
 mga-objs    := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
 i810-objs   := i810_drv.o i810_dma.o
 i830-objs   := i830_drv.o i830_dma.o i830_irq.o
 i915-objs   := i915_drv.o i915_dma.o i915_irq.o i915_mem.o
-radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o
+radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o
 ffb-objs    := ffb_drv.o ffb_context.o
 sis-objs    := sis_drv.o sis_ds.o sis_mm.o
+savage-objs := savage_drv.o savage_bci.o savage_state.o
 via-objs    := via_irq.o via_drv.o via_ds.o via_map.o via_mm.o via_dma.o via_verifier.o via_video.o
 
 ifeq ($(CONFIG_COMPAT),y)
@@ -29,7 +29,6 @@
 endif
 
 obj-$(CONFIG_DRM)	+= drm.o
-obj-$(CONFIG_DRM_GAMMA) += gamma.o
 obj-$(CONFIG_DRM_TDFX)	+= tdfx.o
 obj-$(CONFIG_DRM_R128)	+= r128.o
 obj-$(CONFIG_DRM_RADEON)+= radeon.o
@@ -39,5 +38,7 @@
 obj-$(CONFIG_DRM_I915)  += i915.o
 obj-$(CONFIG_DRM_FFB)   += ffb.o
 obj-$(CONFIG_DRM_SIS)   += sis.o
+obj-$(CONFIG_DRM_SAVAGE)+= savage.o
 obj-$(CONFIG_DRM_VIA)	+=via.o
 
+
diff --git a/drivers/char/drm/drm.h b/drivers/char/drm/drm.h
index e8371dd..fc6598a 100644
--- a/drivers/char/drm/drm.h
+++ b/drivers/char/drm/drm.h
@@ -98,7 +98,7 @@
 #define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
 
 
-typedef unsigned long drm_handle_t;
+typedef unsigned int  drm_handle_t;
 typedef unsigned int  drm_context_t;
 typedef unsigned int  drm_drawable_t;
 typedef unsigned int  drm_magic_t;
@@ -209,7 +209,8 @@
 	_DRM_REGISTERS	    = 1,  /**< no caching, no core dump */
 	_DRM_SHM	    = 2,  /**< shared, cached */
 	_DRM_AGP            = 3,  /**< AGP/GART */
-	_DRM_SCATTER_GATHER = 4	  /**< Scatter/gather memory for PCI DMA */
+	_DRM_SCATTER_GATHER = 4,  /**< Scatter/gather memory for PCI DMA */
+	_DRM_CONSISTENT     = 5,  /**< Consistent memory for PCI DMA */
 } drm_map_type_t;
 
 
@@ -368,7 +369,8 @@
 	enum {
 		_DRM_PAGE_ALIGN = 0x01, /**< Align on page boundaries for DMA */
 		_DRM_AGP_BUFFER = 0x02, /**< Buffer is in AGP space */
-		_DRM_SG_BUFFER  = 0x04  /**< Scatter/gather memory buffer */
+		_DRM_SG_BUFFER  = 0x04, /**< Scatter/gather memory buffer */
+		_DRM_FB_BUFFER  = 0x08  /**< Buffer is in frame buffer */
 	}	      flags;
 	unsigned long agp_start; /**< 
 				  * Start address of where the AGP buffers are
diff --git a/drivers/char/drm/drmP.h b/drivers/char/drm/drmP.h
index 5df09cc..6f98701 100644
--- a/drivers/char/drm/drmP.h
+++ b/drivers/char/drm/drmP.h
@@ -53,7 +53,6 @@
 #include <linux/init.h>
 #include <linux/file.h>
 #include <linux/pci.h>
-#include <linux/version.h>
 #include <linux/jiffies.h>
 #include <linux/smp_lock.h>	/* For (un)lock_kernel */
 #include <linux/mm.h>
@@ -96,6 +95,7 @@
 #define DRIVER_IRQ_SHARED  0x80
 #define DRIVER_IRQ_VBL     0x100
 #define DRIVER_DMA_QUEUE   0x200
+#define DRIVER_FB_DMA      0x400
 
 /***********************************************************************/
 /** \name Begin the DRM... */
@@ -160,36 +160,7 @@
 #define pte_unmap(pte)
 #endif
 
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,19)
-static inline struct page * vmalloc_to_page(void * vmalloc_addr)
-{
-	unsigned long addr = (unsigned long) vmalloc_addr;
-	struct page *page = NULL;
-	pgd_t *pgd = pgd_offset_k(addr);
-	pmd_t *pmd;
-	pte_t *ptep, pte;
-  
-	if (!pgd_none(*pgd)) {
-		pmd = pmd_offset(pgd, addr);
-		if (!pmd_none(*pmd)) {
-			preempt_disable();
-			ptep = pte_offset_map(pmd, addr);
-			pte = *ptep;
-			if (pte_present(pte))
-				page = pte_page(pte);
-			pte_unmap(ptep);
-			preempt_enable();
-		}
-	}
-	return page;
-}
-#endif
-
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
-#define DRM_RPR_ARG(vma)
-#else
 #define DRM_RPR_ARG(vma) vma,
-#endif
 
 #define VM_OFFSET(vma) ((vma)->vm_pgoff << PAGE_SHIFT)
 
@@ -474,7 +445,8 @@
 	unsigned long	  byte_count;
 	enum {
 		_DRM_DMA_USE_AGP = 0x01,
-		_DRM_DMA_USE_SG  = 0x02
+		_DRM_DMA_USE_SG  = 0x02,
+		_DRM_DMA_USE_FB  = 0x04
 	} flags;
 
 } drm_device_dma_t;
@@ -525,12 +497,19 @@
 	drm_hw_lock_t *lock;
 } drm_sigdata_t;
 
+typedef struct drm_dma_handle {
+	dma_addr_t busaddr;
+	void *vaddr;
+	size_t size;
+} drm_dma_handle_t;
+
 /**
  * Mappings list
  */
 typedef struct drm_map_list {
 	struct list_head	head;	/**< list head */
 	drm_map_t		*map;	/**< mapping */
+	unsigned int user_token;
 } drm_map_list_t;
 
 typedef drm_map_t drm_local_map_t;
@@ -578,7 +557,22 @@
  	int (*kernel_context_switch)(struct drm_device *dev, int old, int new);
 	void (*kernel_context_switch_unlock)(struct drm_device *dev, drm_lock_t *lock);
 	int (*vblank_wait)(struct drm_device *dev, unsigned int *sequence);
+	
+	/**
+	 * Called by \c drm_device_is_agp.  Typically used to determine if a
+	 * card is really attached to AGP or not.
+	 *
+	 * \param dev  DRM device handle
+	 *
+	 * \returns
+	 * One of three values is returned depending on whether or not the
+	 * card is absolutely \b not AGP (return of 0), absolutely \b is AGP
+	 * (return of 1), or may or may not be AGP (return of 2).
+	 */
+	int (*device_is_agp) (struct drm_device * dev);
+
 	/* these have to be filled in */
+  
  	int (*postinit)(struct drm_device *, unsigned long flags);
 	irqreturn_t (*irq_handler)( DRM_IRQ_ARGS );
  	void (*irq_preinstall)(struct drm_device *dev);
@@ -722,12 +716,8 @@
 	int               pci_slot;	/**< PCI slot number */
 	int               pci_func;	/**< PCI function number */
 #ifdef __alpha__
-#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3)
-	struct pci_controler *hose;
-#else
 	struct pci_controller *hose;
 #endif
-#endif
 	drm_sg_mem_t      *sg;  /**< Scatter gather memory */
 	unsigned long     *ctx_bitmap;	/**< context bitmap */
 	void		  *dev_private; /**< device private data */
@@ -736,6 +726,7 @@
 
 	struct            drm_driver *driver;
 	drm_local_map_t   *agp_buffer_map;
+	unsigned int agp_buffer_token;
 	drm_head_t primary;		/**< primary screen head */
 } drm_device_t;
 
@@ -806,7 +797,7 @@
 					   drm_device_t *dev);
 extern void	     drm_ioremapfree(void *pt, unsigned long size, drm_device_t *dev);
 
-extern DRM_AGP_MEM   *drm_alloc_agp(struct agp_bridge_data *bridge, int pages, u32 type);
+extern DRM_AGP_MEM   *drm_alloc_agp(drm_device_t *dev, int pages, u32 type);
 extern int           drm_free_agp(DRM_AGP_MEM *handle, int pages);
 extern int           drm_bind_agp(DRM_AGP_MEM *handle, unsigned int start);
 extern int           drm_unbind_agp(DRM_AGP_MEM *handle);
@@ -881,11 +872,19 @@
 				    unsigned int context);
 
 				/* Buffer management support (drm_bufs.h) */
+extern int drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request);
+extern int drm_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request);
+extern int drm_addmap(drm_device_t *dev, unsigned int offset,
+		      unsigned int size, drm_map_type_t type,
+		      drm_map_flags_t flags, drm_local_map_t **map_ptr);
+extern int drm_addmap_ioctl(struct inode *inode, struct file *filp,
+			    unsigned int cmd, unsigned long arg);
+extern int drm_rmmap(drm_device_t *dev, drm_local_map_t *map);
+extern int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map);
+extern int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
+			   unsigned int cmd, unsigned long arg);
+
 extern int	     drm_order( unsigned long size );
-extern int	     drm_addmap( struct inode *inode, struct file *filp,
-				  unsigned int cmd, unsigned long arg );
-extern int	     drm_rmmap( struct inode *inode, struct file *filp,
-				 unsigned int cmd, unsigned long arg );
 extern int	     drm_addbufs( struct inode *inode, struct file *filp,
 				   unsigned int cmd, unsigned long arg );
 extern int	     drm_infobufs( struct inode *inode, struct file *filp,
@@ -896,6 +895,10 @@
 				    unsigned int cmd, unsigned long arg );
 extern int	     drm_mapbufs( struct inode *inode, struct file *filp,
 				   unsigned int cmd, unsigned long arg );
+extern unsigned long drm_get_resource_start(drm_device_t *dev,
+					    unsigned int resource);
+extern unsigned long drm_get_resource_len(drm_device_t *dev,
+					  unsigned int resource);
 
 				/* DMA support (drm_dma.h) */
 extern int	     drm_dma_setup(drm_device_t *dev);
@@ -919,15 +922,18 @@
 
 				/* AGP/GART support (drm_agpsupport.h) */
 extern drm_agp_head_t *drm_agp_init(drm_device_t *dev);
-extern int            drm_agp_acquire(struct inode *inode, struct file *filp,
-				       unsigned int cmd, unsigned long arg);
-extern void           drm_agp_do_release(drm_device_t *dev);
-extern int            drm_agp_release(struct inode *inode, struct file *filp,
-				       unsigned int cmd, unsigned long arg);
-extern int            drm_agp_enable(struct inode *inode, struct file *filp,
-				      unsigned int cmd, unsigned long arg);
-extern int            drm_agp_info(struct inode *inode, struct file *filp,
-				    unsigned int cmd, unsigned long arg);
+extern int drm_agp_acquire(drm_device_t * dev);
+extern int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp,
+			   unsigned int cmd, unsigned long arg);
+extern int drm_agp_release(drm_device_t *dev);
+extern int drm_agp_release_ioctl(struct inode *inode, struct file *filp,
+			   unsigned int cmd, unsigned long arg);
+extern int drm_agp_enable(drm_device_t *dev, drm_agp_mode_t mode);
+extern int drm_agp_enable_ioctl(struct inode *inode, struct file *filp,
+			  unsigned int cmd, unsigned long arg);
+extern int drm_agp_info(drm_device_t * dev, drm_agp_info_t *info);
+extern int drm_agp_info_ioctl(struct inode *inode, struct file *filp,
+			unsigned int cmd, unsigned long arg);
 extern int            drm_agp_alloc(struct inode *inode, struct file *filp,
 				     unsigned int cmd, unsigned long arg);
 extern int            drm_agp_free(struct inode *inode, struct file *filp,
@@ -976,12 +982,10 @@
 					       unsigned long addr,
 					       dma_addr_t bus_addr);
 
-extern void *drm_pci_alloc(drm_device_t * dev, size_t size,
-			   size_t align, dma_addr_t maxaddr,
-			   dma_addr_t * busaddr);
-
-extern void drm_pci_free(drm_device_t * dev, size_t size,
-			 void *vaddr, dma_addr_t busaddr);
+extern drm_dma_handle_t *drm_pci_alloc(drm_device_t *dev, size_t size,
+				       size_t align, dma_addr_t maxaddr);
+extern void __drm_pci_free(drm_device_t *dev, drm_dma_handle_t *dmah);
+extern void drm_pci_free(drm_device_t *dev, drm_dma_handle_t *dmah);
 
 			       /* sysfs support (drm_sysfs.c) */
 struct drm_sysfs_class;
@@ -1012,17 +1016,26 @@
 		drm_ioremapfree( map->handle, map->size, dev );
 }
 
-static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev, unsigned long offset)
+static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev, unsigned int token)
 {
-	struct list_head *_list;
-	list_for_each( _list, &dev->maplist->head ) {
-		drm_map_list_t *_entry = list_entry( _list, drm_map_list_t, head );
-		if ( _entry->map &&
-		     _entry->map->offset == offset ) {
+	drm_map_list_t *_entry;
+	list_for_each_entry(_entry, &dev->maplist->head, head)
+		if (_entry->user_token == token)
 			return _entry->map;
+	return NULL;
+}
+
+static __inline__ int drm_device_is_agp(drm_device_t *dev)
+{
+	if ( dev->driver->device_is_agp != NULL ) {
+		int err = (*dev->driver->device_is_agp)( dev );
+	
+		if (err != 2) {
+			return err;
 		}
 	}
-	return NULL;
+
+	return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP);
 }
 
 static __inline__ void drm_core_dropmap(struct drm_map *map)
diff --git a/drivers/char/drm/drm_agpsupport.c b/drivers/char/drm/drm_agpsupport.c
index 8d94c0b..8c215ad 100644
--- a/drivers/char/drm/drm_agpsupport.c
+++ b/drivers/char/drm/drm_agpsupport.c
@@ -37,7 +37,7 @@
 #if __OS_HAS_AGP
 
 /**
- * AGP information ioctl.
+ * Get AGP information.
  *
  * \param inode device inode.
  * \param filp file pointer.
@@ -48,51 +48,56 @@
  * Verifies the AGP device has been initialized and acquired and fills in the
  * drm_agp_info structure with the information in drm_agp_head::agp_info.
  */
-int drm_agp_info(struct inode *inode, struct file *filp,
-		  unsigned int cmd, unsigned long arg)
+int drm_agp_info(drm_device_t *dev, drm_agp_info_t *info)
 {
-	drm_file_t	 *priv	 = filp->private_data;
-	drm_device_t	 *dev	 = priv->head->dev;
 	DRM_AGP_KERN     *kern;
-	drm_agp_info_t   info;
 
 	if (!dev->agp || !dev->agp->acquired)
 		return -EINVAL;
 
 	kern                   = &dev->agp->agp_info;
-	info.agp_version_major = kern->version.major;
-	info.agp_version_minor = kern->version.minor;
-	info.mode              = kern->mode;
-	info.aperture_base     = kern->aper_base;
-	info.aperture_size     = kern->aper_size * 1024 * 1024;
-	info.memory_allowed    = kern->max_memory << PAGE_SHIFT;
-	info.memory_used       = kern->current_memory << PAGE_SHIFT;
-	info.id_vendor         = kern->device->vendor;
-	info.id_device         = kern->device->device;
+	info->agp_version_major = kern->version.major;
+	info->agp_version_minor = kern->version.minor;
+	info->mode              = kern->mode;
+	info->aperture_base     = kern->aper_base;
+	info->aperture_size     = kern->aper_size * 1024 * 1024;
+	info->memory_allowed    = kern->max_memory << PAGE_SHIFT;
+	info->memory_used       = kern->current_memory << PAGE_SHIFT;
+	info->id_vendor         = kern->device->vendor;
+	info->id_device         = kern->device->device;
 
-	if (copy_to_user((drm_agp_info_t __user *)arg, &info, sizeof(info)))
+	return 0;
+}
+EXPORT_SYMBOL(drm_agp_info);
+
+int drm_agp_info_ioctl(struct inode *inode, struct file *filp,
+		 unsigned int cmd, unsigned long arg)
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->head->dev;
+	drm_agp_info_t info;
+	int err;
+
+	err = drm_agp_info(dev, &info);
+	if (err)
+		return err;
+	
+	if (copy_to_user((drm_agp_info_t __user *) arg, &info, sizeof(info)))
 		return -EFAULT;
 	return 0;
 }
 
 /**
- * Acquire the AGP device (ioctl).
+ * Acquire the AGP device.
  *
- * \param inode device inode.
- * \param filp file pointer.
- * \param cmd command.
- * \param arg user argument.
+ * \param dev DRM device that is to acquire AGP
  * \return zero on success or a negative number on failure. 
  *
  * Verifies the AGP device hasn't been acquired before and calls
- * agp_acquire().
+ * \c agp_backend_acquire.
  */
-int drm_agp_acquire(struct inode *inode, struct file *filp,
-		     unsigned int cmd, unsigned long arg)
+int drm_agp_acquire(drm_device_t *dev)
 {
-	drm_file_t	 *priv	 = filp->private_data;
-	drm_device_t	 *dev	 = priv->head->dev;
-
 	if (!dev->agp)
 		return -ENODEV;
 	if (dev->agp->acquired)
@@ -102,9 +107,10 @@
 	dev->agp->acquired = 1;
 	return 0;
 }
+EXPORT_SYMBOL(drm_agp_acquire);
 
 /**
- * Release the AGP device (ioctl).
+ * Acquire the AGP device (ioctl).
  *
  * \param inode device inode.
  * \param filp file pointer.
@@ -112,63 +118,80 @@
  * \param arg user argument.
  * \return zero on success or a negative number on failure.
  *
- * Verifies the AGP device has been acquired and calls agp_backend_release().
+ * Verifies the AGP device hasn't been acquired before and calls
+ * \c agp_backend_acquire.
  */
-int drm_agp_release(struct inode *inode, struct file *filp,
-		     unsigned int cmd, unsigned long arg)
+int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp,
+			  unsigned int cmd, unsigned long arg)
 {
-	drm_file_t	 *priv	 = filp->private_data;
-	drm_device_t	 *dev	 = priv->head->dev;
-
-	if (!dev->agp || !dev->agp->acquired)
-		return -EINVAL;
-	agp_backend_release(dev->agp->bridge);
-	dev->agp->acquired = 0;
-	return 0;
-
+	drm_file_t *priv = filp->private_data;
+	
+	return drm_agp_acquire( (drm_device_t *) priv->head->dev );
 }
 
 /**
  * Release the AGP device.
  *
- * Calls agp_backend_release().
+ * \param dev DRM device that is to release AGP
+ * \return zero on success or a negative number on failure.
+ *
+ * Verifies the AGP device has been acquired and calls \c agp_backend_release.
  */
-void drm_agp_do_release(drm_device_t *dev)
+int drm_agp_release(drm_device_t *dev)
 {
-  agp_backend_release(dev->agp->bridge);
+	if (!dev->agp || !dev->agp->acquired)
+		return -EINVAL;
+	agp_backend_release(dev->agp->bridge);
+	dev->agp->acquired = 0;
+	return 0;
+}
+EXPORT_SYMBOL(drm_agp_release);
+
+int drm_agp_release_ioctl(struct inode *inode, struct file *filp,
+			  unsigned int cmd, unsigned long arg)
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->head->dev;
+	
+	return drm_agp_release(dev);
 }
 
 /**
  * Enable the AGP bus.
  * 
- * \param inode device inode.
- * \param filp file pointer.
- * \param cmd command.
- * \param arg pointer to a drm_agp_mode structure.
+ * \param dev DRM device that has previously acquired AGP.
+ * \param mode Requested AGP mode.
  * \return zero on success or a negative number on failure.
  *
  * Verifies the AGP device has been acquired but not enabled, and calls
- * agp_enable().
+ * \c agp_enable.
  */
-int drm_agp_enable(struct inode *inode, struct file *filp,
-		    unsigned int cmd, unsigned long arg)
+int drm_agp_enable(drm_device_t *dev, drm_agp_mode_t mode)
 {
-	drm_file_t	 *priv	 = filp->private_data;
-	drm_device_t	 *dev	 = priv->head->dev;
-	drm_agp_mode_t   mode;
-
 	if (!dev->agp || !dev->agp->acquired)
 		return -EINVAL;
 
-	if (copy_from_user(&mode, (drm_agp_mode_t __user *)arg, sizeof(mode)))
-		return -EFAULT;
-
 	dev->agp->mode    = mode.mode;
 	agp_enable(dev->agp->bridge, mode.mode);
 	dev->agp->base    = dev->agp->agp_info.aper_base;
 	dev->agp->enabled = 1;
 	return 0;
 }
+EXPORT_SYMBOL(drm_agp_enable);
+
+int drm_agp_enable_ioctl(struct inode *inode, struct file *filp,
+		   unsigned int cmd, unsigned long arg)
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->head->dev;
+	drm_agp_mode_t mode;
+
+
+	if (copy_from_user(&mode, (drm_agp_mode_t __user *) arg, sizeof(mode)))
+		return -EFAULT;
+
+	return drm_agp_enable(dev, mode);
+}
 
 /**
  * Allocate AGP memory.
@@ -206,7 +229,7 @@
 	pages = (request.size + PAGE_SIZE - 1) / PAGE_SIZE;
 	type = (u32) request.type;
 
-	if (!(memory = drm_alloc_agp(dev->agp->bridge, pages, type))) {
+	if (!(memory = drm_alloc_agp(dev, pages, type))) {
 		drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
 		return -ENOMEM;
 	}
@@ -403,13 +426,8 @@
 		return NULL;
 	}
 	head->memory = NULL;
-#if LINUX_VERSION_CODE <= 0x020408
-	head->cant_use_aperture = 0;
-	head->page_mask = ~(0xfff);
-#else
 	head->cant_use_aperture = head->agp_info.cant_use_aperture;
 	head->page_mask = head->agp_info.page_mask;
-#endif
 
 	return head;
 }
@@ -436,6 +454,7 @@
 		return -EINVAL;
 	return agp_bind_memory(handle, start);
 }
+EXPORT_SYMBOL(drm_agp_bind_memory);
 
 /** Calls agp_unbind_memory() */
 int drm_agp_unbind_memory(DRM_AGP_MEM *handle)
diff --git a/drivers/char/drm/drm_bufs.c b/drivers/char/drm/drm_bufs.c
index 4c6191d..e0743eb 100644
--- a/drivers/char/drm/drm_bufs.c
+++ b/drivers/char/drm/drm_bufs.c
@@ -36,37 +36,69 @@
 #include <linux/vmalloc.h>
 #include "drmP.h"
 
-/**
- * Compute size order.  Returns the exponent of the smaller power of two which
- * is greater or equal to given number.
- * 
- * \param size size.
- * \return order.
- *
- * \todo Can be made faster.
- */
-int drm_order( unsigned long size )
+unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource)
 {
-	int order;
-	unsigned long tmp;
-
-	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
-		;
-
-	if (size & (size - 1))
-		++order;
-
-	return order;
+	return pci_resource_start(dev->pdev, resource);
 }
-EXPORT_SYMBOL(drm_order);
+EXPORT_SYMBOL(drm_get_resource_start);
 
-#ifdef CONFIG_COMPAT
+unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource)
+{
+	return pci_resource_len(dev->pdev, resource);
+}
+EXPORT_SYMBOL(drm_get_resource_len);
+
+static drm_local_map_t *drm_find_matching_map(drm_device_t *dev,
+					      drm_local_map_t *map)
+{
+	struct list_head *list;
+
+	list_for_each(list, &dev->maplist->head) {
+		drm_map_list_t *entry = list_entry(list, drm_map_list_t, head);
+		if (entry->map && map->type == entry->map->type &&
+		    entry->map->offset == map->offset) {
+			return entry->map;
+		}
+	}
+
+	return NULL;
+}
+
 /*
- * Used to allocate 32-bit handles for _DRM_SHM regions
- * The 0x10000000 value is chosen to be out of the way of
- * FB/register and GART physical addresses.
+ * Used to allocate 32-bit handles for mappings.
  */
-static unsigned int map32_handle = 0x10000000;
+#define START_RANGE 0x10000000
+#define END_RANGE 0x40000000
+
+#ifdef _LP64
+static __inline__ unsigned int HandleID(unsigned long lhandle, drm_device_t *dev) 
+{
+	static unsigned int map32_handle = START_RANGE;
+	unsigned int hash;
+
+	if (lhandle & 0xffffffff00000000) {
+		hash = map32_handle;
+		map32_handle += PAGE_SIZE;
+		if (map32_handle > END_RANGE)
+			map32_handle = START_RANGE;
+	} else 
+		hash = lhandle;
+
+	while (1) {
+		drm_map_list_t *_entry;
+		list_for_each_entry(_entry, &dev->maplist->head,head) {
+			if (_entry->user_token == hash)
+				break;
+		}
+		if (&_entry->head == &dev->maplist->head)
+			return hash;
+
+		hash += PAGE_SIZE;
+		map32_handle += PAGE_SIZE;
+	}
+}
+#else
+# define HandleID(x,dev) (unsigned int)(x)
 #endif
 
 /**
@@ -82,25 +114,23 @@
  * type.  Adds the map to the map list drm_device::maplist. Adds MTRR's where
  * applicable and if supported by the kernel.
  */
-int drm_addmap( struct inode *inode, struct file *filp,
-		 unsigned int cmd, unsigned long arg )
+int drm_addmap(drm_device_t * dev, unsigned int offset,
+	       unsigned int size, drm_map_type_t type,
+	       drm_map_flags_t flags, drm_local_map_t ** map_ptr)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
 	drm_map_t *map;
-	drm_map_t __user *argp = (void __user *)arg;
 	drm_map_list_t *list;
-
-	if ( !(filp->f_mode & 3) ) return -EACCES; /* Require read/write */
+	drm_dma_handle_t *dmah;
+	drm_local_map_t *found_map;
 
 	map = drm_alloc( sizeof(*map), DRM_MEM_MAPS );
 	if ( !map )
 		return -ENOMEM;
 
-	if ( copy_from_user( map, argp, sizeof(*map) ) ) {
-		drm_free( map, sizeof(*map), DRM_MEM_MAPS );
-		return -EFAULT;
-	}
+	map->offset = offset;
+	map->size = size;
+	map->flags = flags;
+	map->type = type;
 
 	/* Only allow shared memory to be removable since we only keep enough
 	 * book keeping information about shared memory to allow for removal
@@ -122,7 +152,7 @@
 	switch ( map->type ) {
 	case _DRM_REGISTERS:
 	case _DRM_FRAME_BUFFER:
-#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__)
+#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
 		if ( map->offset + map->size < map->offset ||
 		     map->offset < virt_to_phys(high_memory) ) {
 			drm_free( map, sizeof(*map), DRM_MEM_MAPS );
@@ -132,6 +162,24 @@
 #ifdef __alpha__
 		map->offset += dev->hose->mem_space->start;
 #endif
+		/* Some drivers preinitialize some maps, without the X Server
+		 * needing to be aware of it.  Therefore, we just return success
+		 * when the server tries to create a duplicate map.
+		 */
+		found_map = drm_find_matching_map(dev, map);
+		if (found_map != NULL) {
+			if (found_map->size != map->size) {
+				DRM_DEBUG("Matching maps of type %d with "
+				   "mismatched sizes, (%ld vs %ld)\n",
+				    map->type, map->size, found_map->size);
+				found_map->size = map->size;
+			}
+
+			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+			*map_ptr = found_map;
+			return 0;
+		}
+
 		if (drm_core_has_MTRR(dev)) {
 			if ( map->type == _DRM_FRAME_BUFFER ||
 			     (map->flags & _DRM_WRITE_COMBINING) ) {
@@ -178,9 +226,22 @@
 			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
 			return -EINVAL;
 		}
-		map->offset += dev->sg->handle;
+		map->offset += (unsigned long)dev->sg->virtual;
 		break;
-
+	case _DRM_CONSISTENT: 
+		/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
+		 * As we're limiting the address to 2^32-1 (or less),
+		 * casting it down to 32 bits is no problem, but we
+		 * need to point to a 64bit variable first. */
+		dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
+		if (!dmah) {
+			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+			return -ENOMEM;
+		}
+		map->handle = dmah->vaddr;
+		map->offset = (unsigned long)dmah->busaddr;
+		kfree(dmah);
+		break;
 	default:
 		drm_free( map, sizeof(*map), DRM_MEM_MAPS );
 		return -EINVAL;
@@ -196,17 +257,56 @@
 
 	down(&dev->struct_sem);
 	list_add(&list->head, &dev->maplist->head);
-#ifdef CONFIG_COMPAT
-	/* Assign a 32-bit handle for _DRM_SHM mappings */
+	/* Assign a 32-bit handle */
 	/* We do it here so that dev->struct_sem protects the increment */
-	if (map->type == _DRM_SHM)
-		map->offset = map32_handle += PAGE_SIZE;
-#endif
+	list->user_token = HandleID(map->type==_DRM_SHM
+				    ? (unsigned long)map->handle
+				    : map->offset, dev);
  	up(&dev->struct_sem);
 
-	if ( copy_to_user( argp, map, sizeof(*map) ) )
+	*map_ptr = map;
+	return 0;
+}
+EXPORT_SYMBOL(drm_addmap);
+
+int drm_addmap_ioctl(struct inode *inode, struct file *filp,
+		     unsigned int cmd, unsigned long arg)
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->head->dev;
+	drm_map_t map;
+	drm_map_t *map_ptr;
+	drm_map_t __user *argp = (void __user *)arg;
+	int err;
+	unsigned long handle = 0;
+
+	if (!(filp->f_mode & 3))
+		return -EACCES;	/* Require read/write */
+
+	if (copy_from_user(& map, argp, sizeof(map))) {
 		return -EFAULT;
-	if (copy_to_user(&argp->handle, &map->offset, sizeof(map->offset)))
+	}
+
+	err = drm_addmap(dev, map.offset, map.size, map.type, map.flags,
+			 &map_ptr);
+
+	if (err) {
+		return err;
+	}
+
+	{
+		drm_map_list_t *_entry;
+		list_for_each_entry(_entry, &dev->maplist->head, head) {
+			if (_entry->map == map_ptr)
+				handle = _entry->user_token;
+		}
+		if (!handle)
+			return -EFAULT;
+	}
+
+	if (copy_to_user(argp, map_ptr, sizeof(*map_ptr)))
+		return -EFAULT;
+	if (put_user(handle, &argp->handle))
 		return -EFAULT;
 	return 0;
 }
@@ -226,81 +326,138 @@
  * its being used, and free any associate resource (such as MTRR's) if it's not
  * being on use.
  *
- * \sa addmap().
+ * \sa drm_addmap
  */
-int drm_rmmap(struct inode *inode, struct file *filp,
-	       unsigned int cmd, unsigned long arg)
+int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
 {
-	drm_file_t	*priv	= filp->private_data;
-	drm_device_t	*dev	= priv->head->dev;
 	struct list_head *list;
 	drm_map_list_t *r_list = NULL;
-	drm_vma_entry_t *pt, *prev;
-	drm_map_t *map;
-	drm_map_t request;
-	int found_maps = 0;
+	drm_dma_handle_t dmah;
 
-	if (copy_from_user(&request, (drm_map_t __user *)arg,
-			   sizeof(request))) {
+	/* Find the list entry for the map and remove it */
+	list_for_each(list, &dev->maplist->head) {
+		r_list = list_entry(list, drm_map_list_t, head);
+
+		if (r_list->map == map) {
+			list_del(list);
+			drm_free(list, sizeof(*list), DRM_MEM_MAPS);
+			break;
+		}
+	}
+
+	/* List has wrapped around to the head pointer, or it's empty and we
+	 * didn't find anything.
+	 */
+	if (list == (&dev->maplist->head)) {
+		return -EINVAL;
+	}
+
+	switch (map->type) {
+	case _DRM_REGISTERS:
+		drm_ioremapfree(map->handle, map->size, dev);
+		/* FALLTHROUGH */
+	case _DRM_FRAME_BUFFER:
+		if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
+			int retcode;
+			retcode = mtrr_del(map->mtrr, map->offset,
+					   map->size);
+			DRM_DEBUG ("mtrr_del=%d\n", retcode);
+		}
+		break;
+	case _DRM_SHM:
+		vfree(map->handle);
+		break;
+	case _DRM_AGP:
+	case _DRM_SCATTER_GATHER:
+		break;
+	case _DRM_CONSISTENT:
+		dmah.vaddr = map->handle;
+		dmah.busaddr = map->offset;
+		dmah.size = map->size;
+		__drm_pci_free(dev, &dmah);
+		break;
+	}
+	drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_rmmap_locked);
+
+int drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
+{
+	int ret;
+
+	down(&dev->struct_sem);
+	ret = drm_rmmap_locked(dev, map);
+	up(&dev->struct_sem);
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_rmmap);
+
+/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
+ * the last close of the device, and this is necessary for cleanup when things
+ * exit uncleanly.  Therefore, having userland manually remove mappings seems
+ * like a pointless exercise since they're going away anyway.
+ *
+ * One use case might be after addmap is allowed for normal users for SHM and
+ * gets used by drivers that the server doesn't need to care about.  This seems
+ * unlikely.
+ */
+int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
+		    unsigned int cmd, unsigned long arg)
+{
+	drm_file_t *priv = filp->private_data;
+	drm_device_t *dev = priv->head->dev;
+	drm_map_t request;
+	drm_local_map_t *map = NULL;
+	struct list_head *list;
+	int ret;
+
+	if (copy_from_user(&request, (drm_map_t __user *)arg, sizeof(request))) {
 		return -EFAULT;
 	}
 
 	down(&dev->struct_sem);
-	list = &dev->maplist->head;
 	list_for_each(list, &dev->maplist->head) {
-		r_list = list_entry(list, drm_map_list_t, head);
+		drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
 
-		if(r_list->map &&
-		   r_list->map->offset == (unsigned long) request.handle &&
-		   r_list->map->flags & _DRM_REMOVABLE) break;
+		if (r_list->map &&
+		    r_list->user_token == (unsigned long) request.handle &&
+		    r_list->map->flags & _DRM_REMOVABLE) {
+			map = r_list->map;
+			break;
+		}
 	}
 
 	/* List has wrapped around to the head pointer, or its empty we didn't
 	 * find anything.
 	 */
-	if(list == (&dev->maplist->head)) {
+	if (list == (&dev->maplist->head)) {
 		up(&dev->struct_sem);
 		return -EINVAL;
 	}
-	map = r_list->map;
-	list_del(list);
-	drm_free(list, sizeof(*list), DRM_MEM_MAPS);
 
-	for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
-		if (pt->vma->vm_private_data == map) found_maps++;
+	if (!map)
+		return -EINVAL;
+
+	/* Register and framebuffer maps are permanent */
+	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
+		up(&dev->struct_sem);
+		return 0;
 	}
 
-	if(!found_maps) {
-		switch (map->type) {
-		case _DRM_REGISTERS:
-		case _DRM_FRAME_BUFFER:
-		  if (drm_core_has_MTRR(dev)) {
-				if (map->mtrr >= 0) {
-					int retcode;
-					retcode = mtrr_del(map->mtrr,
-							   map->offset,
-							   map->size);
-					DRM_DEBUG("mtrr_del = %d\n", retcode);
-				}
-			}
-			drm_ioremapfree(map->handle, map->size, dev);
-			break;
-		case _DRM_SHM:
-			vfree(map->handle);
-			break;
-		case _DRM_AGP:
-		case _DRM_SCATTER_GATHER:
-			break;
-		}
-		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
-	}
+	ret = drm_rmmap_locked(dev, map);
+
 	up(&dev->struct_sem);
-	return 0;
+
+	return ret;
 }
 
 /**
  * Cleanup after an error on one of the addbufs() functions.
  *
+ * \param dev DRM device.
  * \param entry buffer entry where the error occurred.
  *
  * Frees any pages and buffers associated with the given entry.
@@ -344,25 +501,19 @@
 
 #if __OS_HAS_AGP
 /**
- * Add AGP buffers for DMA transfers (ioctl).
+ * Add AGP buffers for DMA transfers.
  *
- * \param inode device inode.
- * \param filp file pointer.
- * \param cmd command.
- * \param arg pointer to a drm_buf_desc_t request.
+ * \param dev drm_device_t to which the buffers are to be added.
+ * \param request pointer to a drm_buf_desc_t describing the request.
  * \return zero on success or a negative number on failure.
  * 
  * After some sanity checks creates a drm_buf structure for each buffer and
  * reallocates the buffer list of the same size order to accommodate the new
  * buffers.
  */
-static int drm_addbufs_agp( struct inode *inode, struct file *filp,
-			    unsigned int cmd, unsigned long arg )
+int drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
 	drm_device_dma_t *dma = dev->dma;
-	drm_buf_desc_t request;
 	drm_buf_entry_t *entry;
 	drm_buf_t *buf;
 	unsigned long offset;
@@ -376,25 +527,20 @@
 	int byte_count;
 	int i;
 	drm_buf_t **temp_buflist;
-	drm_buf_desc_t __user *argp = (void __user *)arg;
 
 	if ( !dma ) return -EINVAL;
 
-	if ( copy_from_user( &request, argp,
-			     sizeof(request) ) )
-		return -EFAULT;
-
-	count = request.count;
-	order = drm_order( request.size );
+	count = request->count;
+	order = drm_order(request->size);
 	size = 1 << order;
 
-	alignment  = (request.flags & _DRM_PAGE_ALIGN)
+	alignment  = (request->flags & _DRM_PAGE_ALIGN)
 		? PAGE_ALIGN(size) : size;
 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
 	total = PAGE_SIZE << page_order;
 
 	byte_count = 0;
-	agp_offset = dev->agp->base + request.agp_start;
+	agp_offset = dev->agp->base + request->agp_start;
 
 	DRM_DEBUG( "count:      %d\n",  count );
 	DRM_DEBUG( "order:      %d\n",  order );
@@ -508,26 +654,20 @@
 
 	up( &dev->struct_sem );
 
-	request.count = entry->buf_count;
-	request.size = size;
-
-	if ( copy_to_user( argp, &request, sizeof(request) ) )
-		return -EFAULT;
+	request->count = entry->buf_count;
+	request->size = size;
 
 	dma->flags = _DRM_DMA_USE_AGP;
 
 	atomic_dec( &dev->buf_alloc );
 	return 0;
 }
+EXPORT_SYMBOL(drm_addbufs_agp);
 #endif /* __OS_HAS_AGP */
 
-static int drm_addbufs_pci( struct inode *inode, struct file *filp,
-			    unsigned int cmd, unsigned long arg )
+int drm_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
 {
-   	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
 	drm_device_dma_t *dma = dev->dma;
-	drm_buf_desc_t request;
 	int count;
 	int order;
 	int size;
@@ -543,26 +683,22 @@
 	int page_count;
 	unsigned long *temp_pagelist;
 	drm_buf_t **temp_buflist;
-	drm_buf_desc_t __user *argp = (void __user *)arg;
 
 	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA)) return -EINVAL;
 	if ( !dma ) return -EINVAL;
 
-	if ( copy_from_user( &request, argp, sizeof(request) ) )
-		return -EFAULT;
-
-	count = request.count;
-	order = drm_order( request.size );
+	count = request->count;
+	order = drm_order(request->size);
 	size = 1 << order;
 
 	DRM_DEBUG( "count=%d, size=%d (%d), order=%d, queue_count=%d\n",
-		   request.count, request.size, size,
+		   request->count, request->size, size,
 		   order, dev->queue_count );
 
 	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
 	if ( dev->queue_count ) return -EBUSY; /* Not while in use */
 
-	alignment = (request.flags & _DRM_PAGE_ALIGN)
+	alignment = (request->flags & _DRM_PAGE_ALIGN)
 		? PAGE_ALIGN(size) : size;
 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
 	total = PAGE_SIZE << page_order;
@@ -740,25 +876,18 @@
 
 	up( &dev->struct_sem );
 
-	request.count = entry->buf_count;
-	request.size = size;
-
-	if ( copy_to_user( argp, &request, sizeof(request) ) )
-		return -EFAULT;
+	request->count = entry->buf_count;
+	request->size = size;
 
 	atomic_dec( &dev->buf_alloc );
 	return 0;
 
 }
+EXPORT_SYMBOL(drm_addbufs_pci);
 
-static int drm_addbufs_sg( struct inode *inode, struct file *filp,
-			   unsigned int cmd, unsigned long arg )
+static int drm_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
 {
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->head->dev;
 	drm_device_dma_t *dma = dev->dma;
-	drm_buf_desc_t __user *argp = (void __user *)arg;
-	drm_buf_desc_t request;
 	drm_buf_entry_t *entry;
 	drm_buf_t *buf;
 	unsigned long offset;
@@ -777,20 +906,17 @@
 	
 	if ( !dma ) return -EINVAL;
 
-	if ( copy_from_user( &request, argp, sizeof(request) ) )
-		return -EFAULT;
-
-	count = request.count;
-	order = drm_order( request.size );
+	count = request->count;
+	order = drm_order(request->size);
 	size = 1 << order;
 
-	alignment  = (request.flags & _DRM_PAGE_ALIGN)
+	alignment  = (request->flags & _DRM_PAGE_ALIGN)
 			? PAGE_ALIGN(size) : size;
 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
 	total = PAGE_SIZE << page_order;
 
 	byte_count = 0;
-	agp_offset = request.agp_start;
+	agp_offset = request->agp_start;
 
 	DRM_DEBUG( "count:      %d\n",  count );
 	DRM_DEBUG( "order:      %d\n",  order );
@@ -848,7 +974,8 @@
 
 		buf->offset  = (dma->byte_count + offset);
 		buf->bus_address = agp_offset + offset;
-		buf->address = (void *)(agp_offset + offset + dev->sg->handle);
+		buf->address = (void *)(agp_offset + offset 
+					+ (unsigned long)dev->sg->virtual);
 		buf->next    = NULL;
 		buf->waiting = 0;
 		buf->pending = 0;
@@ -905,11 +1032,8 @@
 
 	up( &dev->struct_sem );
 
-	request.count = entry->buf_count;
-	request.size = size;
-
-	if ( copy_to_user( argp, &request, sizeof(request) ) )
-		return -EFAULT;
+	request->count = entry->buf_count;
+	request->size = size;
 
 	dma->flags = _DRM_DMA_USE_SG;
 
@@ -917,6 +1041,161 @@
 	return 0;
 }
 
+int drm_addbufs_fb(drm_device_t *dev, drm_buf_desc_t *request)
+{
+	drm_device_dma_t *dma = dev->dma;
+	drm_buf_entry_t *entry;
+	drm_buf_t *buf;
+	unsigned long offset;
+	unsigned long agp_offset;
+	int count;
+	int order;
+	int size;
+	int alignment;
+	int page_order;
+	int total;
+	int byte_count;
+	int i;
+	drm_buf_t **temp_buflist;
+
+	if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
+		return -EINVAL;
+    
+	if (!dma)
+		return -EINVAL;
+
+	count = request->count;
+	order = drm_order(request->size);
+	size = 1 << order;
+
+	alignment = (request->flags & _DRM_PAGE_ALIGN)
+	    ? PAGE_ALIGN(size) : size;
+	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+	total = PAGE_SIZE << page_order;
+
+	byte_count = 0;
+	agp_offset = request->agp_start;
+
+	DRM_DEBUG("count:      %d\n", count);
+	DRM_DEBUG("order:      %d\n", order);
+	DRM_DEBUG("size:       %d\n", size);
+	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
+	DRM_DEBUG("alignment:  %d\n", alignment);
+	DRM_DEBUG("page_order: %d\n", page_order);
+	DRM_DEBUG("total:      %d\n", total);
+
+	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
+		return -EINVAL;
+	if (dev->queue_count)
+		return -EBUSY;	/* Not while in use */
+
+	spin_lock(&dev->count_lock);
+	if (dev->buf_use) {
+		spin_unlock(&dev->count_lock);
+		return -EBUSY;
+	}
+	atomic_inc(&dev->buf_alloc);
+	spin_unlock(&dev->count_lock);
+
+	down(&dev->struct_sem);
+	entry = &dma->bufs[order];
+	if (entry->buf_count) {
+		up(&dev->struct_sem);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;	/* May only call once for each order */
+	}
+
+	if (count < 0 || count > 4096) {
+		up(&dev->struct_sem);
+		atomic_dec(&dev->buf_alloc);
+		return -EINVAL;
+	}
+
+	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
+				   DRM_MEM_BUFS);
+	if (!entry->buflist) {
+		up(&dev->struct_sem);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;
+	}
+	memset(entry->buflist, 0, count * sizeof(*entry->buflist));
+
+	entry->buf_size = size;
+	entry->page_order = page_order;
+
+	offset = 0;
+
+	while (entry->buf_count < count) {
+		buf = &entry->buflist[entry->buf_count];
+		buf->idx = dma->buf_count + entry->buf_count;
+		buf->total = alignment;
+		buf->order = order;
+		buf->used = 0;
+
+		buf->offset = (dma->byte_count + offset);
+		buf->bus_address = agp_offset + offset;
+		buf->address = (void *)(agp_offset + offset);
+		buf->next = NULL;
+		buf->waiting = 0;
+		buf->pending = 0;
+		init_waitqueue_head(&buf->dma_wait);
+		buf->filp = NULL;
+
+		buf->dev_priv_size = dev->driver->dev_priv_size;
+		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
+		if (!buf->dev_private) {
+			/* Set count correctly so we free the proper amount. */
+			entry->buf_count = count;
+			drm_cleanup_buf_error(dev, entry);
+			up(&dev->struct_sem);
+			atomic_dec(&dev->buf_alloc);
+			return -ENOMEM;
+		}
+		memset(buf->dev_private, 0, buf->dev_priv_size);
+
+		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);
+
+		offset += alignment;
+		entry->buf_count++;
+		byte_count += PAGE_SIZE << page_order;
+	}
+
+	DRM_DEBUG("byte_count: %d\n", byte_count);
+
+	temp_buflist = drm_realloc(dma->buflist,
+				   dma->buf_count * sizeof(*dma->buflist),
+				   (dma->buf_count + entry->buf_count)
+				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
+	if (!temp_buflist) {
+		/* Free the entry because it isn't valid */
+		drm_cleanup_buf_error(dev, entry);
+		up(&dev->struct_sem);
+		atomic_dec(&dev->buf_alloc);
+		return -ENOMEM;
+	}
+	dma->buflist = temp_buflist;
+
+	for (i = 0; i < entry->buf_count; i++) {
+		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
+	}
+
+	dma->buf_count += entry->buf_count;
+	dma->byte_count += byte_count;
+
+	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
+	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
+
+	up(&dev->struct_sem);
+
+	request->count = entry->buf_count;
+	request->size = size;
+
+	dma->flags = _DRM_DMA_USE_FB;
+
+	atomic_dec(&dev->buf_alloc);
+	return 0;
+}
+
 /**
  * Add buffers for DMA transfers (ioctl).
  *
@@ -937,6 +1216,7 @@
 	drm_buf_desc_t request;
 	drm_file_t *priv = filp->private_data;
 	drm_device_t *dev = priv->head->dev;
+	int ret;
 	
 	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
 		return -EINVAL;
@@ -947,13 +1227,23 @@
 
 #if __OS_HAS_AGP
 	if ( request.flags & _DRM_AGP_BUFFER )
-		return drm_addbufs_agp( inode, filp, cmd, arg );
+		ret = drm_addbufs_agp(dev, &request);
 	else
 #endif
 	if ( request.flags & _DRM_SG_BUFFER )
-		return drm_addbufs_sg( inode, filp, cmd, arg );
+		ret = drm_addbufs_sg(dev, &request);
+	else if ( request.flags & _DRM_FB_BUFFER )
+		ret = drm_addbufs_fb(dev, &request);
 	else
-		return drm_addbufs_pci( inode, filp, cmd, arg );
+		ret = drm_addbufs_pci(dev, &request);
+
+	if (ret == 0) {
+		if (copy_to_user((void __user *)arg, &request,
+				 sizeof(request))) {
+			ret = -EFAULT;
+		}
+	}
+	return ret;
 }
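
A note on the shape of this refactor: the copy_from_user()/copy_to_user() pair now lives only in the drm_addbufs() ioctl wrapper, while drm_addbufs_agp(), drm_addbufs_sg(), the new drm_addbufs_fb() and drm_addbufs_pci() take a kernel-resident drm_buf_desc_t (with the AGP and PCI variants additionally exported). A minimal sketch of an in-kernel caller under that convention; my_driver_dma_init() and the request values are illustrative, not part of the patch:

    #include "drmP.h"

    /* Illustrative only: a driver-side DMA init path calling the newly
     * exported helper directly, with no user-space copies involved. */
    static int my_driver_dma_init(drm_device_t *dev)
    {
            drm_buf_desc_t req;
            int ret;

            memset(&req, 0, sizeof(req));
            req.count = 64;                   /* buffers requested */
            req.size = 0x10000;               /* 64 KiB each */
            req.flags = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN;
            req.agp_start = 0;                /* offset into the AGP aperture */

            ret = drm_addbufs_agp(dev, &req);
            if (ret)
                    return ret;

            /* On success the helper wrote back the granted count/size. */
            DRM_DEBUG("got %d buffers of %d bytes\n", req.count, req.size);
            return 0;
    }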
 
 
@@ -1196,43 +1486,31 @@
 		return -EFAULT;
 
 	if ( request.count >= dma->buf_count ) {
-		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) ||
-		    (drm_core_check_feature(dev, DRIVER_SG) && (dma->flags & _DRM_DMA_USE_SG)) ) {
+		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
+		    || (drm_core_check_feature(dev, DRIVER_SG) 
+			&& (dma->flags & _DRM_DMA_USE_SG))
+		    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
+			&& (dma->flags & _DRM_DMA_USE_FB))) {
 			drm_map_t *map = dev->agp_buffer_map;
+			unsigned long token = dev->agp_buffer_token;
 
 			if ( !map ) {
 				retcode = -EINVAL;
 				goto done;
 			}
 
-#if LINUX_VERSION_CODE <= 0x020402
-			down( &current->mm->mmap_sem );
-#else
 			down_write( &current->mm->mmap_sem );
-#endif
 			virtual = do_mmap( filp, 0, map->size,
 					   PROT_READ | PROT_WRITE,
 					   MAP_SHARED,
-					   (unsigned long)map->offset );
-#if LINUX_VERSION_CODE <= 0x020402
-			up( &current->mm->mmap_sem );
-#else
+					   token );
 			up_write( &current->mm->mmap_sem );
-#endif
 		} else {
-#if LINUX_VERSION_CODE <= 0x020402
-			down( &current->mm->mmap_sem );
-#else
 			down_write( &current->mm->mmap_sem );
-#endif
 			virtual = do_mmap( filp, 0, dma->byte_count,
 					   PROT_READ | PROT_WRITE,
 					   MAP_SHARED, 0 );
-#if LINUX_VERSION_CODE <= 0x020402
-			up( &current->mm->mmap_sem );
-#else
 			up_write( &current->mm->mmap_sem );
-#endif
 		}
 		if ( virtual > -1024UL ) {
 			/* Real error */
@@ -1279,3 +1557,26 @@
 	return retcode;
 }
 
+/**
+ * Compute size order.  Returns the exponent of the smallest power of two
+ * which is greater than or equal to the given number.
+ *
+ * \param size size.
+ * \return order.
+ *
+ * \todo Can be made faster.
+ */
+int drm_order( unsigned long size )
+{
+	int order;
+	unsigned long tmp;
+
+	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
+		;
+
+	if (size & (size - 1))
+		++order;
+
+	return order;
+}
+EXPORT_SYMBOL(drm_order);
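
For reference, a worked illustration (not part of the patch) of what drm_order() computes and how the addbufs paths above use it:

    /*
     * drm_order(size) returns ceil(log2(size)), i.e. the exponent of the
     * smallest power of two that is >= size:
     *
     *   drm_order(1)     == 0
     *   drm_order(4096)  == 12
     *   drm_order(4097)  == 13
     *   drm_order(65536) == 16
     *
     * The buffer code uses it to round a request up to a power of two:
     *
     *   order = drm_order(request->size);
     *   size  = 1 << order;
     */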
diff --git a/drivers/char/drm/drm_context.c b/drivers/char/drm/drm_context.c
index a7cfabd..f515567 100644
--- a/drivers/char/drm/drm_context.c
+++ b/drivers/char/drm/drm_context.c
@@ -212,6 +212,7 @@
 	drm_ctx_priv_map_t __user *argp = (void __user *)arg;
 	drm_ctx_priv_map_t request;
 	drm_map_t *map;
+	drm_map_list_t *_entry;
 
 	if (copy_from_user(&request, argp, sizeof(request)))
 		return -EFAULT;
@@ -225,7 +226,17 @@
 	map = dev->context_sareas[request.ctx_id];
 	up(&dev->struct_sem);
 
-	request.handle = (void *) map->offset;
+	request.handle = 0;
+	list_for_each_entry(_entry, &dev->maplist->head, head) {
+		if (_entry->map == map) {
+			request.handle = (void *)(unsigned long)_entry->user_token;
+			break;
+		}
+	}
+	if (request.handle == 0)
+		return -EINVAL;
+
+
 	if (copy_to_user(argp, &request, sizeof(request)))
 		return -EFAULT;
 	return 0;
@@ -262,7 +273,7 @@
 	list_for_each(list, &dev->maplist->head) {
 		r_list = list_entry(list, drm_map_list_t, head);
 		if (r_list->map
-		    && r_list->map->offset == (unsigned long) request.handle)
+		    && r_list->user_token == (unsigned long) request.handle)
 			goto found;
 	}
 bad:
@@ -369,7 +380,7 @@
 		for ( i = 0 ; i < DRM_RESERVED_CONTEXTS ; i++ ) {
 			ctx.handle = i;
 			if ( copy_to_user( &res.contexts[i],
-					   &i, sizeof(i) ) )
+					   &ctx, sizeof(ctx) ) )
 				return -EFAULT;
 		}
 	}
diff --git a/drivers/char/drm/drm_drv.c b/drivers/char/drm/drm_drv.c
index 3333c25..6ba48f3 100644
--- a/drivers/char/drm/drm_drv.c
+++ b/drivers/char/drm/drm_drv.c
@@ -70,8 +70,8 @@
 	[DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)]       = { drm_noop,        1, 1 },
 	[DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)]    = { drm_authmagic,   1, 1 },
 
-	[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)]       = { drm_addmap,      1, 1 },
-	[DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)]        = { drm_rmmap,       1, 0 },
+	[DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)]       = { drm_addmap_ioctl,1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)]        = { drm_rmmap_ioctl, 1, 0 },
 
 	[DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = { drm_setsareactx, 1, 1 },
 	[DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = { drm_getsareactx, 1, 0 },
@@ -102,10 +102,10 @@
 	[DRM_IOCTL_NR(DRM_IOCTL_CONTROL)]       = { drm_control,     1, 1 },
 
 #if __OS_HAS_AGP
-	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)]   = { drm_agp_acquire, 1, 1 },
-	[DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)]   = { drm_agp_release, 1, 1 },
-	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)]    = { drm_agp_enable,  1, 1 },
-	[DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)]      = { drm_agp_info,    1, 0 },
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)]   = { drm_agp_acquire_ioctl, 1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)]   = { drm_agp_release_ioctl, 1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)]    = { drm_agp_enable_ioctl, 1, 1 },
+	[DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)]      = { drm_agp_info_ioctl, 1, 0 },
 	[DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)]     = { drm_agp_alloc,   1, 1 },
 	[DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)]      = { drm_agp_free,    1, 1 },
 	[DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)]      = { drm_agp_bind,    1, 1 },
@@ -127,14 +127,12 @@
  *
  * Frees every resource in \p dev.
  *
- * \sa drm_device and setup().
+ * \sa drm_device
  */
 int drm_takedown( drm_device_t *dev )
 {
 	drm_magic_entry_t *pt, *next;
-	drm_map_t *map;
 	drm_map_list_t *r_list;
-	struct list_head *list, *list_next;
 	drm_vma_entry_t *vma, *vma_next;
 	int i;
 
@@ -142,6 +140,7 @@
 
 	if (dev->driver->pretakedown)
 	  dev->driver->pretakedown(dev);
+	DRM_DEBUG("driver pretakedown completed\n");
 
 	if (dev->unique) {
 		drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);
@@ -178,11 +177,16 @@
 		}
 		dev->agp->memory = NULL;
 
-		if ( dev->agp->acquired ) drm_agp_do_release(dev);
+		if (dev->agp->acquired)
+		  drm_agp_release(dev);
 
 		dev->agp->acquired = 0;
 		dev->agp->enabled  = 0;
 	}
+	if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg) {
+		drm_sg_cleanup(dev->sg);
+		dev->sg = NULL;
+	}
 
 				/* Clear vma list (only built for debugging) */
 	if ( dev->vmalist ) {
@@ -194,48 +198,11 @@
 	}
 
 	if( dev->maplist ) {
-		list_for_each_safe( list, list_next, &dev->maplist->head ) {
-			r_list = (drm_map_list_t *)list;
-
-			if ( ( map = r_list->map ) ) {
-				switch ( map->type ) {
-				case _DRM_REGISTERS:
-				case _DRM_FRAME_BUFFER:
-					if (drm_core_has_MTRR(dev)) {
-						if ( map->mtrr >= 0 ) {
-							int retcode;
-							retcode = mtrr_del( map->mtrr,
-									    map->offset,
-									    map->size );
-							DRM_DEBUG( "mtrr_del=%d\n", retcode );
-						}
-					}
-					drm_ioremapfree( map->handle, map->size, dev );
-					break;
-				case _DRM_SHM:
-					vfree(map->handle);
-					break;
-
-				case _DRM_AGP:
-					/* Do nothing here, because this is all
-					 * handled in the AGP/GART driver.
-					 */
-					break;
-				case _DRM_SCATTER_GATHER:
-					/* Handle it */
-					if (drm_core_check_feature(dev, DRIVER_SG) && dev->sg) {
-						drm_sg_cleanup(dev->sg);
-						dev->sg = NULL;
-					}
-					break;
-				}
-				drm_free(map, sizeof(*map), DRM_MEM_MAPS);
-			}
-			list_del( list );
-			drm_free(r_list, sizeof(*r_list), DRM_MEM_MAPS);
- 		}
-		drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
-		dev->maplist = NULL;
+		while (!list_empty(&dev->maplist->head)) {
+			struct list_head *list = dev->maplist->head.next;
+			r_list = list_entry(list, drm_map_list_t, head);
+			drm_rmmap_locked(dev, r_list->map);
+		}
  	}
 
 	if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist ) {
@@ -264,6 +231,7 @@
 	}
 	up( &dev->struct_sem );
 
+	DRM_DEBUG("takedown completed\n");
 	return 0;
 }
 
@@ -312,7 +280,7 @@
  *
  * Cleans up all DRM device, calling takedown().
  * 
- * \sa drm_init().
+ * \sa drm_init
  */
 static void drm_cleanup( drm_device_t *dev )
 {
@@ -325,6 +293,11 @@
 
 	drm_takedown( dev );	
 
+	if (dev->maplist) {
+		drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
+		dev->maplist = NULL;
+	}
+
 	drm_ctxbitmap_cleanup( dev );
 	
 	if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) &&
diff --git a/drivers/char/drm/drm_fops.c b/drivers/char/drm/drm_fops.c
index 10e64fd..a1f4e9c 100644
--- a/drivers/char/drm/drm_fops.c
+++ b/drivers/char/drm/drm_fops.c
@@ -71,12 +71,6 @@
 		dev->magiclist[i].tail = NULL;
 	}
 
-	dev->maplist = drm_alloc(sizeof(*dev->maplist),
-				  DRM_MEM_MAPS);
-	if(dev->maplist == NULL) return -ENOMEM;
-	memset(dev->maplist, 0, sizeof(*dev->maplist));
-	INIT_LIST_HEAD(&dev->maplist->head);
-
 	dev->ctxlist = drm_alloc(sizeof(*dev->ctxlist),
 				  DRM_MEM_CTXLIST);
 	if(dev->ctxlist == NULL) return -ENOMEM;
diff --git a/drivers/char/drm/drm_ioctl.c b/drivers/char/drm/drm_ioctl.c
index 39afda0..d2ed3ba 100644
--- a/drivers/char/drm/drm_ioctl.c
+++ b/drivers/char/drm/drm_ioctl.c
@@ -208,7 +208,7 @@
 	map.size   = r_list->map->size;
 	map.type   = r_list->map->type;
 	map.flags  = r_list->map->flags;
-	map.handle = r_list->map->handle;
+	map.handle = (void *)(unsigned long) r_list->user_token;
 	map.mtrr   = r_list->map->mtrr;
 	up(&dev->struct_sem);
 
diff --git a/drivers/char/drm/drm_memory.c b/drivers/char/drm/drm_memory.c
index ace3d42..ff483fb 100644
--- a/drivers/char/drm/drm_memory.c
+++ b/drivers/char/drm/drm_memory.c
@@ -142,27 +142,31 @@
 
 #if __OS_HAS_AGP
 /** Wrapper around agp_allocate_memory() */
-DRM_AGP_MEM *drm_alloc_agp(struct agp_bridge_data *bridge, int pages, u32 type)
+DRM_AGP_MEM *drm_alloc_agp(drm_device_t *dev, int pages, u32 type)
 {
-	return drm_agp_allocate_memory(bridge, pages, type);
+	return drm_agp_allocate_memory(dev->agp->bridge, pages, type);
 }
+EXPORT_SYMBOL(drm_alloc_agp);
 
 /** Wrapper around agp_free_memory() */
 int drm_free_agp(DRM_AGP_MEM *handle, int pages)
 {
 	return drm_agp_free_memory(handle) ? 0 : -EINVAL;
 }
+EXPORT_SYMBOL(drm_free_agp);
 
 /** Wrapper around agp_bind_memory() */
 int drm_bind_agp(DRM_AGP_MEM *handle, unsigned int start)
 {
 	return drm_agp_bind_memory(handle, start);
 }
+EXPORT_SYMBOL(drm_bind_agp);
 
 /** Wrapper around agp_unbind_memory() */
 int drm_unbind_agp(DRM_AGP_MEM *handle)
 {
 	return drm_agp_unbind_memory(handle);
 }
+EXPORT_SYMBOL(drm_unbind_agp);
 #endif /* agp */
 #endif /* debug_memory */
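
Since drm_alloc_agp() now takes the drm_device_t itself (and the wrappers are exported), a driver no longer needs to reach into dev->agp->bridge. A hedged sketch of a caller; my_driver_agp_grab() and the memory type value are illustrative assumptions, not part of this patch:

    /* Illustrative only: allocate and bind 'pages' pages of AGP memory at
     * 'start_page' within the aperture.  Type 0 is assumed to be the
     * ordinary AGP memory type. */
    static DRM_AGP_MEM *my_driver_agp_grab(drm_device_t *dev, int pages,
                                           unsigned int start_page)
    {
            DRM_AGP_MEM *mem;

            mem = drm_alloc_agp(dev, pages, 0);
            if (!mem)
                    return NULL;

            if (drm_bind_agp(mem, start_page)) {
                    drm_free_agp(mem, pages);
                    return NULL;
            }
            return mem;
    }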
diff --git a/drivers/char/drm/drm_pci.c b/drivers/char/drm/drm_pci.c
index 192e876..09ed712 100644
--- a/drivers/char/drm/drm_pci.c
+++ b/drivers/char/drm/drm_pci.c
@@ -46,11 +46,11 @@
 /**
  * \brief Allocate a PCI consistent memory block, for DMA.
  */
-void *drm_pci_alloc(drm_device_t * dev, size_t size, size_t align,
-		    dma_addr_t maxaddr, dma_addr_t * busaddr)
+drm_dma_handle_t *drm_pci_alloc(drm_device_t * dev, size_t size, size_t align,
+				dma_addr_t maxaddr)
 {
-	void *address;
-#if DRM_DEBUG_MEMORY
+	drm_dma_handle_t *dmah;
+#ifdef DRM_DEBUG_MEMORY
 	int area = DRM_MEM_DMA;
 
 	spin_lock(&drm_mem_lock);
@@ -74,13 +74,19 @@
 		return NULL;
 	}
 
-	address = pci_alloc_consistent(dev->pdev, size, busaddr);
+	dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
+	if (!dmah)
+		return NULL;
+	
+	dmah->size = size;
+	dmah->vaddr = pci_alloc_consistent(dev->pdev, size, &dmah->busaddr);
 
-#if DRM_DEBUG_MEMORY
-	if (address == NULL) {
+#ifdef DRM_DEBUG_MEMORY
+	if (dmah->vaddr == NULL) {
 		spin_lock(&drm_mem_lock);
 		++drm_mem_stats[area].fail_count;
 		spin_unlock(&drm_mem_lock);
+		kfree(dmah);
 		return NULL;
 	}
 
@@ -90,37 +96,42 @@
 	drm_ram_used += size;
 	spin_unlock(&drm_mem_lock);
 #else
-	if (address == NULL)
+	if (dmah->vaddr == NULL) {
+		kfree(dmah);
 		return NULL;
+	}
 #endif
 
-	memset(address, 0, size);
+	memset(dmah->vaddr, 0, size);
 
-	return address;
+	return dmah;
 }
 EXPORT_SYMBOL(drm_pci_alloc);
 
 /**
- * \brief Free a PCI consistent memory block.
+ * \brief Free a PCI consistent memory block without freeing its descriptor.
+ *
+ * This function is for internal use in the Linux-specific DRM core code.
  */
 void
-drm_pci_free(drm_device_t * dev, size_t size, void *vaddr, dma_addr_t busaddr)
+__drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah)
 {
-#if DRM_DEBUG_MEMORY
+#ifdef DRM_DEBUG_MEMORY
 	int area = DRM_MEM_DMA;
 	int alloc_count;
 	int free_count;
 #endif
 
-	if (!vaddr) {
-#if DRM_DEBUG_MEMORY
+	if (!dmah->vaddr) {
+#ifdef DRM_DEBUG_MEMORY
 		DRM_MEM_ERROR(area, "Attempt to free address 0\n");
 #endif
 	} else {
-		pci_free_consistent(dev->pdev, size, vaddr, busaddr);
+		pci_free_consistent(dev->pdev, dmah->size, dmah->vaddr,
+				    dmah->busaddr);
 	}
 
-#if DRM_DEBUG_MEMORY
+#ifdef DRM_DEBUG_MEMORY
 	spin_lock(&drm_mem_lock);
 	free_count = ++drm_mem_stats[area].free_count;
 	alloc_count = drm_mem_stats[area].succeed_count;
@@ -135,6 +146,16 @@
 #endif
 
 }
+
+/**
+ * \brief Free a PCI consistent memory block
+ */
+void
+drm_pci_free(drm_device_t *dev, drm_dma_handle_t *dmah)
+{
+	__drm_pci_free(dev, dmah);
+	kfree(dmah);
+}
 EXPORT_SYMBOL(drm_pci_free);
 
 /*@}*/
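
The consistent-memory interface change above is easiest to see from the caller's side: drm_pci_alloc() now returns a drm_dma_handle_t describing the block (vaddr, busaddr, size) and drm_pci_free() takes that handle back, while __drm_pci_free() releases only the memory so drm_vm.c can reuse it for _DRM_CONSISTENT maps. A minimal sketch under those assumptions; my_driver_ring_init() is illustrative and not part of the patch:

    /* Illustrative only: allocate one page of consistent memory,
     * page-aligned and reachable below 4 GiB. */
    static int my_driver_ring_init(drm_device_t *dev, drm_dma_handle_t **out)
    {
            drm_dma_handle_t *dmah;

            dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffffUL);
            if (!dmah)
                    return -ENOMEM;

            /* dmah->vaddr is the kernel mapping, dmah->busaddr is what the
             * device gets programmed with; both stay valid until
             * drm_pci_free(dev, dmah) releases block and handle together. */
            *out = dmah;
            return 0;
    }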
diff --git a/drivers/char/drm/drm_pciids.h b/drivers/char/drm/drm_pciids.h
index 70ca4fa..58b1747 100644
--- a/drivers/char/drm/drm_pciids.h
+++ b/drivers/char/drm/drm_pciids.h
@@ -25,6 +25,8 @@
 	{0x1002, 0x4965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250}, \
 	{0x1002, 0x4966, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250}, \
 	{0x1002, 0x4967, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250}, \
+	{0x1002, 0x4A49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420}, \
+	{0x1002, 0x4A4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420}, \
 	{0x1002, 0x4C57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|CHIP_IS_MOBILITY}, \
 	{0x1002, 0x4C58, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV200|CHIP_IS_MOBILITY}, \
 	{0x1002, 0x4C59, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100|CHIP_IS_MOBILITY}, \
@@ -33,7 +35,17 @@
 	{0x1002, 0x4C65, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250|CHIP_IS_MOBILITY}, \
 	{0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250|CHIP_IS_MOBILITY}, \
 	{0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R250|CHIP_IS_MOBILITY}, \
+	{0x1002, 0x4E44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
+	{0x1002, 0x4E45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
+	{0x1002, 0x4E46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
+	{0x1002, 0x4E47, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
+	{0x1002, 0x4E48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
+	{0x1002, 0x4E49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
+	{0x1002, 0x4E4A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
+	{0x1002, 0x4E4B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R350}, \
 	{0x1002, 0x4E50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \
+	{0x1002, 0x4E51, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \
+	{0x1002, 0x4E54, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350|CHIP_IS_MOBILITY}, \
 	{0x1002, 0x5144, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \
 	{0x1002, 0x5145, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \
 	{0x1002, 0x5146, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R100|CHIP_SINGLE_CRTC}, \
@@ -56,6 +68,7 @@
 	{0x1002, 0x516A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
 	{0x1002, 0x516B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
 	{0x1002, 0x516C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R200}, \
+	{0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV350}, \
 	{0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP}, \
 	{0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP|CHIP_IS_MOBILITY}, \
 	{0x1002, 0x5836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|CHIP_IS_IGP}, \
@@ -116,9 +129,10 @@
 	{0, 0, 0}
 
 #define mga_PCI_IDS \
-	{0x102b, 0x0521, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x102b, 0x0525, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x102b, 0x2527, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+	{0x102b, 0x0520, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \
+	{0x102b, 0x0521, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \
+	{0x102b, 0x0525, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G400}, \
+	{0x102b, 0x2527, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G550}, \
 	{0, 0, 0}
 
 #define mach64_PCI_IDS \
@@ -162,9 +176,10 @@
 
 #define viadrv_PCI_IDS \
 	{0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+	{0x1106, 0x3118, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
 	{0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
 	{0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x1106, 0x7204, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+	{0x1106, 0x3108, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
 	{0, 0, 0}
 
 #define i810_PCI_IDS \
@@ -181,33 +196,30 @@
 	{0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
 	{0, 0, 0}
 
-#define gamma_PCI_IDS \
-	{0x3d3d, 0x0008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0, 0, 0}
-
 #define savage_PCI_IDS \
-	{0x5333, 0x8a22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8a23, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c10, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c11, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c13, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c24, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c2a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c2b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c2c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8c2f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8a25, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8a26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8d01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8d02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x5333, 0x8d04, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
+	{0x5333, 0x8a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
+	{0x5333, 0x8a21, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \
+	{0x5333, 0x8a22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE4}, \
+	{0x5333, 0x8a23, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE4}, \
+	{0x5333, 0x8c10, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
+	{0x5333, 0x8c11, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
+	{0x5333, 0x8c12, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
+	{0x5333, 0x8c13, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE_MX}, \
+	{0x5333, 0x8c22, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+	{0x5333, 0x8c24, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+	{0x5333, 0x8c26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+	{0x5333, 0x8c2a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+	{0x5333, 0x8c2b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+	{0x5333, 0x8c2c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+	{0x5333, 0x8c2d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+	{0x5333, 0x8c2e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+	{0x5333, 0x8c2f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SUPERSAVAGE}, \
+	{0x5333, 0x8a25, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGE}, \
+	{0x5333, 0x8a26, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGE}, \
+	{0x5333, 0x8d01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_TWISTER}, \
+	{0x5333, 0x8d02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_TWISTER}, \
+	{0x5333, 0x8d03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \
+	{0x5333, 0x8d04, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \
 	{0, 0, 0}
 
 #define ffb_PCI_IDS \
@@ -223,10 +235,3 @@
 	{0x8086, 0x2772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
 	{0, 0, 0}
 
-#define viadrv_PCI_IDS \
-	{0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x1106, 0x3122, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x1106, 0x7205, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0x1106, 0x7204, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
-	{0, 0, 0}
-
diff --git a/drivers/char/drm/drm_proc.c b/drivers/char/drm/drm_proc.c
index 4774087..32d2bb9 100644
--- a/drivers/char/drm/drm_proc.c
+++ b/drivers/char/drm/drm_proc.c
@@ -210,8 +210,8 @@
 
 				/* Hardcoded from _DRM_FRAME_BUFFER,
                                    _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
-                                   _DRM_SCATTER_GATHER. */
-	const char   *types[] = { "FB", "REG", "SHM", "AGP", "SG" };
+                                   _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
+	const char   *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
 	const char   *type;
 	int	     i;
 
@@ -229,16 +229,19 @@
 	if (dev->maplist != NULL) list_for_each(list, &dev->maplist->head) {
 		r_list = list_entry(list, drm_map_list_t, head);
 		map = r_list->map;
-		if(!map) continue;
-		if (map->type < 0 || map->type > 4) type = "??";
-		else				    type = types[map->type];
-		DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s  0x%02x 0x%08lx ",
+		if(!map)
+			continue;
+		if (map->type < 0 || map->type > 5)
+			type = "??";
+		else
+			type = types[map->type];
+		DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s  0x%02x 0x%08x ",
 			       i,
 			       map->offset,
 			       map->size,
 			       type,
 			       map->flags,
-			       (unsigned long)map->handle);
+			       r_list->user_token);
 		if (map->mtrr < 0) {
 			DRM_PROC_PRINT("none\n");
 		} else {
diff --git a/drivers/char/drm/drm_scatter.c b/drivers/char/drm/drm_scatter.c
index 54fddb6..ed267d4 100644
--- a/drivers/char/drm/drm_scatter.c
+++ b/drivers/char/drm/drm_scatter.c
@@ -61,6 +61,12 @@
 		   DRM_MEM_SGLISTS );
 }
 
+#ifdef _LP64
+# define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1)))
+#else
+# define ScatterHandle(x) (unsigned int)(x)
+#endif
+
 int drm_sg_alloc( struct inode *inode, struct file *filp,
 		   unsigned int cmd, unsigned long arg )
 {
@@ -133,12 +139,13 @@
 	 */
 	memset( entry->virtual, 0, pages << PAGE_SHIFT );
 
-	entry->handle = (unsigned long)entry->virtual;
+	entry->handle = ScatterHandle((unsigned long)entry->virtual);
 
 	DRM_DEBUG( "sg alloc handle  = %08lx\n", entry->handle );
 	DRM_DEBUG( "sg alloc virtual = %p\n", entry->virtual );
 
-	for ( i = entry->handle, j = 0 ; j < pages ; i += PAGE_SIZE, j++ ) {
+	for (i = (unsigned long)entry->virtual, j = 0; j < pages; 
+		i += PAGE_SIZE, j++) {
 		entry->pagelist[j] = vmalloc_to_page((void *)i);
 		if (!entry->pagelist[j])
 			goto failed;
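
A short illustration (not from the patch) of what ScatterHandle() does on a 64-bit kernel, where the vmalloc() address no longer fits in the 32-bit handle used as the scatter/gather map token:

    /*
     * _LP64 case: fold the 64-bit virtual address into 32 bits by adding
     * its high and low halves, e.g.
     *
     *   x              = 0xffffc20000010000
     *   x >> 32        = 0xffffc200
     *   x & 0xffffffff = 0x00010000
     *   ScatterHandle(x) == 0xffffc200 + 0x00010000 == 0xffffd200
     *
     * The result is only a lookup token; the page tables are still built
     * from entry->virtual, which is why the loop above now iterates over
     * the real virtual address instead of the handle.
     */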
diff --git a/drivers/char/drm/drm_stub.c b/drivers/char/drm/drm_stub.c
index 48829a1..95a976c 100644
--- a/drivers/char/drm/drm_stub.c
+++ b/drivers/char/drm/drm_stub.c
@@ -75,6 +75,11 @@
 	dev->pci_func = PCI_FUNC(pdev->devfn);
 	dev->irq = pdev->irq;
 
+	dev->maplist = drm_calloc(1, sizeof(*dev->maplist), DRM_MEM_MAPS);
+	if (dev->maplist == NULL)
+		return -ENOMEM;
+	INIT_LIST_HEAD(&dev->maplist->head);
+
 	/* the DRM has 6 basic counters */
 	dev->counters = 6;
 	dev->types[0]  = _DRM_STAT_LOCK;
@@ -91,7 +96,8 @@
 			goto error_out_unreg;
 
 	if (drm_core_has_AGP(dev)) {
-		dev->agp = drm_agp_init(dev);
+		if (drm_device_is_agp(dev))
+			dev->agp = drm_agp_init(dev);
 		if (drm_core_check_feature(dev, DRIVER_REQUIRE_AGP) && (dev->agp == NULL)) {
 			DRM_ERROR( "Cannot initialize the agpgart module.\n" );
 			retcode = -EINVAL;
diff --git a/drivers/char/drm/drm_vm.c b/drivers/char/drm/drm_vm.c
index 621220f..ced4215 100644
--- a/drivers/char/drm/drm_vm.c
+++ b/drivers/char/drm/drm_vm.c
@@ -73,12 +73,13 @@
 		r_list = list_entry(list, drm_map_list_t, head);
 		map = r_list->map;
 		if (!map) continue;
-		if (map->offset == VM_OFFSET(vma)) break;
+		if (r_list->user_token == VM_OFFSET(vma))
+			break;
 	}
 
 	if (map && map->type == _DRM_AGP) {
 		unsigned long offset = address - vma->vm_start;
-		unsigned long baddr = VM_OFFSET(vma) + offset;
+		unsigned long baddr = map->offset + offset;
 		struct drm_agp_mem *agpmem;
 		struct page *page;
 
@@ -210,6 +211,8 @@
 		}
 
 		if(!found_maps) {
+			drm_dma_handle_t dmah;
+
 			switch (map->type) {
 			case _DRM_REGISTERS:
 			case _DRM_FRAME_BUFFER:
@@ -228,6 +231,12 @@
 			case _DRM_AGP:
 			case _DRM_SCATTER_GATHER:
 				break;
+			case _DRM_CONSISTENT:
+				dmah.vaddr = map->handle;
+				dmah.busaddr = map->offset;
+				dmah.size = map->size;
+				__drm_pci_free(dev, &dmah);
+				break;
 			}
 			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
 		}
@@ -296,7 +305,7 @@
 
 
 	offset = address - vma->vm_start;
-	map_offset = map->offset - dev->sg->handle;
+	map_offset = map->offset - (unsigned long)dev->sg->virtual;
 	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
 	page = entry->pagelist[page_offset];
 	get_page(page);
@@ -305,8 +314,6 @@
 }
 
 
-#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)
-
 static struct page *drm_vm_nopage(struct vm_area_struct *vma,
 				   unsigned long address,
 				   int *type) {
@@ -335,35 +342,6 @@
 	return drm_do_vm_sg_nopage(vma, address);
 }
 
-#else	/* LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,0) */
-
-static struct page *drm_vm_nopage(struct vm_area_struct *vma,
-				   unsigned long address,
-				   int unused) {
-	return drm_do_vm_nopage(vma, address);
-}
-
-static struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
-				       unsigned long address,
-				       int unused) {
-	return drm_do_vm_shm_nopage(vma, address);
-}
-
-static struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
-				       unsigned long address,
-				       int unused) {
-	return drm_do_vm_dma_nopage(vma, address);
-}
-
-static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
-				      unsigned long address,
-				      int unused) {
-	return drm_do_vm_sg_nopage(vma, address);
-}
-
-#endif
-
-
 /** AGP virtual memory operations */
 static struct vm_operations_struct   drm_vm_ops = {
 	.nopage = drm_vm_nopage,
@@ -487,11 +465,7 @@
 
 	vma->vm_ops   = &drm_vm_dma_ops;
 
-#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
-	vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
-#else
 	vma->vm_flags |= VM_RESERVED; /* Don't swap */
-#endif
 
 	vma->vm_file  =	 filp;	/* Needed for drm_vm_open() */
 	drm_vm_open(vma);
@@ -560,13 +534,12 @@
 				   for performance, even if the list was a
 				   bit longer. */
 	list_for_each(list, &dev->maplist->head) {
-		unsigned long off;
 
 		r_list = list_entry(list, drm_map_list_t, head);
 		map = r_list->map;
 		if (!map) continue;
-		off = dev->driver->get_map_ofs(map);
-		if (off == VM_OFFSET(vma)) break;
+		if (r_list->user_token == VM_OFFSET(vma))
+			break;
 	}
 
 	if (!map || ((map->flags&_DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
@@ -605,17 +578,17 @@
                 /* fall through to _DRM_FRAME_BUFFER... */        
 	case _DRM_FRAME_BUFFER:
 	case _DRM_REGISTERS:
-		if (VM_OFFSET(vma) >= __pa(high_memory)) {
 #if defined(__i386__) || defined(__x86_64__)
-			if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) {
-				pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
-				pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
-			}
-#elif defined(__powerpc__)
-			pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE | _PAGE_GUARDED;
-#endif
-			vma->vm_flags |= VM_IO;	/* not in core dump */
+		if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) {
+			pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
+			pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
 		}
+#elif defined(__powerpc__)
+		pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
+		if (map->type == _DRM_REGISTERS)
+			pgprot_val(vma->vm_page_prot) |= _PAGE_GUARDED;
+#endif
+		vma->vm_flags |= VM_IO;	/* not in core dump */
 #if defined(__ia64__)
 		if (efi_range_is_wc(vma->vm_start, vma->vm_end -
 				    vma->vm_start))
@@ -628,12 +601,12 @@
 		offset = dev->driver->get_reg_ofs(dev);
 #ifdef __sparc__
 		if (io_remap_pfn_range(DRM_RPR_ARG(vma) vma->vm_start,
-					(VM_OFFSET(vma) + offset) >> PAGE_SHIFT,
+					(map->offset + offset) >> PAGE_SHIFT,
 					vma->vm_end - vma->vm_start,
 					vma->vm_page_prot))
 #else
 		if (io_remap_pfn_range(vma, vma->vm_start,
-				     (VM_OFFSET(vma) + offset) >> PAGE_SHIFT,
+				     (map->offset + offset) >> PAGE_SHIFT,
 				     vma->vm_end - vma->vm_start,
 				     vma->vm_page_prot))
 #endif
@@ -641,37 +614,28 @@
 		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
 			  " offset = 0x%lx\n",
 			  map->type,
-			  vma->vm_start, vma->vm_end, VM_OFFSET(vma) + offset);
+			  vma->vm_start, vma->vm_end, map->offset + offset);
 		vma->vm_ops = &drm_vm_ops;
 		break;
 	case _DRM_SHM:
+	case _DRM_CONSISTENT:
+		/* Consistent memory is really like shared memory. It's only
+		 * allocated in a different way. */
 		vma->vm_ops = &drm_vm_shm_ops;
 		vma->vm_private_data = (void *)map;
 				/* Don't let this area swap.  Change when
 				   DRM_KERNEL advisory is supported. */
-#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
-		vma->vm_flags |= VM_LOCKED;
-#else
 		vma->vm_flags |= VM_RESERVED;
-#endif
 		break;
 	case _DRM_SCATTER_GATHER:
 		vma->vm_ops = &drm_vm_sg_ops;
 		vma->vm_private_data = (void *)map;
-#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
-		vma->vm_flags |= VM_LOCKED;
-#else
 		vma->vm_flags |= VM_RESERVED;
-#endif
                 break;
 	default:
 		return -EINVAL;	/* This should never happen. */
 	}
-#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
-	vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */
-#else
 	vma->vm_flags |= VM_RESERVED; /* Don't swap */
-#endif
 
 	vma->vm_file  =	 filp;	/* Needed for drm_vm_open() */
 	drm_vm_open(vma);
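
With drm_mmap() above matching on r_list->user_token, the offset that user space passes to mmap() is the token published for the map (what drm_getmap() in drm_ioctl.c now reports as the handle), not the raw bus or physical offset. A hedged user-space sketch under that assumption; map_drm_region() is illustrative only:

    #include <sys/mman.h>
    #include <stddef.h>

    /* drm_fd, user_token and map_size are assumed to come from the usual
     * open() / DRM_IOCTL_GET_MAP sequence. */
    static void *map_drm_region(int drm_fd, unsigned long user_token,
                                size_t map_size)
    {
            void *ptr = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
                             MAP_SHARED, drm_fd, (off_t)user_token);
            return (ptr == MAP_FAILED) ? NULL : ptr;
    }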
diff --git a/drivers/char/drm/ffb_drv.c b/drivers/char/drm/ffb_drv.c
index ec614ff..1bd0d55 100644
--- a/drivers/char/drm/ffb_drv.c
+++ b/drivers/char/drm/ffb_drv.c
@@ -152,14 +152,11 @@
 		return NULL;
 
 	list_for_each(list, &dev->maplist->head) {
-		unsigned long uoff;
-
 		r_list = (drm_map_list_t *)list;
 		map = r_list->map;
 		if (!map)
 			continue;
-		uoff = (map->offset & 0xffffffff);
-		if (uoff == off)
+		if (r_list->user_token == off)
 			return map;
 	}
 
diff --git a/drivers/char/drm/gamma_context.h b/drivers/char/drm/gamma_context.h
deleted file mode 100644
index d11b507..0000000
--- a/drivers/char/drm/gamma_context.h
+++ /dev/null
@@ -1,492 +0,0 @@
-/* drm_context.h -- IOCTLs for generic contexts -*- linux-c -*-
- * Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com
- *
- * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- *    Rickard E. (Rik) Faith <faith@valinux.com>
- *    Gareth Hughes <gareth@valinux.com>
- * ChangeLog:
- *  2001-11-16	Torsten Duwe <duwe@caldera.de>
- *		added context constructor/destructor hooks,
- *		needed by SiS driver's memory management.
- */
-
-/* ================================================================
- * Old-style context support -- only used by gamma.  
- */
-
-
-/* The drm_read and drm_write_string code (especially that which manages
-   the circular buffer), is based on Alessandro Rubini's LINUX DEVICE
-   DRIVERS (Cambridge: O'Reilly, 1998), pages 111-113. */
-
-ssize_t gamma_fops_read(struct file *filp, char __user *buf, size_t count, loff_t *off)
-{
-	drm_file_t    *priv   = filp->private_data;
-	drm_device_t  *dev    = priv->dev;
-	int	      left;
-	int	      avail;
-	int	      send;
-	int	      cur;
-
-	DRM_DEBUG("%p, %p\n", dev->buf_rp, dev->buf_wp);
-
-	while (dev->buf_rp == dev->buf_wp) {
-		DRM_DEBUG("  sleeping\n");
-		if (filp->f_flags & O_NONBLOCK) {
-			return -EAGAIN;
-		}
-		interruptible_sleep_on(&dev->buf_readers);
-		if (signal_pending(current)) {
-			DRM_DEBUG("  interrupted\n");
-			return -ERESTARTSYS;
-		}
-		DRM_DEBUG("  awake\n");
-	}
-
-	left  = (dev->buf_rp + DRM_BSZ - dev->buf_wp) % DRM_BSZ;
-	avail = DRM_BSZ - left;
-	send  = DRM_MIN(avail, count);
-
-	while (send) {
-		if (dev->buf_wp > dev->buf_rp) {
-			cur = DRM_MIN(send, dev->buf_wp - dev->buf_rp);
-		} else {
-			cur = DRM_MIN(send, dev->buf_end - dev->buf_rp);
-		}
-		if (copy_to_user(buf, dev->buf_rp, cur))
-			return -EFAULT;
-		dev->buf_rp += cur;
-		if (dev->buf_rp == dev->buf_end) dev->buf_rp = dev->buf;
-		send -= cur;
-	}
-
-	wake_up_interruptible(&dev->buf_writers);
-	return DRM_MIN(avail, count);
-}
-
-
-/* In an incredibly convoluted setup, the kernel module actually calls
- * back into the X server to perform context switches on behalf of the
- * 3d clients.
- */
-int DRM(write_string)(drm_device_t *dev, const char *s)
-{
-	int left   = (dev->buf_rp + DRM_BSZ - dev->buf_wp) % DRM_BSZ;
-	int send   = strlen(s);
-	int count;
-
-	DRM_DEBUG("%d left, %d to send (%p, %p)\n",
-		  left, send, dev->buf_rp, dev->buf_wp);
-
-	if (left == 1 || dev->buf_wp != dev->buf_rp) {
-		DRM_ERROR("Buffer not empty (%d left, wp = %p, rp = %p)\n",
-			  left,
-			  dev->buf_wp,
-			  dev->buf_rp);
-	}
-
-	while (send) {
-		if (dev->buf_wp >= dev->buf_rp) {
-			count = DRM_MIN(send, dev->buf_end - dev->buf_wp);
-			if (count == left) --count; /* Leave a hole */
-		} else {
-			count = DRM_MIN(send, dev->buf_rp - dev->buf_wp - 1);
-		}
-		strncpy(dev->buf_wp, s, count);
-		dev->buf_wp += count;
-		if (dev->buf_wp == dev->buf_end) dev->buf_wp = dev->buf;
-		send -= count;
-	}
-
-	if (dev->buf_async) kill_fasync(&dev->buf_async, SIGIO, POLL_IN);
-
-	DRM_DEBUG("waking\n");
-	wake_up_interruptible(&dev->buf_readers);
-	return 0;
-}
-
-unsigned int gamma_fops_poll(struct file *filp, struct poll_table_struct *wait)
-{
-	drm_file_t   *priv = filp->private_data;
-	drm_device_t *dev  = priv->dev;
-
-	poll_wait(filp, &dev->buf_readers, wait);
-	if (dev->buf_wp != dev->buf_rp) return POLLIN | POLLRDNORM;
-	return 0;
-}
-
-int DRM(context_switch)(drm_device_t *dev, int old, int new)
-{
-	char	    buf[64];
-	drm_queue_t *q;
-
-	if (test_and_set_bit(0, &dev->context_flag)) {
-		DRM_ERROR("Reentering -- FIXME\n");
-		return -EBUSY;
-	}
-
-	DRM_DEBUG("Context switch from %d to %d\n", old, new);
-
-	if (new >= dev->queue_count) {
-		clear_bit(0, &dev->context_flag);
-		return -EINVAL;
-	}
-
-	if (new == dev->last_context) {
-		clear_bit(0, &dev->context_flag);
-		return 0;
-	}
-
-	q = dev->queuelist[new];
-	atomic_inc(&q->use_count);
-	if (atomic_read(&q->use_count) == 1) {
-		atomic_dec(&q->use_count);
-		clear_bit(0, &dev->context_flag);
-		return -EINVAL;
-	}
-
-	/* This causes the X server to wake up & do a bunch of hardware
-	 * interaction to actually effect the context switch.
-	 */
-	sprintf(buf, "C %d %d\n", old, new);
-	DRM(write_string)(dev, buf);
-
-	atomic_dec(&q->use_count);
-
-	return 0;
-}
-
-int DRM(context_switch_complete)(drm_device_t *dev, int new)
-{
-	drm_device_dma_t *dma = dev->dma;
-
-	dev->last_context = new;  /* PRE/POST: This is the _only_ writer. */
-	dev->last_switch  = jiffies;
-
-	if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
-		DRM_ERROR("Lock isn't held after context switch\n");
-	}
-
-	if (!dma || !(dma->next_buffer && dma->next_buffer->while_locked)) {
-		if (DRM(lock_free)(dev, &dev->lock.hw_lock->lock,
-				  DRM_KERNEL_CONTEXT)) {
-			DRM_ERROR("Cannot free lock\n");
-		}
-	}
-
-	clear_bit(0, &dev->context_flag);
-	wake_up_interruptible(&dev->context_wait);
-
-	return 0;
-}
-
-static int DRM(init_queue)(drm_device_t *dev, drm_queue_t *q, drm_ctx_t *ctx)
-{
-	DRM_DEBUG("\n");
-
-	if (atomic_read(&q->use_count) != 1
-	    || atomic_read(&q->finalization)
-	    || atomic_read(&q->block_count)) {
-		DRM_ERROR("New queue is already in use: u%d f%d b%d\n",
-			  atomic_read(&q->use_count),
-			  atomic_read(&q->finalization),
-			  atomic_read(&q->block_count));
-	}
-
-	atomic_set(&q->finalization,  0);
-	atomic_set(&q->block_count,   0);
-	atomic_set(&q->block_read,    0);
-	atomic_set(&q->block_write,   0);
-	atomic_set(&q->total_queued,  0);
-	atomic_set(&q->total_flushed, 0);
-	atomic_set(&q->total_locks,   0);
-
-	init_waitqueue_head(&q->write_queue);
-	init_waitqueue_head(&q->read_queue);
-	init_waitqueue_head(&q->flush_queue);
-
-	q->flags = ctx->flags;
-
-	DRM(waitlist_create)(&q->waitlist, dev->dma->buf_count);
-
-	return 0;
-}
-
-
-/* drm_alloc_queue:
-PRE: 1) dev->queuelist[0..dev->queue_count] is allocated and will not
-	disappear (so all deallocation must be done after IOCTLs are off)
-     2) dev->queue_count < dev->queue_slots
-     3) dev->queuelist[i].use_count == 0 and
-	dev->queuelist[i].finalization == 0 if i not in use
-POST: 1) dev->queuelist[i].use_count == 1
-      2) dev->queue_count < dev->queue_slots */
-
-static int DRM(alloc_queue)(drm_device_t *dev)
-{
-	int	    i;
-	drm_queue_t *queue;
-	int	    oldslots;
-	int	    newslots;
-				/* Check for a free queue */
-	for (i = 0; i < dev->queue_count; i++) {
-		atomic_inc(&dev->queuelist[i]->use_count);
-		if (atomic_read(&dev->queuelist[i]->use_count) == 1
-		    && !atomic_read(&dev->queuelist[i]->finalization)) {
-			DRM_DEBUG("%d (free)\n", i);
-			return i;
-		}
-		atomic_dec(&dev->queuelist[i]->use_count);
-	}
-				/* Allocate a new queue */
-	down(&dev->struct_sem);
-
-	queue = DRM(alloc)(sizeof(*queue), DRM_MEM_QUEUES);
-	memset(queue, 0, sizeof(*queue));
-	atomic_set(&queue->use_count, 1);
-
-	++dev->queue_count;
-	if (dev->queue_count >= dev->queue_slots) {
-		oldslots = dev->queue_slots * sizeof(*dev->queuelist);
-		if (!dev->queue_slots) dev->queue_slots = 1;
-		dev->queue_slots *= 2;
-		newslots = dev->queue_slots * sizeof(*dev->queuelist);
-
-		dev->queuelist = DRM(realloc)(dev->queuelist,
-					      oldslots,
-					      newslots,
-					      DRM_MEM_QUEUES);
-		if (!dev->queuelist) {
-			up(&dev->struct_sem);
-			DRM_DEBUG("out of memory\n");
-			return -ENOMEM;
-		}
-	}
-	dev->queuelist[dev->queue_count-1] = queue;
-
-	up(&dev->struct_sem);
-	DRM_DEBUG("%d (new)\n", dev->queue_count - 1);
-	return dev->queue_count - 1;
-}
-
-int DRM(resctx)(struct inode *inode, struct file *filp,
-		unsigned int cmd, unsigned long arg)
-{
-	drm_ctx_res_t __user *argp = (void __user *)arg;
-	drm_ctx_res_t	res;
-	drm_ctx_t	ctx;
-	int		i;
-
-	DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
-	if (copy_from_user(&res, argp, sizeof(res)))
-		return -EFAULT;
-	if (res.count >= DRM_RESERVED_CONTEXTS) {
-		memset(&ctx, 0, sizeof(ctx));
-		for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
-			ctx.handle = i;
-			if (copy_to_user(&res.contexts[i],
-					 &i,
-					 sizeof(i)))
-				return -EFAULT;
-		}
-	}
-	res.count = DRM_RESERVED_CONTEXTS;
-	if (copy_to_user(argp, &res, sizeof(res)))
-		return -EFAULT;
-	return 0;
-}
-
-int DRM(addctx)(struct inode *inode, struct file *filp,
-		unsigned int cmd, unsigned long arg)
-{
-	drm_file_t	*priv	= filp->private_data;
-	drm_device_t	*dev	= priv->dev;
-	drm_ctx_t	ctx;
-	drm_ctx_t	__user *argp = (void __user *)arg;
-
-	if (copy_from_user(&ctx, argp, sizeof(ctx)))
-		return -EFAULT;
-	if ((ctx.handle = DRM(alloc_queue)(dev)) == DRM_KERNEL_CONTEXT) {
-				/* Init kernel's context and get a new one. */
-		DRM(init_queue)(dev, dev->queuelist[ctx.handle], &ctx);
-		ctx.handle = DRM(alloc_queue)(dev);
-	}
-	DRM(init_queue)(dev, dev->queuelist[ctx.handle], &ctx);
-	DRM_DEBUG("%d\n", ctx.handle);
-	if (copy_to_user(argp, &ctx, sizeof(ctx)))
-		return -EFAULT;
-	return 0;
-}
-
-int DRM(modctx)(struct inode *inode, struct file *filp,
-		unsigned int cmd, unsigned long arg)
-{
-	drm_file_t	*priv	= filp->private_data;
-	drm_device_t	*dev	= priv->dev;
-	drm_ctx_t	ctx;
-	drm_queue_t	*q;
-
-	if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
-		return -EFAULT;
-
-	DRM_DEBUG("%d\n", ctx.handle);
-
-	if (ctx.handle < 0 || ctx.handle >= dev->queue_count) return -EINVAL;
-	q = dev->queuelist[ctx.handle];
-
-	atomic_inc(&q->use_count);
-	if (atomic_read(&q->use_count) == 1) {
-				/* No longer in use */
-		atomic_dec(&q->use_count);
-		return -EINVAL;
-	}
-
-	if (DRM_BUFCOUNT(&q->waitlist)) {
-		atomic_dec(&q->use_count);
-		return -EBUSY;
-	}
-
-	q->flags = ctx.flags;
-
-	atomic_dec(&q->use_count);
-	return 0;
-}
-
-int DRM(getctx)(struct inode *inode, struct file *filp,
-		unsigned int cmd, unsigned long arg)
-{
-	drm_file_t	*priv	= filp->private_data;
-	drm_device_t	*dev	= priv->dev;
-	drm_ctx_t	__user *argp = (void __user *)arg;
-	drm_ctx_t	ctx;
-	drm_queue_t	*q;
-
-	if (copy_from_user(&ctx, argp, sizeof(ctx)))
-		return -EFAULT;
-
-	DRM_DEBUG("%d\n", ctx.handle);
-
-	if (ctx.handle >= dev->queue_count) return -EINVAL;
-	q = dev->queuelist[ctx.handle];
-
-	atomic_inc(&q->use_count);
-	if (atomic_read(&q->use_count) == 1) {
-				/* No longer in use */
-		atomic_dec(&q->use_count);
-		return -EINVAL;
-	}
-
-	ctx.flags = q->flags;
-	atomic_dec(&q->use_count);
-
-	if (copy_to_user(argp, &ctx, sizeof(ctx)))
-		return -EFAULT;
-
-	return 0;
-}
-
-int DRM(switchctx)(struct inode *inode, struct file *filp,
-		   unsigned int cmd, unsigned long arg)
-{
-	drm_file_t	*priv	= filp->private_data;
-	drm_device_t	*dev	= priv->dev;
-	drm_ctx_t	ctx;
-
-	if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
-		return -EFAULT;
-	DRM_DEBUG("%d\n", ctx.handle);
-	return DRM(context_switch)(dev, dev->last_context, ctx.handle);
-}
-
-int DRM(newctx)(struct inode *inode, struct file *filp,
-		unsigned int cmd, unsigned long arg)
-{
-	drm_file_t	*priv	= filp->private_data;
-	drm_device_t	*dev	= priv->dev;
-	drm_ctx_t	ctx;
-
-	if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
-		return -EFAULT;
-	DRM_DEBUG("%d\n", ctx.handle);
-	DRM(context_switch_complete)(dev, ctx.handle);
-
-	return 0;
-}
-
-int DRM(rmctx)(struct inode *inode, struct file *filp,
-	       unsigned int cmd, unsigned long arg)
-{
-	drm_file_t	*priv	= filp->private_data;
-	drm_device_t	*dev	= priv->dev;
-	drm_ctx_t	ctx;
-	drm_queue_t	*q;
-	drm_buf_t	*buf;
-
-	if (copy_from_user(&ctx, (drm_ctx_t __user *)arg, sizeof(ctx)))
-		return -EFAULT;
-	DRM_DEBUG("%d\n", ctx.handle);
-
-	if (ctx.handle >= dev->queue_count) return -EINVAL;
-	q = dev->queuelist[ctx.handle];
-
-	atomic_inc(&q->use_count);
-	if (atomic_read(&q->use_count) == 1) {
-				/* No longer in use */
-		atomic_dec(&q->use_count);
-		return -EINVAL;
-	}
-
-	atomic_inc(&q->finalization); /* Mark queue in finalization state */
-	atomic_sub(2, &q->use_count); /* Mark queue as unused (pending
-					 finalization) */
-
-	while (test_and_set_bit(0, &dev->interrupt_flag)) {
-		schedule();
-		if (signal_pending(current)) {
-			clear_bit(0, &dev->interrupt_flag);
-			return -EINTR;
-		}
-	}
-				/* Remove queued buffers */
-	while ((buf = DRM(waitlist_get)(&q->waitlist))) {
-		DRM(free_buffer)(dev, buf);
-	}
-	clear_bit(0, &dev->interrupt_flag);
-
-				/* Wakeup blocked processes */
-	wake_up_interruptible(&q->read_queue);
-	wake_up_interruptible(&q->write_queue);
-	wake_up_interruptible(&q->flush_queue);
-
-				/* Finalization over.  Queue is made
-				   available when both use_count and
-				   finalization become 0, which won't
-				   happen until all the waiting processes
-				   stop waiting. */
-	atomic_dec(&q->finalization);
-	return 0;
-}
-
diff --git a/drivers/char/drm/gamma_dma.c b/drivers/char/drm/gamma_dma.c
deleted file mode 100644
index e486fb8..0000000
--- a/drivers/char/drm/gamma_dma.c
+++ /dev/null
@@ -1,946 +0,0 @@
-/* gamma_dma.c -- DMA support for GMX 2000 -*- linux-c -*-
- * Created: Fri Mar 19 14:30:16 1999 by faith@precisioninsight.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- *    Rickard E. (Rik) Faith <faith@valinux.com>
- *
- */
-
-#include "gamma.h"
-#include "drmP.h"
-#include "drm.h"
-#include "gamma_drm.h"
-#include "gamma_drv.h"
-
-#include <linux/interrupt.h>	/* For task queue support */
-#include <linux/delay.h>
-
-static inline void gamma_dma_dispatch(drm_device_t *dev, unsigned long address,
-				      unsigned long length)
-{
-	drm_gamma_private_t *dev_priv =
-				(drm_gamma_private_t *)dev->dev_private;
-	mb();
-	while ( GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
-		cpu_relax();
-
-	GAMMA_WRITE(GAMMA_DMAADDRESS, address);
-
-	while (GAMMA_READ(GAMMA_GCOMMANDSTATUS) != 4)
-		cpu_relax();
-
-	GAMMA_WRITE(GAMMA_DMACOUNT, length / 4);
-}
-
-void gamma_dma_quiescent_single(drm_device_t *dev)
-{
-	drm_gamma_private_t *dev_priv =
-				(drm_gamma_private_t *)dev->dev_private;
-	while (GAMMA_READ(GAMMA_DMACOUNT))
-		cpu_relax();
-
-	while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
-		cpu_relax();
-
-	GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10);
-	GAMMA_WRITE(GAMMA_SYNC, 0);
-
-	do {
-		while (!GAMMA_READ(GAMMA_OUTFIFOWORDS))
-			cpu_relax();
-	} while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG);
-}
-
-void gamma_dma_quiescent_dual(drm_device_t *dev)
-{
-	drm_gamma_private_t *dev_priv =
-				(drm_gamma_private_t *)dev->dev_private;
-	while (GAMMA_READ(GAMMA_DMACOUNT))
-		cpu_relax();
-
-	while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
-		cpu_relax();
-
-	GAMMA_WRITE(GAMMA_BROADCASTMASK, 3);
-	GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10);
-	GAMMA_WRITE(GAMMA_SYNC, 0);
-
-	/* Read from first MX */
-	do {
-		while (!GAMMA_READ(GAMMA_OUTFIFOWORDS))
-			cpu_relax();
-	} while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG);
-
-	/* Read from second MX */
-	do {
-		while (!GAMMA_READ(GAMMA_OUTFIFOWORDS + 0x10000))
-			cpu_relax();
-	} while (GAMMA_READ(GAMMA_OUTPUTFIFO + 0x10000) != GAMMA_SYNC_TAG);
-}
-
-void gamma_dma_ready(drm_device_t *dev)
-{
-	drm_gamma_private_t *dev_priv =
-				(drm_gamma_private_t *)dev->dev_private;
-	while (GAMMA_READ(GAMMA_DMACOUNT))
-		cpu_relax();
-}
-
-static inline int gamma_dma_is_ready(drm_device_t *dev)
-{
-	drm_gamma_private_t *dev_priv =
-				(drm_gamma_private_t *)dev->dev_private;
-	return (!GAMMA_READ(GAMMA_DMACOUNT));
-}
-
-irqreturn_t gamma_driver_irq_handler( DRM_IRQ_ARGS )
-{
-	drm_device_t	 *dev = (drm_device_t *)arg;
-	drm_device_dma_t *dma = dev->dma;
-	drm_gamma_private_t *dev_priv =
-				(drm_gamma_private_t *)dev->dev_private;
-
-	/* FIXME: should check whether we're actually interested in the interrupt? */
-	atomic_inc(&dev->counts[6]); /* _DRM_STAT_IRQ */
-
-	while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
-		cpu_relax();
-
-	GAMMA_WRITE(GAMMA_GDELAYTIMER, 0xc350/2); /* 0x05S */
-	GAMMA_WRITE(GAMMA_GCOMMANDINTFLAGS, 8);
-	GAMMA_WRITE(GAMMA_GINTFLAGS, 0x2001);
-	if (gamma_dma_is_ready(dev)) {
-				/* Free previous buffer */
-		if (test_and_set_bit(0, &dev->dma_flag))
-			return IRQ_HANDLED;
-		if (dma->this_buffer) {
-			gamma_free_buffer(dev, dma->this_buffer);
-			dma->this_buffer = NULL;
-		}
-		clear_bit(0, &dev->dma_flag);
-
-		/* Dispatch new buffer */
-		schedule_work(&dev->work);
-	}
-	return IRQ_HANDLED;
-}
-
-/* Only called by gamma_dma_schedule. */
-static int gamma_do_dma(drm_device_t *dev, int locked)
-{
-	unsigned long	 address;
-	unsigned long	 length;
-	drm_buf_t	 *buf;
-	int		 retcode = 0;
-	drm_device_dma_t *dma = dev->dma;
-
-	if (test_and_set_bit(0, &dev->dma_flag)) return -EBUSY;
-
-
-	if (!dma->next_buffer) {
-		DRM_ERROR("No next_buffer\n");
-		clear_bit(0, &dev->dma_flag);
-		return -EINVAL;
-	}
-
-	buf	= dma->next_buffer;
-	/* WE NOW ARE ON LOGICAL PAGES!! - using page table setup in dma_init */
-	/* So we pass the buffer index value into the physical page offset */
-	address = buf->idx << 12;
-	length	= buf->used;
-
-	DRM_DEBUG("context %d, buffer %d (%ld bytes)\n",
-		  buf->context, buf->idx, length);
-
-	if (buf->list == DRM_LIST_RECLAIM) {
-		gamma_clear_next_buffer(dev);
-		gamma_free_buffer(dev, buf);
-		clear_bit(0, &dev->dma_flag);
-		return -EINVAL;
-	}
-
-	if (!length) {
-		DRM_ERROR("0 length buffer\n");
-		gamma_clear_next_buffer(dev);
-		gamma_free_buffer(dev, buf);
-		clear_bit(0, &dev->dma_flag);
-		return 0;
-	}
-
-	if (!gamma_dma_is_ready(dev)) {
-		clear_bit(0, &dev->dma_flag);
-		return -EBUSY;
-	}
-
-	if (buf->while_locked) {
-		if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
-			DRM_ERROR("Dispatching buffer %d from pid %d"
-				  " \"while locked\", but no lock held\n",
-				  buf->idx, current->pid);
-		}
-	} else {
-		if (!locked && !gamma_lock_take(&dev->lock.hw_lock->lock,
-					      DRM_KERNEL_CONTEXT)) {
-			clear_bit(0, &dev->dma_flag);
-			return -EBUSY;
-		}
-	}
-
-	if (dev->last_context != buf->context
-	    && !(dev->queuelist[buf->context]->flags
-		 & _DRM_CONTEXT_PRESERVED)) {
-				/* PRE: dev->last_context != buf->context */
-		if (DRM(context_switch)(dev, dev->last_context,
-					buf->context)) {
-			DRM(clear_next_buffer)(dev);
-			DRM(free_buffer)(dev, buf);
-		}
-		retcode = -EBUSY;
-		goto cleanup;
-
-				/* POST: we will wait for the context
-				   switch and will dispatch on a later call
-				   when dev->last_context == buf->context.
-				   NOTE WE HOLD THE LOCK THROUGHOUT THIS
-				   TIME! */
-	}
-
-	gamma_clear_next_buffer(dev);
-	buf->pending	 = 1;
-	buf->waiting	 = 0;
-	buf->list	 = DRM_LIST_PEND;
-
-	/* WE NOW ARE ON LOGICAL PAGES!!! - overriding address */
-	address = buf->idx << 12;
-
-	gamma_dma_dispatch(dev, address, length);
-	gamma_free_buffer(dev, dma->this_buffer);
-	dma->this_buffer = buf;
-
-	atomic_inc(&dev->counts[7]); /* _DRM_STAT_DMA */
-	atomic_add(length, &dev->counts[8]); /* _DRM_STAT_PRIMARY */
-
-	if (!buf->while_locked && !dev->context_flag && !locked) {
-		if (gamma_lock_free(dev, &dev->lock.hw_lock->lock,
-				  DRM_KERNEL_CONTEXT)) {
-			DRM_ERROR("\n");
-		}
-	}
-cleanup:
-
-	clear_bit(0, &dev->dma_flag);
-
-
-	return retcode;
-}
-
-static void gamma_dma_timer_bh(unsigned long dev)
-{
-	gamma_dma_schedule((drm_device_t *)dev, 0);
-}
-
-void gamma_irq_immediate_bh(void *dev)
-{
-	gamma_dma_schedule(dev, 0);
-}
-
-int gamma_dma_schedule(drm_device_t *dev, int locked)
-{
-	int		 next;
-	drm_queue_t	 *q;
-	drm_buf_t	 *buf;
-	int		 retcode   = 0;
-	int		 processed = 0;
-	int		 missed;
-	int		 expire	   = 20;
-	drm_device_dma_t *dma	   = dev->dma;
-
-	if (test_and_set_bit(0, &dev->interrupt_flag)) {
-				/* Not reentrant */
-		atomic_inc(&dev->counts[10]); /* _DRM_STAT_MISSED */
-		return -EBUSY;
-	}
-	missed = atomic_read(&dev->counts[10]);
-
-
-again:
-	if (dev->context_flag) {
-		clear_bit(0, &dev->interrupt_flag);
-		return -EBUSY;
-	}
-	if (dma->next_buffer) {
-				/* Unsent buffer that was previously
-				   selected, but that couldn't be sent
-				   because the lock could not be obtained
-				   or the DMA engine wasn't ready.  Try
-				   again. */
-		if (!(retcode = gamma_do_dma(dev, locked))) ++processed;
-	} else {
-		do {
-			next = gamma_select_queue(dev, gamma_dma_timer_bh);
-			if (next >= 0) {
-				q   = dev->queuelist[next];
-				buf = gamma_waitlist_get(&q->waitlist);
-				dma->next_buffer = buf;
-				dma->next_queue	 = q;
-				if (buf && buf->list == DRM_LIST_RECLAIM) {
-					gamma_clear_next_buffer(dev);
-					gamma_free_buffer(dev, buf);
-				}
-			}
-		} while (next >= 0 && !dma->next_buffer);
-		if (dma->next_buffer) {
-			if (!(retcode = gamma_do_dma(dev, locked))) {
-				++processed;
-			}
-		}
-	}
-
-	if (--expire) {
-		if (missed != atomic_read(&dev->counts[10])) {
-			if (gamma_dma_is_ready(dev)) goto again;
-		}
-		if (processed && gamma_dma_is_ready(dev)) {
-			processed = 0;
-			goto again;
-		}
-	}
-
-	clear_bit(0, &dev->interrupt_flag);
-
-	return retcode;
-}
-
-static int gamma_dma_priority(struct file *filp, 
-			      drm_device_t *dev, drm_dma_t *d)
-{
-	unsigned long	  address;
-	unsigned long	  length;
-	int		  must_free = 0;
-	int		  retcode   = 0;
-	int		  i;
-	int		  idx;
-	drm_buf_t	  *buf;
-	drm_buf_t	  *last_buf = NULL;
-	drm_device_dma_t  *dma	    = dev->dma;
-	int		  *send_indices = NULL;
-	int		  *send_sizes = NULL;
-
-	DECLARE_WAITQUEUE(entry, current);
-
-				/* Turn off interrupt handling */
-	while (test_and_set_bit(0, &dev->interrupt_flag)) {
-		schedule();
-		if (signal_pending(current)) return -EINTR;
-	}
-	if (!(d->flags & _DRM_DMA_WHILE_LOCKED)) {
-		while (!gamma_lock_take(&dev->lock.hw_lock->lock,
-				      DRM_KERNEL_CONTEXT)) {
-			schedule();
-			if (signal_pending(current)) {
-				clear_bit(0, &dev->interrupt_flag);
-				return -EINTR;
-			}
-		}
-		++must_free;
-	}
-
-	send_indices = DRM(alloc)(d->send_count * sizeof(*send_indices),
-				  DRM_MEM_DRIVER);
-	if (send_indices == NULL)
-		return -ENOMEM;
-	if (copy_from_user(send_indices, d->send_indices, 
-			   d->send_count * sizeof(*send_indices))) {
-		retcode = -EFAULT;
-                goto cleanup;
-	}
-	
-	send_sizes = DRM(alloc)(d->send_count * sizeof(*send_sizes),
-				DRM_MEM_DRIVER);
-	if (send_sizes == NULL)
-		return -ENOMEM;
-	if (copy_from_user(send_sizes, d->send_sizes, 
-			   d->send_count * sizeof(*send_sizes))) {
-		retcode = -EFAULT;
-                goto cleanup;
-	}
-
-	for (i = 0; i < d->send_count; i++) {
-		idx = send_indices[i];
-		if (idx < 0 || idx >= dma->buf_count) {
-			DRM_ERROR("Index %d (of %d max)\n",
-				  send_indices[i], dma->buf_count - 1);
-			continue;
-		}
-		buf = dma->buflist[ idx ];
-		if (buf->filp != filp) {
-			DRM_ERROR("Process %d using buffer not owned\n",
-				  current->pid);
-			retcode = -EINVAL;
-			goto cleanup;
-		}
-		if (buf->list != DRM_LIST_NONE) {
-			DRM_ERROR("Process %d using buffer on list %d\n",
-				  current->pid, buf->list);
-			retcode = -EINVAL;
-			goto cleanup;
-		}
-				/* This isn't a race condition on
-				   buf->list, since our concern is the
-				   buffer reclaim during the time the
-				   process closes the /dev/drm? handle, so
-				   it can't also be doing DMA. */
-		buf->list	  = DRM_LIST_PRIO;
-		buf->used	  = send_sizes[i];
-		buf->context	  = d->context;
-		buf->while_locked = d->flags & _DRM_DMA_WHILE_LOCKED;
-		address		  = (unsigned long)buf->address;
-		length		  = buf->used;
-		if (!length) {
-			DRM_ERROR("0 length buffer\n");
-		}
-		if (buf->pending) {
-			DRM_ERROR("Sending pending buffer:"
-				  " buffer %d, offset %d\n",
-				  send_indices[i], i);
-			retcode = -EINVAL;
-			goto cleanup;
-		}
-		if (buf->waiting) {
-			DRM_ERROR("Sending waiting buffer:"
-				  " buffer %d, offset %d\n",
-				  send_indices[i], i);
-			retcode = -EINVAL;
-			goto cleanup;
-		}
-		buf->pending = 1;
-
-		if (dev->last_context != buf->context
-		    && !(dev->queuelist[buf->context]->flags
-			 & _DRM_CONTEXT_PRESERVED)) {
-			add_wait_queue(&dev->context_wait, &entry);
-			current->state = TASK_INTERRUPTIBLE;
-				/* PRE: dev->last_context != buf->context */
-			DRM(context_switch)(dev, dev->last_context,
-					    buf->context);
-				/* POST: we will wait for the context
-				   switch and will dispatch on a later call
-				   when dev->last_context == buf->context.
-				   NOTE WE HOLD THE LOCK THROUGHOUT THIS
-				   TIME! */
-			schedule();
-			current->state = TASK_RUNNING;
-			remove_wait_queue(&dev->context_wait, &entry);
-			if (signal_pending(current)) {
-				retcode = -EINTR;
-				goto cleanup;
-			}
-			if (dev->last_context != buf->context) {
-				DRM_ERROR("Context mismatch: %d %d\n",
-					  dev->last_context,
-					  buf->context);
-			}
-		}
-
-		gamma_dma_dispatch(dev, address, length);
-		atomic_inc(&dev->counts[9]); /* _DRM_STAT_SPECIAL */
-		atomic_add(length, &dev->counts[8]); /* _DRM_STAT_PRIMARY */
-
-		if (last_buf) {
-			gamma_free_buffer(dev, last_buf);
-		}
-		last_buf = buf;
-	}
-
-
-cleanup:
-	if (last_buf) {
-		gamma_dma_ready(dev);
-		gamma_free_buffer(dev, last_buf);
-	}
-	if (send_indices)
-		DRM(free)(send_indices, d->send_count * sizeof(*send_indices), 
-			  DRM_MEM_DRIVER);
-	if (send_sizes)
-		DRM(free)(send_sizes, d->send_count * sizeof(*send_sizes), 
-			  DRM_MEM_DRIVER);
-
-	if (must_free && !dev->context_flag) {
-		if (gamma_lock_free(dev, &dev->lock.hw_lock->lock,
-				  DRM_KERNEL_CONTEXT)) {
-			DRM_ERROR("\n");
-		}
-	}
-	clear_bit(0, &dev->interrupt_flag);
-	return retcode;
-}
-
-static int gamma_dma_send_buffers(struct file *filp,
-				  drm_device_t *dev, drm_dma_t *d)
-{
-	DECLARE_WAITQUEUE(entry, current);
-	drm_buf_t	  *last_buf = NULL;
-	int		  retcode   = 0;
-	drm_device_dma_t  *dma	    = dev->dma;
-	int               send_index;
-
-	if (get_user(send_index, &d->send_indices[d->send_count-1]))
-		return -EFAULT;
-
-	if (d->flags & _DRM_DMA_BLOCK) {
-		last_buf = dma->buflist[send_index];
-		add_wait_queue(&last_buf->dma_wait, &entry);
-	}
-
-	if ((retcode = gamma_dma_enqueue(filp, d))) {
-		if (d->flags & _DRM_DMA_BLOCK)
-			remove_wait_queue(&last_buf->dma_wait, &entry);
-		return retcode;
-	}
-
-	gamma_dma_schedule(dev, 0);
-
-	if (d->flags & _DRM_DMA_BLOCK) {
-		DRM_DEBUG("%d waiting\n", current->pid);
-		for (;;) {
-			current->state = TASK_INTERRUPTIBLE;
-			if (!last_buf->waiting && !last_buf->pending)
-				break; /* finished */
-			schedule();
-			if (signal_pending(current)) {
-				retcode = -EINTR; /* Can't restart */
-				break;
-			}
-		}
-		current->state = TASK_RUNNING;
-		DRM_DEBUG("%d running\n", current->pid);
-		remove_wait_queue(&last_buf->dma_wait, &entry);
-		if (!retcode
-		    || (last_buf->list==DRM_LIST_PEND && !last_buf->pending)) {
-			if (!waitqueue_active(&last_buf->dma_wait)) {
-				gamma_free_buffer(dev, last_buf);
-			}
-		}
-		if (retcode) {
-			DRM_ERROR("ctx%d w%d p%d c%ld i%d l%d pid:%d\n",
-				  d->context,
-				  last_buf->waiting,
-				  last_buf->pending,
-				  (long)DRM_WAITCOUNT(dev, d->context),
-				  last_buf->idx,
-				  last_buf->list,
-				  current->pid);
-		}
-	}
-	return retcode;
-}
-
-int gamma_dma(struct inode *inode, struct file *filp, unsigned int cmd,
-	      unsigned long arg)
-{
-	drm_file_t	  *priv	    = filp->private_data;
-	drm_device_t	  *dev	    = priv->dev;
-	drm_device_dma_t  *dma	    = dev->dma;
-	int		  retcode   = 0;
-	drm_dma_t	  __user *argp = (void __user *)arg;
-	drm_dma_t	  d;
-
-	if (copy_from_user(&d, argp, sizeof(d)))
-		return -EFAULT;
-
-	if (d.send_count < 0 || d.send_count > dma->buf_count) {
-		DRM_ERROR("Process %d trying to send %d buffers (of %d max)\n",
-			  current->pid, d.send_count, dma->buf_count);
-		return -EINVAL;
-	}
-
-	if (d.request_count < 0 || d.request_count > dma->buf_count) {
-		DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
-			  current->pid, d.request_count, dma->buf_count);
-		return -EINVAL;
-	}
-
-	if (d.send_count) {
-		if (d.flags & _DRM_DMA_PRIORITY)
-			retcode = gamma_dma_priority(filp, dev, &d);
-		else
-			retcode = gamma_dma_send_buffers(filp, dev, &d);
-	}
-
-	d.granted_count = 0;
-
-	if (!retcode && d.request_count) {
-		retcode = gamma_dma_get_buffers(filp, &d);
-	}
-
-	DRM_DEBUG("%d returning, granted = %d\n",
-		  current->pid, d.granted_count);
-	if (copy_to_user(argp, &d, sizeof(d)))
-		return -EFAULT;
-
-	return retcode;
-}
-
-/* =============================================================
- * DMA initialization, cleanup
- */
-
-static int gamma_do_init_dma( drm_device_t *dev, drm_gamma_init_t *init )
-{
-	drm_gamma_private_t *dev_priv;
-	drm_device_dma_t    *dma = dev->dma;
-	drm_buf_t	    *buf;
-	int i;
-	struct list_head    *list;
-	unsigned long	    *pgt;
-
-	DRM_DEBUG( "%s\n", __FUNCTION__ );
-
-	dev_priv = DRM(alloc)( sizeof(drm_gamma_private_t),
-							DRM_MEM_DRIVER );
-	if ( !dev_priv )
-		return -ENOMEM;
-
-	dev->dev_private = (void *)dev_priv;
-
-	memset( dev_priv, 0, sizeof(drm_gamma_private_t) );
-
-	dev_priv->num_rast = init->num_rast;
-
-	list_for_each(list, &dev->maplist->head) {
-		drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
-		if( r_list->map &&
-		    r_list->map->type == _DRM_SHM &&
-		    r_list->map->flags & _DRM_CONTAINS_LOCK ) {
-			dev_priv->sarea = r_list->map;
- 			break;
- 		}
- 	}
-	
-	dev_priv->mmio0 = drm_core_findmap(dev, init->mmio0);
-	dev_priv->mmio1 = drm_core_findmap(dev, init->mmio1);
-	dev_priv->mmio2 = drm_core_findmap(dev, init->mmio2);
-	dev_priv->mmio3 = drm_core_findmap(dev, init->mmio3);
-	
-	dev_priv->sarea_priv = (drm_gamma_sarea_t *)
-		((u8 *)dev_priv->sarea->handle +
-		 init->sarea_priv_offset);
-
-	if (init->pcimode) {
-		buf = dma->buflist[GLINT_DRI_BUF_COUNT];
-		pgt = buf->address;
-
- 		for (i = 0; i < GLINT_DRI_BUF_COUNT; i++) {
-			buf = dma->buflist[i];
-			*pgt = virt_to_phys((void*)buf->address) | 0x07;
-			pgt++;
-		}
-
-		buf = dma->buflist[GLINT_DRI_BUF_COUNT];
-	} else {
-		dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
-		drm_core_ioremap( dev->agp_buffer_map, dev);
-
-		buf = dma->buflist[GLINT_DRI_BUF_COUNT];
-		pgt = buf->address;
-
- 		for (i = 0; i < GLINT_DRI_BUF_COUNT; i++) {
-			buf = dma->buflist[i];
-			*pgt = (unsigned long)buf->address + 0x07;
-			pgt++;
-		}
-
-		buf = dma->buflist[GLINT_DRI_BUF_COUNT];
-
-		while (GAMMA_READ(GAMMA_INFIFOSPACE) < 1);
-		GAMMA_WRITE( GAMMA_GDMACONTROL, 0xe);
-	}
-	while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2);
-	GAMMA_WRITE( GAMMA_PAGETABLEADDR, virt_to_phys((void*)buf->address) );
-	GAMMA_WRITE( GAMMA_PAGETABLELENGTH, 2 );
-
-	return 0;
-}
-
-int gamma_do_cleanup_dma( drm_device_t *dev )
-{
-	DRM_DEBUG( "%s\n", __FUNCTION__ );
-
-	/* Make sure interrupts are disabled here because the uninstall ioctl
-	 * may not have been called from userspace and after dev_private
-	 * is freed, it's too late.
-	 */
-	if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
-		if ( dev->irq_enabled ) 
-			DRM(irq_uninstall)(dev);
-
-	if ( dev->dev_private ) {
-
-		if ( dev->agp_buffer_map != NULL )
-			drm_core_ioremapfree( dev->agp_buffer_map, dev );
-
-		DRM(free)( dev->dev_private, sizeof(drm_gamma_private_t),
-			   DRM_MEM_DRIVER );
-		dev->dev_private = NULL;
-	}
-
-	return 0;
-}
-
-int gamma_dma_init( struct inode *inode, struct file *filp,
-		  unsigned int cmd, unsigned long arg )
-{
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->dev;
-	drm_gamma_init_t init;
-
-	LOCK_TEST_WITH_RETURN( dev, filp );
-
-	if ( copy_from_user( &init, (drm_gamma_init_t __user *)arg, sizeof(init) ) )
-		return -EFAULT;
-
-	switch ( init.func ) {
-	case GAMMA_INIT_DMA:
-		return gamma_do_init_dma( dev, &init );
-	case GAMMA_CLEANUP_DMA:
-		return gamma_do_cleanup_dma( dev );
-	}
-
-	return -EINVAL;
-}
-
-static int gamma_do_copy_dma( drm_device_t *dev, drm_gamma_copy_t *copy )
-{
-	drm_device_dma_t    *dma = dev->dma;
-	unsigned int        *screenbuf;
-
-	DRM_DEBUG( "%s\n", __FUNCTION__ );
-
-	/* We've DRM_RESTRICTED this DMA buffer */
-
-	screenbuf = dma->buflist[ GLINT_DRI_BUF_COUNT + 1 ]->address;
-
-#if 0
-	*buffer++ = 0x180;	/* Tag (FilterMode) */
-	*buffer++ = 0x200;	/* Allow FBColor through */
-	*buffer++ = 0x53B;	/* Tag */
-	*buffer++ = copy->Pitch;
-	*buffer++ = 0x53A;	/* Tag */
-	*buffer++ = copy->SrcAddress;
-	*buffer++ = 0x539;	/* Tag */
-	*buffer++ = copy->WidthHeight; /* Initiates transfer */
-	*buffer++ = 0x53C;	/* Tag - DMAOutputAddress */
-	*buffer++ = virt_to_phys((void*)screenbuf);
-	*buffer++ = 0x53D;	/* Tag - DMAOutputCount */
-	*buffer++ = copy->Count; /* Reads HostOutFifo BLOCKS until ..*/
-
-	/* Data now sitting in dma->buflist[ GLINT_DRI_BUF_COUNT + 1 ] */
-	/* Now put it back to the screen */
-
-	*buffer++ = 0x180;	/* Tag (FilterMode) */
-	*buffer++ = 0x400;	/* Allow Sync through */
-	*buffer++ = 0x538;	/* Tag - DMARectangleReadTarget */
-	*buffer++ = 0x155;	/* FBSourceData | count */
-	*buffer++ = 0x537;	/* Tag */
-	*buffer++ = copy->Pitch;
-	*buffer++ = 0x536;	/* Tag */
-	*buffer++ = copy->DstAddress;
-	*buffer++ = 0x535;	/* Tag */
-	*buffer++ = copy->WidthHeight; /* Initiates transfer */
-	*buffer++ = 0x530;	/* Tag - DMAAddr */
-	*buffer++ = virt_to_phys((void*)screenbuf);
-	*buffer++ = 0x531;
-	*buffer++ = copy->Count; /* initiates DMA transfer of color data */
-#endif
-
-	/* need to dispatch it now */
-
-	return 0;
-}
-
-int gamma_dma_copy( struct inode *inode, struct file *filp,
-		  unsigned int cmd, unsigned long arg )
-{
-	drm_file_t *priv = filp->private_data;
-	drm_device_t *dev = priv->dev;
-	drm_gamma_copy_t copy;
-
-	if ( copy_from_user( &copy, (drm_gamma_copy_t __user *)arg, sizeof(copy) ) )
-		return -EFAULT;
-
-	return gamma_do_copy_dma( dev, &copy );
-}
-
-/* =============================================================
- * Per Context SAREA Support
- */
-
-int gamma_getsareactx(struct inode *inode, struct file *filp,
-		     unsigned int cmd, unsigned long arg)
-{
-	drm_file_t	*priv	= filp->private_data;
-	drm_device_t	*dev	= priv->dev;
-	drm_ctx_priv_map_t __user *argp = (void __user *)arg;
-	drm_ctx_priv_map_t request;
-	drm_map_t *map;
-
-	if (copy_from_user(&request, argp, sizeof(request)))
-		return -EFAULT;
-
-	down(&dev->struct_sem);
-	if ((int)request.ctx_id >= dev->max_context) {
-		up(&dev->struct_sem);
-		return -EINVAL;
-	}
-
-	map = dev->context_sareas[request.ctx_id];
-	up(&dev->struct_sem);
-
-	request.handle = map->handle;
-	if (copy_to_user(argp, &request, sizeof(request)))
-		return -EFAULT;
-	return 0;
-}
-
-int gamma_setsareactx(struct inode *inode, struct file *filp,
-		     unsigned int cmd, unsigned long arg)
-{
-	drm_file_t	*priv	= filp->private_data;
-	drm_device_t	*dev	= priv->dev;
-	drm_ctx_priv_map_t request;
-	drm_map_t *map = NULL;
-	drm_map_list_t *r_list;
-	struct list_head *list;
-
-	if (copy_from_user(&request,
-			   (drm_ctx_priv_map_t __user *)arg,
-			   sizeof(request)))
-		return -EFAULT;
-
-	down(&dev->struct_sem);
-	r_list = NULL;
-	list_for_each(list, &dev->maplist->head) {
-		r_list = list_entry(list, drm_map_list_t, head);
-		if(r_list->map &&
-		   r_list->map->handle == request.handle) break;
-	}
-	if (list == &(dev->maplist->head)) {
-		up(&dev->struct_sem);
-		return -EINVAL;
-	}
-	map = r_list->map;
-	up(&dev->struct_sem);
-
-	if (!map) return -EINVAL;
-
-	down(&dev->struct_sem);
-	if ((int)request.ctx_id >= dev->max_context) {
-		up(&dev->struct_sem);
-		return -EINVAL;
-	}
-	dev->context_sareas[request.ctx_id] = map;
-	up(&dev->struct_sem);
-	return 0;
-}
-
-void gamma_driver_irq_preinstall( drm_device_t *dev ) {
-	drm_gamma_private_t *dev_priv =
-				(drm_gamma_private_t *)dev->dev_private;
-
-	while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
-		cpu_relax();
-
-	GAMMA_WRITE( GAMMA_GCOMMANDMODE,	0x00000004 );
-	GAMMA_WRITE( GAMMA_GDMACONTROL,		0x00000000 );
-}
-
-void gamma_driver_irq_postinstall( drm_device_t *dev ) {
-	drm_gamma_private_t *dev_priv =
-				(drm_gamma_private_t *)dev->dev_private;
-
-	while(GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
-		cpu_relax();
-
-	GAMMA_WRITE( GAMMA_GINTENABLE,		0x00002001 );
-	GAMMA_WRITE( GAMMA_COMMANDINTENABLE,	0x00000008 );
-	GAMMA_WRITE( GAMMA_GDELAYTIMER,		0x00039090 );
-}
-
-void gamma_driver_irq_uninstall( drm_device_t *dev ) {
-	drm_gamma_private_t *dev_priv =
-				(drm_gamma_private_t *)dev->dev_private;
-	if (!dev_priv)
-		return;
-
-	while(GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
-		cpu_relax();
-
-	GAMMA_WRITE( GAMMA_GDELAYTIMER,		0x00000000 );
-	GAMMA_WRITE( GAMMA_COMMANDINTENABLE,	0x00000000 );
-	GAMMA_WRITE( GAMMA_GINTENABLE,		0x00000000 );
-}
-
-extern drm_ioctl_desc_t DRM(ioctls)[];
-
-static int gamma_driver_preinit(drm_device_t *dev)
-{
-	/* reset the finish ioctl */
-	DRM(ioctls)[DRM_IOCTL_NR(DRM_IOCTL_FINISH)].func = DRM(finish);
-	return 0;
-}
-
-static void gamma_driver_pretakedown(drm_device_t *dev)
-{
-	gamma_do_cleanup_dma(dev);
-}
-
-static void gamma_driver_dma_ready(drm_device_t *dev)
-{
-	gamma_dma_ready(dev);
-}
-
-static int gamma_driver_dma_quiescent(drm_device_t *dev)
-{
-	drm_gamma_private_t *dev_priv =	(
-		drm_gamma_private_t *)dev->dev_private;
-	if (dev_priv->num_rast == 2)
-		gamma_dma_quiescent_dual(dev);
-	else gamma_dma_quiescent_single(dev);
-	return 0;
-}
-
-void gamma_driver_register_fns(drm_device_t *dev)
-{
-	dev->driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ;
-	DRM(fops).read = gamma_fops_read;
-	DRM(fops).poll = gamma_fops_poll;
-	dev->driver.preinit = gamma_driver_preinit;
-	dev->driver.pretakedown = gamma_driver_pretakedown;
-	dev->driver.dma_ready = gamma_driver_dma_ready;
-	dev->driver.dma_quiescent = gamma_driver_dma_quiescent;
-	dev->driver.dma_flush_block_and_flush = gamma_flush_block_and_flush;
-	dev->driver.dma_flush_unblock = gamma_flush_unblock;
-}
diff --git a/drivers/char/drm/gamma_drm.h b/drivers/char/drm/gamma_drm.h
deleted file mode 100644
index 20819de..0000000
--- a/drivers/char/drm/gamma_drm.h
+++ /dev/null
@@ -1,90 +0,0 @@
-#ifndef _GAMMA_DRM_H_
-#define _GAMMA_DRM_H_
-
-typedef struct _drm_gamma_tex_region {
-	unsigned char next, prev; /* indices to form a circular LRU  */
-	unsigned char in_use;	/* owned by a client, or free? */
-	int age;		/* tracked by clients to update local LRU's */
-} drm_gamma_tex_region_t;
-
-typedef struct {
-	unsigned int	GDeltaMode;
-	unsigned int	GDepthMode;
-	unsigned int	GGeometryMode;
-	unsigned int	GTransformMode;
-} drm_gamma_context_regs_t;
-
-typedef struct _drm_gamma_sarea {
-   	drm_gamma_context_regs_t context_state;
-
-	unsigned int dirty;
-
-
-	/* Maintain an LRU of contiguous regions of texture space.  If
-	 * you think you own a region of texture memory, and it has an
-	 * age different to the one you set, then you are mistaken and
-	 * it has been stolen by another client.  If global texAge
-	 * hasn't changed, there is no need to walk the list.
-	 *
-	 * These regions can be used as a proxy for the fine-grained
-	 * texture information of other clients - by maintaining them
-	 * in the same lru which is used to age their own textures,
-	 * clients have an approximate lru for the whole of global
-	 * texture space, and can make informed decisions as to which
-	 * areas to kick out.  There is no need to choose whether to
-	 * kick out your own texture or someone else's - simply eject
-	 * them all in LRU order.  
-	 */
-   
-#define GAMMA_NR_TEX_REGIONS 64
-	drm_gamma_tex_region_t texList[GAMMA_NR_TEX_REGIONS+1]; 
-				/* Last elt is sentinel */
-        int texAge;		/* last time texture was uploaded */
-        int last_enqueue;	/* last time a buffer was enqueued */
-	int last_dispatch;	/* age of the most recently dispatched buffer */
-	int last_quiescent;     /*  */
-	int ctxOwner;		/* last context to upload state */
-
-	int vertex_prim;
-} drm_gamma_sarea_t;
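
 Editor's note: the comment above describes the shared texture LRU that all clients cooperate on. As an illustration only, and assuming the sentinel element (index GAMMA_NR_TEX_REGIONS) anchors the ring with its 'next' links running from most to least recently used, a client-side walk of the list could look like the sketch below; this helper does not exist in the driver itself.

	/* Hypothetical helper: visit every texture region by following the
	 * circular 'next' indices from the sentinel.  Under the assumption
	 * stated above, later visits are better eviction candidates. */
	static void gamma_for_each_region(const drm_gamma_sarea_t *sarea,
					  void (*visit)(int idx))
	{
		int i = sarea->texList[GAMMA_NR_TEX_REGIONS].next;

		while (i != GAMMA_NR_TEX_REGIONS) {
			visit(i);
			i = sarea->texList[i].next;
		}
	}
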
-
-/* WARNING: If you change any of these defines, make sure to change the
- * defines in the Xserver file (xf86drmGamma.h)
- */
-
-/* Gamma specific ioctls
- * The device specific ioctl range is 0x40 to 0x79.
- */
-#define DRM_IOCTL_GAMMA_INIT		DRM_IOW( 0x40, drm_gamma_init_t)
-#define DRM_IOCTL_GAMMA_COPY		DRM_IOW( 0x41, drm_gamma_copy_t)
-
-typedef struct drm_gamma_copy {
-	unsigned int	DMAOutputAddress;
-	unsigned int	DMAOutputCount;
-	unsigned int	DMAReadGLINTSource;
-	unsigned int	DMARectangleWriteAddress;
-	unsigned int	DMARectangleWriteLinePitch;
-	unsigned int	DMARectangleWrite;
-	unsigned int	DMARectangleReadAddress;
-	unsigned int	DMARectangleReadLinePitch;
-	unsigned int	DMARectangleRead;
-	unsigned int	DMARectangleReadTarget;
-} drm_gamma_copy_t;
-
-typedef struct drm_gamma_init {
-   	enum {
-	   	GAMMA_INIT_DMA    = 0x01,
-	       	GAMMA_CLEANUP_DMA = 0x02
-	} func;
-
-   	int sarea_priv_offset;
-	int pcimode;
-	unsigned int mmio0;
-	unsigned int mmio1;
-	unsigned int mmio2;
-	unsigned int mmio3;
-	unsigned int buffers_offset;
-	int num_rast;
-} drm_gamma_init_t;
-
-#endif /* _GAMMA_DRM_H_ */
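
 Editor's note: for context on the interface deleted above, here is a minimal user-space sketch of driving the init ioctl. Only drm_gamma_init_t, GAMMA_INIT_DMA and DRM_IOCTL_GAMMA_INIT come from gamma_drm.h; the device node path, the zeroed remaining fields and the error handling are assumptions made purely for illustration.

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	/* plus the DRM headers that define drm_gamma_init_t and
	 * DRM_IOCTL_GAMMA_INIT (formerly gamma_drm.h) */

	int gamma_init_example(void)
	{
		drm_gamma_init_t init = { .func = GAMMA_INIT_DMA };
		int fd = open("/dev/dri/card0", O_RDWR);  /* assumed node path */

		if (fd < 0)
			return -1;

		/* sarea_priv_offset, pcimode, mmio0..mmio3, buffers_offset and
		 * num_rast would normally be filled in from the DDX config. */
		if (ioctl(fd, DRM_IOCTL_GAMMA_INIT, &init) < 0) {
			close(fd);
			return -1;
		}
		return fd;
	}
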
diff --git a/drivers/char/drm/gamma_drv.c b/drivers/char/drm/gamma_drv.c
deleted file mode 100644
index e7e64b6..0000000
--- a/drivers/char/drm/gamma_drv.c
+++ /dev/null
@@ -1,59 +0,0 @@
-/* gamma.c -- 3dlabs GMX 2000 driver -*- linux-c -*-
- * Created: Mon Jan  4 08:58:31 1999 by faith@precisioninsight.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- *    Rickard E. (Rik) Faith <faith@valinux.com>
- *    Gareth Hughes <gareth@valinux.com>
- */
-
-#include <linux/config.h>
-#include "gamma.h"
-#include "drmP.h"
-#include "drm.h"
-#include "gamma_drm.h"
-#include "gamma_drv.h"
-
-#include "drm_auth.h"
-#include "drm_agpsupport.h"
-#include "drm_bufs.h"
-#include "gamma_context.h"	/* NOTE! */
-#include "drm_dma.h"
-#include "gamma_old_dma.h"	/* NOTE */
-#include "drm_drawable.h"
-#include "drm_drv.h"
-
-#include "drm_fops.h"
-#include "drm_init.h"
-#include "drm_ioctl.h"
-#include "drm_irq.h"
-#include "gamma_lists.h"        /* NOTE */
-#include "drm_lock.h"
-#include "gamma_lock.h"		/* NOTE */
-#include "drm_memory.h"
-#include "drm_proc.h"
-#include "drm_vm.h"
-#include "drm_stub.h"
-#include "drm_scatter.h"
diff --git a/drivers/char/drm/gamma_drv.h b/drivers/char/drm/gamma_drv.h
deleted file mode 100644
index 146fcc6..0000000
--- a/drivers/char/drm/gamma_drv.h
+++ /dev/null
@@ -1,147 +0,0 @@
-/* gamma_drv.h -- Private header for 3dlabs GMX 2000 driver -*- linux-c -*-
- * Created: Mon Jan  4 10:05:05 1999 by faith@precisioninsight.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All rights reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- *    Rickard E. (Rik) Faith <faith@valinux.com>
- *
- */
-
-#ifndef _GAMMA_DRV_H_
-#define _GAMMA_DRV_H_
-
-typedef struct drm_gamma_private {
-	drm_gamma_sarea_t *sarea_priv;
-	drm_map_t *sarea;
-	drm_map_t *mmio0;
-	drm_map_t *mmio1;
-	drm_map_t *mmio2;
-	drm_map_t *mmio3;
-	int num_rast;
-} drm_gamma_private_t;
-
-				/* gamma_dma.c */
-extern int gamma_dma_init( struct inode *inode, struct file *filp,
-			 unsigned int cmd, unsigned long arg );
-extern int gamma_dma_copy( struct inode *inode, struct file *filp,
-			 unsigned int cmd, unsigned long arg );
-
-extern int gamma_do_cleanup_dma( drm_device_t *dev );
-extern void gamma_dma_ready(drm_device_t *dev);
-extern void gamma_dma_quiescent_single(drm_device_t *dev);
-extern void gamma_dma_quiescent_dual(drm_device_t *dev);
-
-				/* gamma_dma.c */
-extern int  gamma_dma_schedule(drm_device_t *dev, int locked);
-extern int  gamma_dma(struct inode *inode, struct file *filp,
-		      unsigned int cmd, unsigned long arg);
-extern int  gamma_find_devices(void);
-extern int  gamma_found(void);
-
-/* Gamma-specific code pulled from drm_fops.h:
- */
-extern int	     DRM(finish)(struct inode *inode, struct file *filp,
-				 unsigned int cmd, unsigned long arg);
-extern int	     DRM(flush_unblock)(drm_device_t *dev, int context,
-					drm_lock_flags_t flags);
-extern int	     DRM(flush_block_and_flush)(drm_device_t *dev, int context,
-						drm_lock_flags_t flags);
-
-/* Gamma-specific code pulled from drm_dma.h:
- */
-extern void	     DRM(clear_next_buffer)(drm_device_t *dev);
-extern int	     DRM(select_queue)(drm_device_t *dev,
-				       void (*wrapper)(unsigned long));
-extern int	     DRM(dma_enqueue)(struct file *filp, drm_dma_t *dma);
-extern int	     DRM(dma_get_buffers)(struct file *filp, drm_dma_t *dma);
-
-
-/* Gamma-specific code pulled from drm_lists.h (now renamed gamma_lists.h):
- */
-extern int	     DRM(waitlist_create)(drm_waitlist_t *bl, int count);
-extern int	     DRM(waitlist_destroy)(drm_waitlist_t *bl);
-extern int	     DRM(waitlist_put)(drm_waitlist_t *bl, drm_buf_t *buf);
-extern drm_buf_t     *DRM(waitlist_get)(drm_waitlist_t *bl);
-extern int	     DRM(freelist_create)(drm_freelist_t *bl, int count);
-extern int	     DRM(freelist_destroy)(drm_freelist_t *bl);
-extern int	     DRM(freelist_put)(drm_device_t *dev, drm_freelist_t *bl,
-				       drm_buf_t *buf);
-extern drm_buf_t     *DRM(freelist_get)(drm_freelist_t *bl, int block);
-
-/* externs for gamma changes to the ops */
-extern struct file_operations DRM(fops);
-extern unsigned int gamma_fops_poll(struct file *filp, struct poll_table_struct *wait);
-extern ssize_t gamma_fops_read(struct file *filp, char __user *buf, size_t count, loff_t *off);
-
-
-#define GLINT_DRI_BUF_COUNT 256
-
-#define GAMMA_OFF(reg)						   \
-	((reg < 0x1000)						   \
-	 ? reg							   \
-	 : ((reg < 0x10000)					   \
-	    ? (reg - 0x1000)					   \
-	    : ((reg < 0x11000)					   \
-	       ? (reg - 0x10000)				   \
-	       : (reg - 0x11000))))
-
-#define GAMMA_BASE(reg)	 ((unsigned long)				     \
-			  ((reg < 0x1000)    ? dev_priv->mmio0->handle :     \
-			   ((reg < 0x10000)  ? dev_priv->mmio1->handle :     \
-			    ((reg < 0x11000) ? dev_priv->mmio2->handle :     \
-					       dev_priv->mmio3->handle))))
-#define GAMMA_ADDR(reg)	 (GAMMA_BASE(reg) + GAMMA_OFF(reg))
-#define GAMMA_DEREF(reg) *(__volatile__ int *)GAMMA_ADDR(reg)
-#define GAMMA_READ(reg)	 GAMMA_DEREF(reg)
-#define GAMMA_WRITE(reg,val) do { GAMMA_DEREF(reg) = val; } while (0)
-
-#define GAMMA_BROADCASTMASK    0x9378
-#define GAMMA_COMMANDINTENABLE 0x0c48
-#define GAMMA_DMAADDRESS       0x0028
-#define GAMMA_DMACOUNT	       0x0030
-#define GAMMA_FILTERMODE       0x8c00
-#define GAMMA_GCOMMANDINTFLAGS 0x0c50
-#define GAMMA_GCOMMANDMODE     0x0c40
-#define		GAMMA_QUEUED_DMA_MODE		1<<1
-#define GAMMA_GCOMMANDSTATUS   0x0c60
-#define GAMMA_GDELAYTIMER      0x0c38
-#define GAMMA_GDMACONTROL      0x0060
-#define 	GAMMA_USE_AGP			1<<1
-#define GAMMA_GINTENABLE       0x0808
-#define GAMMA_GINTFLAGS	       0x0810
-#define GAMMA_INFIFOSPACE      0x0018
-#define GAMMA_OUTFIFOWORDS     0x0020
-#define GAMMA_OUTPUTFIFO       0x2000
-#define GAMMA_SYNC	       0x8c40
-#define GAMMA_SYNC_TAG	       0x0188
-#define GAMMA_PAGETABLEADDR    0x0C00
-#define GAMMA_PAGETABLELENGTH  0x0C08
-
-#define GAMMA_PASSTHROUGH	0x1FE
-#define GAMMA_DMAADDRTAG	0x530
-#define GAMMA_DMACOUNTTAG	0x531
-#define GAMMA_COMMANDINTTAG	0x532
-
-#endif
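
 Editor's note: the GAMMA_OFF()/GAMMA_BASE() macros above fan the flat register space out over the four MMIO maps. The standalone sketch below mirrors that decode one-to-one and is only meant as a readable restatement of the macro logic.

	/* Registers below 0x1000 live in mmio0 untranslated, 0x1000-0xffff in
	 * mmio1 minus 0x1000, 0x10000-0x10fff in mmio2 minus 0x10000, and the
	 * rest in mmio3 minus 0x11000. */
	static void gamma_decode_reg(unsigned int reg, int *map, unsigned int *off)
	{
		if (reg < 0x1000) {
			*map = 0;  *off = reg;
		} else if (reg < 0x10000) {
			*map = 1;  *off = reg - 0x1000;
		} else if (reg < 0x11000) {
			*map = 2;  *off = reg - 0x10000;
		} else {
			*map = 3;  *off = reg - 0x11000;
		}
	}

	/* Example: GAMMA_FILTERMODE (0x8c00) decodes to map 1, offset 0x7c00. */
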
diff --git a/drivers/char/drm/gamma_lists.h b/drivers/char/drm/gamma_lists.h
deleted file mode 100644
index 2d93f41..0000000
--- a/drivers/char/drm/gamma_lists.h
+++ /dev/null
@@ -1,215 +0,0 @@
-/* drm_lists.h -- Buffer list handling routines -*- linux-c -*-
- * Created: Mon Apr 19 20:54:22 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- *    Rickard E. (Rik) Faith <faith@valinux.com>
- *    Gareth Hughes <gareth@valinux.com>
- */
-
-#include "drmP.h"
-
-
-int DRM(waitlist_create)(drm_waitlist_t *bl, int count)
-{
-	if (bl->count) return -EINVAL;
-
-	bl->bufs       = DRM(alloc)((bl->count + 2) * sizeof(*bl->bufs),
-				    DRM_MEM_BUFLISTS);
-
-	if(!bl->bufs) return -ENOMEM;
-	memset(bl->bufs, 0, sizeof(*bl->bufs));
-	bl->count      = count;
-	bl->rp	       = bl->bufs;
-	bl->wp	       = bl->bufs;
-	bl->end	       = &bl->bufs[bl->count+1];
-	spin_lock_init(&bl->write_lock);
-	spin_lock_init(&bl->read_lock);
-	return 0;
-}
-
-int DRM(waitlist_destroy)(drm_waitlist_t *bl)
-{
-	if (bl->rp != bl->wp) return -EINVAL;
-	if (bl->bufs) DRM(free)(bl->bufs,
-				(bl->count + 2) * sizeof(*bl->bufs),
-				DRM_MEM_BUFLISTS);
-	bl->count = 0;
-	bl->bufs  = NULL;
-	bl->rp	  = NULL;
-	bl->wp	  = NULL;
-	bl->end	  = NULL;
-	return 0;
-}
-
-int DRM(waitlist_put)(drm_waitlist_t *bl, drm_buf_t *buf)
-{
-	int	      left;
-	unsigned long flags;
-
-	left = DRM_LEFTCOUNT(bl);
-	if (!left) {
-		DRM_ERROR("Overflow while adding buffer %d from filp %p\n",
-			  buf->idx, buf->filp);
-		return -EINVAL;
-	}
-	buf->list	 = DRM_LIST_WAIT;
-
-	spin_lock_irqsave(&bl->write_lock, flags);
-	*bl->wp = buf;
-	if (++bl->wp >= bl->end) bl->wp = bl->bufs;
-	spin_unlock_irqrestore(&bl->write_lock, flags);
-
-	return 0;
-}
-
-drm_buf_t *DRM(waitlist_get)(drm_waitlist_t *bl)
-{
-	drm_buf_t     *buf;
-	unsigned long flags;
-
-	spin_lock_irqsave(&bl->read_lock, flags);
-	buf = *bl->rp;
-	if (bl->rp == bl->wp) {
-		spin_unlock_irqrestore(&bl->read_lock, flags);
-		return NULL;
-	}
-	if (++bl->rp >= bl->end) bl->rp = bl->bufs;
-	spin_unlock_irqrestore(&bl->read_lock, flags);
-
-	return buf;
-}
-
-int DRM(freelist_create)(drm_freelist_t *bl, int count)
-{
-	atomic_set(&bl->count, 0);
-	bl->next      = NULL;
-	init_waitqueue_head(&bl->waiting);
-	bl->low_mark  = 0;
-	bl->high_mark = 0;
-	atomic_set(&bl->wfh,   0);
-	spin_lock_init(&bl->lock);
-	++bl->initialized;
-	return 0;
-}
-
-int DRM(freelist_destroy)(drm_freelist_t *bl)
-{
-	atomic_set(&bl->count, 0);
-	bl->next = NULL;
-	return 0;
-}
-
-int DRM(freelist_put)(drm_device_t *dev, drm_freelist_t *bl, drm_buf_t *buf)
-{
-	drm_device_dma_t *dma  = dev->dma;
-
-	if (!dma) {
-		DRM_ERROR("No DMA support\n");
-		return 1;
-	}
-
-	if (buf->waiting || buf->pending || buf->list == DRM_LIST_FREE) {
-		DRM_ERROR("Freed buffer %d: w%d, p%d, l%d\n",
-			  buf->idx, buf->waiting, buf->pending, buf->list);
-	}
-	if (!bl) return 1;
-	buf->list	= DRM_LIST_FREE;
-
-	spin_lock(&bl->lock);
-	buf->next	= bl->next;
-	bl->next	= buf;
-	spin_unlock(&bl->lock);
-
-	atomic_inc(&bl->count);
-	if (atomic_read(&bl->count) > dma->buf_count) {
-		DRM_ERROR("%d of %d buffers free after addition of %d\n",
-			  atomic_read(&bl->count), dma->buf_count, buf->idx);
-		return 1;
-	}
-				/* Check for high water mark */
-	if (atomic_read(&bl->wfh) && atomic_read(&bl->count)>=bl->high_mark) {
-		atomic_set(&bl->wfh, 0);
-		wake_up_interruptible(&bl->waiting);
-	}
-	return 0;
-}
-
-static drm_buf_t *DRM(freelist_try)(drm_freelist_t *bl)
-{
-	drm_buf_t	  *buf;
-
-	if (!bl) return NULL;
-
-				/* Get buffer */
-	spin_lock(&bl->lock);
-	if (!bl->next) {
-		spin_unlock(&bl->lock);
-		return NULL;
-	}
-	buf	  = bl->next;
-	bl->next  = bl->next->next;
-	spin_unlock(&bl->lock);
-
-	atomic_dec(&bl->count);
-	buf->next = NULL;
-	buf->list = DRM_LIST_NONE;
-	if (buf->waiting || buf->pending) {
-		DRM_ERROR("Free buffer %d: w%d, p%d, l%d\n",
-			  buf->idx, buf->waiting, buf->pending, buf->list);
-	}
-
-	return buf;
-}
-
-drm_buf_t *DRM(freelist_get)(drm_freelist_t *bl, int block)
-{
-	drm_buf_t	  *buf	= NULL;
-	DECLARE_WAITQUEUE(entry, current);
-
-	if (!bl || !bl->initialized) return NULL;
-
-				/* Check for low water mark */
-	if (atomic_read(&bl->count) <= bl->low_mark) /* Became low */
-		atomic_set(&bl->wfh, 1);
-	if (atomic_read(&bl->wfh)) {
-		if (block) {
-			add_wait_queue(&bl->waiting, &entry);
-			for (;;) {
-				current->state = TASK_INTERRUPTIBLE;
-				if (!atomic_read(&bl->wfh)
-				    && (buf = DRM(freelist_try)(bl))) break;
-				schedule();
-				if (signal_pending(current)) break;
-			}
-			current->state = TASK_RUNNING;
-			remove_wait_queue(&bl->waiting, &entry);
-		}
-		return buf;
-	}
-
-	return DRM(freelist_try)(bl);
-}
-
diff --git a/drivers/char/drm/gamma_lock.h b/drivers/char/drm/gamma_lock.h
deleted file mode 100644
index ddec67e..0000000
--- a/drivers/char/drm/gamma_lock.h
+++ /dev/null
@@ -1,140 +0,0 @@
-/* lock.c -- IOCTLs for locking -*- linux-c -*-
- * Created: Tue Feb  2 08:37:54 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- *    Rickard E. (Rik) Faith <faith@valinux.com>
- *    Gareth Hughes <gareth@valinux.com>
- */
-
-
-/* Gamma-specific code extracted from drm_lock.h:
- */
-static int DRM(flush_queue)(drm_device_t *dev, int context)
-{
-	DECLARE_WAITQUEUE(entry, current);
-	int		  ret	= 0;
-	drm_queue_t	  *q	= dev->queuelist[context];
-
-	DRM_DEBUG("\n");
-
-	atomic_inc(&q->use_count);
-	if (atomic_read(&q->use_count) > 1) {
-		atomic_inc(&q->block_write);
-		add_wait_queue(&q->flush_queue, &entry);
-		atomic_inc(&q->block_count);
-		for (;;) {
-			current->state = TASK_INTERRUPTIBLE;
-			if (!DRM_BUFCOUNT(&q->waitlist)) break;
-			schedule();
-			if (signal_pending(current)) {
-				ret = -EINTR; /* Can't restart */
-				break;
-			}
-		}
-		atomic_dec(&q->block_count);
-		current->state = TASK_RUNNING;
-		remove_wait_queue(&q->flush_queue, &entry);
-	}
-	atomic_dec(&q->use_count);
-
-				/* NOTE: block_write is still incremented!
-				   Use drm_flush_unlock_queue to decrement. */
-	return ret;
-}
-
-static int DRM(flush_unblock_queue)(drm_device_t *dev, int context)
-{
-	drm_queue_t	  *q	= dev->queuelist[context];
-
-	DRM_DEBUG("\n");
-
-	atomic_inc(&q->use_count);
-	if (atomic_read(&q->use_count) > 1) {
-		if (atomic_read(&q->block_write)) {
-			atomic_dec(&q->block_write);
-			wake_up_interruptible(&q->write_queue);
-		}
-	}
-	atomic_dec(&q->use_count);
-	return 0;
-}
-
-int DRM(flush_block_and_flush)(drm_device_t *dev, int context,
-			       drm_lock_flags_t flags)
-{
-	int ret = 0;
-	int i;
-
-	DRM_DEBUG("\n");
-
-	if (flags & _DRM_LOCK_FLUSH) {
-		ret = DRM(flush_queue)(dev, DRM_KERNEL_CONTEXT);
-		if (!ret) ret = DRM(flush_queue)(dev, context);
-	}
-	if (flags & _DRM_LOCK_FLUSH_ALL) {
-		for (i = 0; !ret && i < dev->queue_count; i++) {
-			ret = DRM(flush_queue)(dev, i);
-		}
-	}
-	return ret;
-}
-
-int DRM(flush_unblock)(drm_device_t *dev, int context, drm_lock_flags_t flags)
-{
-	int ret = 0;
-	int i;
-
-	DRM_DEBUG("\n");
-
-	if (flags & _DRM_LOCK_FLUSH) {
-		ret = DRM(flush_unblock_queue)(dev, DRM_KERNEL_CONTEXT);
-		if (!ret) ret = DRM(flush_unblock_queue)(dev, context);
-	}
-	if (flags & _DRM_LOCK_FLUSH_ALL) {
-		for (i = 0; !ret && i < dev->queue_count; i++) {
-			ret = DRM(flush_unblock_queue)(dev, i);
-		}
-	}
-
-	return ret;
-}
-
-int DRM(finish)(struct inode *inode, struct file *filp, unsigned int cmd,
-		unsigned long arg)
-{
-	drm_file_t	  *priv	  = filp->private_data;
-	drm_device_t	  *dev	  = priv->dev;
-	int		  ret	  = 0;
-	drm_lock_t	  lock;
-
-	DRM_DEBUG("\n");
-
-	if (copy_from_user(&lock, (drm_lock_t __user *)arg, sizeof(lock)))
-		return -EFAULT;
-	ret = DRM(flush_block_and_flush)(dev, lock.context, lock.flags);
-	DRM(flush_unblock)(dev, lock.context, lock.flags);
-	return ret;
-}
diff --git a/drivers/char/drm/gamma_old_dma.h b/drivers/char/drm/gamma_old_dma.h
deleted file mode 100644
index abdd454..0000000
--- a/drivers/char/drm/gamma_old_dma.h
+++ /dev/null
@@ -1,313 +0,0 @@
-/* drm_dma.c -- DMA IOCTL and function support -*- linux-c -*-
- * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
- *
- * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- *    Rickard E. (Rik) Faith <faith@valinux.com>
- *    Gareth Hughes <gareth@valinux.com>
- */
-
-
-/* Gamma-specific code pulled from drm_dma.h:
- */
-
-void DRM(clear_next_buffer)(drm_device_t *dev)
-{
-	drm_device_dma_t *dma = dev->dma;
-
-	dma->next_buffer = NULL;
-	if (dma->next_queue && !DRM_BUFCOUNT(&dma->next_queue->waitlist)) {
-		wake_up_interruptible(&dma->next_queue->flush_queue);
-	}
-	dma->next_queue	 = NULL;
-}
-
-int DRM(select_queue)(drm_device_t *dev, void (*wrapper)(unsigned long))
-{
-	int	   i;
-	int	   candidate = -1;
-	int	   j	     = jiffies;
-
-	if (!dev) {
-		DRM_ERROR("No device\n");
-		return -1;
-	}
-	if (!dev->queuelist || !dev->queuelist[DRM_KERNEL_CONTEXT]) {
-				/* This only happens between the time the
-				   interrupt is initialized and the time
-				   the queues are initialized. */
-		return -1;
-	}
-
-				/* Doing "while locked" DMA? */
-	if (DRM_WAITCOUNT(dev, DRM_KERNEL_CONTEXT)) {
-		return DRM_KERNEL_CONTEXT;
-	}
-
-				/* If there are buffers on the last_context
-				   queue, and we have not been executing
-				   this context very long, continue to
-				   execute this context. */
-	if (dev->last_switch <= j
-	    && dev->last_switch + DRM_TIME_SLICE > j
-	    && DRM_WAITCOUNT(dev, dev->last_context)) {
-		return dev->last_context;
-	}
-
-				/* Otherwise, find a candidate */
-	for (i = dev->last_checked + 1; i < dev->queue_count; i++) {
-		if (DRM_WAITCOUNT(dev, i)) {
-			candidate = dev->last_checked = i;
-			break;
-		}
-	}
-
-	if (candidate < 0) {
-		for (i = 0; i < dev->queue_count; i++) {
-			if (DRM_WAITCOUNT(dev, i)) {
-				candidate = dev->last_checked = i;
-				break;
-			}
-		}
-	}
-
-	if (wrapper
-	    && candidate >= 0
-	    && candidate != dev->last_context
-	    && dev->last_switch <= j
-	    && dev->last_switch + DRM_TIME_SLICE > j) {
-		if (dev->timer.expires != dev->last_switch + DRM_TIME_SLICE) {
-			del_timer(&dev->timer);
-			dev->timer.function = wrapper;
-			dev->timer.data	    = (unsigned long)dev;
-			dev->timer.expires  = dev->last_switch+DRM_TIME_SLICE;
-			add_timer(&dev->timer);
-		}
-		return -1;
-	}
-
-	return candidate;
-}
-
-
-int DRM(dma_enqueue)(struct file *filp, drm_dma_t *d)
-{
-	drm_file_t    *priv   = filp->private_data;
-	drm_device_t  *dev    = priv->dev;
-	int		  i;
-	drm_queue_t	  *q;
-	drm_buf_t	  *buf;
-	int		  idx;
-	int		  while_locked = 0;
-	drm_device_dma_t  *dma = dev->dma;
-	int		  *ind;
-	int		  err;
-	DECLARE_WAITQUEUE(entry, current);
-
-	DRM_DEBUG("%d\n", d->send_count);
-
-	if (d->flags & _DRM_DMA_WHILE_LOCKED) {
-		int context = dev->lock.hw_lock->lock;
-
-		if (!_DRM_LOCK_IS_HELD(context)) {
-			DRM_ERROR("No lock held during \"while locked\""
-				  " request\n");
-			return -EINVAL;
-		}
-		if (d->context != _DRM_LOCKING_CONTEXT(context)
-		    && _DRM_LOCKING_CONTEXT(context) != DRM_KERNEL_CONTEXT) {
-			DRM_ERROR("Lock held by %d while %d makes"
-				  " \"while locked\" request\n",
-				  _DRM_LOCKING_CONTEXT(context),
-				  d->context);
-			return -EINVAL;
-		}
-		q = dev->queuelist[DRM_KERNEL_CONTEXT];
-		while_locked = 1;
-	} else {
-		q = dev->queuelist[d->context];
-	}
-
-
-	atomic_inc(&q->use_count);
-	if (atomic_read(&q->block_write)) {
-		add_wait_queue(&q->write_queue, &entry);
-		atomic_inc(&q->block_count);
-		for (;;) {
-			current->state = TASK_INTERRUPTIBLE;
-			if (!atomic_read(&q->block_write)) break;
-			schedule();
-			if (signal_pending(current)) {
-				atomic_dec(&q->use_count);
-				remove_wait_queue(&q->write_queue, &entry);
-				return -EINTR;
-			}
-		}
-		atomic_dec(&q->block_count);
-		current->state = TASK_RUNNING;
-		remove_wait_queue(&q->write_queue, &entry);
-	}
-
-	ind = DRM(alloc)(d->send_count * sizeof(int), DRM_MEM_DRIVER);
-	if (!ind)
-		return -ENOMEM;
-
-	if (copy_from_user(ind, d->send_indices, d->send_count * sizeof(int))) {
-		err = -EFAULT;
-                goto out;
-	}
-
-	err = -EINVAL;
-	for (i = 0; i < d->send_count; i++) {
-		idx = ind[i];
-		if (idx < 0 || idx >= dma->buf_count) {
-			DRM_ERROR("Index %d (of %d max)\n",
-				  ind[i], dma->buf_count - 1);
-			goto out;
-		}
-		buf = dma->buflist[ idx ];
-		if (buf->filp != filp) {
-			DRM_ERROR("Process %d using buffer not owned\n",
-				  current->pid);
-			goto out;
-		}
-		if (buf->list != DRM_LIST_NONE) {
-			DRM_ERROR("Process %d using buffer %d on list %d\n",
-				  current->pid, buf->idx, buf->list);
-			goto out;
-		}
-		buf->used	  = ind[i];
-		buf->while_locked = while_locked;
-		buf->context	  = d->context;
-		if (!buf->used) {
-			DRM_ERROR("Queueing 0 length buffer\n");
-		}
-		if (buf->pending) {
-			DRM_ERROR("Queueing pending buffer:"
-				  " buffer %d, offset %d\n",
-				  ind[i], i);
-			goto out;
-		}
-		if (buf->waiting) {
-			DRM_ERROR("Queueing waiting buffer:"
-				  " buffer %d, offset %d\n",
-				  ind[i], i);
-			goto out;
-		}
-		buf->waiting = 1;
-		if (atomic_read(&q->use_count) == 1
-		    || atomic_read(&q->finalization)) {
-			DRM(free_buffer)(dev, buf);
-		} else {
-			DRM(waitlist_put)(&q->waitlist, buf);
-			atomic_inc(&q->total_queued);
-		}
-	}
-	atomic_dec(&q->use_count);
-
-	return 0;
-
-out:
-	DRM(free)(ind, d->send_count * sizeof(int), DRM_MEM_DRIVER);
-	atomic_dec(&q->use_count);
-	return err;
-}
-
-static int DRM(dma_get_buffers_of_order)(struct file *filp, drm_dma_t *d,
-					 int order)
-{
-	drm_file_t    *priv   = filp->private_data;
-	drm_device_t  *dev    = priv->dev;
-	int		  i;
-	drm_buf_t	  *buf;
-	drm_device_dma_t  *dma = dev->dma;
-
-	for (i = d->granted_count; i < d->request_count; i++) {
-		buf = DRM(freelist_get)(&dma->bufs[order].freelist,
-					d->flags & _DRM_DMA_WAIT);
-		if (!buf) break;
-		if (buf->pending || buf->waiting) {
-			DRM_ERROR("Free buffer %d in use: filp %p (w%d, p%d)\n",
-				  buf->idx,
-				  buf->filp,
-				  buf->waiting,
-				  buf->pending);
-		}
-		buf->filp     = filp;
-		if (copy_to_user(&d->request_indices[i],
-				 &buf->idx,
-				 sizeof(buf->idx)))
-			return -EFAULT;
-
-		if (copy_to_user(&d->request_sizes[i],
-				 &buf->total,
-				 sizeof(buf->total)))
-			return -EFAULT;
-
-		++d->granted_count;
-	}
-	return 0;
-}
-
-
-int DRM(dma_get_buffers)(struct file *filp, drm_dma_t *dma)
-{
-	int		  order;
-	int		  retcode = 0;
-	int		  tmp_order;
-
-	order = DRM(order)(dma->request_size);
-
-	dma->granted_count = 0;
-	retcode		   = DRM(dma_get_buffers_of_order)(filp, dma, order);
-
-	if (dma->granted_count < dma->request_count
-	    && (dma->flags & _DRM_DMA_SMALLER_OK)) {
-		for (tmp_order = order - 1;
-		     !retcode
-			     && dma->granted_count < dma->request_count
-			     && tmp_order >= DRM_MIN_ORDER;
-		     --tmp_order) {
-
-			retcode = DRM(dma_get_buffers_of_order)(filp, dma,
-								tmp_order);
-		}
-	}
-
-	if (dma->granted_count < dma->request_count
-	    && (dma->flags & _DRM_DMA_LARGER_OK)) {
-		for (tmp_order = order + 1;
-		     !retcode
-			     && dma->granted_count < dma->request_count
-			     && tmp_order <= DRM_MAX_ORDER;
-		     ++tmp_order) {
-
-			retcode = DRM(dma_get_buffers_of_order)(filp, dma,
-								tmp_order);
-		}
-	}
-	return 0;
-}
-
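 Editor's note: DRM(dma_get_buffers) above services the request half of drm_dma_t, falling back to smaller or larger power-of-two buffers when the flags allow it. The hypothetical client-side fill-in below, with arbitrary example sizes, shows the fields involved; only the field and flag names come from the code above.

	/* Ask for up to 8 buffers of roughly 4 KB each and let the kernel
	 * substitute other orders if the exact size is exhausted. */
	static void fill_example_request(drm_dma_t *d, int *idx, int *sizes)
	{
		d->request_count   = 8;
		d->request_size    = 4096;
		d->request_indices = idx;     /* written back by the kernel */
		d->request_sizes   = sizes;   /* written back by the kernel */
		d->flags           = _DRM_DMA_WAIT | _DRM_DMA_SMALLER_OK |
				     _DRM_DMA_LARGER_OK;
		d->granted_count   = 0;       /* kernel reports buffers granted */
	}
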
diff --git a/drivers/char/drm/i810_dma.c b/drivers/char/drm/i810_dma.c
index 18e0b76..2f1659b 100644
--- a/drivers/char/drm/i810_dma.c
+++ b/drivers/char/drm/i810_dma.c
@@ -45,11 +45,6 @@
 #define I810_BUF_UNMAPPED 0
 #define I810_BUF_MAPPED   1
 
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,4,2)
-#define down_write down
-#define up_write up
-#endif
-
 static drm_buf_t *i810_freelist_get(drm_device_t *dev)
 {
    	drm_device_dma_t *dma = dev->dma;
@@ -351,6 +346,7 @@
 	   	DRM_ERROR("can not find mmio map!\n");
 	   	return -EINVAL;
 	}
+	dev->agp_buffer_token = init->buffers_offset;
 	dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
 	if (!dev->agp_buffer_map) {
 		dev->dev_private = (void *)dev_priv;
@@ -1383,3 +1379,19 @@
 };
 
 int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls);
+
+/**
+ * Determine if the device really is AGP or not.
+ *
+ * All Intel graphics chipsets are treated as AGP, even if they are really
+ * PCI-e.
+ *
+ * \param dev   The device to be tested.
+ *
+ * \returns
+ * A value of 1 is always returned to indicate that every i810 is AGP.
+ */
+int i810_driver_device_is_agp(drm_device_t * dev)
+{
+	return 1;
+}
diff --git a/drivers/char/drm/i810_drv.c b/drivers/char/drm/i810_drv.c
index ff51b32..0060932 100644
--- a/drivers/char/drm/i810_drv.c
+++ b/drivers/char/drm/i810_drv.c
@@ -84,6 +84,7 @@
 	.dev_priv_size = sizeof(drm_i810_buf_priv_t),
 	.pretakedown = i810_driver_pretakedown,
 	.prerelease = i810_driver_prerelease,
+	.device_is_agp = i810_driver_device_is_agp,
 	.release = i810_driver_release,
 	.dma_quiescent = i810_driver_dma_quiescent,
 	.reclaim_buffers = i810_reclaim_buffers,
diff --git a/drivers/char/drm/i810_drv.h b/drivers/char/drm/i810_drv.h
index 1b40538..62ee4f5 100644
--- a/drivers/char/drm/i810_drv.h
+++ b/drivers/char/drm/i810_drv.h
@@ -120,6 +120,7 @@
 extern void i810_driver_release(drm_device_t *dev, struct file *filp);
 extern void i810_driver_pretakedown(drm_device_t *dev);
 extern void i810_driver_prerelease(drm_device_t *dev, DRMFILE filp);
+extern int i810_driver_device_is_agp(drm_device_t * dev);
 
 #define I810_BASE(reg)		((unsigned long) \
 				dev_priv->mmio_map->handle)
diff --git a/drivers/char/drm/i830_dma.c b/drivers/char/drm/i830_dma.c
index dc77330..6f89d57 100644
--- a/drivers/char/drm/i830_dma.c
+++ b/drivers/char/drm/i830_dma.c
@@ -47,11 +47,6 @@
 #define I830_BUF_UNMAPPED 0
 #define I830_BUF_MAPPED   1
 
-#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,4,2)
-#define down_write down
-#define up_write up
-#endif
-
 static drm_buf_t *i830_freelist_get(drm_device_t *dev)
 {
    	drm_device_dma_t *dma = dev->dma;
@@ -358,6 +353,7 @@
 		DRM_ERROR("can not find mmio map!\n");
 		return -EINVAL;
 	}
+	dev->agp_buffer_token = init->buffers_offset;
 	dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
 	if(!dev->agp_buffer_map) {
 		dev->dev_private = (void *)dev_priv;
@@ -1586,3 +1582,19 @@
 };
 
 int i830_max_ioctl = DRM_ARRAY_SIZE(i830_ioctls);
+
+/**
+ * Determine if the device really is AGP or not.
+ *
+ * All Intel graphics chipsets are treated as AGP, even if they are really
+ * PCI-e.
+ *
+ * \param dev   The device to be tested.
+ *
+ * \returns
+ * A value of 1 is always returned to indicate that every i8xx is AGP.
+ */
+int i830_driver_device_is_agp(drm_device_t * dev)
+{
+	return 1;
+}
diff --git a/drivers/char/drm/i830_drv.c b/drivers/char/drm/i830_drv.c
index bc36be7..0da9cd1 100644
--- a/drivers/char/drm/i830_drv.c
+++ b/drivers/char/drm/i830_drv.c
@@ -88,6 +88,7 @@
 	.dev_priv_size = sizeof(drm_i830_buf_priv_t),
 	.pretakedown = i830_driver_pretakedown,
 	.prerelease = i830_driver_prerelease,
+	.device_is_agp = i830_driver_device_is_agp,
 	.release = i830_driver_release,
 	.dma_quiescent = i830_driver_dma_quiescent,
 	.reclaim_buffers = i830_reclaim_buffers,
diff --git a/drivers/char/drm/i830_drv.h b/drivers/char/drm/i830_drv.h
index df77461..63f96a8 100644
--- a/drivers/char/drm/i830_drv.h
+++ b/drivers/char/drm/i830_drv.h
@@ -137,6 +137,7 @@
 extern void i830_driver_release(drm_device_t *dev, struct file *filp);
 extern int i830_driver_dma_quiescent(drm_device_t *dev);
 extern void i830_driver_prerelease(drm_device_t *dev, DRMFILE filp);
+extern int i830_driver_device_is_agp(drm_device_t * dev);
 
 #define I830_BASE(reg)		((unsigned long) \
 				dev_priv->mmio_map->handle)
diff --git a/drivers/char/drm/i915_dma.c b/drivers/char/drm/i915_dma.c
index acf9e52..34f552f 100644
--- a/drivers/char/drm/i915_dma.c
+++ b/drivers/char/drm/i915_dma.c
@@ -95,9 +95,8 @@
 			drm_core_ioremapfree( &dev_priv->ring.map, dev);
 		}
 
-		if (dev_priv->hw_status_page) {
-			drm_pci_free(dev, PAGE_SIZE, dev_priv->hw_status_page,
-				     dev_priv->dma_status_page);
+		if (dev_priv->status_page_dmah) {
+			drm_pci_free(dev, dev_priv->status_page_dmah);
 			/* Need to rewrite hardware status page */
 			I915_WRITE(0x02080, 0x1ffff000);
 		}
@@ -174,16 +173,18 @@
 	dev_priv->allow_batchbuffer = 1;
 
 	/* Program Hardware Status Page */
-	dev_priv->hw_status_page = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE,
-						 0xffffffff, 
-						 &dev_priv->dma_status_page);
+	dev_priv->status_page_dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE,
+						   0xffffffff);
 
-	if (!dev_priv->hw_status_page) {
+	if (!dev_priv->status_page_dmah) {
 		dev->dev_private = (void *)dev_priv;
 		i915_dma_cleanup(dev);
 		DRM_ERROR("Can not allocate hardware status page\n");
 		return DRM_ERR(ENOMEM);
 	}
+	dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
+	dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
+
 	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
 	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
 
@@ -731,3 +732,19 @@
 };
 
 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
+
+/**
+ * Determine if the device really is AGP or not.
+ *
+ * All Intel graphics chipsets are treated as AGP, even if they are really
+ * PCI-e.
+ *
+ * \param dev   The device to be tested.
+ *
+ * \returns
+ * A value of 1 is always returned to indicate that every i9x5 is AGP.
+ */
+int i915_driver_device_is_agp(drm_device_t * dev)
+{
+	return 1;
+}
diff --git a/drivers/char/drm/i915_drv.c b/drivers/char/drm/i915_drv.c
index 1f59d3f..106b9ec 100644
--- a/drivers/char/drm/i915_drv.c
+++ b/drivers/char/drm/i915_drv.c
@@ -79,6 +79,7 @@
 				DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
 	.pretakedown = i915_driver_pretakedown,
 	.prerelease = i915_driver_prerelease,
+	.device_is_agp = i915_driver_device_is_agp,
 	.irq_preinstall = i915_driver_irq_preinstall,
 	.irq_postinstall = i915_driver_irq_postinstall,
 	.irq_uninstall = i915_driver_irq_uninstall,
diff --git a/drivers/char/drm/i915_drv.h b/drivers/char/drm/i915_drv.h
index 9c37d23..70ed4e6 100644
--- a/drivers/char/drm/i915_drv.h
+++ b/drivers/char/drm/i915_drv.h
@@ -79,9 +79,10 @@
 	drm_i915_sarea_t *sarea_priv;
 	drm_i915_ring_buffer_t ring;
 
+	drm_dma_handle_t *status_page_dmah;
 	void *hw_status_page;
-	unsigned long counter;
 	dma_addr_t dma_status_page;
+	unsigned long counter;
 
 	int back_offset;
 	int front_offset;
@@ -102,6 +103,7 @@
 extern void i915_kernel_lost_context(drm_device_t * dev);
 extern void i915_driver_pretakedown(drm_device_t *dev);
 extern void i915_driver_prerelease(drm_device_t *dev, DRMFILE filp);
+extern int i915_driver_device_is_agp(drm_device_t *dev);
 
 /* i915_irq.c */
 extern int i915_irq_emit(DRM_IOCTL_ARGS);
diff --git a/drivers/char/drm/mga_dma.c b/drivers/char/drm/mga_dma.c
index 832eaf8..567b425 100644
--- a/drivers/char/drm/mga_dma.c
+++ b/drivers/char/drm/mga_dma.c
@@ -23,18 +23,21 @@
  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
  * DEALINGS IN THE SOFTWARE.
- *
- * Authors:
- *    Rickard E. (Rik) Faith <faith@valinux.com>
- *    Jeff Hartmann <jhartmann@valinux.com>
- *    Keith Whitwell <keith@tungstengraphics.com>
- *
- * Rewritten by:
- *    Gareth Hughes <gareth@valinux.com>
+ */
+
+/**
+ * \file mga_dma.c
+ * DMA support for MGA G200 / G400.
+ * 
+ * \author Rickard E. (Rik) Faith <faith@valinux.com>
+ * \author Jeff Hartmann <jhartmann@valinux.com>
+ * \author Keith Whitwell <keith@tungstengraphics.com>
+ * \author Gareth Hughes <gareth@valinux.com>
  */
 
 #include "drmP.h"
 #include "drm.h"
+#include "drm_sarea.h"
 #include "mga_drm.h"
 #include "mga_drv.h"
 
@@ -148,7 +151,7 @@
 	DRM_DEBUG( "  space = 0x%06x\n", primary->space );
 
 	mga_flush_write_combine();
-	MGA_WRITE( MGA_PRIMEND, tail | MGA_PAGPXFER );
+	MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access);
 
 	DRM_DEBUG( "done.\n" );
 }
@@ -190,7 +193,7 @@
 	DRM_DEBUG( "  space = 0x%06x\n", primary->space );
 
 	mga_flush_write_combine();
-	MGA_WRITE( MGA_PRIMEND, tail | MGA_PAGPXFER );
+	MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access);
 
 	set_bit( 0, &primary->wrapped );
 	DRM_DEBUG( "done.\n" );
@@ -396,23 +399,383 @@
  * DMA initialization, cleanup
  */
 
+
+int mga_driver_preinit(drm_device_t *dev, unsigned long flags)
+{
+	drm_mga_private_t * dev_priv;
+
+	dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
+	if (!dev_priv)
+		return DRM_ERR(ENOMEM);
+
+	dev->dev_private = (void *)dev_priv;
+	memset(dev_priv, 0, sizeof(drm_mga_private_t));
+
+	dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT;
+	dev_priv->chipset = flags;
+
+	return 0;
+}
+
+/**
+ * Bootstrap the driver for AGP DMA.
+ * 
+ * \todo
+ * Investigate whether there is any benefit to storing the WARP microcode in
+ * AGP memory.  If not, the microcode may as well always be put in PCI
+ * memory.
+ *
+ * \todo
+ * This routine needs to set dma_bs->agp_mode to the mode actually configured
+ * in the hardware.  Looking just at the Linux AGP driver code, I don't see
+ * an easy way to determine this.
+ *
+ * \sa mga_do_dma_bootstrap, mga_do_pci_dma_bootstrap
+ */
+static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
+				    drm_mga_dma_bootstrap_t * dma_bs)
+{
+	drm_mga_private_t * const dev_priv = (drm_mga_private_t *) dev->dev_private;
+	const unsigned int warp_size = mga_warp_microcode_size(dev_priv);
+	int err;
+	unsigned  offset;
+	const unsigned secondary_size = dma_bs->secondary_bin_count
+		* dma_bs->secondary_bin_size;
+	const unsigned agp_size = (dma_bs->agp_size << 20);
+	drm_buf_desc_t req;
+	drm_agp_mode_t mode;
+	drm_agp_info_t info;
+
+	
+	/* Acquire AGP. */
+	err = drm_agp_acquire(dev);
+	if (err) {
+		DRM_ERROR("Unable to acquire AGP\n");
+		return err;
+	}
+
+	err = drm_agp_info(dev, &info);
+	if (err) {
+		DRM_ERROR("Unable to get AGP info\n");
+		return err;
+	}
+
+	mode.mode = (info.mode & ~0x07) | dma_bs->agp_mode;
+	err = drm_agp_enable(dev, mode);
+	if (err) {
+		DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode);
+		return err;
+	}
+
+
+	/* In addition to the usual AGP mode configuration, the G200 AGP cards
+	 * need to have the AGP mode "manually" set.
+	 */
+
+	if (dev_priv->chipset == MGA_CARD_TYPE_G200) {
+		if (mode.mode & 0x02) {
+			MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_ENABLE);
+		}
+		else {
+			MGA_WRITE(MGA_AGP_PLL, MGA_AGP2XPLL_DISABLE);
+		}
+	}
+
+
+	/* Allocate and bind AGP memory. */
+	dev_priv->agp_pages = agp_size / PAGE_SIZE;
+	dev_priv->agp_mem = drm_alloc_agp( dev, dev_priv->agp_pages, 0 );
+	if (dev_priv->agp_mem == NULL) {
+		dev_priv->agp_pages = 0;
+		DRM_ERROR("Unable to allocate %uMB AGP memory\n",
+			  dma_bs->agp_size);
+		return DRM_ERR(ENOMEM);
+	}
+		
+	err = drm_bind_agp( dev_priv->agp_mem, 0 );
+	if (err) {
+		DRM_ERROR("Unable to bind AGP memory\n");
+		return err;
+	}
+
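+	/* Carve the AGP aperture into consecutive regions: WARP microcode,
+	 * primary DMA space, secondary DMA buffers, and the remainder for
+	 * AGP textures.
+	 */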
+	offset = 0;
+	err = drm_addmap( dev, offset, warp_size,
+			  _DRM_AGP, _DRM_READ_ONLY, & dev_priv->warp );
+	if (err) {
+		DRM_ERROR("Unable to map WARP microcode\n");
+		return err;
+	}
+
+	offset += warp_size;
+	err = drm_addmap( dev, offset, dma_bs->primary_size,
+			  _DRM_AGP, _DRM_READ_ONLY, & dev_priv->primary );
+	if (err) {
+		DRM_ERROR("Unable to map primary DMA region\n");
+		return err;
+	}
+
+	offset += dma_bs->primary_size;
+	err = drm_addmap( dev, offset, secondary_size,
+			  _DRM_AGP, 0, & dev->agp_buffer_map );
+	if (err) {
+		DRM_ERROR("Unable to map secondary DMA region\n");
+		return err;
+	}
+
+	(void) memset( &req, 0, sizeof(req) );
+	req.count = dma_bs->secondary_bin_count;
+	req.size = dma_bs->secondary_bin_size;
+	req.flags = _DRM_AGP_BUFFER;
+	req.agp_start = offset;
+
+	err = drm_addbufs_agp( dev, & req );
+	if (err) {
+		DRM_ERROR("Unable to add secondary DMA buffers\n");
+		return err;
+	}
+
+	offset += secondary_size;
+	err = drm_addmap( dev, offset, agp_size - offset,
+			  _DRM_AGP, 0, & dev_priv->agp_textures );
+	if (err) {
+		DRM_ERROR("Unable to map AGP texture region\n");
+		return err;
+	}
+
+	drm_core_ioremap(dev_priv->warp, dev);
+	drm_core_ioremap(dev_priv->primary, dev);
+	drm_core_ioremap(dev->agp_buffer_map, dev);
+
+	if (!dev_priv->warp->handle ||
+	    !dev_priv->primary->handle || !dev->agp_buffer_map->handle) {
+		DRM_ERROR("failed to ioremap agp regions! (%p, %p, %p)\n",
+			  dev_priv->warp->handle, dev_priv->primary->handle,
+			  dev->agp_buffer_map->handle);
+		return DRM_ERR(ENOMEM);
+	}
+
+	dev_priv->dma_access = MGA_PAGPXFER;
+	dev_priv->wagp_enable = MGA_WAGP_ENABLE;
+
+	DRM_INFO("Initialized card for AGP DMA.\n");
+	return 0;
+}
+
+/**
+ * Bootstrap the driver for PCI DMA.
+ * 
+ * \todo
+ * The algorithm for decreasing the size of the primary DMA buffer could be
+ * better.  The size should be rounded up to the nearest page size, then
+ * decrease the request size by a single page each pass through the loop.
+ *
+ * \todo
+ * Determine whether the maximum address passed to drm_pci_alloc is correct.
+ * The same goes for drm_addbufs_pci.
+ * 
+ * \sa mga_do_dma_bootstrap, mga_do_agp_dma_bootstrap
+ */
+static int mga_do_pci_dma_bootstrap(drm_device_t * dev,
+				    drm_mga_dma_bootstrap_t * dma_bs)
+{
+	drm_mga_private_t * const dev_priv = (drm_mga_private_t *) dev->dev_private;
+	const unsigned int warp_size = mga_warp_microcode_size(dev_priv);
+	unsigned int primary_size;
+	unsigned int bin_count;
+	int err;
+	drm_buf_desc_t req;
+
+	
+	if (dev->dma == NULL) {
+		DRM_ERROR("dev->dma is NULL\n");
+		return DRM_ERR(EFAULT);
+	}
+
+	/* The proper alignment is 0x100 for this mapping */
+	err = drm_addmap(dev, 0, warp_size, _DRM_CONSISTENT,
+			 _DRM_READ_ONLY, &dev_priv->warp);
+	if (err != 0) {
+		DRM_ERROR("Unable to create mapping for WARP microcode\n");
+		return err;
+	}
+
+	/* Other than the bottom two bits being used to encode other
+	 * information, there don't appear to be any restrictions on the
+	 * alignment of the primary or secondary DMA buffers.
+	 */
+
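+	/* Halve the requested size until a consistent mapping of the primary
+	 * DMA region can be created.
+	 */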
+	for ( primary_size = dma_bs->primary_size
+	      ; primary_size != 0
+	      ; primary_size >>= 1 ) {
+		/* The proper alignment for this mapping is 0x04 */
+		err = drm_addmap(dev, 0, primary_size, _DRM_CONSISTENT,
+				 _DRM_READ_ONLY, &dev_priv->primary);
+		if (!err)
+			break;
+	}
+
+	if (err != 0) {
+		DRM_ERROR("Unable to allocate primary DMA region\n");
+		return DRM_ERR(ENOMEM);
+	}
+
+	if (dev_priv->primary->size != dma_bs->primary_size) {
+		DRM_INFO("Primary DMA buffer size reduced from %u to %u.\n",
+			 dma_bs->primary_size, 
+			 (unsigned) dev_priv->primary->size);
+		dma_bs->primary_size = dev_priv->primary->size;
+	}
+
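+	/* Back the secondary buffer count off one bin at a time until the
+	 * allocation succeeds.
+	 */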
+	for ( bin_count = dma_bs->secondary_bin_count
+	      ; bin_count > 0 
+	      ; bin_count-- ) {
+		(void) memset( &req, 0, sizeof(req) );
+		req.count = bin_count;
+		req.size = dma_bs->secondary_bin_size;
+
+		err = drm_addbufs_pci( dev, & req );
+		if (!err) {
+			break;
+		}
+	}
+	
+	if (bin_count == 0) {
+		DRM_ERROR("Unable to add secondary DMA buffers\n");
+		return err;
+	}
+
+	if (bin_count != dma_bs->secondary_bin_count) {
+		DRM_INFO("Secondary PCI DMA buffer bin count reduced from %u "
+			 "to %u.\n", dma_bs->secondary_bin_count, bin_count);
+
+		dma_bs->secondary_bin_count = bin_count;
+	}
+
+	dev_priv->dma_access = 0;
+	dev_priv->wagp_enable = 0;
+
+	dma_bs->agp_mode = 0;
+
+	DRM_INFO("Initialized card for PCI DMA.\n");
+	return 0;
+}
+
+
+static int mga_do_dma_bootstrap(drm_device_t * dev,
+				drm_mga_dma_bootstrap_t * dma_bs)
+{
+	const int is_agp = (dma_bs->agp_mode != 0) && drm_device_is_agp(dev);
+	int err = 0;
+	drm_mga_private_t * const dev_priv =
+		(drm_mga_private_t *) dev->dev_private;
+
+
+	dev_priv->used_new_dma_init = 1;
+
+	/* The first steps are the same for both PCI and AGP based DMA.  Map
+	 * the card's MMIO registers and map a status page.
+	 */
+	err = drm_addmap( dev, dev_priv->mmio_base, dev_priv->mmio_size,
+			  _DRM_REGISTERS, _DRM_READ_ONLY, & dev_priv->mmio );
+	if (err) {
+		DRM_ERROR("Unable to map MMIO region\n");
+		return err;
+	}
+
+
+	err = drm_addmap( dev, 0, SAREA_MAX, _DRM_SHM,
+			  _DRM_READ_ONLY | _DRM_LOCKED | _DRM_KERNEL,
+			  & dev_priv->status );
+	if (err) {
+		DRM_ERROR("Unable to map status region\n");
+		return err;
+	}
+
+
+	/* The DMA initialization procedure is slightly different for PCI and
+	 * AGP cards.  AGP cards just allocate a large block of AGP memory and
+	 * carve off portions of it for internal uses.  The remaining memory
+	 * is returned to user-mode to be used for AGP textures.
+	 */
+
+	if (is_agp) {
+		err = mga_do_agp_dma_bootstrap(dev, dma_bs);
+	}
+	
+	/* If we attempted to initialize the card for AGP DMA but failed,
+	 * clean-up any mess that may have been created.
+	 */
+
+	if (err) {
+		mga_do_cleanup_dma(dev);
+	}
+
+
+	/* Not only do we want to try to initialize PCI cards for PCI DMA,
+	 * but we also try to initialize AGP cards that could not be
+	 * initialized for AGP DMA.  This covers the case where we have an AGP
+	 * card in a system with an unsupported AGP chipset.  In that case the
+	 * card will be detected as AGP, but we won't be able to allocate any
+	 * AGP memory, etc.
+	 */
+
+	if (!is_agp || err) {
+		err = mga_do_pci_dma_bootstrap(dev, dma_bs);
+	}
+
+
+	return err;
+}
+
+int mga_dma_bootstrap(DRM_IOCTL_ARGS)
+{
+	DRM_DEVICE;
+	drm_mga_dma_bootstrap_t bootstrap;
+	int err;
+
+
+	DRM_COPY_FROM_USER_IOCTL(bootstrap,
+				 (drm_mga_dma_bootstrap_t __user *) data,
+				 sizeof(bootstrap));
+
+	err = mga_do_dma_bootstrap(dev, & bootstrap);
+	if (! err) {
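+		/* Map the AGPSTAT2_* bit-mask onto the fastest AGP rate it
+		 * contains (e.g. 0x07 becomes 4).
+		 */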
+		static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 };
+		const drm_mga_private_t * const dev_priv = 
+			(drm_mga_private_t *) dev->dev_private;
+
+		if (dev_priv->agp_textures != NULL) {
+			bootstrap.texture_handle = dev_priv->agp_textures->offset;
+			bootstrap.texture_size = dev_priv->agp_textures->size;
+		}
+		else {
+			bootstrap.texture_handle = 0;
+			bootstrap.texture_size = 0;
+		}
+
+		bootstrap.agp_mode = modes[ bootstrap.agp_mode & 0x07 ];
+		if (DRM_COPY_TO_USER( (void __user *) data, & bootstrap,
+				     sizeof(bootstrap))) {
+			err = DRM_ERR(EFAULT);
+		}
+	}
+	else {
+		mga_do_cleanup_dma(dev);
+	}
+
+	return err;
+}
+
 static int mga_do_init_dma( drm_device_t *dev, drm_mga_init_t *init )
 {
 	drm_mga_private_t *dev_priv;
 	int ret;
 	DRM_DEBUG( "\n" );
 
-	dev_priv = drm_alloc( sizeof(drm_mga_private_t), DRM_MEM_DRIVER );
-	if ( !dev_priv )
-		return DRM_ERR(ENOMEM);
 
-	memset( dev_priv, 0, sizeof(drm_mga_private_t) );
+	dev_priv = dev->dev_private;
 
-	dev_priv->chipset = init->chipset;
-
-	dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT;
-
-	if ( init->sgram ) {
+	if (init->sgram) {
 		dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_BLK;
 	} else {
 		dev_priv->clear_cmd = MGA_DWGCTL_CLEAR | MGA_ATYPE_RSTR;
@@ -436,88 +799,66 @@
 
 	DRM_GETSAREA();
 
-	if(!dev_priv->sarea) {
-		DRM_ERROR( "failed to find sarea!\n" );
-		/* Assign dev_private so we can do cleanup. */
-		dev->dev_private = (void *)dev_priv;
-		mga_do_cleanup_dma( dev );
+	if (!dev_priv->sarea) {
+		DRM_ERROR("failed to find sarea!\n");
 		return DRM_ERR(EINVAL);
 	}
 
-	dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
-	if(!dev_priv->mmio) {
-		DRM_ERROR( "failed to find mmio region!\n" );
-		/* Assign dev_private so we can do cleanup. */
-		dev->dev_private = (void *)dev_priv;
-		mga_do_cleanup_dma( dev );
-		return DRM_ERR(EINVAL);
-	}
-	dev_priv->status = drm_core_findmap(dev, init->status_offset);
-	if(!dev_priv->status) {
-		DRM_ERROR( "failed to find status page!\n" );
-		/* Assign dev_private so we can do cleanup. */
-		dev->dev_private = (void *)dev_priv;
-		mga_do_cleanup_dma( dev );
-		return DRM_ERR(EINVAL);
-	}
-	dev_priv->warp = drm_core_findmap(dev, init->warp_offset);
-	if(!dev_priv->warp) {
-		DRM_ERROR( "failed to find warp microcode region!\n" );
-		/* Assign dev_private so we can do cleanup. */
-		dev->dev_private = (void *)dev_priv;
-		mga_do_cleanup_dma( dev );
-		return DRM_ERR(EINVAL);
-	}
-	dev_priv->primary = drm_core_findmap(dev, init->primary_offset);
-	if(!dev_priv->primary) {
-		DRM_ERROR( "failed to find primary dma region!\n" );
-		/* Assign dev_private so we can do cleanup. */
-		dev->dev_private = (void *)dev_priv;
-		mga_do_cleanup_dma( dev );
-		return DRM_ERR(EINVAL);
-	}
-	dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
-	if(!dev->agp_buffer_map) {
-		DRM_ERROR( "failed to find dma buffer region!\n" );
-		/* Assign dev_private so we can do cleanup. */
-		dev->dev_private = (void *)dev_priv;
-		mga_do_cleanup_dma( dev );
-		return DRM_ERR(EINVAL);
+	if (! dev_priv->used_new_dma_init) {
+		dev_priv->status = drm_core_findmap(dev, init->status_offset);
+		if (!dev_priv->status) {
+			DRM_ERROR("failed to find status page!\n");
+			return DRM_ERR(EINVAL);
+		}
+		dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
+		if (!dev_priv->mmio) {
+			DRM_ERROR("failed to find mmio region!\n");
+			return DRM_ERR(EINVAL);
+		}
+		dev_priv->warp = drm_core_findmap(dev, init->warp_offset);
+		if (!dev_priv->warp) {
+			DRM_ERROR("failed to find warp microcode region!\n");
+			return DRM_ERR(EINVAL);
+		}
+		dev_priv->primary = drm_core_findmap(dev, init->primary_offset);
+		if (!dev_priv->primary) {
+			DRM_ERROR("failed to find primary dma region!\n");
+			return DRM_ERR(EINVAL);
+		}
+		dev->agp_buffer_token = init->buffers_offset;
+		dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
+		if (!dev->agp_buffer_map) {
+			DRM_ERROR("failed to find dma buffer region!\n");
+			return DRM_ERR(EINVAL);
+		}
+
+		drm_core_ioremap(dev_priv->warp, dev);
+		drm_core_ioremap(dev_priv->primary, dev);
+		drm_core_ioremap(dev->agp_buffer_map, dev);
 	}
 
 	dev_priv->sarea_priv =
 		(drm_mga_sarea_t *)((u8 *)dev_priv->sarea->handle +
 				    init->sarea_priv_offset);
 
-	drm_core_ioremap( dev_priv->warp, dev );
-	drm_core_ioremap( dev_priv->primary, dev );
-	drm_core_ioremap( dev->agp_buffer_map, dev );
-
-	if(!dev_priv->warp->handle ||
-	   !dev_priv->primary->handle ||
-	   !dev->agp_buffer_map->handle ) {
-		DRM_ERROR( "failed to ioremap agp regions!\n" );
-		/* Assign dev_private so we can do cleanup. */
-		dev->dev_private = (void *)dev_priv;
-		mga_do_cleanup_dma( dev );
+	if (!dev_priv->warp->handle ||
+	    !dev_priv->primary->handle ||
+	    ((dev_priv->dma_access != 0) &&
+	     ((dev->agp_buffer_map == NULL) ||
+	      (dev->agp_buffer_map->handle == NULL)))) {
+		DRM_ERROR("failed to ioremap agp regions!\n");
 		return DRM_ERR(ENOMEM);
 	}
 
-	ret = mga_warp_install_microcode( dev_priv );
-	if ( ret < 0 ) {
-		DRM_ERROR( "failed to install WARP ucode!\n" );
-		/* Assign dev_private so we can do cleanup. */
-		dev->dev_private = (void *)dev_priv;
-		mga_do_cleanup_dma( dev );
+	ret = mga_warp_install_microcode(dev_priv);
+	if (ret < 0) {
+		DRM_ERROR("failed to install WARP ucode!\n");
 		return ret;
 	}
 
-	ret = mga_warp_init( dev_priv );
-	if ( ret < 0 ) {
-		DRM_ERROR( "failed to init WARP engine!\n" );
-		/* Assign dev_private so we can do cleanup. */
-		dev->dev_private = (void *)dev_priv;
-		mga_do_cleanup_dma( dev );
+	ret = mga_warp_init(dev_priv);
+	if (ret < 0) {
+		DRM_ERROR("failed to init WARP engine!\n");
 		return ret;
 	}
 
@@ -557,22 +898,18 @@
 	dev_priv->sarea_priv->last_frame.head = 0;
 	dev_priv->sarea_priv->last_frame.wrap = 0;
 
-	if ( mga_freelist_init( dev, dev_priv ) < 0 ) {
-		DRM_ERROR( "could not initialize freelist\n" );
-		/* Assign dev_private so we can do cleanup. */
-		dev->dev_private = (void *)dev_priv;
-		mga_do_cleanup_dma( dev );
+	if (mga_freelist_init(dev, dev_priv) < 0) {
+		DRM_ERROR("could not initialize freelist\n");
 		return DRM_ERR(ENOMEM);
 	}
 
-	/* Make dev_private visable to others. */
-	dev->dev_private = (void *)dev_priv;
 	return 0;
 }
 
 static int mga_do_cleanup_dma( drm_device_t *dev )
 {
-	DRM_DEBUG( "\n" );
+	int err = 0;
+	DRM_DEBUG("\n");
 
 	/* Make sure interrupts are disabled here because the uninstall ioctl
 	 * may not have been called from userspace and after dev_private
@@ -583,20 +920,49 @@
 	if ( dev->dev_private ) {
 		drm_mga_private_t *dev_priv = dev->dev_private;
 
-		if ( dev_priv->warp != NULL )
-			drm_core_ioremapfree( dev_priv->warp, dev );
-		if ( dev_priv->primary != NULL )
-			drm_core_ioremapfree( dev_priv->primary, dev );
-		if ( dev->agp_buffer_map != NULL )
-			drm_core_ioremapfree( dev->agp_buffer_map, dev );
+		if ((dev_priv->warp != NULL) 
+		    && (dev_priv->mmio->type != _DRM_CONSISTENT))
+			drm_core_ioremapfree(dev_priv->warp, dev);
 
-		if ( dev_priv->head != NULL ) {
-			mga_freelist_cleanup( dev );
+		if ((dev_priv->primary != NULL) 
+		    && (dev_priv->primary->type != _DRM_CONSISTENT))
+			drm_core_ioremapfree(dev_priv->primary, dev);
+
+		if (dev->agp_buffer_map != NULL)
+			drm_core_ioremapfree(dev->agp_buffer_map, dev);
+
+		if (dev_priv->used_new_dma_init) {
+			if (dev_priv->agp_mem != NULL) {
+				dev_priv->agp_textures = NULL;
+				drm_unbind_agp(dev_priv->agp_mem);
+
+				drm_free_agp(dev_priv->agp_mem, dev_priv->agp_pages);
+				dev_priv->agp_pages = 0;
+				dev_priv->agp_mem = NULL;
+			}
+
+			if ((dev->agp != NULL) && dev->agp->acquired) {
+				err = drm_agp_release(dev);
+			}
+
+			dev_priv->used_new_dma_init = 0;
 		}
 
-		drm_free( dev->dev_private, sizeof(drm_mga_private_t),
-			   DRM_MEM_DRIVER );
-		dev->dev_private = NULL;
+		dev_priv->warp = NULL;
+		dev_priv->primary = NULL;
+		dev_priv->mmio = NULL;
+		dev_priv->status = NULL;
+		dev_priv->sarea = NULL;
+		dev_priv->sarea_priv = NULL;
+		dev->agp_buffer_map = NULL;
+
+		memset(&dev_priv->prim, 0, sizeof(dev_priv->prim));
+		dev_priv->warp_pipe = 0;
+		memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys));
+
+		if (dev_priv->head != NULL) {
+			mga_freelist_cleanup(dev);
+		}
 	}
 
 	return 0;
@@ -606,14 +972,20 @@
 {
 	DRM_DEVICE;
 	drm_mga_init_t init;
+	int err;
 
 	LOCK_TEST_WITH_RETURN( dev, filp );
 
-	DRM_COPY_FROM_USER_IOCTL( init, (drm_mga_init_t __user *)data, sizeof(init) );
+	DRM_COPY_FROM_USER_IOCTL(init, (drm_mga_init_t __user *) data,
+				 sizeof(init));
 
 	switch ( init.func ) {
 	case MGA_INIT_DMA:
-		return mga_do_init_dma( dev, &init );
+		err = mga_do_init_dma(dev, &init);
+		if (err) {
+			(void) mga_do_cleanup_dma(dev);
+		}
+		return err;
 	case MGA_CLEANUP_DMA:
 		return mga_do_cleanup_dma( dev );
 	}
@@ -742,7 +1114,21 @@
 	return ret;
 }
 
-void mga_driver_pretakedown(drm_device_t *dev)
+/**
+ * Called just before the module is unloaded.
+ */
+int mga_driver_postcleanup(drm_device_t * dev)
+{
+	drm_free(dev->dev_private, sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
+	dev->dev_private = NULL;
+
+	return 0;
+}
+
+/**
+ * Called when the last opener of the device is closed.
+ */
+void mga_driver_pretakedown(drm_device_t * dev)
 {
 	mga_do_cleanup_dma( dev );
 }
diff --git a/drivers/char/drm/mga_drm.h b/drivers/char/drm/mga_drm.h
index 521d445..d20aab3 100644
--- a/drivers/char/drm/mga_drm.h
+++ b/drivers/char/drm/mga_drm.h
@@ -73,7 +73,8 @@
 
 #define MGA_CARD_TYPE_G200	1
 #define MGA_CARD_TYPE_G400	2
-
+#define MGA_CARD_TYPE_G450	3       /* not currently used */
+#define MGA_CARD_TYPE_G550	4
 
 #define MGA_FRONT		0x1
 #define MGA_BACK		0x2
@@ -225,10 +226,6 @@
 } drm_mga_sarea_t;
 
 
-/* WARNING: If you change any of these defines, make sure to change the
- * defines in the Xserver file (xf86drmMga.h)
- */
-
 /* MGA specific ioctls
  * The device specific ioctl range is 0x40 to 0x79.
  */
@@ -243,6 +240,14 @@
 #define DRM_MGA_BLIT     0x08
 #define DRM_MGA_GETPARAM 0x09
 
+/* 3.2:
+ * ioctls for operating on fences.
+ */
+#define DRM_MGA_SET_FENCE      0x0a
+#define DRM_MGA_WAIT_FENCE     0x0b
+#define DRM_MGA_DMA_BOOTSTRAP  0x0c
+
+
 #define DRM_IOCTL_MGA_INIT     DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INIT, drm_mga_init_t)
 #define DRM_IOCTL_MGA_FLUSH    DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_FLUSH, drm_lock_t)
 #define DRM_IOCTL_MGA_RESET    DRM_IO(  DRM_COMMAND_BASE + DRM_MGA_RESET)
@@ -253,6 +258,9 @@
 #define DRM_IOCTL_MGA_ILOAD    DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_ILOAD, drm_mga_iload_t)
 #define DRM_IOCTL_MGA_BLIT     DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_BLIT, drm_mga_blit_t)
 #define DRM_IOCTL_MGA_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_GETPARAM, drm_mga_getparam_t)
+#define DRM_IOCTL_MGA_SET_FENCE     DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_SET_FENCE, uint32_t)
+#define DRM_IOCTL_MGA_WAIT_FENCE    DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_WAIT_FENCE, uint32_t)
+#define DRM_IOCTL_MGA_DMA_BOOTSTRAP DRM_IOWR(DRM_COMMAND_BASE + DRM_MGA_DMA_BOOTSTRAP, drm_mga_dma_bootstrap_t)
 
 typedef struct _drm_mga_warp_index {
    	int installed;
@@ -291,12 +299,72 @@
 	unsigned long buffers_offset;
 } drm_mga_init_t;
 
-typedef struct drm_mga_fullscreen {
-	enum {
-		MGA_INIT_FULLSCREEN    = 0x01,
-		MGA_CLEANUP_FULLSCREEN = 0x02
-	} func;
-} drm_mga_fullscreen_t;
+typedef struct drm_mga_dma_bootstrap {
+	/**
+	 * \name AGP texture region
+	 * 
+	 * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, these fields will
+	 * be filled in with the actual AGP texture settings.
+	 * 
+	 * \warning
+	 * If these fields are non-zero, but drm_mga_dma_bootstrap::agp_mode
+	 * is zero, it means that PCI memory (most likely through the use of
+	 * an IOMMU) is being used for "AGP" textures.
+	 */
+	/*@{*/
+	unsigned long texture_handle; /**< Handle used to map AGP textures. */
+	uint32_t     texture_size;    /**< Size of the AGP texture region. */
+	/*@}*/
+
+
+	/**
+	 * Requested size of the primary DMA region.
+	 * 
+	 * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
+	 * filled in with the actual size of the allocated primary DMA region.
+	 */
+	uint32_t primary_size;
+
+
+	/**
+	 * Requested number of secondary DMA buffers.
+	 * 
+	 * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
+	 * filled in with the actual number of secondary DMA buffers
+	 * allocated.  Particularly when PCI DMA is used, this may be
+	 * (substantially) less than the number requested.
+	 */
+	uint32_t secondary_bin_count;
+	
+	
+	/**
+	 * Requested size of each secondary DMA buffer.
+	 * 
+	 * While the kernel \b is free to reduce
+	 * drm_mga_dma_bootstrap::secondary_bin_count, it is \b not allowed
+	 * to reduce drm_mga_dma_bootstrap::secondary_bin_size.
+	 */
+	uint32_t secondary_bin_size;
+
+
+	/**
+	 * Bit-wise mask of AGPSTAT2_* values.  Currently only \c AGPSTAT2_1X,
+	 * \c AGPSTAT2_2X, and \c AGPSTAT2_4X are supported.  If this value is
+	 * zero, it means that PCI DMA should be used, even if AGP is
+	 * possible.
+	 * 
+	 * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be
+	 * filled in with the actual AGP mode.  If AGP was not available
+	 * (i.e., PCI DMA was used), this value will be zero.
+	 */
+	uint32_t agp_mode;
+
+
+	/**
+	 * Desired AGP GART size, measured in megabytes.
+	 */
+	uint8_t agp_size;
+} drm_mga_dma_bootstrap_t;
 
 typedef struct drm_mga_clear {
 	unsigned int flags;
@@ -341,6 +409,14 @@
  */
 #define MGA_PARAM_IRQ_NR            1
 
+/* 3.2: Query the actual card type.  The DDX only distinguishes between
+ * G200 chips and non-G200 chips, which it calls G400.  It turns out that
+ * there are some very subtle differences between the G4x0 chips and the G550
+ * chips.  Using this parameter query, a client-side driver can detect the
+ * difference between a G4x0 and a G550.
+ */
+#define MGA_PARAM_CARD_TYPE         2
+
 typedef struct drm_mga_getparam {
 	int param;
 	void __user *value;
diff --git a/drivers/char/drm/mga_drv.c b/drivers/char/drm/mga_drv.c
index 844cca9..daabbba 100644
--- a/drivers/char/drm/mga_drv.c
+++ b/drivers/char/drm/mga_drv.c
@@ -38,8 +38,15 @@
   
 #include "drm_pciids.h"
 
+static int mga_driver_device_is_agp(drm_device_t * dev);
 static int postinit( struct drm_device *dev, unsigned long flags )
 {
+	drm_mga_private_t * const dev_priv =
+		(drm_mga_private_t *) dev->dev_private;
+
+	dev_priv->mmio_base = pci_resource_start(dev->pdev, 1);
+	dev_priv->mmio_size = pci_resource_len(dev->pdev, 1);
+
 	dev->counters += 3;
 	dev->types[6] = _DRM_STAT_IRQ;
 	dev->types[7] = _DRM_STAT_PRIMARY;
@@ -79,8 +86,11 @@
 
 static struct drm_driver driver = {
 	.driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
+	.preinit = mga_driver_preinit,
+	.postcleanup = mga_driver_postcleanup,
 	.pretakedown = mga_driver_pretakedown,
 	.dma_quiescent = mga_driver_dma_quiescent,
+	.device_is_agp = mga_driver_device_is_agp,
 	.vblank_wait = mga_driver_vblank_wait,
 	.irq_preinstall = mga_driver_irq_preinstall,
 	.irq_postinstall = mga_driver_irq_postinstall,
@@ -128,3 +138,38 @@
 MODULE_AUTHOR( DRIVER_AUTHOR );
 MODULE_DESCRIPTION( DRIVER_DESC );
 MODULE_LICENSE("GPL and additional rights");
+
+/**
+ * Determine if the device really is AGP or not.
+ *
+ * In addition to the usual tests performed by \c drm_device_is_agp, this
+ * function detects PCI G450 cards that appear to the system exactly like
+ * AGP G450 cards.
+ *
+ * \param dev   The device to be tested.
+ *
+ * \returns
+ * If the device is a PCI G450, zero is returned.  Otherwise 2 is returned.
+ */
+int mga_driver_device_is_agp(drm_device_t * dev)
+{
+	const struct pci_dev * const pdev = dev->pdev;
+
+
+	/* There are PCI versions of the G450.  These cards have the
+	 * same PCI ID as the AGP G450, but have an additional PCI-to-PCI
+	 * bridge chip.  We detect these cards, which are not currently
+	 * supported by this driver, by looking at the device ID of the
+	 * bus the "card" is on.  If vendor is 0x3388 (Hint Corp) and the
+	 * device is 0x0021 (HB6 Universal PCI-PCI bridge), we reject the
+	 * device.
+	 */
+	
+	if ( (pdev->device == 0x0525)
+	     && (pdev->bus->self->vendor == 0x3388)
+	     && (pdev->bus->self->device == 0x0021) ) {
+		return 0;
+	}
+
+	return 2;
+}
diff --git a/drivers/char/drm/mga_drv.h b/drivers/char/drm/mga_drv.h
index 9412e281..b22fdbd 100644
--- a/drivers/char/drm/mga_drv.h
+++ b/drivers/char/drm/mga_drv.h
@@ -38,10 +38,10 @@
 
 #define DRIVER_NAME		"mga"
 #define DRIVER_DESC		"Matrox G200/G400"
-#define DRIVER_DATE		"20021029"
+#define DRIVER_DATE		"20050607"
 
 #define DRIVER_MAJOR		3
-#define DRIVER_MINOR		1
+#define DRIVER_MINOR		2
 #define DRIVER_PATCHLEVEL	0
 
 typedef struct drm_mga_primary_buffer {
@@ -87,9 +87,43 @@
 	int chipset;
 	int usec_timeout;
 
+	/**
+	 * If set, the new DMA initialization sequence was used.  This is
+	 * primarily used to select how the driver should uninitialize its
+	 * internal DMA structures.
+	 */
+	int used_new_dma_init;
+
+	/**
+	 * If AGP memory is used for DMA buffers, this will be the value
+	 * \c MGA_PAGPXFER.  Otherwise, it will be zero (for a PCI transfer).
+	 */
+	u32 dma_access;
+
+	/**
+	 * If AGP memory is used for DMA buffers, this will be the value
+	 * \c MGA_WAGP_ENABLE.  Otherwise, it will be zero (for a PCI
+	 * transfer).
+	 */
+	u32 wagp_enable;
+
+	/**
+	 * \name MMIO region parameters.
+	 * 
+	 * \sa drm_mga_private_t::mmio
+	 */
+	/*@{*/
+	u32 mmio_base;             /**< Bus address of base of MMIO. */
+	u32 mmio_size;             /**< Size of the MMIO region. */
+	/*@}*/
+
 	u32 clear_cmd;
 	u32 maccess;
 
+	wait_queue_head_t fence_queue;
+	atomic_t last_fence_retired;
+	u32 next_fence_to_post;
+
 	unsigned int fb_cpp;
 	unsigned int front_offset;
 	unsigned int front_pitch;
@@ -108,35 +142,43 @@
 	drm_local_map_t *status;
 	drm_local_map_t *warp;
 	drm_local_map_t *primary;
-	drm_local_map_t *buffers;
 	drm_local_map_t *agp_textures;
+	
+	DRM_AGP_MEM *agp_mem;
+	unsigned int agp_pages;
 } drm_mga_private_t;
 
 				/* mga_dma.c */
-extern int mga_dma_init( DRM_IOCTL_ARGS );
-extern int mga_dma_flush( DRM_IOCTL_ARGS );
-extern int mga_dma_reset( DRM_IOCTL_ARGS );
-extern int mga_dma_buffers( DRM_IOCTL_ARGS );
-extern void mga_driver_pretakedown(drm_device_t *dev);
-extern int mga_driver_dma_quiescent(drm_device_t *dev);
+extern int mga_driver_preinit(drm_device_t * dev, unsigned long flags);
+extern int mga_dma_bootstrap(DRM_IOCTL_ARGS);
+extern int mga_dma_init(DRM_IOCTL_ARGS);
+extern int mga_dma_flush(DRM_IOCTL_ARGS);
+extern int mga_dma_reset(DRM_IOCTL_ARGS);
+extern int mga_dma_buffers(DRM_IOCTL_ARGS);
+extern int mga_driver_postcleanup(drm_device_t * dev);
+extern void mga_driver_pretakedown(drm_device_t * dev);
+extern int mga_driver_dma_quiescent(drm_device_t * dev);
 
-extern int mga_do_wait_for_idle( drm_mga_private_t *dev_priv );
+extern int mga_do_wait_for_idle(drm_mga_private_t * dev_priv);
 
-extern void mga_do_dma_flush( drm_mga_private_t *dev_priv );
-extern void mga_do_dma_wrap_start( drm_mga_private_t *dev_priv );
-extern void mga_do_dma_wrap_end( drm_mga_private_t *dev_priv );
+extern void mga_do_dma_flush(drm_mga_private_t * dev_priv);
+extern void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv);
+extern void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv);
 
 extern int mga_freelist_put( drm_device_t *dev, drm_buf_t *buf );
 
 				/* mga_warp.c */
-extern int mga_warp_install_microcode( drm_mga_private_t *dev_priv );
-extern int mga_warp_init( drm_mga_private_t *dev_priv );
+extern unsigned int mga_warp_microcode_size(const drm_mga_private_t * dev_priv);
+extern int mga_warp_install_microcode(drm_mga_private_t * dev_priv);
+extern int mga_warp_init(drm_mga_private_t * dev_priv);
 
-extern int mga_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence);
-extern irqreturn_t mga_driver_irq_handler( DRM_IRQ_ARGS );
-extern void mga_driver_irq_preinstall( drm_device_t *dev );
-extern void mga_driver_irq_postinstall( drm_device_t *dev );
-extern void mga_driver_irq_uninstall( drm_device_t *dev );
+				/* mga_irq.c */
+extern int mga_driver_fence_wait(drm_device_t * dev, unsigned int *sequence);
+extern int mga_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence);
+extern irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS);
+extern void mga_driver_irq_preinstall(drm_device_t * dev);
+extern void mga_driver_irq_postinstall(drm_device_t * dev);
+extern void mga_driver_irq_uninstall(drm_device_t * dev);
 extern long mga_compat_ioctl(struct file *filp, unsigned int cmd,
 			     unsigned long arg);
 
@@ -527,6 +569,12 @@
  */
 #define MGA_EXEC 			0x0100
 
+/* AGP PLL encoding (for G200 only).
+ */
+#define MGA_AGP_PLL 			0x1e4c
+#	define MGA_AGP2XPLL_DISABLE		(0 << 0)
+#	define MGA_AGP2XPLL_ENABLE		(1 << 0)
+
 /* Warp registers
  */
 #define MGA_WR0				0x2d00
diff --git a/drivers/char/drm/mga_ioc32.c b/drivers/char/drm/mga_ioc32.c
index bc745cf..77d738e 100644
--- a/drivers/char/drm/mga_ioc32.c
+++ b/drivers/char/drm/mga_ioc32.c
@@ -129,9 +129,76 @@
 			 DRM_IOCTL_MGA_GETPARAM, (unsigned long)getparam);
 }
 
+typedef struct drm_mga_drm_bootstrap32 {
+	u32 texture_handle;
+	u32 texture_size;
+	u32 primary_size;
+	u32 secondary_bin_count;
+	u32 secondary_bin_size;
+	u32 agp_mode;
+	u8 agp_size;
+} drm_mga_dma_bootstrap32_t;
+
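+/* Translate the 32-bit layout above into a native drm_mga_dma_bootstrap_t in
+ * user space, invoke the real ioctl, and copy the updated fields back.
+ */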
+static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
+				    unsigned long arg)
+{
+	drm_mga_dma_bootstrap32_t dma_bootstrap32;
+	drm_mga_dma_bootstrap_t __user *dma_bootstrap;
+	int err;
+
+	if (copy_from_user(&dma_bootstrap32, (void __user *)arg,
+			   sizeof(dma_bootstrap32)))
+		return -EFAULT;
+
+	dma_bootstrap = compat_alloc_user_space(sizeof(*dma_bootstrap));
+	if (!access_ok(VERIFY_WRITE, dma_bootstrap, sizeof(*dma_bootstrap))
+	    || __put_user(dma_bootstrap32.texture_handle,
+			  &dma_bootstrap->texture_handle)
+	    || __put_user(dma_bootstrap32.texture_size,
+			  &dma_bootstrap->texture_size)
+	    || __put_user(dma_bootstrap32.primary_size,
+			  &dma_bootstrap->primary_size)
+	    || __put_user(dma_bootstrap32.secondary_bin_count,
+			  &dma_bootstrap->secondary_bin_count)
+	    || __put_user(dma_bootstrap32.secondary_bin_size,
+			  &dma_bootstrap->secondary_bin_size)
+	    || __put_user(dma_bootstrap32.agp_mode, &dma_bootstrap->agp_mode)
+	    || __put_user(dma_bootstrap32.agp_size, &dma_bootstrap->agp_size))
+		return -EFAULT;
+
+	err = drm_ioctl(file->f_dentry->d_inode, file,
+			DRM_IOCTL_MGA_DMA_BOOTSTRAP,
+			(unsigned long)dma_bootstrap);
+	if (err)
+		return err;
+
+	if (__get_user(dma_bootstrap32.texture_handle,
+		       &dma_bootstrap->texture_handle)
+	    || __get_user(dma_bootstrap32.texture_size,
+			  &dma_bootstrap->texture_size)
+	    || __get_user(dma_bootstrap32.primary_size,
+			  &dma_bootstrap->primary_size)
+	    || __get_user(dma_bootstrap32.secondary_bin_count,
+			  &dma_bootstrap->secondary_bin_count)
+	    || __get_user(dma_bootstrap32.secondary_bin_size,
+			  &dma_bootstrap->secondary_bin_size)
+	    || __get_user(dma_bootstrap32.agp_mode,
+			  &dma_bootstrap->agp_mode)
+	    || __get_user(dma_bootstrap32.agp_size,
+			  &dma_bootstrap->agp_size))
+		return -EFAULT;
+
+	if (copy_to_user((void __user *)arg, &dma_bootstrap32,
+	    		 sizeof(dma_bootstrap32)))
+		return -EFAULT;
+
+	return 0;
+}
+
 drm_ioctl_compat_t *mga_compat_ioctls[] = {
 	[DRM_MGA_INIT] = compat_mga_init,
 	[DRM_MGA_GETPARAM] = compat_mga_getparam,
+	[DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
 };
 
 /**
diff --git a/drivers/char/drm/mga_irq.c b/drivers/char/drm/mga_irq.c
index bc0b6b5..52eaa4e 100644
--- a/drivers/char/drm/mga_irq.c
+++ b/drivers/char/drm/mga_irq.c
@@ -41,15 +41,40 @@
 	drm_mga_private_t *dev_priv = 
 	   (drm_mga_private_t *)dev->dev_private;
 	int status;
+	int handled = 0;
 
-	status = MGA_READ( MGA_STATUS );
-	
+	status = MGA_READ(MGA_STATUS);
+
 	/* VBLANK interrupt */
 	if ( status & MGA_VLINEPEN ) {
 		MGA_WRITE( MGA_ICLEAR, MGA_VLINEICLR );
 		atomic_inc(&dev->vbl_received);
 		DRM_WAKEUP(&dev->vbl_queue);
-		drm_vbl_send_signals( dev );
+		drm_vbl_send_signals(dev);
+		handled = 1;
+	}
+
+	/* SOFTRAP interrupt */
+	if (status & MGA_SOFTRAPEN) {
+		const u32 prim_start = MGA_READ(MGA_PRIMADDRESS);
+		const u32 prim_end   = MGA_READ(MGA_PRIMEND);
+
+
+		MGA_WRITE(MGA_ICLEAR, MGA_SOFTRAPICLR);
+
+		/* In addition to clearing the interrupt-pending bit, we
+		 * have to write to MGA_PRIMEND to re-start the DMA operation.
+		 */
+		if ( (prim_start & ~0x03) != (prim_end & ~0x03) ) {
+			MGA_WRITE(MGA_PRIMEND, prim_end);
+		}
+
+		atomic_inc(&dev_priv->last_fence_retired);
+		DRM_WAKEUP(&dev_priv->fence_queue);
+		handled = 1;
+	}
+
+	if ( handled ) {
 		return IRQ_HANDLED;
 	}
 	return IRQ_NONE;
@@ -73,9 +98,28 @@
 	return ret;
 }
 
-void mga_driver_irq_preinstall( drm_device_t *dev ) {
-  	drm_mga_private_t *dev_priv = 
-	   (drm_mga_private_t *)dev->dev_private;
+int mga_driver_fence_wait(drm_device_t * dev, unsigned int *sequence)
+{
+	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
+	unsigned int cur_fence;
+	int ret = 0;
+
+	/* Assume that the user has missed the current sequence number
+	 * by about a day rather than wanting to wait for years
+	 * using fences.
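+	 * The unsigned subtraction in the wait condition also makes the test
+	 * tolerant of wrap-around of the 32-bit fence counter.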
+	 */
+	DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
+		    (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
+		      - *sequence) <= (1 << 23)));
+
+	*sequence = cur_fence;
+
+	return ret;
+}
+
+void mga_driver_irq_preinstall(drm_device_t * dev)
+{
+	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
 
 	/* Disable *all* interrupts */
       	MGA_WRITE( MGA_IEN, 0 );
@@ -83,12 +127,14 @@
    	MGA_WRITE( MGA_ICLEAR, ~0 );
 }
 
-void mga_driver_irq_postinstall( drm_device_t *dev ) {
-  	drm_mga_private_t *dev_priv = 
-	   (drm_mga_private_t *)dev->dev_private;
+void mga_driver_irq_postinstall(drm_device_t * dev)
+{
+	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
 
-	/* Turn on VBL interrupt */
-   	MGA_WRITE( MGA_IEN, MGA_VLINEIEN );
+	DRM_INIT_WAITQUEUE( &dev_priv->fence_queue );
+
+	/* Turn on vertical blank interrupt and soft trap interrupt. */
+	MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN);
 }
 
 void mga_driver_irq_uninstall( drm_device_t *dev ) {
@@ -98,5 +144,7 @@
 		return;
 
 	/* Disable *all* interrupts */
-	MGA_WRITE( MGA_IEN, 0 );
+	MGA_WRITE(MGA_IEN, 0);
+	
+	dev->irq_enabled = 0;
 }
diff --git a/drivers/char/drm/mga_state.c b/drivers/char/drm/mga_state.c
index 3c7a8f5..05bbb47 100644
--- a/drivers/char/drm/mga_state.c
+++ b/drivers/char/drm/mga_state.c
@@ -53,16 +53,16 @@
 
 	/* Force reset of DWGCTL on G400 (eliminates clip disable bit).
 	 */
-	if ( dev_priv->chipset == MGA_CARD_TYPE_G400 ) {
-		DMA_BLOCK( MGA_DWGCTL,		ctx->dwgctl,
-			   MGA_LEN + MGA_EXEC,	0x80000000,
-			   MGA_DWGCTL,		ctx->dwgctl,
-			   MGA_LEN + MGA_EXEC,	0x80000000 );
+	if (dev_priv->chipset == MGA_CARD_TYPE_G400) {
+		DMA_BLOCK(MGA_DWGCTL, ctx->dwgctl,
+			  MGA_LEN + MGA_EXEC, 0x80000000,
+			  MGA_DWGCTL, ctx->dwgctl,
+			  MGA_LEN + MGA_EXEC, 0x80000000);
 	}
-	DMA_BLOCK( MGA_DMAPAD,	0x00000000,
-		   MGA_CXBNDRY,	(box->x2 << 16) | box->x1,
-		   MGA_YTOP,	box->y1 * pitch,
-		   MGA_YBOT,	box->y2 * pitch );
+	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
+		  MGA_CXBNDRY, ((box->x2 - 1) << 16) | box->x1,
+		  MGA_YTOP, box->y1 * pitch,
+		  MGA_YBOT, (box->y2 - 1) * pitch);
 
 	ADVANCE_DMA();
 }
@@ -260,12 +260,11 @@
 
 	/* Padding required to to hardware bug.
 	 */
-	DMA_BLOCK( MGA_DMAPAD,	0xffffffff,
-		   MGA_DMAPAD,	0xffffffff,
-		   MGA_DMAPAD,	0xffffffff,
-		   MGA_WIADDR,	(dev_priv->warp_pipe_phys[pipe] |
-				 MGA_WMODE_START |
-				 MGA_WAGP_ENABLE) );
+	DMA_BLOCK(MGA_DMAPAD, 0xffffffff,
+		  MGA_DMAPAD, 0xffffffff,
+		  MGA_DMAPAD, 0xffffffff,
+		  MGA_WIADDR, (dev_priv->warp_pipe_phys[pipe] |
+			       MGA_WMODE_START | dev_priv->wagp_enable));
 
 	ADVANCE_DMA();
 }
@@ -342,12 +341,11 @@
 		   MGA_WR60,	MGA_G400_WR_MAGIC );	/* tex1 height       */
 
 	/* Padding required to to hardware bug */
-	DMA_BLOCK( MGA_DMAPAD,	0xffffffff,
-		   MGA_DMAPAD,	0xffffffff,
-		   MGA_DMAPAD,	0xffffffff,
-		   MGA_WIADDR2,	(dev_priv->warp_pipe_phys[pipe] |
-				 MGA_WMODE_START |
-				 MGA_WAGP_ENABLE) );
+	DMA_BLOCK(MGA_DMAPAD, 0xffffffff,
+		  MGA_DMAPAD, 0xffffffff,
+		  MGA_DMAPAD, 0xffffffff,
+		  MGA_WIADDR2, (dev_priv->warp_pipe_phys[pipe] |
+				MGA_WMODE_START | dev_priv->wagp_enable));
 
 	ADVANCE_DMA();
 }
@@ -459,9 +457,9 @@
 	if ( dirty & MGA_UPLOAD_TEX0 )
 		ret |= mga_verify_tex( dev_priv, 0 );
 
-	if ( dev_priv->chipset == MGA_CARD_TYPE_G400 ) {
-		if ( dirty & MGA_UPLOAD_TEX1 )
-			ret |= mga_verify_tex( dev_priv, 1 );
+	if (dev_priv->chipset >= MGA_CARD_TYPE_G400) {
+		if (dirty & MGA_UPLOAD_TEX1)
+			ret |= mga_verify_tex(dev_priv, 1);
 
 		if ( dirty & MGA_UPLOAD_PIPE )
 			ret |= ( sarea_priv->warp_pipe > MGA_MAX_G400_PIPES );
@@ -686,12 +684,12 @@
 
 			BEGIN_DMA( 1 );
 
-			DMA_BLOCK( MGA_DMAPAD,		0x00000000,
-				   MGA_DMAPAD,		0x00000000,
-				   MGA_SECADDRESS,	(address |
-							 MGA_DMA_VERTEX),
-				   MGA_SECEND,		((address + length) |
-							 MGA_PAGPXFER) );
+			DMA_BLOCK(MGA_DMAPAD, 0x00000000,
+				  MGA_DMAPAD, 0x00000000,
+				  MGA_SECADDRESS, (address |
+						   MGA_DMA_VERTEX),
+				  MGA_SECEND, ((address + length) |
+					       dev_priv->dma_access));
 
 			ADVANCE_DMA();
 		} while ( ++i < sarea_priv->nbox );
@@ -733,11 +731,11 @@
 
 			BEGIN_DMA( 1 );
 
-			DMA_BLOCK( MGA_DMAPAD,		0x00000000,
-				   MGA_DMAPAD,		0x00000000,
-				   MGA_SETUPADDRESS,	address + start,
-				   MGA_SETUPEND,	((address + end) |
-							 MGA_PAGPXFER) );
+			DMA_BLOCK(MGA_DMAPAD, 0x00000000,
+				  MGA_DMAPAD, 0x00000000,
+				  MGA_SETUPADDRESS, address + start,
+				  MGA_SETUPEND, ((address + end) |
+						 dev_priv->dma_access));
 
 			ADVANCE_DMA();
 		} while ( ++i < sarea_priv->nbox );
@@ -764,7 +762,7 @@
 	drm_mga_private_t *dev_priv = dev->dev_private;
 	drm_mga_buf_priv_t *buf_priv = buf->dev_private;
 	drm_mga_context_regs_t *ctx = &dev_priv->sarea_priv->context_state;
-	u32 srcorg = buf->bus_address | MGA_SRCACC_AGP | MGA_SRCMAP_SYSMEM;
+	u32 srcorg = buf->bus_address | dev_priv->dma_access | MGA_SRCMAP_SYSMEM;
 	u32 y2;
 	DMA_LOCALS;
 	DRM_DEBUG( "buf=%d used=%d\n", buf->idx, buf->used );
@@ -1095,6 +1093,9 @@
 	case MGA_PARAM_IRQ_NR:
 		value = dev->irq;
 		break;
+	case MGA_PARAM_CARD_TYPE:
+		value = dev_priv->chipset;
+		break;
 	default:
 		return DRM_ERR(EINVAL);
 	}
@@ -1107,17 +1108,82 @@
 	return 0;
 }
 
+static int mga_set_fence(DRM_IOCTL_ARGS)
+{
+	DRM_DEVICE;
+	drm_mga_private_t *dev_priv = dev->dev_private;
+	u32 temp;
+	DMA_LOCALS;
+
+	if (!dev_priv) {
+		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+		return DRM_ERR(EINVAL);
+	}
+
+	DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
+
+	/* I would normally do this assignment in the declaration of temp,
+	 * but dev_priv may be NULL.
+	 */
+
+	temp = dev_priv->next_fence_to_post;
+	dev_priv->next_fence_to_post++;
+
+	BEGIN_DMA(1);
+	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
+		  MGA_DMAPAD, 0x00000000,
+		  MGA_DMAPAD, 0x00000000,
+		  MGA_SOFTRAP, 0x00000000);
+	ADVANCE_DMA();
+
+	if (DRM_COPY_TO_USER( (u32 __user *) data, & temp, sizeof(u32))) {
+		DRM_ERROR("copy_to_user\n");
+		return DRM_ERR(EFAULT);
+	}
+
+	return 0;
+}
+
+static int mga_wait_fence(DRM_IOCTL_ARGS)
+{
+	DRM_DEVICE;
+	drm_mga_private_t *dev_priv = dev->dev_private;
+	u32 fence;
+
+	if (!dev_priv) {
+		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+		return DRM_ERR(EINVAL);
+	}
+
+	DRM_COPY_FROM_USER_IOCTL(fence, (u32 __user *) data, sizeof(u32));
+
+	DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
+
+	mga_driver_fence_wait(dev, & fence);
+
+	if (DRM_COPY_TO_USER( (u32 __user *) data, & fence, sizeof(u32))) {
+		DRM_ERROR("copy_to_user\n");
+		return DRM_ERR(EFAULT);
+	}
+
+	return 0;
+}
+
 drm_ioctl_desc_t mga_ioctls[] = {
-	[DRM_IOCTL_NR(DRM_MGA_INIT)]    = { mga_dma_init,    1, 1 },
-	[DRM_IOCTL_NR(DRM_MGA_FLUSH)]   = { mga_dma_flush,   1, 0 },
-	[DRM_IOCTL_NR(DRM_MGA_RESET)]   = { mga_dma_reset,   1, 0 },
-	[DRM_IOCTL_NR(DRM_MGA_SWAP)]    = { mga_dma_swap,    1, 0 },
-	[DRM_IOCTL_NR(DRM_MGA_CLEAR)]   = { mga_dma_clear,   1, 0 },
-	[DRM_IOCTL_NR(DRM_MGA_VERTEX)]  = { mga_dma_vertex,  1, 0 },
-	[DRM_IOCTL_NR(DRM_MGA_INDICES)] = { mga_dma_indices, 1, 0 },
-	[DRM_IOCTL_NR(DRM_MGA_ILOAD)]   = { mga_dma_iload,   1, 0 },
-	[DRM_IOCTL_NR(DRM_MGA_BLIT)]    = { mga_dma_blit,    1, 0 },
-	[DRM_IOCTL_NR(DRM_MGA_GETPARAM)]= { mga_getparam,    1, 0 },
+	[DRM_IOCTL_NR(DRM_MGA_INIT)] = {mga_dma_init, 1, 1},
+	[DRM_IOCTL_NR(DRM_MGA_FLUSH)] = {mga_dma_flush, 1, 0},
+	[DRM_IOCTL_NR(DRM_MGA_RESET)] = {mga_dma_reset, 1, 0},
+	[DRM_IOCTL_NR(DRM_MGA_SWAP)] = {mga_dma_swap, 1, 0},
+	[DRM_IOCTL_NR(DRM_MGA_CLEAR)] = {mga_dma_clear, 1, 0},
+	[DRM_IOCTL_NR(DRM_MGA_VERTEX)] = {mga_dma_vertex, 1, 0},
+	[DRM_IOCTL_NR(DRM_MGA_INDICES)] = {mga_dma_indices, 1, 0},
+	[DRM_IOCTL_NR(DRM_MGA_ILOAD)] = {mga_dma_iload, 1, 0},
+	[DRM_IOCTL_NR(DRM_MGA_BLIT)] = {mga_dma_blit, 1, 0},
+	[DRM_IOCTL_NR(DRM_MGA_GETPARAM)] = {mga_getparam, 1, 0},
+	[DRM_IOCTL_NR(DRM_MGA_SET_FENCE)] = {mga_set_fence, 1, 0},
+	[DRM_IOCTL_NR(DRM_MGA_WAIT_FENCE)] = {mga_wait_fence, 1, 0},
+	[DRM_IOCTL_NR(DRM_MGA_DMA_BOOTSTRAP)] = {mga_dma_bootstrap, 1, 1},
+
 };
 
 int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls);
diff --git a/drivers/char/drm/mga_warp.c b/drivers/char/drm/mga_warp.c
index 0a3a0cc..55ccc8a 100644
--- a/drivers/char/drm/mga_warp.c
+++ b/drivers/char/drm/mga_warp.c
@@ -48,65 +48,52 @@
 	vcbase += WARP_UCODE_SIZE( which );				\
 } while (0)
 
+static const unsigned int mga_warp_g400_microcode_size =
+	       (WARP_UCODE_SIZE(warp_g400_tgz) +
+		WARP_UCODE_SIZE(warp_g400_tgza) +
+		WARP_UCODE_SIZE(warp_g400_tgzaf) +
+		WARP_UCODE_SIZE(warp_g400_tgzf) +
+		WARP_UCODE_SIZE(warp_g400_tgzs) +
+		WARP_UCODE_SIZE(warp_g400_tgzsa) +
+		WARP_UCODE_SIZE(warp_g400_tgzsaf) +
+		WARP_UCODE_SIZE(warp_g400_tgzsf) +
+		WARP_UCODE_SIZE(warp_g400_t2gz) +
+		WARP_UCODE_SIZE(warp_g400_t2gza) +
+		WARP_UCODE_SIZE(warp_g400_t2gzaf) +
+		WARP_UCODE_SIZE(warp_g400_t2gzf) +
+		WARP_UCODE_SIZE(warp_g400_t2gzs) +
+		WARP_UCODE_SIZE(warp_g400_t2gzsa) +
+		WARP_UCODE_SIZE(warp_g400_t2gzsaf) +
+		WARP_UCODE_SIZE(warp_g400_t2gzsf));
 
-static unsigned int mga_warp_g400_microcode_size( drm_mga_private_t *dev_priv )
+static const unsigned int mga_warp_g200_microcode_size =
+	       (WARP_UCODE_SIZE(warp_g200_tgz) +
+		WARP_UCODE_SIZE(warp_g200_tgza) +
+		WARP_UCODE_SIZE(warp_g200_tgzaf) +
+		WARP_UCODE_SIZE(warp_g200_tgzf) +
+		WARP_UCODE_SIZE(warp_g200_tgzs) +
+		WARP_UCODE_SIZE(warp_g200_tgzsa) +
+		WARP_UCODE_SIZE(warp_g200_tgzsaf) +
+		WARP_UCODE_SIZE(warp_g200_tgzsf));
+
+
+unsigned int mga_warp_microcode_size(const drm_mga_private_t * dev_priv)
 {
-	unsigned int size;
-
-	size = ( WARP_UCODE_SIZE( warp_g400_tgz ) +
-		 WARP_UCODE_SIZE( warp_g400_tgza ) +
-		 WARP_UCODE_SIZE( warp_g400_tgzaf ) +
-		 WARP_UCODE_SIZE( warp_g400_tgzf ) +
-		 WARP_UCODE_SIZE( warp_g400_tgzs ) +
-		 WARP_UCODE_SIZE( warp_g400_tgzsa ) +
-		 WARP_UCODE_SIZE( warp_g400_tgzsaf ) +
-		 WARP_UCODE_SIZE( warp_g400_tgzsf ) +
-		 WARP_UCODE_SIZE( warp_g400_t2gz ) +
-		 WARP_UCODE_SIZE( warp_g400_t2gza ) +
-		 WARP_UCODE_SIZE( warp_g400_t2gzaf ) +
-		 WARP_UCODE_SIZE( warp_g400_t2gzf ) +
-		 WARP_UCODE_SIZE( warp_g400_t2gzs ) +
-		 WARP_UCODE_SIZE( warp_g400_t2gzsa ) +
-		 WARP_UCODE_SIZE( warp_g400_t2gzsaf ) +
-		 WARP_UCODE_SIZE( warp_g400_t2gzsf ) );
-
-	size = PAGE_ALIGN( size );
-
-	DRM_DEBUG( "G400 ucode size = %d bytes\n", size );
-	return size;
-}
-
-static unsigned int mga_warp_g200_microcode_size( drm_mga_private_t *dev_priv )
-{
-	unsigned int size;
-
-	size = ( WARP_UCODE_SIZE( warp_g200_tgz ) +
-		 WARP_UCODE_SIZE( warp_g200_tgza ) +
-		 WARP_UCODE_SIZE( warp_g200_tgzaf ) +
-		 WARP_UCODE_SIZE( warp_g200_tgzf ) +
-		 WARP_UCODE_SIZE( warp_g200_tgzs ) +
-		 WARP_UCODE_SIZE( warp_g200_tgzsa ) +
-		 WARP_UCODE_SIZE( warp_g200_tgzsaf ) +
-		 WARP_UCODE_SIZE( warp_g200_tgzsf ) );
-
-	size = PAGE_ALIGN( size );
-
-	DRM_DEBUG( "G200 ucode size = %d bytes\n", size );
-	return size;
+	switch (dev_priv->chipset) {
+	case MGA_CARD_TYPE_G400:
+	case MGA_CARD_TYPE_G550:
+		return PAGE_ALIGN(mga_warp_g400_microcode_size);
+	case MGA_CARD_TYPE_G200:
+		return PAGE_ALIGN(mga_warp_g200_microcode_size);
+	default:
+		return 0;
+	}
 }
 
 static int mga_warp_install_g400_microcode( drm_mga_private_t *dev_priv )
 {
 	unsigned char *vcbase = dev_priv->warp->handle;
 	unsigned long pcbase = dev_priv->warp->offset;
-	unsigned int size;
-
-	size = mga_warp_g400_microcode_size( dev_priv );
-	if ( size > dev_priv->warp->size ) {
-		DRM_ERROR( "microcode too large! (%u > %lu)\n",
-			   size, dev_priv->warp->size );
-		return DRM_ERR(ENOMEM);
-	}
 
 	memset( dev_priv->warp_pipe_phys, 0,
 		sizeof(dev_priv->warp_pipe_phys) );
@@ -136,35 +123,36 @@
 {
 	unsigned char *vcbase = dev_priv->warp->handle;
 	unsigned long pcbase = dev_priv->warp->offset;
-	unsigned int size;
 
-	size = mga_warp_g200_microcode_size( dev_priv );
-	if ( size > dev_priv->warp->size ) {
-		DRM_ERROR( "microcode too large! (%u > %lu)\n",
-			   size, dev_priv->warp->size );
-		return DRM_ERR(ENOMEM);
-	}
+	memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys));
 
-	memset( dev_priv->warp_pipe_phys, 0,
-		sizeof(dev_priv->warp_pipe_phys) );
-
-	WARP_UCODE_INSTALL( warp_g200_tgz,	MGA_WARP_TGZ );
-	WARP_UCODE_INSTALL( warp_g200_tgzf,	MGA_WARP_TGZF );
-	WARP_UCODE_INSTALL( warp_g200_tgza,	MGA_WARP_TGZA );
-	WARP_UCODE_INSTALL( warp_g200_tgzaf,	MGA_WARP_TGZAF );
-	WARP_UCODE_INSTALL( warp_g200_tgzs,	MGA_WARP_TGZS );
-	WARP_UCODE_INSTALL( warp_g200_tgzsf,	MGA_WARP_TGZSF );
-	WARP_UCODE_INSTALL( warp_g200_tgzsa,	MGA_WARP_TGZSA );
-	WARP_UCODE_INSTALL( warp_g200_tgzsaf,	MGA_WARP_TGZSAF );
+	WARP_UCODE_INSTALL(warp_g200_tgz, MGA_WARP_TGZ);
+	WARP_UCODE_INSTALL(warp_g200_tgzf, MGA_WARP_TGZF);
+	WARP_UCODE_INSTALL(warp_g200_tgza, MGA_WARP_TGZA);
+	WARP_UCODE_INSTALL(warp_g200_tgzaf, MGA_WARP_TGZAF);
+	WARP_UCODE_INSTALL(warp_g200_tgzs, MGA_WARP_TGZS);
+	WARP_UCODE_INSTALL(warp_g200_tgzsf, MGA_WARP_TGZSF);
+	WARP_UCODE_INSTALL(warp_g200_tgzsa, MGA_WARP_TGZSA);
+	WARP_UCODE_INSTALL(warp_g200_tgzsaf, MGA_WARP_TGZSAF);
 
 	return 0;
 }
 
 int mga_warp_install_microcode(	drm_mga_private_t *dev_priv )
 {
-	switch ( dev_priv->chipset ) {
+	const unsigned int size = mga_warp_microcode_size(dev_priv);
+
+	DRM_DEBUG("MGA ucode size = %d bytes\n", size);
+	if (size > dev_priv->warp->size) {
+		DRM_ERROR("microcode too large! (%u > %lu)\n",
+			  size, dev_priv->warp->size);
+		return DRM_ERR(ENOMEM);
+	}
+
+	switch (dev_priv->chipset) {
 	case MGA_CARD_TYPE_G400:
-		return mga_warp_install_g400_microcode( dev_priv );
+	case MGA_CARD_TYPE_G550:
+		return mga_warp_install_g400_microcode(dev_priv);
 	case MGA_CARD_TYPE_G200:
 		return mga_warp_install_g200_microcode( dev_priv );
 	default:
@@ -182,10 +170,11 @@
 	 */
 	switch ( dev_priv->chipset ) {
 	case MGA_CARD_TYPE_G400:
-		MGA_WRITE( MGA_WIADDR2, MGA_WMODE_SUSPEND );
-		MGA_WRITE( MGA_WGETMSB, 0x00000E00 );
-		MGA_WRITE( MGA_WVRTXSZ, 0x00001807 );
-		MGA_WRITE( MGA_WACCEPTSEQ, 0x18000000 );
+	case MGA_CARD_TYPE_G550:
+		MGA_WRITE(MGA_WIADDR2, MGA_WMODE_SUSPEND);
+		MGA_WRITE(MGA_WGETMSB, 0x00000E00);
+		MGA_WRITE(MGA_WVRTXSZ, 0x00001807);
+		MGA_WRITE(MGA_WACCEPTSEQ, 0x18000000);
 		break;
 	case MGA_CARD_TYPE_G200:
 		MGA_WRITE( MGA_WIADDR, MGA_WMODE_SUSPEND );
diff --git a/drivers/char/drm/r128_cce.c b/drivers/char/drm/r128_cce.c
index 08ed8d0..8951522 100644
--- a/drivers/char/drm/r128_cce.c
+++ b/drivers/char/drm/r128_cce.c
@@ -326,7 +326,8 @@
 		ring_start = dev_priv->cce_ring->offset - dev->agp->base;
 	else
 #endif
-		ring_start = dev_priv->cce_ring->offset - dev->sg->handle;
+		ring_start = dev_priv->cce_ring->offset - 
+				(unsigned long)dev->sg->virtual;
 
 	R128_WRITE( R128_PM4_BUFFER_OFFSET, ring_start | R128_AGP_OFFSET );
 
@@ -487,6 +488,7 @@
 		r128_do_cleanup_cce( dev );
 		return DRM_ERR(EINVAL);
 	}
+	dev->agp_buffer_token = init->buffers_offset;
 	dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
 	if(!dev->agp_buffer_map) {
 		DRM_ERROR("could not find dma buffer region!\n");
@@ -537,7 +539,7 @@
 		dev_priv->cce_buffers_offset = dev->agp->base;
 	else
 #endif
-		dev_priv->cce_buffers_offset = dev->sg->handle;
+		dev_priv->cce_buffers_offset = (unsigned long)dev->sg->virtual;
 
 	dev_priv->ring.start = (u32 *)dev_priv->cce_ring->handle;
 	dev_priv->ring.end = ((u32 *)dev_priv->cce_ring->handle
diff --git a/drivers/char/drm/r128_drm.h b/drivers/char/drm/r128_drm.h
index 0cba17d..b616cd3 100644
--- a/drivers/char/drm/r128_drm.h
+++ b/drivers/char/drm/r128_drm.h
@@ -215,7 +215,7 @@
 #define DRM_IOCTL_R128_INDIRECT   DRM_IOWR(DRM_COMMAND_BASE + DRM_R128_INDIRECT, drm_r128_indirect_t)
 #define DRM_IOCTL_R128_FULLSCREEN DRM_IOW( DRM_COMMAND_BASE + DRM_R128_FULLSCREEN, drm_r128_fullscreen_t)
 #define DRM_IOCTL_R128_CLEAR2     DRM_IOW( DRM_COMMAND_BASE + DRM_R128_CLEAR2, drm_r128_clear2_t)
-#define DRM_IOCTL_R128_GETPARAM   DRM_IOW( DRM_COMMAND_BASE + DRM_R128_GETPARAM, drm_r128_getparam_t)
+#define DRM_IOCTL_R128_GETPARAM   DRM_IOWR( DRM_COMMAND_BASE + DRM_R128_GETPARAM, drm_r128_getparam_t)
 #define DRM_IOCTL_R128_FLIP       DRM_IO(  DRM_COMMAND_BASE + DRM_R128_FLIP)
 
 typedef struct drm_r128_init {
diff --git a/drivers/char/drm/r300_cmdbuf.c b/drivers/char/drm/r300_cmdbuf.c
new file mode 100644
index 0000000..623f1f4
--- /dev/null
+++ b/drivers/char/drm/r300_cmdbuf.c
@@ -0,0 +1,801 @@
+/* r300_cmdbuf.c -- Command buffer emission for R300 -*- linux-c -*-
+ *
+ * Copyright (C) The Weather Channel, Inc.  2002.
+ * Copyright (C) 2004 Nicolai Haehnle.
+ * All Rights Reserved.
+ *
+ * The Weather Channel (TM) funded Tungsten Graphics to develop the
+ * initial release of the Radeon 8500 driver under the XFree86 license.
+ * This notice must be preserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Nicolai Haehnle <prefect_@gmx.net>
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "radeon_drm.h"
+#include "radeon_drv.h"
+#include "r300_reg.h"
+
+
+#define R300_SIMULTANEOUS_CLIPRECTS		4
+
+/* Values for R300_RE_CLIPRECT_CNTL depending on the number of cliprects
+ */
+static const int r300_cliprect_cntl[4] = {
+	0xAAAA,
+	0xEEEE,
+	0xFEFE,
+	0xFFFE
+};
+
+
+/**
+ * Emit up to R300_SIMULTANEOUS_CLIPRECTS cliprects from the given command
+ * buffer, starting with index n.
+ */
+static int r300_emit_cliprects(drm_radeon_private_t* dev_priv,
+			       drm_radeon_cmd_buffer_t* cmdbuf,
+			       int n)
+{
+	drm_clip_rect_t box;
+	int nr;
+	int i;
+	RING_LOCALS;
+
+	nr = cmdbuf->nbox - n;
+	if (nr > R300_SIMULTANEOUS_CLIPRECTS)
+		nr = R300_SIMULTANEOUS_CLIPRECTS;
+
+	DRM_DEBUG("%i cliprects\n", nr);
+
+	if (nr) {
+		BEGIN_RING(6 + nr*2);
+		OUT_RING( CP_PACKET0( R300_RE_CLIPRECT_TL_0, nr*2 - 1 ) );
+
+		for(i = 0; i < nr; ++i) {
+			if (DRM_COPY_FROM_USER_UNCHECKED(&box, &cmdbuf->boxes[n+i], sizeof(box))) {
+				DRM_ERROR("copy cliprect faulted\n");
+				return DRM_ERR(EFAULT);
+			}
+
+			box.x1 = (box.x1 + R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
+			box.y1 = (box.y1 + R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
+			box.x2 = (box.x2 + R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
+			box.y2 = (box.y2 + R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK;
+
+			OUT_RING((box.x1 << R300_CLIPRECT_X_SHIFT) |
+					(box.y1 << R300_CLIPRECT_Y_SHIFT));
+			OUT_RING((box.x2 << R300_CLIPRECT_X_SHIFT) |
+					(box.y2 << R300_CLIPRECT_Y_SHIFT));
+		}
+
+		OUT_RING_REG( R300_RE_CLIPRECT_CNTL, r300_cliprect_cntl[nr-1] );
+
+		/* TODO/SECURITY: Force scissors to a safe value, otherwise the
+		* client might be able to trample over memory.
+		* The impact should be very limited, but I'd rather be safe than
+		* sorry.
+		*/
+		OUT_RING( CP_PACKET0( R300_RE_SCISSORS_TL, 1 ) );
+		OUT_RING( 0 );
+		OUT_RING( R300_SCISSORS_X_MASK | R300_SCISSORS_Y_MASK );
+		ADVANCE_RING();
+	} else {
+		/* Why we allow zero cliprect rendering:
+		 * There are some commands in a command buffer that must be submitted
+		 * even when there are no cliprects, e.g. DMA buffer discard
+		 * or state setting (though state setting could be avoided by
+		 * simulating a loss of context).
+		 *
+		 * Now since the cmdbuf interface is so chaotic right now (and is
+		 * bound to remain that way for a bit until things settle down),
+		 * it is basically impossible to filter out the commands that are
+		 * necessary and those that aren't.
+		 *
+		 * So I choose the safe way and don't do any filtering at all;
+		 * instead, I simply set up the engine so that all rendering
+		 * can't produce any fragments.
+		 */
+		BEGIN_RING(2);
+		OUT_RING_REG( R300_RE_CLIPRECT_CNTL, 0 );
+		ADVANCE_RING();
+	}
+
+	return 0;
+}
+
+u8  r300_reg_flags[0x10000>>2];
+
+
+void r300_init_reg_flags(void)
+{
+	int i;
+	memset(r300_reg_flags, 0, 0x10000>>2);
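+	/* Tag every register in [reg, reg + 4*count) with the given mark in
+	 * the per-register flag table.
+	 */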
+	#define ADD_RANGE_MARK(reg, count,mark) \
+		for(i=((reg)>>2);i<((reg)>>2)+(count);i++)\
+			r300_reg_flags[i]|=(mark);
+	
+	#define MARK_SAFE		1
+	#define MARK_CHECK_OFFSET	2
+	
+	#define ADD_RANGE(reg, count)	ADD_RANGE_MARK(reg, count, MARK_SAFE)
+
+	/* these match cmducs() command in r300_driver/r300/r300_cmdbuf.c */
+	ADD_RANGE(R300_SE_VPORT_XSCALE, 6);
+	ADD_RANGE(0x2080, 1);
+	ADD_RANGE(R300_SE_VTE_CNTL, 2);
+	ADD_RANGE(0x2134, 2);
+	ADD_RANGE(0x2140, 1);
+	ADD_RANGE(R300_VAP_INPUT_CNTL_0, 2);
+	ADD_RANGE(0x21DC, 1);
+	ADD_RANGE(0x221C, 1);
+	ADD_RANGE(0x2220, 4);
+	ADD_RANGE(0x2288, 1);
+	ADD_RANGE(R300_VAP_OUTPUT_VTX_FMT_0, 2);
+	ADD_RANGE(R300_VAP_PVS_CNTL_1, 3);
+	ADD_RANGE(R300_GB_ENABLE, 1);
+	ADD_RANGE(R300_GB_MSPOS0, 5);
+	ADD_RANGE(R300_TX_ENABLE, 1);
+	ADD_RANGE(0x4200, 4);
+	ADD_RANGE(0x4214, 1);
+	ADD_RANGE(R300_RE_POINTSIZE, 1);
+	ADD_RANGE(0x4230, 3);
+	ADD_RANGE(R300_RE_LINE_CNT, 1);
+	ADD_RANGE(0x4238, 1);
+	ADD_RANGE(0x4260, 3);
+	ADD_RANGE(0x4274, 4);
+	ADD_RANGE(0x4288, 5);
+	ADD_RANGE(0x42A0, 1);
+	ADD_RANGE(R300_RE_ZBIAS_T_FACTOR, 4);
+	ADD_RANGE(0x42B4, 1);
+	ADD_RANGE(R300_RE_CULL_CNTL, 1);
+	ADD_RANGE(0x42C0, 2);
+	ADD_RANGE(R300_RS_CNTL_0, 2);
+	ADD_RANGE(R300_RS_INTERP_0, 8);
+	ADD_RANGE(R300_RS_ROUTE_0, 8);
+	ADD_RANGE(0x43A4, 2);
+	ADD_RANGE(0x43E8, 1);
+	ADD_RANGE(R300_PFS_CNTL_0, 3);
+	ADD_RANGE(R300_PFS_NODE_0, 4);
+	ADD_RANGE(R300_PFS_TEXI_0, 64);
+	ADD_RANGE(0x46A4, 5);
+	ADD_RANGE(R300_PFS_INSTR0_0, 64);
+	ADD_RANGE(R300_PFS_INSTR1_0, 64);
+	ADD_RANGE(R300_PFS_INSTR2_0, 64);
+	ADD_RANGE(R300_PFS_INSTR3_0, 64);
+	ADD_RANGE(0x4BC0, 1);
+	ADD_RANGE(0x4BC8, 3);
+	ADD_RANGE(R300_PP_ALPHA_TEST, 2);
+	ADD_RANGE(0x4BD8, 1);
+	ADD_RANGE(R300_PFS_PARAM_0_X, 64);
+	ADD_RANGE(0x4E00, 1);
+	ADD_RANGE(R300_RB3D_CBLEND, 2);
+	ADD_RANGE(R300_RB3D_COLORMASK, 1);
+	ADD_RANGE(0x4E10, 3);
+	ADD_RANGE_MARK(R300_RB3D_COLOROFFSET0, 1, MARK_CHECK_OFFSET); /* check offset */
+	ADD_RANGE(R300_RB3D_COLORPITCH0, 1);
+	ADD_RANGE(0x4E50, 9);
+	ADD_RANGE(0x4E88, 1);
+	ADD_RANGE(0x4EA0, 2);
+	ADD_RANGE(R300_RB3D_ZSTENCIL_CNTL_0, 3);
+	ADD_RANGE(0x4F10, 4);
+	ADD_RANGE_MARK(R300_RB3D_DEPTHOFFSET, 1, MARK_CHECK_OFFSET); /* check offset */
+	ADD_RANGE(R300_RB3D_DEPTHPITCH, 1); 
+	ADD_RANGE(0x4F28, 1);
+	ADD_RANGE(0x4F30, 2);
+	ADD_RANGE(0x4F44, 1);
+	ADD_RANGE(0x4F54, 1);
+
+	ADD_RANGE(R300_TX_FILTER_0, 16);
+	ADD_RANGE(R300_TX_UNK1_0, 16);
+	ADD_RANGE(R300_TX_SIZE_0, 16);
+	ADD_RANGE(R300_TX_FORMAT_0, 16);
+		/* Texture offset is dangerous and needs more checking */
+	ADD_RANGE_MARK(R300_TX_OFFSET_0, 16, MARK_CHECK_OFFSET);
+	ADD_RANGE(R300_TX_UNK4_0, 16);
+	ADD_RANGE(R300_TX_BORDER_COLOR_0, 16);
+
+	/* Registers that are written sporadically as primitives are emitted */
+	ADD_RANGE(0x4f18, 1);
+	ADD_RANGE(R300_RB3D_DSTCACHE_CTLSTAT, 1);
+	ADD_RANGE(R300_VAP_INPUT_ROUTE_0_0, 8);
+	ADD_RANGE(R300_VAP_INPUT_ROUTE_1_0, 8);
+
+}
+
+static __inline__ int r300_check_range(unsigned  reg, int count)
+{
+	int i;
+	if(reg & ~0xffff)return -1;
+	for(i=(reg>>2);i<(reg>>2)+count;i++)
+		if(r300_reg_flags[i]!=MARK_SAFE)return 1;
+	return 0;
+}
+
+/* We expect offsets passed to the framebuffer to be either within video
+ * memory or within AGP space.
+ */
+static __inline__ int r300_check_offset(drm_radeon_private_t* dev_priv, u32 offset)
+{
+	/* We really want to check against the end of the video aperture, but
+	 * that value is not being kept around. This code is correct for now
+	 * (it does the same thing as the code that sets MC_FB_LOCATION in
+	 * radeon_cp.c). */
+	if((offset>=dev_priv->fb_location) && 
+		(offset<dev_priv->gart_vm_start))return 0;
+	if((offset>=dev_priv->gart_vm_start) &&
+		 (offset<dev_priv->gart_vm_start+dev_priv->gart_size))return 0;
+	return 1;
+}
+
+static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t* dev_priv,
+						drm_radeon_cmd_buffer_t* cmdbuf,
+						drm_r300_cmd_header_t header)
+{
+	int reg;
+	int sz;
+	int i;
+	int values[64];
+	RING_LOCALS;
+
+	sz = header.packet0.count;
+	reg = (header.packet0.reghi << 8) | header.packet0.reglo;
+	
+	if((sz>64)||(sz<0)){
+		DRM_ERROR("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n", reg, sz);
+		return DRM_ERR(EINVAL);
+		}
+	for(i=0;i<sz;i++){
+		values[i]=((int __user*)cmdbuf->buf)[i];
+		switch(r300_reg_flags[(reg>>2)+i]){
+		case MARK_SAFE:
+			break;
+		case MARK_CHECK_OFFSET:
+			if(r300_check_offset(dev_priv, (u32)values[i])){
+				DRM_ERROR("Offset failed range check (reg=%04x sz=%d)\n", reg, sz);
+				return DRM_ERR(EINVAL);
+				}
+			break;
+		default:
+			DRM_ERROR("Register %04x failed check as flag=%02x\n", reg+i*4, r300_reg_flags[(reg>>2)+i]);
+			return DRM_ERR(EINVAL);
+			}
+		}
+		
+	BEGIN_RING(1+sz);
+	OUT_RING( CP_PACKET0( reg, sz-1 ) );
+	OUT_RING_TABLE( values, sz );
+	ADVANCE_RING();
+
+	cmdbuf->buf += sz*4;
+	cmdbuf->bufsz -= sz*4;
+
+	return 0;
+}
+
+/**
+ * Emits a packet0 setting arbitrary registers.
+ * Called by r300_do_cp_cmdbuf.
+ *
+ * Note that checks are performed on contents and addresses of the registers
+ */
+static __inline__ int r300_emit_packet0(drm_radeon_private_t* dev_priv,
+						drm_radeon_cmd_buffer_t* cmdbuf,
+						drm_r300_cmd_header_t header)
+{
+	int reg;
+	int sz;
+	RING_LOCALS;
+
+	sz = header.packet0.count;
+	reg = (header.packet0.reghi << 8) | header.packet0.reglo;
+
+	if (!sz)
+		return 0;
+
+	if (sz*4 > cmdbuf->bufsz)
+		return DRM_ERR(EINVAL);
+		
+	if (reg+sz*4 >= 0x10000){
+		DRM_ERROR("No such registers in hardware reg=%04x sz=%d\n", reg, sz);
+		return DRM_ERR(EINVAL);
+		}
+
+	if(r300_check_range(reg, sz)){
+		/* go and check everything */
+		return r300_emit_carefully_checked_packet0(dev_priv, cmdbuf, header);
+		}
+	/* The rest of the data is safe to emit, whatever values the user passed. */
+
+	BEGIN_RING(1+sz);
+	OUT_RING( CP_PACKET0( reg, sz-1 ) );
+	OUT_RING_TABLE( (int __user*)cmdbuf->buf, sz );
+	ADVANCE_RING();
+
+	cmdbuf->buf += sz*4;
+	cmdbuf->bufsz -= sz*4;
+
+	return 0;
+}
+
+
+/**
+ * Uploads user-supplied vertex program instructions or parameters onto
+ * the graphics card.
+ * Called by r300_do_cp_cmdbuf.
+ */
+static __inline__ int r300_emit_vpu(drm_radeon_private_t* dev_priv,
+				    drm_radeon_cmd_buffer_t* cmdbuf,
+				    drm_r300_cmd_header_t header)
+{
+	int sz;
+	int addr;
+	RING_LOCALS;
+
+	sz = header.vpu.count;
+	addr = (header.vpu.adrhi << 8) | header.vpu.adrlo;
+
+	if (!sz)
+		return 0;
+	if (sz*16 > cmdbuf->bufsz)
+		return DRM_ERR(EINVAL);
+
+	BEGIN_RING(5+sz*4);
+	/* Wait for the VAP to come to its senses. There is no need to emit
+	 * this multiple times (only once before the VAP is programmed), but
+	 * that optimization is left for later. */
+	OUT_RING_REG( R300_VAP_PVS_WAITIDLE, 0 );
+	OUT_RING_REG( R300_VAP_PVS_UPLOAD_ADDRESS, addr );
+	OUT_RING( CP_PACKET0_TABLE( R300_VAP_PVS_UPLOAD_DATA, sz*4 - 1 ) );
+	OUT_RING_TABLE( (int __user*)cmdbuf->buf, sz*4 );
+
+	ADVANCE_RING();
+
+	cmdbuf->buf += sz*16;
+	cmdbuf->bufsz -= sz*16;
+
+	return 0;
+}
+
+
+/**
+ * Emit a clear packet from userspace.
+ * Called by r300_emit_packet3.
+ */
+static __inline__ int r300_emit_clear(drm_radeon_private_t* dev_priv,
+				      drm_radeon_cmd_buffer_t* cmdbuf)
+{
+	RING_LOCALS;
+
+	if (8*4 > cmdbuf->bufsz)
+		return DRM_ERR(EINVAL);
+
+	BEGIN_RING(10);
+	OUT_RING( CP_PACKET3( R200_3D_DRAW_IMMD_2, 8 ) );
+	OUT_RING( R300_PRIM_TYPE_POINT|R300_PRIM_WALK_RING|
+	          (1<<R300_PRIM_NUM_VERTICES_SHIFT) );
+	OUT_RING_TABLE( (int __user*)cmdbuf->buf, 8 );
+	ADVANCE_RING();
+
+	cmdbuf->buf += 8*4;
+	cmdbuf->bufsz -= 8*4;
+
+	return 0;
+}
+
+static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t* dev_priv,
+				      drm_radeon_cmd_buffer_t* cmdbuf,
+				      u32 header)
+{
+	int count, i,k;
+	#define MAX_ARRAY_PACKET  64
+	u32 payload[MAX_ARRAY_PACKET];
+	u32 narrays;
+	RING_LOCALS;
+
+	count=(header>>16) & 0x3fff;
+	
+	if((count+1)>MAX_ARRAY_PACKET){
+		DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n", count);
+		return DRM_ERR(EINVAL);
+		}
+	memset(payload, 0, MAX_ARRAY_PACKET*4);
+	memcpy(payload, cmdbuf->buf+4, (count+1)*4);	
+	
+	/* carefully check packet contents */
+	
+	narrays=payload[0];
+	k=0;
+	i=1;
+	while((k<narrays) && (i<(count+1))){
+		i++; /* skip attribute field */
+		if(r300_check_offset(dev_priv, payload[i])){
+			DRM_ERROR("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", k, i);
+			return DRM_ERR(EINVAL);
+			}
+		k++;
+		i++;
+		if(k==narrays)break;
+		/* have one more to process, they come in pairs */
+		if(r300_check_offset(dev_priv, payload[i])){
+			DRM_ERROR("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", k, i);
+			return DRM_ERR(EINVAL);
+			}
+		k++;
+		i++;			
+		}
+	/* do the counts match what we expect ? */
+	if((k!=narrays) || (i!=(count+1))){
+		DRM_ERROR("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n", k, i, narrays, count+1);
+		return DRM_ERR(EINVAL);
+		}
+
+	/* all clear, output packet */
+
+	BEGIN_RING(count+2);
+	OUT_RING(header);
+	OUT_RING_TABLE(payload, count+1);
+	ADVANCE_RING();
+
+	cmdbuf->buf += (count+2)*4;
+	cmdbuf->bufsz -= (count+2)*4;
+
+	return 0;
+}
+
+static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t* dev_priv,
+				      drm_radeon_cmd_buffer_t* cmdbuf)
+{
+	u32 header;
+	int count;
+	RING_LOCALS;
+
+	if (4 > cmdbuf->bufsz)
+		return DRM_ERR(EINVAL);
+
+	/* FIXME: This simply emits a packet without much checking.
+	   We need to be smarter. */
+
+	/* obtain first word - actual packet3 header */
+	header = *(u32 __user*)cmdbuf->buf;
+
+	/* Is it packet 3 ? */
+	if( (header>>30)!=0x3 ) {
+		DRM_ERROR("Not a packet3 header (0x%08x)\n", header);
+		return DRM_ERR(EINVAL);
+		}
+
+	count=(header>>16) & 0x3fff;
+
+	/* Check again now that we know how much data to expect */
+	if ((count+2)*4 > cmdbuf->bufsz){
+		DRM_ERROR("Expected packet3 of length %d but have only %d bytes left\n",
+			(count+2)*4, cmdbuf->bufsz);
+		return DRM_ERR(EINVAL);
+		}
+
+	/* Is it a packet type we know about ? */
+	switch(header & 0xff00){
+	case RADEON_3D_LOAD_VBPNTR: /* load vertex array pointers */
+		return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, header);
+
+	case RADEON_CP_3D_DRAW_IMMD_2: /* triggers drawing using in-packet vertex data */
+	case RADEON_CP_3D_DRAW_VBUF_2: /* triggers drawing of vertex buffers setup elsewhere */
+	case RADEON_CP_3D_DRAW_INDX_2: /* triggers drawing using indices to vertex buffer */
+	case RADEON_CP_INDX_BUFFER: /* DRAW_INDX_2 without INDX_BUFFER seems to lock up the gpu */
+	case RADEON_WAIT_FOR_IDLE:
+	case RADEON_CP_NOP:
+		/* these packets are safe */
+		break;
+	default:
+		DRM_ERROR("Unknown packet3 header (0x%08x)\n", header);
+		return DRM_ERR(EINVAL);
+		}
+
+
+	BEGIN_RING(count+2);
+	OUT_RING(header);
+	OUT_RING_TABLE( (int __user*)(cmdbuf->buf+4), count+1);
+	ADVANCE_RING();
+
+	cmdbuf->buf += (count+2)*4;
+	cmdbuf->bufsz -= (count+2)*4;
+
+	return 0;
+}
+
+
+/**
+ * Emit a rendering packet3 from userspace.
+ * Called by r300_do_cp_cmdbuf.
+ */
+static __inline__ int r300_emit_packet3(drm_radeon_private_t* dev_priv,
+					drm_radeon_cmd_buffer_t* cmdbuf,
+					drm_r300_cmd_header_t header)
+{
+	int n;
+	int ret;
+	char __user* orig_buf = cmdbuf->buf;
+	int orig_bufsz = cmdbuf->bufsz;
+
+	/* This is a do-while-loop so that we run the interior at least once,
+	 * even if cmdbuf->nbox is 0. Compare r300_emit_cliprects for rationale.
+	 */
+	n = 0;
+	do {
+		if (cmdbuf->nbox > R300_SIMULTANEOUS_CLIPRECTS) {
+			ret = r300_emit_cliprects(dev_priv, cmdbuf, n);
+			if (ret)
+				return ret;
+
+			cmdbuf->buf = orig_buf;
+			cmdbuf->bufsz = orig_bufsz;
+			}
+
+		switch(header.packet3.packet) {
+		case R300_CMD_PACKET3_CLEAR:
+			DRM_DEBUG("R300_CMD_PACKET3_CLEAR\n");
+			ret = r300_emit_clear(dev_priv, cmdbuf);
+			if (ret) {
+				DRM_ERROR("r300_emit_clear failed\n");
+				return ret;
+				}
+			break;
+
+		case R300_CMD_PACKET3_RAW:
+			DRM_DEBUG("R300_CMD_PACKET3_RAW\n");
+			ret = r300_emit_raw_packet3(dev_priv, cmdbuf);
+			if (ret) {
+				DRM_ERROR("r300_emit_raw_packet3 failed\n");
+				return ret;
+				}
+			break;
+
+		default:
+			DRM_ERROR("bad packet3 type %i at %p\n",
+				header.packet3.packet,
+				cmdbuf->buf - sizeof(header));
+			return DRM_ERR(EINVAL);
+			}
+
+		n += R300_SIMULTANEOUS_CLIPRECTS;
+	} while(n < cmdbuf->nbox);
+
+	return 0;
+}
+
+/* Some of the R300 chips seem to be extremely touchy about the two registers
+ * that are configured in r300_pacify.
+ * Among the worst offenders seems to be the R300 ND (0x4E44): When userspace
+ * sends a command buffer that contains only state setting commands and a
+ * vertex program/parameter upload sequence, this will eventually lead to a
+ * lockup, unless the sequence is bracketed by calls to r300_pacify.
+ * So we should take great care to *always* call r300_pacify before
+ * *anything* 3D related, and again afterwards. This is what the
+ * call bracket in r300_do_cp_cmdbuf is for.
+ */
+
+/**
+ * Emit the sequence to pacify R300.
+ */
+static __inline__ void r300_pacify(drm_radeon_private_t* dev_priv)
+{
+	RING_LOCALS;
+
+	BEGIN_RING(6);
+	OUT_RING( CP_PACKET0( R300_RB3D_DSTCACHE_CTLSTAT, 0 ) );
+	OUT_RING( 0xa );
+	OUT_RING( CP_PACKET0( 0x4f18, 0 ) );
+	OUT_RING( 0x3 );
+	OUT_RING( CP_PACKET3( RADEON_CP_NOP, 0 ) );
+	OUT_RING( 0x0 );
+	ADVANCE_RING();
+}
+
+
+/**
+ * Called by r300_do_cp_cmdbuf to update the internal buffer age and state.
+ * The actual age emit is done by r300_do_cp_cmdbuf, which is why you must
+ * be careful about how this function is called.
+ */
+static void r300_discard_buffer(drm_device_t * dev, drm_buf_t * buf)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
+
+	buf_priv->age = ++dev_priv->sarea_priv->last_dispatch;
+	buf->pending = 1;
+	buf->used = 0;
+}
+
+
+/**
+ * Parses and validates a user-supplied command buffer and emits appropriate
+ * commands on the DMA ring buffer.
+ * Called by the ioctl handler function radeon_cp_cmdbuf.
+ */
+int r300_do_cp_cmdbuf(drm_device_t* dev,
+			  DRMFILE filp,
+		      drm_file_t* filp_priv,
+		      drm_radeon_cmd_buffer_t* cmdbuf)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+	drm_device_dma_t *dma = dev->dma;
+	drm_buf_t *buf = NULL;
+	int emit_dispatch_age = 0;
+	int ret = 0;
+
+	DRM_DEBUG("\n");
+
+	/* See the comment above r300_pacify for why this call must be here,
+	 * and what the cleanup gotos are for. */
+	r300_pacify(dev_priv);
+
+	if (cmdbuf->nbox <= R300_SIMULTANEOUS_CLIPRECTS) {
+		ret = r300_emit_cliprects(dev_priv, cmdbuf, 0);
+		if (ret)
+			goto cleanup;
+		}
+
+	while(cmdbuf->bufsz >= sizeof(drm_r300_cmd_header_t)) {
+		int idx;
+		drm_r300_cmd_header_t header;
+
+		header.u = *(unsigned int *)cmdbuf->buf;
+
+		cmdbuf->buf += sizeof(header);
+		cmdbuf->bufsz -= sizeof(header);
+
+		switch(header.header.cmd_type) {
+		case R300_CMD_PACKET0: 
+			DRM_DEBUG("R300_CMD_PACKET0\n");
+			ret = r300_emit_packet0(dev_priv, cmdbuf, header);
+			if (ret) {
+				DRM_ERROR("r300_emit_packet0 failed\n");
+				goto cleanup;
+				}
+			break;
+
+		case R300_CMD_VPU:
+			DRM_DEBUG("R300_CMD_VPU\n");
+			ret = r300_emit_vpu(dev_priv, cmdbuf, header);
+			if (ret) {
+				DRM_ERROR("r300_emit_vpu failed\n");
+				goto cleanup;
+				}
+			break;
+
+		case R300_CMD_PACKET3:
+			DRM_DEBUG("R300_CMD_PACKET3\n");
+			ret = r300_emit_packet3(dev_priv, cmdbuf, header);
+			if (ret) {
+				DRM_ERROR("r300_emit_packet3 failed\n");
+				goto cleanup;
+				}
+			break;
+
+		case R300_CMD_END3D:
+			DRM_DEBUG("R300_CMD_END3D\n");
+			/* TODO:
+				Ideally the userspace driver should not need to
+				issue this call, i.e. the drm driver should issue
+				it automatically and prevent lockups.
+
+				In practice, we do not understand why this call is
+				needed and what it does (except for some vague
+				guesses that it has to do with cache coherence),
+				and so the userspace driver issues it.
+
+				Once we are sure which uses prevent lockups, the
+				code could be moved into the kernel and the
+				userspace driver will no longer need to use this
+				command.
+
+				Note that issuing this command does not hurt
+				anything except, possibly, performance. */
+			r300_pacify(dev_priv);
+			break;
+
+		case R300_CMD_CP_DELAY:
+			/* simple enough, we can do it here */
+			DRM_DEBUG("R300_CMD_CP_DELAY\n");
+			{
+				int i;
+				RING_LOCALS;
+
+				BEGIN_RING(header.delay.count);
+				for(i=0;i<header.delay.count;i++)
+					OUT_RING(RADEON_CP_PACKET2);
+				ADVANCE_RING();
+			}
+			break;
+
+		case R300_CMD_DMA_DISCARD:
+			DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
+            		idx = header.dma.buf_idx;
+            		if (idx < 0 || idx >= dma->buf_count) {
+                		DRM_ERROR("buffer index %d (of %d max)\n",
+                      			idx, dma->buf_count - 1);
+				ret = DRM_ERR(EINVAL);
+                		goto cleanup;
+            			}
+
+	                buf = dma->buflist[idx];
+            		if (buf->filp != filp || buf->pending) {
+                		DRM_ERROR("bad buffer %p %p %d\n",
+                      		buf->filp, filp, buf->pending);
+                		ret = DRM_ERR(EINVAL);
+				goto cleanup;
+            			}
+
+			emit_dispatch_age = 1;
+			r300_discard_buffer(dev, buf);
+            		break;
+
+		case R300_CMD_WAIT:
+			/* simple enough, we can do it here */
+			DRM_DEBUG("R300_CMD_WAIT\n");
+			if(header.wait.flags==0)break; /* nothing to do */
+
+			{
+				RING_LOCALS;
+
+				BEGIN_RING(2);
+				OUT_RING( CP_PACKET0( RADEON_WAIT_UNTIL, 0 ) );
+				OUT_RING( (header.wait.flags & 0xf)<<14 );
+				ADVANCE_RING();
+			}
+			break;
+
+		default:
+			DRM_ERROR("bad cmd_type %i at %p\n",
+			          header.header.cmd_type,
+				  cmdbuf->buf - sizeof(header));
+			ret = DRM_ERR(EINVAL);
+			goto cleanup;
+			}
+	}
+
+	DRM_DEBUG("END\n");
+
+cleanup:
+	r300_pacify(dev_priv);
+
+	/* We emit the vertex buffer age here, outside the pacifier "brackets"
+	 * for two reasons:
+	 *  (1) This may coalesce multiple age emissions into a single one and
+	 *  (2) more importantly, some chips lock up hard when scratch registers
+	 *      are written inside the pacifier bracket.
+	 */
+	if (emit_dispatch_age) {
+		RING_LOCALS;
+
+		/* Emit the vertex buffer age */
+		BEGIN_RING(2);
+		RADEON_DISPATCH_AGE(dev_priv->sarea_priv->last_dispatch);
+		ADVANCE_RING();
+		}
+
+	COMMIT_RING();
+
+	return ret;
+}
+
diff --git a/drivers/char/drm/r300_reg.h b/drivers/char/drm/r300_reg.h
new file mode 100644
index 0000000..c3e7ca3
--- /dev/null
+++ b/drivers/char/drm/r300_reg.h
@@ -0,0 +1,1412 @@
+/**************************************************************************
+
+Copyright (C) 2004-2005 Nicolai Haehnle et al.
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the "Software"),
+to deal in the Software without restriction, including without limitation
+on the rights to use, copy, modify, merge, publish, distribute, sub
+license, and/or sell copies of the Software, and to permit persons to whom
+the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice (including the next
+paragraph) shall be included in all copies or substantial portions of the
+Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
+DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+**************************************************************************/
+
+#ifndef _R300_REG_H
+#define _R300_REG_H
+
+#define R300_MC_INIT_MISC_LAT_TIMER	0x180
+#	define R300_MC_MISC__MC_CPR_INIT_LAT_SHIFT	0
+#	define R300_MC_MISC__MC_VF_INIT_LAT_SHIFT	4
+#	define R300_MC_MISC__MC_DISP0R_INIT_LAT_SHIFT	8
+#	define R300_MC_MISC__MC_DISP1R_INIT_LAT_SHIFT	12
+#	define R300_MC_MISC__MC_FIXED_INIT_LAT_SHIFT	16
+#	define R300_MC_MISC__MC_E2R_INIT_LAT_SHIFT	20
+#	define R300_MC_MISC__MC_SAME_PAGE_PRIO_SHIFT	24
+#	define R300_MC_MISC__MC_GLOBW_INIT_LAT_SHIFT	28
+
+
+#define R300_MC_INIT_GFX_LAT_TIMER	0x154
+#	define R300_MC_MISC__MC_G3D0R_INIT_LAT_SHIFT	0
+#	define R300_MC_MISC__MC_G3D1R_INIT_LAT_SHIFT	4
+#	define R300_MC_MISC__MC_G3D2R_INIT_LAT_SHIFT	8
+#	define R300_MC_MISC__MC_G3D3R_INIT_LAT_SHIFT	12
+#	define R300_MC_MISC__MC_TX0R_INIT_LAT_SHIFT	16
+#	define R300_MC_MISC__MC_TX1R_INIT_LAT_SHIFT	20
+#	define R300_MC_MISC__MC_GLOBR_INIT_LAT_SHIFT	24
+#	define R300_MC_MISC__MC_GLOBW_FULL_LAT_SHIFT	28
+
+/*
+This file contains registers and constants for the R300. They have been
+found mostly by examining command buffers captured using glxtest, as well
+as by extrapolating some known registers and constants from the R200.
+
+I am fairly certain that they are correct unless stated otherwise in comments.
+*/
+
+#define R300_SE_VPORT_XSCALE                0x1D98
+#define R300_SE_VPORT_XOFFSET               0x1D9C
+#define R300_SE_VPORT_YSCALE                0x1DA0
+#define R300_SE_VPORT_YOFFSET               0x1DA4
+#define R300_SE_VPORT_ZSCALE                0x1DA8
+#define R300_SE_VPORT_ZOFFSET               0x1DAC
+
+
+/* This register is written directly and also starts the data section in many 3D CP_PACKET3 packets */
+#define R300_VAP_VF_CNTL	0x2084
+
+#	define	R300_VAP_VF_CNTL__PRIM_TYPE__SHIFT                       0
+#	define  R300_VAP_VF_CNTL__PRIM_NONE				 (0<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_POINTS				 (1<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_LINES				 (2<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_LINE_STRIP			 (3<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_TRIANGLES			 (4<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_TRIANGLE_FAN			 (5<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_TRIANGLE_STRIP			 (6<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_LINE_LOOP			 (12<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_QUADS			 	 (13<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_QUAD_STRIP			 (14<<0)
+#	define  R300_VAP_VF_CNTL__PRIM_POLYGON			 	 (15<<0)
+
+#	define	R300_VAP_VF_CNTL__PRIM_WALK__SHIFT                       4
+	/* State based - direct writes to registers trigger vertex generation */
+#	define	R300_VAP_VF_CNTL__PRIM_WALK_STATE_BASED                      (0<<4)
+#	define	R300_VAP_VF_CNTL__PRIM_WALK_INDICES                          (1<<4)
+#	define	R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_LIST                      (2<<4)
+#	define	R300_VAP_VF_CNTL__PRIM_WALK_VERTEX_EMBEDDED                  (3<<4)
+
+		/* I don't think I saw these three used.. */
+#	define	R300_VAP_VF_CNTL__COLOR_ORDER__SHIFT                     6
+#	define	R300_VAP_VF_CNTL__TCL_OUTPUT_CTL_ENA__SHIFT              9
+#	define	R300_VAP_VF_CNTL__PROG_STREAM_ENA__SHIFT                 10
+
+		/* index size - when not set the indices are assumed to be 16 bit */
+#	define	R300_VAP_VF_CNTL__INDEX_SIZE_32bit                      (1<<11)
+                /* number of vertices */
+#	define	R300_VAP_VF_CNTL__NUM_VERTICES__SHIFT                    16
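+
+/* Illustrative sketch only (the field meanings above are partly guesses):
+ * a VF_CNTL header word for drawing (n) vertices as an indexed triangle
+ * list. The R300_EXAMPLE_* name is not part of the hardware interface.
+ */
+#define R300_EXAMPLE_VF_CNTL_INDEXED_TRIS(n)			\
+	(R300_VAP_VF_CNTL__PRIM_TRIANGLES |			\
+	 R300_VAP_VF_CNTL__PRIM_WALK_INDICES |			\
+	 ((n) << R300_VAP_VF_CNTL__NUM_VERTICES__SHIFT))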
+
+/* BEGIN: Wild guesses */
+#define R300_VAP_OUTPUT_VTX_FMT_0           0x2090
+#       define R300_VAP_OUTPUT_VTX_FMT_0__POS_PRESENT     (1<<0)
+#       define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_PRESENT   (1<<1)
+#       define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_1_PRESENT (1<<2) /* GUESS */
+#       define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_2_PRESENT (1<<3) /* GUESS */
+#       define R300_VAP_OUTPUT_VTX_FMT_0__COLOR_3_PRESENT (1<<4) /* GUESS */
+#       define R300_VAP_OUTPUT_VTX_FMT_0__PT_SIZE_PRESENT (1<<16) /* GUESS */
+
+#define R300_VAP_OUTPUT_VTX_FMT_1           0x2094
+#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_0_COMP_CNT_SHIFT 0
+#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_1_COMP_CNT_SHIFT 3
+#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_2_COMP_CNT_SHIFT 6
+#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_3_COMP_CNT_SHIFT 9
+#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_4_COMP_CNT_SHIFT 12
+#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_5_COMP_CNT_SHIFT 15
+#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_6_COMP_CNT_SHIFT 18
+#       define R300_VAP_OUTPUT_VTX_FMT_1__TEX_7_COMP_CNT_SHIFT 21
+/* END */
+
+#define R300_SE_VTE_CNTL                  0x20b0
+#	define     R300_VPORT_X_SCALE_ENA                0x00000001
+#	define     R300_VPORT_X_OFFSET_ENA               0x00000002
+#	define     R300_VPORT_Y_SCALE_ENA                0x00000004
+#	define     R300_VPORT_Y_OFFSET_ENA               0x00000008
+#	define     R300_VPORT_Z_SCALE_ENA                0x00000010
+#	define     R300_VPORT_Z_OFFSET_ENA               0x00000020
+#	define     R300_VTX_XY_FMT                       0x00000100
+#	define     R300_VTX_Z_FMT                        0x00000200
+#	define     R300_VTX_W0_FMT                       0x00000400
+#	define     R300_VTX_W0_NORMALIZE                 0x00000800
+#	define     R300_VTX_ST_DENORMALIZED              0x00001000
+
+/* BEGIN: Vertex data assembly - lots of uncertainties */
+/* gap */
+/* Where do we get our vertex data?
+//
+// Vertex data comes either from immediate mode registers or from
+// vertex arrays.
+// There appears to be no mixed mode (though we can force the pitch of
+// vertex arrays to 0, effectively reusing the same element over and over
+// again).
+//
+// Immediate mode is controlled by the INPUT_CNTL registers. I am not sure
+// if these registers influence vertex array processing.
+//
+// Vertex arrays are controlled via the 3D_LOAD_VBPNTR packet3.
+//
+// In both cases, vertex attributes are then passed through INPUT_ROUTE.
+
+// Beginning with INPUT_ROUTE_0_0 is a list of WORDs that route vertex data
+// into the vertex processor's input registers.
+// The first word routes the first input, the second word the second, etc.
+// The corresponding input is routed into the register with the given index.
+// The list is ended by a word with INPUT_ROUTE_END set.
+//
+// Always set COMPONENTS_4 in immediate mode. */
+
+#define R300_VAP_INPUT_ROUTE_0_0            0x2150
+#       define R300_INPUT_ROUTE_COMPONENTS_1     (0 << 0)
+#       define R300_INPUT_ROUTE_COMPONENTS_2     (1 << 0)
+#       define R300_INPUT_ROUTE_COMPONENTS_3     (2 << 0)
+#       define R300_INPUT_ROUTE_COMPONENTS_4     (3 << 0)
+#       define R300_INPUT_ROUTE_COMPONENTS_RGBA  (4 << 0) /* GUESS */
+#       define R300_VAP_INPUT_ROUTE_IDX_SHIFT    8
+#       define R300_VAP_INPUT_ROUTE_IDX_MASK     (31 << 8) /* GUESS */
+#       define R300_VAP_INPUT_ROUTE_END          (1 << 13)
+#       define R300_INPUT_ROUTE_IMMEDIATE_MODE   (0 << 14) /* GUESS */
+#       define R300_INPUT_ROUTE_FLOAT            (1 << 14) /* GUESS */
+#       define R300_INPUT_ROUTE_UNSIGNED_BYTE    (2 << 14) /* GUESS */
+#       define R300_INPUT_ROUTE_FLOAT_COLOR      (3 << 14) /* GUESS */
+#define R300_VAP_INPUT_ROUTE_0_1            0x2154
+#define R300_VAP_INPUT_ROUTE_0_2            0x2158
+#define R300_VAP_INPUT_ROUTE_0_3            0x215C
+#define R300_VAP_INPUT_ROUTE_0_4            0x2160
+#define R300_VAP_INPUT_ROUTE_0_5            0x2164
+#define R300_VAP_INPUT_ROUTE_0_6            0x2168
+#define R300_VAP_INPUT_ROUTE_0_7            0x216C
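+
+/* Hedged illustration of the routing described above (the IDX position and
+ * the type bits carry GUESS markers): one INPUT_ROUTE_0 word that routes a
+ * four-component float attribute into vertex processor input register (idx);
+ * OR in R300_VAP_INPUT_ROUTE_END on the last word of the list. The
+ * R300_EXAMPLE_* name is illustrative only.
+ */
+#define R300_EXAMPLE_INPUT_ROUTE0_FLOAT4(idx)		\
+	(R300_INPUT_ROUTE_COMPONENTS_4 |		\
+	 ((idx) << R300_VAP_INPUT_ROUTE_IDX_SHIFT) |	\
+	 R300_INPUT_ROUTE_FLOAT)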
+
+/* gap */
+/* Notes:
+//  - always set up to produce at least two attributes:
+//    if vertex program uses only position, fglrx will set normal, too
+//  - INPUT_CNTL_0_COLOR and INPUT_CNTL_COLOR bits are always equal */
+#define R300_VAP_INPUT_CNTL_0               0x2180
+#       define R300_INPUT_CNTL_0_COLOR           0x00000001
+#define R300_VAP_INPUT_CNTL_1               0x2184
+#       define R300_INPUT_CNTL_POS               0x00000001
+#       define R300_INPUT_CNTL_NORMAL            0x00000002
+#       define R300_INPUT_CNTL_COLOR             0x00000004
+#       define R300_INPUT_CNTL_TC0               0x00000400
+#       define R300_INPUT_CNTL_TC1               0x00000800
+#       define R300_INPUT_CNTL_TC2               0x00001000 /* GUESS */
+#       define R300_INPUT_CNTL_TC3               0x00002000 /* GUESS */
+#       define R300_INPUT_CNTL_TC4               0x00004000 /* GUESS */
+#       define R300_INPUT_CNTL_TC5               0x00008000 /* GUESS */
+#       define R300_INPUT_CNTL_TC6               0x00010000 /* GUESS */
+#       define R300_INPUT_CNTL_TC7               0x00020000 /* GUESS */
+
+/* gap */
+/* Words parallel to INPUT_ROUTE_0; all words that are active in INPUT_ROUTE_0
+// are set to a swizzling bit pattern, other words are 0.
+//
+// In immediate mode, the pattern is always set to xyzw. In vertex array
+// mode, the swizzling pattern is e.g. used to set the zw components in
+// texture coordinates with only two components (a sketch of such a word
+// follows the ROUTE_1 register list below). */
+#define R300_VAP_INPUT_ROUTE_1_0            0x21E0
+#       define R300_INPUT_ROUTE_SELECT_X    0
+#       define R300_INPUT_ROUTE_SELECT_Y    1
+#       define R300_INPUT_ROUTE_SELECT_Z    2
+#       define R300_INPUT_ROUTE_SELECT_W    3
+#       define R300_INPUT_ROUTE_SELECT_ZERO 4
+#       define R300_INPUT_ROUTE_SELECT_ONE  5
+#       define R300_INPUT_ROUTE_SELECT_MASK 7
+#       define R300_INPUT_ROUTE_X_SHIFT          0
+#       define R300_INPUT_ROUTE_Y_SHIFT          3
+#       define R300_INPUT_ROUTE_Z_SHIFT          6
+#       define R300_INPUT_ROUTE_W_SHIFT          9
+#       define R300_INPUT_ROUTE_ENABLE           (15 << 12)
+#define R300_VAP_INPUT_ROUTE_1_1            0x21E4
+#define R300_VAP_INPUT_ROUTE_1_2            0x21E8
+#define R300_VAP_INPUT_ROUTE_1_3            0x21EC
+#define R300_VAP_INPUT_ROUTE_1_4            0x21F0
+#define R300_VAP_INPUT_ROUTE_1_5            0x21F4
+#define R300_VAP_INPUT_ROUTE_1_6            0x21F8
+#define R300_VAP_INPUT_ROUTE_1_7            0x21FC
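+
+/* Sketch of the swizzle word mentioned above: pass a two-component texture
+ * coordinate through as (s, t, 0, 1). Built purely from the (partly guessed)
+ * defines above; the R300_EXAMPLE_* name is illustrative only.
+ */
+#define R300_EXAMPLE_INPUT_ROUTE1_ST01					\
+	(R300_INPUT_ROUTE_ENABLE |					\
+	 (R300_INPUT_ROUTE_SELECT_X    << R300_INPUT_ROUTE_X_SHIFT) |	\
+	 (R300_INPUT_ROUTE_SELECT_Y    << R300_INPUT_ROUTE_Y_SHIFT) |	\
+	 (R300_INPUT_ROUTE_SELECT_ZERO << R300_INPUT_ROUTE_Z_SHIFT) |	\
+	 (R300_INPUT_ROUTE_SELECT_ONE  << R300_INPUT_ROUTE_W_SHIFT))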
+
+/* END */
+
+/* gap */
+/* BEGIN: Upload vertex program and data
+// The programmable vertex shader unit has a memory bank of unknown size
+// that can be written to in 16 byte units by writing the address into
+// UPLOAD_ADDRESS, followed by data in UPLOAD_DATA (multiples of 4 DWORDs).
+//
+// Pointers into the memory bank are always in multiples of 16 bytes.
+//
+// The memory bank is divided into areas with fixed meaning.
+//
+// Starting at address UPLOAD_PROGRAM: Vertex program instructions.
+// Native limits reported by drivers from ATI suggest size 256 (i.e. 4KB),
+// whereas the difference between known addresses suggests size 512.
+//
+// Starting at address UPLOAD_PARAMETERS: Vertex program parameters.
+// Native reported limits and the VPI layout suggest size 256, whereas
+// difference between known addresses suggests size 512.
+//
+// At address UPLOAD_POINTSIZE is a vector (0, 0, ps, 0), where ps is the
+// floating point pointsize. The exact purpose of this state is uncertain,
+// as there is also the R300_RE_POINTSIZE register.
+//
+// Multiple vertex programs and parameter sets can be loaded at once,
+// which could explain the size discrepancy. */
+#define R300_VAP_PVS_UPLOAD_ADDRESS         0x2200
+#       define R300_PVS_UPLOAD_PROGRAM           0x00000000
+#       define R300_PVS_UPLOAD_PARAMETERS        0x00000200
+#       define R300_PVS_UPLOAD_POINTSIZE         0x00000406
+/* gap */
+#define R300_VAP_PVS_UPLOAD_DATA            0x2208
+/* END */
+
+/* gap */
+/* I do not know the purpose of this register. However, I do know that
+// it is set to 221C_CLEAR for clear operations and to 221C_NORMAL
+// for normal rendering. */
+#define R300_VAP_UNKNOWN_221C               0x221C
+#       define R300_221C_NORMAL                  0x00000000
+#       define R300_221C_CLEAR                   0x0001C000
+
+/* gap */
+/* Sometimes, END_OF_PKT and 0x2284=0 are the only commands sent between
+// rendering commands and overwriting vertex program parameters.
+// Therefore, I suspect writing zero to 0x2284 synchronizes the engine and
+// avoids bugs caused by still running shaders reading bad data from memory. */
+#define R300_VAP_PVS_WAITIDLE               0x2284 /* GUESS */
+
+/* Absolutely no clue what this register is about. */
+#define R300_VAP_UNKNOWN_2288               0x2288
+#       define R300_2288_R300                    0x00750000 /* -- nh */
+#       define R300_2288_RV350                   0x0000FFFF /* -- Vladimir */
+
+/* gap */
+/* Addresses are relative to the vertex program instruction area of the
+// memory bank. PROGRAM_END points to the last instruction of the active
+// program.
+//
+// The meaning of the two UNKNOWN fields is obviously not known. However,
+// experiments so far have shown that both *must* point to an instruction
+// inside the vertex program, otherwise the GPU locks up.
+// fglrx usually sets CNTL_3_UNKNOWN to the end of the program and
+// CNTL_1_UNKNOWN to the instruction where the last write to position takes
+// place. Most likely this is used to ignore the rest of the program in cases
+// where a group of vertices is not visible. For some reason this "section"
+// sometimes also accepts other instructions that have no relationship with
+// position calculations.
+*/
+#define R300_VAP_PVS_CNTL_1                 0x22D0
+#       define R300_PVS_CNTL_1_PROGRAM_START_SHIFT   0
+#       define R300_PVS_CNTL_1_POS_END_SHIFT         10
+#       define R300_PVS_CNTL_1_PROGRAM_END_SHIFT     20
+/* Addresses are relative to the vertex program parameters area. */
+#define R300_VAP_PVS_CNTL_2                 0x22D4
+#       define R300_PVS_CNTL_2_PARAM_OFFSET_SHIFT 0
+#       define R300_PVS_CNTL_2_PARAM_COUNT_SHIFT  16
+#define R300_VAP_PVS_CNTL_3	           0x22D8
+#       define R300_PVS_CNTL_3_PROGRAM_UNKNOWN_SHIFT 10
+#       define R300_PVS_CNTL_3_PROGRAM_UNKNOWN2_SHIFT 0
+
+/* The entire range from 0x2300 to 0x24AC inclusive seems to be used for
+// immediate vertices */
+#define R300_VAP_VTX_COLOR_R                0x2464
+#define R300_VAP_VTX_COLOR_G                0x2468
+#define R300_VAP_VTX_COLOR_B                0x246C
+#define R300_VAP_VTX_POS_0_X_1              0x2490 /* used for glVertex2*() */
+#define R300_VAP_VTX_POS_0_Y_1              0x2494
+#define R300_VAP_VTX_COLOR_PKD              0x249C /* RGBA */
+#define R300_VAP_VTX_POS_0_X_2              0x24A0 /* used for glVertex3*() */
+#define R300_VAP_VTX_POS_0_Y_2              0x24A4
+#define R300_VAP_VTX_POS_0_Z_2              0x24A8
+#define R300_VAP_VTX_END_OF_PKT             0x24AC /* write 0 to indicate end of packet? */
+
+/* gap */
+
+/* These are values from r300_reg/r300_reg.h - they are known to be correct
+   and are here so we can use one register file instead of several
+   - Vladimir */
+#define R300_GB_VAP_RASTER_VTX_FMT_0	0x4000
+#	define R300_GB_VAP_RASTER_VTX_FMT_0__POS_PRESENT	(1<<0)
+#	define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_0_PRESENT	(1<<1)
+#	define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_1_PRESENT	(1<<2)
+#	define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_2_PRESENT	(1<<3)
+#	define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_3_PRESENT	(1<<4)
+#	define R300_GB_VAP_RASTER_VTX_FMT_0__COLOR_SPACE	(0xf<<5)
+#	define R300_GB_VAP_RASTER_VTX_FMT_0__PT_SIZE_PRESENT	(0x1<<16)
+
+#define R300_GB_VAP_RASTER_VTX_FMT_1	0x4004
+	/* each of the following is 3 bits wide, specifies number
+	   of components */
+#	define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_0_COMP_CNT_SHIFT	0
+#	define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_1_COMP_CNT_SHIFT	3
+#	define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_2_COMP_CNT_SHIFT	6
+#	define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_3_COMP_CNT_SHIFT	9
+#	define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_4_COMP_CNT_SHIFT	12
+#	define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_5_COMP_CNT_SHIFT	15
+#	define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_6_COMP_CNT_SHIFT	18
+#	define R300_GB_VAP_RASTER_VTX_FMT_1__TEX_7_COMP_CNT_SHIFT	21
+
+/* UNK30 seems to enable point-to-quad transformation on textures
+   (or something closely related to that).
+   This bit is rather fatal at the moment due to shortcomings on the pixel
+   shader side */
+#define R300_GB_ENABLE	0x4008
+#	define R300_GB_POINT_STUFF_ENABLE	(1<<0)
+#	define R300_GB_LINE_STUFF_ENABLE	(1<<1)
+#	define R300_GB_TRIANGLE_STUFF_ENABLE	(1<<2)
+#	define R300_GB_STENCIL_AUTO_ENABLE	(1<<4)
+#	define R300_GB_UNK30			(1<<30)
+	/* each of the following is 2 bits wide */
+#define R300_GB_TEX_REPLICATE	0
+#define R300_GB_TEX_ST		1
+#define R300_GB_TEX_STR		2
+#	define R300_GB_TEX0_SOURCE_SHIFT	16
+#	define R300_GB_TEX1_SOURCE_SHIFT	18
+#	define R300_GB_TEX2_SOURCE_SHIFT	20
+#	define R300_GB_TEX3_SOURCE_SHIFT	22
+#	define R300_GB_TEX4_SOURCE_SHIFT	24
+#	define R300_GB_TEX5_SOURCE_SHIFT	26
+#	define R300_GB_TEX6_SOURCE_SHIFT	28
+#	define R300_GB_TEX7_SOURCE_SHIFT	30
+
+/* MSPOS - positions for multisample antialiasing (?) */
+#define R300_GB_MSPOS0	0x4010
+	/* shifts - each of the fields is 4 bits */
+#	define R300_GB_MSPOS0__MS_X0_SHIFT	0
+#	define R300_GB_MSPOS0__MS_Y0_SHIFT	4
+#	define R300_GB_MSPOS0__MS_X1_SHIFT	8
+#	define R300_GB_MSPOS0__MS_Y1_SHIFT	12
+#	define R300_GB_MSPOS0__MS_X2_SHIFT	16
+#	define R300_GB_MSPOS0__MS_Y2_SHIFT	20
+#	define R300_GB_MSPOS0__MSBD0_Y		24
+#	define R300_GB_MSPOS0__MSBD0_X		28
+
+#define R300_GB_MSPOS1	0x4014
+#	define R300_GB_MSPOS1__MS_X3_SHIFT	0
+#	define R300_GB_MSPOS1__MS_Y3_SHIFT	4
+#	define R300_GB_MSPOS1__MS_X4_SHIFT	8
+#	define R300_GB_MSPOS1__MS_Y4_SHIFT	12
+#	define R300_GB_MSPOS1__MS_X5_SHIFT	16
+#	define R300_GB_MSPOS1__MS_Y5_SHIFT	20
+#	define R300_GB_MSPOS1__MSBD1		24
+
+
+#define R300_GB_TILE_CONFIG	0x4018
+#	define R300_GB_TILE_ENABLE	(1<<0)
+#	define R300_GB_TILE_PIPE_COUNT_RV300	0
+#	define R300_GB_TILE_PIPE_COUNT_R300	(3<<1)
+#	define R300_GB_TILE_PIPE_COUNT_R420	(7<<1)
+#	define R300_GB_TILE_SIZE_8		0
+#	define R300_GB_TILE_SIZE_16		(1<<4)
+#	define R300_GB_TILE_SIZE_32		(2<<4)
+#	define R300_GB_SUPER_SIZE_1		(0<<6)
+#	define R300_GB_SUPER_SIZE_2		(1<<6)
+#	define R300_GB_SUPER_SIZE_4		(2<<6)
+#	define R300_GB_SUPER_SIZE_8		(3<<6)
+#	define R300_GB_SUPER_SIZE_16		(4<<6)
+#	define R300_GB_SUPER_SIZE_32		(5<<6)
+#	define R300_GB_SUPER_SIZE_64		(6<<6)
+#	define R300_GB_SUPER_SIZE_128		(7<<6)
+#	define R300_GB_SUPER_X_SHIFT		9	/* 3 bits wide */
+#	define R300_GB_SUPER_Y_SHIFT		12	/* 3 bits wide */
+#	define R300_GB_SUPER_TILE_A		0
+#	define R300_GB_SUPER_TILE_B		(1<<15)
+#	define R300_GB_SUBPIXEL_1_12		0
+#	define R300_GB_SUBPIXEL_1_16		(1<<16)
+
+#define R300_GB_FIFO_SIZE	0x4024
+	/* each of the following is 2 bits wide */
+#define R300_GB_FIFO_SIZE_32	0
+#define R300_GB_FIFO_SIZE_64	1
+#define R300_GB_FIFO_SIZE_128	2
+#define R300_GB_FIFO_SIZE_256	3
+#	define R300_SC_IFIFO_SIZE_SHIFT	0
+#	define R300_SC_TZFIFO_SIZE_SHIFT	2
+#	define R300_SC_BFIFO_SIZE_SHIFT	4
+
+#	define R300_US_OFIFO_SIZE_SHIFT	12
+#	define R300_US_WFIFO_SIZE_SHIFT	14
+	/* the following use the same constants as above, but the meaning is
+	   times 2 (i.e. instead of 32 words it means 64 words) */
+#	define R300_RS_TFIFO_SIZE_SHIFT	6
+#	define R300_RS_CFIFO_SIZE_SHIFT	8
+#	define R300_US_RAM_SIZE_SHIFT		10
+	/* watermarks, 3 bits wide */
+#	define R300_RS_HIGHWATER_COL_SHIFT	16
+#	define R300_RS_HIGHWATER_TEX_SHIFT	19
+#	define R300_OFIFO_HIGHWATER_SHIFT	22	/* two bits only */
+#	define R300_CUBE_FIFO_HIGHWATER_COL_SHIFT	24
+
+#define R300_GB_SELECT	0x401C
+#	define R300_GB_FOG_SELECT_C0A		0
+#	define R300_GB_FOG_SELECT_C1A		1
+#	define R300_GB_FOG_SELECT_C2A		2
+#	define R300_GB_FOG_SELECT_C3A		3
+#	define R300_GB_FOG_SELECT_1_1_W	4
+#	define R300_GB_FOG_SELECT_Z		5
+#	define R300_GB_DEPTH_SELECT_Z		0
+#	define R300_GB_DEPTH_SELECT_1_1_W	(1<<3)
+#	define R300_GB_W_SELECT_1_W		0
+#	define R300_GB_W_SELECT_1		(1<<4)
+
+#define R300_GB_AA_CONFIG		0x4020
+#	define R300_AA_ENABLE			0x01
+#	define R300_AA_SUBSAMPLES_2		0
+#	define R300_AA_SUBSAMPLES_3		(1<<1)
+#	define R300_AA_SUBSAMPLES_4		(2<<1)
+#	define R300_AA_SUBSAMPLES_6		(3<<1)
+
+/* END */
+
+/* gap */
+/* The upper enable bits are guessed, based on fglrx reported limits. */
+#define R300_TX_ENABLE                      0x4104
+#       define R300_TX_ENABLE_0                  (1 << 0)
+#       define R300_TX_ENABLE_1                  (1 << 1)
+#       define R300_TX_ENABLE_2                  (1 << 2)
+#       define R300_TX_ENABLE_3                  (1 << 3)
+#       define R300_TX_ENABLE_4                  (1 << 4)
+#       define R300_TX_ENABLE_5                  (1 << 5)
+#       define R300_TX_ENABLE_6                  (1 << 6)
+#       define R300_TX_ENABLE_7                  (1 << 7)
+#       define R300_TX_ENABLE_8                  (1 << 8)
+#       define R300_TX_ENABLE_9                  (1 << 9)
+#       define R300_TX_ENABLE_10                 (1 << 10)
+#       define R300_TX_ENABLE_11                 (1 << 11)
+#       define R300_TX_ENABLE_12                 (1 << 12)
+#       define R300_TX_ENABLE_13                 (1 << 13)
+#       define R300_TX_ENABLE_14                 (1 << 14)
+#       define R300_TX_ENABLE_15                 (1 << 15)
+
+/* The pointsize is given in multiples of 6. The pointsize can be
+// enormous: Clear() renders a single point that fills the entire
+// framebuffer. */
+#define R300_RE_POINTSIZE                   0x421C
+#       define R300_POINTSIZE_Y_SHIFT            0
+#       define R300_POINTSIZE_Y_MASK             (0xFFFF << 0) /* GUESS */
+#       define R300_POINTSIZE_X_SHIFT            16
+#       define R300_POINTSIZE_X_MASK             (0xFFFF << 16) /* GUESS */
+#       define R300_POINTSIZE_MAX             (R300_POINTSIZE_Y_MASK / 6)
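+
+/* Hedged illustration of the "multiples of 6" encoding noted above: the
+ * register value for an (n)-pixel point, packed into both the X and Y
+ * fields. The field layout carries GUESS markers above, so treat this as a
+ * sketch; the R300_EXAMPLE_* name is illustrative only.
+ */
+#define R300_EXAMPLE_POINTSIZE(n)			\
+	((((n) * 6) << R300_POINTSIZE_X_SHIFT) |	\
+	 (((n) * 6) << R300_POINTSIZE_Y_SHIFT))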
+
+/* The line width is given in multiples of 6.
+   In default mode lines are classified as vertical lines.
+   HO: horizontal
+   VE: vertical or horizontal
+   HO & VE: no classification
+*/
+#define R300_RE_LINE_CNT                      0x4234
+#       define R300_LINESIZE_SHIFT            0
+#       define R300_LINESIZE_MASK             (0xFFFF << 0) /* GUESS */
+#       define R300_LINESIZE_MAX             (R300_LINESIZE_MASK / 6)
+#       define R300_LINE_CNT_HO               (1 << 16)
+#       define R300_LINE_CNT_VE               (1 << 17)
+
+/* Some sort of scale or clamp value for texcoordless textures. */
+#define R300_RE_UNK4238                       0x4238
+
+#define R300_RE_SHADE_MODEL                   0x4278
+#	define R300_RE_SHADE_MODEL_SMOOTH     0x3aaaa
+#	define R300_RE_SHADE_MODEL_FLAT       0x39595
+
+/* Dangerous */
+#define R300_RE_POLYGON_MODE                  0x4288
+#	define R300_PM_ENABLED                (1 << 0)
+#	define R300_PM_FRONT_POINT            (0 << 0)
+#	define R300_PM_BACK_POINT             (0 << 0)
+#	define R300_PM_FRONT_LINE             (1 << 4)
+#	define R300_PM_FRONT_FILL             (1 << 5)
+#	define R300_PM_BACK_LINE              (1 << 7)
+#	define R300_PM_BACK_FILL              (1 << 8)
+
+/* Not sure why there are duplicates of the factor and constant values.
+   My best guess so far is that there are separate zbiases for test and write.
+   Ordering might be wrong.
+   Some of the tests indicate that fgl has a fallback implementation of zbias
+   via pixel shaders. */
+#define R300_RE_ZBIAS_T_FACTOR                0x42A4
+#define R300_RE_ZBIAS_T_CONSTANT              0x42A8
+#define R300_RE_ZBIAS_W_FACTOR                0x42AC
+#define R300_RE_ZBIAS_W_CONSTANT              0x42B0
+
+/* This register needs to be set to (1<<1) for RV350 to correctly
+   perform depth test (see --vb-triangles in r300_demo).
+   Don't know about other chips. - Vladimir
+   This is set to 3 when GL_POLYGON_OFFSET_FILL is on.
+   My guess is that there are two bits for each zbias primitive
+   (FILL, LINE, POINT): one to enable depth test and one for depth write.
+   Yet this doesn't explain why depth writes work ...
+    */
+#define R300_RE_OCCLUSION_CNTL		    0x42B4
+#	define R300_OCCLUSION_ON		(1<<1)
+
+#define R300_RE_CULL_CNTL                   0x42B8
+#       define R300_CULL_FRONT                   (1 << 0)
+#       define R300_CULL_BACK                    (1 << 1)
+#       define R300_FRONT_FACE_CCW               (0 << 2)
+#       define R300_FRONT_FACE_CW                (1 << 2)
+
+
+/* BEGIN: Rasterization / Interpolators - many guesses
+// 0_UNKNOWN_18 has always been set except for clear operations.
+// TC_CNT is the number of incoming texture coordinate sets (i.e. it depends
+// on the vertex program, *not* the fragment program) */
+#define R300_RS_CNTL_0                      0x4300
+#       define R300_RS_CNTL_TC_CNT_SHIFT         2
+#       define R300_RS_CNTL_TC_CNT_MASK          (7 << 2)
+#		define R300_RS_CNTL_CI_CNT_SHIFT         7 /* number of color interpolators used */
+#       define R300_RS_CNTL_0_UNKNOWN_18         (1 << 18)
+/* Guess: RS_CNTL_1 holds the index of the highest used RS_ROUTE_n register. */
+#define R300_RS_CNTL_1                      0x4304
+
+/* gap */
+/* Only used for texture coordinates.
+// Use the source field to route texture coordinate input from the vertex program
+// to the desired interpolator. Note that the source field is relative to the
+// outputs the vertex program *actually* writes. If a vertex program only writes
+// texcoord[1], this will be source index 0.
+// Set INTERP_USED on all interpolators that produce data used by the
+// fragment program. INTERP_USED looks like a swizzling mask, but
+// I haven't seen it used that way.
+//
+// Note: The _UNKNOWN constants are always set in their respective register.
+// I don't know if this is necessary. */
+#define R300_RS_INTERP_0                    0x4310
+#define R300_RS_INTERP_1                    0x4314
+#       define R300_RS_INTERP_1_UNKNOWN          0x40
+#define R300_RS_INTERP_2                    0x4318
+#       define R300_RS_INTERP_2_UNKNOWN          0x80
+#define R300_RS_INTERP_3                    0x431C
+#       define R300_RS_INTERP_3_UNKNOWN          0xC0
+#define R300_RS_INTERP_4                    0x4320
+#define R300_RS_INTERP_5                    0x4324
+#define R300_RS_INTERP_6                    0x4328
+#define R300_RS_INTERP_7                    0x432C
+#       define R300_RS_INTERP_SRC_SHIFT          2
+#       define R300_RS_INTERP_SRC_MASK           (7 << 2)
+#       define R300_RS_INTERP_USED               0x00D10000
+
+/* These DWORDs control how vertex data is routed into fragment program
+// registers, after interpolators. */
+#define R300_RS_ROUTE_0                     0x4330
+#define R300_RS_ROUTE_1                     0x4334
+#define R300_RS_ROUTE_2                     0x4338
+#define R300_RS_ROUTE_3                     0x433C /* GUESS */
+#define R300_RS_ROUTE_4                     0x4340 /* GUESS */
+#define R300_RS_ROUTE_5                     0x4344 /* GUESS */
+#define R300_RS_ROUTE_6                     0x4348 /* GUESS */
+#define R300_RS_ROUTE_7                     0x434C /* GUESS */
+#       define R300_RS_ROUTE_SOURCE_INTERP_0     0
+#       define R300_RS_ROUTE_SOURCE_INTERP_1     1
+#       define R300_RS_ROUTE_SOURCE_INTERP_2     2
+#       define R300_RS_ROUTE_SOURCE_INTERP_3     3
+#       define R300_RS_ROUTE_SOURCE_INTERP_4     4
+#       define R300_RS_ROUTE_SOURCE_INTERP_5     5 /* GUESS */
+#       define R300_RS_ROUTE_SOURCE_INTERP_6     6 /* GUESS */
+#       define R300_RS_ROUTE_SOURCE_INTERP_7     7 /* GUESS */
+#       define R300_RS_ROUTE_ENABLE              (1 << 3) /* GUESS */
+#       define R300_RS_ROUTE_DEST_SHIFT          6
+#       define R300_RS_ROUTE_DEST_MASK           (31 << 6) /* GUESS */
+
+/* Special handling for color: When the fragment program uses color,
+// the ROUTE_0_COLOR bit is set and ROUTE_0_COLOR_DEST contains the
+// color register index. */
+#       define R300_RS_ROUTE_0_COLOR             (1 << 14)
+#       define R300_RS_ROUTE_0_COLOR_DEST_SHIFT  17
+#       define R300_RS_ROUTE_0_COLOR_DEST_MASK   (31 << 17) /* GUESS */
+/* As above, but for secondary color */
+#		define R300_RS_ROUTE_1_COLOR1            (1 << 14)
+#		define R300_RS_ROUTE_1_COLOR1_DEST_SHIFT 17
+#		define R300_RS_ROUTE_1_COLOR1_DEST_MASK  (31 << 17)
+#		define R300_RS_ROUTE_1_UNKNOWN11         (1 << 11)
+/* END */
+
+/* BEGIN: Scissors and cliprects
+// There are four clipping rectangles. Their corner coordinates are inclusive.
+// Every pixel is assigned a number from 0 to 15 by setting bits 0-3 depending
+// on whether the pixel is inside cliprects 0-3, respectively. For example,
+// if a pixel is inside cliprects 0 and 1, but outside 2 and 3, it is assigned
+// the number 3 (binary 0011).
+// Iff the bit corresponding to the pixel's number in RE_CLIPRECT_CNTL is set,
+// the pixel is rasterized.
+//
+// In addition to this, there is a scissors rectangle. Only pixels inside the
+// scissors rectangle are drawn. (coordinates are inclusive)
+//
+// For some reason, the top-left corner of the framebuffer is at (1440, 1440)
+// for the purpose of clipping and scissors. */
+#define R300_RE_CLIPRECT_TL_0               0x43B0
+#define R300_RE_CLIPRECT_BR_0               0x43B4
+#define R300_RE_CLIPRECT_TL_1               0x43B8
+#define R300_RE_CLIPRECT_BR_1               0x43BC
+#define R300_RE_CLIPRECT_TL_2               0x43C0
+#define R300_RE_CLIPRECT_BR_2               0x43C4
+#define R300_RE_CLIPRECT_TL_3               0x43C8
+#define R300_RE_CLIPRECT_BR_3               0x43CC
+#       define R300_CLIPRECT_OFFSET              1440
+#       define R300_CLIPRECT_MASK                0x1FFF
+#       define R300_CLIPRECT_X_SHIFT             0
+#       define R300_CLIPRECT_X_MASK              (0x1FFF << 0)
+#       define R300_CLIPRECT_Y_SHIFT             13
+#       define R300_CLIPRECT_Y_MASK              (0x1FFF << 13)
+#define R300_RE_CLIPRECT_CNTL               0x43D0
+#       define R300_CLIP_OUT                     (1 << 0)
+#       define R300_CLIP_0                       (1 << 1)
+#       define R300_CLIP_1                       (1 << 2)
+#       define R300_CLIP_10                      (1 << 3)
+#       define R300_CLIP_2                       (1 << 4)
+#       define R300_CLIP_20                      (1 << 5)
+#       define R300_CLIP_21                      (1 << 6)
+#       define R300_CLIP_210                     (1 << 7)
+#       define R300_CLIP_3                       (1 << 8)
+#       define R300_CLIP_30                      (1 << 9)
+#       define R300_CLIP_31                      (1 << 10)
+#       define R300_CLIP_310                     (1 << 11)
+#       define R300_CLIP_32                      (1 << 12)
+#       define R300_CLIP_320                     (1 << 13)
+#       define R300_CLIP_321                     (1 << 14)
+#       define R300_CLIP_3210                    (1 << 15)
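+
+/* Hedged illustration of the packing described above: one cliprect corner at
+ * framebuffer coordinates (x, y), biased by the 1440 offset and packed into
+ * the 13-bit fields. This mirrors what r300_emit_cliprects does in
+ * r300_cmdbuf.c; the R300_EXAMPLE_* name is illustrative only.
+ */
+#define R300_EXAMPLE_CLIPRECT_CORNER(x, y)				\
+	(((((x) + R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK)		\
+	  << R300_CLIPRECT_X_SHIFT) |					\
+	 ((((y) + R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK)		\
+	  << R300_CLIPRECT_Y_SHIFT))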
+
+/* gap */
+#define R300_RE_SCISSORS_TL                 0x43E0
+#define R300_RE_SCISSORS_BR                 0x43E4
+#       define R300_SCISSORS_OFFSET              1440
+#       define R300_SCISSORS_X_SHIFT             0
+#       define R300_SCISSORS_X_MASK              (0x1FFF << 0)
+#       define R300_SCISSORS_Y_SHIFT             13
+#       define R300_SCISSORS_Y_MASK              (0x1FFF << 13)
+/* END */
+
+/* BEGIN: Texture specification
+// The texture specification dwords are grouped by meaning and not by texture unit.
+// This means that e.g. the offset for texture image unit N is found in register
+// TX_OFFSET_0 + (4*N) */
+#define R300_TX_FILTER_0                    0x4400
+#       define R300_TX_REPEAT                    0
+#       define R300_TX_MIRRORED                  1
+#       define R300_TX_CLAMP                     4
+#       define R300_TX_CLAMP_TO_EDGE             2
+#       define R300_TX_CLAMP_TO_BORDER           6
+#       define R300_TX_WRAP_S_SHIFT              0
+#       define R300_TX_WRAP_S_MASK               (7 << 0)
+#       define R300_TX_WRAP_T_SHIFT              3
+#       define R300_TX_WRAP_T_MASK               (7 << 3)
+#       define R300_TX_WRAP_Q_SHIFT              6
+#       define R300_TX_WRAP_Q_MASK               (7 << 6)
+#       define R300_TX_MAG_FILTER_NEAREST        (1 << 9)
+#       define R300_TX_MAG_FILTER_LINEAR         (2 << 9)
+#       define R300_TX_MAG_FILTER_MASK           (3 << 9)
+#       define R300_TX_MIN_FILTER_NEAREST        (1 << 11)
+#       define R300_TX_MIN_FILTER_LINEAR         (2 << 11)
+#	define R300_TX_MIN_FILTER_NEAREST_MIP_NEAREST       (5  <<  11)
+#	define R300_TX_MIN_FILTER_NEAREST_MIP_LINEAR        (9  <<  11)
+#	define R300_TX_MIN_FILTER_LINEAR_MIP_NEAREST        (6  <<  11)
+#	define R300_TX_MIN_FILTER_LINEAR_MIP_LINEAR         (10 <<  11)
+
+/* NOTE: NEAREST doesn't seem to exist.
+   I'm not setting MAG_FILTER_MASK and (3 << 11) for all
+   anisotropy modes because that would void the selected mag filter */
+#	define R300_TX_MIN_FILTER_ANISO_NEAREST             ((0 << 13) /*|R300_TX_MAG_FILTER_MASK|(3<<11)*/)
+#	define R300_TX_MIN_FILTER_ANISO_LINEAR              ((0 << 13) /*|R300_TX_MAG_FILTER_MASK|(3<<11)*/)
+#	define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_NEAREST ((1 << 13) /*|R300_TX_MAG_FILTER_MASK|(3<<11)*/)
+#	define R300_TX_MIN_FILTER_ANISO_NEAREST_MIP_LINEAR  ((2 << 13) /*|R300_TX_MAG_FILTER_MASK|(3<<11)*/)
+#       define R300_TX_MIN_FILTER_MASK           ( (15 << 11) | (3 << 13) )
+#	define R300_TX_MAX_ANISO_1_TO_1  (0 << 21)
+#	define R300_TX_MAX_ANISO_2_TO_1  (2 << 21)
+#	define R300_TX_MAX_ANISO_4_TO_1  (4 << 21)
+#	define R300_TX_MAX_ANISO_8_TO_1  (6 << 21)
+#	define R300_TX_MAX_ANISO_16_TO_1 (8 << 21)
+#	define R300_TX_MAX_ANISO_MASK    (14 << 21)
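+
+/* Hedged illustration of how a TX_FILTER_0 word is assembled from the
+ * defines above: repeat wrapping on all three coordinates with trilinear
+ * filtering. A sketch only; the R300_EXAMPLE_* name is illustrative.
+ */
+#define R300_EXAMPLE_TX_FILTER_TRILINEAR			\
+	((R300_TX_REPEAT << R300_TX_WRAP_S_SHIFT) |		\
+	 (R300_TX_REPEAT << R300_TX_WRAP_T_SHIFT) |		\
+	 (R300_TX_REPEAT << R300_TX_WRAP_Q_SHIFT) |		\
+	 R300_TX_MAG_FILTER_LINEAR |				\
+	 R300_TX_MIN_FILTER_LINEAR_MIP_LINEAR)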
+
+#define R300_TX_UNK1_0                      0x4440
+#	define R300_LOD_BIAS_MASK	    0x1fff
+
+#define R300_TX_SIZE_0                      0x4480
+#       define R300_TX_WIDTHMASK_SHIFT           0
+#       define R300_TX_WIDTHMASK_MASK            (2047 << 0)
+#       define R300_TX_HEIGHTMASK_SHIFT          11
+#       define R300_TX_HEIGHTMASK_MASK           (2047 << 11)
+#       define R300_TX_UNK23                     (1 << 23)
+#       define R300_TX_SIZE_SHIFT                26 /* largest of width, height */
+#       define R300_TX_SIZE_MASK                 (15 << 26)
+#define R300_TX_FORMAT_0                    0x44C0
+	/* The interpretation of the format word by Wladimir van der Laan */
+	/* The X, Y, Z and W refer to the layout of the components.
+	   They are given meanings as R, G, B and Alpha by the swizzle
+	   specification */
+#	define R300_TX_FORMAT_X8		    0x0
+#	define R300_TX_FORMAT_X16		    0x1
+#	define R300_TX_FORMAT_Y4X4		    0x2
+#	define R300_TX_FORMAT_Y8X8		    0x3
+#	define R300_TX_FORMAT_Y16X16		    0x4
+#	define R300_TX_FORMAT_Z3Y3X2		    0x5
+#	define R300_TX_FORMAT_Z5Y6X5		    0x6
+#	define R300_TX_FORMAT_Z6Y5X5		    0x7
+#	define R300_TX_FORMAT_Z11Y11X10		    0x8
+#	define R300_TX_FORMAT_Z10Y11X11		    0x9
+#	define R300_TX_FORMAT_W4Z4Y4X4		    0xA
+#	define R300_TX_FORMAT_W1Z5Y5X5		    0xB
+#	define R300_TX_FORMAT_W8Z8Y8X8		    0xC
+#	define R300_TX_FORMAT_W2Z10Y10X10	    0xD
+#	define R300_TX_FORMAT_W16Z16Y16X16	    0xE
+#	define R300_TX_FORMAT_DXT1	    	    0xF
+#	define R300_TX_FORMAT_DXT3	    	    0x10
+#	define R300_TX_FORMAT_DXT5	    	    0x11
+#	define R300_TX_FORMAT_D3DMFT_CxV8U8	    0x12     /* no swizzle */
+#	define R300_TX_FORMAT_A8R8G8B8	    	    0x13     /* no swizzle */
+#	define R300_TX_FORMAT_B8G8_B8G8	    	    0x14     /* no swizzle */
+#	define R300_TX_FORMAT_G8R8_G8B8	    	    0x15     /* no swizzle */
+						  /* 0x16 - some 16 bit green format.. ?? */
+#	define R300_TX_FORMAT_UNK25		   (1 << 25) /* no swizzle */
+
+	/* gap */
+	/* Floating point formats */
+	/* Note - hardware supports both 16 and 32 bit floating point */
+#	define R300_TX_FORMAT_FL_I16	    	    0x18
+#	define R300_TX_FORMAT_FL_I16A16	    	    0x19
+#	define R300_TX_FORMAT_FL_R16G16B16A16	    0x1A
+#	define R300_TX_FORMAT_FL_I32	    	    0x1B
+#	define R300_TX_FORMAT_FL_I32A32	    	    0x1C
+#	define R300_TX_FORMAT_FL_R32G32B32A32	    0x1D
+	/* alpha modes, convenience mostly */
+	/* if you have alpha, pick the constant appropriate to the
+	   number of channels (1 for I8, 2 for I8A8, 4 for R8G8B8A8, etc.) */
+# 	define R300_TX_FORMAT_ALPHA_1CH		    0x000
+# 	define R300_TX_FORMAT_ALPHA_2CH		    0x200
+# 	define R300_TX_FORMAT_ALPHA_4CH		    0x600
+# 	define R300_TX_FORMAT_ALPHA_NONE	    0xA00
+	/* Swizzling */
+	/* constants */
+#	define R300_TX_FORMAT_X		0
+#	define R300_TX_FORMAT_Y		1
+#	define R300_TX_FORMAT_Z		2
+#	define R300_TX_FORMAT_W		3
+#	define R300_TX_FORMAT_ZERO	4
+#	define R300_TX_FORMAT_ONE	5
+#	define R300_TX_FORMAT_CUT_Z	6		/* 2.0*Z, everything above 1.0 is set to 0.0 */
+#	define R300_TX_FORMAT_CUT_W	7		/* 2.0*W, everything above 1.0 is set to 0.0 */
+
+#	define R300_TX_FORMAT_B_SHIFT	18
+#	define R300_TX_FORMAT_G_SHIFT	15
+#	define R300_TX_FORMAT_R_SHIFT	12
+#	define R300_TX_FORMAT_A_SHIFT	9
+	/* Convenience macro to take care of layout and swizzling */
+#	define R300_EASY_TX_FORMAT(B, G, R, A, FMT)	(\
+	  ((R300_TX_FORMAT_##B)<<R300_TX_FORMAT_B_SHIFT) \
+	| ((R300_TX_FORMAT_##G)<<R300_TX_FORMAT_G_SHIFT) \
+	| ((R300_TX_FORMAT_##R)<<R300_TX_FORMAT_R_SHIFT) \
+	| ((R300_TX_FORMAT_##A)<<R300_TX_FORMAT_A_SHIFT) \
+	| (R300_TX_FORMAT_##FMT) \
+	  )
+	/* These can be ORed with result of R300_EASY_TX_FORMAT() */
+	/* We don't really know what they do. Take values from a constant color ? */
+#	define R300_TX_FORMAT_CONST_X		(1<<5)
+#	define R300_TX_FORMAT_CONST_Y		(2<<5)
+#	define R300_TX_FORMAT_CONST_Z		(4<<5)
+#	define R300_TX_FORMAT_CONST_W		(8<<5)
+
+#	define R300_TX_FORMAT_YUV_MODE		0x00800000
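+
+	/* Hedged usage sketch for R300_EASY_TX_FORMAT() above: the common
+	   ARGB8888 layout appears to correspond to an identity swizzle on the
+	   four-byte W8Z8Y8X8 format. This is an inference from the defines,
+	   not a documented fact; the R300_EXAMPLE_* name is illustrative. */
+#	define R300_EXAMPLE_TX_FORMAT_ARGB8888	\
+		R300_EASY_TX_FORMAT(X, Y, Z, W, W8Z8Y8X8)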
+
+#define R300_TX_OFFSET_0                    0x4540
+/* BEGIN: Guess from R200 */
+#       define R300_TXO_ENDIAN_NO_SWAP           (0 << 0)
+#       define R300_TXO_ENDIAN_BYTE_SWAP         (1 << 0)
+#       define R300_TXO_ENDIAN_WORD_SWAP         (2 << 0)
+#       define R300_TXO_ENDIAN_HALFDW_SWAP       (3 << 0)
+#       define R300_TXO_OFFSET_MASK              0xffffffe0
+#       define R300_TXO_OFFSET_SHIFT             5
+/* END */
+#define R300_TX_UNK4_0                      0x4580
+#define R300_TX_BORDER_COLOR_0              0x45C0 //ff00ff00 == { 0, 1.0, 0, 1.0 }
+
+/* END */
+
+/* BEGIN: Fragment program instruction set
+// Fragment programs are written directly into register space.
+// There are separate instruction streams for texture instructions and ALU
+// instructions.
+// In order to synchronize these streams, the program is divided into up
+// to 4 nodes. Each node begins with a number of TEX operations, followed
+// by a number of ALU operations.
+// The first node can have zero TEX ops; all subsequent nodes must have at
+// least one TEX op.
+// All nodes must have at least one ALU op.
+//
+// The index of the last node is stored in PFS_CNTL_0: A value of 0 means
+// 1 node, a value of 3 means 4 nodes.
+// The total amount of instructions is defined in PFS_CNTL_2. The offsets are
+// offsets into the respective instruction streams, while *_END points to the
+// last instruction relative to this offset. */
+#define R300_PFS_CNTL_0                     0x4600
+#       define R300_PFS_CNTL_LAST_NODES_SHIFT    0
+#       define R300_PFS_CNTL_LAST_NODES_MASK     (3 << 0)
+#       define R300_PFS_CNTL_FIRST_NODE_HAS_TEX  (1 << 3)
+#define R300_PFS_CNTL_1                     0x4604
+/* There is an unshifted value here which has so far always been equal to the
+// index of the highest used temporary register. */
+#define R300_PFS_CNTL_2                     0x4608
+#       define R300_PFS_CNTL_ALU_OFFSET_SHIFT    0
+#       define R300_PFS_CNTL_ALU_OFFSET_MASK     (63 << 0)
+#       define R300_PFS_CNTL_ALU_END_SHIFT       6
+#       define R300_PFS_CNTL_ALU_END_MASK        (63 << 6)
+#       define R300_PFS_CNTL_TEX_OFFSET_SHIFT    12
+#       define R300_PFS_CNTL_TEX_OFFSET_MASK     (31 << 12) /* GUESS */
+#       define R300_PFS_CNTL_TEX_END_SHIFT       18
+#       define R300_PFS_CNTL_TEX_END_MASK        (31 << 18) /* GUESS */
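+/* Illustrative example (pieced together from the description above, so treat
+ * it as a guess): a single-node program with one TEX and four ALU
+ * instructions, both streams starting at offset 0, would use
+ *
+ *	R300_PFS_CNTL_0: (0 << R300_PFS_CNTL_LAST_NODES_SHIFT)
+ *			| R300_PFS_CNTL_FIRST_NODE_HAS_TEX
+ *	R300_PFS_CNTL_2: (0 << R300_PFS_CNTL_ALU_OFFSET_SHIFT)
+ *			| (3 << R300_PFS_CNTL_ALU_END_SHIFT)
+ *			| (0 << R300_PFS_CNTL_TEX_OFFSET_SHIFT)
+ *			| (0 << R300_PFS_CNTL_TEX_END_SHIFT)
+ *
+ * since *_END is the index of the last instruction relative to the offset.
+ */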
+
+/* gap */
+/* Nodes are stored backwards. The last active node is always stored in
+// PFS_NODE_3.
+// Example: In a 2-node program, NODE_0 and NODE_1 are set to 0. The
+// first node is stored in NODE_2, the second node is stored in NODE_3.
+//
+// Offsets are relative to the master offset from PFS_CNTL_2.
+// LAST_NODE is set for the last node, and only for the last node. */
+#define R300_PFS_NODE_0                     0x4610
+#define R300_PFS_NODE_1                     0x4614
+#define R300_PFS_NODE_2                     0x4618
+#define R300_PFS_NODE_3                     0x461C
+#       define R300_PFS_NODE_ALU_OFFSET_SHIFT    0
+#       define R300_PFS_NODE_ALU_OFFSET_MASK     (63 << 0)
+#       define R300_PFS_NODE_ALU_END_SHIFT       6
+#       define R300_PFS_NODE_ALU_END_MASK        (63 << 6)
+#       define R300_PFS_NODE_TEX_OFFSET_SHIFT    12
+#       define R300_PFS_NODE_TEX_OFFSET_MASK     (31 << 12)
+#       define R300_PFS_NODE_TEX_END_SHIFT       17
+#       define R300_PFS_NODE_TEX_END_MASK        (31 << 17)
+#       define R300_PFS_NODE_LAST_NODE           (1 << 22)
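+/* Illustrative guess at packing one node word from the fields above (the
+ * variable names are made up):
+ *
+ *	node = (alu_offset << R300_PFS_NODE_ALU_OFFSET_SHIFT)
+ *	     | (alu_end    << R300_PFS_NODE_ALU_END_SHIFT)
+ *	     | (tex_offset << R300_PFS_NODE_TEX_OFFSET_SHIFT)
+ *	     | (tex_end    << R300_PFS_NODE_TEX_END_SHIFT)
+ *	     | (is_last ? R300_PFS_NODE_LAST_NODE : 0);
+ */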
+
+/* TEX
+// As far as I can tell, texture instructions cannot write into output
+// registers directly. A subsequent ALU instruction is always necessary,
+// even if it's just MAD o0, r0, 1, 0 */
+#define R300_PFS_TEXI_0                     0x4620
+#       define R300_FPITX_SRC_SHIFT              0
+#       define R300_FPITX_SRC_MASK               (31 << 0)
+#       define R300_FPITX_SRC_CONST              (1 << 5) /* GUESS */
+#       define R300_FPITX_DST_SHIFT              6
+#       define R300_FPITX_DST_MASK               (31 << 6)
+#       define R300_FPITX_IMAGE_SHIFT            11
+#       define R300_FPITX_IMAGE_MASK             (15 << 11) /* GUESS based on layout and native limits */
+/* Unsure if these are opcodes, or some kind of bitfield, but this is how
+ * they were set when I checked
+ */
+#		define R300_FPITX_OPCODE_SHIFT			15
+#			define R300_FPITX_OP_TEX			1
+#			define R300_FPITX_OP_TXP			3
+#			define R300_FPITX_OP_TXB			4
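+/* Illustrative guess at a complete TEX word built from the fields above:
+ * "sample image 2 with coordinates from temp 0 into temp 1" would be roughly
+ *
+ *	(0 << R300_FPITX_SRC_SHIFT)
+ *	| (1 << R300_FPITX_DST_SHIFT)
+ *	| (2 << R300_FPITX_IMAGE_SHIFT)
+ *	| (R300_FPITX_OP_TEX << R300_FPITX_OPCODE_SHIFT)
+ */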
+
+/* ALU
+// The ALU instruction register blocks are enumerated according to the order
+// in which fglrx writes them. I assume there is space for 64 instructions, since
+// each block has space for a maximum of 64 DWORDs, and this matches reported
+// native limits.
+//
+// The basic functional block seems to be one MAD for each color and alpha,
+// and an adder that adds all components after the MUL.
+//  - ADD, MUL, MAD etc.: use MAD with appropriate neutral operands
+//  - DP4: Use OUTC_DP4, OUTA_DP4
+//  - DP3: Use OUTC_DP3, OUTA_DP4, appropriate alpha operands
+//  - DPH: Use OUTC_DP4, OUTA_DP4, appropriate alpha operands
+//  - CMP: If ARG2 < 0, return ARG1, else return ARG0
+//  - FLR: use FRC+MAD
+//  - XPD: use MAD+MAD
+//  - SGE, SLT: use MAD+CMP
+//  - RSQ: use ABS modifier for argument
+//  - Use OUTC_REPL_ALPHA to write results of an alpha-only operation (e.g. RCP)
+//    into color register
+//  - apparently, there's no quick DST operation
+//  - fglrx set FPI2_UNKNOWN_31 on a "MAD fragment.color, tmp0, tmp1, tmp2"
+//  - fglrx set FPI2_UNKNOWN_31 on a "MAX r2, r1, c0"
+//  - fglrx once set FPI0_UNKNOWN_31 on a "FRC r1, r1"
+//
+// Operand selection
+// First stage selects three sources from the available registers and
+// constant parameters. This is defined in INSTR1 (color) and INSTR3 (alpha).
+// fglrx sorts the three source fields: Registers before constants,
+// lower indices before higher indices; I do not know whether this is necessary.
+// fglrx fills unused sources with "read constant 0"
+// According to specs, you cannot select more than two different constants.
+//
+// Second stage selects the operands from the sources. This is defined in
+// INSTR0 (color) and INSTR2 (alpha). You can also select the special constants
+// zero and one.
+// Swizzling and negation happens in this stage, as well.
+//
+// Important: Color and alpha seem to be mostly separate, i.e. their sources
+// selection appears to be fully independent (the register storage is probably
+// physically split into a color and an alpha section).
+// However (because of the apparent physical split), there is some interaction
+// WRT swizzling. If, for example, you want to load an R component into an
+// Alpha operand, this R component is taken from a *color* source, not from
+// an alpha source. The corresponding register doesn't even have to appear in
+// the alpha sources list. (I hope this all makes sense to you.)
+//
+// Destination selection
+// The destination register index is in FPI1 (color) and FPI3 (alpha) together
+// with enable bits.
+// There are separate enable bits for writing into temporary registers
+// (DSTC_REG_* /DSTA_REG) and program output registers (DSTC_OUTPUT_* /DSTA_OUTPUT).
+// You can write to both at once, or not write at all (the same index
+// must be used for both).
+//
+// Note: There is a special form for LRP
+//  - Argument order is the same as in ARB_fragment_program.
+//  - Operation is MAD
+//  - ARG1 is set to ARGC_SRC1C_LRP/ARGC_SRC1A_LRP
+//  - Set FPI0/FPI2_SPECIAL_LRP
+// Arbitrary LRP (including support for swizzling) requires vanilla MAD+MAD */
+#define R300_PFS_INSTR1_0                   0x46C0
+#       define R300_FPI1_SRC0C_SHIFT             0
+#       define R300_FPI1_SRC0C_MASK              (31 << 0)
+#       define R300_FPI1_SRC0C_CONST             (1 << 5)
+#       define R300_FPI1_SRC1C_SHIFT             6
+#       define R300_FPI1_SRC1C_MASK              (31 << 6)
+#       define R300_FPI1_SRC1C_CONST             (1 << 11)
+#       define R300_FPI1_SRC2C_SHIFT             12
+#       define R300_FPI1_SRC2C_MASK              (31 << 12)
+#       define R300_FPI1_SRC2C_CONST             (1 << 17)
+#       define R300_FPI1_DSTC_SHIFT              18
+#       define R300_FPI1_DSTC_MASK               (31 << 18)
+#       define R300_FPI1_DSTC_REG_X              (1 << 23)
+#       define R300_FPI1_DSTC_REG_Y              (1 << 24)
+#       define R300_FPI1_DSTC_REG_Z              (1 << 25)
+#       define R300_FPI1_DSTC_OUTPUT_X           (1 << 26)
+#       define R300_FPI1_DSTC_OUTPUT_Y           (1 << 27)
+#       define R300_FPI1_DSTC_OUTPUT_Z           (1 << 28)
+
+#define R300_PFS_INSTR3_0                   0x47C0
+#       define R300_FPI3_SRC0A_SHIFT             0
+#       define R300_FPI3_SRC0A_MASK              (31 << 0)
+#       define R300_FPI3_SRC0A_CONST             (1 << 5)
+#       define R300_FPI3_SRC1A_SHIFT             6
+#       define R300_FPI3_SRC1A_MASK              (31 << 6)
+#       define R300_FPI3_SRC1A_CONST             (1 << 11)
+#       define R300_FPI3_SRC2A_SHIFT             12
+#       define R300_FPI3_SRC2A_MASK              (31 << 12)
+#       define R300_FPI3_SRC2A_CONST             (1 << 17)
+#       define R300_FPI3_DSTA_SHIFT              18
+#       define R300_FPI3_DSTA_MASK               (31 << 18)
+#       define R300_FPI3_DSTA_REG                (1 << 23)
+#       define R300_FPI3_DSTA_OUTPUT             (1 << 24)
+
+#define R300_PFS_INSTR0_0                   0x48C0
+#       define R300_FPI0_ARGC_SRC0C_XYZ          0
+#       define R300_FPI0_ARGC_SRC0C_XXX          1
+#       define R300_FPI0_ARGC_SRC0C_YYY          2
+#       define R300_FPI0_ARGC_SRC0C_ZZZ          3
+#       define R300_FPI0_ARGC_SRC1C_XYZ          4
+#       define R300_FPI0_ARGC_SRC1C_XXX          5
+#       define R300_FPI0_ARGC_SRC1C_YYY          6
+#       define R300_FPI0_ARGC_SRC1C_ZZZ          7
+#       define R300_FPI0_ARGC_SRC2C_XYZ          8
+#       define R300_FPI0_ARGC_SRC2C_XXX          9
+#       define R300_FPI0_ARGC_SRC2C_YYY          10
+#       define R300_FPI0_ARGC_SRC2C_ZZZ          11
+#       define R300_FPI0_ARGC_SRC0A              12
+#       define R300_FPI0_ARGC_SRC1A              13
+#       define R300_FPI0_ARGC_SRC2A              14
+#       define R300_FPI0_ARGC_SRC1C_LRP          15
+#       define R300_FPI0_ARGC_ZERO               20
+#       define R300_FPI0_ARGC_ONE                21
+#       define R300_FPI0_ARGC_HALF               22 /* GUESS */
+#       define R300_FPI0_ARGC_SRC0C_YZX          23
+#       define R300_FPI0_ARGC_SRC1C_YZX          24
+#       define R300_FPI0_ARGC_SRC2C_YZX          25
+#       define R300_FPI0_ARGC_SRC0C_ZXY          26
+#       define R300_FPI0_ARGC_SRC1C_ZXY          27
+#       define R300_FPI0_ARGC_SRC2C_ZXY          28
+#       define R300_FPI0_ARGC_SRC0CA_WZY         29
+#       define R300_FPI0_ARGC_SRC1CA_WZY         30
+#       define R300_FPI0_ARGC_SRC2CA_WZY         31
+
+#       define R300_FPI0_ARG0C_SHIFT             0
+#       define R300_FPI0_ARG0C_MASK              (31 << 0)
+#       define R300_FPI0_ARG0C_NEG               (1 << 5)
+#       define R300_FPI0_ARG0C_ABS               (1 << 6)
+#       define R300_FPI0_ARG1C_SHIFT             7
+#       define R300_FPI0_ARG1C_MASK              (31 << 7)
+#       define R300_FPI0_ARG1C_NEG               (1 << 12)
+#       define R300_FPI0_ARG1C_ABS               (1 << 13)
+#       define R300_FPI0_ARG2C_SHIFT             14
+#       define R300_FPI0_ARG2C_MASK              (31 << 14)
+#       define R300_FPI0_ARG2C_NEG               (1 << 19)
+#       define R300_FPI0_ARG2C_ABS               (1 << 20)
+#       define R300_FPI0_SPECIAL_LRP             (1 << 21)
+#       define R300_FPI0_OUTC_MAD                (0 << 23)
+#       define R300_FPI0_OUTC_DP3                (1 << 23)
+#       define R300_FPI0_OUTC_DP4                (2 << 23)
+#       define R300_FPI0_OUTC_MIN                (4 << 23)
+#       define R300_FPI0_OUTC_MAX                (5 << 23)
+#       define R300_FPI0_OUTC_CMP                (8 << 23)
+#       define R300_FPI0_OUTC_FRC                (9 << 23)
+#       define R300_FPI0_OUTC_REPL_ALPHA         (10 << 23)
+#       define R300_FPI0_OUTC_SAT                (1 << 30)
+#       define R300_FPI0_UNKNOWN_31              (1 << 31)
+
+#define R300_PFS_INSTR2_0                   0x49C0
+#       define R300_FPI2_ARGA_SRC0C_X            0
+#       define R300_FPI2_ARGA_SRC0C_Y            1
+#       define R300_FPI2_ARGA_SRC0C_Z            2
+#       define R300_FPI2_ARGA_SRC1C_X            3
+#       define R300_FPI2_ARGA_SRC1C_Y            4
+#       define R300_FPI2_ARGA_SRC1C_Z            5
+#       define R300_FPI2_ARGA_SRC2C_X            6
+#       define R300_FPI2_ARGA_SRC2C_Y            7
+#       define R300_FPI2_ARGA_SRC2C_Z            8
+#       define R300_FPI2_ARGA_SRC0A              9
+#       define R300_FPI2_ARGA_SRC1A              10
+#       define R300_FPI2_ARGA_SRC2A              11
+#       define R300_FPI2_ARGA_SRC1A_LRP          15
+#       define R300_FPI2_ARGA_ZERO               16
+#       define R300_FPI2_ARGA_ONE                17
+#       define R300_FPI2_ARGA_HALF               18 /* GUESS */
+
+#       define R300_FPI2_ARG0A_SHIFT             0
+#       define R300_FPI2_ARG0A_MASK              (31 << 0)
+#       define R300_FPI2_ARG0A_NEG               (1 << 5)
+#		define R300_FPI2_ARG0A_ABS				 (1 << 6) /* GUESS */
+#       define R300_FPI2_ARG1A_SHIFT             7
+#       define R300_FPI2_ARG1A_MASK              (31 << 7)
+#       define R300_FPI2_ARG1A_NEG               (1 << 12)
+#		define R300_FPI2_ARG1A_ABS				 (1 << 13) /* GUESS */
+#       define R300_FPI2_ARG2A_SHIFT             14
+#       define R300_FPI2_ARG2A_MASK              (31 << 14)
+#       define R300_FPI2_ARG2A_NEG               (1 << 19)
+#		define R300_FPI2_ARG2A_ABS				 (1 << 20) /* GUESS */
+#       define R300_FPI2_SPECIAL_LRP             (1 << 21)
+#       define R300_FPI2_OUTA_MAD                (0 << 23)
+#       define R300_FPI2_OUTA_DP4                (1 << 23)
+#       define R300_FPI2_OUTA_MIN                (2 << 23)
+#       define R300_FPI2_OUTA_MAX                (3 << 23)
+#       define R300_FPI2_OUTA_CMP                (6 << 23)
+#       define R300_FPI2_OUTA_FRC                (7 << 23)
+#       define R300_FPI2_OUTA_EX2                (8 << 23)
+#       define R300_FPI2_OUTA_LG2                (9 << 23)
+#       define R300_FPI2_OUTA_RCP                (10 << 23)
+#       define R300_FPI2_OUTA_RSQ                (11 << 23)
+#       define R300_FPI2_OUTA_SAT                (1 << 30)
+#       define R300_FPI2_UNKNOWN_31              (1 << 31)
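+/* Worked example for the ALU encoding described above (illustrative only,
+ * with made-up register indices): "MOV out0.rgb, temp0", i.e.
+ * MAD out0, temp0, 1, 0 -- color half:
+ *
+ *	INSTR1_0: (0 << R300_FPI1_SRC0C_SHIFT)
+ *		| ((0 << R300_FPI1_SRC1C_SHIFT) | R300_FPI1_SRC1C_CONST)
+ *		| ((0 << R300_FPI1_SRC2C_SHIFT) | R300_FPI1_SRC2C_CONST)
+ *		| (0 << R300_FPI1_DSTC_SHIFT)
+ *		| R300_FPI1_DSTC_OUTPUT_X | R300_FPI1_DSTC_OUTPUT_Y
+ *		| R300_FPI1_DSTC_OUTPUT_Z
+ *	  (source 0 is temp 0, the unused sources read constant 0)
+ *
+ *	INSTR0_0: (R300_FPI0_ARGC_SRC0C_XYZ << R300_FPI0_ARG0C_SHIFT)
+ *		| (R300_FPI0_ARGC_ONE  << R300_FPI0_ARG1C_SHIFT)
+ *		| (R300_FPI0_ARGC_ZERO << R300_FPI0_ARG2C_SHIFT)
+ *		| R300_FPI0_OUTC_MAD
+ *
+ * The alpha half is built the same way from the INSTR3_0/INSTR2_0 fields.
+ */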
+/* END */
+
+/* gap */
+#define R300_PP_ALPHA_TEST                  0x4BD4
+#       define R300_REF_ALPHA_MASK               0x000000ff
+#       define R300_ALPHA_TEST_FAIL              (0 << 8)
+#       define R300_ALPHA_TEST_LESS              (1 << 8)
+#       define R300_ALPHA_TEST_LEQUAL            (3 << 8)
+#       define R300_ALPHA_TEST_EQUAL             (2 << 8)
+#       define R300_ALPHA_TEST_GEQUAL            (6 << 8)
+#       define R300_ALPHA_TEST_GREATER           (4 << 8)
+#       define R300_ALPHA_TEST_NEQUAL            (5 << 8)
+#       define R300_ALPHA_TEST_PASS              (7 << 8)
+#       define R300_ALPHA_TEST_OP_MASK           (7 << 8)
+#       define R300_ALPHA_TEST_ENABLE            (1 << 11)
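+/* Illustrative example: "reject fragments with alpha below 0x80" would be
+ *	0x80 | R300_ALPHA_TEST_GEQUAL | R300_ALPHA_TEST_ENABLE
+ */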
+
+/* gap */
+/* Fragment program parameters in 7.16 floating point */
+#define R300_PFS_PARAM_0_X                  0x4C00
+#define R300_PFS_PARAM_0_Y                  0x4C04
+#define R300_PFS_PARAM_0_Z                  0x4C08
+#define R300_PFS_PARAM_0_W                  0x4C0C
+/* GUESS: PARAM_31 is last, based on native limits reported by fglrx */
+#define R300_PFS_PARAM_31_X                 0x4DF0
+#define R300_PFS_PARAM_31_Y                 0x4DF4
+#define R300_PFS_PARAM_31_Z                 0x4DF8
+#define R300_PFS_PARAM_31_W                 0x4DFC
+
+/* Notes:
+// - AFAIK fglrx always sets BLEND_UNKNOWN when blending is used in the application
+// - AFAIK fglrx always sets BLEND_NO_SEPARATE when CBLEND and ABLEND are set to the same
+//   function (both registers are always set up completely in any case)
+// - Most blend flags are simply copied from R200 and not tested yet */
+#define R300_RB3D_CBLEND                    0x4E04
+#define R300_RB3D_ABLEND                    0x4E08
+ /* the following only appear in CBLEND */
+#       define R300_BLEND_ENABLE                     (1 << 0)
+#       define R300_BLEND_UNKNOWN                    (3 << 1)
+#       define R300_BLEND_NO_SEPARATE                (1 << 3)
+ /* the following are shared between CBLEND and ABLEND */
+#       define R300_FCN_MASK                         (3  << 12)
+#       define R300_COMB_FCN_ADD_CLAMP               (0  << 12)
+#       define R300_COMB_FCN_ADD_NOCLAMP             (1  << 12)
+#       define R300_COMB_FCN_SUB_CLAMP               (2  << 12)
+#       define R300_COMB_FCN_SUB_NOCLAMP             (3  << 12)
+#       define R300_SRC_BLEND_GL_ZERO                (32 << 16)
+#       define R300_SRC_BLEND_GL_ONE                 (33 << 16)
+#       define R300_SRC_BLEND_GL_SRC_COLOR           (34 << 16)
+#       define R300_SRC_BLEND_GL_ONE_MINUS_SRC_COLOR (35 << 16)
+#       define R300_SRC_BLEND_GL_DST_COLOR           (36 << 16)
+#       define R300_SRC_BLEND_GL_ONE_MINUS_DST_COLOR (37 << 16)
+#       define R300_SRC_BLEND_GL_SRC_ALPHA           (38 << 16)
+#       define R300_SRC_BLEND_GL_ONE_MINUS_SRC_ALPHA (39 << 16)
+#       define R300_SRC_BLEND_GL_DST_ALPHA           (40 << 16)
+#       define R300_SRC_BLEND_GL_ONE_MINUS_DST_ALPHA (41 << 16)
+#       define R300_SRC_BLEND_GL_SRC_ALPHA_SATURATE  (42 << 16)
+#       define R300_SRC_BLEND_MASK                   (63 << 16)
+#       define R300_DST_BLEND_GL_ZERO                (32 << 24)
+#       define R300_DST_BLEND_GL_ONE                 (33 << 24)
+#       define R300_DST_BLEND_GL_SRC_COLOR           (34 << 24)
+#       define R300_DST_BLEND_GL_ONE_MINUS_SRC_COLOR (35 << 24)
+#       define R300_DST_BLEND_GL_DST_COLOR           (36 << 24)
+#       define R300_DST_BLEND_GL_ONE_MINUS_DST_COLOR (37 << 24)
+#       define R300_DST_BLEND_GL_SRC_ALPHA           (38 << 24)
+#       define R300_DST_BLEND_GL_ONE_MINUS_SRC_ALPHA (39 << 24)
+#       define R300_DST_BLEND_GL_DST_ALPHA           (40 << 24)
+#       define R300_DST_BLEND_GL_ONE_MINUS_DST_ALPHA (41 << 24)
+#       define R300_DST_BLEND_MASK                   (63 << 24)
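+/* Illustrative guess, assuming the R200-derived flags above behave the same
+ * way on R300: standard GL_SRC_ALPHA / GL_ONE_MINUS_SRC_ALPHA blending in
+ * CBLEND would look like
+ *
+ *	R300_BLEND_ENABLE | R300_BLEND_NO_SEPARATE | R300_COMB_FCN_ADD_CLAMP
+ *	| R300_SRC_BLEND_GL_SRC_ALPHA | R300_DST_BLEND_GL_ONE_MINUS_SRC_ALPHA
+ *
+ * (per the note above, fglrx would reportedly also set R300_BLEND_UNKNOWN)
+ */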
+#define R300_RB3D_COLORMASK                 0x4E0C
+#       define R300_COLORMASK0_B                 (1<<0)
+#       define R300_COLORMASK0_G                 (1<<1)
+#       define R300_COLORMASK0_R                 (1<<2)
+#       define R300_COLORMASK0_A                 (1<<3)
+
+/* gap */
+#define R300_RB3D_COLOROFFSET0              0x4E28
+#       define R300_COLOROFFSET_MASK             0xFFFFFFF0 /* GUESS */
+#define R300_RB3D_COLOROFFSET1              0x4E2C /* GUESS */
+#define R300_RB3D_COLOROFFSET2              0x4E30 /* GUESS */
+#define R300_RB3D_COLOROFFSET3              0x4E34 /* GUESS */
+/* gap */
+/* Bit 16: Larger tiles
+// Bit 17: 4x2 tiles
+// Bit 18: Extremely weird tile-like mode, but with some pixels duplicated? */
+#define R300_RB3D_COLORPITCH0               0x4E38
+#       define R300_COLORPITCH_MASK              0x00001FF8 /* GUESS */
+#       define R300_COLOR_TILE_ENABLE            (1 << 16) /* GUESS */
+#       define R300_COLOR_MICROTILE_ENABLE       (1 << 17) /* GUESS */
+#       define R300_COLOR_ENDIAN_NO_SWAP         (0 << 18) /* GUESS */
+#       define R300_COLOR_ENDIAN_WORD_SWAP       (1 << 18) /* GUESS */
+#       define R300_COLOR_ENDIAN_DWORD_SWAP      (2 << 18) /* GUESS */
+#       define R300_COLOR_FORMAT_RGB565          (2 << 22)
+#       define R300_COLOR_FORMAT_ARGB8888        (3 << 22)
+#define R300_RB3D_COLORPITCH1               0x4E3C /* GUESS */
+#define R300_RB3D_COLORPITCH2               0x4E40 /* GUESS */
+#define R300_RB3D_COLORPITCH3               0x4E44 /* GUESS */
+
+/* gap */
+/* Guess by Vladimir.
+// Set to 0A before 3D operations, set to 02 afterwards. */
+#define R300_RB3D_DSTCACHE_CTLSTAT          0x4E4C
+#       define R300_RB3D_DSTCACHE_02             0x00000002
+#       define R300_RB3D_DSTCACHE_0A             0x0000000A
+
+/* gap */
+/* There seems to be no "write only" setting, so use Z-test = ALWAYS for this. */
+/* Bit (1<<8) is the "test" bit, so plain write is 6 - vd */
+#define R300_RB3D_ZSTENCIL_CNTL_0                   0x4F00
+#       define R300_RB3D_Z_DISABLED_1            0x00000010 /* GUESS */
+#       define R300_RB3D_Z_DISABLED_2            0x00000014 /* GUESS */
+#       define R300_RB3D_Z_TEST                  0x00000012
+#       define R300_RB3D_Z_TEST_AND_WRITE        0x00000016
+#       define R300_RB3D_Z_WRITE_ONLY        	 0x00000006
+#	define R300_RB3D_STENCIL_ENABLE		 0x00000001
+
+#define R300_RB3D_ZSTENCIL_CNTL_1                   0x4F04
+		/* functions */
+#	define R300_ZS_NEVER			0
+#	define R300_ZS_LESS			1
+#	define R300_ZS_LEQUAL			2
+#	define R300_ZS_EQUAL			3
+#	define R300_ZS_GEQUAL			4
+#	define R300_ZS_GREATER			5
+#	define R300_ZS_NOTEQUAL			6
+#	define R300_ZS_ALWAYS			7
+#       define R300_ZS_MASK                     7
+		/* operations */
+#	define R300_ZS_KEEP			0
+#	define R300_ZS_ZERO			1
+#	define R300_ZS_REPLACE			2
+#	define R300_ZS_INCR			3
+#	define R300_ZS_DECR			4
+#	define R300_ZS_INVERT			5
+#	define R300_ZS_INCR_WRAP		6
+#	define R300_ZS_DECR_WRAP		7
+
+       /* front and back refer to operations done for front
+          and back faces, i.e. separate stencil function support */
+#	define R300_RB3D_ZS1_DEPTH_FUNC_SHIFT		0
+#	define R300_RB3D_ZS1_FRONT_FUNC_SHIFT		3
+#	define R300_RB3D_ZS1_FRONT_FAIL_OP_SHIFT	6
+#	define R300_RB3D_ZS1_FRONT_ZPASS_OP_SHIFT	9
+#	define R300_RB3D_ZS1_FRONT_ZFAIL_OP_SHIFT      12
+#	define R300_RB3D_ZS1_BACK_FUNC_SHIFT           15
+#	define R300_RB3D_ZS1_BACK_FAIL_OP_SHIFT        18
+#	define R300_RB3D_ZS1_BACK_ZPASS_OP_SHIFT       21
+#	define R300_RB3D_ZS1_BACK_ZFAIL_OP_SHIFT       24
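+/* Illustrative example built from the fields above: depth test with LEQUAL
+ * and stencil effectively disabled (functions ALWAYS, all ops KEEP):
+ *
+ *	(R300_ZS_LEQUAL << R300_RB3D_ZS1_DEPTH_FUNC_SHIFT)
+ *	| (R300_ZS_ALWAYS << R300_RB3D_ZS1_FRONT_FUNC_SHIFT)
+ *	| (R300_ZS_KEEP << R300_RB3D_ZS1_FRONT_FAIL_OP_SHIFT)
+ *	| (R300_ZS_KEEP << R300_RB3D_ZS1_FRONT_ZPASS_OP_SHIFT)
+ *	| (R300_ZS_KEEP << R300_RB3D_ZS1_FRONT_ZFAIL_OP_SHIFT)
+ *	and likewise for the BACK_* fields.
+ */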
+
+
+
+#define R300_RB3D_ZSTENCIL_CNTL_2                   0x4F08
+#	define R300_RB3D_ZS2_STENCIL_REF_SHIFT		0
+#	define R300_RB3D_ZS2_STENCIL_MASK		0xFF
+#	define R300_RB3D_ZS2_STENCIL_MASK_SHIFT	        8
+#	define R300_RB3D_ZS2_STENCIL_WRITE_MASK_SHIFT	16
+
+/* gap */
+
+#define R300_RB3D_ZSTENCIL_FORMAT                   0x4F10
+#	define R300_DEPTH_FORMAT_16BIT_INT_Z     (0 << 0)
+#	define R300_DEPTH_FORMAT_24BIT_INT_Z     (2 << 0)
+
+/* gap */
+#define R300_RB3D_DEPTHOFFSET               0x4F20
+#define R300_RB3D_DEPTHPITCH                0x4F24
+#       define R300_DEPTHPITCH_MASK              0x00001FF8 /* GUESS */
+#       define R300_DEPTH_TILE_ENABLE            (1 << 16) /* GUESS */
+#       define R300_DEPTH_MICROTILE_ENABLE       (1 << 17) /* GUESS */
+#       define R300_DEPTH_ENDIAN_NO_SWAP         (0 << 18) /* GUESS */
+#       define R300_DEPTH_ENDIAN_WORD_SWAP       (1 << 18) /* GUESS */
+#       define R300_DEPTH_ENDIAN_DWORD_SWAP      (2 << 18) /* GUESS */
+
+/* BEGIN: Vertex program instruction set
+// Every instruction is four dwords long:
+//  DWORD 0: output and opcode
+//  DWORD 1: first argument
+//  DWORD 2: second argument
+//  DWORD 3: third argument
+//
+// Notes:
+//  - ABS r, a is implemented as MAX r, a, -a
+//  - MOV is implemented as ADD to zero
+//  - XPD is implemented as MUL + MAD
+//  - FLR is implemented as FRC + ADD
+//  - apparently, fglrx tries to schedule instructions so that there is at least
+//    one instruction between the write to a temporary and the first read
+//    from said temporary; however, violations of this scheduling are allowed
+//  - register indices seem to be unrelated to OpenGL aliasing to conventional state
+//  - only one attribute and one parameter can be loaded at a time; however, the
+//    same attribute/parameter can be used for more than one argument
+//  - the second software argument for POW is the third hardware argument (no idea why)
+//  - MAD with only temporaries as input seems to use VPI_OUT_SELECT_MAD_2
+//
+// There is some magic surrounding LIT:
+//  The single argument is replicated across all three inputs, but swizzled:
+//   First argument: xyzy
+//   Second argument: xyzx
+//   Third argument: xyzw
+//  Whenever the result is used later in the fragment program, fglrx forces x and w
+//  to be 1.0 in the input selection; I don't know whether this is strictly necessary */
+#define R300_VPI_OUT_OP_DOT                     (1 << 0)
+#define R300_VPI_OUT_OP_MUL                     (2 << 0)
+#define R300_VPI_OUT_OP_ADD                     (3 << 0)
+#define R300_VPI_OUT_OP_MAD                     (4 << 0)
+#define R300_VPI_OUT_OP_DST                     (5 << 0)
+#define R300_VPI_OUT_OP_FRC                     (6 << 0)
+#define R300_VPI_OUT_OP_MAX                     (7 << 0)
+#define R300_VPI_OUT_OP_MIN                     (8 << 0)
+#define R300_VPI_OUT_OP_SGE                     (9 << 0)
+#define R300_VPI_OUT_OP_SLT                     (10 << 0)
+#define R300_VPI_OUT_OP_UNK12                   (12 << 0) /* Used in GL_POINT_DISTANCE_ATTENUATION_ARB, vector(scalar, vector) */
+#define R300_VPI_OUT_OP_EXP                     (65 << 0)
+#define R300_VPI_OUT_OP_LOG                     (66 << 0)
+#define R300_VPI_OUT_OP_UNK67                   (67 << 0) /* Used in fog computations, scalar(scalar) */
+#define R300_VPI_OUT_OP_LIT                     (68 << 0)
+#define R300_VPI_OUT_OP_POW                     (69 << 0)
+#define R300_VPI_OUT_OP_RCP                     (70 << 0)
+#define R300_VPI_OUT_OP_RSQ                     (72 << 0)
+#define R300_VPI_OUT_OP_UNK73                   (73 << 0) /* Used in GL_POINT_DISTANCE_ATTENUATION_ARB, scalar(scalar) */
+#define R300_VPI_OUT_OP_EX2                     (75 << 0)
+#define R300_VPI_OUT_OP_LG2                     (76 << 0)
+#define R300_VPI_OUT_OP_MAD_2                   (128 << 0)
+#define R300_VPI_OUT_OP_UNK129                  (129 << 0) /* all temps, vector(scalar, vector, vector) */
+
+#define R300_VPI_OUT_REG_CLASS_TEMPORARY        (0 << 8)
+#define R300_VPI_OUT_REG_CLASS_RESULT           (2 << 8)
+#define R300_VPI_OUT_REG_CLASS_MASK             (31 << 8)
+
+#define R300_VPI_OUT_REG_INDEX_SHIFT            13
+#define R300_VPI_OUT_REG_INDEX_MASK             (31 << 13) /* GUESS based on fglrx native limits */
+
+#define R300_VPI_OUT_WRITE_X                    (1 << 20)
+#define R300_VPI_OUT_WRITE_Y                    (1 << 21)
+#define R300_VPI_OUT_WRITE_Z                    (1 << 22)
+#define R300_VPI_OUT_WRITE_W                    (1 << 23)
+
+#define R300_VPI_IN_REG_CLASS_TEMPORARY         (0 << 0)
+#define R300_VPI_IN_REG_CLASS_ATTRIBUTE         (1 << 0)
+#define R300_VPI_IN_REG_CLASS_PARAMETER         (2 << 0)
+#define R300_VPI_IN_REG_CLASS_NONE              (9 << 0)
+#define R300_VPI_IN_REG_CLASS_MASK              (31 << 0) /* GUESS */
+
+#define R300_VPI_IN_REG_INDEX_SHIFT             5
+#define R300_VPI_IN_REG_INDEX_MASK              (255 << 5) /* GUESS based on fglrx native limits */
+
+/* The R300 can select components from the input register arbitrarily.
+// Use the following constants, shifted by the component shift you
+// want to select */
+#define R300_VPI_IN_SELECT_X    0
+#define R300_VPI_IN_SELECT_Y    1
+#define R300_VPI_IN_SELECT_Z    2
+#define R300_VPI_IN_SELECT_W    3
+#define R300_VPI_IN_SELECT_ZERO 4
+#define R300_VPI_IN_SELECT_ONE  5
+#define R300_VPI_IN_SELECT_MASK 7
+
+#define R300_VPI_IN_X_SHIFT                     13
+#define R300_VPI_IN_Y_SHIFT                     16
+#define R300_VPI_IN_Z_SHIFT                     19
+#define R300_VPI_IN_W_SHIFT                     22
+
+#define R300_VPI_IN_NEG_X                       (1 << 25)
+#define R300_VPI_IN_NEG_Y                       (1 << 26)
+#define R300_VPI_IN_NEG_Z                       (1 << 27)
+#define R300_VPI_IN_NEG_W                       (1 << 28)
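+/* Illustrative guess, following the note above that MOV is implemented as an
+ * ADD with zero: "MOV temp1, attrib0" would be encoded roughly as
+ *
+ *	DWORD 0: R300_VPI_OUT_OP_ADD | R300_VPI_OUT_REG_CLASS_TEMPORARY
+ *		| (1 << R300_VPI_OUT_REG_INDEX_SHIFT)
+ *		| R300_VPI_OUT_WRITE_X | R300_VPI_OUT_WRITE_Y
+ *		| R300_VPI_OUT_WRITE_Z | R300_VPI_OUT_WRITE_W
+ *	DWORD 1: R300_VPI_IN_REG_CLASS_ATTRIBUTE
+ *		| (0 << R300_VPI_IN_REG_INDEX_SHIFT)
+ *		| (R300_VPI_IN_SELECT_X << R300_VPI_IN_X_SHIFT)
+ *		| (R300_VPI_IN_SELECT_Y << R300_VPI_IN_Y_SHIFT)
+ *		| (R300_VPI_IN_SELECT_Z << R300_VPI_IN_Z_SHIFT)
+ *		| (R300_VPI_IN_SELECT_W << R300_VPI_IN_W_SHIFT)
+ *	DWORD 2: the same selection pattern, but reading R300_VPI_IN_SELECT_ZERO
+ *		for all four components (the "+ 0" argument)
+ *	DWORD 3: unused third argument
+ */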
+/* END */
+
+/* BEGIN: Packet 3 commands */
+
+/* A primitive emission dword. */
+#define R300_PRIM_TYPE_NONE                     (0 << 0)
+#define R300_PRIM_TYPE_POINT                    (1 << 0)
+#define R300_PRIM_TYPE_LINE                     (2 << 0)
+#define R300_PRIM_TYPE_LINE_STRIP               (3 << 0)
+#define R300_PRIM_TYPE_TRI_LIST                 (4 << 0)
+#define R300_PRIM_TYPE_TRI_FAN                  (5 << 0)
+#define R300_PRIM_TYPE_TRI_STRIP                (6 << 0)
+#define R300_PRIM_TYPE_TRI_TYPE2                (7 << 0)
+#define R300_PRIM_TYPE_RECT_LIST                (8 << 0)
+#define R300_PRIM_TYPE_3VRT_POINT_LIST          (9 << 0)
+#define R300_PRIM_TYPE_3VRT_LINE_LIST           (10 << 0)
+#define R300_PRIM_TYPE_POINT_SPRITES            (11 << 0) // GUESS (based on r200)
+#define R300_PRIM_TYPE_LINE_LOOP                (12 << 0)
+#define R300_PRIM_TYPE_QUADS                    (13 << 0)
+#define R300_PRIM_TYPE_QUAD_STRIP               (14 << 0)
+#define R300_PRIM_TYPE_POLYGON                  (15 << 0)
+#define R300_PRIM_TYPE_MASK                     0xF
+#define R300_PRIM_WALK_IND                      (1 << 4)
+#define R300_PRIM_WALK_LIST                     (2 << 4)
+#define R300_PRIM_WALK_RING                     (3 << 4)
+#define R300_PRIM_WALK_MASK                     (3 << 4)
+#define R300_PRIM_COLOR_ORDER_BGRA              (0 << 6) // GUESS (based on r200)
+#define R300_PRIM_COLOR_ORDER_RGBA              (1 << 6) // GUESS
+#define R300_PRIM_NUM_VERTICES_SHIFT            16
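+/* Illustrative example: a non-indexed triangle-list draw of 36 vertices
+ * fetched from the vertex arrays would use the emission dword
+ *	R300_PRIM_TYPE_TRI_LIST | R300_PRIM_WALK_LIST
+ *	| (36 << R300_PRIM_NUM_VERTICES_SHIFT)
+ */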
+
+/* Draw a primitive from vertex data in arrays loaded via 3D_LOAD_VBPNTR.
+ * Two parameter dwords:
+ * 0. The first parameter appears to be always 0.
+ * 1. The second parameter is a standard primitive emission dword.
+ */
+#define R300_PACKET3_3D_DRAW_VBUF           0x00002800
+
+/* Specify the full set of vertex arrays as (address, stride).
+ * The first parameter is the number of vertex arrays specified.
+ * The rest of the command is a variable length list of blocks, where
+ * each block is three dwords long and specifies two arrays.
+ * The first dword of a block is split into two words: the less significant
+ * word refers to the first array, the more significant word to the second
+ * array in the block.
+ * The low byte of each word contains the size of an array entry in dwords,
+ * the high byte contains the stride of the array.
+ * The second dword of a block contains the pointer to the first array,
+ * the third dword of a block contains the pointer to the second array.
+ * Note that if the total number of arrays is odd, the third dword of
+ * the last block is omitted.
+ */
+#define R300_PACKET3_3D_LOAD_VBPNTR         0x00002F00
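+/* Illustrative guess at a 3D_LOAD_VBPNTR body for two arrays, each tightly
+ * packed (stride equals entry size), with 4 and 2 dwords per vertex:
+ *	dword 0: 2				(number of arrays)
+ *	dword 1: (4 | (4 << 8)) | ((2 | (2 << 8)) << 16)
+ *	dword 2: pointer to the first array
+ *	dword 3: pointer to the second array
+ */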
+
+#define R300_PACKET3_INDX_BUFFER            0x00003300
+#    define R300_EB_UNK1_SHIFT                      24
+#    define R300_EB_UNK1                    (0x80<<24)
+#    define R300_EB_UNK2                        0x0810
+#define R300_PACKET3_3D_DRAW_INDX_2         0x00003600
+
+/* END */
+
+#endif /* _R300_REG_H */
diff --git a/drivers/char/drm/radeon_cp.c b/drivers/char/drm/radeon_cp.c
index 20bcf87..6d9080a 100644
--- a/drivers/char/drm/radeon_cp.c
+++ b/drivers/char/drm/radeon_cp.c
@@ -32,6 +32,7 @@
 #include "drm.h"
 #include "radeon_drm.h"
 #include "radeon_drv.h"
+#include "r300_reg.h"
 
 #define RADEON_FIFO_DEBUG	0
 
@@ -1151,6 +1152,8 @@
 
 #if __OS_HAS_AGP
 	if ( !dev_priv->is_pci ) {
+		/* set RADEON_AGP_BASE here instead of relying on X from user space */
+		RADEON_WRITE(RADEON_AGP_BASE, (unsigned int)dev->agp->base);
 		RADEON_WRITE( RADEON_CP_RB_RPTR_ADDR,
 			      dev_priv->ring_rptr->offset
 			      - dev->agp->base
@@ -1407,6 +1410,7 @@
 		radeon_do_cleanup_cp(dev);
 		return DRM_ERR(EINVAL);
 	}
+	dev->agp_buffer_token = init->buffers_offset;
 	dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
 	if(!dev->agp_buffer_map) {
 		DRM_ERROR("could not find dma buffer region!\n");
@@ -1625,6 +1629,9 @@
 
 	DRM_COPY_FROM_USER_IOCTL( init, (drm_radeon_init_t __user *)data, sizeof(init) );
 
+	if(init.func == RADEON_INIT_R300_CP)
+		r300_init_reg_flags();
+
 	switch ( init.func ) {
 	case RADEON_INIT_CP:
 	case RADEON_INIT_R200_CP:
@@ -2039,15 +2046,43 @@
 	case CHIP_RV200:
 	case CHIP_R200:
 	case CHIP_R300:
+	case CHIP_R420:
 		dev_priv->flags |= CHIP_HAS_HIERZ;
 		break;
 	default:
 	/* all other chips have no hierarchical z buffer */
 		break;
 	}
+
+	if (drm_device_is_agp(dev))
+		dev_priv->flags |= CHIP_IS_AGP;
+	
+	DRM_DEBUG("%s card detected\n",
+		  ((dev_priv->flags & CHIP_IS_AGP) ? "AGP" : "PCI"));
 	return ret;
 }
 
+int radeon_presetup(struct drm_device *dev)
+{
+	int ret;
+	drm_local_map_t *map;
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
+	ret = drm_addmap(dev, drm_get_resource_start(dev, 2),
+			 drm_get_resource_len(dev, 2), _DRM_REGISTERS,
+			 _DRM_READ_ONLY, &dev_priv->mmio);
+	if (ret != 0)
+		return ret;
+
+	ret = drm_addmap(dev, drm_get_resource_start(dev, 0),
+			 drm_get_resource_len(dev, 0), _DRM_FRAME_BUFFER,
+			 _DRM_WRITE_COMBINING, &map);
+	if (ret != 0)
+		return ret;
+
+	return 0;
+}
+
 int radeon_driver_postcleanup(struct drm_device *dev)
 {
 	drm_radeon_private_t *dev_priv = dev->dev_private;
diff --git a/drivers/char/drm/radeon_drm.h b/drivers/char/drm/radeon_drm.h
index c1e62d0..3792798 100644
--- a/drivers/char/drm/radeon_drm.h
+++ b/drivers/char/drm/radeon_drm.h
@@ -195,6 +195,52 @@
 #define RADEON_WAIT_2D  0x1
 #define RADEON_WAIT_3D  0x2
 
+/* Allowed parameters for R300_CMD_PACKET3
+ */
+#define R300_CMD_PACKET3_CLEAR		0
+#define R300_CMD_PACKET3_RAW		1
+
+/* Commands understood by cmd_buffer ioctl for R300.
+ * The interface has not been stabilized, so some of these may be removed
+ * and eventually reordered before stabilization.
+ */
+#define R300_CMD_PACKET0		1 
+#define R300_CMD_VPU			2 /* emit vertex program upload */
+#define R300_CMD_PACKET3		3 /* emit a packet3 */
+#define R300_CMD_END3D			4 /* emit sequence ending 3d rendering */
+#define R300_CMD_CP_DELAY		5
+#define R300_CMD_DMA_DISCARD		6
+#define R300_CMD_WAIT			7
+#	define R300_WAIT_2D  		0x1
+#	define R300_WAIT_3D  		0x2
+#	define R300_WAIT_2D_CLEAN  	0x3
+#	define R300_WAIT_3D_CLEAN  	0x4
+
+typedef union {
+	unsigned int u;
+	struct {
+		unsigned char cmd_type, pad0, pad1, pad2;
+	} header;
+	struct {
+		unsigned char cmd_type, count, reglo, reghi;
+	} packet0;
+	struct {
+		unsigned char cmd_type, count, adrlo, adrhi;
+	} vpu;
+	struct {
+		unsigned char cmd_type, packet, pad0, pad1;
+	} packet3;
+	struct {
+		unsigned char cmd_type, packet;
+		unsigned short count; /* amount of packet2 to emit */
+	} delay;
+	struct {
+		unsigned char cmd_type, buf_idx, pad0, pad1;
+	} dma;
+	struct {
+		unsigned char cmd_type, flags, pad0, pad1;	
+	} wait;
+} drm_r300_cmd_header_t;
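+/* Example of filling in the header (illustrative only; the exact semantics
+ * of count/reglo/reghi are an educated guess from the field names): an
+ * R300_CMD_PACKET0 writing 8 consecutive dwords starting at register 0x4f00
+ * might be built as
+ *
+ *	drm_r300_cmd_header_t h;
+ *	h.packet0.cmd_type = R300_CMD_PACKET0;
+ *	h.packet0.count    = 8;
+ *	h.packet0.reglo    = 0x00;	(low byte of the register offset)
+ *	h.packet0.reghi    = 0x4f;	(high byte of the register offset)
+ *
+ * followed by the 8 payload dwords in the command buffer.
+ */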
 
 #define RADEON_FRONT			0x1
 #define RADEON_BACK			0x2
diff --git a/drivers/char/drm/radeon_drv.c b/drivers/char/drm/radeon_drv.c
index 18e4e5b..e0682f6 100644
--- a/drivers/char/drm/radeon_drv.c
+++ b/drivers/char/drm/radeon_drv.c
@@ -76,6 +76,7 @@
 	.driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
 	.dev_priv_size = sizeof(drm_radeon_buf_priv_t),
 	.preinit = radeon_driver_preinit,
+	.presetup = radeon_presetup,
 	.postcleanup = radeon_driver_postcleanup,
 	.prerelease = radeon_driver_prerelease,
 	.pretakedown = radeon_driver_pretakedown,
diff --git a/drivers/char/drm/radeon_drv.h b/drivers/char/drm/radeon_drv.h
index 771aa80..f12a963 100644
--- a/drivers/char/drm/radeon_drv.h
+++ b/drivers/char/drm/radeon_drv.h
@@ -82,9 +82,10 @@
  *     - Add support for r100 cube maps
  * 1.16- Add R200_EMIT_PP_TRI_PERF_CNTL packet to support brilinear
  *       texture filtering on r200
+ * 1.17- Add initial support for R300 (3D).
  */
 #define DRIVER_MAJOR		1
-#define DRIVER_MINOR		16
+#define DRIVER_MINOR		17
 #define DRIVER_PATCHLEVEL	0
 
 #define GET_RING_HEAD(dev_priv)		DRM_READ32(  (dev_priv)->ring_rptr, 0 )
@@ -106,7 +107,9 @@
 	CHIP_RV280,
 	CHIP_R300,
 	CHIP_RS300,
+	CHIP_R350,
 	CHIP_RV350,
+	CHIP_R420,
 	CHIP_LAST,
 };
 
@@ -290,6 +293,7 @@
 extern int radeon_do_cp_idle( drm_radeon_private_t *dev_priv );
 
 extern int radeon_driver_preinit(struct drm_device *dev, unsigned long flags);
+extern int radeon_presetup(struct drm_device *dev);
 extern int radeon_driver_postcleanup(struct drm_device *dev);
 
 extern int radeon_mem_alloc( DRM_IOCTL_ARGS );
@@ -320,6 +324,14 @@
 extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
 				unsigned long arg);
 
+
+/* r300_cmdbuf.c */
+extern void r300_init_reg_flags(void);
+
+extern int r300_do_cp_cmdbuf(drm_device_t* dev, DRMFILE filp,
+			     drm_file_t* filp_priv,
+			     drm_radeon_cmd_buffer_t* cmdbuf);
+
 /* Flags for stats.boxes
  */
 #define RADEON_BOX_DMA_IDLE      0x1
@@ -357,6 +369,11 @@
 #define RADEON_CRTC2_OFFSET		0x0324
 #define RADEON_CRTC2_OFFSET_CNTL	0x0328
 
+#define RADEON_MPP_TB_CONFIG		0x01c0
+#define RADEON_MEM_CNTL			0x0140
+#define RADEON_MEM_SDRAM_MODE_REG	0x0158
+#define RADEON_AGP_BASE			0x0170
+
 #define RADEON_RB3D_COLOROFFSET		0x1c40
 #define RADEON_RB3D_COLORPITCH		0x1c48
 
@@ -651,16 +668,27 @@
 #define RADEON_CP_PACKET1		0x40000000
 #define RADEON_CP_PACKET2		0x80000000
 #define RADEON_CP_PACKET3		0xC0000000
+#       define RADEON_CP_NOP                    0x00001000
+#       define RADEON_CP_NEXT_CHAR              0x00001900
+#       define RADEON_CP_PLY_NEXTSCAN           0x00001D00
+#       define RADEON_CP_SET_SCISSORS           0x00001E00
+             /* GEN_INDX_PRIM is unsupported starting with R300 */
 #	define RADEON_3D_RNDR_GEN_INDX_PRIM	0x00002300
 #	define RADEON_WAIT_FOR_IDLE		0x00002600
 #	define RADEON_3D_DRAW_VBUF		0x00002800
 #	define RADEON_3D_DRAW_IMMD		0x00002900
 #	define RADEON_3D_DRAW_INDX		0x00002A00
+#       define RADEON_CP_LOAD_PALETTE           0x00002C00
 #	define RADEON_3D_LOAD_VBPNTR		0x00002F00
 #	define RADEON_MPEG_IDCT_MACROBLOCK	0x00003000
 #	define RADEON_MPEG_IDCT_MACROBLOCK_REV	0x00003100
 #	define RADEON_3D_CLEAR_ZMASK		0x00003200
+#	define RADEON_CP_INDX_BUFFER		0x00003300
+#       define RADEON_CP_3D_DRAW_VBUF_2         0x00003400
+#       define RADEON_CP_3D_DRAW_IMMD_2         0x00003500
+#       define RADEON_CP_3D_DRAW_INDX_2         0x00003600
 #	define RADEON_3D_CLEAR_HIZ		0x00003700
+#       define RADEON_CP_3D_CLEAR_CMASK         0x00003802
 #	define RADEON_CNTL_HOSTDATA_BLT		0x00009400
 #	define RADEON_CNTL_PAINT_MULTI		0x00009A00
 #	define RADEON_CNTL_BITBLT_MULTI		0x00009B00
diff --git a/drivers/char/drm/radeon_state.c b/drivers/char/drm/radeon_state.c
index 1f79e24..64a3e3a 100644
--- a/drivers/char/drm/radeon_state.c
+++ b/drivers/char/drm/radeon_state.c
@@ -1493,7 +1493,7 @@
 
 }
 
-#define RADEON_MAX_TEXTURE_SIZE (RADEON_BUFFER_SIZE - 8 * sizeof(u32))
+#define RADEON_MAX_TEXTURE_SIZE RADEON_BUFFER_SIZE
 
 static int radeon_cp_dispatch_texture( DRMFILE filp,
 				       drm_device_t *dev,
@@ -1506,10 +1506,11 @@
 	u32 format;
 	u32 *buffer;
 	const u8 __user *data;
-	int size, dwords, tex_width, blit_width;
+	int size, dwords, tex_width, blit_width, spitch;
 	u32 height;
 	int i;
 	u32 texpitch, microtile;
+	u32 offset;
 	RING_LOCALS;
 
 	DRM_GET_PRIV_WITH_RETURN( filp_priv, filp );
@@ -1530,17 +1531,6 @@
 	RADEON_WAIT_UNTIL_IDLE();
 	ADVANCE_RING();
 
-#ifdef __BIG_ENDIAN
-	/* The Mesa texture functions provide the data in little endian as the
-	 * chip wants it, but we need to compensate for the fact that the CP
-	 * ring gets byte-swapped
-	 */
-	BEGIN_RING( 2 );
-	OUT_RING_REG( RADEON_RBBM_GUICNTL, RADEON_HOST_DATA_SWAP_32BIT );
-	ADVANCE_RING();
-#endif
-
-
 	/* The compiler won't optimize away a division by a variable,
 	 * even if the only legal values are powers of two.  Thus, we'll
 	 * use a shift instead.
@@ -1572,6 +1562,10 @@
 		DRM_ERROR( "invalid texture format %d\n", tex->format );
 		return DRM_ERR(EINVAL);
 	}
+	spitch = blit_width >> 6;
+	if (spitch == 0 && image->height > 1)
+		return DRM_ERR(EINVAL);
+
 	texpitch = tex->pitch;
 	if ((texpitch << 22) & RADEON_DST_TILE_MICRO) {
 		microtile = 1;
@@ -1624,25 +1618,6 @@
 		 */
 		buffer = (u32*)((char*)dev->agp_buffer_map->handle + buf->offset);
 		dwords = size / 4;
-		buffer[0] = CP_PACKET3( RADEON_CNTL_HOSTDATA_BLT, dwords + 6 );
-		buffer[1] = (RADEON_GMC_DST_PITCH_OFFSET_CNTL |
-			     RADEON_GMC_BRUSH_NONE |
-			     (format << 8) |
-			     RADEON_GMC_SRC_DATATYPE_COLOR |
-			     RADEON_ROP3_S |
-			     RADEON_DP_SRC_SOURCE_HOST_DATA |
-			     RADEON_GMC_CLR_CMP_CNTL_DIS |
-			     RADEON_GMC_WR_MSK_DIS);
-		
-		buffer[2] = (texpitch << 22) | (tex->offset >> 10);
-		buffer[3] = 0xffffffff;
-		buffer[4] = 0xffffffff;
-		buffer[5] = (image->y << 16) | image->x;
-		buffer[6] = (height << 16) | image->width;
-		buffer[7] = dwords;
-		buffer += 8;
-
-		
 
 		if (microtile) {
 			/* texture micro tiling in use, minimum texture width is thus 16 bytes.
@@ -1750,9 +1725,28 @@
 		}
 
 		buf->filp = filp;
-		buf->used = (dwords + 8) * sizeof(u32);
-		radeon_cp_dispatch_indirect( dev, buf, 0, buf->used );
-		radeon_cp_discard_buffer( dev, buf );
+		buf->used = size;
+		offset = dev_priv->gart_buffers_offset + buf->offset;
+		BEGIN_RING(9);
+		OUT_RING(CP_PACKET3(RADEON_CNTL_BITBLT_MULTI, 5));
+		OUT_RING(RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
+			 RADEON_GMC_DST_PITCH_OFFSET_CNTL |
+			 RADEON_GMC_BRUSH_NONE |
+			 (format << 8) |
+			 RADEON_GMC_SRC_DATATYPE_COLOR |
+			 RADEON_ROP3_S |
+			 RADEON_DP_SRC_SOURCE_MEMORY |
+			 RADEON_GMC_CLR_CMP_CNTL_DIS |
+			 RADEON_GMC_WR_MSK_DIS );
+		OUT_RING((spitch << 22) | (offset >> 10));
+		OUT_RING((texpitch << 22) | (tex->offset >> 10));
+		OUT_RING(0);
+		OUT_RING((image->x << 16) | image->y);
+		OUT_RING((image->width << 16) | height);
+		RADEON_WAIT_UNTIL_2D_IDLE();
+		ADVANCE_RING();
+
+		radeon_cp_discard_buffer(dev, buf);
 
 		/* Update the input parameters for next time */
 		image->y += height;
@@ -2797,6 +2791,17 @@
 
 	orig_nbox = cmdbuf.nbox;
 
+	if(dev_priv->microcode_version == UCODE_R300) {
+		int temp;
+		temp=r300_do_cp_cmdbuf(dev, filp, filp_priv, &cmdbuf);
+	
+		if (orig_bufsz != 0)
+			drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
+	
+		return temp;
+	}
+
+	/* microcode_version != r300 */
 	while ( cmdbuf.bufsz >= sizeof(header) ) {
 
 		header.i = *(int *)cmdbuf.buf;
diff --git a/drivers/char/drm/savage_bci.c b/drivers/char/drm/savage_bci.c
new file mode 100644
index 0000000..2fd40ba
--- /dev/null
+++ b/drivers/char/drm/savage_bci.c
@@ -0,0 +1,1096 @@
+/* savage_bci.c -- BCI support for Savage
+ *
+ * Copyright 2004  Felix Kuehling
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
+ * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "drmP.h"
+#include "savage_drm.h"
+#include "savage_drv.h"
+
+/* Need a long timeout because shadow status updates can take a while,
+ * and so can waiting for events when the queue is full. */
+#define SAVAGE_DEFAULT_USEC_TIMEOUT	1000000 /* 1s */
+#define SAVAGE_EVENT_USEC_TIMEOUT	5000000 /* 5s */
+#define SAVAGE_FREELIST_DEBUG		0
+
+static int
+savage_bci_wait_fifo_shadow(drm_savage_private_t *dev_priv, unsigned int n)
+{
+	uint32_t mask = dev_priv->status_used_mask;
+	uint32_t threshold = dev_priv->bci_threshold_hi;
+	uint32_t status;
+	int i;
+
+#if SAVAGE_BCI_DEBUG
+	if (n > dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - threshold)
+		DRM_ERROR("Trying to emit %d words "
+			  "(more than guaranteed space in COB)\n", n);
+#endif
+
+	for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
+		DRM_MEMORYBARRIER();
+		status = dev_priv->status_ptr[0];
+		if ((status & mask) < threshold)
+			return 0;
+		DRM_UDELAY(1);
+	}
+
+#if SAVAGE_BCI_DEBUG
+	DRM_ERROR("failed!\n");
+	DRM_INFO("   status=0x%08x, threshold=0x%08x\n", status, threshold);
+#endif
+	return DRM_ERR(EBUSY);
+}
+
+static int
+savage_bci_wait_fifo_s3d(drm_savage_private_t *dev_priv, unsigned int n)
+{
+	uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n;
+	uint32_t status;
+	int i;
+
+	for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
+		status = SAVAGE_READ(SAVAGE_STATUS_WORD0);
+		if ((status & SAVAGE_FIFO_USED_MASK_S3D) <= maxUsed)
+			return 0;
+		DRM_UDELAY(1);
+	}
+
+#if SAVAGE_BCI_DEBUG
+	DRM_ERROR("failed!\n");
+	DRM_INFO("   status=0x%08x\n", status);
+#endif
+	return DRM_ERR(EBUSY);
+}
+
+static int
+savage_bci_wait_fifo_s4(drm_savage_private_t *dev_priv, unsigned int n)
+{
+	uint32_t maxUsed = dev_priv->cob_size + SAVAGE_BCI_FIFO_SIZE - n;
+	uint32_t status;
+	int i;
+
+	for (i = 0; i < SAVAGE_DEFAULT_USEC_TIMEOUT; i++) {
+		status = SAVAGE_READ(SAVAGE_ALT_STATUS_WORD0);
+		if ((status & SAVAGE_FIFO_USED_MASK_S4) <= maxUsed)
+			return 0;
+		DRM_UDELAY(1);
+	}
+
+#if SAVAGE_BCI_DEBUG
+	DRM_ERROR("failed!\n");
+	DRM_INFO("   status=0x%08x\n", status);
+#endif
+	return DRM_ERR(EBUSY);
+}
+
+/*
+ * Waiting for events.
+ *
+ * The BIOS resets the event tag to 0 on mode changes. Therefore we
+ * never emit 0 to the event tag. If we find a 0 event tag we know the
+ * BIOS stomped on it and return success assuming that the BIOS waited
+ * for engine idle.
+ *
+ * Note: if the Xserver uses the event tag it has to follow the same
+ * rule. Otherwise there may be glitches every 2^16 events.
+ */
+static int
+savage_bci_wait_event_shadow(drm_savage_private_t *dev_priv, uint16_t e)
+{
+	uint32_t status;
+	int i;
+
+	for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) {
+		DRM_MEMORYBARRIER();
+		status = dev_priv->status_ptr[1];
+		if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
+		    (status & 0xffff) == 0)
+			return 0;
+		DRM_UDELAY(1);
+	}
+
+#if SAVAGE_BCI_DEBUG
+	DRM_ERROR("failed!\n");
+	DRM_INFO("   status=0x%08x, e=0x%04x\n", status, e);
+#endif
+
+	return DRM_ERR(EBUSY);
+}
+
+static int
+savage_bci_wait_event_reg(drm_savage_private_t *dev_priv, uint16_t e)
+{
+	uint32_t status;
+	int i;
+
+	for (i = 0; i < SAVAGE_EVENT_USEC_TIMEOUT; i++) {
+		status = SAVAGE_READ(SAVAGE_STATUS_WORD1);
+		if ((((status & 0xffff) - e) & 0xffff) <= 0x7fff ||
+		    (status & 0xffff) == 0)
+			return 0;
+		DRM_UDELAY(1);
+	}
+
+#if SAVAGE_BCI_DEBUG
+	DRM_ERROR("failed!\n");
+	DRM_INFO("   status=0x%08x, e=0x%04x\n", status, e);
+#endif
+
+	return DRM_ERR(EBUSY);
+}
+
+uint16_t savage_bci_emit_event(drm_savage_private_t *dev_priv,
+			       unsigned int flags)
+{
+	uint16_t count;
+	BCI_LOCALS;
+
+	if (dev_priv->status_ptr) {
+		/* coordinate with Xserver */
+		count = dev_priv->status_ptr[1023];
+		if (count < dev_priv->event_counter)
+			dev_priv->event_wrap++;
+	} else {
+		count = dev_priv->event_counter;
+	}
+	count = (count + 1) & 0xffff;
+	if (count == 0) {
+		count++; /* See the comment above savage_wait_event_*. */
+		dev_priv->event_wrap++;
+	}
+	dev_priv->event_counter = count;
+	if (dev_priv->status_ptr)
+		dev_priv->status_ptr[1023] = (uint32_t)count;
+
+	if ((flags & (SAVAGE_WAIT_2D | SAVAGE_WAIT_3D))) {
+		unsigned int wait_cmd = BCI_CMD_WAIT;
+		if ((flags & SAVAGE_WAIT_2D))
+			wait_cmd |= BCI_CMD_WAIT_2D;
+		if ((flags & SAVAGE_WAIT_3D))
+			wait_cmd |= BCI_CMD_WAIT_3D;
+		BEGIN_BCI(2);
+		BCI_WRITE(wait_cmd);
+	} else {
+		BEGIN_BCI(1);
+	}
+	BCI_WRITE(BCI_CMD_UPDATE_EVENT_TAG | (uint32_t)count);
+
+	return count;
+}
+
+/*
+ * Freelist management
+ */
+static int savage_freelist_init(drm_device_t *dev)
+{
+	drm_savage_private_t *dev_priv = dev->dev_private;
+	drm_device_dma_t *dma = dev->dma;
+	drm_buf_t *buf;
+	drm_savage_buf_priv_t *entry;
+	int i;
+	DRM_DEBUG("count=%d\n", dma->buf_count);
+
+	dev_priv->head.next = &dev_priv->tail;
+	dev_priv->head.prev = NULL;
+	dev_priv->head.buf = NULL;
+
+	dev_priv->tail.next = NULL;
+	dev_priv->tail.prev = &dev_priv->head;
+	dev_priv->tail.buf = NULL;
+
+	for (i = 0; i < dma->buf_count; i++) {
+		buf = dma->buflist[i];
+		entry = buf->dev_private;
+
+		SET_AGE(&entry->age, 0, 0);
+		entry->buf = buf;
+
+		entry->next = dev_priv->head.next;
+		entry->prev = &dev_priv->head;
+		dev_priv->head.next->prev = entry;
+		dev_priv->head.next = entry;
+	}
+
+	return 0;
+}
+
+static drm_buf_t *savage_freelist_get(drm_device_t *dev)
+{
+	drm_savage_private_t *dev_priv = dev->dev_private;
+	drm_savage_buf_priv_t *tail = dev_priv->tail.prev;
+	uint16_t event;
+	unsigned int wrap;
+	DRM_DEBUG("\n");
+
+	UPDATE_EVENT_COUNTER();
+	if (dev_priv->status_ptr)
+		event = dev_priv->status_ptr[1] & 0xffff;
+	else
+		event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
+	wrap = dev_priv->event_wrap;
+	if (event > dev_priv->event_counter)
+		wrap--; /* hardware hasn't passed the last wrap yet */
+
+	DRM_DEBUG("   tail=0x%04x %d\n", tail->age.event, tail->age.wrap);
+	DRM_DEBUG("   head=0x%04x %d\n", event, wrap);
+
+	if (tail->buf && (TEST_AGE(&tail->age, event, wrap) || event == 0)) {
+		drm_savage_buf_priv_t *next = tail->next;
+		drm_savage_buf_priv_t *prev = tail->prev;
+		prev->next = next;
+		next->prev = prev;
+		tail->next = tail->prev = NULL;
+		return tail->buf;
+	}
+
+	DRM_DEBUG("returning NULL, tail->buf=%p!\n", tail->buf);
+	return NULL;
+}
+
+void savage_freelist_put(drm_device_t *dev, drm_buf_t *buf)
+{
+	drm_savage_private_t *dev_priv = dev->dev_private;
+	drm_savage_buf_priv_t *entry = buf->dev_private, *prev, *next;
+
+	DRM_DEBUG("age=0x%04x wrap=%d\n", entry->age.event, entry->age.wrap);
+
+	if (entry->next != NULL || entry->prev != NULL) {
+		DRM_ERROR("entry already on freelist.\n");
+		return;
+	}
+
+	prev = &dev_priv->head;
+	next = prev->next;
+	prev->next = entry;
+	next->prev = entry;
+	entry->prev = prev;
+	entry->next = next;
+}
+
+/*
+ * Command DMA
+ */
+static int savage_dma_init(drm_savage_private_t *dev_priv)
+{
+	unsigned int i;
+
+	dev_priv->nr_dma_pages = dev_priv->cmd_dma->size /
+		(SAVAGE_DMA_PAGE_SIZE*4);
+	dev_priv->dma_pages = drm_alloc(sizeof(drm_savage_dma_page_t) *
+					dev_priv->nr_dma_pages,
+					DRM_MEM_DRIVER);
+	if (dev_priv->dma_pages == NULL)
+		return DRM_ERR(ENOMEM);
+
+	for (i = 0; i < dev_priv->nr_dma_pages; ++i) {
+		SET_AGE(&dev_priv->dma_pages[i].age, 0, 0);
+		dev_priv->dma_pages[i].used = 0;
+		dev_priv->dma_pages[i].flushed = 0;
+	}
+	SET_AGE(&dev_priv->last_dma_age, 0, 0);
+
+	dev_priv->first_dma_page = 0;
+	dev_priv->current_dma_page = 0;
+
+	return 0;
+}
+
+void savage_dma_reset(drm_savage_private_t *dev_priv)
+{
+	uint16_t event;
+	unsigned int wrap, i;
+	event = savage_bci_emit_event(dev_priv, 0);
+	wrap = dev_priv->event_wrap;
+	for (i = 0; i < dev_priv->nr_dma_pages; ++i) {
+		SET_AGE(&dev_priv->dma_pages[i].age, event, wrap);
+		dev_priv->dma_pages[i].used = 0;
+		dev_priv->dma_pages[i].flushed = 0;
+	}
+	SET_AGE(&dev_priv->last_dma_age, event, wrap);
+	dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
+}
+
+void savage_dma_wait(drm_savage_private_t *dev_priv, unsigned int page)
+{
+	uint16_t event;
+	unsigned int wrap;
+
+	/* Faked DMA buffer pages don't age. */
+	if (dev_priv->cmd_dma == &dev_priv->fake_dma)
+		return;
+
+	UPDATE_EVENT_COUNTER();
+	if (dev_priv->status_ptr)
+		event = dev_priv->status_ptr[1] & 0xffff;
+	else
+		event = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
+	wrap = dev_priv->event_wrap;
+	if (event > dev_priv->event_counter)
+		wrap--; /* hardware hasn't passed the last wrap yet */
+
+	if (dev_priv->dma_pages[page].age.wrap > wrap ||
+	    (dev_priv->dma_pages[page].age.wrap == wrap &&
+	     dev_priv->dma_pages[page].age.event > event)) {
+		if (dev_priv->wait_evnt(dev_priv,
+					dev_priv->dma_pages[page].age.event)
+		    < 0)
+			DRM_ERROR("wait_evnt failed!\n");
+	}
+}
+
+uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv, unsigned int n)
+{
+	unsigned int cur = dev_priv->current_dma_page;
+	unsigned int rest = SAVAGE_DMA_PAGE_SIZE -
+		dev_priv->dma_pages[cur].used;
+	unsigned int nr_pages = (n - rest + SAVAGE_DMA_PAGE_SIZE-1) /
+		SAVAGE_DMA_PAGE_SIZE;
+	uint32_t *dma_ptr;
+	unsigned int i;
+
+	DRM_DEBUG("cur=%u, cur->used=%u, n=%u, rest=%u, nr_pages=%u\n",
+		  cur, dev_priv->dma_pages[cur].used, n, rest, nr_pages);
+
+	if (cur + nr_pages < dev_priv->nr_dma_pages) {
+		dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +
+			cur*SAVAGE_DMA_PAGE_SIZE +
+			dev_priv->dma_pages[cur].used;
+		if (n < rest)
+			rest = n;
+		dev_priv->dma_pages[cur].used += rest;
+		n -= rest;
+		cur++;
+	} else {
+		dev_priv->dma_flush(dev_priv);
+		nr_pages = (n + SAVAGE_DMA_PAGE_SIZE-1) / SAVAGE_DMA_PAGE_SIZE;
+		for (i = cur; i < dev_priv->nr_dma_pages; ++i) {
+			dev_priv->dma_pages[i].age = dev_priv->last_dma_age;
+			dev_priv->dma_pages[i].used = 0;
+			dev_priv->dma_pages[i].flushed = 0;
+		}
+		dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle;
+		dev_priv->first_dma_page = cur = 0;
+	}
+	for (i = cur; nr_pages > 0; ++i, --nr_pages) {
+#if SAVAGE_DMA_DEBUG
+		if (dev_priv->dma_pages[i].used) {
+			DRM_ERROR("unflushed page %u: used=%u\n",
+				  i, dev_priv->dma_pages[i].used);
+		}
+#endif
+		if (n > SAVAGE_DMA_PAGE_SIZE)
+			dev_priv->dma_pages[i].used = SAVAGE_DMA_PAGE_SIZE;
+		else
+			dev_priv->dma_pages[i].used = n;
+		n -= SAVAGE_DMA_PAGE_SIZE;
+	}
+	dev_priv->current_dma_page = --i;
+
+	DRM_DEBUG("cur=%u, cur->used=%u, n=%u\n",
+		  i, dev_priv->dma_pages[i].used, n);
+
+	savage_dma_wait(dev_priv, dev_priv->current_dma_page);
+
+	return dma_ptr;
+}
+
+static void savage_dma_flush(drm_savage_private_t *dev_priv)
+{
+	unsigned int first = dev_priv->first_dma_page;
+	unsigned int cur = dev_priv->current_dma_page;
+	uint16_t event;
+	unsigned int wrap, pad, align, len, i;
+	unsigned long phys_addr;
+	BCI_LOCALS;
+
+	if (first == cur &&
+	    dev_priv->dma_pages[cur].used == dev_priv->dma_pages[cur].flushed)
+		return;
+
+	/* pad length to multiples of 2 entries
+	 * align start of next DMA block to multiples of 8 entries */
+	pad = -dev_priv->dma_pages[cur].used & 1;
+	align = -(dev_priv->dma_pages[cur].used + pad) & 7;
+
+	DRM_DEBUG("first=%u, cur=%u, first->flushed=%u, cur->used=%u, "
+		  "pad=%u, align=%u\n",
+		  first, cur, dev_priv->dma_pages[first].flushed,
+		  dev_priv->dma_pages[cur].used, pad, align);
+
+	/* pad with noops */
+	if (pad) {
+		uint32_t *dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +
+			cur * SAVAGE_DMA_PAGE_SIZE +
+			dev_priv->dma_pages[cur].used;
+		dev_priv->dma_pages[cur].used += pad;
+		while(pad != 0) {
+			*dma_ptr++ = BCI_CMD_WAIT;
+			pad--;
+		}
+	}
+
+	DRM_MEMORYBARRIER();
+
+	/* do flush ... */
+	phys_addr = dev_priv->cmd_dma->offset +
+		(first * SAVAGE_DMA_PAGE_SIZE +
+		 dev_priv->dma_pages[first].flushed) * 4;
+	len = (cur - first) * SAVAGE_DMA_PAGE_SIZE +
+		dev_priv->dma_pages[cur].used -
+		dev_priv->dma_pages[first].flushed;
+
+	DRM_DEBUG("phys_addr=%lx, len=%u\n",
+		  phys_addr | dev_priv->dma_type, len);
+
+	BEGIN_BCI(3);
+	BCI_SET_REGISTERS(SAVAGE_DMABUFADDR, 1);
+	BCI_WRITE(phys_addr | dev_priv->dma_type);
+	BCI_DMA(len);
+
+	/* fix alignment of the start of the next block */
+	dev_priv->dma_pages[cur].used += align;
+
+	/* age DMA pages */
+	event = savage_bci_emit_event(dev_priv, 0);
+	wrap = dev_priv->event_wrap;
+	for (i = first; i < cur; ++i) {
+		SET_AGE(&dev_priv->dma_pages[i].age, event, wrap);
+		dev_priv->dma_pages[i].used = 0;
+		dev_priv->dma_pages[i].flushed = 0;
+	}
+	/* age the current page only when it's full */
+	if (dev_priv->dma_pages[cur].used == SAVAGE_DMA_PAGE_SIZE) {
+		SET_AGE(&dev_priv->dma_pages[cur].age, event, wrap);
+		dev_priv->dma_pages[cur].used = 0;
+		dev_priv->dma_pages[cur].flushed = 0;
+		/* advance to next page */
+		cur++;
+		if (cur == dev_priv->nr_dma_pages)
+			cur = 0;
+		dev_priv->first_dma_page = dev_priv->current_dma_page = cur;
+	} else {
+		dev_priv->first_dma_page = cur;
+		dev_priv->dma_pages[cur].flushed = dev_priv->dma_pages[i].used;
+	}
+	SET_AGE(&dev_priv->last_dma_age, event, wrap);
+
+	DRM_DEBUG("first=cur=%u, cur->used=%u, cur->flushed=%u\n", cur,
+		  dev_priv->dma_pages[cur].used,
+		  dev_priv->dma_pages[cur].flushed);
+}
+
+static void savage_fake_dma_flush(drm_savage_private_t *dev_priv)
+{
+	unsigned int i, j;
+	BCI_LOCALS;
+
+	if (dev_priv->first_dma_page == dev_priv->current_dma_page &&
+	    dev_priv->dma_pages[dev_priv->current_dma_page].used == 0)
+		return;
+
+	DRM_DEBUG("first=%u, cur=%u, cur->used=%u\n",
+		  dev_priv->first_dma_page, dev_priv->current_dma_page,
+		  dev_priv->dma_pages[dev_priv->current_dma_page].used);
+
+	for (i = dev_priv->first_dma_page;
+	     i <= dev_priv->current_dma_page && dev_priv->dma_pages[i].used;
+	     ++i) {
+		uint32_t *dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +
+			i * SAVAGE_DMA_PAGE_SIZE;
+#if SAVAGE_DMA_DEBUG
+		/* Sanity check: all pages except the last one must be full. */
+		if (i < dev_priv->current_dma_page &&
+		    dev_priv->dma_pages[i].used != SAVAGE_DMA_PAGE_SIZE) {
+			DRM_ERROR("partial DMA page %u: used=%u",
+				  i, dev_priv->dma_pages[i].used);
+		}
+#endif
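+		/* replay the buffered commands through the BCI FIFO */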
+		BEGIN_BCI(dev_priv->dma_pages[i].used);
+		for (j = 0; j < dev_priv->dma_pages[i].used; ++j) {
+			BCI_WRITE(dma_ptr[j]);
+		}
+		dev_priv->dma_pages[i].used = 0;
+	}
+
+	/* reset to first page */
+	dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
+}
+
+/*
+ * Initialize mappings. On Savage4 and SavageIX the alignment
+ * and size of the aperture is not suitable for automatic MTRR setup
+ * in drm_addmap. Therefore we do it manually before the maps are
+ * initialized. We also need to take care of deleting the MTRRs in
+ * postcleanup.
+ */
+int savage_preinit(drm_device_t *dev, unsigned long chipset)
+{
+	drm_savage_private_t *dev_priv;
+	unsigned long mmio_base, fb_base, fb_size, aperture_base;
+	/* fb_rsrc and aper_rsrc aren't really used currently, but still exist
+	 * in case we decide we need information on the BAR for BSD in the
+	 * future.
+	 */
+	unsigned int fb_rsrc, aper_rsrc;
+	int ret = 0;
+
+	dev_priv = drm_alloc(sizeof(drm_savage_private_t), DRM_MEM_DRIVER);
+	if (dev_priv == NULL)
+		return DRM_ERR(ENOMEM);
+
+	memset(dev_priv, 0, sizeof(drm_savage_private_t));
+	dev->dev_private = (void *)dev_priv;
+	dev_priv->chipset = (enum savage_family)chipset;
+
+	dev_priv->mtrr[0].handle = -1;
+	dev_priv->mtrr[1].handle = -1;
+	dev_priv->mtrr[2].handle = -1;
+	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
+		fb_rsrc = 0;
+		fb_base = drm_get_resource_start(dev, 0);
+		fb_size = SAVAGE_FB_SIZE_S3;
+		mmio_base = fb_base + SAVAGE_FB_SIZE_S3;
+		aper_rsrc = 0;
+		aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
+		/* this should always be true */
+		if (drm_get_resource_len(dev, 0) == 0x08000000) {
+			/* Don't make the MMIO region write-combining!
+			 * Covering the frame buffer and aperture while
+			 * skipping the MMIO takes 3 MTRRs. */
+			dev_priv->mtrr[0].base = fb_base;
+			dev_priv->mtrr[0].size = 0x01000000;
+			dev_priv->mtrr[0].handle = mtrr_add(
+				dev_priv->mtrr[0].base, dev_priv->mtrr[0].size,
+				MTRR_TYPE_WRCOMB, 1);
+			dev_priv->mtrr[1].base = fb_base+0x02000000;
+			dev_priv->mtrr[1].size = 0x02000000;
+			dev_priv->mtrr[1].handle = mtrr_add(
+				dev_priv->mtrr[1].base, dev_priv->mtrr[1].size,
+				MTRR_TYPE_WRCOMB, 1);
+			dev_priv->mtrr[2].base = fb_base+0x04000000;
+			dev_priv->mtrr[2].size = 0x04000000;
+			dev_priv->mtrr[2].handle = mtrr_add(
+				dev_priv->mtrr[2].base, dev_priv->mtrr[2].size,
+				MTRR_TYPE_WRCOMB, 1);
+		} else {
+			DRM_ERROR("strange pci_resource_len %08lx\n",
+				  drm_get_resource_len(dev, 0));
+		}
+	} else if (chipset != S3_SUPERSAVAGE && chipset != S3_SAVAGE2000) {
+		mmio_base = drm_get_resource_start(dev, 0);
+		fb_rsrc = 1;
+		fb_base = drm_get_resource_start(dev, 1);
+		fb_size = SAVAGE_FB_SIZE_S4;
+		aper_rsrc = 1;
+		aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
+		/* this should always be true */
+		if (drm_get_resource_len(dev, 1) == 0x08000000) {
+			/* Can use one MTRR to cover both fb and
+			 * aperture. */
+			dev_priv->mtrr[0].base = fb_base;
+			dev_priv->mtrr[0].size = 0x08000000;
+			dev_priv->mtrr[0].handle = mtrr_add(
+				dev_priv->mtrr[0].base, dev_priv->mtrr[0].size,
+				MTRR_TYPE_WRCOMB, 1);
+		} else {
+			DRM_ERROR("strange pci_resource_len %08lx\n",
+				  drm_get_resource_len(dev, 1));
+		}
+	} else {
+		mmio_base = drm_get_resource_start(dev, 0);
+		fb_rsrc = 1;
+		fb_base = drm_get_resource_start(dev, 1);
+		fb_size = drm_get_resource_len(dev, 1);
+		aper_rsrc = 2;
+		aperture_base = drm_get_resource_start(dev, 2);
+		/* Automatic MTRR setup will do the right thing. */
+	}
+
+	ret = drm_addmap(dev, mmio_base, SAVAGE_MMIO_SIZE, _DRM_REGISTERS,
+			 _DRM_READ_ONLY, &dev_priv->mmio);
+	if (ret)
+		return ret;
+
+	ret = drm_addmap(dev, fb_base, fb_size, _DRM_FRAME_BUFFER,
+			 _DRM_WRITE_COMBINING, &dev_priv->fb);
+	if (ret)
+		return ret;
+
+	ret = drm_addmap(dev, aperture_base, SAVAGE_APERTURE_SIZE,
+			 _DRM_FRAME_BUFFER, _DRM_WRITE_COMBINING,
+			 &dev_priv->aperture);
+	if (ret)
+		return ret;
+
+	return ret;
+}
+
+/*
+ * Delete MTRRs and free device-private data.
+ */
+int savage_postcleanup(drm_device_t *dev)
+{
+	drm_savage_private_t *dev_priv = dev->dev_private;
+	int i;
+
+	for (i = 0; i < 3; ++i)
+		if (dev_priv->mtrr[i].handle >= 0)
+			mtrr_del(dev_priv->mtrr[i].handle,
+				 dev_priv->mtrr[i].base,
+				 dev_priv->mtrr[i].size);
+
+	drm_free(dev_priv, sizeof(drm_savage_private_t), DRM_MEM_DRIVER);
+
+	return 0;
+}
+
+static int savage_do_init_bci(drm_device_t *dev, drm_savage_init_t *init)
+{
+	drm_savage_private_t *dev_priv = dev->dev_private;
+
+	if (init->fb_bpp != 16 && init->fb_bpp != 32) {
+		DRM_ERROR("invalid frame buffer bpp %d!\n", init->fb_bpp);
+		return DRM_ERR(EINVAL);
+	}
+	if (init->depth_bpp != 16 && init->depth_bpp != 32) {
+		DRM_ERROR("invalid depth buffer bpp %d!\n", init->depth_bpp);
+		return DRM_ERR(EINVAL);
+	}
+	if (init->dma_type != SAVAGE_DMA_AGP &&
+	    init->dma_type != SAVAGE_DMA_PCI) {
+		DRM_ERROR("invalid dma memory type %d!\n", init->dma_type);
+		return DRM_ERR(EINVAL);
+	}
+
+	dev_priv->cob_size = init->cob_size;
+	dev_priv->bci_threshold_lo = init->bci_threshold_lo;
+	dev_priv->bci_threshold_hi = init->bci_threshold_hi;
+	dev_priv->dma_type = init->dma_type;
+
+	dev_priv->fb_bpp = init->fb_bpp;
+	dev_priv->front_offset = init->front_offset;
+	dev_priv->front_pitch = init->front_pitch;
+	dev_priv->back_offset = init->back_offset;
+	dev_priv->back_pitch = init->back_pitch;
+	dev_priv->depth_bpp = init->depth_bpp;
+	dev_priv->depth_offset = init->depth_offset;
+	dev_priv->depth_pitch = init->depth_pitch;
+
+	dev_priv->texture_offset = init->texture_offset;
+	dev_priv->texture_size = init->texture_size;
+
+	DRM_GETSAREA();
+	if (!dev_priv->sarea) {
+		DRM_ERROR("could not find sarea!\n");
+		savage_do_cleanup_bci(dev);
+		return DRM_ERR(EINVAL);
+	}
+	if (init->status_offset != 0) {
+		dev_priv->status = drm_core_findmap(dev, init->status_offset);
+		if (!dev_priv->status) {
+			DRM_ERROR("could not find shadow status region!\n");
+			savage_do_cleanup_bci(dev);
+			return DRM_ERR(EINVAL);
+		}
+	} else {
+		dev_priv->status = NULL;
+	}
+	if (dev_priv->dma_type == SAVAGE_DMA_AGP && init->buffers_offset) {
+		dev->agp_buffer_map = drm_core_findmap(dev,
+						       init->buffers_offset);
+		if (!dev->agp_buffer_map) {
+			DRM_ERROR("could not find DMA buffer region!\n");
+			savage_do_cleanup_bci(dev);
+			return DRM_ERR(EINVAL);
+		}
+		drm_core_ioremap(dev->agp_buffer_map, dev);
+		if (!dev->agp_buffer_map->handle) {
+			DRM_ERROR("failed to ioremap DMA buffer region!\n");
+			savage_do_cleanup_bci(dev);
+			return DRM_ERR(ENOMEM);
+		}
+	}
+	if (init->agp_textures_offset) {
+		dev_priv->agp_textures =
+			drm_core_findmap(dev, init->agp_textures_offset);
+		if (!dev_priv->agp_textures) {
+			DRM_ERROR("could not find agp texture region!\n");
+			savage_do_cleanup_bci(dev);
+			return DRM_ERR(EINVAL);
+		}
+	} else {
+		dev_priv->agp_textures = NULL;
+	}
+
+	if (init->cmd_dma_offset) {
+		if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
+			DRM_ERROR("command DMA not supported on "
+				  "Savage3D/MX/IX.\n");
+			savage_do_cleanup_bci(dev);
+			return DRM_ERR(EINVAL);
+		}
+		if (dev->dma && dev->dma->buflist) {
+			DRM_ERROR("command and vertex DMA not supported "
+				  "at the same time.\n");
+			savage_do_cleanup_bci(dev);
+			return DRM_ERR(EINVAL);
+		}
+		dev_priv->cmd_dma = drm_core_findmap(dev, init->cmd_dma_offset);
+		if (!dev_priv->cmd_dma) {
+			DRM_ERROR("could not find command DMA region!\n");
+			savage_do_cleanup_bci(dev);
+			return DRM_ERR(EINVAL);
+		}
+		if (dev_priv->dma_type == SAVAGE_DMA_AGP) {
+			if (dev_priv->cmd_dma->type != _DRM_AGP) {
+				DRM_ERROR("AGP command DMA region is not a "
+					  "_DRM_AGP map!\n");
+				savage_do_cleanup_bci(dev);
+				return DRM_ERR(EINVAL);
+			}
+			drm_core_ioremap(dev_priv->cmd_dma, dev);
+			if (!dev_priv->cmd_dma->handle) {
+				DRM_ERROR("failed to ioremap command "
+					  "DMA region!\n");
+				savage_do_cleanup_bci(dev);
+				return DRM_ERR(ENOMEM);
+			}
+		} else if (dev_priv->cmd_dma->type != _DRM_CONSISTENT) {
+			DRM_ERROR("PCI command DMA region is not a "
+				  "_DRM_CONSISTENT map!\n");
+			savage_do_cleanup_bci(dev);
+			return DRM_ERR(EINVAL);
+		}
+	} else {
+		dev_priv->cmd_dma = NULL;
+	}
+
+	dev_priv->dma_flush = savage_dma_flush;
+	if (!dev_priv->cmd_dma) {
+		DRM_DEBUG("falling back to faked command DMA.\n");
+		dev_priv->fake_dma.offset = 0;
+		dev_priv->fake_dma.size = SAVAGE_FAKE_DMA_SIZE;
+		dev_priv->fake_dma.type = _DRM_SHM;
+		dev_priv->fake_dma.handle = drm_alloc(SAVAGE_FAKE_DMA_SIZE,
+						      DRM_MEM_DRIVER);
+		if (!dev_priv->fake_dma.handle) {
+			DRM_ERROR("could not allocate faked DMA buffer!\n");
+			savage_do_cleanup_bci(dev);
+			return DRM_ERR(ENOMEM);
+		}
+		dev_priv->cmd_dma = &dev_priv->fake_dma;
+		dev_priv->dma_flush = savage_fake_dma_flush;
+	}
+
+	dev_priv->sarea_priv =
+		(drm_savage_sarea_t *)((uint8_t *)dev_priv->sarea->handle +
+				       init->sarea_priv_offset);
+
+	/* setup bitmap descriptors */
+	{
+		unsigned int color_tile_format;
+		unsigned int depth_tile_format;
+		unsigned int front_stride, back_stride, depth_stride;
+		if (dev_priv->chipset <= S3_SAVAGE4) {
+			color_tile_format = dev_priv->fb_bpp == 16 ?
+				SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP;
+			depth_tile_format = dev_priv->depth_bpp == 16 ?
+				SAVAGE_BD_TILE_16BPP : SAVAGE_BD_TILE_32BPP;
+		} else {
+			color_tile_format = SAVAGE_BD_TILE_DEST;
+			depth_tile_format = SAVAGE_BD_TILE_DEST;
+		}
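+		/* convert pitches in bytes to strides in pixels */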
+		front_stride = dev_priv->front_pitch / (dev_priv->fb_bpp/8);
+		back_stride  = dev_priv-> back_pitch / (dev_priv->fb_bpp/8);
+		depth_stride = dev_priv->depth_pitch / (dev_priv->depth_bpp/8);
+
+		dev_priv->front_bd = front_stride | SAVAGE_BD_BW_DISABLE |
+			(dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) |
+			(color_tile_format << SAVAGE_BD_TILE_SHIFT);
+
+		dev_priv-> back_bd =  back_stride | SAVAGE_BD_BW_DISABLE |
+			(dev_priv->fb_bpp << SAVAGE_BD_BPP_SHIFT) |
+			(color_tile_format << SAVAGE_BD_TILE_SHIFT);
+
+		dev_priv->depth_bd = depth_stride | SAVAGE_BD_BW_DISABLE |
+			(dev_priv->depth_bpp << SAVAGE_BD_BPP_SHIFT) |
+			(depth_tile_format << SAVAGE_BD_TILE_SHIFT);
+	}
+
+	/* setup status and bci ptr */
+	dev_priv->event_counter = 0;
+	dev_priv->event_wrap = 0;
+	dev_priv->bci_ptr = (volatile uint32_t *)
+	    ((uint8_t *)dev_priv->mmio->handle + SAVAGE_BCI_OFFSET);
+	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
+		dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S3D;
+	} else {
+		dev_priv->status_used_mask = SAVAGE_FIFO_USED_MASK_S4;
+	}
+	if (dev_priv->status != NULL) {
+		dev_priv->status_ptr =
+			(volatile uint32_t *)dev_priv->status->handle;
+		dev_priv->wait_fifo = savage_bci_wait_fifo_shadow;
+		dev_priv->wait_evnt = savage_bci_wait_event_shadow;
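+		/* slot 1023 of the shadow status page holds the event
+		 * counter shared with the X server (see UPDATE_EVENT_COUNTER) */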
+		dev_priv->status_ptr[1023] = dev_priv->event_counter;
+	} else {
+		dev_priv->status_ptr = NULL;
+		if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
+			dev_priv->wait_fifo = savage_bci_wait_fifo_s3d;
+		} else {
+			dev_priv->wait_fifo = savage_bci_wait_fifo_s4;
+		}
+		dev_priv->wait_evnt = savage_bci_wait_event_reg;
+	}
+
+	/* cliprect functions */
+	if (S3_SAVAGE3D_SERIES(dev_priv->chipset))
+		dev_priv->emit_clip_rect = savage_emit_clip_rect_s3d;
+	else
+		dev_priv->emit_clip_rect = savage_emit_clip_rect_s4;
+
+	if (savage_freelist_init(dev) < 0) {
+		DRM_ERROR("could not initialize freelist\n");
+		savage_do_cleanup_bci(dev);
+		return DRM_ERR(ENOMEM);
+	}
+
+	if (savage_dma_init(dev_priv) <  0) {
+		DRM_ERROR("could not initialize command DMA\n");
+		savage_do_cleanup_bci(dev);
+		return DRM_ERR(ENOMEM);
+	}
+
+	return 0;
+}
+
+int savage_do_cleanup_bci(drm_device_t *dev)
+{
+	drm_savage_private_t *dev_priv = dev->dev_private;
+
+	if (dev_priv->cmd_dma == &dev_priv->fake_dma) {
+		if (dev_priv->fake_dma.handle)
+			drm_free(dev_priv->fake_dma.handle,
+				 SAVAGE_FAKE_DMA_SIZE, DRM_MEM_DRIVER);
+	} else if (dev_priv->cmd_dma && dev_priv->cmd_dma->handle &&
+		   dev_priv->cmd_dma->type == _DRM_AGP &&
+		   dev_priv->dma_type == SAVAGE_DMA_AGP)
+		drm_core_ioremapfree(dev_priv->cmd_dma, dev);
+
+	if (dev_priv->dma_type == SAVAGE_DMA_AGP &&
+	    dev->agp_buffer_map && dev->agp_buffer_map->handle) {
+		drm_core_ioremapfree(dev->agp_buffer_map, dev);
+		/* make sure the next instance (which may be running
+		 * in PCI mode) doesn't try to use an old
+		 * agp_buffer_map. */
+		dev->agp_buffer_map = NULL;
+	}
+
+	if (dev_priv->dma_pages)
+		drm_free(dev_priv->dma_pages,
+			 sizeof(drm_savage_dma_page_t)*dev_priv->nr_dma_pages,
+			 DRM_MEM_DRIVER);
+
+	return 0;
+}
+
+static int savage_bci_init(DRM_IOCTL_ARGS)
+{
+	DRM_DEVICE;
+	drm_savage_init_t init;
+
+	LOCK_TEST_WITH_RETURN(dev, filp);
+
+	DRM_COPY_FROM_USER_IOCTL(init, (drm_savage_init_t __user *)data,
+				 sizeof(init));
+
+	switch (init.func) {
+	case SAVAGE_INIT_BCI:
+		return savage_do_init_bci(dev, &init);
+	case SAVAGE_CLEANUP_BCI:
+		return savage_do_cleanup_bci(dev);
+	}
+
+	return DRM_ERR(EINVAL);
+}
+
+static int savage_bci_event_emit(DRM_IOCTL_ARGS)
+{
+	DRM_DEVICE;
+	drm_savage_private_t *dev_priv = dev->dev_private;
+	drm_savage_event_emit_t event;
+
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, filp);
+
+	DRM_COPY_FROM_USER_IOCTL(event, (drm_savage_event_emit_t __user *)data,
+				 sizeof(event));
+
+	event.count = savage_bci_emit_event(dev_priv, event.flags);
+	event.count |= dev_priv->event_wrap << 16;
+	DRM_COPY_TO_USER_IOCTL(&((drm_savage_event_emit_t __user *)data)->count,
+			       event.count, sizeof(event.count));
+	return 0;
+}
+
+static int savage_bci_event_wait(DRM_IOCTL_ARGS)
+{
+	DRM_DEVICE;
+	drm_savage_private_t *dev_priv = dev->dev_private;
+	drm_savage_event_wait_t event;
+	unsigned int event_e, hw_e;
+	unsigned int event_w, hw_w;
+
+	DRM_DEBUG("\n");
+
+	DRM_COPY_FROM_USER_IOCTL(event, (drm_savage_event_wait_t __user *)data,
+				 sizeof(event));
+
+	UPDATE_EVENT_COUNTER();
+	if (dev_priv->status_ptr)
+		hw_e = dev_priv->status_ptr[1] & 0xffff;
+	else
+		hw_e = SAVAGE_READ(SAVAGE_STATUS_WORD1) & 0xffff;
+	hw_w = dev_priv->event_wrap;
+	if (hw_e > dev_priv->event_counter)
+		hw_w--; /* hardware hasn't passed the last wrap yet */
+
+	event_e = event.count & 0xffff;
+	event_w = event.count >> 16;
+
+	/* Don't need to wait if
+	 * - event counter wrapped since the event was emitted or
+	 * - the hardware has advanced up to or over the event to wait for.
+	 */
+	if (event_w < hw_w || (event_w == hw_w && event_e <= hw_e) )
+		return 0;
+	else
+		return dev_priv->wait_evnt(dev_priv, event_e);
+}
+
+/*
+ * DMA buffer management
+ */
+
+static int savage_bci_get_buffers(DRMFILE filp, drm_device_t *dev, drm_dma_t *d)
+{
+	drm_buf_t *buf;
+	int i;
+
+	for (i = d->granted_count; i < d->request_count; i++) {
+		buf = savage_freelist_get(dev);
+		if (!buf)
+			return DRM_ERR(EAGAIN);
+
+		buf->filp = filp;
+
+		if (DRM_COPY_TO_USER(&d->request_indices[i],
+				     &buf->idx, sizeof(buf->idx)))
+			return DRM_ERR(EFAULT);
+		if (DRM_COPY_TO_USER(&d->request_sizes[i],
+				     &buf->total, sizeof(buf->total)))
+			return DRM_ERR(EFAULT);
+
+		d->granted_count++;
+	}
+	return 0;
+}
+
+int savage_bci_buffers(DRM_IOCTL_ARGS)
+{
+	DRM_DEVICE;
+	drm_device_dma_t *dma = dev->dma;
+	drm_dma_t d;
+	int ret = 0;
+
+	LOCK_TEST_WITH_RETURN(dev, filp);
+
+	DRM_COPY_FROM_USER_IOCTL(d, (drm_dma_t __user *)data, sizeof(d));
+
+	/* Please don't send us buffers.
+	 */
+	if (d.send_count != 0) {
+		DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
+			  DRM_CURRENTPID, d.send_count);
+		return DRM_ERR(EINVAL);
+	}
+
+	/* We'll send you buffers.
+	 */
+	if (d.request_count < 0 || d.request_count > dma->buf_count) {
+		DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
+			  DRM_CURRENTPID, d.request_count, dma->buf_count);
+		return DRM_ERR(EINVAL);
+	}
+
+	d.granted_count = 0;
+
+	if (d.request_count) {
+		ret = savage_bci_get_buffers(filp, dev, &d);
+	}
+
+	DRM_COPY_TO_USER_IOCTL((drm_dma_t __user *)data, d, sizeof(d));
+
+	return ret;
+}
+
+void savage_reclaim_buffers(drm_device_t *dev, DRMFILE filp)
+{
+	drm_device_dma_t *dma = dev->dma;
+	drm_savage_private_t *dev_priv = dev->dev_private;
+	int i;
+
+	if (!dma)
+		return;
+	if (!dev_priv)
+		return;
+	if (!dma->buflist)
+		return;
+
+	for (i = 0; i < dma->buf_count; i++) {
+		drm_buf_t *buf = dma->buflist[i];
+		drm_savage_buf_priv_t *buf_priv = buf->dev_private;
+
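+		/* buffers not linked into the freelist are still held by a client */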
+		if (buf->filp == filp && buf_priv &&
+		    buf_priv->next == NULL && buf_priv->prev == NULL) {
+			uint16_t event;
+			DRM_DEBUG("reclaimed from client\n");
+			event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D);
+			SET_AGE(&buf_priv->age, event, dev_priv->event_wrap);
+			savage_freelist_put(dev, buf);
+		}
+	}
+
+	drm_core_reclaim_buffers(dev, filp);
+}
+
+
+drm_ioctl_desc_t savage_ioctls[] = {
+	[DRM_IOCTL_NR(DRM_SAVAGE_BCI_INIT)] = {savage_bci_init, 1, 1},
+	[DRM_IOCTL_NR(DRM_SAVAGE_BCI_CMDBUF)] = {savage_bci_cmdbuf, 1, 0},
+	[DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_EMIT)] = {savage_bci_event_emit, 1, 0},
+	[DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_WAIT)] = {savage_bci_event_wait, 1, 0},
+};
+
+int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls);
diff --git a/drivers/char/drm/savage_drm.h b/drivers/char/drm/savage_drm.h
new file mode 100644
index 0000000..6526c9a
--- /dev/null
+++ b/drivers/char/drm/savage_drm.h
@@ -0,0 +1,209 @@
+/* savage_drm.h -- Public header for the savage driver
+ *
+ * Copyright 2004  Felix Kuehling
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
+ * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __SAVAGE_DRM_H__
+#define __SAVAGE_DRM_H__
+
+#ifndef __SAVAGE_SAREA_DEFINES__
+#define __SAVAGE_SAREA_DEFINES__
+
+/* 2 heaps (1 for card, 1 for agp), each divided into up to 128
+ * regions, subject to a minimum region size of (1<<16) == 64k.
+ *
+ * Clients may subdivide regions internally, but when sharing between
+ * clients, the region size is the minimum granularity.
+ */
+
+#define SAVAGE_CARD_HEAP		0
+#define SAVAGE_AGP_HEAP			1
+#define SAVAGE_NR_TEX_HEAPS		2
+#define SAVAGE_NR_TEX_REGIONS		16
+#define SAVAGE_LOG_MIN_TEX_REGION_SIZE	16
+
+#endif /* __SAVAGE_SAREA_DEFINES__ */
+
+typedef struct _drm_savage_sarea {
+	/* LRU lists for texture memory in agp space and on the card.
+	 */
+	drm_tex_region_t texList[SAVAGE_NR_TEX_HEAPS][SAVAGE_NR_TEX_REGIONS+1];
+	unsigned int texAge[SAVAGE_NR_TEX_HEAPS];
+
+	/* Mechanism to validate card state.
+	 */
+	int ctxOwner;
+} drm_savage_sarea_t, *drm_savage_sarea_ptr;
+
+/* Savage-specific ioctls
+ */
+#define DRM_SAVAGE_BCI_INIT		0x00
+#define DRM_SAVAGE_BCI_CMDBUF           0x01
+#define DRM_SAVAGE_BCI_EVENT_EMIT	0x02
+#define DRM_SAVAGE_BCI_EVENT_WAIT	0x03
+
+#define DRM_IOCTL_SAVAGE_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_INIT, drm_savage_init_t)
+#define DRM_IOCTL_SAVAGE_CMDBUF		DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_CMDBUF, drm_savage_cmdbuf_t)
+#define DRM_IOCTL_SAVAGE_EVENT_EMIT	DRM_IOWR(DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_EMIT, drm_savage_event_emit_t)
+#define DRM_IOCTL_SAVAGE_EVENT_WAIT	DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_WAIT, drm_savage_event_wait_t)
+
+#define SAVAGE_DMA_PCI	1
+#define SAVAGE_DMA_AGP	3
+typedef struct drm_savage_init {
+	enum {
+		SAVAGE_INIT_BCI = 1,
+		SAVAGE_CLEANUP_BCI = 2
+	} func;
+	unsigned int sarea_priv_offset;
+
+	/* some parameters */
+	unsigned int cob_size;
+	unsigned int bci_threshold_lo, bci_threshold_hi;
+	unsigned int dma_type;
+
+	/* frame buffer layout */
+	unsigned int fb_bpp;
+	unsigned int front_offset, front_pitch;
+	unsigned int back_offset, back_pitch;
+	unsigned int depth_bpp;
+	unsigned int depth_offset, depth_pitch;
+
+	/* local textures */
+	unsigned int texture_offset;
+	unsigned int texture_size;
+
+	/* physical locations of non-permanent maps */
+	unsigned long status_offset;
+	unsigned long buffers_offset;
+	unsigned long agp_textures_offset;
+	unsigned long cmd_dma_offset;
+} drm_savage_init_t;
+
+typedef union drm_savage_cmd_header drm_savage_cmd_header_t;
+typedef struct drm_savage_cmdbuf {
+				/* command buffer in client's address space */
+	drm_savage_cmd_header_t __user *cmd_addr;
+	unsigned int size;	/* size of the command buffer in 64bit units */
+
+	unsigned int dma_idx;	/* DMA buffer index to use */
+	int discard;		/* discard DMA buffer when done */
+				/* vertex buffer in client's address space */
+	unsigned int __user *vb_addr;
+	unsigned int vb_size;	/* size of client vertex buffer in bytes */
+	unsigned int vb_stride;	/* stride of vertices in 32bit words */
+				/* boxes in client's address space */
+	drm_clip_rect_t __user *box_addr;
+	unsigned int nbox;	/* number of clipping boxes */
+} drm_savage_cmdbuf_t;
+
+#define SAVAGE_WAIT_2D  0x1 /* wait for 2D idle before updating event tag */
+#define SAVAGE_WAIT_3D  0x2 /* wait for 3D idle before updating event tag */
+#define SAVAGE_WAIT_IRQ 0x4 /* emit or wait for IRQ, not implemented yet */
+typedef struct drm_savage_event {
+	unsigned int count;
+	unsigned int flags;
+} drm_savage_event_emit_t, drm_savage_event_wait_t;
+
+/* Commands for the cmdbuf ioctl
+ */
+#define SAVAGE_CMD_STATE	0  /* a range of state registers */
+#define SAVAGE_CMD_DMA_PRIM	1  /* vertices from DMA buffer */
+#define SAVAGE_CMD_VB_PRIM	2  /* vertices from client vertex buffer */
+#define SAVAGE_CMD_DMA_IDX	3  /* indexed vertices from DMA buffer */
+#define SAVAGE_CMD_VB_IDX	4  /* indexed vertices from client vertex buffer */
+#define SAVAGE_CMD_CLEAR	5  /* clear buffers */
+#define SAVAGE_CMD_SWAP		6  /* swap buffers */
+
+/* Primitive types
+ */
+#define SAVAGE_PRIM_TRILIST	0  /* triangle list */
+#define SAVAGE_PRIM_TRISTRIP	1  /* triangle strip */
+#define SAVAGE_PRIM_TRIFAN	2  /* triangle fan */
+#define SAVAGE_PRIM_TRILIST_201	3  /* reorder verts for correct flat
+				    * shading on s3d */
+
+/* Skip flags (vertex format)
+ */
+#define SAVAGE_SKIP_Z		0x01
+#define SAVAGE_SKIP_W		0x02
+#define SAVAGE_SKIP_C0		0x04
+#define SAVAGE_SKIP_C1		0x08
+#define SAVAGE_SKIP_S0		0x10
+#define SAVAGE_SKIP_T0		0x20
+#define SAVAGE_SKIP_ST0		0x30
+#define SAVAGE_SKIP_S1		0x40
+#define SAVAGE_SKIP_T1		0x80
+#define SAVAGE_SKIP_ST1		0xc0
+#define SAVAGE_SKIP_ALL_S3D	0x3f
+#define SAVAGE_SKIP_ALL_S4	0xff
+
+/* Buffer names for clear command
+ */
+#define SAVAGE_FRONT		0x1
+#define SAVAGE_BACK		0x2
+#define SAVAGE_DEPTH		0x4
+
+/* 64-bit command header
+ */
+union drm_savage_cmd_header {
+	struct {
+		unsigned char cmd;	/* command */
+		unsigned char pad0;
+		unsigned short pad1;
+		unsigned short pad2;
+		unsigned short pad3;
+	} cmd; /* generic */
+	struct {
+		unsigned char cmd;
+		unsigned char global;	/* need idle engine? */
+		unsigned short count;	/* number of consecutive registers */
+		unsigned short start;	/* first register */
+		unsigned short pad3;
+	} state; /* SAVAGE_CMD_STATE */
+	struct {
+		unsigned char cmd;
+		unsigned char prim;	/* primitive type */
+		unsigned short skip;	/* vertex format (skip flags) */
+		unsigned short count;	/* number of vertices */
+		unsigned short start;	/* first vertex in DMA/vertex buffer */
+	} prim; /* SAVAGE_CMD_DMA_PRIM, SAVAGE_CMD_VB_PRIM */
+	struct {
+		unsigned char cmd;
+		unsigned char prim;
+		unsigned short skip;
+		unsigned short count;	/* number of indices that follow */
+		unsigned short pad3;
+	} idx; /* SAVAGE_CMD_DMA_IDX, SAVAGE_CMD_VB_IDX */
+	struct {
+		unsigned char cmd;
+		unsigned char pad0;
+		unsigned short pad1;
+		unsigned int flags;
+	} clear0; /* SAVAGE_CMD_CLEAR */
+	struct {
+		unsigned int mask;
+		unsigned int value;
+	} clear1; /* SAVAGE_CMD_CLEAR data */
+};
+
+#endif
diff --git a/drivers/char/drm/savage_drv.c b/drivers/char/drm/savage_drv.c
new file mode 100644
index 0000000..ac8d270
--- /dev/null
+++ b/drivers/char/drm/savage_drv.c
@@ -0,0 +1,112 @@
+/* savage_drv.c -- Savage driver for Linux
+ *
+ * Copyright 2004  Felix Kuehling
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
+ * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/config.h>
+#include "drmP.h"
+#include "savage_drm.h"
+#include "savage_drv.h"
+
+#include "drm_pciids.h"
+
+static int postinit( struct drm_device *dev, unsigned long flags )
+{
+	DRM_INFO( "Initialized %s %d.%d.%d %s on minor %d: %s\n",
+		DRIVER_NAME,
+		DRIVER_MAJOR,
+		DRIVER_MINOR,
+		DRIVER_PATCHLEVEL,
+		DRIVER_DATE,
+		dev->primary.minor,
+		pci_pretty_name(dev->pdev)
+		);
+	return 0;
+}
+
+static int version( drm_version_t *version )
+{
+	int len;
+
+	version->version_major = DRIVER_MAJOR;
+	version->version_minor = DRIVER_MINOR;
+	version->version_patchlevel = DRIVER_PATCHLEVEL;
+	DRM_COPY( version->name, DRIVER_NAME );
+	DRM_COPY( version->date, DRIVER_DATE );
+	DRM_COPY( version->desc, DRIVER_DESC );
+	return 0;
+}
+
+static struct pci_device_id pciidlist[] = {
+	savage_PCI_IDS
+};
+
+extern drm_ioctl_desc_t savage_ioctls[];
+extern int savage_max_ioctl;
+
+static struct drm_driver driver = {
+	.driver_features =
+	    DRIVER_USE_AGP | DRIVER_USE_MTRR |
+	    DRIVER_HAVE_DMA | DRIVER_PCI_DMA,
+	.dev_priv_size = sizeof(drm_savage_buf_priv_t),
+	.preinit = savage_preinit,
+	.postinit = postinit,
+	.postcleanup = savage_postcleanup,
+	.reclaim_buffers = savage_reclaim_buffers,
+	.get_map_ofs = drm_core_get_map_ofs,
+	.get_reg_ofs = drm_core_get_reg_ofs,
+	.version = version,
+	.ioctls = savage_ioctls,
+	.dma_ioctl = savage_bci_buffers,
+	.fops = {
+		.owner   = THIS_MODULE,
+		.open	 = drm_open,
+		.release = drm_release,
+		.ioctl	 = drm_ioctl,
+		.mmap	 = drm_mmap,
+		.poll = drm_poll,
+		.fasync  = drm_fasync,
+	},
+	.pci_driver = {
+		.name          = DRIVER_NAME,
+		.id_table      = pciidlist,
+	}
+};
+
+static int __init savage_init(void)
+{
+	driver.num_ioctls = savage_max_ioctl;
+	return drm_init(&driver);
+}
+
+static void __exit savage_exit(void)
+{
+	drm_exit(&driver);
+}
+
+module_init(savage_init);
+module_exit(savage_exit);
+
+MODULE_AUTHOR( DRIVER_AUTHOR );
+MODULE_DESCRIPTION( DRIVER_DESC );
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/drm/savage_drv.h b/drivers/char/drm/savage_drv.h
new file mode 100644
index 0000000..a454349
--- /dev/null
+++ b/drivers/char/drm/savage_drv.h
@@ -0,0 +1,579 @@
+/* savage_drv.h -- Private header for the savage driver
+ *
+ * Copyright 2004  Felix Kuehling
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
+ * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __SAVAGE_DRV_H__
+#define __SAVAGE_DRV_H__
+
+#define DRIVER_AUTHOR	"Felix Kuehling"
+
+#define DRIVER_NAME	"savage"
+#define DRIVER_DESC	"Savage3D/MX/IX, Savage4, SuperSavage, Twister, ProSavage[DDR]"
+#define DRIVER_DATE	"20050313"
+
+#define DRIVER_MAJOR		2
+#define DRIVER_MINOR		4
+#define DRIVER_PATCHLEVEL	1
+/* Interface history:
+ *
+ * 1.x   The DRM driver from the VIA/S3 code drop, basically a dummy
+ * 2.0   The first real DRM
+ * 2.1   Scissors registers managed by the DRM, 3D operations clipped by
+ *       cliprects of the cmdbuf ioctl
+ * 2.2   Implemented SAVAGE_CMD_DMA_IDX and SAVAGE_CMD_VB_IDX
+ * 2.3   Event counters used by BCI_EVENT_EMIT/WAIT ioctls are now 32 bits
+ *       wide and thus very long lived (unlikely to ever wrap). The size
+ *       in the struct was 32 bits before, but only 16 bits were used
+ * 2.4   Implemented command DMA. Now drm_savage_init_t.cmd_dma_offset is
+ *       actually used
+ */
+
+typedef struct drm_savage_age {
+	uint16_t event;
+	unsigned int wrap;
+} drm_savage_age_t;
+
+typedef struct drm_savage_buf_priv {
+	struct drm_savage_buf_priv *next;
+	struct drm_savage_buf_priv *prev;
+	drm_savage_age_t age;
+	drm_buf_t *buf;
+} drm_savage_buf_priv_t;
+
+typedef struct drm_savage_dma_page {
+	drm_savage_age_t age;
+	unsigned int used, flushed;
+} drm_savage_dma_page_t;
+#define SAVAGE_DMA_PAGE_SIZE 1024 /* in dwords */
+/* Fake DMA buffer size in bytes. 4 pages. Allows a maximum command
+ * size of 16kbytes or 4k entries. Minimum requirement would be
+ * 10kbytes for 255 40-byte vertices in one drawing command. */
+#define SAVAGE_FAKE_DMA_SIZE (SAVAGE_DMA_PAGE_SIZE*4*4)
+
+/* interesting bits of hardware state that are saved in dev_priv */
+typedef union {
+	struct drm_savage_common_state {
+		uint32_t vbaddr;
+	} common;
+	struct {
+		unsigned char pad[sizeof(struct drm_savage_common_state)];
+		uint32_t texctrl, texaddr;
+		uint32_t scstart, new_scstart;
+		uint32_t scend, new_scend;
+	} s3d;
+	struct {
+		unsigned char pad[sizeof(struct drm_savage_common_state)];
+		uint32_t texdescr, texaddr0, texaddr1;
+		uint32_t drawctrl0, new_drawctrl0;
+		uint32_t drawctrl1, new_drawctrl1;
+	} s4;
+} drm_savage_state_t;
+
+/* these chip tags should match the ones in the 2D driver in savage_regs.h. */
+enum savage_family {
+	S3_UNKNOWN = 0,
+	S3_SAVAGE3D,
+	S3_SAVAGE_MX,
+	S3_SAVAGE4,
+	S3_PROSAVAGE,
+	S3_TWISTER,
+	S3_PROSAVAGEDDR,
+	S3_SUPERSAVAGE,
+	S3_SAVAGE2000,
+	S3_LAST
+};
+
+#define S3_SAVAGE3D_SERIES(chip)  ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX))
+
+#define S3_SAVAGE4_SERIES(chip)  ((chip==S3_SAVAGE4)            \
+                                  || (chip==S3_PROSAVAGE)       \
+                                  || (chip==S3_TWISTER)         \
+                                  || (chip==S3_PROSAVAGEDDR))
+
+#define	S3_SAVAGE_MOBILE_SERIES(chip)	((chip==S3_SAVAGE_MX) || (chip==S3_SUPERSAVAGE))
+
+#define S3_SAVAGE_SERIES(chip)    ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE2000))
+
+#define S3_MOBILE_TWISTER_SERIES(chip)   ((chip==S3_TWISTER)    \
+                                          ||(chip==S3_PROSAVAGEDDR))
+
+/* flags */
+#define SAVAGE_IS_AGP 1
+
+typedef struct drm_savage_private {
+	drm_savage_sarea_t *sarea_priv;
+
+	drm_savage_buf_priv_t head, tail;
+
+	/* who am I? */
+	enum savage_family chipset;
+
+	unsigned int cob_size;
+	unsigned int bci_threshold_lo, bci_threshold_hi;
+	unsigned int dma_type;
+
+	/* frame buffer layout */
+	unsigned int fb_bpp;
+	unsigned int front_offset, front_pitch;
+	unsigned int back_offset, back_pitch;
+	unsigned int depth_bpp;
+	unsigned int depth_offset, depth_pitch;
+
+	/* bitmap descriptors for swap and clear */
+	unsigned int front_bd, back_bd, depth_bd;
+
+	/* local textures */
+	unsigned int texture_offset;
+	unsigned int texture_size;
+
+	/* memory regions in physical memory */
+	drm_local_map_t *sarea;
+	drm_local_map_t *mmio;
+	drm_local_map_t *fb;
+	drm_local_map_t *aperture;
+	drm_local_map_t *status;
+	drm_local_map_t *agp_textures;
+	drm_local_map_t *cmd_dma;
+	drm_local_map_t fake_dma;
+
+	struct {
+		int handle;
+		unsigned long base, size;
+	} mtrr[3];
+
+	/* BCI and status-related stuff */
+	volatile uint32_t *status_ptr, *bci_ptr;
+	uint32_t status_used_mask;
+	uint16_t event_counter;
+	unsigned int event_wrap;
+
+	/* Savage4 command DMA */
+	drm_savage_dma_page_t *dma_pages;
+	unsigned int nr_dma_pages, first_dma_page, current_dma_page;
+	drm_savage_age_t last_dma_age;
+
+	/* saved hw state for global/local check on S3D */
+	uint32_t hw_draw_ctrl, hw_zbuf_ctrl;
+	/* and for scissors (global, so don't emit if not changed) */
+	uint32_t hw_scissors_start, hw_scissors_end;
+
+	drm_savage_state_t state;
+
+	/* after emitting a wait cmd Savage3D needs 63 nops before next DMA */
+	unsigned int waiting;
+
+	/* config/hardware-dependent function pointers */
+	int (*wait_fifo)(struct drm_savage_private *dev_priv, unsigned int n);
+	int (*wait_evnt)(struct drm_savage_private *dev_priv, uint16_t e);
+	/* Err, there is a macro wait_event in include/linux/wait.h.
+	 * Avoid unwanted macro expansion. */
+	void (*emit_clip_rect)(struct drm_savage_private *dev_priv,
+			       drm_clip_rect_t *pbox);
+	void (*dma_flush)(struct drm_savage_private *dev_priv);
+} drm_savage_private_t;
+
+/* ioctls */
+extern int savage_bci_cmdbuf(DRM_IOCTL_ARGS);
+extern int savage_bci_buffers(DRM_IOCTL_ARGS);
+
+/* BCI functions */
+extern uint16_t savage_bci_emit_event(drm_savage_private_t *dev_priv,
+				      unsigned int flags);
+extern void savage_freelist_put(drm_device_t *dev, drm_buf_t *buf);
+extern void savage_dma_reset(drm_savage_private_t *dev_priv);
+extern void savage_dma_wait(drm_savage_private_t *dev_priv, unsigned int page);
+extern uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv,
+				  unsigned int n);
+extern int savage_preinit(drm_device_t *dev, unsigned long chipset);
+extern int savage_postcleanup(drm_device_t *dev);
+extern int savage_do_cleanup_bci(drm_device_t *dev);
+extern void savage_reclaim_buffers(drm_device_t *dev, DRMFILE filp);
+
+/* state functions */
+extern void savage_emit_clip_rect_s3d(drm_savage_private_t *dev_priv,
+				      drm_clip_rect_t *pbox);
+extern void savage_emit_clip_rect_s4(drm_savage_private_t *dev_priv,
+				     drm_clip_rect_t *pbox);
+
+#define SAVAGE_FB_SIZE_S3	0x01000000	/*  16MB */
+#define SAVAGE_FB_SIZE_S4	0x02000000	/*  32MB */
+#define SAVAGE_MMIO_SIZE        0x00080000	/* 512kB */
+#define SAVAGE_APERTURE_OFFSET  0x02000000	/*  32MB */
+#define SAVAGE_APERTURE_SIZE    0x05000000	/* 5 tiled surfaces, 16MB each */
+
+#define SAVAGE_BCI_OFFSET       0x00010000      /* offset of the BCI region
+						 * inside the MMIO region */
+#define SAVAGE_BCI_FIFO_SIZE	32		/* number of entries in on-chip
+						 * BCI FIFO */
+
+/*
+ * MMIO registers
+ */
+#define SAVAGE_STATUS_WORD0		0x48C00
+#define SAVAGE_STATUS_WORD1		0x48C04
+#define SAVAGE_ALT_STATUS_WORD0 	0x48C60
+
+#define SAVAGE_FIFO_USED_MASK_S3D	0x0001ffff
+#define SAVAGE_FIFO_USED_MASK_S4	0x001fffff
+
+/* Copied from savage_bci.h in the 2D driver with some renaming. */
+
+/* Bitmap descriptors */
+#define SAVAGE_BD_STRIDE_SHIFT 0
+#define SAVAGE_BD_BPP_SHIFT   16
+#define SAVAGE_BD_TILE_SHIFT  24
+#define SAVAGE_BD_BW_DISABLE  (1<<28)
+/* common: */
+#define	SAVAGE_BD_TILE_LINEAR		0
+/* savage4, MX, IX, 3D */
+#define	SAVAGE_BD_TILE_16BPP		2
+#define	SAVAGE_BD_TILE_32BPP		3
+/* twister, prosavage, DDR, supersavage, 2000 */
+#define	SAVAGE_BD_TILE_DEST		1
+#define	SAVAGE_BD_TILE_TEXTURE		2
+/* GBD - BCI enable */
+/* savage4, MX, IX, 3D */
+#define SAVAGE_GBD_BCI_ENABLE                    8
+/* twister, prosavage, DDR, supersavage, 2000 */
+#define SAVAGE_GBD_BCI_ENABLE_TWISTER            0
+
+#define SAVAGE_GBD_BIG_ENDIAN                    4
+#define SAVAGE_GBD_LITTLE_ENDIAN                 0
+#define SAVAGE_GBD_64                            1
+
+/*  Global Bitmap Descriptor */
+#define SAVAGE_BCI_GLB_BD_LOW             0x8168
+#define SAVAGE_BCI_GLB_BD_HIGH            0x816C
+
+/*
+ * BCI registers
+ */
+/* Savage4/Twister/ProSavage 3D registers */
+#define SAVAGE_DRAWLOCALCTRL_S4		0x1e
+#define SAVAGE_TEXPALADDR_S4		0x1f
+#define SAVAGE_TEXCTRL0_S4		0x20
+#define SAVAGE_TEXCTRL1_S4		0x21
+#define SAVAGE_TEXADDR0_S4		0x22
+#define SAVAGE_TEXADDR1_S4		0x23
+#define SAVAGE_TEXBLEND0_S4		0x24
+#define SAVAGE_TEXBLEND1_S4		0x25
+#define SAVAGE_TEXXPRCLR_S4		0x26 /* never used */
+#define SAVAGE_TEXDESCR_S4		0x27
+#define SAVAGE_FOGTABLE_S4		0x28
+#define SAVAGE_FOGCTRL_S4		0x30
+#define SAVAGE_STENCILCTRL_S4		0x31
+#define SAVAGE_ZBUFCTRL_S4		0x32
+#define SAVAGE_ZBUFOFF_S4		0x33
+#define SAVAGE_DESTCTRL_S4		0x34
+#define SAVAGE_DRAWCTRL0_S4		0x35
+#define SAVAGE_DRAWCTRL1_S4		0x36
+#define SAVAGE_ZWATERMARK_S4		0x37
+#define SAVAGE_DESTTEXRWWATERMARK_S4	0x38
+#define SAVAGE_TEXBLENDCOLOR_S4		0x39
+/* Savage3D/MX/IX 3D registers */
+#define SAVAGE_TEXPALADDR_S3D		0x18
+#define SAVAGE_TEXXPRCLR_S3D		0x19 /* never used */
+#define SAVAGE_TEXADDR_S3D		0x1A
+#define SAVAGE_TEXDESCR_S3D		0x1B
+#define SAVAGE_TEXCTRL_S3D		0x1C
+#define SAVAGE_FOGTABLE_S3D		0x20
+#define SAVAGE_FOGCTRL_S3D		0x30
+#define SAVAGE_DRAWCTRL_S3D		0x31
+#define SAVAGE_ZBUFCTRL_S3D		0x32
+#define SAVAGE_ZBUFOFF_S3D		0x33
+#define SAVAGE_DESTCTRL_S3D		0x34
+#define SAVAGE_SCSTART_S3D		0x35
+#define SAVAGE_SCEND_S3D		0x36
+#define SAVAGE_ZWATERMARK_S3D		0x37 
+#define SAVAGE_DESTTEXRWWATERMARK_S3D	0x38
+/* common stuff */
+#define SAVAGE_VERTBUFADDR		0x3e
+#define SAVAGE_BITPLANEWTMASK		0xd7
+#define SAVAGE_DMABUFADDR		0x51
+
+/* texture enable bits (needed for tex addr checking) */
+#define SAVAGE_TEXCTRL_TEXEN_MASK	0x00010000 /* S3D */
+#define SAVAGE_TEXDESCR_TEX0EN_MASK	0x02000000 /* S4 */
+#define SAVAGE_TEXDESCR_TEX1EN_MASK	0x04000000 /* S4 */
+
+/* Global fields in Savage4/Twister/ProSavage 3D registers:
+ *
+ * All texture registers and DrawLocalCtrl are local. All other
+ * registers are global. */
+
+/* Global fields in Savage3D/MX/IX 3D registers:
+ *
+ * All texture registers are local. DrawCtrl and ZBufCtrl are
+ * partially local. All other registers are global.
+ *
+ * DrawCtrl global fields: cullMode, alphaTestCmpFunc, alphaTestEn, alphaRefVal
+ * ZBufCtrl global fields: zCmpFunc, zBufEn
+ */
+#define SAVAGE_DRAWCTRL_S3D_GLOBAL	0x03f3c00c
+#define SAVAGE_ZBUFCTRL_S3D_GLOBAL	0x00000027
+
+/* Masks for scissor bits (drawCtrl[01] on s4, scissorStart/End on s3d)
+ */
+#define SAVAGE_SCISSOR_MASK_S4		0x00fff7ff
+#define SAVAGE_SCISSOR_MASK_S3D		0x07ff07ff
+
+/*
+ * BCI commands
+ */
+#define BCI_CMD_NOP                  0x40000000
+#define BCI_CMD_RECT                 0x48000000
+#define BCI_CMD_RECT_XP              0x01000000
+#define BCI_CMD_RECT_YP              0x02000000
+#define BCI_CMD_SCANLINE             0x50000000
+#define BCI_CMD_LINE                 0x5C000000
+#define BCI_CMD_LINE_LAST_PIXEL      0x58000000
+#define BCI_CMD_BYTE_TEXT            0x63000000
+#define BCI_CMD_NT_BYTE_TEXT         0x67000000
+#define BCI_CMD_BIT_TEXT             0x6C000000
+#define BCI_CMD_GET_ROP(cmd)         (((cmd) >> 16) & 0xFF)
+#define BCI_CMD_SET_ROP(cmd, rop)    ((cmd) |= ((rop & 0xFF) << 16))
+#define BCI_CMD_SEND_COLOR           0x00008000
+
+#define BCI_CMD_CLIP_NONE            0x00000000
+#define BCI_CMD_CLIP_CURRENT         0x00002000
+#define BCI_CMD_CLIP_LR              0x00004000
+#define BCI_CMD_CLIP_NEW             0x00006000
+
+#define BCI_CMD_DEST_GBD             0x00000000
+#define BCI_CMD_DEST_PBD             0x00000800
+#define BCI_CMD_DEST_PBD_NEW         0x00000C00
+#define BCI_CMD_DEST_SBD             0x00001000
+#define BCI_CMD_DEST_SBD_NEW         0x00001400
+
+#define BCI_CMD_SRC_TRANSPARENT      0x00000200
+#define BCI_CMD_SRC_SOLID            0x00000000
+#define BCI_CMD_SRC_GBD              0x00000020
+#define BCI_CMD_SRC_COLOR            0x00000040
+#define BCI_CMD_SRC_MONO             0x00000060
+#define BCI_CMD_SRC_PBD_COLOR        0x00000080
+#define BCI_CMD_SRC_PBD_MONO         0x000000A0
+#define BCI_CMD_SRC_PBD_COLOR_NEW    0x000000C0
+#define BCI_CMD_SRC_PBD_MONO_NEW     0x000000E0
+#define BCI_CMD_SRC_SBD_COLOR        0x00000100
+#define BCI_CMD_SRC_SBD_MONO         0x00000120
+#define BCI_CMD_SRC_SBD_COLOR_NEW    0x00000140
+#define BCI_CMD_SRC_SBD_MONO_NEW     0x00000160
+
+#define BCI_CMD_PAT_TRANSPARENT      0x00000010
+#define BCI_CMD_PAT_NONE             0x00000000
+#define BCI_CMD_PAT_COLOR            0x00000002
+#define BCI_CMD_PAT_MONO             0x00000003
+#define BCI_CMD_PAT_PBD_COLOR        0x00000004
+#define BCI_CMD_PAT_PBD_MONO         0x00000005
+#define BCI_CMD_PAT_PBD_COLOR_NEW    0x00000006
+#define BCI_CMD_PAT_PBD_MONO_NEW     0x00000007
+#define BCI_CMD_PAT_SBD_COLOR        0x00000008
+#define BCI_CMD_PAT_SBD_MONO         0x00000009
+#define BCI_CMD_PAT_SBD_COLOR_NEW    0x0000000A
+#define BCI_CMD_PAT_SBD_MONO_NEW     0x0000000B
+
+#define BCI_BD_BW_DISABLE            0x10000000
+#define BCI_BD_TILE_MASK             0x03000000
+#define BCI_BD_TILE_NONE             0x00000000
+#define BCI_BD_TILE_16               0x02000000
+#define BCI_BD_TILE_32               0x03000000
+#define BCI_BD_GET_BPP(bd)           (((bd) >> 16) & 0xFF)
+#define BCI_BD_SET_BPP(bd, bpp)      ((bd) |= (((bpp) & 0xFF) << 16))
+#define BCI_BD_GET_STRIDE(bd)        ((bd) & 0xFFFF)
+#define BCI_BD_SET_STRIDE(bd, st)    ((bd) |= ((st) & 0xFFFF))
+
+#define BCI_CMD_SET_REGISTER            0x96000000
+
+#define BCI_CMD_WAIT                    0xC0000000
+#define BCI_CMD_WAIT_3D                 0x00010000
+#define BCI_CMD_WAIT_2D                 0x00020000
+
+#define BCI_CMD_UPDATE_EVENT_TAG        0x98000000
+
+#define BCI_CMD_DRAW_PRIM               0x80000000
+#define BCI_CMD_DRAW_INDEXED_PRIM       0x88000000
+#define BCI_CMD_DRAW_CONT               0x01000000
+#define BCI_CMD_DRAW_TRILIST            0x00000000
+#define BCI_CMD_DRAW_TRISTRIP           0x02000000
+#define BCI_CMD_DRAW_TRIFAN             0x04000000
+#define BCI_CMD_DRAW_SKIPFLAGS          0x000000ff
+#define BCI_CMD_DRAW_NO_Z		0x00000001
+#define BCI_CMD_DRAW_NO_W		0x00000002
+#define BCI_CMD_DRAW_NO_CD		0x00000004
+#define BCI_CMD_DRAW_NO_CS		0x00000008
+#define BCI_CMD_DRAW_NO_U0		0x00000010
+#define BCI_CMD_DRAW_NO_V0		0x00000020
+#define BCI_CMD_DRAW_NO_UV0		0x00000030
+#define BCI_CMD_DRAW_NO_U1		0x00000040
+#define BCI_CMD_DRAW_NO_V1		0x00000080
+#define BCI_CMD_DRAW_NO_UV1		0x000000c0
+
+#define BCI_CMD_DMA			0xa8000000
+
+#define BCI_W_H(w, h)                ((((h) << 16) | (w)) & 0x0FFF0FFF)
+#define BCI_X_Y(x, y)                ((((y) << 16) | (x)) & 0x0FFF0FFF)
+#define BCI_X_W(x, w)                ((((w) << 16) | (x)) & 0x0FFF0FFF)
+#define BCI_CLIP_LR(l, r)            ((((r) << 16) | (l)) & 0x0FFF0FFF)
+#define BCI_CLIP_TL(t, l)            ((((t) << 16) | (l)) & 0x0FFF0FFF)
+#define BCI_CLIP_BR(b, r)            ((((b) << 16) | (r)) & 0x0FFF0FFF)
+
+#define BCI_LINE_X_Y(x, y)           (((y) << 16) | ((x) & 0xFFFF))
+#define BCI_LINE_STEPS(diag, axi)    (((axi) << 16) | ((diag) & 0xFFFF))
+#define BCI_LINE_MISC(maj, ym, xp, yp, err) \
+	(((maj) & 0x1FFF) | \
+	((ym) ? 1<<13 : 0) | \
+	((xp) ? 1<<14 : 0) | \
+	((yp) ? 1<<15 : 0) | \
+	((err) << 16))
+
+/*
+ * common commands
+ */
+#define BCI_SET_REGISTERS( first, n )			\
+	BCI_WRITE(BCI_CMD_SET_REGISTER |		\
+		  ((uint32_t)(n) & 0xff) << 16 |	\
+		  ((uint32_t)(first) & 0xffff))
+#define DMA_SET_REGISTERS( first, n )			\
+	DMA_WRITE(BCI_CMD_SET_REGISTER |		\
+		  ((uint32_t)(n) & 0xff) << 16 |	\
+		  ((uint32_t)(first) & 0xffff))
+
+#define BCI_DRAW_PRIMITIVE(n, type, skip)         \
+        BCI_WRITE(BCI_CMD_DRAW_PRIM | (type) | (skip) | \
+		  ((n) << 16))
+#define DMA_DRAW_PRIMITIVE(n, type, skip)         \
+        DMA_WRITE(BCI_CMD_DRAW_PRIM | (type) | (skip) | \
+		  ((n) << 16))
+
+#define BCI_DRAW_INDICES_S3D(n, type, i0)         \
+        BCI_WRITE(BCI_CMD_DRAW_INDEXED_PRIM | (type) |  \
+		  ((n) << 16) | (i0))
+
+#define BCI_DRAW_INDICES_S4(n, type, skip)        \
+        BCI_WRITE(BCI_CMD_DRAW_INDEXED_PRIM | (type) |  \
+                  (skip) | ((n) << 16))
+
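+/* n is the DMA length in 32-bit entries; the low bits appear to encode the
+ * transfer size in 64-bit units minus one */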
+#define BCI_DMA(n)	\
+	BCI_WRITE(BCI_CMD_DMA | (((n) >> 1) - 1))
+
+/*
+ * access to MMIO
+ */
+#define SAVAGE_READ(reg)	DRM_READ32(  dev_priv->mmio, (reg) )
+#define SAVAGE_WRITE(reg)	DRM_WRITE32( dev_priv->mmio, (reg) )
+
+/*
+ * access to the burst command interface (BCI)
+ */
+#define SAVAGE_BCI_DEBUG 1
+
+#define BCI_LOCALS    volatile uint32_t *bci_ptr;
+
+#define BEGIN_BCI( n ) do {			\
+	dev_priv->wait_fifo(dev_priv, (n));	\
+	bci_ptr = dev_priv->bci_ptr;		\
+} while(0)
+
+#define BCI_WRITE( val ) *bci_ptr++ = (uint32_t)(val)
+
+#define BCI_COPY_FROM_USER(src,n) do {				\
+    unsigned int i;						\
+    for (i = 0; i < n; ++i) {					\
+	uint32_t val;						\
+	DRM_GET_USER_UNCHECKED(val, &((uint32_t*)(src))[i]);	\
+	BCI_WRITE(val);						\
+    }								\
+} while(0)
+
+/*
+ * command DMA support
+ */
+#define SAVAGE_DMA_DEBUG 1
+
+#define DMA_LOCALS   uint32_t *dma_ptr;
+
+#define BEGIN_DMA( n ) do {						\
+	unsigned int cur = dev_priv->current_dma_page;			\
+	unsigned int rest = SAVAGE_DMA_PAGE_SIZE -			\
+		dev_priv->dma_pages[cur].used;				\
+	if ((n) > rest) {						\
+		dma_ptr = savage_dma_alloc(dev_priv, (n));		\
+	} else { /* fast path for small allocations */			\
+		dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +	\
+			cur * SAVAGE_DMA_PAGE_SIZE +			\
+			dev_priv->dma_pages[cur].used;			\
+		if (dev_priv->dma_pages[cur].used == 0)			\
+			savage_dma_wait(dev_priv, cur);			\
+		dev_priv->dma_pages[cur].used += (n);			\
+	}								\
+} while(0)
+
+#define DMA_WRITE( val ) *dma_ptr++ = (uint32_t)(val)
+
+#define DMA_COPY_FROM_USER(src,n) do {				\
+	DRM_COPY_FROM_USER_UNCHECKED(dma_ptr, (src), (n)*4);	\
+	dma_ptr += n;						\
+} while(0)
+
+#if SAVAGE_DMA_DEBUG
+#define DMA_COMMIT() do {						\
+	unsigned int cur = dev_priv->current_dma_page;			\
+	uint32_t *expected = (uint32_t *)dev_priv->cmd_dma->handle +	\
+			cur * SAVAGE_DMA_PAGE_SIZE +			\
+			dev_priv->dma_pages[cur].used;			\
+	if (dma_ptr != expected) {					\
+		DRM_ERROR("DMA allocation and use don't match: "	\
+			  "%p != %p\n", expected, dma_ptr);		\
+		savage_dma_reset(dev_priv);				\
+	}								\
+} while(0)
+#else
+#define DMA_COMMIT() do {/* nothing */} while(0)
+#endif
+
+#define DMA_FLUSH() dev_priv->dma_flush(dev_priv)
+
+/* Buffer aging via event tag
+ */
+
+#define UPDATE_EVENT_COUNTER( ) do {			\
+	if (dev_priv->status_ptr) {			\
+		uint16_t count;				\
+		/* coordinate with Xserver */		\
+		count = dev_priv->status_ptr[1023];	\
+		if (count < dev_priv->event_counter)	\
+			dev_priv->event_wrap++;		\
+		dev_priv->event_counter = count;	\
+	}						\
+} while(0)
+
+#define SET_AGE( age, e, w ) do {	\
+	(age)->event = e;		\
+	(age)->wrap = w;		\
+} while(0)
+
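+/* true once the hardware has reached or passed the recorded (event, wrap) */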
+#define TEST_AGE( age, e, w )				\
+	( (age)->wrap < (w) || ( (age)->wrap == (w) && (age)->event <= (e) ) )
+
+#endif /* __SAVAGE_DRV_H__ */
diff --git a/drivers/char/drm/savage_state.c b/drivers/char/drm/savage_state.c
new file mode 100644
index 0000000..475695a
--- /dev/null
+++ b/drivers/char/drm/savage_state.c
@@ -0,0 +1,1146 @@
+/* savage_state.c -- State and drawing support for Savage
+ *
+ * Copyright 2004  Felix Kuehling
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
+ * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include "drmP.h"
+#include "savage_drm.h"
+#include "savage_drv.h"
+
+void savage_emit_clip_rect_s3d(drm_savage_private_t *dev_priv,
+			       drm_clip_rect_t *pbox)
+{
+	uint32_t scstart = dev_priv->state.s3d.new_scstart;
+	uint32_t scend   = dev_priv->state.s3d.new_scend;
+	scstart = (scstart & ~SAVAGE_SCISSOR_MASK_S3D) |
+		((uint32_t)pbox->x1 & 0x000007ff) | 
+		(((uint32_t)pbox->y1 << 16) & 0x07ff0000);
+	scend   = (scend   & ~SAVAGE_SCISSOR_MASK_S3D) |
+		(((uint32_t)pbox->x2-1) & 0x000007ff) |
+		((((uint32_t)pbox->y2-1) << 16) & 0x07ff0000);
+	if (scstart != dev_priv->state.s3d.scstart ||
+	    scend   != dev_priv->state.s3d.scend) {
+		DMA_LOCALS;
+		BEGIN_DMA(4);
+		DMA_WRITE(BCI_CMD_WAIT|BCI_CMD_WAIT_3D);
+		DMA_SET_REGISTERS(SAVAGE_SCSTART_S3D, 2);
+		DMA_WRITE(scstart);
+		DMA_WRITE(scend);
+		dev_priv->state.s3d.scstart = scstart;
+		dev_priv->state.s3d.scend   = scend;
+		dev_priv->waiting = 1;
+		DMA_COMMIT();
+	}
+}
+
+void savage_emit_clip_rect_s4(drm_savage_private_t *dev_priv,
+			      drm_clip_rect_t *pbox)
+{
+	uint32_t drawctrl0 = dev_priv->state.s4.new_drawctrl0;
+	uint32_t drawctrl1 = dev_priv->state.s4.new_drawctrl1;
+	drawctrl0 = (drawctrl0 & ~SAVAGE_SCISSOR_MASK_S4) |
+		((uint32_t)pbox->x1 & 0x000007ff) |
+		(((uint32_t)pbox->y1 << 12) & 0x00fff000);
+	drawctrl1 = (drawctrl1 & ~SAVAGE_SCISSOR_MASK_S4) |
+		(((uint32_t)pbox->x2-1) & 0x000007ff) |
+		((((uint32_t)pbox->y2-1) << 12) & 0x00fff000);
+	if (drawctrl0 != dev_priv->state.s4.drawctrl0 ||
+	    drawctrl1 != dev_priv->state.s4.drawctrl1) {
+		DMA_LOCALS;
+		BEGIN_DMA(4);
+		DMA_WRITE(BCI_CMD_WAIT|BCI_CMD_WAIT_3D);
+		DMA_SET_REGISTERS(SAVAGE_DRAWCTRL0_S4, 2);
+		DMA_WRITE(drawctrl0);
+		DMA_WRITE(drawctrl1);
+		dev_priv->state.s4.drawctrl0 = drawctrl0;
+		dev_priv->state.s4.drawctrl1 = drawctrl1;
+		dev_priv->waiting = 1;
+		DMA_COMMIT();
+	}
+}
+
+static int savage_verify_texaddr(drm_savage_private_t *dev_priv, int unit,
+				 uint32_t addr)
+{
+	if ((addr & 6) != 2) { /* reserved bits */
+		DRM_ERROR("bad texAddr%d %08x (reserved bits)\n", unit, addr);
+		return DRM_ERR(EINVAL);
+	}
+	if (!(addr & 1)) { /* local */
+		addr &= ~7;
+		if (addr <  dev_priv->texture_offset ||
+		    addr >= dev_priv->texture_offset+dev_priv->texture_size) {
+			DRM_ERROR("bad texAddr%d %08x (local addr out of range)\n",
+				  unit, addr);
+			return DRM_ERR(EINVAL);
+		}
+	} else { /* AGP */
+		if (!dev_priv->agp_textures) {
+			DRM_ERROR("bad texAddr%d %08x (AGP not available)\n",
+				  unit, addr);
+			return DRM_ERR(EINVAL);
+		}
+		addr &= ~7;
+		if (addr < dev_priv->agp_textures->offset ||
+		    addr >= (dev_priv->agp_textures->offset +
+			     dev_priv->agp_textures->size)) {
+			DRM_ERROR("bad texAddr%d %08x (AGP addr out of range)\n",
+				  unit, addr);
+			return DRM_ERR(EINVAL);
+		}
+	}
+	return 0;
+}
+
+#define SAVE_STATE(reg,where)			\
+	if(start <= reg && start+count > reg)	\
+		DRM_GET_USER_UNCHECKED(dev_priv->state.where, &regs[reg-start])
+#define SAVE_STATE_MASK(reg,where,mask) do {			\
+	if(start <= reg && start+count > reg) {			\
+		uint32_t tmp;					\
+		DRM_GET_USER_UNCHECKED(tmp, &regs[reg-start]);	\
+		dev_priv->state.where = (tmp & (mask)) |	\
+			(dev_priv->state.where & ~(mask));	\
+	}							\
+} while (0)
+static int savage_verify_state_s3d(drm_savage_private_t *dev_priv,
+				   unsigned int start, unsigned int count,
+				   const uint32_t __user *regs)
+{
+	if (start < SAVAGE_TEXPALADDR_S3D ||
+	    start+count-1 > SAVAGE_DESTTEXRWWATERMARK_S3D) {
+		DRM_ERROR("invalid register range (0x%04x-0x%04x)\n",
+			  start, start+count-1);
+		return DRM_ERR(EINVAL);
+	}
+
+	SAVE_STATE_MASK(SAVAGE_SCSTART_S3D, s3d.new_scstart,
+			~SAVAGE_SCISSOR_MASK_S3D);
+	SAVE_STATE_MASK(SAVAGE_SCEND_S3D, s3d.new_scend,
+			~SAVAGE_SCISSOR_MASK_S3D);
+
+	/* if any texture regs were changed ... */
+	if (start <= SAVAGE_TEXCTRL_S3D &&
+	    start+count > SAVAGE_TEXPALADDR_S3D) {
+		/* ... check texture state */
+		SAVE_STATE(SAVAGE_TEXCTRL_S3D, s3d.texctrl);
+		SAVE_STATE(SAVAGE_TEXADDR_S3D, s3d.texaddr);
+		if (dev_priv->state.s3d.texctrl & SAVAGE_TEXCTRL_TEXEN_MASK)
+			return savage_verify_texaddr(
+				dev_priv, 0, dev_priv->state.s3d.texaddr);
+	}
+
+	return 0;
+}
+
+static int savage_verify_state_s4(drm_savage_private_t *dev_priv,
+				  unsigned int start, unsigned int count,
+				  const uint32_t __user *regs)
+{
+	int ret = 0;
+
+	if (start < SAVAGE_DRAWLOCALCTRL_S4 ||
+	    start+count-1 > SAVAGE_TEXBLENDCOLOR_S4) {
+		DRM_ERROR("invalid register range (0x%04x-0x%04x)\n",
+			  start, start+count-1);
+		return DRM_ERR(EINVAL);
+	}
+
+	SAVE_STATE_MASK(SAVAGE_DRAWCTRL0_S4, s4.new_drawctrl0,
+			~SAVAGE_SCISSOR_MASK_S4);
+	SAVE_STATE_MASK(SAVAGE_DRAWCTRL1_S4, s4.new_drawctrl1,
+			~SAVAGE_SCISSOR_MASK_S4);
+
+	/* if any texture regs were changed ... */
+	if (start <= SAVAGE_TEXDESCR_S4 &&
+	    start+count > SAVAGE_TEXPALADDR_S4) {
+		/* ... check texture state */
+		SAVE_STATE(SAVAGE_TEXDESCR_S4, s4.texdescr);
+		SAVE_STATE(SAVAGE_TEXADDR0_S4, s4.texaddr0);
+		SAVE_STATE(SAVAGE_TEXADDR1_S4, s4.texaddr1);
+		if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX0EN_MASK)
+			ret |= savage_verify_texaddr(
+				dev_priv, 0, dev_priv->state.s4.texaddr0);
+		if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX1EN_MASK)
+			ret |= savage_verify_texaddr(
+				dev_priv, 1, dev_priv->state.s4.texaddr1);
+	}
+
+	return ret;
+}
+#undef SAVE_STATE
+#undef SAVE_STATE_MASK
+
+static int savage_dispatch_state(drm_savage_private_t *dev_priv,
+				 const drm_savage_cmd_header_t *cmd_header,
+				 const uint32_t __user *regs)
+{
+	unsigned int count = cmd_header->state.count;
+	unsigned int start = cmd_header->state.start;
+	unsigned int count2 = 0;
+	unsigned int bci_size;
+	int ret;
+	DMA_LOCALS;
+
+	if (!count)
+		return 0;
+
+	if (DRM_VERIFYAREA_READ(regs, count*4))
+		return DRM_ERR(EFAULT);
+
+	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
+		ret = savage_verify_state_s3d(dev_priv, start, count, regs);
+		if (ret != 0)
+			return ret;
+		/* scissor regs are emitted in savage_dispatch_draw */
+		if (start < SAVAGE_SCSTART_S3D) {
+			if (start+count > SAVAGE_SCEND_S3D+1)
+				count2 = count - (SAVAGE_SCEND_S3D+1 - start);
+			if (start+count > SAVAGE_SCSTART_S3D)
+				count = SAVAGE_SCSTART_S3D - start;
+		} else if (start <= SAVAGE_SCEND_S3D) {
+			if (start+count > SAVAGE_SCEND_S3D+1) {
+				count -= SAVAGE_SCEND_S3D+1 - start;
+				start = SAVAGE_SCEND_S3D+1;
+			} else
+				return 0;
+		}
+	} else {
+		ret = savage_verify_state_s4(dev_priv, start, count, regs);
+		if (ret != 0)
+			return ret;
+		/* scissor regs are emitted in savage_dispatch_draw */
+		if (start < SAVAGE_DRAWCTRL0_S4) {
+			if (start+count > SAVAGE_DRAWCTRL1_S4+1)
+				count2 = count - (SAVAGE_DRAWCTRL1_S4+1 - start);
+			if (start+count > SAVAGE_DRAWCTRL0_S4)
+				count = SAVAGE_DRAWCTRL0_S4 - start;
+		} else if (start <= SAVAGE_DRAWCTRL1_S4) {
+			if (start+count > SAVAGE_DRAWCTRL1_S4+1) {
+				count -= SAVAGE_DRAWCTRL1_S4+1 - start;
+				start = SAVAGE_DRAWCTRL1_S4+1;
+			} else
+				return 0;
+		}
+	}
+
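+	/* Each chunk of up to 255 registers needs one BCI set-register
+	 * header in addition to the register values themselves. */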
+	bci_size = count + (count+254)/255 + count2 + (count2+254)/255;
+
+	if (cmd_header->state.global) {
+		BEGIN_DMA(bci_size+1);
+		DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
+		dev_priv->waiting = 1;
+	} else {
+		BEGIN_DMA(bci_size);
+	}
+
+	do {
+		while (count > 0) {
+			unsigned int n = count < 255 ? count : 255;
+			DMA_SET_REGISTERS(start, n);
+			DMA_COPY_FROM_USER(regs, n);
+			count -= n;
+			start += n;
+			regs += n;
+		}
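+		/* Skip the two scissor registers; they are emitted in
+		 * savage_dispatch_draw along with the drawing commands. */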
+		start += 2;
+		regs += 2;
+		count = count2;
+		count2 = 0;
+	} while (count);
+
+	DMA_COMMIT();
+
+	return 0;
+}
+
+static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv,
+				    const drm_savage_cmd_header_t *cmd_header,
+				    const drm_buf_t *dmabuf)
+{
+	unsigned char reorder = 0;
+	unsigned int prim = cmd_header->prim.prim;
+	unsigned int skip = cmd_header->prim.skip;
+	unsigned int n = cmd_header->prim.count;
+	unsigned int start = cmd_header->prim.start;
+	unsigned int i;
+	BCI_LOCALS;
+
+	if (!dmabuf) {
+		DRM_ERROR("called without dma buffers!\n");
+		return DRM_ERR(EINVAL);
+	}
+
+	if (!n)
+		return 0;
+
+	switch (prim) {
+	case SAVAGE_PRIM_TRILIST_201:
+		reorder = 1;
+		prim = SAVAGE_PRIM_TRILIST;
+	case SAVAGE_PRIM_TRILIST:
+		if (n % 3 != 0) {
+			DRM_ERROR("wrong number of vertices %u in TRILIST\n",
+				  n);
+			return DRM_ERR(EINVAL);
+		}
+		break;
+	case SAVAGE_PRIM_TRISTRIP:
+	case SAVAGE_PRIM_TRIFAN:
+		if (n < 3) {
+			DRM_ERROR("wrong number of vertices %u in TRIFAN/STRIP\n",
+				  n);
+			return DRM_ERR(EINVAL);
+		}
+		break;
+	default:
+		DRM_ERROR("invalid primitive type %u\n", prim);
+		return DRM_ERR(EINVAL);
+	}
+
+	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
+		if (skip != 0) {
+			DRM_ERROR("invalid skip flags 0x%04x for DMA\n",
+				  skip);
+			return DRM_ERR(EINVAL);
+		}
+	} else {
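+		/* Each skip flag removes one dword from the full 10-dword
+		 * Savage4 vertex; vertex DMA requires 8-dword vertices. */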
+		unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) -
+			(skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) -
+			(skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1);
+		if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) {
+			DRM_ERROR("invalid skip flags 0x%04x for DMA\n",
+				  skip);
+			return DRM_ERR(EINVAL);
+		}
+		if (reorder) {
+			DRM_ERROR("TRILIST_201 used on Savage4 hardware\n");
+			return DRM_ERR(EINVAL);
+		}
+	}
+
+	if (start + n > dmabuf->total/32) {
+		DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
+			  start, start + n - 1, dmabuf->total/32);
+		return DRM_ERR(EINVAL);
+	}
+
+	/* Vertex DMA doesn't work with command DMA at the same time,
+	 * so we use BCI_... to submit commands here. Flush buffered
+	 * faked DMA first. */
+	DMA_FLUSH();
+
+	if (dmabuf->bus_address != dev_priv->state.common.vbaddr) {
+		BEGIN_BCI(2);
+		BCI_SET_REGISTERS(SAVAGE_VERTBUFADDR, 1);
+		BCI_WRITE(dmabuf->bus_address | dev_priv->dma_type);
+		dev_priv->state.common.vbaddr = dmabuf->bus_address;
+	}
+	if (S3_SAVAGE3D_SERIES(dev_priv->chipset) && dev_priv->waiting) {
+		/* Workaround for what looks like a hardware bug. If a
+		 * WAIT_3D_IDLE was emitted some time before the
+		 * indexed drawing command then the engine will lock
+		 * up. There are two known workarounds:
+		 * WAIT_IDLE_EMPTY or emit at least 63 NOPs. */
+		BEGIN_BCI(63);
+		for (i = 0; i < 63; ++i)
+			BCI_WRITE(BCI_CMD_WAIT);
+		dev_priv->waiting = 0;
+	}
+
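+	/* Shift the primitive type into its field of the BCI drawing
+	 * command word; BCI_CMD_DRAW_CONT is OR'ed in for continuations. */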
+	prim <<= 25;
+	while (n != 0) {
+		/* Can emit up to 255 indices (85 triangles) at once. */
+		unsigned int count = n > 255 ? 255 : n;
+		if (reorder) {
+			/* Need to reorder indices for correct flat
+			 * shading while preserving the clock sense
+			 * for correct culling. Only on Savage3D. */
+			int reorder[3] = {-1, -1, -1};
+			reorder[start%3] = 2;
+
+			BEGIN_BCI((count+1+1)/2);
+			BCI_DRAW_INDICES_S3D(count, prim, start+2);
+
+			for (i = start+1; i+1 < start+count; i += 2)
+				BCI_WRITE((i + reorder[i % 3]) |
+					  ((i+1 + reorder[(i+1) % 3]) << 16));
+			if (i < start+count)
+				BCI_WRITE(i + reorder[i%3]);
+		} else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
+			BEGIN_BCI((count+1+1)/2);
+			BCI_DRAW_INDICES_S3D(count, prim, start);
+
+			for (i = start+1; i+1 < start+count; i += 2)
+				BCI_WRITE(i | ((i+1) << 16));
+			if (i < start+count)
+				BCI_WRITE(i);
+		} else {
+			BEGIN_BCI((count+2+1)/2);
+			BCI_DRAW_INDICES_S4(count, prim, skip);
+
+			for (i = start; i+1 < start+count; i += 2)
+				BCI_WRITE(i | ((i+1) << 16));
+			if (i < start+count)
+				BCI_WRITE(i);
+		}
+
+		start += count;
+		n -= count;
+
+		prim |= BCI_CMD_DRAW_CONT;
+	}
+
+	return 0;
+}
+
+static int savage_dispatch_vb_prim(drm_savage_private_t *dev_priv,
+				   const drm_savage_cmd_header_t *cmd_header,
+				   const uint32_t __user *vtxbuf,
+				   unsigned int vb_size,
+				   unsigned int vb_stride)
+{
+	unsigned char reorder = 0;
+	unsigned int prim = cmd_header->prim.prim;
+	unsigned int skip = cmd_header->prim.skip;
+	unsigned int n = cmd_header->prim.count;
+	unsigned int start = cmd_header->prim.start;
+	unsigned int vtx_size;
+	unsigned int i;
+	DMA_LOCALS;
+
+	if (!n)
+		return 0;
+
+	switch (prim) {
+	case SAVAGE_PRIM_TRILIST_201:
+		reorder = 1;
+		prim = SAVAGE_PRIM_TRILIST;
+	case SAVAGE_PRIM_TRILIST:
+		if (n % 3 != 0) {
+			DRM_ERROR("wrong number of vertices %u in TRILIST\n",
+				  n);
+			return DRM_ERR(EINVAL);
+		}
+		break;
+	case SAVAGE_PRIM_TRISTRIP:
+	case SAVAGE_PRIM_TRIFAN:
+		if (n < 3) {
+			DRM_ERROR("wrong number of vertices %u in TRIFAN/STRIP\n",
+				  n);
+			return DRM_ERR(EINVAL);
+		}
+		break;
+	default:
+		DRM_ERROR("invalid primitive type %u\n", prim);
+		return DRM_ERR(EINVAL);
+	}
+
+	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
+		if (skip > SAVAGE_SKIP_ALL_S3D) {
+			DRM_ERROR("invalid skip flags 0x%04x\n", skip);
+			return DRM_ERR(EINVAL);
+		}
+		vtx_size = 8; /* full vertex */
+	} else {
+		if (skip > SAVAGE_SKIP_ALL_S4) {
+			DRM_ERROR("invalid skip flags 0x%04x\n", skip);
+			return DRM_ERR(EINVAL);
+		}
+		vtx_size = 10; /* full vertex */
+	}
+
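+	/* Each set skip flag drops one dword from the full vertex size. */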
+	vtx_size -= (skip & 1) + (skip >> 1 & 1) +
+		(skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) +
+		(skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1);
+
+	if (vtx_size > vb_stride) {
+		DRM_ERROR("vertex size greater than vb stride (%u > %u)\n",
+			  vtx_size, vb_stride);
+		return DRM_ERR(EINVAL);
+	}
+
+	if (start + n > vb_size / (vb_stride*4)) {
+		DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
+			  start, start + n - 1, vb_size / (vb_stride*4));
+		return DRM_ERR(EINVAL);
+	}
+
+	prim <<= 25;
+	while (n != 0) {
+		/* Can emit up to 255 vertices (85 triangles) at once. */
+		unsigned int count = n > 255 ? 255 : n;
+		if (reorder) {
+			/* Need to reorder vertices for correct flat
+			 * shading while preserving the clock sense
+			 * for correct culling. Only on Savage3D. */
+			int reorder[3] = {-1, -1, -1};
+			reorder[start%3] = 2;
+
+			BEGIN_DMA(count*vtx_size+1);
+			DMA_DRAW_PRIMITIVE(count, prim, skip);
+
+			for (i = start; i < start+count; ++i) {
+				unsigned int j = i + reorder[i % 3];
+				DMA_COPY_FROM_USER(&vtxbuf[vb_stride*j],
+						   vtx_size);
+			}
+
+			DMA_COMMIT();
+		} else {
+			BEGIN_DMA(count*vtx_size+1);
+			DMA_DRAW_PRIMITIVE(count, prim, skip);
+
+			if (vb_stride == vtx_size) {
+				DMA_COPY_FROM_USER(&vtxbuf[vb_stride*start],
+						   vtx_size*count);
+			} else {
+				for (i = start; i < start+count; ++i) {
+					DMA_COPY_FROM_USER(
+						&vtxbuf[vb_stride*i],
+						vtx_size);
+				}
+			}
+
+			DMA_COMMIT();
+		}
+
+		start += count;
+		n -= count;
+
+		prim |= BCI_CMD_DRAW_CONT;
+	}
+
+	return 0;
+}
+
+static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv,
+				   const drm_savage_cmd_header_t *cmd_header,
+				   const uint16_t __user *usr_idx,
+				   const drm_buf_t *dmabuf)
+{
+	unsigned char reorder = 0;
+	unsigned int prim = cmd_header->idx.prim;
+	unsigned int skip = cmd_header->idx.skip;
+	unsigned int n = cmd_header->idx.count;
+	unsigned int i;
+	BCI_LOCALS;
+
+	if (!dmabuf) {
+		DRM_ERROR("called without dma buffers!\n");
+		return DRM_ERR(EINVAL);
+	}
+
+	if (!n)
+		return 0;
+
+	switch (prim) {
+	case SAVAGE_PRIM_TRILIST_201:
+		reorder = 1;
+		prim = SAVAGE_PRIM_TRILIST;
+	case SAVAGE_PRIM_TRILIST:
+		if (n % 3 != 0) {
+			DRM_ERROR("wrong number of indices %u in TRILIST\n",
+				  n);
+			return DRM_ERR(EINVAL);
+		}
+		break;
+	case SAVAGE_PRIM_TRISTRIP:
+	case SAVAGE_PRIM_TRIFAN:
+		if (n < 3) {
+			DRM_ERROR("wrong number of indices %u in TRIFAN/STRIP\n",
+				  n);
+			return DRM_ERR(EINVAL);
+		}
+		break;
+	default:
+		DRM_ERROR("invalid primitive type %u\n", prim);
+		return DRM_ERR(EINVAL);
+	}
+
+	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
+		if (skip != 0) {
+			DRM_ERROR("invalid skip flags 0x%04x for DMA\n",
+				  skip);
+			return DRM_ERR(EINVAL);
+		}
+	} else {
+		unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) -
+			(skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) -
+			(skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1);
+		if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) {
+			DRM_ERROR("invalid skip flags 0x%04x for DMA\n",
+				  skip);
+			return DRM_ERR(EINVAL);
+		}
+		if (reorder) {
+			DRM_ERROR("TRILIST_201 used on Savage4 hardware\n");
+			return DRM_ERR(EINVAL);
+		}
+	}
+
+	/* Vertex DMA doesn't work with command DMA at the same time,
+	 * so we use BCI_... to submit commands here. Flush buffered
+	 * faked DMA first. */
+	DMA_FLUSH();
+
+	if (dmabuf->bus_address != dev_priv->state.common.vbaddr) {
+		BEGIN_BCI(2);
+		BCI_SET_REGISTERS(SAVAGE_VERTBUFADDR, 1);
+		BCI_WRITE(dmabuf->bus_address | dev_priv->dma_type);
+		dev_priv->state.common.vbaddr = dmabuf->bus_address;
+	}
+	if (S3_SAVAGE3D_SERIES(dev_priv->chipset) && dev_priv->waiting) {
+		/* Workaround for what looks like a hardware bug. If a
+		 * WAIT_3D_IDLE was emitted some time before the
+		 * indexed drawing command then the engine will lock
+		 * up. There are two known workarounds:
+		 * WAIT_IDLE_EMPTY or emit at least 63 NOPs. */
+		BEGIN_BCI(63);
+		for (i = 0; i < 63; ++i)
+			BCI_WRITE(BCI_CMD_WAIT);
+		dev_priv->waiting = 0;
+	}
+
+	prim <<= 25;
+	while (n != 0) {
+		/* Can emit up to 255 indices (85 triangles) at once. */
+		unsigned int count = n > 255 ? 255 : n;
+		/* Is it ok to allocate 510 bytes on the stack in an ioctl? */
+		uint16_t idx[255];
+
+		/* Copy and check indices */
+		DRM_COPY_FROM_USER_UNCHECKED(idx, usr_idx, count*2);
+		for (i = 0; i < count; ++i) {
+			if (idx[i] > dmabuf->total/32) {
+				DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
+					  i, idx[i], dmabuf->total/32);
+				return DRM_ERR(EINVAL);
+			}
+		}
+
+		if (reorder) {
+			/* Need to reorder indices for correct flat
+			 * shading while preserving the clock sense
+			 * for correct culling. Only on Savage3D. */
+			int reorder[3] = {2, -1, -1};
+
+			BEGIN_BCI((count+1+1)/2);
+			BCI_DRAW_INDICES_S3D(count, prim, idx[2]);
+
+			for (i = 1; i+1 < count; i += 2)
+				BCI_WRITE(idx[i + reorder[i % 3]] |
+					  (idx[i+1 + reorder[(i+1) % 3]] << 16));
+			if (i < count)
+				BCI_WRITE(idx[i + reorder[i%3]]);
+		} else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
+			BEGIN_BCI((count+1+1)/2);
+			BCI_DRAW_INDICES_S3D(count, prim, idx[0]);
+
+			for (i = 1; i+1 < count; i += 2)
+				BCI_WRITE(idx[i] | (idx[i+1] << 16));
+			if (i < count)
+				BCI_WRITE(idx[i]);
+		} else {
+			BEGIN_BCI((count+2+1)/2);
+			BCI_DRAW_INDICES_S4(count, prim, skip);
+
+			for (i = 0; i+1 < count; i += 2)
+				BCI_WRITE(idx[i] | (idx[i+1] << 16));
+			if (i < count)
+				BCI_WRITE(idx[i]);
+		}
+
+		usr_idx += count;
+		n -= count;
+
+		prim |= BCI_CMD_DRAW_CONT;
+	}
+
+	return 0;
+}
+
+static int savage_dispatch_vb_idx(drm_savage_private_t *dev_priv,
+				  const drm_savage_cmd_header_t *cmd_header,
+				  const uint16_t __user *usr_idx,
+				  const uint32_t __user *vtxbuf,
+				  unsigned int vb_size,
+				  unsigned int vb_stride)
+{
+	unsigned char reorder = 0;
+	unsigned int prim = cmd_header->idx.prim;
+	unsigned int skip = cmd_header->idx.skip;
+	unsigned int n = cmd_header->idx.count;
+	unsigned int vtx_size;
+	unsigned int i;
+	DMA_LOCALS;
+
+	if (!n)
+		return 0;
+
+	switch (prim) {
+	case SAVAGE_PRIM_TRILIST_201:
+		reorder = 1;
+		prim = SAVAGE_PRIM_TRILIST;
+	case SAVAGE_PRIM_TRILIST:
+		if (n % 3 != 0) {
+			DRM_ERROR("wrong number of indices %u in TRILIST\n",
+				  n);
+			return DRM_ERR(EINVAL);
+		}
+		break;
+	case SAVAGE_PRIM_TRISTRIP:
+	case SAVAGE_PRIM_TRIFAN:
+		if (n < 3) {
+			DRM_ERROR("wrong number of indices %u in TRIFAN/STRIP\n",
+				  n);
+			return DRM_ERR(EINVAL);
+		}
+		break;
+	default:
+		DRM_ERROR("invalid primitive type %u\n", prim);
+		return DRM_ERR(EINVAL);
+	}
+
+	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
+		if (skip > SAVAGE_SKIP_ALL_S3D) {
+			DRM_ERROR("invalid skip flags 0x%04x\n", skip);
+			return DRM_ERR(EINVAL);
+		}
+		vtx_size = 8; /* full vertex */
+	} else {
+		if (skip > SAVAGE_SKIP_ALL_S4) {
+			DRM_ERROR("invalid skip flags 0x%04x\n", skip);
+			return DRM_ERR(EINVAL);
+		}
+		vtx_size = 10; /* full vertex */
+	}
+
+	vtx_size -= (skip & 1) + (skip >> 1 & 1) +
+		(skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) +
+		(skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1);
+
+	if (vtx_size > vb_stride) {
+		DRM_ERROR("vertex size greater than vb stride (%u > %u)\n",
+			  vtx_size, vb_stride);
+		return DRM_ERR(EINVAL);
+	}
+
+	prim <<= 25;
+	while (n != 0) {
+		/* Can emit up to 255 vertices (85 triangles) at once. */
+		unsigned int count = n > 255 ? 255 : n;
+		/* Is it ok to allocate 510 bytes on the stack in an ioctl? */
+		uint16_t idx[255];
+
+		/* Copy and check indices */
+		DRM_COPY_FROM_USER_UNCHECKED(idx, usr_idx, count*2);
+		for (i = 0; i < count; ++i) {
+			if (idx[i] > vb_size / (vb_stride*4)) {
+				DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
+					  i, idx[i],  vb_size / (vb_stride*4));
+				return DRM_ERR(EINVAL);
+			}
+		}
+
+		if (reorder) {
+			/* Need to reorder vertices for correct flat
+			 * shading while preserving the clock sense
+			 * for correct culling. Only on Savage3D. */
+			int reorder[3] = {2, -1, -1};
+
+			BEGIN_DMA(count*vtx_size+1);
+			DMA_DRAW_PRIMITIVE(count, prim, skip);
+
+			for (i = 0; i < count; ++i) {
+				unsigned int j = idx[i + reorder[i % 3]];
+				DMA_COPY_FROM_USER(&vtxbuf[vb_stride*j],
+						   vtx_size);
+			}
+
+			DMA_COMMIT();
+		} else {
+			BEGIN_DMA(count*vtx_size+1);
+			DMA_DRAW_PRIMITIVE(count, prim, skip);
+
+			for (i = 0; i < count; ++i) {
+				unsigned int j = idx[i];
+				DMA_COPY_FROM_USER(&vtxbuf[vb_stride*j],
+						   vtx_size);
+			}
+
+			DMA_COMMIT();
+		}
+
+		usr_idx += count;
+		n -= count;
+
+		prim |= BCI_CMD_DRAW_CONT;
+	}
+
+	return 0;
+}
+
+static int savage_dispatch_clear(drm_savage_private_t *dev_priv,
+				 const drm_savage_cmd_header_t *cmd_header,
+				 const drm_savage_cmd_header_t __user *data,
+				 unsigned int nbox,
+				 const drm_clip_rect_t __user *usr_boxes)
+{
+	unsigned int flags = cmd_header->clear0.flags, mask, value;
+	unsigned int clear_cmd;
+	unsigned int i, nbufs;
+	DMA_LOCALS;
+
+	if (nbox == 0)
+		return 0;
+
+	DRM_GET_USER_UNCHECKED(mask, &((const drm_savage_cmd_header_t*)data)
+			       ->clear1.mask);
+	DRM_GET_USER_UNCHECKED(value, &((const drm_savage_cmd_header_t*)data)
+			       ->clear1.value);
+
+	clear_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP |
+		BCI_CMD_SEND_COLOR | BCI_CMD_DEST_PBD_NEW;
+	BCI_CMD_SET_ROP(clear_cmd, 0xCC);
+
+	nbufs = ((flags & SAVAGE_FRONT) ? 1 : 0) +
+		((flags & SAVAGE_BACK) ? 1 : 0) +
+		((flags & SAVAGE_DEPTH) ? 1 : 0);
+	if (nbufs == 0)
+		return 0;
+
+	if (mask != 0xffffffff) {
+		/* set mask */
+		BEGIN_DMA(2);
+		DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1);
+		DMA_WRITE(mask);
+		DMA_COMMIT();
+	}
+	for (i = 0; i < nbox; ++i) {
+		drm_clip_rect_t box;
+		unsigned int x, y, w, h;
+		unsigned int buf;
+		DRM_COPY_FROM_USER_UNCHECKED(&box, &usr_boxes[i], sizeof(box));
+		x = box.x1, y = box.y1;
+		w = box.x2 - box.x1;
+		h = box.y2 - box.y1;
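+		/* Six dwords per cleared buffer: command, destination
+		 * offset, buffer descriptor, fill value, position, size. */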
+		BEGIN_DMA(nbufs*6);
+		for (buf = SAVAGE_FRONT; buf <= SAVAGE_DEPTH; buf <<= 1) {
+			if (!(flags & buf))
+				continue;
+			DMA_WRITE(clear_cmd);
+			switch (buf) {
+			case SAVAGE_FRONT:
+				DMA_WRITE(dev_priv->front_offset);
+				DMA_WRITE(dev_priv->front_bd);
+				break;
+			case SAVAGE_BACK:
+				DMA_WRITE(dev_priv->back_offset);
+				DMA_WRITE(dev_priv->back_bd);
+				break;
+			case SAVAGE_DEPTH:
+				DMA_WRITE(dev_priv->depth_offset);
+				DMA_WRITE(dev_priv->depth_bd);
+				break;
+			}
+			DMA_WRITE(value);
+			DMA_WRITE(BCI_X_Y(x, y));
+			DMA_WRITE(BCI_W_H(w, h));
+		}
+		DMA_COMMIT();
+	}
+	if (mask != 0xffffffff) {
+		/* reset mask */
+		BEGIN_DMA(2);
+		DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1);
+		DMA_WRITE(0xffffffff);
+		DMA_COMMIT();
+	}
+
+	return 0;
+}
+
+static int savage_dispatch_swap(drm_savage_private_t *dev_priv,
+				unsigned int nbox,
+				const drm_clip_rect_t __user *usr_boxes)
+{
+	unsigned int swap_cmd;
+	unsigned int i;
+	DMA_LOCALS;
+
+	if (nbox == 0)
+		return 0;
+
+	swap_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP |
+		BCI_CMD_SRC_PBD_COLOR_NEW | BCI_CMD_DEST_GBD;
+	BCI_CMD_SET_ROP(swap_cmd, 0xCC);
+
+	for (i = 0; i < nbox; ++i) {
+		drm_clip_rect_t box;
+		DRM_COPY_FROM_USER_UNCHECKED(&box, &usr_boxes[i], sizeof(box));
+
+		BEGIN_DMA(6);
+		DMA_WRITE(swap_cmd);
+		DMA_WRITE(dev_priv->back_offset);
+		DMA_WRITE(dev_priv->back_bd);
+		DMA_WRITE(BCI_X_Y(box.x1, box.y1));
+		DMA_WRITE(BCI_X_Y(box.x1, box.y1));
+		DMA_WRITE(BCI_W_H(box.x2-box.x1, box.y2-box.y1));
+		DMA_COMMIT();
+	}
+
+	return 0;
+}
+
+static int savage_dispatch_draw(drm_savage_private_t *dev_priv,
+				const drm_savage_cmd_header_t __user *start,
+				const drm_savage_cmd_header_t __user *end,
+				const drm_buf_t *dmabuf,
+				const unsigned int __user *usr_vtxbuf,
+				unsigned int vb_size, unsigned int vb_stride,
+				unsigned int nbox,
+				const drm_clip_rect_t __user *usr_boxes)
+{
+	unsigned int i, j;
+	int ret;
+
+	for (i = 0; i < nbox; ++i) {
+		drm_clip_rect_t box;
+		const drm_savage_cmd_header_t __user *usr_cmdbuf;
+		DRM_COPY_FROM_USER_UNCHECKED(&box, &usr_boxes[i], sizeof(box));
+		dev_priv->emit_clip_rect(dev_priv, &box);
+
+		usr_cmdbuf = start;
+		while (usr_cmdbuf < end) {
+			drm_savage_cmd_header_t cmd_header;
+			DRM_COPY_FROM_USER_UNCHECKED(&cmd_header, usr_cmdbuf,
+						     sizeof(cmd_header));
+			usr_cmdbuf++;
+			switch (cmd_header.cmd.cmd) {
+			case SAVAGE_CMD_DMA_PRIM:
+				ret = savage_dispatch_dma_prim(
+					dev_priv, &cmd_header, dmabuf);
+				break;
+			case SAVAGE_CMD_VB_PRIM:
+				ret = savage_dispatch_vb_prim(
+					dev_priv, &cmd_header,
+					(const uint32_t __user *)usr_vtxbuf,
+					vb_size, vb_stride);
+				break;
+			case SAVAGE_CMD_DMA_IDX:
+				j = (cmd_header.idx.count + 3) / 4;
+				/* j was checked in savage_bci_cmdbuf */
+				ret = savage_dispatch_dma_idx(
+					dev_priv, &cmd_header,
+					(const uint16_t __user *)usr_cmdbuf,
+					dmabuf);
+				usr_cmdbuf += j;
+				break;
+			case SAVAGE_CMD_VB_IDX:
+				j = (cmd_header.idx.count + 3) / 4;
+				/* j was checked in savage_bci_cmdbuf */
+				ret = savage_dispatch_vb_idx(
+					dev_priv, &cmd_header,
+					(const uint16_t __user *)usr_cmdbuf,
+					(const uint32_t __user *)usr_vtxbuf,
+					vb_size, vb_stride);
+				usr_cmdbuf += j;
+				break;
+			default:
+				/* What's the best return code? EFAULT? */
+				DRM_ERROR("IMPLEMENTATION ERROR: "
+					  "non-drawing-command %d\n",
+					  cmd_header.cmd.cmd);
+				return DRM_ERR(EINVAL);
+			}
+
+			if (ret != 0)
+				return ret;
+		}
+	}
+
+	return 0;
+}
+
+int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
+{
+	DRM_DEVICE;
+	drm_savage_private_t *dev_priv = dev->dev_private;
+	drm_device_dma_t *dma = dev->dma;
+	drm_buf_t *dmabuf;
+	drm_savage_cmdbuf_t cmdbuf;
+	drm_savage_cmd_header_t __user *usr_cmdbuf;
+	drm_savage_cmd_header_t __user *first_draw_cmd;
+	unsigned int __user *usr_vtxbuf;
+	drm_clip_rect_t __user *usr_boxes;
+	unsigned int i, j;
+	int ret = 0;
+
+	DRM_DEBUG("\n");
+
+	LOCK_TEST_WITH_RETURN(dev, filp);
+
+	DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_savage_cmdbuf_t __user *)data,
+				 sizeof(cmdbuf));
+
+	if (dma && dma->buflist) {
+		if (cmdbuf.dma_idx >= dma->buf_count) {
+			DRM_ERROR("vertex buffer index %u out of range (0-%u)\n",
+				  cmdbuf.dma_idx, dma->buf_count-1);
+			return DRM_ERR(EINVAL);
+		}
+		dmabuf = dma->buflist[cmdbuf.dma_idx];
+	} else {
+		dmabuf = NULL;
+	}
+
+	usr_cmdbuf = (drm_savage_cmd_header_t __user *)cmdbuf.cmd_addr;
+	usr_vtxbuf = (unsigned int __user *)cmdbuf.vb_addr;
+	usr_boxes = (drm_clip_rect_t __user *)cmdbuf.box_addr;
+	if ((cmdbuf.size && DRM_VERIFYAREA_READ(usr_cmdbuf, cmdbuf.size*8)) ||
+	    (cmdbuf.vb_size && DRM_VERIFYAREA_READ(
+		    usr_vtxbuf, cmdbuf.vb_size)) ||
+	    (cmdbuf.nbox && DRM_VERIFYAREA_READ(
+		    usr_boxes, cmdbuf.nbox*sizeof(drm_clip_rect_t))))
+		return DRM_ERR(EFAULT);
+
+	/* Make sure writes to DMA buffers are finished before sending
+	 * DMA commands to the graphics hardware. */
+	DRM_MEMORYBARRIER();
+
+	/* Coming from user space. Don't know if the Xserver has
+	 * emitted wait commands. Assuming the worst. */
+	dev_priv->waiting = 1;
+
+	i = 0;
+	first_draw_cmd = NULL;
+	while (i < cmdbuf.size) {
+		drm_savage_cmd_header_t cmd_header;
+		DRM_COPY_FROM_USER_UNCHECKED(&cmd_header, usr_cmdbuf,
+					     sizeof(cmd_header));
+		usr_cmdbuf++;
+		i++;
+
+		/* Group drawing commands with same state to minimize
+		 * iterations over clip rects. */
+		j = 0;
+		switch (cmd_header.cmd.cmd) {
+		case SAVAGE_CMD_DMA_IDX:
+		case SAVAGE_CMD_VB_IDX:
+			j = (cmd_header.idx.count + 3) / 4;
+			if (i + j > cmdbuf.size) {
+				DRM_ERROR("indexed drawing command extends "
+					  "beyond end of command buffer\n");
+				DMA_FLUSH();
+				return DRM_ERR(EINVAL);
+			}
+			/* fall through */
+		case SAVAGE_CMD_DMA_PRIM:
+		case SAVAGE_CMD_VB_PRIM:
+			if (!first_draw_cmd)
+				first_draw_cmd = usr_cmdbuf-1;
+			usr_cmdbuf += j;
+			i += j;
+			break;
+		default:
+			if (first_draw_cmd) {
+				ret = savage_dispatch_draw(
+					dev_priv, first_draw_cmd, usr_cmdbuf-1,
+					dmabuf, usr_vtxbuf, cmdbuf.vb_size,
+					cmdbuf.vb_stride,
+					cmdbuf.nbox, usr_boxes);
+				if (ret != 0)
+					return ret;
+				first_draw_cmd = NULL;
+			}
+		}
+		if (first_draw_cmd)
+			continue;
+
+		switch (cmd_header.cmd.cmd) {
+		case SAVAGE_CMD_STATE:
+			j = (cmd_header.state.count + 1) / 2;
+			if (i + j > cmdbuf.size) {
+				DRM_ERROR("command SAVAGE_CMD_STATE extends "
+					  "beyond end of command buffer\n");
+				DMA_FLUSH();
+				return DRM_ERR(EINVAL);
+			}
+			ret = savage_dispatch_state(
+				dev_priv, &cmd_header,
+				(uint32_t __user *)usr_cmdbuf);
+			usr_cmdbuf += j;
+			i += j;
+			break;
+		case SAVAGE_CMD_CLEAR:
+			if (i + 1 > cmdbuf.size) {
+				DRM_ERROR("command SAVAGE_CMD_CLEAR extends "
+					  "beyond end of command buffer\n");
+				DMA_FLUSH();
+				return DRM_ERR(EINVAL);
+			}
+			ret = savage_dispatch_clear(dev_priv, &cmd_header,
+						    usr_cmdbuf,
+						    cmdbuf.nbox, usr_boxes);
+			usr_cmdbuf++;
+			i++;
+			break;
+		case SAVAGE_CMD_SWAP:
+			ret = savage_dispatch_swap(dev_priv,
+						   cmdbuf.nbox, usr_boxes);
+			break;
+		default:
+			DRM_ERROR("invalid command 0x%x\n", cmd_header.cmd.cmd);
+			DMA_FLUSH();
+			return DRM_ERR(EINVAL);
+		}
+
+		if (ret != 0) {
+			DMA_FLUSH();
+			return ret;
+		}
+	}
+
+	if (first_draw_cmd) {
+		ret = savage_dispatch_draw(
+			dev_priv, first_draw_cmd, usr_cmdbuf, dmabuf,
+			usr_vtxbuf, cmdbuf.vb_size, cmdbuf.vb_stride,
+			cmdbuf.nbox, usr_boxes);
+		if (ret != 0) {
+			DMA_FLUSH();
+			return ret;
+		}
+	}
+
+	DMA_FLUSH();
+
+	if (dmabuf && cmdbuf.discard) {
+		drm_savage_buf_priv_t *buf_priv = dmabuf->dev_private;
+		uint16_t event;
+		event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D);
+		SET_AGE(&buf_priv->age, event, dev_priv->event_wrap);
+		savage_freelist_put(dev, dmabuf);
+	}
+
+	return 0;
+}
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 79e8aa6..e0239a1 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1923,6 +1923,17 @@
 	  
 	  If in doubt, say Y.
 
+config SIS190
+	tristate "SiS190 gigabit ethernet support"
+	depends on PCI
+	select CRC32
+	select MII
+	---help---
+	  Say Y here if you have a SiS 190 PCI Gigabit Ethernet adapter.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called sis190.  This is recommended.
+
 config SKGE
 	tristate "New SysKonnect GigaEthernet support (EXPERIMENTAL)"
 	depends on PCI && EXPERIMENTAL
@@ -2093,6 +2104,25 @@
 menu "Ethernet (10000 Mbit)"
 	depends on !UML
 
+config CHELSIO_T1
+	tristate "Chelsio 10Gb Ethernet support"
+	depends on PCI
+	help
+	  This driver supports the Chelsio N110 and N210 10Gb Ethernet
+	  cards. More information about adapter features and performance
+	  tuning is in <file:Documentation/networking/cxgb.txt>.
+
+	  For general information about Chelsio and our products, visit
+	  our website at <http://www.chelsio.com>.
+
+	  For customer support, please visit our customer support page at
+	  <http://www.chelsio.com/support.htm>.
+
+	  Please send feedback to <linux-bugs@chelsio.com>.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called cxgb.
+
 config IXGB
 	tristate "Intel(R) PRO/10GbE support"
 	depends on PCI
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index a369ae2..5baafcd 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -9,6 +9,7 @@
 obj-$(CONFIG_E1000) += e1000/
 obj-$(CONFIG_IBM_EMAC) += ibm_emac/
 obj-$(CONFIG_IXGB) += ixgb/
+obj-$(CONFIG_CHELSIO_T1) += chelsio/
 obj-$(CONFIG_BONDING) += bonding/
 obj-$(CONFIG_GIANFAR) += gianfar_driver.o
 
@@ -42,6 +43,7 @@
 obj-$(CONFIG_E100) += e100.o
 obj-$(CONFIG_TLAN) += tlan.o
 obj-$(CONFIG_EPIC100) += epic100.o
+obj-$(CONFIG_SIS190) += sis190.o
 obj-$(CONFIG_SIS900) += sis900.o
 obj-$(CONFIG_YELLOWFIN) += yellowfin.o
 obj-$(CONFIG_ACENIC) += acenic.o
diff --git a/drivers/net/chelsio/Makefile b/drivers/net/chelsio/Makefile
new file mode 100644
index 0000000..91e9278
--- /dev/null
+++ b/drivers/net/chelsio/Makefile
@@ -0,0 +1,11 @@
+#
+# Chelsio 10Gb NIC driver for Linux.
+#
+
+obj-$(CONFIG_CHELSIO_T1) += cxgb.o
+
+EXTRA_CFLAGS += -I$(TOPDIR)/drivers/net/chelsio $(DEBUG_FLAGS)
+
+
+cxgb-objs := cxgb2.o espi.o pm3393.o sge.o subr.o mv88x201x.o
+
diff --git a/drivers/net/chelsio/common.h b/drivers/net/chelsio/common.h
new file mode 100644
index 0000000..f093488
--- /dev/null
+++ b/drivers/net/chelsio/common.h
@@ -0,0 +1,314 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: common.h                                                            *
+ * $Revision: 1.21 $                                                         *
+ * $Date: 2005/06/22 00:43:25 $                                              *
+ * Description:                                                              *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, write to the Free Software Foundation, Inc.,   *
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#ifndef _CXGB_COMMON_H_
+#define _CXGB_COMMON_H_
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/crc32.h>
+#include <linux/init.h>
+#include <asm/io.h>
+#include <linux/pci_ids.h>
+
+#define DRV_DESCRIPTION "Chelsio 10Gb Ethernet Driver"
+#define DRV_NAME "cxgb"
+#define DRV_VERSION "2.1.1"
+#define PFX      DRV_NAME ": "
+
+#define CH_ERR(fmt, ...)   printk(KERN_ERR PFX fmt, ## __VA_ARGS__)
+#define CH_WARN(fmt, ...)  printk(KERN_WARNING PFX fmt, ## __VA_ARGS__)
+#define CH_ALERT(fmt, ...) printk(KERN_ALERT PFX fmt, ## __VA_ARGS__)
+
+#define CH_DEVICE(devid, ssid, idx) \
+	{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
+
+#define SUPPORTED_PAUSE       (1 << 13)
+#define SUPPORTED_LOOPBACK    (1 << 15)
+
+#define ADVERTISED_PAUSE      (1 << 13)
+#define ADVERTISED_ASYM_PAUSE (1 << 14)
+
+typedef struct adapter adapter_t;
+
+void t1_elmer0_ext_intr(adapter_t *adapter);
+void t1_link_changed(adapter_t *adapter, int port_id, int link_status,
+			int speed, int duplex, int fc);
+
+struct t1_rx_mode {
+	struct net_device *dev;
+	u32 idx;
+	struct dev_mc_list *list;
+};
+
+#define t1_rx_mode_promisc(rm)	(rm->dev->flags & IFF_PROMISC)
+#define t1_rx_mode_allmulti(rm)	(rm->dev->flags & IFF_ALLMULTI)
+#define t1_rx_mode_mc_cnt(rm)	(rm->dev->mc_count)
+
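+/* Walk the interface's multicast list, returning the next address on each
+ * call and NULL when the list is exhausted. */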
+static inline u8 *t1_get_next_mcaddr(struct t1_rx_mode *rm)
+{
+	u8 *addr = NULL;
+
+	if (rm->idx++ < rm->dev->mc_count) {
+		addr = rm->list->dmi_addr;
+		rm->list = rm->list->next;
+	}
+	return addr;
+}
+
+#define	MAX_NPORTS 4
+
+#define SPEED_INVALID 0xffff
+#define DUPLEX_INVALID 0xff
+
+enum {
+	CHBT_BOARD_N110,
+	CHBT_BOARD_N210
+};
+
+enum {
+	CHBT_TERM_T1,
+	CHBT_TERM_T2
+};
+
+enum {
+	CHBT_MAC_PM3393,
+};
+
+enum {
+	CHBT_PHY_88X2010,
+};
+
+enum {
+	PAUSE_RX      = 1 << 0,
+	PAUSE_TX      = 1 << 1,
+	PAUSE_AUTONEG = 1 << 2
+};
+
+/* Revisions of T1 chip */
+enum {
+	TERM_T1A   = 0,
+	TERM_T1B   = 1,
+	TERM_T2    = 3
+};
+
+struct sge_params {
+	unsigned int cmdQ_size[2];
+	unsigned int freelQ_size[2];
+	unsigned int large_buf_capacity;
+	unsigned int rx_coalesce_usecs;
+	unsigned int last_rx_coalesce_raw;
+	unsigned int default_rx_coalesce_usecs;
+	unsigned int sample_interval_usecs;
+	unsigned int coalesce_enable;
+	unsigned int polling;
+};
+
+struct chelsio_pci_params {
+	unsigned short speed;
+	unsigned char  width;
+	unsigned char  is_pcix;
+};
+
+struct adapter_params {
+	struct sge_params sge;
+	struct chelsio_pci_params pci;
+
+	const struct board_info *brd_info;
+
+	unsigned int   nports;          /* # of ethernet ports */
+	unsigned int   stats_update_period;
+	unsigned short chip_revision;
+	unsigned char  chip_version;
+};
+
+struct link_config {
+	unsigned int   supported;        /* link capabilities */
+	unsigned int   advertising;      /* advertised capabilities */
+	unsigned short requested_speed;  /* speed user has requested */
+	unsigned short speed;            /* actual link speed */
+	unsigned char  requested_duplex; /* duplex user has requested */
+	unsigned char  duplex;           /* actual link duplex */
+	unsigned char  requested_fc;     /* flow control user has requested */
+	unsigned char  fc;               /* actual link flow control */
+	unsigned char  autoneg;          /* autonegotiating? */
+};
+
+struct cmac;
+struct cphy;
+
+struct port_info {
+	struct net_device *dev;
+	struct cmac *mac;
+	struct cphy *phy;
+	struct link_config link_config;
+	struct net_device_stats netstats;
+};
+
+struct sge;
+struct peespi;
+
+struct adapter {
+	u8 *regs;
+	struct pci_dev *pdev;
+	unsigned long registered_device_map;
+	unsigned long open_device_map;
+	unsigned long flags;
+
+	const char *name;
+	int msg_enable;
+	u32 mmio_len;
+
+	struct work_struct ext_intr_handler_task;
+	struct adapter_params params;
+
+	struct vlan_group *vlan_grp;
+
+	/* Terminator modules. */
+	struct sge    *sge;
+	struct peespi *espi;
+
+	struct port_info port[MAX_NPORTS];
+	struct work_struct stats_update_task;
+	struct timer_list stats_update_timer;
+
+	struct semaphore mib_mutex;
+	spinlock_t tpi_lock;
+	spinlock_t work_lock;
+	/* guards async operations */
+	spinlock_t async_lock ____cacheline_aligned;
+	u32 slow_intr_mask;
+};
+
+enum {                                           /* adapter flags */
+	FULL_INIT_DONE        = 1 << 0,
+	TSO_CAPABLE           = 1 << 2,
+	TCP_CSUM_CAPABLE      = 1 << 3,
+	UDP_CSUM_CAPABLE      = 1 << 4,
+	VLAN_ACCEL_CAPABLE    = 1 << 5,
+	RX_CSUM_ENABLED       = 1 << 6,
+};
+
+struct mdio_ops;
+struct gmac;
+struct gphy;
+
+struct board_info {
+	unsigned char           board;
+	unsigned char           port_number;
+	unsigned long           caps;
+	unsigned char           chip_term;
+	unsigned char           chip_mac;
+	unsigned char           chip_phy;
+	unsigned int            clock_core;
+	unsigned int            clock_mc3;
+	unsigned int            clock_mc4;
+	unsigned int            espi_nports;
+	unsigned int            clock_cspi;
+	unsigned int            clock_elmer0;
+	unsigned char           mdio_mdien;
+	unsigned char           mdio_mdiinv;
+	unsigned char           mdio_mdc;
+	unsigned char           mdio_phybaseaddr;
+	struct gmac            *gmac;
+	struct gphy            *gphy;
+	struct mdio_ops	       *mdio_ops;
+	const char             *desc;
+};
+
+extern struct pci_device_id t1_pci_tbl[];
+
+static inline int adapter_matches_type(const adapter_t *adapter,
+				       int version, int revision)
+{
+	return adapter->params.chip_version == version &&
+	       adapter->params.chip_revision == revision;
+}
+
+#define t1_is_T1B(adap) adapter_matches_type(adap, CHBT_TERM_T1, TERM_T1B)
+#define is_T2(adap)     adapter_matches_type(adap, CHBT_TERM_T2, TERM_T2)
+
+/* Returns true if an adapter supports VLAN acceleration and TSO */
+static inline int vlan_tso_capable(const adapter_t *adapter)
+{
+	return !t1_is_T1B(adapter);
+}
+
+#define for_each_port(adapter, iter) \
+	for (iter = 0; iter < (adapter)->params.nports; ++iter)
+
+#define board_info(adapter) ((adapter)->params.brd_info)
+#define is_10G(adapter) (board_info(adapter)->caps & SUPPORTED_10000baseT_Full)
+
+static inline unsigned int core_ticks_per_usec(const adapter_t *adap)
+{
+	return board_info(adap)->clock_core / 1000000;
+}
+
+extern int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value);
+extern int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *value);
+
+extern void t1_interrupts_enable(adapter_t *adapter);
+extern void t1_interrupts_disable(adapter_t *adapter);
+extern void t1_interrupts_clear(adapter_t *adapter);
+extern int elmer0_ext_intr_handler(adapter_t *adapter);
+extern int t1_slow_intr_handler(adapter_t *adapter);
+
+extern int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc);
+extern const struct board_info *t1_get_board_info(unsigned int board_id);
+extern const struct board_info *t1_get_board_info_from_ids(unsigned int devid,
+						    unsigned short ssid);
+extern int t1_seeprom_read(adapter_t *adapter, u32 addr, u32 *data);
+extern int t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
+		     struct adapter_params *p);
+extern int t1_init_hw_modules(adapter_t *adapter);
+extern int t1_init_sw_modules(adapter_t *adapter, const struct board_info *bi);
+extern void t1_free_sw_modules(adapter_t *adapter);
+extern void t1_fatal_err(adapter_t *adapter);
+
+extern void t1_tp_set_udp_checksum_offload(adapter_t *adapter, int enable);
+extern void t1_tp_set_tcp_checksum_offload(adapter_t *adapter, int enable);
+extern void t1_tp_set_ip_checksum_offload(adapter_t *adapter, int enable);
+
+#endif /* _CXGB_COMMON_H_ */
diff --git a/drivers/net/chelsio/cphy.h b/drivers/net/chelsio/cphy.h
new file mode 100644
index 0000000..3412342
--- /dev/null
+++ b/drivers/net/chelsio/cphy.h
@@ -0,0 +1,148 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: cphy.h                                                              *
+ * $Revision: 1.7 $                                                          *
+ * $Date: 2005/06/21 18:29:47 $                                              *
+ * Description:                                                              *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, write to the Free Software Foundation, Inc.,   *
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#ifndef _CXGB_CPHY_H_
+#define _CXGB_CPHY_H_
+
+#include "common.h"
+
+struct mdio_ops {
+	void (*init)(adapter_t *adapter, const struct board_info *bi);
+	int  (*read)(adapter_t *adapter, int phy_addr, int mmd_addr,
+		     int reg_addr, unsigned int *val);
+	int  (*write)(adapter_t *adapter, int phy_addr, int mmd_addr,
+		      int reg_addr, unsigned int val);
+};
+
+/* PHY interrupt types */
+enum {
+	cphy_cause_link_change = 0x1,
+	cphy_cause_error = 0x2
+};
+
+struct cphy;
+
+/* PHY operations */
+struct cphy_ops {
+	void (*destroy)(struct cphy *);
+	int (*reset)(struct cphy *, int wait);
+
+	int (*interrupt_enable)(struct cphy *);
+	int (*interrupt_disable)(struct cphy *);
+	int (*interrupt_clear)(struct cphy *);
+	int (*interrupt_handler)(struct cphy *);
+
+	int (*autoneg_enable)(struct cphy *);
+	int (*autoneg_disable)(struct cphy *);
+	int (*autoneg_restart)(struct cphy *);
+
+	int (*advertise)(struct cphy *phy, unsigned int advertise_map);
+	int (*set_loopback)(struct cphy *, int on);
+	int (*set_speed_duplex)(struct cphy *phy, int speed, int duplex);
+	int (*get_link_status)(struct cphy *phy, int *link_ok, int *speed,
+			       int *duplex, int *fc);
+};
+
+/* A PHY instance */
+struct cphy {
+	int addr;                            /* PHY address */
+	adapter_t *adapter;                  /* associated adapter */
+	struct cphy_ops *ops;                /* PHY operations */
+	int (*mdio_read)(adapter_t *adapter, int phy_addr, int mmd_addr,
+			 int reg_addr, unsigned int *val);
+	int (*mdio_write)(adapter_t *adapter, int phy_addr, int mmd_addr,
+			  int reg_addr, unsigned int val);
+	struct cphy_instance *instance;
+};
+
+/* Convenience MDIO read/write wrappers */
+static inline int mdio_read(struct cphy *cphy, int mmd, int reg,
+			    unsigned int *valp)
+{
+	return cphy->mdio_read(cphy->adapter, cphy->addr, mmd, reg, valp);
+}
+
+static inline int mdio_write(struct cphy *cphy, int mmd, int reg,
+			     unsigned int val)
+{
+	return cphy->mdio_write(cphy->adapter, cphy->addr, mmd, reg, val);
+}
+
+static inline int simple_mdio_read(struct cphy *cphy, int reg,
+				   unsigned int *valp)
+{
+	return mdio_read(cphy, 0, reg, valp);
+}
+
+static inline int simple_mdio_write(struct cphy *cphy, int reg,
+				    unsigned int val)
+{
+	return mdio_write(cphy, 0, reg, val);
+}
+
+/* Convenience initializer */
+static inline void cphy_init(struct cphy *phy, adapter_t *adapter,
+			     int phy_addr, struct cphy_ops *phy_ops,
+			     struct mdio_ops *mdio_ops)
+{
+	phy->adapter = adapter;
+	phy->addr    = phy_addr;
+	phy->ops     = phy_ops;
+	if (mdio_ops) {
+		phy->mdio_read  = mdio_ops->read;
+		phy->mdio_write = mdio_ops->write;
+	}
+}
+
+/* Operations of the PHY-instance factory */
+struct gphy {
+	/* Construct a PHY instance with the given PHY address */
+	struct cphy *(*create)(adapter_t *adapter, int phy_addr,
+			       struct mdio_ops *mdio_ops);
+
+	/*
+	 * Reset the PHY chip.  This resets the whole PHY chip, not individual
+	 * ports.
+	 */
+	int (*reset)(adapter_t *adapter);
+};
+
+extern struct gphy t1_mv88x201x_ops;
+extern struct gphy t1_dummy_phy_ops;
+
+#endif /* _CXGB_CPHY_H_ */
diff --git a/drivers/net/chelsio/cpl5_cmd.h b/drivers/net/chelsio/cpl5_cmd.h
new file mode 100644
index 0000000..27925e4
--- /dev/null
+++ b/drivers/net/chelsio/cpl5_cmd.h
@@ -0,0 +1,145 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: cpl5_cmd.h                                                          *
+ * $Revision: 1.6 $                                                          *
+ * $Date: 2005/06/21 18:29:47 $                                              *
+ * Description:                                                              *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, write to the Free Software Foundation, Inc.,   *
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#ifndef _CXGB_CPL5_CMD_H_
+#define _CXGB_CPL5_CMD_H_
+
+#include <asm/byteorder.h>
+
+#if !defined(__LITTLE_ENDIAN_BITFIELD) && !defined(__BIG_ENDIAN_BITFIELD)
+#error "Adjust your <asm/byteorder.h> defines"
+#endif
+
+enum CPL_opcode {
+	CPL_RX_PKT            = 0xAD,
+	CPL_TX_PKT            = 0xB2,
+	CPL_TX_PKT_LSO        = 0xB6,
+};
+
+enum {                /* TX_PKT_LSO ethernet types */
+	CPL_ETH_II,
+	CPL_ETH_II_VLAN,
+	CPL_ETH_802_3,
+	CPL_ETH_802_3_VLAN
+};
+
+struct cpl_rx_data {
+	u32 rsvd0;
+	u32 len;
+	u32 seq;
+	u16 urg;
+	u8  rsvd1;
+	u8  status;
+};
+
+/*
+ * We want this header's alignment to be no more stringent than 2-byte aligned.
+ * All fields are u8 or u16 except for the length.  However that field is not
+ * used so we break it into 2 16-bit parts to easily meet our alignment needs.
+ */
+struct cpl_tx_pkt {
+	u8 opcode;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	u8 iff:4;
+	u8 ip_csum_dis:1;
+	u8 l4_csum_dis:1;
+	u8 vlan_valid:1;
+	u8 rsvd:1;
+#else
+	u8 rsvd:1;
+	u8 vlan_valid:1;
+	u8 l4_csum_dis:1;
+	u8 ip_csum_dis:1;
+	u8 iff:4;
+#endif
+	u16 vlan;
+	u16 len_hi;
+	u16 len_lo;
+};
+
+struct cpl_tx_pkt_lso {
+	u8 opcode;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	u8 iff:4;
+	u8 ip_csum_dis:1;
+	u8 l4_csum_dis:1;
+	u8 vlan_valid:1;
+	u8 rsvd:1;
+#else
+	u8 rsvd:1;
+	u8 vlan_valid:1;
+	u8 l4_csum_dis:1;
+	u8 ip_csum_dis:1;
+	u8 iff:4;
+#endif
+	u16 vlan;
+	u32 len;
+
+	u32 rsvd2;
+	u8 rsvd3;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	u8 tcp_hdr_words:4;
+	u8 ip_hdr_words:4;
+#else
+	u8 ip_hdr_words:4;
+	u8 tcp_hdr_words:4;
+#endif
+	u16 eth_type_mss;
+};
+
+struct cpl_rx_pkt {
+	u8 opcode;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	u8 iff:4;
+	u8 csum_valid:1;
+	u8 bad_pkt:1;
+	u8 vlan_valid:1;
+	u8 rsvd:1;
+#else
+	u8 rsvd:1;
+	u8 vlan_valid:1;
+	u8 bad_pkt:1;
+	u8 csum_valid:1;
+	u8 iff:4;
+#endif
+	u16 csum;
+	u16 vlan;
+	u16 len;
+};
+
+#endif /* _CXGB_CPL5_CMD_H_ */
diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c
new file mode 100644
index 0000000..28ae478
--- /dev/null
+++ b/drivers/net/chelsio/cxgb2.c
@@ -0,0 +1,1256 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: cxgb2.c                                                             *
+ * $Revision: 1.25 $                                                         *
+ * $Date: 2005/06/22 00:43:25 $                                              *
+ * Description:                                                              *
+ *  Chelsio 10Gb Ethernet Driver.                                            *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, write to the Free Software Foundation, Inc.,   *
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#include "common.h"
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/mii.h>
+#include <linux/sockios.h>
+#include <linux/proc_fs.h>
+#include <linux/dma-mapping.h>
+#include <asm/uaccess.h>
+
+#include "cpl5_cmd.h"
+#include "regs.h"
+#include "gmac.h"
+#include "cphy.h"
+#include "sge.h"
+#include "espi.h"
+
+#ifdef work_struct
+#include <linux/tqueue.h>
+#define INIT_WORK INIT_TQUEUE
+#define schedule_work schedule_task
+#define flush_scheduled_work flush_scheduled_tasks
+
+static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
+{
+	mod_timer(&ap->stats_update_timer, jiffies + secs * HZ);
+}
+
+static inline void cancel_mac_stats_update(struct adapter *ap)
+{
+	del_timer_sync(&ap->stats_update_timer);
+	flush_scheduled_tasks();
+}
+
+/*
+ * Stats update timer for 2.4.  It schedules a task to do the actual update as
+ * we need to access MAC statistics in process context.
+ */
+static void mac_stats_timer(unsigned long data)
+{
+	struct adapter *ap = (struct adapter *)data;
+
+	schedule_task(&ap->stats_update_task);
+}
+#else
+#include <linux/workqueue.h>
+
+static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
+{
+	schedule_delayed_work(&ap->stats_update_task, secs * HZ);
+}
+
+static inline void cancel_mac_stats_update(struct adapter *ap)
+{
+	cancel_delayed_work(&ap->stats_update_task);
+}
+#endif
+
+#define MAX_CMDQ_ENTRIES 16384
+#define MAX_CMDQ1_ENTRIES 1024
+#define MAX_RX_BUFFERS 16384
+#define MAX_RX_JUMBO_BUFFERS 16384
+#define MAX_TX_BUFFERS_HIGH	16384U
+#define MAX_TX_BUFFERS_LOW	1536U
+#define MIN_FL_ENTRIES 32
+
+#define PORT_MASK ((1 << MAX_NPORTS) - 1)
+
+#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
+			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
+			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
+
+/*
+ * The EEPROM is actually bigger but only the first few bytes are used so we
+ * only report those.
+ */
+#define EEPROM_SIZE 32
+
+MODULE_DESCRIPTION(DRV_DESCRIPTION);
+MODULE_AUTHOR("Chelsio Communications");
+MODULE_LICENSE("GPL");
+
+static int dflt_msg_enable = DFLT_MSG_ENABLE;
+
+MODULE_PARM(dflt_msg_enable, "i");
+MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 message enable bitmap");
+
+
+static const char pci_speed[][4] = {
+	"33", "66", "100", "133"
+};
+
+/*
+ * Set up the MAC to receive the types of packets we want.
+ */
+static void t1_set_rxmode(struct net_device *dev)
+{
+	struct adapter *adapter = dev->priv;
+	struct cmac *mac = adapter->port[dev->if_port].mac;
+	struct t1_rx_mode rm;
+
+	rm.dev = dev;
+	rm.idx = 0;
+	rm.list = dev->mc_list;
+	mac->ops->set_rx_mode(mac, &rm);
+}
+
+static void link_report(struct port_info *p)
+{
+	if (!netif_carrier_ok(p->dev))
+		printk(KERN_INFO "%s: link down\n", p->dev->name);
+	else {
+		const char *s = "10Mbps";
+
+		switch (p->link_config.speed) {
+		case SPEED_10000: s = "10Gbps"; break;
+		case SPEED_1000:  s = "1000Mbps"; break;
+		case SPEED_100:   s = "100Mbps"; break;
+		}
+
+		printk(KERN_INFO "%s: link up, %s, %s-duplex\n",
+		       p->dev->name, s,
+		       p->link_config.duplex == DUPLEX_FULL ? "full" : "half");
+	}
+}
+
+void t1_link_changed(struct adapter *adapter, int port_id, int link_stat,
+			int speed, int duplex, int pause)
+{
+	struct port_info *p = &adapter->port[port_id];
+
+	if (link_stat != netif_carrier_ok(p->dev)) {
+		if (link_stat)
+			netif_carrier_on(p->dev);
+		else
+			netif_carrier_off(p->dev);
+		link_report(p);
+	}
+}
+
+static void link_start(struct port_info *p)
+{
+	struct cmac *mac = p->mac;
+
+	mac->ops->reset(mac);
+	if (mac->ops->macaddress_set)
+		mac->ops->macaddress_set(mac, p->dev->dev_addr);
+	t1_set_rxmode(p->dev);
+	t1_link_start(p->phy, mac, &p->link_config);
+	mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
+}
+
+static void enable_hw_csum(struct adapter *adapter)
+{
+	if (adapter->flags & TSO_CAPABLE)
+		t1_tp_set_ip_checksum_offload(adapter, 1); /* for TSO only */
+	t1_tp_set_tcp_checksum_offload(adapter, 1);
+}
+
+/*
+ * Things to do upon first use of a card.
+ * This must run with the rtnl lock held.
+ */
+static int cxgb_up(struct adapter *adapter)
+{
+	int err = 0;
+
+	if (!(adapter->flags & FULL_INIT_DONE)) {
+		err = t1_init_hw_modules(adapter);
+		if (err)
+			goto out_err;
+
+		enable_hw_csum(adapter);
+		adapter->flags |= FULL_INIT_DONE;
+	}
+
+	t1_interrupts_clear(adapter);
+	if ((err = request_irq(adapter->pdev->irq,
+			       t1_select_intr_handler(adapter), SA_SHIRQ,
+			       adapter->name, adapter))) {
+		goto out_err;
+	}
+	t1_sge_start(adapter->sge);
+	t1_interrupts_enable(adapter);
+ out_err:
+	return err;
+}
+
+/*
+ * Release resources when all the ports have been stopped.
+ */
+static void cxgb_down(struct adapter *adapter)
+{
+	t1_sge_stop(adapter->sge);
+	t1_interrupts_disable(adapter);
+	free_irq(adapter->pdev->irq, adapter);
+}
+
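+/*
+ * net_device open handler: bring up the adapter on the first open, start the
+ * port's MAC and link, and kick off periodic MAC statistics updates if needed.
+ */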
+static int cxgb_open(struct net_device *dev)
+{
+	int err;
+	struct adapter *adapter = dev->priv;
+	int other_ports = adapter->open_device_map & PORT_MASK;
+
+	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
+		return err;
+
+	__set_bit(dev->if_port, &adapter->open_device_map);
+	link_start(&adapter->port[dev->if_port]);
+	netif_start_queue(dev);
+	if (!other_ports && adapter->params.stats_update_period)
+		schedule_mac_stats_update(adapter,
+					  adapter->params.stats_update_period);
+	return 0;
+}
+
+static int cxgb_close(struct net_device *dev)
+{
+	struct adapter *adapter = dev->priv;
+	struct port_info *p = &adapter->port[dev->if_port];
+	struct cmac *mac = p->mac;
+
+	netif_stop_queue(dev);
+	mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
+	netif_carrier_off(dev);
+
+	clear_bit(dev->if_port, &adapter->open_device_map);
+	if (adapter->params.stats_update_period &&
+	    !(adapter->open_device_map & PORT_MASK)) {
+		/* Stop statistics accumulation. */
+		smp_mb__after_clear_bit();
+		spin_lock(&adapter->work_lock);   /* sync with update task */
+		spin_unlock(&adapter->work_lock);
+		cancel_mac_stats_update(adapter);
+	}
+
+	if (!adapter->open_device_map)
+		cxgb_down(adapter);
+	return 0;
+}
+
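+/*
+ * Map the MAC statistics maintained by the hardware into the generic
+ * net_device_stats counters.
+ */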
+static struct net_device_stats *t1_get_stats(struct net_device *dev)
+{
+	struct adapter *adapter = dev->priv;
+	struct port_info *p = &adapter->port[dev->if_port];
+	struct net_device_stats *ns = &p->netstats;
+	const struct cmac_statistics *pstats;
+
+	/* Do a full update of the MAC stats */
+	pstats = p->mac->ops->statistics_update(p->mac,
+						      MAC_STATS_UPDATE_FULL);
+
+	ns->tx_packets = pstats->TxUnicastFramesOK +
+		pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;
+
+	ns->rx_packets = pstats->RxUnicastFramesOK +
+		pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK;
+
+	ns->tx_bytes = pstats->TxOctetsOK;
+	ns->rx_bytes = pstats->RxOctetsOK;
+
+	ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors +
+		pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions;
+	ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors +
+		pstats->RxFCSErrors + pstats->RxAlignErrors +
+		pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors +
+		pstats->RxSymbolErrors + pstats->RxRuntErrors;
+
+	ns->multicast  = pstats->RxMulticastFramesOK;
+	ns->collisions = pstats->TxTotalCollisions;
+
+	/* detailed rx_errors */
+	ns->rx_length_errors = pstats->RxFrameTooLongErrors +
+		pstats->RxJabberErrors;
+	ns->rx_over_errors   = 0;
+	ns->rx_crc_errors    = pstats->RxFCSErrors;
+	ns->rx_frame_errors  = pstats->RxAlignErrors;
+	ns->rx_fifo_errors   = 0;
+	ns->rx_missed_errors = 0;
+
+	/* detailed tx_errors */
+	ns->tx_aborted_errors   = pstats->TxFramesAbortedDueToXSCollisions;
+	ns->tx_carrier_errors   = 0;
+	ns->tx_fifo_errors      = pstats->TxUnderrun;
+	ns->tx_heartbeat_errors = 0;
+	ns->tx_window_errors    = pstats->TxLateCollisions;
+	return ns;
+}
+
+static u32 get_msglevel(struct net_device *dev)
+{
+	struct adapter *adapter = dev->priv;
+
+	return adapter->msg_enable;
+}
+
+static void set_msglevel(struct net_device *dev, u32 val)
+{
+	struct adapter *adapter = dev->priv;
+
+	adapter->msg_enable = val;
+}
+
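+/*
+ * Names of the ethtool statistics.  The order and count must match the values
+ * written out by get_stats() below.
+ */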
+static char stats_strings[][ETH_GSTRING_LEN] = {
+	"TxOctetsOK",
+	"TxOctetsBad",
+	"TxUnicastFramesOK",
+	"TxMulticastFramesOK",
+	"TxBroadcastFramesOK",
+	"TxPauseFrames",
+	"TxFramesWithDeferredXmissions",
+	"TxLateCollisions",
+	"TxTotalCollisions",
+	"TxFramesAbortedDueToXSCollisions",
+	"TxUnderrun",
+	"TxLengthErrors",
+	"TxInternalMACXmitError",
+	"TxFramesWithExcessiveDeferral",
+	"TxFCSErrors",
+
+	"RxOctetsOK",
+	"RxOctetsBad",
+	"RxUnicastFramesOK",
+	"RxMulticastFramesOK",
+	"RxBroadcastFramesOK",
+	"RxPauseFrames",
+	"RxFCSErrors",
+	"RxAlignErrors",
+	"RxSymbolErrors",
+	"RxDataErrors",
+	"RxSequenceErrors",
+	"RxRuntErrors",
+	"RxJabberErrors",
+	"RxInternalMACRcvError",
+	"RxInRangeLengthErrors",
+	"RxOutOfRangeLengthField",
+	"RxFrameTooLongErrors",
+
+	"TSO",
+	"VLANextractions",
+	"VLANinsertions",
+	"RxCsumGood",
+	"TxCsumOffload",
+	"RxDrops"
+
+	"respQ_empty",
+	"respQ_overflow",
+	"freelistQ_empty",
+	"pkt_too_big",
+	"pkt_mismatch",
+	"cmdQ_full0",
+	"cmdQ_full1",
+	"tx_ipfrags",
+	"tx_reg_pkts",
+	"tx_lso_pkts",
+	"tx_do_cksum",
+
+	"espi_DIP2ParityErr",
+	"espi_DIP4Err",
+	"espi_RxDrops",
+	"espi_TxDrops",
+	"espi_RxOvfl",
+	"espi_ParityErr"
+};
+
+#define T2_REGMAP_SIZE (3 * 1024)
+
+static int get_regs_len(struct net_device *dev)
+{
+	return T2_REGMAP_SIZE;
+}
+
+static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+	struct adapter *adapter = dev->priv;
+
+	strcpy(info->driver, DRV_NAME);
+	strcpy(info->version, DRV_VERSION);
+	strcpy(info->fw_version, "N/A");
+	strcpy(info->bus_info, pci_name(adapter->pdev));
+}
+
+static int get_stats_count(struct net_device *dev)
+{
+	return ARRAY_SIZE(stats_strings);
+}
+
+static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+	if (stringset == ETH_SS_STATS)
+		memcpy(data, stats_strings, sizeof(stats_strings));
+}
+
+static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
+		      u64 *data)
+{
+	struct adapter *adapter = dev->priv;
+	struct cmac *mac = adapter->port[dev->if_port].mac;
+	const struct cmac_statistics *s;
+	const struct sge_port_stats *ss;
+	const struct sge_intr_counts *t;
+
+	s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
+	ss = t1_sge_get_port_stats(adapter->sge, dev->if_port);
+	t = t1_sge_get_intr_counts(adapter->sge);
+
+	*data++ = s->TxOctetsOK;
+	*data++ = s->TxOctetsBad;
+	*data++ = s->TxUnicastFramesOK;
+	*data++ = s->TxMulticastFramesOK;
+	*data++ = s->TxBroadcastFramesOK;
+	*data++ = s->TxPauseFrames;
+	*data++ = s->TxFramesWithDeferredXmissions;
+	*data++ = s->TxLateCollisions;
+	*data++ = s->TxTotalCollisions;
+	*data++ = s->TxFramesAbortedDueToXSCollisions;
+	*data++ = s->TxUnderrun;
+	*data++ = s->TxLengthErrors;
+	*data++ = s->TxInternalMACXmitError;
+	*data++ = s->TxFramesWithExcessiveDeferral;
+	*data++ = s->TxFCSErrors;
+
+	*data++ = s->RxOctetsOK;
+	*data++ = s->RxOctetsBad;
+	*data++ = s->RxUnicastFramesOK;
+	*data++ = s->RxMulticastFramesOK;
+	*data++ = s->RxBroadcastFramesOK;
+	*data++ = s->RxPauseFrames;
+	*data++ = s->RxFCSErrors;
+	*data++ = s->RxAlignErrors;
+	*data++ = s->RxSymbolErrors;
+	*data++ = s->RxDataErrors;
+	*data++ = s->RxSequenceErrors;
+	*data++ = s->RxRuntErrors;
+	*data++ = s->RxJabberErrors;
+	*data++ = s->RxInternalMACRcvError;
+	*data++ = s->RxInRangeLengthErrors;
+	*data++ = s->RxOutOfRangeLengthField;
+	*data++ = s->RxFrameTooLongErrors;
+
+	*data++ = ss->tso;
+	*data++ = ss->vlan_xtract;
+	*data++ = ss->vlan_insert;
+	*data++ = ss->rx_cso_good;
+	*data++ = ss->tx_cso;
+	*data++ = ss->rx_drops;
+
+	*data++ = (u64)t->respQ_empty;
+	*data++ = (u64)t->respQ_overflow;
+	*data++ = (u64)t->freelistQ_empty;
+	*data++ = (u64)t->pkt_too_big;
+	*data++ = (u64)t->pkt_mismatch;
+	*data++ = (u64)t->cmdQ_full[0];
+	*data++ = (u64)t->cmdQ_full[1];
+	*data++ = (u64)t->tx_ipfrags;
+	*data++ = (u64)t->tx_reg_pkts;
+	*data++ = (u64)t->tx_lso_pkts;
+	*data++ = (u64)t->tx_do_cksum;
+}
+
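+/*
+ * Copy the 32-bit registers from offset 'start' through 'end' inclusive into
+ * the register dump buffer at the corresponding offsets.
+ */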
+static inline void reg_block_dump(struct adapter *ap, void *buf,
+				  unsigned int start, unsigned int end)
+{
+	u32 *p = buf + start;
+
+	for ( ; start <= end; start += sizeof(u32))
+		*p++ = readl(ap->regs + start);
+}
+
+static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
+		     void *buf)
+{
+	struct adapter *ap = dev->priv;
+
+	/*
+	 * Version scheme: bits 0..9: chip version, bits 10..15: chip revision
+	 */
+	regs->version = 2;
+
+	memset(buf, 0, T2_REGMAP_SIZE);
+	reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
+}
+
+static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct adapter *adapter = dev->priv;
+	struct port_info *p = &adapter->port[dev->if_port];
+
+	cmd->supported = p->link_config.supported;
+	cmd->advertising = p->link_config.advertising;
+
+	if (netif_carrier_ok(dev)) {
+		cmd->speed = p->link_config.speed;
+		cmd->duplex = p->link_config.duplex;
+	} else {
+		cmd->speed = -1;
+		cmd->duplex = -1;
+	}
+
+	cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
+	cmd->phy_address = p->phy->addr;
+	cmd->transceiver = XCVR_EXTERNAL;
+	cmd->autoneg = p->link_config.autoneg;
+	cmd->maxtxpkt = 0;
+	cmd->maxrxpkt = 0;
+	return 0;
+}
+
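+/*
+ * Translate a speed/duplex pair into the matching SUPPORTED_* capability bit,
+ * or 0 if the combination is not supported.
+ */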
+static int speed_duplex_to_caps(int speed, int duplex)
+{
+	int cap = 0;
+
+	switch (speed) {
+	case SPEED_10:
+		if (duplex == DUPLEX_FULL)
+			cap = SUPPORTED_10baseT_Full;
+		else
+			cap = SUPPORTED_10baseT_Half;
+		break;
+	case SPEED_100:
+		if (duplex == DUPLEX_FULL)
+			cap = SUPPORTED_100baseT_Full;
+		else
+			cap = SUPPORTED_100baseT_Half;
+		break;
+	case SPEED_1000:
+		if (duplex == DUPLEX_FULL)
+			cap = SUPPORTED_1000baseT_Full;
+		else
+			cap = SUPPORTED_1000baseT_Half;
+		break;
+	case SPEED_10000:
+		if (duplex == DUPLEX_FULL)
+			cap = SUPPORTED_10000baseT_Full;
+	}
+	return cap;
+}
+
+#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
+		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
+		      ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
+		      ADVERTISED_10000baseT_Full)
+
+static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct adapter *adapter = dev->priv;
+	struct port_info *p = &adapter->port[dev->if_port];
+	struct link_config *lc = &p->link_config;
+
+	if (!(lc->supported & SUPPORTED_Autoneg))
+		return -EOPNOTSUPP;             /* can't change speed/duplex */
+
+	if (cmd->autoneg == AUTONEG_DISABLE) {
+		int cap = speed_duplex_to_caps(cmd->speed, cmd->duplex);
+
+		if (!(lc->supported & cap) || cmd->speed == SPEED_1000)
+			return -EINVAL;
+		lc->requested_speed = cmd->speed;
+		lc->requested_duplex = cmd->duplex;
+		lc->advertising = 0;
+	} else {
+		cmd->advertising &= ADVERTISED_MASK;
+		if (cmd->advertising & (cmd->advertising - 1))
+			cmd->advertising = lc->supported;
+		cmd->advertising &= lc->supported;
+		if (!cmd->advertising)
+			return -EINVAL;
+		lc->requested_speed = SPEED_INVALID;
+		lc->requested_duplex = DUPLEX_INVALID;
+		lc->advertising = cmd->advertising | ADVERTISED_Autoneg;
+	}
+	lc->autoneg = cmd->autoneg;
+	if (netif_running(dev))
+		t1_link_start(p->phy, p->mac, lc);
+	return 0;
+}
+
+static void get_pauseparam(struct net_device *dev,
+			   struct ethtool_pauseparam *epause)
+{
+	struct adapter *adapter = dev->priv;
+	struct port_info *p = &adapter->port[dev->if_port];
+
+	epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
+	epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
+	epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
+}
+
+static int set_pauseparam(struct net_device *dev,
+			  struct ethtool_pauseparam *epause)
+{
+	struct adapter *adapter = dev->priv;
+	struct port_info *p = &adapter->port[dev->if_port];
+	struct link_config *lc = &p->link_config;
+
+	if (epause->autoneg == AUTONEG_DISABLE)
+		lc->requested_fc = 0;
+	else if (lc->supported & SUPPORTED_Autoneg)
+		lc->requested_fc = PAUSE_AUTONEG;
+	else
+		return -EINVAL;
+
+	if (epause->rx_pause)
+		lc->requested_fc |= PAUSE_RX;
+	if (epause->tx_pause)
+		lc->requested_fc |= PAUSE_TX;
+	if (lc->autoneg == AUTONEG_ENABLE) {
+		if (netif_running(dev))
+			t1_link_start(p->phy, p->mac, lc);
+	} else {
+		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+		if (netif_running(dev))
+			p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1,
+							 lc->fc);
+	}
+	return 0;
+}
+
+static u32 get_rx_csum(struct net_device *dev)
+{
+	struct adapter *adapter = dev->priv;
+
+	return (adapter->flags & RX_CSUM_ENABLED) != 0;
+}
+
+static int set_rx_csum(struct net_device *dev, u32 data)
+{
+	struct adapter *adapter = dev->priv;
+
+	if (data)
+		adapter->flags |= RX_CSUM_ENABLED;
+	else
+		adapter->flags &= ~RX_CSUM_ENABLED;
+	return 0;
+}
+
+static int set_tso(struct net_device *dev, u32 value)
+{
+	struct adapter *adapter = dev->priv;
+
+	if (!(adapter->flags & TSO_CAPABLE))
+		return value ? -EOPNOTSUPP : 0;
+	return ethtool_op_set_tso(dev, value);
+}
+
+static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+{
+	struct adapter *adapter = dev->priv;
+	int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
+
+	e->rx_max_pending = MAX_RX_BUFFERS;
+	e->rx_mini_max_pending = 0;
+	e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
+	e->tx_max_pending = MAX_CMDQ_ENTRIES;
+
+	e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];
+	e->rx_mini_pending = 0;
+	e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];
+	e->tx_pending = adapter->params.sge.cmdQ_size[0];
+}
+
+static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+{
+	struct adapter *adapter = dev->priv;
+	int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
+
+	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending ||
+	    e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
+	    e->tx_pending > MAX_CMDQ_ENTRIES ||
+	    e->rx_pending < MIN_FL_ENTRIES ||
+	    e->rx_jumbo_pending < MIN_FL_ENTRIES ||
+	    e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1))
+		return -EINVAL;
+
+	if (adapter->flags & FULL_INIT_DONE)
+		return -EBUSY;
+
+	adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
+	adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
+	adapter->params.sge.cmdQ_size[0] = e->tx_pending;
+	adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ?
+		MAX_CMDQ1_ENTRIES : e->tx_pending;
+	return 0;
+}
+
+static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+{
+	struct adapter *adapter = dev->priv;
+
+	/*
+	 * If RX coalescing is requested we use NAPI, otherwise interrupts.
+	 * This choice can be made only when all ports and the TOE are off.
+	 */
+	if (adapter->open_device_map == 0)
+		adapter->params.sge.polling = c->use_adaptive_rx_coalesce;
+
+	if (adapter->params.sge.polling) {
+		adapter->params.sge.rx_coalesce_usecs = 0;
+	} else {
+		adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
+	}
+	adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
+	adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
+	t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
+	return 0;
+}
+
+static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+{
+	struct adapter *adapter = dev->priv;
+
+	c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
+	c->rate_sample_interval = adapter->params.sge.sample_interval_usecs;
+	c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable;
+	return 0;
+}
+
+static int get_eeprom_len(struct net_device *dev)
+{
+	return EEPROM_SIZE;
+}
+
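+/* The EEPROM magic combines the PCI vendor ID with the chip version. */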
+#define EEPROM_MAGIC(ap) \
+	(PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16))
+
+static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
+		      u8 *data)
+{
+	int i;
+	u8 buf[EEPROM_SIZE] __attribute__((aligned(4)));
+	struct adapter *adapter = dev->priv;
+
+	e->magic = EEPROM_MAGIC(adapter);
+	for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32))
+		t1_seeprom_read(adapter, i, (u32 *)&buf[i]);
+	memcpy(data, buf + e->offset, e->len);
+	return 0;
+}
+
+static struct ethtool_ops t1_ethtool_ops = {
+	.get_settings      = get_settings,
+	.set_settings      = set_settings,
+	.get_drvinfo       = get_drvinfo,
+	.get_msglevel      = get_msglevel,
+	.set_msglevel      = set_msglevel,
+	.get_ringparam     = get_sge_param,
+	.set_ringparam     = set_sge_param,
+	.get_coalesce      = get_coalesce,
+	.set_coalesce      = set_coalesce,
+	.get_eeprom_len    = get_eeprom_len,
+	.get_eeprom        = get_eeprom,
+	.get_pauseparam    = get_pauseparam,
+	.set_pauseparam    = set_pauseparam,
+	.get_rx_csum       = get_rx_csum,
+	.set_rx_csum       = set_rx_csum,
+	.get_tx_csum       = ethtool_op_get_tx_csum,
+	.set_tx_csum       = ethtool_op_set_tx_csum,
+	.get_sg            = ethtool_op_get_sg,
+	.set_sg            = ethtool_op_set_sg,
+	.get_link          = ethtool_op_get_link,
+	.get_strings       = get_strings,
+	.get_stats_count   = get_stats_count,
+	.get_ethtool_stats = get_stats,
+	.get_regs_len      = get_regs_len,
+	.get_regs          = get_regs,
+	.get_tso           = ethtool_op_get_tso,
+	.set_tso           = set_tso,
+};
+
+static void cxgb_proc_cleanup(struct adapter *adapter,
+					struct proc_dir_entry *dir)
+{
+	const char *name;
+	name = adapter->name;
+	remove_proc_entry(name, dir);
+}
+//#define chtoe_setup_toedev(adapter) NULL
+#define update_mtu_tab(adapter)
+#define write_smt_entry(adapter, idx)
+
+static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
+{
+	struct adapter *adapter = dev->priv;
+	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
+
+	switch (cmd) {
+	case SIOCGMIIPHY:
+		data->phy_id = adapter->port[dev->if_port].phy->addr;
+		/* FALLTHRU */
+	case SIOCGMIIREG: {
+		struct cphy *phy = adapter->port[dev->if_port].phy;
+		u32 val;
+
+		if (!phy->mdio_read)
+			return -EOPNOTSUPP;
+		phy->mdio_read(adapter, data->phy_id, 0, data->reg_num & 0x1f,
+			       &val);
+		data->val_out = val;
+		break;
+	}
+	case SIOCSMIIREG: {
+		struct cphy *phy = adapter->port[dev->if_port].phy;
+
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		if (!phy->mdio_write)
+			return -EOPNOTSUPP;
+		phy->mdio_write(adapter, data->phy_id, 0, data->reg_num & 0x1f,
+			        data->val_in);
+		break;
+	}
+
+	default:
+		return -EOPNOTSUPP;
+	}
+	return 0;
+}
+
+static int t1_change_mtu(struct net_device *dev, int new_mtu)
+{
+	int ret;
+	struct adapter *adapter = dev->priv;
+	struct cmac *mac = adapter->port[dev->if_port].mac;
+
+	if (!mac->ops->set_mtu)
+		return -EOPNOTSUPP;
+	if (new_mtu < 68)
+		return -EINVAL;
+	if ((ret = mac->ops->set_mtu(mac, new_mtu)))
+		return ret;
+	dev->mtu = new_mtu;
+	return 0;
+}
+
+static int t1_set_mac_addr(struct net_device *dev, void *p)
+{
+	struct adapter *adapter = dev->priv;
+	struct cmac *mac = adapter->port[dev->if_port].mac;
+	struct sockaddr *addr = p;
+
+	if (!mac->ops->macaddress_set)
+		return -EOPNOTSUPP;
+
+	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+	mac->ops->macaddress_set(mac, dev->dev_addr);
+	return 0;
+}
+
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+static void vlan_rx_register(struct net_device *dev,
+				   struct vlan_group *grp)
+{
+	struct adapter *adapter = dev->priv;
+
+	spin_lock_irq(&adapter->async_lock);
+	adapter->vlan_grp = grp;
+	t1_set_vlan_accel(adapter, grp != NULL);
+	spin_unlock_irq(&adapter->async_lock);
+}
+
+static void vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
+{
+	struct adapter *adapter = dev->priv;
+
+	spin_lock_irq(&adapter->async_lock);
+	if (adapter->vlan_grp)
+		adapter->vlan_grp->vlan_devices[vid] = NULL;
+	spin_unlock_irq(&adapter->async_lock);
+}
+#endif
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void t1_netpoll(struct net_device *dev)
+{
+	unsigned long flags;
+	struct adapter *adapter = dev->priv;
+
+	local_irq_save(flags);
+	t1_select_intr_handler(adapter)(adapter->pdev->irq, adapter, NULL);
+	local_irq_restore(flags);
+}
+#endif
+
+/*
+ * Periodic accumulation of MAC statistics.  This is used only if the MAC
+ * does not have any other way to prevent stats counter overflow.
+ */
+static void mac_stats_task(void *data)
+{
+	int i;
+	struct adapter *adapter = data;
+
+	for_each_port(adapter, i) {
+		struct port_info *p = &adapter->port[i];
+
+		if (netif_running(p->dev))
+			p->mac->ops->statistics_update(p->mac,
+						       MAC_STATS_UPDATE_FAST);
+	}
+
+	/* Schedule the next statistics update if any port is active. */
+	spin_lock(&adapter->work_lock);
+	if (adapter->open_device_map & PORT_MASK)
+		schedule_mac_stats_update(adapter,
+					  adapter->params.stats_update_period);
+	spin_unlock(&adapter->work_lock);
+}
+
+/*
+ * Processes elmer0 external interrupts in process context.
+ */
+static void ext_intr_task(void *data)
+{
+	struct adapter *adapter = data;
+
+	elmer0_ext_intr_handler(adapter);
+
+	/* Now reenable external interrupts */
+	spin_lock_irq(&adapter->async_lock);
+	adapter->slow_intr_mask |= F_PL_INTR_EXT;
+	writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
+	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
+                   adapter->regs + A_PL_ENABLE);
+	spin_unlock_irq(&adapter->async_lock);
+}
+
+/*
+ * Interrupt-context handler for elmer0 external interrupts.
+ */
+void t1_elmer0_ext_intr(struct adapter *adapter)
+{
+	/*
+	 * Schedule a task to handle external interrupts as we require
+	 * a process context.  We disable EXT interrupts in the interim
+	 * and let the task reenable them when it's done.
+	 */
+	adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
+	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
+                   adapter->regs + A_PL_ENABLE);
+	schedule_work(&adapter->ext_intr_handler_task);
+}
+
+void t1_fatal_err(struct adapter *adapter)
+{
+	if (adapter->flags & FULL_INIT_DONE) {
+		t1_sge_stop(adapter->sge);
+		t1_interrupts_disable(adapter);
+	}
+	CH_ALERT("%s: encountered fatal error, operation suspended\n",
+		 adapter->name);
+}
+
+static int __devinit init_one(struct pci_dev *pdev,
+			      const struct pci_device_id *ent)
+{
+	static int version_printed;
+
+	int i, err, pci_using_dac = 0;
+	unsigned long mmio_start, mmio_len;
+	const struct board_info *bi;
+	struct adapter *adapter = NULL;
+	struct port_info *pi;
+
+	if (!version_printed) {
+		printk(KERN_INFO "%s - version %s\n", DRV_DESCRIPTION,
+		       DRV_VERSION);
+		++version_printed;
+	}
+
+	err = pci_enable_device(pdev);
+	if (err)
+		return err;
+
+	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+		CH_ERR("%s: cannot find PCI device memory base address\n",
+		       pci_name(pdev));
+		err = -ENODEV;
+		goto out_disable_pdev;
+	}
+
+	if (!pci_set_dma_mask(pdev, DMA_64BIT_MASK)) {
+		pci_using_dac = 1;
+
+		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK)) {
+			CH_ERR("%s: unable to obtain 64-bit DMA for"
+			       "consistent allocations\n", pci_name(pdev));
+			err = -ENODEV;
+			goto out_disable_pdev;
+		}
+
+	} else if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
+		CH_ERR("%s: no usable DMA configuration\n", pci_name(pdev));
+		goto out_disable_pdev;
+	}
+
+	err = pci_request_regions(pdev, DRV_NAME);
+	if (err) {
+		CH_ERR("%s: cannot obtain PCI resources\n", pci_name(pdev));
+		goto out_disable_pdev;
+	}
+
+	pci_set_master(pdev);
+
+	mmio_start = pci_resource_start(pdev, 0);
+	mmio_len = pci_resource_len(pdev, 0);
+	bi = t1_get_board_info(ent->driver_data);
+
+	for (i = 0; i < bi->port_number; ++i) {
+		struct net_device *netdev;
+
+		netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
+		if (!netdev) {
+			err = -ENOMEM;
+			goto out_free_dev;
+		}
+
+		SET_MODULE_OWNER(netdev);
+		SET_NETDEV_DEV(netdev, &pdev->dev);
+
+		if (!adapter) {
+			adapter = netdev->priv;
+			adapter->pdev = pdev;
+			adapter->port[0].dev = netdev;  /* so we don't leak it */
+
+			adapter->regs = ioremap(mmio_start, mmio_len);
+			if (!adapter->regs) {
+				CH_ERR("%s: cannot map device registers\n",
+				       pci_name(pdev));
+				err = -ENOMEM;
+				goto out_free_dev;
+			}
+
+			if (t1_get_board_rev(adapter, bi, &adapter->params)) {
+				err = -ENODEV;	  /* Can't handle this chip rev */
+				goto out_free_dev;
+			}
+
+			adapter->name = pci_name(pdev);
+			adapter->msg_enable = dflt_msg_enable;
+			adapter->mmio_len = mmio_len;
+
+			init_MUTEX(&adapter->mib_mutex);
+			spin_lock_init(&adapter->tpi_lock);
+			spin_lock_init(&adapter->work_lock);
+			spin_lock_init(&adapter->async_lock);
+
+			INIT_WORK(&adapter->ext_intr_handler_task,
+				  ext_intr_task, adapter);
+			INIT_WORK(&adapter->stats_update_task, mac_stats_task,
+				  adapter);
+#ifdef work_struct
+			init_timer(&adapter->stats_update_timer);
+			adapter->stats_update_timer.function = mac_stats_timer;
+			adapter->stats_update_timer.data =
+				(unsigned long)adapter;
+#endif
+
+			pci_set_drvdata(pdev, netdev);
+		}
+
+		pi = &adapter->port[i];
+		pi->dev = netdev;
+		netif_carrier_off(netdev);
+		netdev->irq = pdev->irq;
+		netdev->if_port = i;
+		netdev->mem_start = mmio_start;
+		netdev->mem_end = mmio_start + mmio_len - 1;
+		netdev->priv = adapter;
+		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
+		netdev->features |= NETIF_F_LLTX;
+
+		adapter->flags |= RX_CSUM_ENABLED | TCP_CSUM_CAPABLE;
+		if (pci_using_dac)
+			netdev->features |= NETIF_F_HIGHDMA;
+		if (vlan_tso_capable(adapter)) {
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+			adapter->flags |= VLAN_ACCEL_CAPABLE;
+			netdev->features |=
+				NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+			netdev->vlan_rx_register = vlan_rx_register;
+			netdev->vlan_rx_kill_vid = vlan_rx_kill_vid;
+#endif
+			adapter->flags |= TSO_CAPABLE;
+			netdev->features |= NETIF_F_TSO;
+		}
+
+		netdev->open = cxgb_open;
+		netdev->stop = cxgb_close;
+		netdev->hard_start_xmit = t1_start_xmit;
+		netdev->hard_header_len += (adapter->flags & TSO_CAPABLE) ?
+			sizeof(struct cpl_tx_pkt_lso) :
+			sizeof(struct cpl_tx_pkt);
+		netdev->get_stats = t1_get_stats;
+		netdev->set_multicast_list = t1_set_rxmode;
+		netdev->do_ioctl = t1_ioctl;
+		netdev->change_mtu = t1_change_mtu;
+		netdev->set_mac_address = t1_set_mac_addr;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+		netdev->poll_controller = t1_netpoll;
+#endif
+		netdev->weight = 64;
+
+		SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
+	}
+
+	if (t1_init_sw_modules(adapter, bi) < 0) {
+		err = -ENODEV;
+		goto out_free_dev;
+	}
+
+	/*
+	 * The card is now ready to go.  If any errors occur during device
+	 * registration we do not fail the whole card but rather proceed only
+	 * with the ports we manage to register successfully.  However we must
+	 * register at least one net device.
+	 */
+	for (i = 0; i < bi->port_number; ++i) {
+		err = register_netdev(adapter->port[i].dev);
+		if (err)
+			CH_WARN("%s: cannot register net device %s, skipping\n",
+				pci_name(pdev), adapter->port[i].dev->name);
+		else {
+			/*
+			 * Change the name we use for messages to the name of
+			 * the first successfully registered interface.
+			 */
+			if (!adapter->registered_device_map)
+				adapter->name = adapter->port[i].dev->name;
+
+			__set_bit(i, &adapter->registered_device_map);
+		}
+	}
+	if (!adapter->registered_device_map) {
+		CH_ERR("%s: could not register any net devices\n",
+		       pci_name(pdev));
+		goto out_release_adapter_res;
+	}
+
+	printk(KERN_INFO "%s: %s (rev %d), %s %dMHz/%d-bit\n", adapter->name,
+	       bi->desc, adapter->params.chip_revision,
+	       adapter->params.pci.is_pcix ? "PCIX" : "PCI",
+	       adapter->params.pci.speed, adapter->params.pci.width);
+	return 0;
+
+ out_release_adapter_res:
+	t1_free_sw_modules(adapter);
+ out_free_dev:
+	if (adapter) {
+		if (adapter->regs)
+			iounmap(adapter->regs);
+		for (i = bi->port_number - 1; i >= 0; --i)
+			if (adapter->port[i].dev) {
+				cxgb_proc_cleanup(adapter, proc_root_driver);
+				kfree(adapter->port[i].dev);
+			}
+	}
+	pci_release_regions(pdev);
+ out_disable_pdev:
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+	return err;
+}
+
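+/*
+ * Reset the chip by bouncing it through PCI power states (D3hot, then back to
+ * D0) via its power management control/status register.
+ */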
+static inline void t1_sw_reset(struct pci_dev *pdev)
+{
+	pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
+	pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
+}
+
+static void __devexit remove_one(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+
+	if (dev) {
+		int i;
+		struct adapter *adapter = dev->priv;
+
+		for_each_port(adapter, i)
+			if (test_bit(i, &adapter->registered_device_map))
+				unregister_netdev(adapter->port[i].dev);
+
+		t1_free_sw_modules(adapter);
+		iounmap(adapter->regs);
+		while (--i >= 0)
+			if (adapter->port[i].dev) {
+				cxgb_proc_cleanup(adapter, proc_root_driver);
+				kfree(adapter->port[i].dev);
+			}
+		pci_release_regions(pdev);
+		pci_disable_device(pdev);
+		pci_set_drvdata(pdev, NULL);
+		t1_sw_reset(pdev);
+	}
+}
+
+static struct pci_driver driver = {
+	.name     = DRV_NAME,
+	.id_table = t1_pci_tbl,
+	.probe    = init_one,
+	.remove   = __devexit_p(remove_one),
+};
+
+static int __init t1_init_module(void)
+{
+	return pci_module_init(&driver);
+}
+
+static void __exit t1_cleanup_module(void)
+{
+	pci_unregister_driver(&driver);
+}
+
+module_init(t1_init_module);
+module_exit(t1_cleanup_module);
diff --git a/drivers/net/chelsio/elmer0.h b/drivers/net/chelsio/elmer0.h
new file mode 100644
index 0000000..5590cb2
--- /dev/null
+++ b/drivers/net/chelsio/elmer0.h
@@ -0,0 +1,151 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: elmer0.h                                                            *
+ * $Revision: 1.6 $                                                          *
+ * $Date: 2005/06/21 22:49:43 $                                              *
+ * Description:                                                              *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, write to the Free Software Foundation, Inc.,   *
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#ifndef _CXGB_ELMER0_H_
+#define _CXGB_ELMER0_H_
+
+/* ELMER0 registers */
+#define A_ELMER0_VERSION 0x100000
+#define A_ELMER0_PHY_CFG 0x100004
+#define A_ELMER0_INT_ENABLE 0x100008
+#define A_ELMER0_INT_CAUSE 0x10000c
+#define A_ELMER0_GPI_CFG 0x100010
+#define A_ELMER0_GPI_STAT 0x100014
+#define A_ELMER0_GPO 0x100018
+#define A_ELMER0_PORT0_MI1_CFG 0x400000
+
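+/*
+ * Register field helpers: S_* is a field's bit offset, M_* its mask, V_*(x)
+ * shifts a value into the field, G_*(x) extracts it, and F_* is the value of
+ * a single-bit flag.
+ */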
+#define S_MI1_MDI_ENABLE    0
+#define V_MI1_MDI_ENABLE(x) ((x) << S_MI1_MDI_ENABLE)
+#define F_MI1_MDI_ENABLE    V_MI1_MDI_ENABLE(1U)
+
+#define S_MI1_MDI_INVERT    1
+#define V_MI1_MDI_INVERT(x) ((x) << S_MI1_MDI_INVERT)
+#define F_MI1_MDI_INVERT    V_MI1_MDI_INVERT(1U)
+
+#define S_MI1_PREAMBLE_ENABLE    2
+#define V_MI1_PREAMBLE_ENABLE(x) ((x) << S_MI1_PREAMBLE_ENABLE)
+#define F_MI1_PREAMBLE_ENABLE    V_MI1_PREAMBLE_ENABLE(1U)
+
+#define S_MI1_SOF    3
+#define M_MI1_SOF    0x3
+#define V_MI1_SOF(x) ((x) << S_MI1_SOF)
+#define G_MI1_SOF(x) (((x) >> S_MI1_SOF) & M_MI1_SOF)
+
+#define S_MI1_CLK_DIV    5
+#define M_MI1_CLK_DIV    0xff
+#define V_MI1_CLK_DIV(x) ((x) << S_MI1_CLK_DIV)
+#define G_MI1_CLK_DIV(x) (((x) >> S_MI1_CLK_DIV) & M_MI1_CLK_DIV)
+
+#define A_ELMER0_PORT0_MI1_ADDR 0x400004
+
+#define S_MI1_REG_ADDR    0
+#define M_MI1_REG_ADDR    0x1f
+#define V_MI1_REG_ADDR(x) ((x) << S_MI1_REG_ADDR)
+#define G_MI1_REG_ADDR(x) (((x) >> S_MI1_REG_ADDR) & M_MI1_REG_ADDR)
+
+#define S_MI1_PHY_ADDR    5
+#define M_MI1_PHY_ADDR    0x1f
+#define V_MI1_PHY_ADDR(x) ((x) << S_MI1_PHY_ADDR)
+#define G_MI1_PHY_ADDR(x) (((x) >> S_MI1_PHY_ADDR) & M_MI1_PHY_ADDR)
+
+#define A_ELMER0_PORT0_MI1_DATA 0x400008
+
+#define S_MI1_DATA    0
+#define M_MI1_DATA    0xffff
+#define V_MI1_DATA(x) ((x) << S_MI1_DATA)
+#define G_MI1_DATA(x) (((x) >> S_MI1_DATA) & M_MI1_DATA)
+
+#define A_ELMER0_PORT0_MI1_OP 0x40000c
+
+#define S_MI1_OP    0
+#define M_MI1_OP    0x3
+#define V_MI1_OP(x) ((x) << S_MI1_OP)
+#define G_MI1_OP(x) (((x) >> S_MI1_OP) & M_MI1_OP)
+
+#define S_MI1_ADDR_AUTOINC    2
+#define V_MI1_ADDR_AUTOINC(x) ((x) << S_MI1_ADDR_AUTOINC)
+#define F_MI1_ADDR_AUTOINC    V_MI1_ADDR_AUTOINC(1U)
+
+#define S_MI1_OP_BUSY    31
+#define V_MI1_OP_BUSY(x) ((x) << S_MI1_OP_BUSY)
+#define F_MI1_OP_BUSY    V_MI1_OP_BUSY(1U)
+
+#define A_ELMER0_PORT1_MI1_CFG 0x500000
+#define A_ELMER0_PORT1_MI1_ADDR 0x500004
+#define A_ELMER0_PORT1_MI1_DATA 0x500008
+#define A_ELMER0_PORT1_MI1_OP 0x50000c
+#define A_ELMER0_PORT2_MI1_CFG 0x600000
+#define A_ELMER0_PORT2_MI1_ADDR 0x600004
+#define A_ELMER0_PORT2_MI1_DATA 0x600008
+#define A_ELMER0_PORT2_MI1_OP 0x60000c
+#define A_ELMER0_PORT3_MI1_CFG 0x700000
+#define A_ELMER0_PORT3_MI1_ADDR 0x700004
+#define A_ELMER0_PORT3_MI1_DATA 0x700008
+#define A_ELMER0_PORT3_MI1_OP 0x70000c
+
+/* Simple bit definition for GPI and GP0 registers. */
+#define     ELMER0_GP_BIT0              0x0001
+#define     ELMER0_GP_BIT1              0x0002
+#define     ELMER0_GP_BIT2              0x0004
+#define     ELMER0_GP_BIT3              0x0008
+#define     ELMER0_GP_BIT4              0x0010
+#define     ELMER0_GP_BIT5              0x0020
+#define     ELMER0_GP_BIT6              0x0040
+#define     ELMER0_GP_BIT7              0x0080
+#define     ELMER0_GP_BIT8              0x0100
+#define     ELMER0_GP_BIT9              0x0200
+#define     ELMER0_GP_BIT10             0x0400
+#define     ELMER0_GP_BIT11             0x0800
+#define     ELMER0_GP_BIT12             0x1000
+#define     ELMER0_GP_BIT13             0x2000
+#define     ELMER0_GP_BIT14             0x4000
+#define     ELMER0_GP_BIT15             0x8000
+#define     ELMER0_GP_BIT16             0x10000
+#define     ELMER0_GP_BIT17             0x20000
+#define     ELMER0_GP_BIT18             0x40000
+#define     ELMER0_GP_BIT19             0x80000
+
+#define MI1_OP_DIRECT_WRITE 1
+#define MI1_OP_DIRECT_READ  2
+
+#define MI1_OP_INDIRECT_ADDRESS  0
+#define MI1_OP_INDIRECT_WRITE    1
+#define MI1_OP_INDIRECT_READ_INC 2
+#define MI1_OP_INDIRECT_READ     3
+
+#endif /* _CXGB_ELMER0_H_ */
diff --git a/drivers/net/chelsio/espi.c b/drivers/net/chelsio/espi.c
new file mode 100644
index 0000000..2306425
--- /dev/null
+++ b/drivers/net/chelsio/espi.c
@@ -0,0 +1,346 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: espi.c                                                              *
+ * $Revision: 1.14 $                                                         *
+ * $Date: 2005/05/14 00:59:32 $                                              *
+ * Description:                                                              *
+ *  Ethernet SPI functionality.                                              *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, write to the Free Software Foundation, Inc.,   *
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#include "common.h"
+#include "regs.h"
+#include "espi.h"
+
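+/* Software state of the ESPI block. */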
+struct peespi {
+	adapter_t *adapter;
+	struct espi_intr_counts intr_cnt;
+	u32 misc_ctrl;
+	spinlock_t lock;
+};
+
+#define ESPI_INTR_MASK (F_DIP4ERR | F_RXDROP | F_TXDROP | F_RXOVERFLOW | \
+			F_RAMPARITYERR | F_DIP2PARITYERR)
+#define MON_MASK  (V_MONITORED_PORT_NUM(3) | F_MONITORED_DIRECTION \
+		   | F_MONITORED_INTERFACE)
+
+#define TRICN_CNFG 14
+#define TRICN_CMD_READ  0x11
+#define TRICN_CMD_WRITE 0x21
+#define TRICN_CMD_ATTEMPTS 10
+
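+/*
+ * Issue a TRICN write command and poll the busy bit until the command
+ * completes or the retry budget is exhausted.
+ */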
+static int tricn_write(adapter_t *adapter, int bundle_addr, int module_addr,
+		       int ch_addr, int reg_offset, u32 wr_data)
+{
+	int busy, attempts = TRICN_CMD_ATTEMPTS;
+
+	writel(V_WRITE_DATA(wr_data) |
+	       V_REGISTER_OFFSET(reg_offset) |
+	       V_CHANNEL_ADDR(ch_addr) | V_MODULE_ADDR(module_addr) |
+	       V_BUNDLE_ADDR(bundle_addr) |
+	       V_SPI4_COMMAND(TRICN_CMD_WRITE),
+	       adapter->regs + A_ESPI_CMD_ADDR);
+	writel(0, adapter->regs + A_ESPI_GOSTAT);
+
+	do {
+		busy = readl(adapter->regs + A_ESPI_GOSTAT) & F_ESPI_CMD_BUSY;
+	} while (busy && --attempts);
+
+	if (busy)
+		CH_ERR("%s: TRICN write timed out\n", adapter->name);
+
+	return busy;
+}
+
+/* 1. Deassert rx_reset_core. */
+/* 2. Program TRICN_CNFG registers. */
+/* 3. Deassert rx_reset_link */
+static int tricn_init(adapter_t *adapter)
+{
+	int     i               = 0;
+	int     sme             = 1;
+	int     stat            = 0;
+	int     timeout         = 0;
+	int     is_ready        = 0;
+	int     dynamic_deskew  = 0;
+
+	if (dynamic_deskew)
+		sme = 0;
+
+	/* 1 */
+	timeout = 1000;
+	do {
+		stat = readl(adapter->regs + A_ESPI_RX_RESET);
+		is_ready = stat & 0x4;
+		timeout--;
+		udelay(5);
+	} while (!is_ready && timeout > 0);
+	writel(0x2, adapter->regs + A_ESPI_RX_RESET);
+	if (!is_ready) {
+		CH_ERR("ESPI: timeout in tricn_init()\n");
+		t1_fatal_err(adapter);
+	}
+
+	/* 2 */
+	if (sme) {
+		tricn_write(adapter, 0, 0, 0, TRICN_CNFG, 0x81);
+		tricn_write(adapter, 0, 1, 0, TRICN_CNFG, 0x81);
+		tricn_write(adapter, 0, 2, 0, TRICN_CNFG, 0x81);
+	}
+	for (i = 1; i <= 8; i++) tricn_write(adapter, 0, 0, i, TRICN_CNFG, 0xf1);
+	for (i = 1; i <= 2; i++) tricn_write(adapter, 0, 1, i, TRICN_CNFG, 0xf1);
+	for (i = 1; i <= 3; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xe1);
+	for (i = 4; i <= 4; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xf1);
+	for (i = 5; i <= 5; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xe1);
+	for (i = 6; i <= 6; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xf1);
+	for (i = 7; i <= 7; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0x80);
+	for (i = 8; i <= 8; i++) tricn_write(adapter, 0, 2, i, TRICN_CNFG, 0xf1);
+
+	/* 3 */
+	writel(0x3, adapter->regs + A_ESPI_RX_RESET);
+
+	return 0;
+}
+
+void t1_espi_intr_enable(struct peespi *espi)
+{
+	u32 enable, pl_intr = readl(espi->adapter->regs + A_PL_ENABLE);
+
+	/*
+	 * Cannot enable ESPI interrupts on T1B because HW asserts the
+	 * interrupt incorrectly, namely the driver gets ESPI interrupts
+	 * but no data is actually dropped (can verify this reading the ESPI
+	 * drop registers).  Also, once the ESPI interrupt is asserted it
+	 * cannot be cleared (HW bug).
+	 */
+	enable = t1_is_T1B(espi->adapter) ? 0 : ESPI_INTR_MASK;
+	writel(enable, espi->adapter->regs + A_ESPI_INTR_ENABLE);
+	writel(pl_intr | F_PL_INTR_ESPI, espi->adapter->regs + A_PL_ENABLE);
+}
+
+void t1_espi_intr_clear(struct peespi *espi)
+{
+	writel(0xffffffff, espi->adapter->regs + A_ESPI_INTR_STATUS);
+	writel(F_PL_INTR_ESPI, espi->adapter->regs + A_PL_CAUSE);
+}
+
+void t1_espi_intr_disable(struct peespi *espi)
+{
+	u32 pl_intr = readl(espi->adapter->regs + A_PL_ENABLE);
+
+	writel(0, espi->adapter->regs + A_ESPI_INTR_ENABLE);
+	writel(pl_intr & ~F_PL_INTR_ESPI, espi->adapter->regs + A_PL_ENABLE);
+}
+
+int t1_espi_intr_handler(struct peespi *espi)
+{
+	u32 cnt;
+	u32 status = readl(espi->adapter->regs + A_ESPI_INTR_STATUS);
+
+	if (status & F_DIP4ERR)
+		espi->intr_cnt.DIP4_err++;
+	if (status & F_RXDROP)
+		espi->intr_cnt.rx_drops++;
+	if (status & F_TXDROP)
+		espi->intr_cnt.tx_drops++;
+	if (status & F_RXOVERFLOW)
+		espi->intr_cnt.rx_ovflw++;
+	if (status & F_RAMPARITYERR)
+		espi->intr_cnt.parity_err++;
+	if (status & F_DIP2PARITYERR) {
+		espi->intr_cnt.DIP2_parity_err++;
+
+		/*
+		 * Must read the error count to clear the interrupt
+		 * that it causes.
+		 */
+		cnt = readl(espi->adapter->regs + A_ESPI_DIP2_ERR_COUNT);
+	}
+
+	/*
+	 * For T1B we need to write 1 to clear ESPI interrupts.  For T2+ we
+	 * write the status as is.
+	 */
+	if (status && t1_is_T1B(espi->adapter))
+		status = 1;
+	writel(status, espi->adapter->regs + A_ESPI_INTR_STATUS);
+	return 0;
+}
+
+const struct espi_intr_counts *t1_espi_get_intr_counts(struct peespi *espi)
+{
+	return &espi->intr_cnt;
+}
+
+static void espi_setup_for_pm3393(adapter_t *adapter)
+{
+	u32 wmark = t1_is_T1B(adapter) ? 0x4000 : 0x3200;
+
+	writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN0);
+	writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN1);
+	writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN2);
+	writel(0x1f4, adapter->regs + A_ESPI_SCH_TOKEN3);
+	writel(0x100, adapter->regs + A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK);
+	writel(wmark, adapter->regs + A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK);
+	writel(3, adapter->regs + A_ESPI_CALENDAR_LENGTH);
+	writel(0x08000008, adapter->regs + A_ESPI_TRAIN);
+	writel(V_RX_NPORTS(1) | V_TX_NPORTS(1), adapter->regs + A_PORT_CONFIG);
+}
+
+/* T2 Init part --  */
+/* 1. Set T_ESPI_MISCCTRL_ADDR */
+/* 2. Init ESPI registers. */
+/* 3. Init TriCN Hard Macro */
+int t1_espi_init(struct peespi *espi, int mac_type, int nports)
+{
+	u32 cnt;
+
+	u32 status_enable_extra = 0;
+	adapter_t *adapter = espi->adapter;
+	u32 status, burstval = 0x800100;
+
+	/* Disable ESPI training.  MACs that can handle it enable it below. */
+	writel(0, adapter->regs + A_ESPI_TRAIN);
+
+	if (is_T2(adapter)) {
+		writel(V_OUT_OF_SYNC_COUNT(4) |
+		       V_DIP2_PARITY_ERR_THRES(3) |
+		       V_DIP4_THRES(1), adapter->regs + A_ESPI_MISC_CONTROL);
+		if (nports == 4) {
+			/* T204: maxburst1 = 0x40, maxburst2 = 0x20 */
+			burstval = 0x200040;
+		}
+	}
+	writel(burstval, adapter->regs + A_ESPI_MAXBURST1_MAXBURST2);
+
+	switch (mac_type) {
+	case CHBT_MAC_PM3393:
+		espi_setup_for_pm3393(adapter);
+		break;
+	default:
+		return -1;
+	}
+
+	/*
+	 * Make sure any pending interrupts from the SPI are cleared before
+	 * enabling the interrupt.
+	 */
+	writel(ESPI_INTR_MASK, espi->adapter->regs + A_ESPI_INTR_ENABLE);
+	status = readl(espi->adapter->regs + A_ESPI_INTR_STATUS);
+	if (status & F_DIP2PARITYERR) {
+		cnt = readl(espi->adapter->regs + A_ESPI_DIP2_ERR_COUNT);
+	}
+
+	/*
+	 * For T1B we need to write 1 to clear ESPI interrupts.  For T2+ we
+	 * write the status as is.
+	 */
+	if (status && t1_is_T1B(espi->adapter))
+		status = 1;
+	writel(status, espi->adapter->regs + A_ESPI_INTR_STATUS);
+
+	writel(status_enable_extra | F_RXSTATUSENABLE,
+	       adapter->regs + A_ESPI_FIFO_STATUS_ENABLE);
+
+	if (is_T2(adapter)) {
+		tricn_init(adapter);
+		/*
+		 * Always position the control at the 1st port egress IN
+		 * (sop,eop) counter to reduce PIOs for T/N210 workaround.
+		 */
+		espi->misc_ctrl = (readl(adapter->regs + A_ESPI_MISC_CONTROL)
+				   & ~MON_MASK) | (F_MONITORED_DIRECTION
+				   | F_MONITORED_INTERFACE);
+		writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
+		spin_lock_init(&espi->lock);
+	}
+
+	return 0;
+}
+
+void t1_espi_destroy(struct peespi *espi)
+{
+	kfree(espi);
+}
+
+struct peespi *t1_espi_create(adapter_t *adapter)
+{
+	struct peespi *espi = kmalloc(sizeof(*espi), GFP_KERNEL);
+
+	if (espi) {
+		memset(espi, 0, sizeof(*espi));
+		espi->adapter = adapter;
+	}
+	return espi;
+}
+
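+/*
+ * Update the T2 ESPI misc control register while preserving the monitor
+ * selection bits managed by t1_espi_get_mon().
+ */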
+void t1_espi_set_misc_ctrl(adapter_t *adapter, u32 val)
+{
+	struct peespi *espi = adapter->espi;
+
+	if (!is_T2(adapter))
+		return;
+	spin_lock(&espi->lock);
+	espi->misc_ctrl = (val & ~MON_MASK) |
+			  (espi->misc_ctrl & MON_MASK);
+	writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
+	spin_unlock(&espi->lock);
+}
+
+u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait)
+{
+	u32 sel;
+
+	struct peespi *espi = adapter->espi;
+
+	if (!is_T2(adapter))
+		return 0;
+	sel = V_MONITORED_PORT_NUM((addr & 0x3c) >> 2);
+	if (!wait) {
+		if (!spin_trylock(&espi->lock))
+			return 0;
+	} else
+		spin_lock(&espi->lock);
+	if (sel != (espi->misc_ctrl & MON_MASK)) {
+		writel((espi->misc_ctrl & ~MON_MASK) | sel,
+		       adapter->regs + A_ESPI_MISC_CONTROL);
+		sel = readl(adapter->regs + A_ESPI_SCH_TOKEN3);
+		writel(espi->misc_ctrl, adapter->regs + A_ESPI_MISC_CONTROL);
+	} else
+		sel = readl(adapter->regs + A_ESPI_SCH_TOKEN3);
+	spin_unlock(&espi->lock);
+	return sel;
+}
diff --git a/drivers/net/chelsio/espi.h b/drivers/net/chelsio/espi.h
new file mode 100644
index 0000000..c90e37f
--- /dev/null
+++ b/drivers/net/chelsio/espi.h
@@ -0,0 +1,68 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: espi.h                                                              *
+ * $Revision: 1.7 $                                                          *
+ * $Date: 2005/06/21 18:29:47 $                                              *
+ * Description:                                                              *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, write to the Free Software Foundation, Inc.,   *
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#ifndef _CXGB_ESPI_H_
+#define _CXGB_ESPI_H_
+
+#include "common.h"
+
+struct espi_intr_counts {
+	unsigned int DIP4_err;
+	unsigned int rx_drops;
+	unsigned int tx_drops;
+	unsigned int rx_ovflw;
+	unsigned int parity_err;
+	unsigned int DIP2_parity_err;
+};
+
+struct peespi;
+
+struct peespi *t1_espi_create(adapter_t *adapter);
+void t1_espi_destroy(struct peespi *espi);
+int t1_espi_init(struct peespi *espi, int mac_type, int nports);
+
+void t1_espi_intr_enable(struct peespi *);
+void t1_espi_intr_clear(struct peespi *);
+void t1_espi_intr_disable(struct peespi *);
+int t1_espi_intr_handler(struct peespi *);
+const struct espi_intr_counts *t1_espi_get_intr_counts(struct peespi *espi);
+
+void t1_espi_set_misc_ctrl(adapter_t *adapter, u32 val);
+u32 t1_espi_get_mon(adapter_t *adapter, u32 addr, u8 wait);
+
+#endif /* _CXGB_ESPI_H_ */
diff --git a/drivers/net/chelsio/gmac.h b/drivers/net/chelsio/gmac.h
new file mode 100644
index 0000000..746b0ee
--- /dev/null
+++ b/drivers/net/chelsio/gmac.h
@@ -0,0 +1,134 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: gmac.h                                                              *
+ * $Revision: 1.6 $                                                          *
+ * $Date: 2005/06/21 18:29:47 $                                              *
+ * Description:                                                              *
+ *  Generic MAC functionality.                                               *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, write to the Free Software Foundation, Inc.,   *
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#ifndef _CXGB_GMAC_H_
+#define _CXGB_GMAC_H_
+
+#include "common.h"
+
+enum { MAC_STATS_UPDATE_FAST, MAC_STATS_UPDATE_FULL };
+enum { MAC_DIRECTION_RX = 1, MAC_DIRECTION_TX = 2 };
+
+struct cmac_statistics {
+	/* Transmit */
+	u64 TxOctetsOK;
+	u64 TxOctetsBad;
+	u64 TxUnicastFramesOK;
+	u64 TxMulticastFramesOK;
+	u64 TxBroadcastFramesOK;
+	u64 TxPauseFrames;
+	u64 TxFramesWithDeferredXmissions;
+	u64 TxLateCollisions;
+	u64 TxTotalCollisions;
+	u64 TxFramesAbortedDueToXSCollisions;
+	u64 TxUnderrun;
+	u64 TxLengthErrors;
+	u64 TxInternalMACXmitError;
+	u64 TxFramesWithExcessiveDeferral;
+	u64 TxFCSErrors;
+
+	/* Receive */
+	u64 RxOctetsOK;
+	u64 RxOctetsBad;
+	u64 RxUnicastFramesOK;
+	u64 RxMulticastFramesOK;
+	u64 RxBroadcastFramesOK;
+	u64 RxPauseFrames;
+	u64 RxFCSErrors;
+	u64 RxAlignErrors;
+	u64 RxSymbolErrors;
+	u64 RxDataErrors;
+	u64 RxSequenceErrors;
+	u64 RxRuntErrors;
+	u64 RxJabberErrors;
+	u64 RxInternalMACRcvError;
+	u64 RxInRangeLengthErrors;
+	u64 RxOutOfRangeLengthField;
+	u64 RxFrameTooLongErrors;
+};
+
+struct cmac_ops {
+	void (*destroy)(struct cmac *);
+	int (*reset)(struct cmac *);
+	int (*interrupt_enable)(struct cmac *);
+	int (*interrupt_disable)(struct cmac *);
+	int (*interrupt_clear)(struct cmac *);
+	int (*interrupt_handler)(struct cmac *);
+
+	int (*enable)(struct cmac *, int);
+	int (*disable)(struct cmac *, int);
+
+	int (*loopback_enable)(struct cmac *);
+	int (*loopback_disable)(struct cmac *);
+
+	int (*set_mtu)(struct cmac *, int mtu);
+	int (*set_rx_mode)(struct cmac *, struct t1_rx_mode *rm);
+
+	int (*set_speed_duplex_fc)(struct cmac *, int speed, int duplex, int fc);
+	int (*get_speed_duplex_fc)(struct cmac *, int *speed, int *duplex,
+				   int *fc);
+
+	const struct cmac_statistics *(*statistics_update)(struct cmac *, int);
+
+	int (*macaddress_get)(struct cmac *, u8 mac_addr[6]);
+	int (*macaddress_set)(struct cmac *, u8 mac_addr[6]);
+};
+
+typedef struct _cmac_instance cmac_instance;
+
+struct cmac {
+	struct cmac_statistics stats;
+	adapter_t *adapter;
+	struct cmac_ops *ops;
+	cmac_instance *instance;
+};
+
+struct gmac {
+	unsigned int stats_update_period;
+	struct cmac *(*create)(adapter_t *adapter, int index);
+	int (*reset)(adapter_t *);
+};
+
+extern struct gmac t1_pm3393_ops;
+extern struct gmac t1_chelsio_mac_ops;
+extern struct gmac t1_vsc7321_ops;
+extern struct gmac t1_ixf1010_ops;
+extern struct gmac t1_dummy_mac_ops;
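+
+/*
+ * Illustrative usage only (a sketch, not code from this driver): board
+ * setup code is expected to pick one of the gmac tables above and drive
+ * the MAC through its ops vector, e.g.
+ *
+ *	struct cmac *mac = t1_pm3393_ops.create(adapter, 0);
+ *
+ *	if (mac)
+ *		mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
+ *
+ * where "adapter" stands in for the caller's adapter_t pointer.
+ */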
+
+#endif /* _CXGB_GMAC_H_ */
diff --git a/drivers/net/chelsio/mv88x201x.c b/drivers/net/chelsio/mv88x201x.c
new file mode 100644
index 0000000..db503428
--- /dev/null
+++ b/drivers/net/chelsio/mv88x201x.c
@@ -0,0 +1,252 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: mv88x201x.c                                                         *
+ * $Revision: 1.12 $                                                         *
+ * $Date: 2005/04/15 19:27:14 $                                              *
+ * Description:                                                              *
+ *  Marvell PHY (mv88x201x) functionality.                                   *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, write to the Free Software Foundation, Inc.,   *
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#include "cphy.h"
+#include "elmer0.h"
+
+/*
+ * The 88x2010 Rev C. requires some link status registers to be read
+ * twice in order to get the right values. Future revisions will fix
+ * this problem and then this macro can disappear.
+ */
+#define MV88x2010_LINK_STATUS_BUGS    1
+
+static int led_init(struct cphy *cphy)
+{
+	/* Set up the LED registers so we can turn the LEDs on/off.
+	 * Writing these bits maps LED control to another register,
+	 * mmd(0x1) addr(0x7), which led_link() below then uses.
+	 */
+	mdio_write(cphy, 0x3, 0x8304, 0xdddd);
+	return 0;
+}
+
+static int led_link(struct cphy *cphy, u32 do_enable)
+{
+	u32 led = 0;
+#define LINK_ENABLE_BIT 0x1
+
+	mdio_read(cphy, 0x1, 0x7, &led);
+
+	if (do_enable & LINK_ENABLE_BIT) {
+		led |= LINK_ENABLE_BIT;
+		mdio_write(cphy, 0x1, 0x7, led);
+	} else {
+		led &= ~LINK_ENABLE_BIT;
+		mdio_write(cphy, 0x1, 0x7, led);
+	}
+	return 0;
+}
+
+/* Port Reset */
+static int mv88x201x_reset(struct cphy *cphy, int wait)
+{
+	/* This can be done through registers.  It is not required since
+	 * a full chip reset is used.
+	 */
+	return 0;
+}
+
+static int mv88x201x_interrupt_enable(struct cphy *cphy)
+{
+	u32 elmer;
+
+	/* Enable PHY LASI interrupts. */
+	mdio_write(cphy, 0x1, 0x9002, 0x1);
+
+	/* Enable Marvell interrupts through Elmer0. */
+	t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
+	elmer |= ELMER0_GP_BIT6;
+	t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
+	return 0;
+}
+
+static int mv88x201x_interrupt_disable(struct cphy *cphy)
+{
+	u32 elmer;
+
+	/* Disable PHY LASI interrupts. */
+	mdio_write(cphy, 0x1, 0x9002, 0x0);
+
+	/* Disable Marvell interrupts through Elmer0. */
+	t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
+	elmer &= ~ELMER0_GP_BIT6;
+	t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
+	return 0;
+}
+
+static int mv88x201x_interrupt_clear(struct cphy *cphy)
+{
+	u32 elmer;
+	u32 val;
+
+#ifdef MV88x2010_LINK_STATUS_BUGS
+	/* Must be read twice before the clear takes effect. */
+	mdio_read(cphy, 0x1, 0x9003, &val);
+	mdio_read(cphy, 0x1, 0x9004, &val);
+	mdio_read(cphy, 0x1, 0x9005, &val);
+
+	/* Read this register after the ones above it, otherwise it
+	 * doesn't clear correctly.
+	 */
+	mdio_read(cphy, 0x1, 0x1, &val);
+#endif
+
+	/* Clear link status. */
+	mdio_read(cphy, 0x1, 0x1, &val);
+	/* Clear PHY LASI interrupts. */
+	mdio_read(cphy, 0x1, 0x9005, &val);
+
+#ifdef MV88x2010_LINK_STATUS_BUGS
+	/* Do it again. */
+	mdio_read(cphy, 0x1, 0x9003, &val);
+	mdio_read(cphy, 0x1, 0x9004, &val);
+#endif
+
+	/* Clear Marvell interrupts through Elmer0. */
+	t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer);
+	elmer |= ELMER0_GP_BIT6;
+	t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer);
+	return 0;
+}
+
+static int mv88x201x_interrupt_handler(struct cphy *cphy)
+{
+	/* Clear interrupts */
+	mv88x201x_interrupt_clear(cphy);
+
+	/* We have only enabled link change interrupts and so
+	 * cphy_cause must be a link change interrupt.
+	 */
+	return cphy_cause_link_change;
+}
+
+static int mv88x201x_set_loopback(struct cphy *cphy, int on)
+{
+	return 0;
+}
+
+static int mv88x201x_get_link_status(struct cphy *cphy, int *link_ok,
+				     int *speed, int *duplex, int *fc)
+{
+	u32 val = 0;
+#define LINK_STATUS_BIT 0x4
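+/* Bit 2 of MMD 1, register 1 (the PMA/PMD status register) is the
+ * receive link status bit defined by IEEE 802.3 clause 45.
+ */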
+
+	if (link_ok) {
+		/* Read link status. */
+		mdio_read(cphy, 0x1, 0x1, &val);
+		val &= LINK_STATUS_BIT;
+		*link_ok = (val == LINK_STATUS_BIT);
+		/* Turn on/off Link LED */
+		led_link(cphy, *link_ok);
+	}
+	if (speed)
+		*speed = SPEED_10000;
+	if (duplex)
+		*duplex = DUPLEX_FULL;
+	if (fc)
+		*fc = PAUSE_RX | PAUSE_TX;
+	return 0;
+}
+
+static void mv88x201x_destroy(struct cphy *cphy)
+{
+	kfree(cphy);
+}
+
+static struct cphy_ops mv88x201x_ops = {
+	.destroy           = mv88x201x_destroy,
+	.reset             = mv88x201x_reset,
+	.interrupt_enable  = mv88x201x_interrupt_enable,
+	.interrupt_disable = mv88x201x_interrupt_disable,
+	.interrupt_clear   = mv88x201x_interrupt_clear,
+	.interrupt_handler = mv88x201x_interrupt_handler,
+	.get_link_status   = mv88x201x_get_link_status,
+	.set_loopback      = mv88x201x_set_loopback,
+};
+
+static struct cphy *mv88x201x_phy_create(adapter_t *adapter, int phy_addr,
+					 struct mdio_ops *mdio_ops)
+{
+	u32 val;
+	struct cphy *cphy = kmalloc(sizeof(*cphy), GFP_KERNEL);
+
+	if (!cphy)
+		return NULL;
+	memset(cphy, 0, sizeof(*cphy));
+	cphy_init(cphy, adapter, phy_addr, &mv88x201x_ops, mdio_ops);
+
+	/* Command the PHY to enable the XFP's clock. */
+	mdio_read(cphy, 0x3, 0x8300, &val);
+	mdio_write(cphy, 0x3, 0x8300, val | 1);
+
+	/* Clear link status. Required because of a bug in the PHY.  */
+	mdio_read(cphy, 0x1, 0x8, &val);
+	mdio_read(cphy, 0x3, 0x8, &val);
+
+	/* Allows the Link and Ack LEDs to be turned on/off. */
+	led_init(cphy);
+	return cphy;
+}
+
+/* Chip Reset */
+static int mv88x201x_phy_reset(adapter_t *adapter)
+{
+	u32 val;
+
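+	/* Judging from the writes below, bit 2 of ELMER0 GPO appears to
+	 * drive the PHY reset line and bit 15 the laser enable; this is
+	 * inferred from the code, not taken from a datasheet.
+	 */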
+	t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+	val &= ~4;
+	t1_tpi_write(adapter, A_ELMER0_GPO, val);
+	msleep(100);
+
+	t1_tpi_write(adapter, A_ELMER0_GPO, val | 4);
+	msleep(1000);
+
+	/* Now let's enable the laser. Delay 100us. */
+	t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+	val |= 0x8000;
+	t1_tpi_write(adapter, A_ELMER0_GPO, val);
+	udelay(100);
+	return 0;
+}
+
+struct gphy t1_mv88x201x_ops = {
+	mv88x201x_phy_create,
+	mv88x201x_phy_reset
+};
diff --git a/drivers/net/chelsio/pm3393.c b/drivers/net/chelsio/pm3393.c
new file mode 100644
index 0000000..04a1404
--- /dev/null
+++ b/drivers/net/chelsio/pm3393.c
@@ -0,0 +1,826 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: pm3393.c                                                            *
+ * $Revision: 1.16 $                                                         *
+ * $Date: 2005/05/14 00:59:32 $                                              *
+ * Description:                                                              *
+ *  PMC/SIERRA (pm3393) MAC-PHY functionality.                               *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, write to the Free Software Foundation, Inc.,   *
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#include "common.h"
+#include "regs.h"
+#include "gmac.h"
+#include "elmer0.h"
+#include "suni1x10gexp_regs.h"
+
+/* 802.3ae 10Gb/s MDIO Manageable Device (MMD) */
+enum {
+    MMD_RESERVED,
+    MMD_PMAPMD,
+    MMD_WIS,
+    MMD_PCS,
+    MMD_PHY_XGXS,	/* XGMII Extender Sublayer */
+    MMD_DTE_XGXS,
+};
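+
+/* With MMD_RESERVED = 0, these values match the clause 45 MMD device
+ * addresses: PMA/PMD = 1, WIS = 2, PCS = 3, PHY XS = 4, DTE XS = 5.
+ */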
+
+enum {
+    PHY_XGXS_CTRL_1,
+    PHY_XGXS_STATUS_1
+};
+
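+/* Convert a PM3393 register number into a TPI byte offset; each register
+ * presumably occupies one 32-bit word, hence the shift by 2.
+ */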
+#define OFFSET(REG_ADDR)    (REG_ADDR << 2)
+
+/* Max frame size PM3393 can handle. Includes Ethernet header and CRC. */
+#define MAX_FRAME_SIZE  9600
+
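+/* Transmit inter-packet gap; 12 is the standard Ethernet minimum,
+ * presumably expressed here in byte times.
+ */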
+#define IPG 12
+#define TXXG_CONF1_VAL ((IPG << SUNI1x10GEXP_BITOFF_TXXG_IPGT) | \
+	SUNI1x10GEXP_BITMSK_TXXG_32BIT_ALIGN | SUNI1x10GEXP_BITMSK_TXXG_CRCEN | \
+	SUNI1x10GEXP_BITMSK_TXXG_PADEN)
+#define RXXG_CONF1_VAL (SUNI1x10GEXP_BITMSK_RXXG_PUREP | 0x14 | \
+	SUNI1x10GEXP_BITMSK_RXXG_FLCHK | SUNI1x10GEXP_BITMSK_RXXG_CRC_STRIP)
+
+/* Update statistics every 15 minutes */
+#define STATS_TICK_SECS (15 * 60)
+
+enum {                     /* RMON registers */
+	RxOctetsReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_1_LOW,
+	RxUnicastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_4_LOW,
+	RxMulticastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_5_LOW,
+	RxBroadcastFramesReceivedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_6_LOW,
+	RxPAUSEMACCtrlFramesReceived = SUNI1x10GEXP_REG_MSTAT_COUNTER_8_LOW,
+	RxFrameCheckSequenceErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_10_LOW,
+	RxFramesLostDueToInternalMACErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_11_LOW,
+	RxSymbolErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_12_LOW,
+	RxInRangeLengthErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_13_LOW,
+	RxFramesTooLongErrors = SUNI1x10GEXP_REG_MSTAT_COUNTER_15_LOW,
+	RxJabbers = SUNI1x10GEXP_REG_MSTAT_COUNTER_16_LOW,
+	RxFragments = SUNI1x10GEXP_REG_MSTAT_COUNTER_17_LOW,
+	RxUndersizedFrames =  SUNI1x10GEXP_REG_MSTAT_COUNTER_18_LOW,
+
+	TxOctetsTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW,
+	TxFramesLostDueToInternalMACTransmissionError = SUNI1x10GEXP_REG_MSTAT_COUNTER_35_LOW,
+	TxTransmitSystemError = SUNI1x10GEXP_REG_MSTAT_COUNTER_36_LOW,
+	TxUnicastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_38_LOW,
+	TxMulticastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_40_LOW,
+	TxBroadcastFramesTransmittedOK = SUNI1x10GEXP_REG_MSTAT_COUNTER_42_LOW,
+	TxPAUSEMACCtrlFramesTransmitted = SUNI1x10GEXP_REG_MSTAT_COUNTER_43_LOW
+};
+
+struct _cmac_instance {
+	u8 enabled;
+	u8 fc;
+	u8 mac_addr[6];
+};
+
+static int pmread(struct cmac *cmac, u32 reg, u32 * data32)
+{
+	t1_tpi_read(cmac->adapter, OFFSET(reg), data32);
+	return 0;
+}
+
+static int pmwrite(struct cmac *cmac, u32 reg, u32 data32)
+{
+	t1_tpi_write(cmac->adapter, OFFSET(reg), data32);
+	return 0;
+}
+
+/* Port reset. */
+static int pm3393_reset(struct cmac *cmac)
+{
+	return 0;
+}
+
+/*
+ * Enable interrupts for the PM3393:
+ *
+ *	1. Enable PM3393 BLOCK interrupts.
+ *	2. Enable PM3393 Master Interrupt bit (INTE).
+ *	3. Enable ELMER's PM3393 bit.
+ *	4. Enable Terminator external interrupt.
+ */
+static int pm3393_interrupt_enable(struct cmac *cmac)
+{
+	u32 pl_intr;
+
+	/* PM3393 - Enabling all hardware block interrupts.
+	 */
+	pmwrite(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE, 0xffff);
+	pmwrite(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE, 0xffff);
+	pmwrite(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE, 0xffff);
+	pmwrite(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE, 0xffff);
+
+	/* Don't interrupt on statistics overflow, we are polling */
+	pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3, 0);
+
+	pmwrite(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE, 0xffff);
+	pmwrite(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK, 0xffff);
+	pmwrite(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE, 0xffff);
+	pmwrite(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE, 0xffff);
+	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_3, 0xffff);
+	pmwrite(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK, 0xffff);
+	pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_3, 0xffff);
+	pmwrite(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK, 0xffff);
+	pmwrite(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE, 0xffff);
+
+	/* PM3393 - Global interrupt enable
+	 */
+	/* TBD XXX Disable for now until we figure out why error interrupts keep asserting. */
+	pmwrite(cmac, SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE,
+		0 /*SUNI1x10GEXP_BITMSK_TOP_INTE */ );
+
+	/* TERMINATOR - PL_INTERRUPTS_EXT */
+	pl_intr = readl(cmac->adapter->regs + A_PL_ENABLE);
+	pl_intr |= F_PL_INTR_EXT;
+	writel(pl_intr, cmac->adapter->regs + A_PL_ENABLE);
+	return 0;
+}
+
+static int pm3393_interrupt_disable(struct cmac *cmac)
+{
+	u32 elmer;
+
+	/* PM3393 - Disabling all hardware block interrupts. */
+	pmwrite(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_3, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_3, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK, 0);
+	pmwrite(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE, 0);
+
+	/* PM3393 - Global interrupt enable */
+	pmwrite(cmac, SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE, 0);
+
+	/* ELMER - External chip interrupts. */
+	t1_tpi_read(cmac->adapter, A_ELMER0_INT_ENABLE, &elmer);
+	elmer &= ~ELMER0_GP_BIT1;
+	t1_tpi_write(cmac->adapter, A_ELMER0_INT_ENABLE, elmer);
+
+	/* TERMINATOR - PL_INTERRUPTS_EXT */
+	/* DO NOT DISABLE TERMINATOR's EXTERNAL INTERRUPTS. ANOTHER CHIP
+	 * COULD WANT THEM ENABLED. We disable PM3393 at the ELMER level.
+	 */
+
+	return 0;
+}
+
+static int pm3393_interrupt_clear(struct cmac *cmac)
+{
+	u32 elmer;
+	u32 pl_intr;
+	u32 val32;
+
+	/* PM3393 - Clearing HW interrupt blocks. Note, this assumes
+	 *          bit WCIMODE=0 for a clear-on-read.
+	 */
+	pmread(cmac, SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_STATUS, &val32);
+	pmread(cmac, SUNI1x10GEXP_REG_XRF_INTERRUPT_STATUS, &val32);
+	pmread(cmac, SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_STATUS, &val32);
+	pmread(cmac, SUNI1x10GEXP_REG_RXOAM_INTERRUPT_STATUS, &val32);
+	pmread(cmac, SUNI1x10GEXP_REG_PL4ODP_INTERRUPT, &val32);
+	pmread(cmac, SUNI1x10GEXP_REG_XTEF_INTERRUPT_STATUS, &val32);
+	pmread(cmac, SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_INTERRUPT, &val32);
+	pmread(cmac, SUNI1x10GEXP_REG_TXOAM_INTERRUPT_STATUS, &val32);
+	pmread(cmac, SUNI1x10GEXP_REG_RXXG_INTERRUPT, &val32);
+	pmread(cmac, SUNI1x10GEXP_REG_TXXG_INTERRUPT, &val32);
+	pmread(cmac, SUNI1x10GEXP_REG_PL4IDU_INTERRUPT, &val32);
+	pmread(cmac, SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_INDICATION,
+	       &val32);
+	pmread(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_STATUS, &val32);
+	pmread(cmac, SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_CHANGE, &val32);
+
+	/* PM3393 - Global interrupt status
+	 */
+	pmread(cmac, SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS, &val32);
+
+	/* ELMER - External chip interrupts.
+	 */
+	t1_tpi_read(cmac->adapter, A_ELMER0_INT_CAUSE, &elmer);
+	elmer |= ELMER0_GP_BIT1;
+	t1_tpi_write(cmac->adapter, A_ELMER0_INT_CAUSE, elmer);
+
+	/* TERMINATOR - PL_INTERRUPTS_EXT
+	 */
+	pl_intr = readl(cmac->adapter->regs + A_PL_CAUSE);
+	pl_intr |= F_PL_INTR_EXT;
+	writel(pl_intr, cmac->adapter->regs + A_PL_CAUSE);
+
+	return 0;
+}
+
+/* Interrupt handler */
+static int pm3393_interrupt_handler(struct cmac *cmac)
+{
+	u32 master_intr_status;
+	/*
+	 * 1. Read the master interrupt register.
+	 * 2. Read the BLOCK interrupt status registers.
+	 * 3. Handle the BLOCK interrupts.
+	 */
+	/* Read the master interrupt status register. */
+	pmread(cmac, SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS,
+	       &master_intr_status);
+
+	/* TBD XXX Let's just clear everything for now. */
+	pm3393_interrupt_clear(cmac);
+
+	return 0;
+}
+
+static int pm3393_enable(struct cmac *cmac, int which)
+{
+	if (which & MAC_DIRECTION_RX)
+		pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_1,
+			(RXXG_CONF1_VAL | SUNI1x10GEXP_BITMSK_RXXG_RXEN));
+
+	if (which & MAC_DIRECTION_TX) {
+		u32 val = TXXG_CONF1_VAL | SUNI1x10GEXP_BITMSK_TXXG_TXEN0;
+
+		if (cmac->instance->fc & PAUSE_RX)
+			val |= SUNI1x10GEXP_BITMSK_TXXG_FCRX;
+		if (cmac->instance->fc & PAUSE_TX)
+			val |= SUNI1x10GEXP_BITMSK_TXXG_FCTX;
+		pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_1, val);
+	}
+
+	cmac->instance->enabled |= which;
+	return 0;
+}
+
+static int pm3393_enable_port(struct cmac *cmac, int which)
+{
+	/* Clear port statistics */
+	pmwrite(cmac, SUNI1x10GEXP_REG_MSTAT_CONTROL,
+		SUNI1x10GEXP_BITMSK_MSTAT_CLEAR);
+	udelay(2);
+	memset(&cmac->stats, 0, sizeof(struct cmac_statistics));
+
+	pm3393_enable(cmac, which);
+
+	/*
+	 * XXX This should be done by the PHY, and preferably not at all.
+	 * The PHY doesn't give us a link status indication on its own, so
+	 * have the link management code query it instead.
+	 */
+	{
+		extern void link_changed(adapter_t *adapter, int port_id);
+
+		link_changed(cmac->adapter, 0);
+	}
+	return 0;
+}
+
+static int pm3393_disable(struct cmac *cmac, int which)
+{
+	if (which & MAC_DIRECTION_RX)
+		pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_CONFIG_1, RXXG_CONF1_VAL);
+	if (which & MAC_DIRECTION_TX)
+		pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_CONFIG_1, TXXG_CONF1_VAL);
+
+	/*
+	 * The disable is graceful; give the PM3393 time. We can't wait very
+	 * long here, as we may be holding locks.
+	 */
+	udelay(20);
+
+	cmac->instance->enabled &= ~which;
+	return 0;
+}
+
+static int pm3393_loopback_enable(struct cmac *cmac)
+{
+	return 0;
+}
+
+static int pm3393_loopback_disable(struct cmac *cmac)
+{
+	return 0;
+}
+
+static int pm3393_set_mtu(struct cmac *cmac, int mtu)
+{
+	int enabled = cmac->instance->enabled;
+
+	/* MAX_FRAME_SIZE includes header + FCS, mtu doesn't */
+	mtu += 14 + 4;
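+	/* e.g. the default 1500-byte MTU becomes a 1518-byte maximum frame
+	 * length below; anything larger than MAX_FRAME_SIZE is rejected.
+	 */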
+	if (mtu > MAX_FRAME_SIZE)
+		return -EINVAL;
+
+	/* Disable Rx/Tx MAC before configuring it. */
+	if (enabled)
+		pm3393_disable(cmac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
+
+	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MAX_FRAME_LENGTH, mtu);
+	pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_MAX_FRAME_SIZE, mtu);
+
+	if (enabled)
+		pm3393_enable(cmac, enabled);
+	return 0;
+}
+
+static u32 calc_crc(u8 *b, int len)
+{
+	int i;
+	u32 crc = (u32)~0;
+
+	/* calculate crc one bit at a time */
+	while (len--) {
+		crc ^= *b++;
+		for (i = 0; i < 8; i++) {
+			if (crc & 0x1)
+				crc = (crc >> 1) ^ 0xedb88320;
+			else
+				crc = (crc >> 1);
+		}
+	}
+
+	/* reverse bits */
+	crc = ((crc >> 4) & 0x0f0f0f0f) | ((crc << 4) & 0xf0f0f0f0);
+	crc = ((crc >> 2) & 0x33333333) | ((crc << 2) & 0xcccccccc);
+	crc = ((crc >> 1) & 0x55555555) | ((crc << 1) & 0xaaaaaaaa);
+	/* swap bytes */
+	crc = (crc >> 16) | (crc << 16);
+	crc = (crc >> 8 & 0x00ff00ff) | (crc << 8 & 0xff00ff00);
+
+	return crc;
+}
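+
+/*
+ * The loop above accumulates a reflected (LSB-first) CRC-32 using the
+ * 0xedb88320 polynomial; the bit reversal within bytes plus the byte swap
+ * amount to a full 32-bit bit reversal, giving the MSB-first form from
+ * which pm3393_set_rx_mode() takes six bits as its multicast hash index.
+ */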
+
+static int pm3393_set_rx_mode(struct cmac *cmac, struct t1_rx_mode *rm)
+{
+	int enabled = cmac->instance->enabled & MAC_DIRECTION_RX;
+	u32 rx_mode;
+
+	/* Disable MAC RX before reconfiguring it */
+	if (enabled)
+		pm3393_disable(cmac, MAC_DIRECTION_RX);
+
+	pmread(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2, &rx_mode);
+	rx_mode &= ~(SUNI1x10GEXP_BITMSK_RXXG_PMODE |
+		     SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN);
+	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2,
+		(u16)rx_mode);
+
+	if (t1_rx_mode_promisc(rm)) {
+		/* Promiscuous mode. */
+		rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_PMODE;
+	}
+	if (t1_rx_mode_allmulti(rm)) {
+		/* Accept all multicast. */
+		pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, 0xffff);
+		pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW, 0xffff);
+		pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH, 0xffff);
+		pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH, 0xffff);
+		rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN;
+	} else if (t1_rx_mode_mc_cnt(rm)) {
+		/* Accept one or more multicast(s). */
+		u8 *addr;
+		int bit;
+		u16 mc_filter[4] = { 0, };
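+		/* The four 16-bit hash registers written below form a
+		 * 64-entry multicast hash table; six bits of the address
+		 * CRC select which hash bit to set.
+		 */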
+
+		while ((addr = t1_get_next_mcaddr(rm))) {
+			bit = (calc_crc(addr, ETH_ALEN) >> 23) & 0x3f;	/* bit[23:28] */
+			mc_filter[bit >> 4] |= 1 << (bit & 0xf);
+		}
+		pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, mc_filter[0]);
+		pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW, mc_filter[1]);
+		pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH, mc_filter[2]);
+		pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH, mc_filter[3]);
+		rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN;
+	}
+
+	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2, (u16)rx_mode);
+
+	if (enabled)
+		pm3393_enable(cmac, MAC_DIRECTION_RX);
+
+	return 0;
+}
+
+static int pm3393_get_speed_duplex_fc(struct cmac *cmac, int *speed,
+				      int *duplex, int *fc)
+{
+	if (speed)
+		*speed = SPEED_10000;
+	if (duplex)
+		*duplex = DUPLEX_FULL;
+	if (fc)
+		*fc = cmac->instance->fc;
+	return 0;
+}
+
+static int pm3393_set_speed_duplex_fc(struct cmac *cmac, int speed, int duplex,
+				      int fc)
+{
+	if (speed >= 0 && speed != SPEED_10000)
+		return -1;
+	if (duplex >= 0 && duplex != DUPLEX_FULL)
+		return -1;
+	if (fc & ~(PAUSE_TX | PAUSE_RX))
+		return -1;
+
+	if (fc != cmac->instance->fc) {
+		cmac->instance->fc = (u8) fc;
+		if (cmac->instance->enabled & MAC_DIRECTION_TX)
+			pm3393_enable(cmac, MAC_DIRECTION_TX);
+	}
+	return 0;
+}
+
+#define RMON_UPDATE(mac, name, stat_name) \
+	{ \
+		t1_tpi_read((mac)->adapter, OFFSET(name), &val0);	\
+		t1_tpi_read((mac)->adapter, OFFSET(((name)+1)), &val1); \
+		t1_tpi_read((mac)->adapter, OFFSET(((name)+2)), &val2); \
+		(mac)->stats.stat_name = ((u64)val0 & 0xffff) | \
+						(((u64)val1 & 0xffff) << 16) | \
+						(((u64)val2 & 0xff) << 32) | \
+						((mac)->stats.stat_name & \
+							(~(u64)0 << 40)); \
+		if (ro &	\
+			((name -  SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW) >> 2)) \
+			(mac)->stats.stat_name += ((u64)1 << 40); \
+	}
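+
+/*
+ * The MSTAT counters are 40 bits wide in hardware (the three reads above
+ * contribute 16 + 16 + 8 bits); the driver keeps bits 63:40 of each
+ * statistic in software and adds 2^40 when the rollover word read in
+ * pm3393_update_statistics() indicates that the counter wrapped.
+ */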
+
+static const struct cmac_statistics *pm3393_update_statistics(struct cmac *mac,
+							      int flag)
+{
+	u64	ro;
+	u32	val0, val1, val2, val3;
+
+	/* Snap the counters */
+	pmwrite(mac, SUNI1x10GEXP_REG_MSTAT_CONTROL,
+		SUNI1x10GEXP_BITMSK_MSTAT_SNAP);
+
+	/* Counter rollover, clear on read */
+	pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_0, &val0);
+	pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_1, &val1);
+	pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_2, &val2);
+	pmread(mac, SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_3, &val3);
+	ro = ((u64)val0 & 0xffff) | (((u64)val1 & 0xffff) << 16) |
+		(((u64)val2 & 0xffff) << 32) | (((u64)val3 & 0xffff) << 48);
+
+	/* Rx stats */
+	RMON_UPDATE(mac, RxOctetsReceivedOK, RxOctetsOK);
+	RMON_UPDATE(mac, RxUnicastFramesReceivedOK, RxUnicastFramesOK);
+	RMON_UPDATE(mac, RxMulticastFramesReceivedOK, RxMulticastFramesOK);
+	RMON_UPDATE(mac, RxBroadcastFramesReceivedOK, RxBroadcastFramesOK);
+	RMON_UPDATE(mac, RxPAUSEMACCtrlFramesReceived, RxPauseFrames);
+	RMON_UPDATE(mac, RxFrameCheckSequenceErrors, RxFCSErrors);
+	RMON_UPDATE(mac, RxFramesLostDueToInternalMACErrors,
+				RxInternalMACRcvError);
+	RMON_UPDATE(mac, RxSymbolErrors, RxSymbolErrors);
+	RMON_UPDATE(mac, RxInRangeLengthErrors, RxInRangeLengthErrors);
+	RMON_UPDATE(mac, RxFramesTooLongErrors , RxFrameTooLongErrors);
+	RMON_UPDATE(mac, RxJabbers, RxJabberErrors);
+	RMON_UPDATE(mac, RxFragments, RxRuntErrors);
+	RMON_UPDATE(mac, RxUndersizedFrames, RxRuntErrors);
+
+	/* Tx stats */
+	RMON_UPDATE(mac, TxOctetsTransmittedOK, TxOctetsOK);
+	RMON_UPDATE(mac, TxFramesLostDueToInternalMACTransmissionError,
+				TxInternalMACXmitError);
+	RMON_UPDATE(mac, TxTransmitSystemError, TxFCSErrors);
+	RMON_UPDATE(mac, TxUnicastFramesTransmittedOK, TxUnicastFramesOK);
+	RMON_UPDATE(mac, TxMulticastFramesTransmittedOK, TxMulticastFramesOK);
+	RMON_UPDATE(mac, TxBroadcastFramesTransmittedOK, TxBroadcastFramesOK);
+	RMON_UPDATE(mac, TxPAUSEMACCtrlFramesTransmitted, TxPauseFrames);
+
+	return &mac->stats;
+}
+
+static int pm3393_macaddress_get(struct cmac *cmac, u8 mac_addr[6])
+{
+	memcpy(mac_addr, cmac->instance->mac_addr, 6);
+	return 0;
+}
+
+static int pm3393_macaddress_set(struct cmac *cmac, u8 ma[6])
+{
+	u32 val, lo, mid, hi, enabled = cmac->instance->enabled;
+
+	/*
+	 * MAC addr: 00:07:43:00:13:09
+	 *
+	 * ma[5] = 0x09
+	 * ma[4] = 0x13
+	 * ma[3] = 0x00
+	 * ma[2] = 0x43
+	 * ma[1] = 0x07
+	 * ma[0] = 0x00
+	 *
+	 * The PM3393 requires byte swapping and reverse order entry
+	 * when programming MAC addresses:
+	 *
+	 * low_bits[15:0]    = ma[1]:ma[0]
+	 * mid_bits[31:16]   = ma[3]:ma[2]
+	 * high_bits[47:32]  = ma[5]:ma[4]
+	 */
+
+	/* Store local copy */
+	memcpy(cmac->instance->mac_addr, ma, 6);
+
+	lo = ((u32) ma[1] << 8) | (u32) ma[0];
+	mid = ((u32) ma[3] << 8) | (u32) ma[2];
+	hi = ((u32) ma[5] << 8) | (u32) ma[4];
+
+	/* Disable Rx/Tx MAC before configuring it. */
+	if (enabled)
+		pm3393_disable(cmac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
+
+	/* Set RXXG Station Address */
+	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_15_0, lo);
+	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_31_16, mid);
+	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_SA_47_32, hi);
+
+	/* Set TXXG Station Address */
+	pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_15_0, lo);
+	pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_31_16, mid);
+	pmwrite(cmac, SUNI1x10GEXP_REG_TXXG_SA_47_32, hi);
+
+	/* Setup Exact Match Filter 1 with our MAC address
+	 *
+	 * Must disable exact match filter before configuring it.
+	 */
+	pmread(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, &val);
+	val &= 0xff0f;
+	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, val);
+
+	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_LOW, lo);
+	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_MID, mid);
+	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_HIGH, hi);
+
+	val |= 0x0090;
+	pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0, val);
+
+	if (enabled)
+		pm3393_enable(cmac, enabled);
+	return 0;
+}
+
+static void pm3393_destroy(struct cmac *cmac)
+{
+	kfree(cmac);
+}
+
+static struct cmac_ops pm3393_ops = {
+	.destroy                 = pm3393_destroy,
+	.reset                   = pm3393_reset,
+	.interrupt_enable        = pm3393_interrupt_enable,
+	.interrupt_disable       = pm3393_interrupt_disable,
+	.interrupt_clear         = pm3393_interrupt_clear,
+	.interrupt_handler       = pm3393_interrupt_handler,
+	.enable                  = pm3393_enable_port,
+	.disable                 = pm3393_disable,
+	.loopback_enable         = pm3393_loopback_enable,
+	.loopback_disable        = pm3393_loopback_disable,
+	.set_mtu                 = pm3393_set_mtu,
+	.set_rx_mode             = pm3393_set_rx_mode,
+	.get_speed_duplex_fc     = pm3393_get_speed_duplex_fc,
+	.set_speed_duplex_fc     = pm3393_set_speed_duplex_fc,
+	.statistics_update       = pm3393_update_statistics,
+	.macaddress_get          = pm3393_macaddress_get,
+	.macaddress_set          = pm3393_macaddress_set
+};
+
+static struct cmac *pm3393_mac_create(adapter_t *adapter, int index)
+{
+	struct cmac *cmac;
+
+	cmac = kmalloc(sizeof(*cmac) + sizeof(cmac_instance), GFP_KERNEL);
+	if (!cmac)
+		return NULL;
+	memset(cmac, 0, sizeof(*cmac));
+
+	cmac->ops = &pm3393_ops;
+	cmac->instance = (cmac_instance *) (cmac + 1);
+	cmac->adapter = adapter;
+	cmac->instance->fc = PAUSE_TX | PAUSE_RX;
+
+	t1_tpi_write(adapter, OFFSET(0x0001), 0x00008000);
+	t1_tpi_write(adapter, OFFSET(0x0001), 0x00000000);
+	t1_tpi_write(adapter, OFFSET(0x2308), 0x00009800);
+	t1_tpi_write(adapter, OFFSET(0x2305), 0x00001001);   /* PL4IO Enable */
+	t1_tpi_write(adapter, OFFSET(0x2320), 0x00008800);
+	t1_tpi_write(adapter, OFFSET(0x2321), 0x00008800);
+	t1_tpi_write(adapter, OFFSET(0x2322), 0x00008800);
+	t1_tpi_write(adapter, OFFSET(0x2323), 0x00008800);
+	t1_tpi_write(adapter, OFFSET(0x2324), 0x00008800);
+	t1_tpi_write(adapter, OFFSET(0x2325), 0x00008800);
+	t1_tpi_write(adapter, OFFSET(0x2326), 0x00008800);
+	t1_tpi_write(adapter, OFFSET(0x2327), 0x00008800);
+	t1_tpi_write(adapter, OFFSET(0x2328), 0x00008800);
+	t1_tpi_write(adapter, OFFSET(0x2329), 0x00008800);
+	t1_tpi_write(adapter, OFFSET(0x232a), 0x00008800);
+	t1_tpi_write(adapter, OFFSET(0x232b), 0x00008800);
+	t1_tpi_write(adapter, OFFSET(0x232c), 0x00008800);
+	t1_tpi_write(adapter, OFFSET(0x232d), 0x00008800);
+	t1_tpi_write(adapter, OFFSET(0x232e), 0x00008800);
+	t1_tpi_write(adapter, OFFSET(0x232f), 0x00008800);
+	t1_tpi_write(adapter, OFFSET(0x230d), 0x00009c00);
+	t1_tpi_write(adapter, OFFSET(0x2304), 0x00000202);	/* PL4IO Calendar Repetitions */
+
+	t1_tpi_write(adapter, OFFSET(0x3200), 0x00008080);	/* EFLX Enable */
+	t1_tpi_write(adapter, OFFSET(0x3210), 0x00000000);	/* EFLX Channel Deprovision */
+	t1_tpi_write(adapter, OFFSET(0x3203), 0x00000000);	/* EFLX Low Limit */
+	t1_tpi_write(adapter, OFFSET(0x3204), 0x00000040);	/* EFLX High Limit */
+	t1_tpi_write(adapter, OFFSET(0x3205), 0x000002cc);	/* EFLX Almost Full */
+	t1_tpi_write(adapter, OFFSET(0x3206), 0x00000199);	/* EFLX Almost Empty */
+	t1_tpi_write(adapter, OFFSET(0x3207), 0x00000240);	/* EFLX Cut Through Threshold */
+	t1_tpi_write(adapter, OFFSET(0x3202), 0x00000000);	/* EFLX Indirect Register Update */
+	t1_tpi_write(adapter, OFFSET(0x3210), 0x00000001);	/* EFLX Channel Provision */
+	t1_tpi_write(adapter, OFFSET(0x3208), 0x0000ffff);	/* EFLX Undocumented */
+	t1_tpi_write(adapter, OFFSET(0x320a), 0x0000ffff);	/* EFLX Undocumented */
+	t1_tpi_write(adapter, OFFSET(0x320c), 0x0000ffff);	/* EFLX enable overflow interrupt. The other bits are undocumented */
+	t1_tpi_write(adapter, OFFSET(0x320e), 0x0000ffff);	/* EFLX Undocumented */
+
+	t1_tpi_write(adapter, OFFSET(0x2200), 0x0000c000);	/* IFLX Configuration - enable */
+	t1_tpi_write(adapter, OFFSET(0x2201), 0x00000000);	/* IFLX Channel Deprovision */
+	t1_tpi_write(adapter, OFFSET(0x220e), 0x00000000);	/* IFLX Low Limit */
+	t1_tpi_write(adapter, OFFSET(0x220f), 0x00000100);	/* IFLX High Limit */
+	t1_tpi_write(adapter, OFFSET(0x2210), 0x00000c00);	/* IFLX Almost Full Limit */
+	t1_tpi_write(adapter, OFFSET(0x2211), 0x00000599);	/* IFLX Almost Empty Limit */
+	t1_tpi_write(adapter, OFFSET(0x220d), 0x00000000);	/* IFLX Indirect Register Update */
+	t1_tpi_write(adapter, OFFSET(0x2201), 0x00000001);	/* IFLX Channel Provision */
+	t1_tpi_write(adapter, OFFSET(0x2203), 0x0000ffff);	/* IFLX Undocumented */
+	t1_tpi_write(adapter, OFFSET(0x2205), 0x0000ffff);	/* IFLX Undocumented */
+	t1_tpi_write(adapter, OFFSET(0x2209), 0x0000ffff);	/* IFLX Enable overflow interrupt. The other bits are undocumented */
+
+	t1_tpi_write(adapter, OFFSET(0x2241), 0xfffffffe);	/* PL4MOS Undocumented */
+	t1_tpi_write(adapter, OFFSET(0x2242), 0x0000ffff);	/* PL4MOS Undocumented */
+	t1_tpi_write(adapter, OFFSET(0x2243), 0x00000008);	/* PL4MOS Starving Burst Size */
+	t1_tpi_write(adapter, OFFSET(0x2244), 0x00000008);	/* PL4MOS Hungry Burst Size */
+	t1_tpi_write(adapter, OFFSET(0x2245), 0x00000008);	/* PL4MOS Transfer Size */
+	t1_tpi_write(adapter, OFFSET(0x2240), 0x00000005);	/* PL4MOS Disable */
+
+	t1_tpi_write(adapter, OFFSET(0x2280), 0x00002103);	/* PL4ODP Training Repeat and SOP rule */
+	t1_tpi_write(adapter, OFFSET(0x2284), 0x00000000);	/* PL4ODP MAX_T setting */
+
+	t1_tpi_write(adapter, OFFSET(0x3280), 0x00000087);	/* PL4IDU Enable data forward, port state machine. Set ALLOW_NON_ZERO_OLB */
+	t1_tpi_write(adapter, OFFSET(0x3282), 0x0000001f);	/* PL4IDU Enable Dip4 check error interrupts */
+
+	t1_tpi_write(adapter, OFFSET(0x3040), 0x0c32);	/* # TXXG Config */
+	/* For T1, use timer-based MAC flow control. */
+	t1_tpi_write(adapter, OFFSET(0x304d), 0x8000);
+	t1_tpi_write(adapter, OFFSET(0x2040), 0x059c);	/* # RXXG Config */
+	t1_tpi_write(adapter, OFFSET(0x2049), 0x0001);	/* # RXXG Cut Through */
+	t1_tpi_write(adapter, OFFSET(0x2070), 0x0000);	/* # Disable promiscuous mode */
+
+	/* Setup Exact Match Filter 0 to allow broadcast packets.
+	 */
+	t1_tpi_write(adapter, OFFSET(0x206e), 0x0000);	/* # Disable Match Enable bit */
+	t1_tpi_write(adapter, OFFSET(0x204a), 0xffff);	/* # low addr */
+	t1_tpi_write(adapter, OFFSET(0x204b), 0xffff);	/* # mid addr */
+	t1_tpi_write(adapter, OFFSET(0x204c), 0xffff);	/* # high addr */
+	t1_tpi_write(adapter, OFFSET(0x206e), 0x0009);	/* # Enable Match Enable bit */
+
+	t1_tpi_write(adapter, OFFSET(0x0003), 0x0000);	/* # NO SOP/ PAD_EN setup */
+	t1_tpi_write(adapter, OFFSET(0x0100), 0x0ff0);	/* # RXEQB disabled */
+	t1_tpi_write(adapter, OFFSET(0x0101), 0x0f0f);	/* # No Preemphasis */
+
+	return cmac;
+}
+
+static int pm3393_mac_reset(adapter_t * adapter)
+{
+	u32 val;
+	u32 x;
+	u32 is_pl4_reset_finished;
+	u32 is_pl4_outof_lock;
+	u32 is_xaui_mabc_pll_locked;
+	u32 successful_reset;
+	int i;
+
+	/* The following steps are required to properly reset
+	 * the PM3393. This information is provided in the
+	 * PM3393 datasheet (Issue 2: November 2002)
+	 * section 13.1 -- Device Reset.
+	 *
+	 * The PM3393 has three types of components that are
+	 * individually reset:
+	 *
+	 * DRESETB      - Digital circuitry
+	 * PL4_ARESETB  - PL4 analog circuitry
+	 * XAUI_ARESETB - XAUI bus analog circuitry
+	 *
+	 * Steps to reset PM3393 using RSTB pin:
+	 *
+	 * 1. Assert RSTB pin low (write 0).
+	 * 2. Wait at least 1ms to initiate a complete initialization of
+	 *    the device.
+	 * 3. Wait until all external clocks and REFSEL are stable.
+	 * 4. Wait a minimum of 1ms (after the external clocks and REFSEL
+	 *    are stable).
+	 * 5. De-assert RSTB (write 1).
+	 * 6. Wait ~14ms until the internal timers expire.
+	 *    - Allows the analog clock synthesizer (PL4CSU) to stabilize to
+	 *      the selected reference frequency before allowing the digital
+	 *      portion of the device to operate.
+	 * 7. Wait at least 200us for the XAUI interface to stabilize.
+	 * 8. Verify the PM3393 came out of reset successfully.
+	 *    Set the successful-reset flag if everything worked, else try
+	 *    again a few more times.
+	 */
+
+	successful_reset = 0;
+	for (i = 0; i < 3 && !successful_reset; i++) {
+		/* 1 */
+		t1_tpi_read(adapter, A_ELMER0_GPO, &val);
+		val &= ~1;
+		t1_tpi_write(adapter, A_ELMER0_GPO, val);
+
+		/* 2 */
+		msleep(1);
+
+		/* 3 */
+		msleep(1);
+
+		/* 4 */
+		msleep(2 /*1 extra ms for safety */ );
+
+		/* 5 */
+		val |= 1;
+		t1_tpi_write(adapter, A_ELMER0_GPO, val);
+
+		/* 6 */
+		msleep(15 /*1 extra ms for safety */ );
+
+		/* 7 */
+		msleep(1);
+
+		/* 8 */
+
+		/* Has PL4 analog block come out of reset correctly? */
+		t1_tpi_read(adapter, OFFSET(SUNI1x10GEXP_REG_DEVICE_STATUS), &val);
+		is_pl4_reset_finished = (val & SUNI1x10GEXP_BITMSK_TOP_EXPIRED);
+
+		/* TBD XXX SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL gets locked
+		 *         later in the init sequence; figure out why.
+		 */
+
+		/* Have all PL4 block clocks locked? */
+		x = (SUNI1x10GEXP_BITMSK_TOP_PL4_ID_DOOL
+		     /*| SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL */  |
+		     SUNI1x10GEXP_BITMSK_TOP_PL4_ID_ROOL |
+		     SUNI1x10GEXP_BITMSK_TOP_PL4_IS_ROOL |
+		     SUNI1x10GEXP_BITMSK_TOP_PL4_OUT_ROOL);
+		is_pl4_outof_lock = (val & x);
+
+		/* ??? If this fails, we might be able to software-reset the
+		 *     XAUI part and try to recover, saving us from another
+		 *     HW reset.
+		 */
+		/* Has the XAUI MABC PLL circuitry stabilized? */
+		is_xaui_mabc_pll_locked =
+		    (val & SUNI1x10GEXP_BITMSK_TOP_SXRA_EXPIRED);
+
+		successful_reset = (is_pl4_reset_finished && !is_pl4_outof_lock
+				    && is_xaui_mabc_pll_locked);
+	}
+	return successful_reset ? 0 : 1;
+}
+
+struct gmac t1_pm3393_ops = {
+	STATS_TICK_SECS,
+	pm3393_mac_create,
+	pm3393_mac_reset
+};
diff --git a/drivers/net/chelsio/regs.h b/drivers/net/chelsio/regs.h
new file mode 100644
index 0000000..b90e11f
--- /dev/null
+++ b/drivers/net/chelsio/regs.h
@@ -0,0 +1,468 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: regs.h                                                              *
+ * $Revision: 1.8 $                                                          *
+ * $Date: 2005/06/21 18:29:48 $                                              *
+ * Description:                                                              *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, write to the Free Software Foundation, Inc.,   *
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#ifndef _CXGB_REGS_H_
+#define _CXGB_REGS_H_
+
+/* SGE registers */
+#define A_SG_CONTROL 0x0
+
+#define S_CMDQ0_ENABLE    0
+#define V_CMDQ0_ENABLE(x) ((x) << S_CMDQ0_ENABLE)
+#define F_CMDQ0_ENABLE    V_CMDQ0_ENABLE(1U)
+
+#define S_CMDQ1_ENABLE    1
+#define V_CMDQ1_ENABLE(x) ((x) << S_CMDQ1_ENABLE)
+#define F_CMDQ1_ENABLE    V_CMDQ1_ENABLE(1U)
+
+#define S_FL0_ENABLE    2
+#define V_FL0_ENABLE(x) ((x) << S_FL0_ENABLE)
+#define F_FL0_ENABLE    V_FL0_ENABLE(1U)
+
+#define S_FL1_ENABLE    3
+#define V_FL1_ENABLE(x) ((x) << S_FL1_ENABLE)
+#define F_FL1_ENABLE    V_FL1_ENABLE(1U)
+
+#define S_CPL_ENABLE    4
+#define V_CPL_ENABLE(x) ((x) << S_CPL_ENABLE)
+#define F_CPL_ENABLE    V_CPL_ENABLE(1U)
+
+#define S_RESPONSE_QUEUE_ENABLE    5
+#define V_RESPONSE_QUEUE_ENABLE(x) ((x) << S_RESPONSE_QUEUE_ENABLE)
+#define F_RESPONSE_QUEUE_ENABLE    V_RESPONSE_QUEUE_ENABLE(1U)
+
+#define S_CMDQ_PRIORITY    6
+#define M_CMDQ_PRIORITY    0x3
+#define V_CMDQ_PRIORITY(x) ((x) << S_CMDQ_PRIORITY)
+#define G_CMDQ_PRIORITY(x) (((x) >> S_CMDQ_PRIORITY) & M_CMDQ_PRIORITY)
+
+#define S_DISABLE_CMDQ1_GTS    9
+#define V_DISABLE_CMDQ1_GTS(x) ((x) << S_DISABLE_CMDQ1_GTS)
+#define F_DISABLE_CMDQ1_GTS    V_DISABLE_CMDQ1_GTS(1U)
+
+#define S_DISABLE_FL0_GTS    10
+#define V_DISABLE_FL0_GTS(x) ((x) << S_DISABLE_FL0_GTS)
+#define F_DISABLE_FL0_GTS    V_DISABLE_FL0_GTS(1U)
+
+#define S_DISABLE_FL1_GTS    11
+#define V_DISABLE_FL1_GTS(x) ((x) << S_DISABLE_FL1_GTS)
+#define F_DISABLE_FL1_GTS    V_DISABLE_FL1_GTS(1U)
+
+#define S_ENABLE_BIG_ENDIAN    12
+#define V_ENABLE_BIG_ENDIAN(x) ((x) << S_ENABLE_BIG_ENDIAN)
+#define F_ENABLE_BIG_ENDIAN    V_ENABLE_BIG_ENDIAN(1U)
+
+#define S_ISCSI_COALESCE    14
+#define V_ISCSI_COALESCE(x) ((x) << S_ISCSI_COALESCE)
+#define F_ISCSI_COALESCE    V_ISCSI_COALESCE(1U)
+
+#define S_RX_PKT_OFFSET    15
+#define V_RX_PKT_OFFSET(x) ((x) << S_RX_PKT_OFFSET)
+
+#define S_VLAN_XTRACT    18
+#define V_VLAN_XTRACT(x) ((x) << S_VLAN_XTRACT)
+#define F_VLAN_XTRACT    V_VLAN_XTRACT(1U)
+
+#define A_SG_DOORBELL 0x4
+#define A_SG_CMD0BASELWR 0x8
+#define A_SG_CMD0BASEUPR 0xc
+#define A_SG_CMD1BASELWR 0x10
+#define A_SG_CMD1BASEUPR 0x14
+#define A_SG_FL0BASELWR 0x18
+#define A_SG_FL0BASEUPR 0x1c
+#define A_SG_FL1BASELWR 0x20
+#define A_SG_FL1BASEUPR 0x24
+#define A_SG_CMD0SIZE 0x28
+#define A_SG_FL0SIZE 0x2c
+#define A_SG_RSPSIZE 0x30
+#define A_SG_RSPBASELWR 0x34
+#define A_SG_RSPBASEUPR 0x38
+#define A_SG_FLTHRESHOLD 0x3c
+#define A_SG_RSPQUEUECREDIT 0x40
+#define A_SG_SLEEPING 0x48
+#define A_SG_INTRTIMER 0x4c
+#define A_SG_CMD1SIZE 0xb0
+#define A_SG_FL1SIZE 0xb4
+#define A_SG_INT_ENABLE 0xb8
+
+#define S_RESPQ_EXHAUSTED    0
+#define V_RESPQ_EXHAUSTED(x) ((x) << S_RESPQ_EXHAUSTED)
+#define F_RESPQ_EXHAUSTED    V_RESPQ_EXHAUSTED(1U)
+
+#define S_RESPQ_OVERFLOW    1
+#define V_RESPQ_OVERFLOW(x) ((x) << S_RESPQ_OVERFLOW)
+#define F_RESPQ_OVERFLOW    V_RESPQ_OVERFLOW(1U)
+
+#define S_FL_EXHAUSTED    2
+#define V_FL_EXHAUSTED(x) ((x) << S_FL_EXHAUSTED)
+#define F_FL_EXHAUSTED    V_FL_EXHAUSTED(1U)
+
+#define S_PACKET_TOO_BIG    3
+#define V_PACKET_TOO_BIG(x) ((x) << S_PACKET_TOO_BIG)
+#define F_PACKET_TOO_BIG    V_PACKET_TOO_BIG(1U)
+
+#define S_PACKET_MISMATCH    4
+#define V_PACKET_MISMATCH(x) ((x) << S_PACKET_MISMATCH)
+#define F_PACKET_MISMATCH    V_PACKET_MISMATCH(1U)
+
+#define A_SG_INT_CAUSE 0xbc
+#define A_SG_RESPACCUTIMER 0xc0
+
+/* MC3 registers */
+
+#define S_READY    1
+#define V_READY(x) ((x) << S_READY)
+#define F_READY    V_READY(1U)
+
+/* MC4 registers */
+
+#define A_MC4_CFG 0x180
+#define S_MC4_SLOW    25
+#define V_MC4_SLOW(x) ((x) << S_MC4_SLOW)
+#define F_MC4_SLOW    V_MC4_SLOW(1U)
+
+/* TPI registers */
+
+#define A_TPI_ADDR 0x280
+#define A_TPI_WR_DATA 0x284
+#define A_TPI_RD_DATA 0x288
+#define A_TPI_CSR 0x28c
+
+#define S_TPIWR    0
+#define V_TPIWR(x) ((x) << S_TPIWR)
+#define F_TPIWR    V_TPIWR(1U)
+
+#define S_TPIRDY    1
+#define V_TPIRDY(x) ((x) << S_TPIRDY)
+#define F_TPIRDY    V_TPIRDY(1U)
+
+#define A_TPI_PAR 0x29c
+
+#define S_TPIPAR    0
+#define M_TPIPAR    0x7f
+#define V_TPIPAR(x) ((x) << S_TPIPAR)
+#define G_TPIPAR(x) (((x) >> S_TPIPAR) & M_TPIPAR)
+
+/* TP registers */
+
+#define A_TP_IN_CONFIG 0x300
+
+#define S_TP_IN_CSPI_CPL    3
+#define V_TP_IN_CSPI_CPL(x) ((x) << S_TP_IN_CSPI_CPL)
+#define F_TP_IN_CSPI_CPL    V_TP_IN_CSPI_CPL(1U)
+
+#define S_TP_IN_CSPI_CHECK_IP_CSUM    5
+#define V_TP_IN_CSPI_CHECK_IP_CSUM(x) ((x) << S_TP_IN_CSPI_CHECK_IP_CSUM)
+#define F_TP_IN_CSPI_CHECK_IP_CSUM    V_TP_IN_CSPI_CHECK_IP_CSUM(1U)
+
+#define S_TP_IN_CSPI_CHECK_TCP_CSUM    6
+#define V_TP_IN_CSPI_CHECK_TCP_CSUM(x) ((x) << S_TP_IN_CSPI_CHECK_TCP_CSUM)
+#define F_TP_IN_CSPI_CHECK_TCP_CSUM    V_TP_IN_CSPI_CHECK_TCP_CSUM(1U)
+
+#define S_TP_IN_ESPI_ETHERNET    8
+#define V_TP_IN_ESPI_ETHERNET(x) ((x) << S_TP_IN_ESPI_ETHERNET)
+#define F_TP_IN_ESPI_ETHERNET    V_TP_IN_ESPI_ETHERNET(1U)
+
+#define S_TP_IN_ESPI_CHECK_IP_CSUM    12
+#define V_TP_IN_ESPI_CHECK_IP_CSUM(x) ((x) << S_TP_IN_ESPI_CHECK_IP_CSUM)
+#define F_TP_IN_ESPI_CHECK_IP_CSUM    V_TP_IN_ESPI_CHECK_IP_CSUM(1U)
+
+#define S_TP_IN_ESPI_CHECK_TCP_CSUM    13
+#define V_TP_IN_ESPI_CHECK_TCP_CSUM(x) ((x) << S_TP_IN_ESPI_CHECK_TCP_CSUM)
+#define F_TP_IN_ESPI_CHECK_TCP_CSUM    V_TP_IN_ESPI_CHECK_TCP_CSUM(1U)
+
+#define S_OFFLOAD_DISABLE    14
+#define V_OFFLOAD_DISABLE(x) ((x) << S_OFFLOAD_DISABLE)
+#define F_OFFLOAD_DISABLE    V_OFFLOAD_DISABLE(1U)
+
+#define A_TP_OUT_CONFIG 0x304
+
+#define S_TP_OUT_CSPI_CPL    2
+#define V_TP_OUT_CSPI_CPL(x) ((x) << S_TP_OUT_CSPI_CPL)
+#define F_TP_OUT_CSPI_CPL    V_TP_OUT_CSPI_CPL(1U)
+
+#define S_TP_OUT_ESPI_ETHERNET    6
+#define V_TP_OUT_ESPI_ETHERNET(x) ((x) << S_TP_OUT_ESPI_ETHERNET)
+#define F_TP_OUT_ESPI_ETHERNET    V_TP_OUT_ESPI_ETHERNET(1U)
+
+#define S_TP_OUT_ESPI_GENERATE_IP_CSUM    10
+#define V_TP_OUT_ESPI_GENERATE_IP_CSUM(x) ((x) << S_TP_OUT_ESPI_GENERATE_IP_CSUM)
+#define F_TP_OUT_ESPI_GENERATE_IP_CSUM    V_TP_OUT_ESPI_GENERATE_IP_CSUM(1U)
+
+#define S_TP_OUT_ESPI_GENERATE_TCP_CSUM    11
+#define V_TP_OUT_ESPI_GENERATE_TCP_CSUM(x) ((x) << S_TP_OUT_ESPI_GENERATE_TCP_CSUM)
+#define F_TP_OUT_ESPI_GENERATE_TCP_CSUM    V_TP_OUT_ESPI_GENERATE_TCP_CSUM(1U)
+
+#define A_TP_GLOBAL_CONFIG 0x308
+
+#define S_IP_TTL    0
+#define M_IP_TTL    0xff
+#define V_IP_TTL(x) ((x) << S_IP_TTL)
+
+#define S_TCP_CSUM    11
+#define V_TCP_CSUM(x) ((x) << S_TCP_CSUM)
+#define F_TCP_CSUM    V_TCP_CSUM(1U)
+
+#define S_UDP_CSUM    12
+#define V_UDP_CSUM(x) ((x) << S_UDP_CSUM)
+#define F_UDP_CSUM    V_UDP_CSUM(1U)
+
+#define S_IP_CSUM    13
+#define V_IP_CSUM(x) ((x) << S_IP_CSUM)
+#define F_IP_CSUM    V_IP_CSUM(1U)
+
+#define S_PATH_MTU    15
+#define V_PATH_MTU(x) ((x) << S_PATH_MTU)
+#define F_PATH_MTU    V_PATH_MTU(1U)
+
+#define S_5TUPLE_LOOKUP    17
+#define V_5TUPLE_LOOKUP(x) ((x) << S_5TUPLE_LOOKUP)
+
+#define S_SYN_COOKIE_PARAMETER    26
+#define V_SYN_COOKIE_PARAMETER(x) ((x) << S_SYN_COOKIE_PARAMETER)
+
+#define A_TP_PC_CONFIG 0x348
+#define S_DIS_TX_FILL_WIN_PUSH    12
+#define V_DIS_TX_FILL_WIN_PUSH(x) ((x) << S_DIS_TX_FILL_WIN_PUSH)
+#define F_DIS_TX_FILL_WIN_PUSH    V_DIS_TX_FILL_WIN_PUSH(1U)
+
+#define S_TP_PC_REV    30
+#define M_TP_PC_REV    0x3
+#define G_TP_PC_REV(x) (((x) >> S_TP_PC_REV) & M_TP_PC_REV)
+#define A_TP_RESET 0x44c
+#define S_TP_RESET    0
+#define V_TP_RESET(x) ((x) << S_TP_RESET)
+#define F_TP_RESET    V_TP_RESET(1U)
+
+#define A_TP_INT_ENABLE 0x470
+#define A_TP_INT_CAUSE 0x474
+#define A_TP_TX_DROP_CONFIG 0x4b8
+
+#define S_ENABLE_TX_DROP    31
+#define V_ENABLE_TX_DROP(x) ((x) << S_ENABLE_TX_DROP)
+#define F_ENABLE_TX_DROP    V_ENABLE_TX_DROP(1U)
+
+#define S_ENABLE_TX_ERROR    30
+#define V_ENABLE_TX_ERROR(x) ((x) << S_ENABLE_TX_ERROR)
+#define F_ENABLE_TX_ERROR    V_ENABLE_TX_ERROR(1U)
+
+#define S_DROP_TICKS_CNT    4
+#define V_DROP_TICKS_CNT(x) ((x) << S_DROP_TICKS_CNT)
+
+#define S_NUM_PKTS_DROPPED    0
+#define V_NUM_PKTS_DROPPED(x) ((x) << S_NUM_PKTS_DROPPED)
+
+/* CSPI registers */
+
+#define S_DIP4ERR    0
+#define V_DIP4ERR(x) ((x) << S_DIP4ERR)
+#define F_DIP4ERR    V_DIP4ERR(1U)
+
+#define S_RXDROP    1
+#define V_RXDROP(x) ((x) << S_RXDROP)
+#define F_RXDROP    V_RXDROP(1U)
+
+#define S_TXDROP    2
+#define V_TXDROP(x) ((x) << S_TXDROP)
+#define F_TXDROP    V_TXDROP(1U)
+
+#define S_RXOVERFLOW    3
+#define V_RXOVERFLOW(x) ((x) << S_RXOVERFLOW)
+#define F_RXOVERFLOW    V_RXOVERFLOW(1U)
+
+#define S_RAMPARITYERR    4
+#define V_RAMPARITYERR(x) ((x) << S_RAMPARITYERR)
+#define F_RAMPARITYERR    V_RAMPARITYERR(1U)
+
+/* ESPI registers */
+
+#define A_ESPI_SCH_TOKEN0 0x880
+#define A_ESPI_SCH_TOKEN1 0x884
+#define A_ESPI_SCH_TOKEN2 0x888
+#define A_ESPI_SCH_TOKEN3 0x88c
+#define A_ESPI_RX_FIFO_ALMOST_EMPTY_WATERMARK 0x890
+#define A_ESPI_RX_FIFO_ALMOST_FULL_WATERMARK 0x894
+#define A_ESPI_CALENDAR_LENGTH 0x898
+#define A_PORT_CONFIG 0x89c
+
+#define S_RX_NPORTS    0
+#define V_RX_NPORTS(x) ((x) << S_RX_NPORTS)
+
+#define S_TX_NPORTS    8
+#define V_TX_NPORTS(x) ((x) << S_TX_NPORTS)
+
+#define A_ESPI_FIFO_STATUS_ENABLE 0x8a0
+
+#define S_RXSTATUSENABLE    0
+#define V_RXSTATUSENABLE(x) ((x) << S_RXSTATUSENABLE)
+#define F_RXSTATUSENABLE    V_RXSTATUSENABLE(1U)
+
+#define S_INTEL1010MODE    4
+#define V_INTEL1010MODE(x) ((x) << S_INTEL1010MODE)
+#define F_INTEL1010MODE    V_INTEL1010MODE(1U)
+
+#define A_ESPI_MAXBURST1_MAXBURST2 0x8a8
+#define A_ESPI_TRAIN 0x8ac
+#define A_ESPI_INTR_STATUS 0x8c8
+
+#define S_DIP2PARITYERR    5
+#define V_DIP2PARITYERR(x) ((x) << S_DIP2PARITYERR)
+#define F_DIP2PARITYERR    V_DIP2PARITYERR(1U)
+
+#define A_ESPI_INTR_ENABLE 0x8cc
+#define A_RX_DROP_THRESHOLD 0x8d0
+#define A_ESPI_RX_RESET 0x8ec
+#define A_ESPI_MISC_CONTROL 0x8f0
+
+#define S_OUT_OF_SYNC_COUNT    0
+#define V_OUT_OF_SYNC_COUNT(x) ((x) << S_OUT_OF_SYNC_COUNT)
+
+#define S_DIP2_PARITY_ERR_THRES    5
+#define V_DIP2_PARITY_ERR_THRES(x) ((x) << S_DIP2_PARITY_ERR_THRES)
+
+#define S_DIP4_THRES    9
+#define V_DIP4_THRES(x) ((x) << S_DIP4_THRES)
+
+#define S_MONITORED_PORT_NUM    25
+#define V_MONITORED_PORT_NUM(x) ((x) << S_MONITORED_PORT_NUM)
+
+#define S_MONITORED_DIRECTION    27
+#define V_MONITORED_DIRECTION(x) ((x) << S_MONITORED_DIRECTION)
+#define F_MONITORED_DIRECTION    V_MONITORED_DIRECTION(1U)
+
+#define S_MONITORED_INTERFACE    28
+#define V_MONITORED_INTERFACE(x) ((x) << S_MONITORED_INTERFACE)
+#define F_MONITORED_INTERFACE    V_MONITORED_INTERFACE(1U)
+
+#define A_ESPI_DIP2_ERR_COUNT 0x8f4
+#define A_ESPI_CMD_ADDR 0x8f8
+
+#define S_WRITE_DATA    0
+#define V_WRITE_DATA(x) ((x) << S_WRITE_DATA)
+
+#define S_REGISTER_OFFSET    8
+#define V_REGISTER_OFFSET(x) ((x) << S_REGISTER_OFFSET)
+
+#define S_CHANNEL_ADDR    12
+#define V_CHANNEL_ADDR(x) ((x) << S_CHANNEL_ADDR)
+
+#define S_MODULE_ADDR    16
+#define V_MODULE_ADDR(x) ((x) << S_MODULE_ADDR)
+
+#define S_BUNDLE_ADDR    20
+#define V_BUNDLE_ADDR(x) ((x) << S_BUNDLE_ADDR)
+
+#define S_SPI4_COMMAND    24
+#define V_SPI4_COMMAND(x) ((x) << S_SPI4_COMMAND)
+
+#define A_ESPI_GOSTAT 0x8fc
+#define S_ESPI_CMD_BUSY    8
+#define V_ESPI_CMD_BUSY(x) ((x) << S_ESPI_CMD_BUSY)
+#define F_ESPI_CMD_BUSY    V_ESPI_CMD_BUSY(1U)
+
+/* PL registers */
+
+#define A_PL_ENABLE 0xa00
+
+#define S_PL_INTR_SGE_ERR    0
+#define V_PL_INTR_SGE_ERR(x) ((x) << S_PL_INTR_SGE_ERR)
+#define F_PL_INTR_SGE_ERR    V_PL_INTR_SGE_ERR(1U)
+
+#define S_PL_INTR_SGE_DATA    1
+#define V_PL_INTR_SGE_DATA(x) ((x) << S_PL_INTR_SGE_DATA)
+#define F_PL_INTR_SGE_DATA    V_PL_INTR_SGE_DATA(1U)
+
+#define S_PL_INTR_TP    6
+#define V_PL_INTR_TP(x) ((x) << S_PL_INTR_TP)
+#define F_PL_INTR_TP    V_PL_INTR_TP(1U)
+
+#define S_PL_INTR_ESPI    8
+#define V_PL_INTR_ESPI(x) ((x) << S_PL_INTR_ESPI)
+#define F_PL_INTR_ESPI    V_PL_INTR_ESPI(1U)
+
+#define S_PL_INTR_PCIX    10
+#define V_PL_INTR_PCIX(x) ((x) << S_PL_INTR_PCIX)
+#define F_PL_INTR_PCIX    V_PL_INTR_PCIX(1U)
+
+#define S_PL_INTR_EXT    11
+#define V_PL_INTR_EXT(x) ((x) << S_PL_INTR_EXT)
+#define F_PL_INTR_EXT    V_PL_INTR_EXT(1U)
+
+#define A_PL_CAUSE 0xa04
+
+/* MC5 registers */
+
+#define A_MC5_CONFIG 0xc04
+
+#define S_TCAM_RESET    1
+#define V_TCAM_RESET(x) ((x) << S_TCAM_RESET)
+#define F_TCAM_RESET    V_TCAM_RESET(1U)
+
+#define S_M_BUS_ENABLE    5
+#define V_M_BUS_ENABLE(x) ((x) << S_M_BUS_ENABLE)
+#define F_M_BUS_ENABLE    V_M_BUS_ENABLE(1U)
+
+/* PCICFG registers */
+
+#define A_PCICFG_PM_CSR 0x44
+#define A_PCICFG_VPD_ADDR 0x4a
+
+#define S_VPD_OP_FLAG    15
+#define V_VPD_OP_FLAG(x) ((x) << S_VPD_OP_FLAG)
+#define F_VPD_OP_FLAG    V_VPD_OP_FLAG(1U)
+
+#define A_PCICFG_VPD_DATA 0x4c
+
+#define A_PCICFG_INTR_ENABLE 0xf4
+#define A_PCICFG_INTR_CAUSE 0xf8
+
+#define A_PCICFG_MODE 0xfc
+
+#define S_PCI_MODE_64BIT    0
+#define V_PCI_MODE_64BIT(x) ((x) << S_PCI_MODE_64BIT)
+#define F_PCI_MODE_64BIT    V_PCI_MODE_64BIT(1U)
+
+#define S_PCI_MODE_PCIX    5
+#define V_PCI_MODE_PCIX(x) ((x) << S_PCI_MODE_PCIX)
+#define F_PCI_MODE_PCIX    V_PCI_MODE_PCIX(1U)
+
+#define S_PCI_MODE_CLK    6
+#define M_PCI_MODE_CLK    0x3
+#define G_PCI_MODE_CLK(x) (((x) >> S_PCI_MODE_CLK) & M_PCI_MODE_CLK)
+
+#endif /* _CXGB_REGS_H_ */
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
new file mode 100644
index 0000000..53b41d9
--- /dev/null
+++ b/drivers/net/chelsio/sge.c
@@ -0,0 +1,1684 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: sge.c                                                               *
+ * $Revision: 1.26 $                                                         *
+ * $Date: 2005/06/21 18:29:48 $                                              *
+ * Description:                                                              *
+ *  DMA engine.                                                              *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, write to the Free Software Foundation, Inc.,   *
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#include "common.h"
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/if_arp.h>
+
+#include "cpl5_cmd.h"
+#include "sge.h"
+#include "regs.h"
+#include "espi.h"
+
+
+#ifdef NETIF_F_TSO
+#include <linux/tcp.h>
+#endif
+
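+/*
+ * SGE tunables: queue counts and sizes, refill/copy/drop thresholds and the
+ * no-resource interrupt timer value.
+ */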
+#define SGE_CMDQ_N		2
+#define SGE_FREELQ_N		2
+#define SGE_CMDQ0_E_N		1024
+#define SGE_CMDQ1_E_N		128
+#define SGE_FREEL_SIZE		4096
+#define SGE_JUMBO_FREEL_SIZE	512
+#define SGE_FREEL_REFILL_THRESH	16
+#define SGE_RESPQ_E_N		1024
+#define SGE_INTRTIMER_NRES	1000
+#define SGE_RX_COPY_THRES	256
+#define SGE_RX_SM_BUF_SIZE	1536
+
+#define SGE_RX_DROP_THRES	2
+
+#define SGE_RESPQ_REPLENISH_THRES (SGE_RESPQ_E_N / 4)
+
+/*
+ * Period of the TX buffer reclaim timer.  This timer does not need to run
+ * frequently as TX buffers are usually reclaimed by new TX packets.
+ */
+#define TX_RECLAIM_PERIOD (HZ / 4)
+
+#ifndef NET_IP_ALIGN
+# define NET_IP_ALIGN 2
+#endif
+
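+/*
+ * Encodings of the descriptor words built by SW: the buffer length and
+ * generation bit 1 share the len_gen word (GEN1 in bit 31), while GEN2,
+ * SOP/EOP and the data-valid flag live in the flags/gen2 word.
+ */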
+#define M_CMD_LEN       0x7fffffff
+#define V_CMD_LEN(v)    (v)
+#define G_CMD_LEN(v)    ((v) & M_CMD_LEN)
+#define V_CMD_GEN1(v)   ((v) << 31)
+#define V_CMD_GEN2(v)   (v)
+#define F_CMD_DATAVALID (1 << 1)
+#define F_CMD_SOP       (1 << 2)
+#define V_CMD_EOP(v)    ((v) << 3)
+
+/*
+ * Command queue, receive buffer list, and response queue descriptors.
+ */
+#if defined(__BIG_ENDIAN_BITFIELD)
+struct cmdQ_e {
+	u32 addr_lo;
+	u32 len_gen;
+	u32 flags;
+	u32 addr_hi;
+};
+
+struct freelQ_e {
+	u32 addr_lo;
+	u32 len_gen;
+	u32 gen2;
+	u32 addr_hi;
+};
+
+struct respQ_e {
+	u32 Qsleeping		: 4;
+	u32 Cmdq1CreditReturn	: 5;
+	u32 Cmdq1DmaComplete	: 5;
+	u32 Cmdq0CreditReturn	: 5;
+	u32 Cmdq0DmaComplete	: 5;
+	u32 FreelistQid		: 2;
+	u32 CreditValid		: 1;
+	u32 DataValid		: 1;
+	u32 Offload		: 1;
+	u32 Eop			: 1;
+	u32 Sop			: 1;
+	u32 GenerationBit	: 1;
+	u32 BufferLength;
+};
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+struct cmdQ_e {
+	u32 len_gen;
+	u32 addr_lo;
+	u32 addr_hi;
+	u32 flags;
+};
+
+struct freelQ_e {
+	u32 len_gen;
+	u32 addr_lo;
+	u32 addr_hi;
+	u32 gen2;
+};
+
+struct respQ_e {
+	u32 BufferLength;
+	u32 GenerationBit	: 1;
+	u32 Sop			: 1;
+	u32 Eop			: 1;
+	u32 Offload		: 1;
+	u32 DataValid		: 1;
+	u32 CreditValid		: 1;
+	u32 FreelistQid		: 2;
+	u32 Cmdq0DmaComplete	: 5;
+	u32 Cmdq0CreditReturn	: 5;
+	u32 Cmdq1DmaComplete	: 5;
+	u32 Cmdq1CreditReturn	: 5;
+	u32 Qsleeping		: 4;
+};
+#endif
+
+/*
+ * SW Context Command and Freelist Queue Descriptors
+ */
+struct cmdQ_ce {
+	struct sk_buff *skb;
+	DECLARE_PCI_UNMAP_ADDR(dma_addr);
+	DECLARE_PCI_UNMAP_LEN(dma_len);
+};
+
+struct freelQ_ce {
+	struct sk_buff *skb;
+	DECLARE_PCI_UNMAP_ADDR(dma_addr);
+	DECLARE_PCI_UNMAP_LEN(dma_len);
+};
+
+/*
+ * SW command, freelist and response rings
+ */
+struct cmdQ {
+	unsigned long   status;         /* HW DMA fetch status */
+	unsigned int    in_use;         /* # of in-use command descriptors */
+	unsigned int	size;	        /* # of descriptors */
+	unsigned int	processed;      /* total # of descs HW has processed */
+	unsigned int	cleaned;        /* total # of descs SW has reclaimed */
+	unsigned int	stop_thres;     /* SW TX queue suspend threshold */
+	u16		pidx;           /* producer index (SW) */
+	u16		cidx;           /* consumer index (HW) */
+	u8		genbit;         /* current generation (=valid) bit */
+	u8		sop;            /* is next entry start of packet? */
+	struct cmdQ_e  *entries;        /* HW command descriptor Q */
+	struct cmdQ_ce *centries;       /* SW command context descriptor Q */
+	spinlock_t	lock;           /* Lock to protect cmdQ enqueuing */
+	dma_addr_t	dma_addr;       /* DMA addr HW command descriptor Q */
+};
+
+struct freelQ {
+	unsigned int	credits;        /* # of available RX buffers */
+	unsigned int	size;	        /* free list capacity */
+	u16		pidx;           /* producer index (SW) */
+	u16		cidx;           /* consumer index (HW) */
+	u16		rx_buffer_size; /* Buffer size on this free list */
+	u16		dma_offset;     /* DMA offset to align IP headers */
+	u16		recycleq_idx;   /* skb recycle q to use */
+	u8		genbit;	        /* current generation (=valid) bit */
+	struct freelQ_e	*entries;       /* HW freelist descriptor Q */
+	struct freelQ_ce *centries;     /* SW freelist context descriptor Q */
+	dma_addr_t	dma_addr;       /* DMA addr HW freelist descriptor Q */
+};
+
+struct respQ {
+	unsigned int	credits;        /* credits to be returned to SGE */
+	unsigned int	size;	        /* # of response Q descriptors */
+	u16		cidx;	        /* consumer index (SW) */
+	u8		genbit;	        /* current generation(=valid) bit */
+	struct respQ_e *entries;        /* HW response descriptor Q */
+	dma_addr_t	dma_addr;       /* DMA addr HW response descriptor Q */
+};
+
+/* Bit flags for cmdQ.status */
+enum {
+	CMDQ_STAT_RUNNING = 1,          /* fetch engine is running */
+	CMDQ_STAT_LAST_PKT_DB = 2       /* last packet rung the doorbell */
+};
+
+/*
+ * Main SGE data structure
+ *
+ * Interrupts are handled by a single CPU and it is likely that on a MP system
+ * the application is migrated to another CPU. In that scenario, we try to
+ * separate the RX (in irq context) and TX state in order to decrease memory
+ * contention.
+ */
+struct sge {
+	struct adapter *adapter; 	/* adapter backpointer */
+	struct net_device *netdev;      /* netdevice backpointer */
+	struct freelQ 	freelQ[SGE_FREELQ_N]; /* buffer free lists */
+	struct respQ 	respQ;		/* response Q */
+	unsigned long   stopped_tx_queues; /* bitmap of suspended Tx queues */
+	unsigned int	rx_pkt_pad;     /* RX padding for L2 packets */
+	unsigned int	jumbo_fl;       /* jumbo freelist Q index */
+	unsigned int	intrtimer_nres;	/* no-resource interrupt timer */
+	unsigned int	fixed_intrtimer;/* non-adaptive interrupt timer */
+	struct timer_list tx_reclaim_timer; /* reclaims TX buffers */
+	struct timer_list espibug_timer;
+	unsigned int	espibug_timeout;
+	struct sk_buff	*espibug_skb;
+	u32		sge_control;	/* shadow value of sge control reg */
+	struct sge_intr_counts stats;
+	struct sge_port_stats port_stats[MAX_NPORTS];
+	struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;
+};
+
+/*
+ * PIO to indicate that memory mapped Q contains valid descriptor(s).
+ */
+static inline void doorbell_pio(struct adapter *adapter, u32 val)
+{
+	wmb();
+	writel(val, adapter->regs + A_SG_DOORBELL);
+}
+
+/*
+ * Frees all RX buffers on the freelist Q. The caller must make sure that
+ * the SGE is turned off before calling this function.
+ */
+static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q)
+{
+	unsigned int cidx = q->cidx;
+
+	while (q->credits--) {
+		struct freelQ_ce *ce = &q->centries[cidx];
+
+		pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
+				 pci_unmap_len(ce, dma_len),
+				 PCI_DMA_FROMDEVICE);
+		dev_kfree_skb(ce->skb);
+		ce->skb = NULL;
+		if (++cidx == q->size)
+			cidx = 0;
+	}
+}
+
+/*
+ * Free RX free list and response queue resources.
+ */
+static void free_rx_resources(struct sge *sge)
+{
+	struct pci_dev *pdev = sge->adapter->pdev;
+	unsigned int size, i;
+
+	if (sge->respQ.entries) {
+		size = sizeof(struct respQ_e) * sge->respQ.size;
+		pci_free_consistent(pdev, size, sge->respQ.entries,
+				    sge->respQ.dma_addr);
+	}
+
+	for (i = 0; i < SGE_FREELQ_N; i++) {
+		struct freelQ *q = &sge->freelQ[i];
+
+		if (q->centries) {
+			free_freelQ_buffers(pdev, q);
+			kfree(q->centries);
+		}
+		if (q->entries) {
+			size = sizeof(struct freelQ_e) * q->size;
+			pci_free_consistent(pdev, size, q->entries,
+					    q->dma_addr);
+		}
+	}
+}
+
+/*
+ * Allocates basic RX resources, consisting of memory mapped freelist Qs and a
+ * response queue.
+ */
+static int alloc_rx_resources(struct sge *sge, struct sge_params *p)
+{
+	struct pci_dev *pdev = sge->adapter->pdev;
+	unsigned int size, i;
+
+	for (i = 0; i < SGE_FREELQ_N; i++) {
+		struct freelQ *q = &sge->freelQ[i];
+
+		q->genbit = 1;
+		q->size = p->freelQ_size[i];
+		q->dma_offset = sge->rx_pkt_pad ? 0 : NET_IP_ALIGN;
+		size = sizeof(struct freelQ_e) * q->size;
+		q->entries = (struct freelQ_e *)
+			      pci_alloc_consistent(pdev, size, &q->dma_addr);
+		if (!q->entries)
+			goto err_no_mem;
+		memset(q->entries, 0, size);
+		size = sizeof(struct freelQ_ce) * q->size;
+		q->centries = kmalloc(size, GFP_KERNEL);
+		if (!q->centries)
+			goto err_no_mem;
+		memset(q->centries, 0, size);
+	}
+
+	/*
+	 * Calculate the buffer sizes for the two free lists.  FL0 accommodates
+	 * regular sized Ethernet frames, FL1 is sized not to exceed 16K,
+	 * including all the sk_buff overhead.
+	 *
+	 * Note: For T2 FL0 and FL1 are reversed.
+	 */
+	sge->freelQ[!sge->jumbo_fl].rx_buffer_size = SGE_RX_SM_BUF_SIZE +
+		sizeof(struct cpl_rx_data) +
+		sge->freelQ[!sge->jumbo_fl].dma_offset;
+	sge->freelQ[sge->jumbo_fl].rx_buffer_size = (16 * 1024) -
+		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+	/*
+	 * Setup which skb recycle Q should be used when recycling buffers from
+	 * each free list.
+	 */
+	sge->freelQ[!sge->jumbo_fl].recycleq_idx = 0;
+	sge->freelQ[sge->jumbo_fl].recycleq_idx = 1;
+
+	sge->respQ.genbit = 1;
+	sge->respQ.size = SGE_RESPQ_E_N;
+	sge->respQ.credits = 0;
+	size = sizeof(struct respQ_e) * sge->respQ.size;
+	sge->respQ.entries = (struct respQ_e *)
+		pci_alloc_consistent(pdev, size, &sge->respQ.dma_addr);
+	if (!sge->respQ.entries)
+		goto err_no_mem;
+	memset(sge->respQ.entries, 0, size);
+	return 0;
+
+err_no_mem:
+	free_rx_resources(sge);
+	return -ENOMEM;
+}
+
+/*
+ * Reclaims n TX descriptors and frees the buffers associated with them.
+ */
+static void free_cmdQ_buffers(struct sge *sge, struct cmdQ *q, unsigned int n)
+{
+	struct cmdQ_ce *ce;
+	struct pci_dev *pdev = sge->adapter->pdev;
+	unsigned int cidx = q->cidx;
+
+	q->in_use -= n;
+	ce = &q->centries[cidx];
+	while (n--) {
+		if (q->sop)
+			pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
+			 		 pci_unmap_len(ce, dma_len),
+					 PCI_DMA_TODEVICE);
+		else
+			pci_unmap_page(pdev, pci_unmap_addr(ce, dma_addr),
+			 	       pci_unmap_len(ce, dma_len),
+				       PCI_DMA_TODEVICE);
+		q->sop = 0;
+		if (ce->skb) {
+			dev_kfree_skb(ce->skb);
+			q->sop = 1;
+		}
+		ce++;
+		if (++cidx == q->size) {
+			cidx = 0;
+			ce = q->centries;
+		}
+	}
+	q->cidx = cidx;
+}
+
+/*
+ * Free TX resources.
+ *
+ * Assumes that SGE is stopped and all interrupts are disabled.
+ */
+static void free_tx_resources(struct sge *sge)
+{
+	struct pci_dev *pdev = sge->adapter->pdev;
+	unsigned int size, i;
+
+	for (i = 0; i < SGE_CMDQ_N; i++) {
+		struct cmdQ *q = &sge->cmdQ[i];
+
+		if (q->centries) {
+			if (q->in_use)
+				free_cmdQ_buffers(sge, q, q->in_use);
+			kfree(q->centries);
+		}
+		if (q->entries) {
+			size = sizeof(struct cmdQ_e) * q->size;
+			pci_free_consistent(pdev, size, q->entries,
+					    q->dma_addr);
+		}
+	}
+}
+
+/*
+ * Allocates basic TX resources, consisting of memory mapped command Qs.
+ */
+static int alloc_tx_resources(struct sge *sge, struct sge_params *p)
+{
+	struct pci_dev *pdev = sge->adapter->pdev;
+	unsigned int size, i;
+
+	for (i = 0; i < SGE_CMDQ_N; i++) {
+		struct cmdQ *q = &sge->cmdQ[i];
+
+		q->genbit = 1;
+		q->sop = 1;
+		q->size = p->cmdQ_size[i];
+		q->in_use = 0;
+		q->status = 0;
+		q->processed = q->cleaned = 0;
+		q->stop_thres = 0;
+		spin_lock_init(&q->lock);
+		size = sizeof(struct cmdQ_e) * q->size;
+		q->entries = (struct cmdQ_e *)
+			      pci_alloc_consistent(pdev, size, &q->dma_addr);
+		if (!q->entries)
+			goto err_no_mem;
+		memset(q->entries, 0, size);
+		size = sizeof(struct cmdQ_ce) * q->size;
+		q->centries = kmalloc(size, GFP_KERNEL);
+		if (!q->centries)
+			goto err_no_mem;
+		memset(q->centries, 0, size);
+	}
+
+	/*
+	 * CommandQ 0 handles Ethernet and TOE packets, while queue 1 is TOE
+	 * only.  For queue 0 set the stop threshold so we can handle one more
+	 * packet from each port, plus reserve an additional 24 entries for
+	 * Ethernet packets only.  Queue 1 never suspends nor do we reserve
+	 * space for Ethernet packets.
+	 */
+	sge->cmdQ[0].stop_thres = sge->adapter->params.nports *
+		(MAX_SKB_FRAGS + 1);
+	return 0;
+
+err_no_mem:
+	free_tx_resources(sge);
+	return -ENOMEM;
+}
+
+static inline void setup_ring_params(struct adapter *adapter, u64 addr,
+				     u32 size, int base_reg_lo,
+				     int base_reg_hi, int size_reg)
+{
+	writel((u32)addr, adapter->regs + base_reg_lo);
+	writel(addr >> 32, adapter->regs + base_reg_hi);
+	writel(size, adapter->regs + size_reg);
+}
+
+/*
+ * Enable/disable VLAN acceleration.
+ */
+void t1_set_vlan_accel(struct adapter *adapter, int on_off)
+{
+	struct sge *sge = adapter->sge;
+
+	sge->sge_control &= ~F_VLAN_XTRACT;
+	if (on_off)
+		sge->sge_control |= F_VLAN_XTRACT;
+	if (adapter->open_device_map) {
+		writel(sge->sge_control, adapter->regs + A_SG_CONTROL);
+		readl(adapter->regs + A_SG_CONTROL); /* flush */
+	}
+}
+
+/*
+ * Programs the various SGE registers.  The engine is not yet enabled, but
+ * sge->sge_control is set up and ready to go.
+ */
+static void configure_sge(struct sge *sge, struct sge_params *p)
+{
+	struct adapter *ap = sge->adapter;
+	
+	writel(0, ap->regs + A_SG_CONTROL);
+	setup_ring_params(ap, sge->cmdQ[0].dma_addr, sge->cmdQ[0].size,
+			  A_SG_CMD0BASELWR, A_SG_CMD0BASEUPR, A_SG_CMD0SIZE);
+	setup_ring_params(ap, sge->cmdQ[1].dma_addr, sge->cmdQ[1].size,
+			  A_SG_CMD1BASELWR, A_SG_CMD1BASEUPR, A_SG_CMD1SIZE);
+	setup_ring_params(ap, sge->freelQ[0].dma_addr,
+			  sge->freelQ[0].size, A_SG_FL0BASELWR,
+			  A_SG_FL0BASEUPR, A_SG_FL0SIZE);
+	setup_ring_params(ap, sge->freelQ[1].dma_addr,
+			  sge->freelQ[1].size, A_SG_FL1BASELWR,
+			  A_SG_FL1BASEUPR, A_SG_FL1SIZE);
+
+	/* The threshold comparison uses <. */
+	writel(SGE_RX_SM_BUF_SIZE + 1, ap->regs + A_SG_FLTHRESHOLD);
+
+	setup_ring_params(ap, sge->respQ.dma_addr, sge->respQ.size,
+			  A_SG_RSPBASELWR, A_SG_RSPBASEUPR, A_SG_RSPSIZE);
+	writel((u32)sge->respQ.size - 1, ap->regs + A_SG_RSPQUEUECREDIT);
+
+	sge->sge_control = F_CMDQ0_ENABLE | F_CMDQ1_ENABLE | F_FL0_ENABLE |
+		F_FL1_ENABLE | F_CPL_ENABLE | F_RESPONSE_QUEUE_ENABLE |
+		V_CMDQ_PRIORITY(2) | F_DISABLE_CMDQ1_GTS | F_ISCSI_COALESCE |
+		F_DISABLE_FL0_GTS | F_DISABLE_FL1_GTS |
+		V_RX_PKT_OFFSET(sge->rx_pkt_pad);
+
+#if defined(__BIG_ENDIAN_BITFIELD)
+	sge->sge_control |= F_ENABLE_BIG_ENDIAN;
+#endif
+
+	/* Initialize no-resource timer */
+	sge->intrtimer_nres = SGE_INTRTIMER_NRES * core_ticks_per_usec(ap);
+
+	t1_sge_set_coalesce_params(sge, p);
+}
+
+/*
+ * Return the payload capacity of the jumbo free-list buffers.
+ */
+static inline unsigned int jumbo_payload_capacity(const struct sge *sge)
+{
+	return sge->freelQ[sge->jumbo_fl].rx_buffer_size -
+		sge->freelQ[sge->jumbo_fl].dma_offset -
+		sizeof(struct cpl_rx_data);
+}
+
+/*
+ * Frees all SGE related resources and the sge structure itself
+ */
+void t1_sge_destroy(struct sge *sge)
+{
+	if (sge->espibug_skb)
+		kfree_skb(sge->espibug_skb);
+
+	free_tx_resources(sge);
+	free_rx_resources(sge);
+	kfree(sge);
+}
+
+/*
+ * Allocates new RX buffers on the freelist Q (and tracks them on the freelist
+ * context Q) until the Q is full or alloc_skb fails.
+ *
+ * It is possible that the generation bits already match, indicating that the
+ * buffer is already valid and nothing needs to be done. This happens when we
+ * copied a received buffer into a new sk_buff during the interrupt processing.
+ *
+ * If the SGE doesn't automatically align packets properly (!sge->rx_pkt_pad),
+ * we specify a RX_OFFSET in order to make sure that the IP header is 4B
+ * aligned.
+ */
+static void refill_free_list(struct sge *sge, struct freelQ *q)
+{
+	struct pci_dev *pdev = sge->adapter->pdev;
+	struct freelQ_ce *ce = &q->centries[q->pidx];
+	struct freelQ_e *e = &q->entries[q->pidx];
+	unsigned int dma_len = q->rx_buffer_size - q->dma_offset;
+
+
+	while (q->credits < q->size) {
+		struct sk_buff *skb;
+		dma_addr_t mapping;
+
+		skb = alloc_skb(q->rx_buffer_size, GFP_ATOMIC);
+		if (!skb)
+			break;
+
+		skb_reserve(skb, q->dma_offset);
+		mapping = pci_map_single(pdev, skb->data, dma_len,
+					 PCI_DMA_FROMDEVICE);
+		ce->skb = skb;
+		pci_unmap_addr_set(ce, dma_addr, mapping);
+		pci_unmap_len_set(ce, dma_len, dma_len);
+		e->addr_lo = (u32)mapping;
+		e->addr_hi = (u64)mapping >> 32;
+		e->len_gen = V_CMD_LEN(dma_len) | V_CMD_GEN1(q->genbit);
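+		/*
+		 * Make the descriptor body visible before writing the gen2
+		 * word that marks it valid to the HW.
+		 */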
+		wmb();
+		e->gen2 = V_CMD_GEN2(q->genbit);
+
+		e++;
+		ce++;
+		if (++q->pidx == q->size) {
+			q->pidx = 0;
+			q->genbit ^= 1;
+			ce = q->centries;
+			e = q->entries;
+		}
+		q->credits++;
+	}
+
+}
+
+/*
+ * Calls refill_free_list for both free lists. If we cannot fill at least 1/4
+ * of both rings, we go into 'few interrupt mode' in order to give the system
+ * time to free up resources.
+ */
+static void freelQs_empty(struct sge *sge)
+{
+	struct adapter *adapter = sge->adapter;
+	u32 irq_reg = readl(adapter->regs + A_SG_INT_ENABLE);
+	u32 irqholdoff_reg;
+
+	refill_free_list(sge, &sge->freelQ[0]);
+	refill_free_list(sge, &sge->freelQ[1]);
+
+	if (sge->freelQ[0].credits > (sge->freelQ[0].size >> 2) &&
+	    sge->freelQ[1].credits > (sge->freelQ[1].size >> 2)) {
+		irq_reg |= F_FL_EXHAUSTED;
+		irqholdoff_reg = sge->fixed_intrtimer;
+	} else {
+		/* Clear the F_FL_EXHAUSTED interrupts for now */
+		irq_reg &= ~F_FL_EXHAUSTED;
+		irqholdoff_reg = sge->intrtimer_nres;
+	}
+	writel(irqholdoff_reg, adapter->regs + A_SG_INTRTIMER);
+	writel(irq_reg, adapter->regs + A_SG_INT_ENABLE);
+
+	/* We reenable the Qs to force a freelist GTS interrupt later */
+	doorbell_pio(adapter, F_FL0_ENABLE | F_FL1_ENABLE);
+}
+
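+/*
+ * PL-level interrupt bits owned by the SGE and the SGE interrupt bits the
+ * driver enables; SGE_INT_FATAL marks the conditions treated as fatal.
+ */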
+#define SGE_PL_INTR_MASK (F_PL_INTR_SGE_ERR | F_PL_INTR_SGE_DATA)
+#define SGE_INT_FATAL (F_RESPQ_OVERFLOW | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
+#define SGE_INT_ENABLE (F_RESPQ_EXHAUSTED | F_RESPQ_OVERFLOW | \
+			F_FL_EXHAUSTED | F_PACKET_TOO_BIG | F_PACKET_MISMATCH)
+
+/*
+ * Disable SGE Interrupts
+ */
+void t1_sge_intr_disable(struct sge *sge)
+{
+	u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
+
+	writel(val & ~SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
+	writel(0, sge->adapter->regs + A_SG_INT_ENABLE);
+}
+
+/*
+ * Enable SGE interrupts.
+ */
+void t1_sge_intr_enable(struct sge *sge)
+{
+	u32 en = SGE_INT_ENABLE;
+	u32 val = readl(sge->adapter->regs + A_PL_ENABLE);
+
+	if (sge->adapter->flags & TSO_CAPABLE)
+		en &= ~F_PACKET_TOO_BIG;
+	writel(en, sge->adapter->regs + A_SG_INT_ENABLE);
+	writel(val | SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_ENABLE);
+}
+
+/*
+ * Clear SGE interrupts.
+ */
+void t1_sge_intr_clear(struct sge *sge)
+{
+	writel(SGE_PL_INTR_MASK, sge->adapter->regs + A_PL_CAUSE);
+	writel(0xffffffff, sge->adapter->regs + A_SG_INT_CAUSE);
+}
+
+/*
+ * SGE 'Error' interrupt handler
+ */
+int t1_sge_intr_error_handler(struct sge *sge)
+{
+	struct adapter *adapter = sge->adapter;
+	u32 cause = readl(adapter->regs + A_SG_INT_CAUSE);
+
+	if (adapter->flags & TSO_CAPABLE)
+		cause &= ~F_PACKET_TOO_BIG;
+	if (cause & F_RESPQ_EXHAUSTED)
+		sge->stats.respQ_empty++;
+	if (cause & F_RESPQ_OVERFLOW) {
+		sge->stats.respQ_overflow++;
+		CH_ALERT("%s: SGE response queue overflow\n",
+			 adapter->name);
+	}
+	if (cause & F_FL_EXHAUSTED) {
+		sge->stats.freelistQ_empty++;
+		freelQs_empty(sge);
+	}
+	if (cause & F_PACKET_TOO_BIG) {
+		sge->stats.pkt_too_big++;
+		CH_ALERT("%s: SGE max packet size exceeded\n",
+			 adapter->name);
+	}
+	if (cause & F_PACKET_MISMATCH) {
+		sge->stats.pkt_mismatch++;
+		CH_ALERT("%s: SGE packet mismatch\n", adapter->name);
+	}
+	if (cause & SGE_INT_FATAL)
+		t1_fatal_err(adapter);
+
+	writel(cause, adapter->regs + A_SG_INT_CAUSE);
+	return 0;
+}
+
+const struct sge_intr_counts *t1_sge_get_intr_counts(struct sge *sge)
+{
+	return &sge->stats;
+}
+
+const struct sge_port_stats *t1_sge_get_port_stats(struct sge *sge, int port)
+{
+	return &sge->port_stats[port];
+}
+
+/**
+ *	recycle_fl_buf - recycle a free list buffer
+ *	@fl: the free list
+ *	@idx: index of buffer to recycle
+ *
+ *	Recycles the specified buffer on the given free list by adding it at
+ *	the next available slot on the list.
+ */
+static void recycle_fl_buf(struct freelQ *fl, int idx)
+{
+	struct freelQ_e *from = &fl->entries[idx];
+	struct freelQ_e *to = &fl->entries[fl->pidx];
+
+	fl->centries[fl->pidx] = fl->centries[idx];
+	to->addr_lo = from->addr_lo;
+	to->addr_hi = from->addr_hi;
+	to->len_gen = G_CMD_LEN(from->len_gen) | V_CMD_GEN1(fl->genbit);
+	wmb();
+	to->gen2 = V_CMD_GEN2(fl->genbit);
+	fl->credits++;
+
+	if (++fl->pidx == fl->size) {
+		fl->pidx = 0;
+		fl->genbit ^= 1;
+	}
+}
+
+/**
+ *	get_packet - return the next ingress packet buffer
+ *	@pdev: the PCI device that received the packet
+ *	@fl: the SGE free list holding the packet
+ *	@len: the actual packet length, excluding any SGE padding
+ *	@dma_pad: padding at beginning of buffer left by SGE DMA
+ *	@skb_pad: padding to be used if the packet is copied
+ *	@copy_thres: length threshold under which a packet should be copied
+ *	@drop_thres: # of remaining buffers before we start dropping packets
+ *
+ *	Get the next packet from a free list and complete setup of the
+ *	sk_buff.  If the packet is small we make a copy and recycle the
+ *	original buffer, otherwise we use the original buffer itself.  If a
+ *	positive drop threshold is supplied packets are dropped and their
+ *	buffers recycled if (a) the number of remaining buffers is under the
+ *	threshold and the packet is too big to copy, or (b) the packet should
+ *	be copied but there is no memory for the copy.
+ */
+static inline struct sk_buff *get_packet(struct pci_dev *pdev,
+					 struct freelQ *fl, unsigned int len,
+					 int dma_pad, int skb_pad,
+					 unsigned int copy_thres,
+					 unsigned int drop_thres)
+{
+	struct sk_buff *skb;
+	struct freelQ_ce *ce = &fl->centries[fl->cidx];
+
+	if (len < copy_thres) {
+		skb = alloc_skb(len + skb_pad, GFP_ATOMIC);
+		if (likely(skb != NULL)) {
+			skb_reserve(skb, skb_pad);
+			skb_put(skb, len);
+			pci_dma_sync_single_for_cpu(pdev,
+					    pci_unmap_addr(ce, dma_addr),
+ 					    pci_unmap_len(ce, dma_len),
+					    PCI_DMA_FROMDEVICE);
+			memcpy(skb->data, ce->skb->data + dma_pad, len);
+			pci_dma_sync_single_for_device(pdev,
+					    pci_unmap_addr(ce, dma_addr),
+ 					    pci_unmap_len(ce, dma_len),
+					    PCI_DMA_FROMDEVICE);
+		} else if (!drop_thres)
+			goto use_orig_buf;
+
+		recycle_fl_buf(fl, fl->cidx);
+		return skb;
+	}
+
+	if (fl->credits < drop_thres) {
+		recycle_fl_buf(fl, fl->cidx);
+		return NULL;
+	}
+
+use_orig_buf:
+	pci_unmap_single(pdev, pci_unmap_addr(ce, dma_addr),
+			 pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
+	skb = ce->skb;
+	skb_reserve(skb, dma_pad);
+	skb_put(skb, len);
+	return skb;
+}
+
+/**
+ *	unexpected_offload - handle an unexpected offload packet
+ *	@adapter: the adapter
+ *	@fl: the free list that received the packet
+ *
+ *	Called when we receive an unexpected offload packet (e.g., the TOE
+ *	function is disabled or the card is a NIC).  Prints a message and
+ *	recycles the buffer.
+ */
+static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
+{
+	struct freelQ_ce *ce = &fl->centries[fl->cidx];
+	struct sk_buff *skb = ce->skb;
+
+	pci_dma_sync_single_for_cpu(adapter->pdev, pci_unmap_addr(ce, dma_addr),
+			    pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
+	CH_ERR("%s: unexpected offload packet, cmd %u\n",
+	       adapter->name, *skb->data);
+	recycle_fl_buf(fl, fl->cidx);
+}
+
+/*
+ * Write the command descriptors to transmit the given skb starting at
+ * descriptor pidx with the given generation.
+ */
+static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
+				  unsigned int pidx, unsigned int gen,
+				  struct cmdQ *q)
+{
+	dma_addr_t mapping;
+	struct cmdQ_e *e, *e1;
+	struct cmdQ_ce *ce;
+	unsigned int i, flags, nfrags = skb_shinfo(skb)->nr_frags;
+
+	mapping = pci_map_single(adapter->pdev, skb->data,
+				 skb->len - skb->data_len, PCI_DMA_TODEVICE);
+	ce = &q->centries[pidx];
+	ce->skb = NULL;
+	pci_unmap_addr_set(ce, dma_addr, mapping);
+	pci_unmap_len_set(ce, dma_len, skb->len - skb->data_len);
+
+	flags = F_CMD_DATAVALID | F_CMD_SOP | V_CMD_EOP(nfrags == 0) |
+		V_CMD_GEN2(gen);
+	e = &q->entries[pidx];
+	e->addr_lo = (u32)mapping;
+	e->addr_hi = (u64)mapping >> 32;
+	e->len_gen = V_CMD_LEN(skb->len - skb->data_len) | V_CMD_GEN1(gen);
+	for (e1 = e, i = 0; nfrags--; i++) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+		ce++;
+		e1++;
+		if (++pidx == q->size) {
+			pidx = 0;
+			gen ^= 1;
+			ce = q->centries;
+			e1 = q->entries;
+		}
+
+		mapping = pci_map_page(adapter->pdev, frag->page,
+				       frag->page_offset, frag->size,
+				       PCI_DMA_TODEVICE);
+		ce->skb = NULL;
+		pci_unmap_addr_set(ce, dma_addr, mapping);
+		pci_unmap_len_set(ce, dma_len, frag->size);
+
+		e1->addr_lo = (u32)mapping;
+		e1->addr_hi = (u64)mapping >> 32;
+		e1->len_gen = V_CMD_LEN(frag->size) | V_CMD_GEN1(gen);
+		e1->flags = F_CMD_DATAVALID | V_CMD_EOP(nfrags == 0) |
+			    V_CMD_GEN2(gen);
+	}
+
+	ce->skb = skb;
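+	/*
+	 * Publish the SOP descriptor's flags last, after a write barrier, so
+	 * the HW never sees a partially written descriptor chain.
+	 */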
+	wmb();
+	e->flags = flags;
+}
+
+/*
+ * Clean up completed Tx buffers.
+ */
+static inline void reclaim_completed_tx(struct sge *sge, struct cmdQ *q)
+{
+	unsigned int reclaim = q->processed - q->cleaned;
+
+	if (reclaim) {
+		free_cmdQ_buffers(sge, q, reclaim);
+		q->cleaned += reclaim;
+	}
+}
+
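+/*
+ * Compatibility shim: on kernels without SET_ETHTOOL_OPS, fall back to
+ * netif_rx_complete() in place of __netif_rx_complete().
+ */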
+#ifndef SET_ETHTOOL_OPS
+# define __netif_rx_complete(dev) netif_rx_complete(dev)
+#endif
+
+/*
+ * We cannot use the standard netif_rx_schedule_prep() because we have multiple
+ * ports plus the TOE all multiplexing onto a single response queue; therefore
+ * accepting new responses cannot depend on the state of any particular port.
+ * So define our own equivalent that omits the netif_running() test.
+ */
+static inline int napi_schedule_prep(struct net_device *dev)
+{
+	return !test_and_set_bit(__LINK_STATE_RX_SCHED, &dev->state);
+}
+
+
+/**
+ *	sge_rx - process an ingress ethernet packet
+ *	@sge: the sge structure
+ *	@fl: the free list that contains the packet buffer
+ *	@len: the packet length
+ *
+ *	Process an ingress ethernet packet and deliver it to the stack.
+ */
+static int sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
+{
+	struct sk_buff *skb;
+	struct cpl_rx_pkt *p;
+	struct adapter *adapter = sge->adapter;
+
+	sge->stats.ethernet_pkts++;
+	skb = get_packet(adapter->pdev, fl, len - sge->rx_pkt_pad,
+			 sge->rx_pkt_pad, 2, SGE_RX_COPY_THRES,
+			 SGE_RX_DROP_THRES);
+	if (!skb) {
+		sge->port_stats[0].rx_drops++; /* charge only port 0 for now */
+		return 0;
+	}
+
+	p = (struct cpl_rx_pkt *)skb->data;
+	skb_pull(skb, sizeof(*p));
+	skb->dev = adapter->port[p->iff].dev;
+	skb->dev->last_rx = jiffies;
+	skb->protocol = eth_type_trans(skb, skb->dev);
+	if ((adapter->flags & RX_CSUM_ENABLED) && p->csum == 0xffff &&
+	    skb->protocol == htons(ETH_P_IP) &&
+	    (skb->data[9] == IPPROTO_TCP || skb->data[9] == IPPROTO_UDP)) {
+		sge->port_stats[p->iff].rx_cso_good++;
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	} else
+		skb->ip_summed = CHECKSUM_NONE;
+
+	if (unlikely(adapter->vlan_grp && p->vlan_valid)) {
+		sge->port_stats[p->iff].vlan_xtract++;
+		if (adapter->params.sge.polling)
+			vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
+						 ntohs(p->vlan));
+		else
+			vlan_hwaccel_rx(skb, adapter->vlan_grp,
+					ntohs(p->vlan));
+	} else if (adapter->params.sge.polling)
+		netif_receive_skb(skb);
+	else
+		netif_rx(skb);
+	return 0;
+}
+
+/*
+ * Returns true if a command queue has enough available descriptors that
+ * we can resume Tx operation after temporarily disabling its packet queue.
+ */
+static inline int enough_free_Tx_descs(const struct cmdQ *q)
+{
+	unsigned int r = q->processed - q->cleaned;
+
+	return q->in_use - r < (q->size >> 1);
+}
+
+/*
+ * Called to restart the Tx path when sufficient space has become available
+ * in the SGE command queues after the Tx packet schedulers have been
+ * suspended.
+ */
+static void restart_tx_queues(struct sge *sge)
+{
+	struct adapter *adap = sge->adapter;
+
+	if (enough_free_Tx_descs(&sge->cmdQ[0])) {
+		int i;
+
+		for_each_port(adap, i) {
+			struct net_device *nd = adap->port[i].dev;
+
+			if (test_and_clear_bit(nd->if_port,
+					       &sge->stopped_tx_queues) &&
+			    netif_running(nd)) {
+				sge->stats.cmdQ_restarted[3]++;
+				netif_wake_queue(nd);
+			}
+		}
+	}
+}
+
+/*
+ * update_tx_info is called from the interrupt handler/NAPI to process the
+ * cmdQ0 credit-return information and restart any suspended Tx queues.
+ */
+static unsigned int update_tx_info(struct adapter *adapter, 
+					  unsigned int flags, 
+					  unsigned int pr0)
+{
+	struct sge *sge = adapter->sge;
+	struct cmdQ *cmdq = &sge->cmdQ[0];
+
+	cmdq->processed += pr0;
+
+	if (flags & F_CMDQ0_ENABLE) {
+		clear_bit(CMDQ_STAT_RUNNING, &cmdq->status);
+	
+		if (cmdq->cleaned + cmdq->in_use != cmdq->processed &&
+		    !test_and_set_bit(CMDQ_STAT_LAST_PKT_DB, &cmdq->status)) {
+			set_bit(CMDQ_STAT_RUNNING, &cmdq->status);
+			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
+		}
+	 	flags &= ~F_CMDQ0_ENABLE;
+	}
+	
+	if (unlikely(sge->stopped_tx_queues != 0))
+		restart_tx_queues(sge);
+
+	return flags;
+}
+
+/*
+ * Process SGE responses, up to the supplied budget.  Returns the number of
+ * responses processed.  A negative budget is effectively unlimited.
+ */
+static int process_responses(struct adapter *adapter, int budget)
+{
+	struct sge *sge = adapter->sge;
+	struct respQ *q = &sge->respQ;
+	struct respQ_e *e = &q->entries[q->cidx];
+	int budget_left = budget;
+	unsigned int flags = 0;
+	unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
+	
+
+	while (likely(budget_left && e->GenerationBit == q->genbit)) {
+		flags |= e->Qsleeping;
+		
+		cmdq_processed[0] += e->Cmdq0CreditReturn;
+		cmdq_processed[1] += e->Cmdq1CreditReturn;
+		
+		/* We batch updates to the TX side to avoid cacheline
+		 * ping-pong of TX state information on MP where the sender
+		 * might run on a different CPU than this function...
+		 */
+		if (unlikely(flags & F_CMDQ0_ENABLE || cmdq_processed[0] > 64)) {
+			flags = update_tx_info(adapter, flags, cmdq_processed[0]);
+			cmdq_processed[0] = 0;
+		}
+		if (unlikely(cmdq_processed[1] > 16)) {
+			sge->cmdQ[1].processed += cmdq_processed[1];
+			cmdq_processed[1] = 0;
+		}
+		if (likely(e->DataValid)) {
+			struct freelQ *fl = &sge->freelQ[e->FreelistQid];
+
+			if (unlikely(!e->Sop || !e->Eop))
+				BUG();
+			if (unlikely(e->Offload))
+				unexpected_offload(adapter, fl);
+			else
+				sge_rx(sge, fl, e->BufferLength);
+
+			/*
+			 * Note: this depends on each packet consuming a
+			 * single free-list buffer; cf. the BUG above.
+			 */
+			if (++fl->cidx == fl->size)
+				fl->cidx = 0;
+			if (unlikely(--fl->credits <
+				     fl->size - SGE_FREEL_REFILL_THRESH))
+				refill_free_list(sge, fl);
+		} else
+			sge->stats.pure_rsps++;
+
+		e++;
+		if (unlikely(++q->cidx == q->size)) {
+			q->cidx = 0;
+			q->genbit ^= 1;
+			e = q->entries;
+		}
+		prefetch(e);
+
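+		/*
+		 * Periodically return the accumulated response-queue credits
+		 * to the SGE so it can reuse those entries.
+		 */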
+		if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
+			writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
+			q->credits = 0;
+		}
+		--budget_left;
+	}
+
+	flags = update_tx_info(adapter, flags, cmdq_processed[0]); 
+	sge->cmdQ[1].processed += cmdq_processed[1];
+
+	budget -= budget_left;
+	return budget;
+}
+
+/*
+ * A simpler version of process_responses() that handles only pure (i.e.,
+ * non data-carrying) responses.  Such responses are too lightweight to justify
+ * calling a softirq when using NAPI, so we handle them specially in hard
+ * interrupt context.  The function is called with a pointer to a response,
+ * which the caller must ensure is a valid pure response.  Returns 1 if it
+ * encounters a valid data-carrying response, 0 otherwise.
+ */
+static int process_pure_responses(struct adapter *adapter, struct respQ_e *e)
+{
+	struct sge *sge = adapter->sge;
+	struct respQ *q = &sge->respQ;
+	unsigned int flags = 0;
+	unsigned int cmdq_processed[SGE_CMDQ_N] = {0, 0};
+
+	do {
+		flags |= e->Qsleeping;
+
+		cmdq_processed[0] += e->Cmdq0CreditReturn;
+		cmdq_processed[1] += e->Cmdq1CreditReturn;
+		
+		e++;
+		if (unlikely(++q->cidx == q->size)) {
+			q->cidx = 0;
+			q->genbit ^= 1;
+			e = q->entries;
+		}
+		prefetch(e);
+
+		if (++q->credits > SGE_RESPQ_REPLENISH_THRES) {
+			writel(q->credits, adapter->regs + A_SG_RSPQUEUECREDIT);
+			q->credits = 0;
+		}
+		sge->stats.pure_rsps++;
+	} while (e->GenerationBit == q->genbit && !e->DataValid);
+
+	flags = update_tx_info(adapter, flags, cmdq_processed[0]); 
+	sge->cmdQ[1].processed += cmdq_processed[1];
+
+	return e->GenerationBit == q->genbit;
+}
+
+/*
+ * Handler for new data events when using NAPI.  This does not need any locking
+ * or protection from interrupts as data interrupts are off at this point and
+ * other adapter interrupts do not interfere.
+ */
+static int t1_poll(struct net_device *dev, int *budget)
+{
+	struct adapter *adapter = dev->priv;
+	int effective_budget = min(*budget, dev->quota);
+
+	int work_done = process_responses(adapter, effective_budget);
+	*budget -= work_done;
+	dev->quota -= work_done;
+
+	if (work_done >= effective_budget)
+		return 1;
+
+	__netif_rx_complete(dev);
+
+	/*
+	 * Because we don't atomically flush the following write it is
+	 * possible that in very rare cases it can reach the device in a way
+	 * that races with a new response being written plus an error interrupt
+	 * causing the NAPI interrupt handler below to return unhandled status
+	 * to the OS.  To protect against this would require flushing the write
+	 * and doing both the write and the flush with interrupts off.  Way too
+	 * expensive and unjustifiable given the rarity of the race.
+	 */
+	writel(adapter->sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
+	return 0;
+}
+
+/*
+ * Returns true if the device is already scheduled for polling.
+ */
+static inline int napi_is_scheduled(struct net_device *dev)
+{
+	return test_bit(__LINK_STATE_RX_SCHED, &dev->state);
+}
+
+/*
+ * NAPI version of the main interrupt handler.
+ */
+static irqreturn_t t1_interrupt_napi(int irq, void *data, struct pt_regs *regs)
+{
+	int handled;
+	struct adapter *adapter = data;
+	struct sge *sge = adapter->sge;
+	struct respQ *q = &adapter->sge->respQ;
+
+	/*
+	 * Clear the SGE_DATA interrupt first thing.  Normally the NAPI
+	 * handler has control of the response queue and the interrupt handler
+	 * can look at the queue reliably only once it knows NAPI is off.
+	 * We can't wait that long to clear the SGE_DATA interrupt because we
+	 * could race with t1_poll rearming the SGE interrupt, so we need to
+	 * clear the interrupt speculatively and really early on.
+	 */
+	writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
+
+	spin_lock(&adapter->async_lock);
+	if (!napi_is_scheduled(sge->netdev)) {
+		struct respQ_e *e = &q->entries[q->cidx];
+
+		if (e->GenerationBit == q->genbit) {
+			if (e->DataValid ||
+			    process_pure_responses(adapter, e)) {
+				if (likely(napi_schedule_prep(sge->netdev)))
+					__netif_rx_schedule(sge->netdev);
+				else
+					printk(KERN_CRIT
+					       "NAPI schedule failure!\n");
+			} else {
+				writel(q->cidx, adapter->regs + A_SG_SLEEPING);
+			}
+			handled = 1;
+			goto unlock;
+		} else {
+			writel(q->cidx, adapter->regs + A_SG_SLEEPING);
+		}
+	} else if (readl(adapter->regs + A_PL_CAUSE) & F_PL_INTR_SGE_DATA)
+		printk(KERN_ERR "data interrupt while NAPI running\n");
+
+	handled = t1_slow_intr_handler(adapter);
+	if (!handled)
+		sge->stats.unhandled_irqs++;
+ unlock:
+	spin_unlock(&adapter->async_lock);
+	return IRQ_RETVAL(handled != 0);
+}
+
+/*
+ * Main interrupt handler, optimized assuming that we took a 'DATA'
+ * interrupt.
+ *
+ * 1. Clear the interrupt
+ * 2. Loop while we find valid descriptors and process them; accumulate
+ *      information that can be processed after the loop
+ * 3. Tell the SGE at which index we stopped processing descriptors
+ * 4. Bookkeeping; free TX buffers, ring doorbell if there are any
+ *      outstanding TX buffers waiting, replenish RX buffers, potentially
+ *      reenable upper layers if they were turned off due to lack of TX
+ *      resources which are available again.
+ * 5. If we took an interrupt but no valid respQ descriptors were found, we
+ *      let the slow_intr_handler run and do error handling.
+ */
+static irqreturn_t t1_interrupt(int irq, void *cookie, struct pt_regs *regs)
+{
+	int work_done;
+	struct respQ_e *e;
+	struct adapter *adapter = cookie;
+	struct respQ *Q = &adapter->sge->respQ;
+
+	spin_lock(&adapter->async_lock);
+	e = &Q->entries[Q->cidx];
+	prefetch(e);
+
+	writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
+
+	if (likely(e->GenerationBit == Q->genbit))
+		work_done = process_responses(adapter, -1);
+	else
+		work_done = t1_slow_intr_handler(adapter);
+
+	/*
+	 * The unconditional clearing of the PL_CAUSE above may have raced
+	 * with DMA completion and the corresponding generation of a response
+	 * to cause us to miss the resulting data interrupt.  The next write
+	 * is also unconditional to recover the missed interrupt and render
+	 * this race harmless.
+	 */
+	writel(Q->cidx, adapter->regs + A_SG_SLEEPING);
+
+	if (!work_done)
+		adapter->sge->stats.unhandled_irqs++;
+	spin_unlock(&adapter->async_lock);
+	return IRQ_RETVAL(work_done != 0);
+}
+
+intr_handler_t t1_select_intr_handler(adapter_t *adapter)
+{
+	return adapter->params.sge.polling ? t1_interrupt_napi : t1_interrupt;
+}
+
+/*
+ * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it.
+ *
+ * The code figures out how many entries the sk_buff will require in the
+ * cmdQ and updates the cmdQ data structure with the state once the enqueue
+ * has completed. Then, it doesn't access the global structure anymore, but
+ * uses the corresponding fields on the stack. In conjunction with a spinlock
+ * around that code, we can make the function reentrant without holding the
+ * lock when we actually enqueue (which might be expensive, especially on
+ * architectures with IO MMUs).
+ *
+ * This runs with softirqs disabled.
+ */
+unsigned int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
+		       unsigned int qid, struct net_device *dev)
+{
+	struct sge *sge = adapter->sge;
+	struct cmdQ *q = &sge->cmdQ[qid];
+	unsigned int credits, pidx, genbit, count;
+
+	spin_lock(&q->lock);
+	reclaim_completed_tx(sge, q);
+
+	pidx = q->pidx;
+	credits = q->size - q->in_use;
+	count = 1 + skb_shinfo(skb)->nr_frags;
+
+	{	/* Ethernet packet */
+	 	if (unlikely(credits < count)) {
+			netif_stop_queue(dev);
+			set_bit(dev->if_port, &sge->stopped_tx_queues);
+			sge->stats.cmdQ_full[3]++;
+			spin_unlock(&q->lock);
+			CH_ERR("%s: Tx ring full while queue awake!\n",
+			       adapter->name);
+			return 1;
+		}
+		if (unlikely(credits - count < q->stop_thres)) {
+			sge->stats.cmdQ_full[3]++;
+			netif_stop_queue(dev);
+			set_bit(dev->if_port, &sge->stopped_tx_queues);
+		}
+	}
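+	/*
+	 * Reserve the descriptors and advance the producer index while still
+	 * holding the lock; the descriptors themselves are written below
+	 * after the lock is dropped.
+	 */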
+	q->in_use += count;
+	genbit = q->genbit;
+	q->pidx += count;
+	if (q->pidx >= q->size) {
+		q->pidx -= q->size;
+		q->genbit ^= 1;
+	}
+	spin_unlock(&q->lock);
+
+	write_tx_descs(adapter, skb, pidx, genbit, q);
+
+	/*
+	 * We always ring the doorbell for cmdQ1.  For cmdQ0, we only ring
+	 * the doorbell if the Q is asleep.  There is a natural race where the
+	 * hardware goes to sleep just after we check; in that case the
+	 * interrupt handler will detect the outstanding TX packet and ring
+	 * the doorbell for us.
+	 */
+	if (qid)
+		doorbell_pio(adapter, F_CMDQ1_ENABLE);
+	else {
+		clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
+		if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) {
+			set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status);
+			writel(F_CMDQ0_ENABLE, adapter->regs + A_SG_DOORBELL);
+		}
+	}
+	return 0;
+}
+
+#define MK_ETH_TYPE_MSS(type, mss) (((mss) & 0x3FFF) | ((type) << 14))
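+
+/*
+ * For example, MK_ETH_TYPE_MSS(CPL_ETH_II, 1460) places the framing type in
+ * bits 15:14 and the 1460-byte MSS in bits 13:0 of the eth_type_mss field.
+ */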
+
+/*
+ *	eth_hdr_len - return the length of an Ethernet header
+ *	@data: pointer to the start of the Ethernet header
+ *
+ *	Returns the length of an Ethernet header, including optional VLAN tag.
+ */
+static inline int eth_hdr_len(const void *data)
+{
+	const struct ethhdr *e = data;
+
+	return e->h_proto == htons(ETH_P_8021Q) ? VLAN_ETH_HLEN : ETH_HLEN;
+}
+
+/*
+ * Adds the CPL header to the sk_buff and passes it to t1_sge_tx.
+ */
+int t1_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct adapter *adapter = dev->priv;
+	struct sge_port_stats *st = &adapter->sge->port_stats[dev->if_port];
+	struct sge *sge = adapter->sge;
+	struct cpl_tx_pkt *cpl;
+
+#ifdef NETIF_F_TSO
+	if (skb_shinfo(skb)->tso_size) {
+		int eth_type;
+		struct cpl_tx_pkt_lso *hdr;
+
+		st->tso++;
+
+		eth_type = skb->nh.raw - skb->data == ETH_HLEN ?
+			CPL_ETH_II : CPL_ETH_II_VLAN;
+
+		hdr = (struct cpl_tx_pkt_lso *)skb_push(skb, sizeof(*hdr));
+		hdr->opcode = CPL_TX_PKT_LSO;
+		hdr->ip_csum_dis = hdr->l4_csum_dis = 0;
+		hdr->ip_hdr_words = skb->nh.iph->ihl;
+		hdr->tcp_hdr_words = skb->h.th->doff;
+		hdr->eth_type_mss = htons(MK_ETH_TYPE_MSS(eth_type,
+						skb_shinfo(skb)->tso_size));
+		hdr->len = htonl(skb->len - sizeof(*hdr));
+		cpl = (struct cpl_tx_pkt *)hdr;
+		sge->stats.tx_lso_pkts++;
+	} else
+#endif
+	{
+		/*
+		 * Packets shorter than ETH_HLEN can break the MAC, so drop
+		 * them early.  We may also get oversized packets because some
+		 * parts of the kernel don't handle our unusual hard_header_len
+		 * right; drop those too.
+		 */
+		if (unlikely(skb->len < ETH_HLEN ||
+			     skb->len > dev->mtu + eth_hdr_len(skb->data))) {
+			dev_kfree_skb_any(skb);
+			return NET_XMIT_SUCCESS;
+		}
+
+		/*
+		 * We are using a non-standard hard_header_len and some kernel
+		 * components, such as pktgen, do not handle it right.
+		 * Complain when this happens but try to fix things up.
+		 */
+		if (unlikely(skb_headroom(skb) <
+			     dev->hard_header_len - ETH_HLEN)) {
+			struct sk_buff *orig_skb = skb;
+
+			if (net_ratelimit())
+				printk(KERN_ERR "%s: inadequate headroom in "
+				       "Tx packet\n", dev->name);
+			skb = skb_realloc_headroom(skb, sizeof(*cpl));
+			dev_kfree_skb_any(orig_skb);
+			if (!skb)
+				return -ENOMEM;
+		}
+
+		if (!(adapter->flags & UDP_CSUM_CAPABLE) &&
+		    skb->ip_summed == CHECKSUM_HW &&
+		    skb->nh.iph->protocol == IPPROTO_UDP)
+			if (unlikely(skb_checksum_help(skb, 0))) {
+				dev_kfree_skb_any(skb);
+				return -ENOMEM;
+			}
+
+		/*
+		 * Try to catch a gratuitous ARP and keep a reference to it;
+		 * it is re-sent later to flush out stuck ESPI packets.
+		 */
+		if (unlikely(!adapter->sge->espibug_skb)) {
+			if (skb->protocol == htons(ETH_P_ARP) &&
+			    skb->nh.arph->ar_op == htons(ARPOP_REQUEST)) {
+				adapter->sge->espibug_skb = skb;
+				/* We want to re-use this skb later. We
+				 * simply bump the reference count and it
+				 * will not be freed...
+				 */
+				skb = skb_get(skb);
+			}
+		}
+
+		cpl = (struct cpl_tx_pkt *)__skb_push(skb, sizeof(*cpl));
+		cpl->opcode = CPL_TX_PKT;
+		cpl->ip_csum_dis = 1;    /* SW calculates IP csum */
+		cpl->l4_csum_dis = skb->ip_summed == CHECKSUM_HW ? 0 : 1;
+		/* the length field isn't used so don't bother setting it */
+
+		st->tx_cso += (skb->ip_summed == CHECKSUM_HW);
+		sge->stats.tx_do_cksum += (skb->ip_summed == CHECKSUM_HW);
+		sge->stats.tx_reg_pkts++;
+	}
+	cpl->iff = dev->if_port;
+
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+	if (adapter->vlan_grp && vlan_tx_tag_present(skb)) {
+		cpl->vlan_valid = 1;
+		cpl->vlan = htons(vlan_tx_tag_get(skb));
+		st->vlan_insert++;
+	} else
+#endif
+		cpl->vlan_valid = 0;
+
+	dev->trans_start = jiffies;
+	return t1_sge_tx(skb, adapter, 0, dev);
+}
+
+/*
+ * Callback for the Tx buffer reclaim timer.  Runs with softirqs disabled.
+ */
+static void sge_tx_reclaim_cb(unsigned long data)
+{
+	int i;
+	struct sge *sge = (struct sge *)data;
+
+	for (i = 0; i < SGE_CMDQ_N; ++i) {
+		struct cmdQ *q = &sge->cmdQ[i];
+
+		if (!spin_trylock(&q->lock))
+			continue;
+
+		reclaim_completed_tx(sge, q);
+		if (i == 0 && q->in_use)   /* flush pending credits */
+			writel(F_CMDQ0_ENABLE,
+				sge->adapter->regs + A_SG_DOORBELL);
+
+		spin_unlock(&q->lock);
+	}
+	mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
+}
+
+/*
+ * Propagate changes of the SGE coalescing parameters to the HW.
+ */
+int t1_sge_set_coalesce_params(struct sge *sge, struct sge_params *p)
+{
+	sge->netdev->poll = t1_poll;
+	sge->fixed_intrtimer = p->rx_coalesce_usecs *
+		core_ticks_per_usec(sge->adapter);
+	writel(sge->fixed_intrtimer, sge->adapter->regs + A_SG_INTRTIMER);
+	return 0;
+}
+
+/*
+ * Allocates both RX and TX resources and configures the SGE. However,
+ * the hardware is not enabled yet.
+ */
+int t1_sge_configure(struct sge *sge, struct sge_params *p)
+{
+	if (alloc_rx_resources(sge, p))
+		return -ENOMEM;
+	if (alloc_tx_resources(sge, p)) {
+		free_rx_resources(sge);
+		return -ENOMEM;
+	}
+	configure_sge(sge, p);
+
+	/*
+	 * Now that we have sized the free lists calculate the payload
+	 * capacity of the large buffers.  Other parts of the driver use
+	 * this to set the max offload coalescing size so that RX packets
+	 * do not overflow our large buffers.
+	 */
+	p->large_buf_capacity = jumbo_payload_capacity(sge);
+	return 0;
+}
+
+/*
+ * Disables the DMA engine.
+ */
+void t1_sge_stop(struct sge *sge)
+{
+	writel(0, sge->adapter->regs + A_SG_CONTROL);
+	(void) readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
+	if (is_T2(sge->adapter))
+		del_timer_sync(&sge->espibug_timer);
+	del_timer_sync(&sge->tx_reclaim_timer);
+}
+
+/*
+ * Enables the DMA engine.
+ */
+void t1_sge_start(struct sge *sge)
+{
+	refill_free_list(sge, &sge->freelQ[0]);
+	refill_free_list(sge, &sge->freelQ[1]);
+
+	writel(sge->sge_control, sge->adapter->regs + A_SG_CONTROL);
+	doorbell_pio(sge->adapter, F_FL0_ENABLE | F_FL1_ENABLE);
+	(void) readl(sge->adapter->regs + A_SG_CONTROL); /* flush */
+
+	mod_timer(&sge->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
+
+	if (is_T2(sge->adapter)) 
+		mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
+}
+
+/*
+ * Callback for the T2 ESPI 'stuck packet' workaround.
+ */
+static void espibug_workaround(void *data)
+{
+	struct adapter *adapter = (struct adapter *)data;
+	struct sge *sge = adapter->sge;
+
+	if (netif_running(adapter->port[0].dev)) {
+		struct sk_buff *skb = sge->espibug_skb;
+
+		u32 seop = t1_espi_get_mon(adapter, 0x930, 0);
+
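+		/*
+		 * This monitor signature suggests the ESPI is stuck; stamp
+		 * the saved skb with a recognizable MAC pattern and resend it
+		 * to dislodge the stuck packet.
+		 */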
+		if ((seop & 0xfff0fff) == 0xfff && skb) {
+			if (!skb->cb[0]) {
+				u8 ch_mac_addr[ETH_ALEN] =
+				    {0x0, 0x7, 0x43, 0x0, 0x0, 0x0};
+				memcpy(skb->data + sizeof(struct cpl_tx_pkt),
+				    ch_mac_addr, ETH_ALEN);
+				memcpy(skb->data + skb->len - 10, ch_mac_addr,
+				    ETH_ALEN);
+				skb->cb[0] = 0xff;
+			}
+
+			/* bump the reference count to avoid freeing of the
+			 * skb once the DMA has completed.
+			 */
+			skb = skb_get(skb);
+			t1_sge_tx(skb, adapter, 0, adapter->port[0].dev);
+		}
+	}
+	mod_timer(&sge->espibug_timer, jiffies + sge->espibug_timeout);
+}
+
+/*
+ * Creates a t1_sge structure and returns suggested resource parameters.
+ */
+struct sge * __devinit t1_sge_create(struct adapter *adapter,
+				     struct sge_params *p)
+{
+	struct sge *sge = kmalloc(sizeof(*sge), GFP_KERNEL);
+
+	if (!sge)
+		return NULL;
+	memset(sge, 0, sizeof(*sge));
+
+	sge->adapter = adapter;
+	sge->netdev = adapter->port[0].dev;
+	sge->rx_pkt_pad = t1_is_T1B(adapter) ? 0 : 2;
+	sge->jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
+
+	init_timer(&sge->tx_reclaim_timer);
+	sge->tx_reclaim_timer.data = (unsigned long)sge;
+	sge->tx_reclaim_timer.function = sge_tx_reclaim_cb;
+
+	if (is_T2(sge->adapter)) {
+		init_timer(&sge->espibug_timer);
+		sge->espibug_timer.function = (void *)&espibug_workaround;
+		sge->espibug_timer.data = (unsigned long)sge->adapter;
+		sge->espibug_timeout = 1;
+	}
+
+	p->cmdQ_size[0] = SGE_CMDQ0_E_N;
+	p->cmdQ_size[1] = SGE_CMDQ1_E_N;
+	p->freelQ_size[!sge->jumbo_fl] = SGE_FREEL_SIZE;
+	p->freelQ_size[sge->jumbo_fl] = SGE_JUMBO_FREEL_SIZE;
+	p->rx_coalesce_usecs =  50;
+	p->coalesce_enable = 0;
+	p->sample_interval_usecs = 0;
+	p->polling = 0;
+
+	return sge;
+}
diff --git a/drivers/net/chelsio/sge.h b/drivers/net/chelsio/sge.h
new file mode 100644
index 0000000..434b255
--- /dev/null
+++ b/drivers/net/chelsio/sge.h
@@ -0,0 +1,105 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: sge.h                                                               *
+ * $Revision: 1.11 $                                                          *
+ * $Date: 2005/06/21 22:10:55 $                                              *
+ * Description:                                                              *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, write to the Free Software Foundation, Inc.,   *
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#ifndef _CXGB_SGE_H_
+#define _CXGB_SGE_H_
+
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <asm/byteorder.h>
+
+#ifndef IRQ_RETVAL
+#define IRQ_RETVAL(x)
+typedef void irqreturn_t;
+#endif
+
+typedef irqreturn_t (*intr_handler_t)(int, void *, struct pt_regs *);
+
+struct sge_intr_counts {
+	unsigned int respQ_empty;      /* # times respQ empty */
+	unsigned int respQ_overflow;   /* # respQ overflow (fatal) */
+	unsigned int freelistQ_empty;  /* # times freelist empty */
+	unsigned int pkt_too_big;      /* packet too large (fatal) */
+	unsigned int pkt_mismatch;     /* packet mismatch (fatal) */
+	unsigned int cmdQ_full[3];     /* not HW IRQ, host cmdQ[] full */
+	unsigned int cmdQ_restarted[3];/* # of times cmdQ X was restarted */
+	unsigned int ethernet_pkts;    /* # of Ethernet packets received */
+	unsigned int offload_pkts;     /* # of offload packets received */
+	unsigned int offload_bundles;  /* # of offload pkt bundles delivered */
+	unsigned int pure_rsps;        /* # of non-payload responses */
+	unsigned int unhandled_irqs;   /* # of unhandled interrupts */
+	unsigned int tx_ipfrags;       /* # of TX IP fragments */
+	unsigned int tx_reg_pkts;      /* # of regular (non-LSO) TX packets */
+	unsigned int tx_lso_pkts;      /* # of LSO TX packets */
+	unsigned int tx_do_cksum;      /* # of TX packets with HW checksum */
+};
+
+struct sge_port_stats {
+	unsigned long rx_cso_good;     /* # of successful RX csum offloads */
+	unsigned long tx_cso;          /* # of TX checksum offloads */
+	unsigned long vlan_xtract;     /* # of VLAN tag extractions */
+	unsigned long vlan_insert;     /* # of VLAN tag insertions */
+	unsigned long tso;             /* # of TSO requests */
+	unsigned long rx_drops;        /* # of packets dropped due to no mem */
+};
+
+struct sk_buff;
+struct net_device;
+struct adapter;
+struct sge_params;
+struct sge;
+
+struct sge *t1_sge_create(struct adapter *, struct sge_params *);
+int t1_sge_configure(struct sge *, struct sge_params *);
+int t1_sge_set_coalesce_params(struct sge *, struct sge_params *);
+void t1_sge_destroy(struct sge *);
+intr_handler_t t1_select_intr_handler(adapter_t *adapter);
+unsigned int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
+		       unsigned int qid, struct net_device *netdev);
+int t1_start_xmit(struct sk_buff *skb, struct net_device *dev);
+void t1_set_vlan_accel(struct adapter *adapter, int on_off);
+void t1_sge_start(struct sge *);
+void t1_sge_stop(struct sge *);
+int t1_sge_intr_error_handler(struct sge *);
+void t1_sge_intr_enable(struct sge *);
+void t1_sge_intr_disable(struct sge *);
+void t1_sge_intr_clear(struct sge *);
+const struct sge_intr_counts *t1_sge_get_intr_counts(struct sge *sge);
+const struct sge_port_stats *t1_sge_get_port_stats(struct sge *sge, int port);
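+
+/*
+ * Typical call sequence for the API above (illustrative sketch only,
+ * inferred from the prototypes rather than mandated by this header): the
+ * adapter code creates the SGE, configures it and starts it before enabling
+ * interrupts, then stops and destroys it on shutdown:
+ *
+ *	sge = t1_sge_create(adapter, &adapter->params.sge);
+ *	t1_sge_configure(sge, &adapter->params.sge);
+ *	t1_sge_start(sge);
+ *	...
+ *	t1_sge_stop(sge);
+ *	t1_sge_destroy(sge);
+ */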
+
+#endif /* _CXGB_SGE_H_ */
diff --git a/drivers/net/chelsio/subr.c b/drivers/net/chelsio/subr.c
new file mode 100644
index 0000000..1ebb5d1
--- /dev/null
+++ b/drivers/net/chelsio/subr.c
@@ -0,0 +1,812 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: subr.c                                                              *
+ * $Revision: 1.27 $                                                         *
+ * $Date: 2005/06/22 01:08:36 $                                              *
+ * Description:                                                              *
+ *  Various subroutines (intr,pio,etc.) used by Chelsio 10G Ethernet driver. *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, write to the Free Software Foundation, Inc.,   *
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
+ * All rights reserved.                                                      *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: Dimitrios Michailidis   <dm@chelsio.com>                         *
+ *          Tina Yang               <tainay@chelsio.com>                     *
+ *          Felix Marti             <felix@chelsio.com>                      *
+ *          Scott Bardone           <sbardone@chelsio.com>                   *
+ *          Kurt Ottaway            <kottaway@chelsio.com>                   *
+ *          Frank DiMambro          <frank@chelsio.com>                      *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#include "common.h"
+#include "elmer0.h"
+#include "regs.h"
+#include "gmac.h"
+#include "cphy.h"
+#include "sge.h"
+#include "espi.h"
+
+/**
+ *	t1_wait_op_done - wait until an operation is completed
+ *	@adapter: the adapter performing the operation
+ *	@reg: the register to check for completion
+ *	@mask: a single-bit field within @reg that indicates completion
+ *	@polarity: the value of the field when the operation is completed
+ *	@attempts: number of check iterations
+ *	@delay: delay in usecs between iterations
+ *
+ *	Wait until an operation is completed by checking a bit in a register
+ *	up to @attempts times.  Returns %0 if the operation completes and %1
+ *	otherwise.
+ */
+static int t1_wait_op_done(adapter_t *adapter, int reg, u32 mask, int polarity,
+		    int attempts, int delay)
+{
+	while (1) {
+		u32 val = readl(adapter->regs + reg) & mask;
+
+		if (!!val == polarity)
+			return 0;
+		if (--attempts == 0)
+			return 1;
+		if (delay)
+			udelay(delay);
+	}
+}
+
+#define TPI_ATTEMPTS 50
+
+/*
+ * Write a register over the TPI interface (unlocked and locked versions).
+ */
+static int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value)
+{
+	int tpi_busy;
+
+	writel(addr, adapter->regs + A_TPI_ADDR);
+	writel(value, adapter->regs + A_TPI_WR_DATA);
+	writel(F_TPIWR, adapter->regs + A_TPI_CSR);
+
+	tpi_busy = t1_wait_op_done(adapter, A_TPI_CSR, F_TPIRDY, 1,
+				   TPI_ATTEMPTS, 3);
+	if (tpi_busy)
+		CH_ALERT("%s: TPI write to 0x%x failed\n",
+			 adapter->name, addr);
+	return tpi_busy;
+}
+
+int t1_tpi_write(adapter_t *adapter, u32 addr, u32 value)
+{
+	int ret;
+
+	spin_lock(&(adapter)->tpi_lock);
+	ret = __t1_tpi_write(adapter, addr, value);
+	spin_unlock(&(adapter)->tpi_lock);
+	return ret;
+}
+
+/*
+ * Read a register over the TPI interface (unlocked and locked versions).
+ */
+static int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp)
+{
+	int tpi_busy;
+
+	writel(addr, adapter->regs + A_TPI_ADDR);
+	writel(0, adapter->regs + A_TPI_CSR);
+
+	tpi_busy = t1_wait_op_done(adapter, A_TPI_CSR, F_TPIRDY, 1,
+				   TPI_ATTEMPTS, 3);
+	if (tpi_busy)
+		CH_ALERT("%s: TPI read from 0x%x failed\n",
+			 adapter->name, addr);
+	else
+		*valp = readl(adapter->regs + A_TPI_RD_DATA);
+	return tpi_busy;
+}
+
+int t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp)
+{
+	int ret;
+
+	spin_lock(&(adapter)->tpi_lock);
+	ret = __t1_tpi_read(adapter, addr, valp);
+	spin_unlock(&(adapter)->tpi_lock);
+	return ret;
+}
+
+/*
+ * Called when a port's link settings change to propagate the new values to the
+ * associated PHY and MAC.  After performing the common tasks it invokes an
+ * OS-specific handler.
+ */
+/* static */ void link_changed(adapter_t *adapter, int port_id)
+{
+	int link_ok, speed, duplex, fc;
+	struct cphy *phy = adapter->port[port_id].phy;
+	struct link_config *lc = &adapter->port[port_id].link_config;
+
+	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);
+
+	lc->speed = speed < 0 ? SPEED_INVALID : speed;
+	lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
+	if (!(lc->requested_fc & PAUSE_AUTONEG))
+		fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+
+	if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
+		/* Set MAC speed, duplex, and flow control to match PHY. */
+		struct cmac *mac = adapter->port[port_id].mac;
+
+		mac->ops->set_speed_duplex_fc(mac, speed, duplex, fc);
+		lc->fc = (unsigned char)fc;
+	}
+	t1_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
+}
+
+static int t1_pci_intr_handler(adapter_t *adapter)
+{
+	u32 pcix_cause;
+
+	pci_read_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, &pcix_cause);
+
+	if (pcix_cause) {
+		pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE,
+					 pcix_cause);
+		t1_fatal_err(adapter);    /* PCI errors are fatal */
+	}
+	return 0;
+}
+
+
+/*
+ * Wait until Elmer's MI1 interface is ready for new operations.
+ */
+static int mi1_wait_until_ready(adapter_t *adapter, int mi1_reg)
+{
+	int attempts = 100, busy;
+
+	do {
+		u32 val;
+
+		__t1_tpi_read(adapter, mi1_reg, &val);
+		busy = val & F_MI1_OP_BUSY;
+		if (busy)
+			udelay(10);
+	} while (busy && --attempts);
+	if (busy)
+		CH_ALERT("%s: MDIO operation timed out\n",
+			 adapter->name);
+	return busy;
+}
+
+/*
+ * MI1 MDIO initialization.
+ */
+static void mi1_mdio_init(adapter_t *adapter, const struct board_info *bi)
+{
+	u32 clkdiv = bi->clock_elmer0 / (2 * bi->mdio_mdc) - 1;
+	u32 val = F_MI1_PREAMBLE_ENABLE | V_MI1_MDI_INVERT(bi->mdio_mdiinv) |
+		V_MI1_MDI_ENABLE(bi->mdio_mdien) | V_MI1_CLK_DIV(clkdiv);
+
+	if (!(bi->caps & SUPPORTED_10000baseT_Full))
+		val |= V_MI1_SOF(1);
+	t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_CFG, val);
+}
+
+static int mi1_mdio_ext_read(adapter_t *adapter, int phy_addr, int mmd_addr,
+			     int reg_addr, unsigned int *valp)
+{
+	u32 addr = V_MI1_REG_ADDR(mmd_addr) | V_MI1_PHY_ADDR(phy_addr);
+
+	spin_lock(&(adapter)->tpi_lock);
+
+	/* Write the address we want. */
+	__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr);
+	__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, reg_addr);
+	__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP,
+		       MI1_OP_INDIRECT_ADDRESS);
+	mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
+
+	/* Write the operation we want. */
+	__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_READ);
+	mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
+
+	/* Read the data. */
+	__t1_tpi_read(adapter, A_ELMER0_PORT0_MI1_DATA, valp);
+	spin_unlock(&(adapter)->tpi_lock);
+	return 0;
+}
+
+static int mi1_mdio_ext_write(adapter_t *adapter, int phy_addr, int mmd_addr,
+			      int reg_addr, unsigned int val)
+{
+	u32 addr = V_MI1_REG_ADDR(mmd_addr) | V_MI1_PHY_ADDR(phy_addr);
+
+	spin_lock(&(adapter)->tpi_lock);
+
+	/* Write the address we want. */
+	__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_ADDR, addr);
+	__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, reg_addr);
+	__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP,
+		       MI1_OP_INDIRECT_ADDRESS);
+	mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
+
+	/* Write the data. */
+	__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_DATA, val);
+	__t1_tpi_write(adapter, A_ELMER0_PORT0_MI1_OP, MI1_OP_INDIRECT_WRITE);
+	mi1_wait_until_ready(adapter, A_ELMER0_PORT0_MI1_OP);
+	spin_unlock(&(adapter)->tpi_lock);
+	return 0;
+}
+
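+/*
+ * MDIO operations used for the external 10G PHYs: the two-step
+ * write-address-then-operate sequences above implement indirect (MMD)
+ * register access over the Elmer0 MI1 interface.
+ */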
+static struct mdio_ops mi1_mdio_ext_ops = {
+	mi1_mdio_init,
+	mi1_mdio_ext_read,
+	mi1_mdio_ext_write
+};
+
+enum {
+	CH_BRD_N110_1F,
+	CH_BRD_N210_1F,
+};
+
+static struct board_info t1_board[] = {
+
+{ CHBT_BOARD_N110, 1/*ports#*/,
+  SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE /*caps*/, CHBT_TERM_T1,
+  CHBT_MAC_PM3393, CHBT_PHY_88X2010,
+  125000000/*clk-core*/, 0/*clk-mc3*/, 0/*clk-mc4*/,
+  1/*espi-ports*/, 0/*clk-cspi*/, 44/*clk-elmer0*/, 0/*mdien*/,
+  0/*mdiinv*/, 1/*mdc*/, 0/*phybaseaddr*/, &t1_pm3393_ops,
+  &t1_mv88x201x_ops, &mi1_mdio_ext_ops,
+  "Chelsio N110 1x10GBaseX NIC" },
+
+{ CHBT_BOARD_N210, 1/*ports#*/,
+  SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE /*caps*/, CHBT_TERM_T2,
+  CHBT_MAC_PM3393, CHBT_PHY_88X2010,
+  125000000/*clk-core*/, 0/*clk-mc3*/, 0/*clk-mc4*/,
+  1/*espi-ports*/, 0/*clk-cspi*/, 44/*clk-elmer0*/, 0/*mdien*/,
+  0/*mdiinv*/, 1/*mdc*/, 0/*phybaseaddr*/, &t1_pm3393_ops,
+  &t1_mv88x201x_ops, &mi1_mdio_ext_ops,
+  "Chelsio N210 1x10GBaseX NIC" },
+
+};
+
+struct pci_device_id t1_pci_tbl[] = {
+	CH_DEVICE(7, 0, CH_BRD_N110_1F),
+	CH_DEVICE(10, 1, CH_BRD_N210_1F),
+	{ 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, t1_pci_tbl);
+
+/*
+ * Return the board_info structure with a given index.  Out-of-range indices
+ * return NULL.
+ */
+const struct board_info *t1_get_board_info(unsigned int board_id)
+{
+	return board_id < ARRAY_SIZE(t1_board) ? &t1_board[board_id] : NULL;
+}
+
+struct chelsio_vpd_t {
+	u32 format_version;
+	u8 serial_number[16];
+	u8 mac_base_address[6];
+	u8 pad[2];           /* make multiple-of-4 size requirement explicit */
+};
+
+#define EEPROMSIZE        (8 * 1024)
+#define EEPROM_MAX_POLL   4
+
+/*
+ * Read SEEPROM. A zero is written to the flag register when the address is
+ * written to the Control register. The hardware sets the flag to one when
+ * 4 bytes have been transferred to the Data register.
+ */
+int t1_seeprom_read(adapter_t *adapter, u32 addr, u32 *data)
+{
+	int i = EEPROM_MAX_POLL;
+	u16 val;
+
+	if (addr >= EEPROMSIZE || (addr & 3))
+		return -EINVAL;
+
+	pci_write_config_word(adapter->pdev, A_PCICFG_VPD_ADDR, (u16)addr);
+	do {
+		udelay(50);
+		pci_read_config_word(adapter->pdev, A_PCICFG_VPD_ADDR, &val);
+	} while (!(val & F_VPD_OP_FLAG) && --i);
+
+	if (!(val & F_VPD_OP_FLAG)) {
+		CH_ERR("%s: reading EEPROM address 0x%x failed\n",
+		       adapter->name, addr);
+		return -EIO;
+	}
+	pci_read_config_dword(adapter->pdev, A_PCICFG_VPD_DATA, data);
+	*data = le32_to_cpu(*data);
+	return 0;
+}
+
+static int t1_eeprom_vpd_get(adapter_t *adapter, struct chelsio_vpd_t *vpd)
+{
+	int addr, ret = 0;
+
+	for (addr = 0; !ret && addr < sizeof(*vpd); addr += sizeof(u32))
+		ret = t1_seeprom_read(adapter, addr,
+				      (u32 *)((u8 *)vpd + addr));
+
+	return ret;
+}
+
+/*
+ * Read a port's MAC address from the VPD ROM.
+ */
+static int vpd_macaddress_get(adapter_t *adapter, int index, u8 mac_addr[])
+{
+	struct chelsio_vpd_t vpd;
+
+	if (t1_eeprom_vpd_get(adapter, &vpd))
+		return 1;
+	memcpy(mac_addr, vpd.mac_base_address, 5);
+	mac_addr[5] = vpd.mac_base_address[5] + index;
+	return 0;
+}
+
+/*
+ * Set up the MAC/PHY according to the requested link settings.
+ *
+ * If the PHY can auto-negotiate first decide what to advertise, then
+ * enable/disable auto-negotiation as desired and reset.
+ *
+ * If the PHY does not auto-negotiate we just reset it.
+ *
+ * If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
+ * otherwise do it later based on the outcome of auto-negotiation.
+ */
+int t1_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
+{
+	unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+
+	if (lc->supported & SUPPORTED_Autoneg) {
+		lc->advertising &= ~(ADVERTISED_ASYM_PAUSE | ADVERTISED_PAUSE);
+		if (fc) {
+			lc->advertising |= ADVERTISED_ASYM_PAUSE;
+			if (fc == (PAUSE_RX | PAUSE_TX))
+				lc->advertising |= ADVERTISED_PAUSE;
+		}
+		phy->ops->advertise(phy, lc->advertising);
+
+		if (lc->autoneg == AUTONEG_DISABLE) {
+			lc->speed = lc->requested_speed;
+			lc->duplex = lc->requested_duplex;
+			lc->fc = (unsigned char)fc;
+			mac->ops->set_speed_duplex_fc(mac, lc->speed,
+						      lc->duplex, fc);
+			/* Also disables autoneg */
+			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
+			phy->ops->reset(phy, 0);
+		} else
+			phy->ops->autoneg_enable(phy); /* also resets PHY */
+	} else {
+		mac->ops->set_speed_duplex_fc(mac, -1, -1, fc);
+		lc->fc = (unsigned char)fc;
+		phy->ops->reset(phy, 0);
+	}
+	return 0;
+}
+
+/*
+ * External interrupt handler for boards using elmer0.
+ */
+int elmer0_ext_intr_handler(adapter_t *adapter)
+{
+	struct cphy *phy;
+	int phy_cause;
+	u32 cause;
+
+	t1_tpi_read(adapter, A_ELMER0_INT_CAUSE, &cause);
+
+	switch (board_info(adapter)->board) {
+	case CHBT_BOARD_N210:
+	case CHBT_BOARD_N110:
+		if (cause & ELMER0_GP_BIT6) { /* Marvell 88x2010 interrupt */
+			phy = adapter->port[0].phy;
+			phy_cause = phy->ops->interrupt_handler(phy);
+			if (phy_cause & cphy_cause_link_change)
+				link_changed(adapter, 0);
+		}
+		break;
+	}
+	t1_tpi_write(adapter, A_ELMER0_INT_CAUSE, cause);
+	return 0;
+}
+
+/* Enables all interrupts. */
+void t1_interrupts_enable(adapter_t *adapter)
+{
+	unsigned int i;
+	u32 pl_intr;
+
+	adapter->slow_intr_mask = F_PL_INTR_SGE_ERR;
+
+	t1_sge_intr_enable(adapter->sge);
+	if (adapter->espi) {
+		adapter->slow_intr_mask |= F_PL_INTR_ESPI;
+		t1_espi_intr_enable(adapter->espi);
+	}
+
+	/* Enable MAC/PHY interrupts for each port. */
+	for_each_port(adapter, i) {
+		adapter->port[i].mac->ops->interrupt_enable(adapter->port[i].mac);
+		adapter->port[i].phy->ops->interrupt_enable(adapter->port[i].phy);
+	}
+
+	/* Enable PCIX & external chip interrupts on ASIC boards. */
+	pl_intr = readl(adapter->regs + A_PL_ENABLE);
+
+	/* PCI-X interrupts */
+	pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE,
+			       0xffffffff);
+
+	adapter->slow_intr_mask |= F_PL_INTR_EXT | F_PL_INTR_PCIX;
+	pl_intr |= F_PL_INTR_EXT | F_PL_INTR_PCIX;
+	writel(pl_intr, adapter->regs + A_PL_ENABLE);
+}
+
+/* Disables all interrupts. */
+void t1_interrupts_disable(adapter_t* adapter)
+{
+	unsigned int i;
+
+	t1_sge_intr_disable(adapter->sge);
+	if (adapter->espi)
+		t1_espi_intr_disable(adapter->espi);
+
+	/* Disable MAC/PHY interrupts for each port. */
+	for_each_port(adapter, i) {
+		adapter->port[i].mac->ops->interrupt_disable(adapter->port[i].mac);
+		adapter->port[i].phy->ops->interrupt_disable(adapter->port[i].phy);
+	}
+
+	/* Disable PCIX & external chip interrupts. */
+	writel(0, adapter->regs + A_PL_ENABLE);
+
+	/* PCI-X interrupts */
+	pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_ENABLE, 0);
+
+	adapter->slow_intr_mask = 0;
+}
+
+/* Clears all interrupts */
+void t1_interrupts_clear(adapter_t* adapter)
+{
+	unsigned int i;
+	u32 pl_intr;
+
+	t1_sge_intr_clear(adapter->sge);
+	if (adapter->espi)
+		t1_espi_intr_clear(adapter->espi);
+
+	/* Clear MAC/PHY interrupts for each port. */
+	for_each_port(adapter, i) {
+		adapter->port[i].mac->ops->interrupt_clear(adapter->port[i].mac);
+		adapter->port[i].phy->ops->interrupt_clear(adapter->port[i].phy);
+	}
+
+	/* Clear PCIX & external chip interrupt causes. */
+	pl_intr = readl(adapter->regs + A_PL_CAUSE);
+
+	writel(pl_intr | F_PL_INTR_EXT | F_PL_INTR_PCIX,
+	       adapter->regs + A_PL_CAUSE);
+
+	/* PCI-X interrupts */
+	pci_write_config_dword(adapter->pdev, A_PCICFG_INTR_CAUSE, 0xffffffff);
+}
+
+/*
+ * Slow path interrupt handler for ASICs.
+ */
+int t1_slow_intr_handler(adapter_t *adapter)
+{
+	u32 cause = readl(adapter->regs + A_PL_CAUSE);
+
+	cause &= adapter->slow_intr_mask;
+	if (!cause)
+		return 0;
+	if (cause & F_PL_INTR_SGE_ERR)
+		t1_sge_intr_error_handler(adapter->sge);
+	if (cause & F_PL_INTR_ESPI)
+		t1_espi_intr_handler(adapter->espi);
+	if (cause & F_PL_INTR_PCIX)
+		t1_pci_intr_handler(adapter);
+	if (cause & F_PL_INTR_EXT)
+		t1_elmer0_ext_intr(adapter);
+
+	/* Clear the interrupts just processed. */
+	writel(cause, adapter->regs + A_PL_CAUSE);
+	(void)readl(adapter->regs + A_PL_CAUSE); /* flush writes */
+	return 1;
+}
+
+/* Pause deadlock avoidance parameters */
+#define DROP_MSEC 16
+#define DROP_PKTS_CNT  1
+
+static void set_csum_offload(adapter_t *adapter, u32 csum_bit, int enable)
+{
+	u32 val = readl(adapter->regs + A_TP_GLOBAL_CONFIG);
+
+	if (enable)
+		val |= csum_bit;
+	else
+		val &= ~csum_bit;
+	writel(val, adapter->regs + A_TP_GLOBAL_CONFIG);
+}
+
+void t1_tp_set_ip_checksum_offload(adapter_t *adapter, int enable)
+{
+	set_csum_offload(adapter, F_IP_CSUM, enable);
+}
+
+void t1_tp_set_udp_checksum_offload(adapter_t *adapter, int enable)
+{
+	set_csum_offload(adapter, F_UDP_CSUM, enable);
+}
+
+void t1_tp_set_tcp_checksum_offload(adapter_t *adapter, int enable)
+{
+	set_csum_offload(adapter, F_TCP_CSUM, enable);
+}
+
+static void t1_tp_reset(adapter_t *adapter, unsigned int tp_clk)
+{
+	u32 val;
+
+	val = F_TP_IN_CSPI_CPL | F_TP_IN_CSPI_CHECK_IP_CSUM |
+	      F_TP_IN_CSPI_CHECK_TCP_CSUM | F_TP_IN_ESPI_ETHERNET;
+	val |= F_TP_IN_ESPI_CHECK_IP_CSUM |
+	       F_TP_IN_ESPI_CHECK_TCP_CSUM;
+	writel(val, adapter->regs + A_TP_IN_CONFIG);
+	writel(F_TP_OUT_CSPI_CPL |
+	       F_TP_OUT_ESPI_ETHERNET |
+	       F_TP_OUT_ESPI_GENERATE_IP_CSUM |
+	       F_TP_OUT_ESPI_GENERATE_TCP_CSUM,
+	       adapter->regs + A_TP_OUT_CONFIG);
+
+	val = readl(adapter->regs + A_TP_GLOBAL_CONFIG);
+	val &= ~(F_IP_CSUM | F_UDP_CSUM | F_TCP_CSUM);
+	writel(val, adapter->regs + A_TP_GLOBAL_CONFIG);
+
+	/*
+	 * Enable pause frame deadlock prevention.
+	 */
+	if (is_T2(adapter)) {
+		u32 drop_ticks = DROP_MSEC * (tp_clk / 1000);
+
+		writel(F_ENABLE_TX_DROP | F_ENABLE_TX_ERROR |
+		       V_DROP_TICKS_CNT(drop_ticks) |
+		       V_NUM_PKTS_DROPPED(DROP_PKTS_CNT),
+		       adapter->regs + A_TP_TX_DROP_CONFIG);
+	}
+
+	writel(F_TP_RESET, adapter->regs + A_TP_RESET);
+}
+
+int __devinit t1_get_board_rev(adapter_t *adapter, const struct board_info *bi,
+			       struct adapter_params *p)
+{
+	p->chip_version = bi->chip_term;
+	if (p->chip_version == CHBT_TERM_T1 ||
+	    p->chip_version == CHBT_TERM_T2) {
+		u32 val = readl(adapter->regs + A_TP_PC_CONFIG);
+
+		val = G_TP_PC_REV(val);
+		if (val == 2)
+			p->chip_revision = TERM_T1B;
+		else if (val == 3)
+			p->chip_revision = TERM_T2;
+		else
+			return -1;
+	} else
+		return -1;
+	return 0;
+}
+
+/*
+ * Enable board components other than the Chelsio chip, such as external MAC
+ * and PHY.
+ */
+static int board_init(adapter_t *adapter, const struct board_info *bi)
+{
+	switch (bi->board) {
+	case CHBT_BOARD_N110:
+	case CHBT_BOARD_N210:
+		writel(V_TPIPAR(0xf), adapter->regs + A_TPI_PAR);
+		t1_tpi_write(adapter, A_ELMER0_GPO, 0x800);
+		break;
+	}
+	return 0;
+}
+
+/*
+ * Initialize and configure the Terminator HW modules.  Note that external
+ * MAC and PHYs are initialized separately.
+ */
+int t1_init_hw_modules(adapter_t *adapter)
+{
+	int err = -EIO;
+	const struct board_info *bi = board_info(adapter);
+
+	if (!bi->clock_mc4) {
+		u32 val = readl(adapter->regs + A_MC4_CFG);
+
+		writel(val | F_READY | F_MC4_SLOW, adapter->regs + A_MC4_CFG);
+		writel(F_M_BUS_ENABLE | F_TCAM_RESET,
+		       adapter->regs + A_MC5_CONFIG);
+	}
+
+	if (adapter->espi && t1_espi_init(adapter->espi, bi->chip_mac,
+					  bi->espi_nports))
+		goto out_err;
+
+	t1_tp_reset(adapter, bi->clock_core);
+
+	err = t1_sge_configure(adapter->sge, &adapter->params.sge);
+	if (err)
+		goto out_err;
+
+	err = 0;
+ out_err:
+	return err;
+}
+
+/*
+ * Determine a card's PCI mode.
+ */
+static void __devinit get_pci_mode(adapter_t *adapter, struct chelsio_pci_params *p)
+{
+	static unsigned short speed_map[] = { 33, 66, 100, 133 };
+	u32 pci_mode;
+
+	pci_read_config_dword(adapter->pdev, A_PCICFG_MODE, &pci_mode);
+	p->speed = speed_map[G_PCI_MODE_CLK(pci_mode)];
+	p->width = (pci_mode & F_PCI_MODE_64BIT) ? 64 : 32;
+	p->is_pcix = (pci_mode & F_PCI_MODE_PCIX) != 0;
+}
+
+/*
+ * Release the structures holding the SW per-Terminator-HW-module state.
+ */
+void t1_free_sw_modules(adapter_t *adapter)
+{
+	unsigned int i;
+
+	for_each_port(adapter, i) {
+		struct cmac *mac = adapter->port[i].mac;
+		struct cphy *phy = adapter->port[i].phy;
+
+		if (mac)
+			mac->ops->destroy(mac);
+		if (phy)
+			phy->ops->destroy(phy);
+	}
+
+	if (adapter->sge)
+		t1_sge_destroy(adapter->sge);
+	if (adapter->espi)
+		t1_espi_destroy(adapter->espi);
+}
+
+static void __devinit init_link_config(struct link_config *lc,
+				       const struct board_info *bi)
+{
+	lc->supported = bi->caps;
+	lc->requested_speed = lc->speed = SPEED_INVALID;
+	lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
+	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
+	if (lc->supported & SUPPORTED_Autoneg) {
+		lc->advertising = lc->supported;
+		lc->autoneg = AUTONEG_ENABLE;
+		lc->requested_fc |= PAUSE_AUTONEG;
+	} else {
+		lc->advertising = 0;
+		lc->autoneg = AUTONEG_DISABLE;
+	}
+}
+
+
+/*
+ * Allocate and initialize the data structures that hold the SW state of
+ * the Terminator HW modules.
+ */
+int __devinit t1_init_sw_modules(adapter_t *adapter,
+				 const struct board_info *bi)
+{
+	unsigned int i;
+
+	adapter->params.brd_info = bi;
+	adapter->params.nports = bi->port_number;
+	adapter->params.stats_update_period = bi->gmac->stats_update_period;
+
+	adapter->sge = t1_sge_create(adapter, &adapter->params.sge);
+	if (!adapter->sge) {
+		CH_ERR("%s: SGE initialization failed\n",
+		       adapter->name);
+		goto error;
+	}
+
+	if (bi->espi_nports && !(adapter->espi = t1_espi_create(adapter))) {
+		CH_ERR("%s: ESPI initialization failed\n",
+		       adapter->name);
+		goto error;
+	}
+
+	board_init(adapter, bi);
+	bi->mdio_ops->init(adapter, bi);
+	if (bi->gphy->reset)
+		bi->gphy->reset(adapter);
+	if (bi->gmac->reset)
+		bi->gmac->reset(adapter);
+
+	for_each_port(adapter, i) {
+		u8 hw_addr[6];
+		struct cmac *mac;
+		int phy_addr = bi->mdio_phybaseaddr + i;
+
+		adapter->port[i].phy = bi->gphy->create(adapter, phy_addr,
+							bi->mdio_ops);
+		if (!adapter->port[i].phy) {
+			CH_ERR("%s: PHY %d initialization failed\n",
+			       adapter->name, i);
+			goto error;
+		}
+
+		adapter->port[i].mac = mac = bi->gmac->create(adapter, i);
+		if (!mac) {
+			CH_ERR("%s: MAC %d initialization failed\n",
+			       adapter->name, i);
+			goto error;
+		}
+
+		/*
+		 * Get the port's MAC address either from the EEPROM, if one
+		 * exists, or from the address hardcoded in the MAC.
+		 */
+		if (vpd_macaddress_get(adapter, i, hw_addr)) {
+			CH_ERR("%s: could not read MAC address from VPD ROM\n",
+			       adapter->port[i].dev->name);
+			goto error;
+		}
+		memcpy(adapter->port[i].dev->dev_addr, hw_addr, ETH_ALEN);
+		init_link_config(&adapter->port[i].link_config, bi);
+	}
+
+	get_pci_mode(adapter, &adapter->params.pci);
+	t1_interrupts_clear(adapter);
+	return 0;
+
+ error:
+	t1_free_sw_modules(adapter);
+	return -1;
+}
diff --git a/drivers/net/chelsio/suni1x10gexp_regs.h b/drivers/net/chelsio/suni1x10gexp_regs.h
new file mode 100644
index 0000000..81816c2
--- /dev/null
+++ b/drivers/net/chelsio/suni1x10gexp_regs.h
@@ -0,0 +1,213 @@
+/*****************************************************************************
+ *                                                                           *
+ * File: suni1x10gexp_regs.h                                                 *
+ * $Revision: 1.9 $                                                          *
+ * $Date: 2005/06/22 00:17:04 $                                              *
+ * Description:                                                              *
+ *  PMC/SIERRA (pm3393) MAC-PHY functionality.                               *
+ *  part of the Chelsio 10Gb Ethernet Driver.                                *
+ *                                                                           *
+ * This program is free software; you can redistribute it and/or modify      *
+ * it under the terms of the GNU General Public License, version 2, as       *
+ * published by the Free Software Foundation.                                *
+ *                                                                           *
+ * You should have received a copy of the GNU General Public License along   *
+ * with this program; if not, write to the Free Software Foundation, Inc.,   *
+ * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.                 *
+ *                                                                           *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED    *
+ * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF      *
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.                     *
+ *                                                                           *
+ * http://www.chelsio.com                                                    *
+ *                                                                           *
+ * Maintainers: maintainers@chelsio.com                                      *
+ *                                                                           *
+ * Authors: PMC/SIERRA                                                       *
+ *                                                                           *
+ * History:                                                                  *
+ *                                                                           *
+ ****************************************************************************/
+
+#ifndef _CXGB_SUNI1x10GEXP_REGS_H_
+#define _CXGB_SUNI1x10GEXP_REGS_H_
+
+/******************************************************************************/
+/** S/UNI-1x10GE-XP REGISTER ADDRESS MAP                                     **/
+/******************************************************************************/
+/* Refer to the Register Bit Masks below for the naming of each register and  */
+/* to the S/UNI-1x10GE-XP Data Sheet for the meaning of each bit              */
+/******************************************************************************/
+
+#define SUNI1x10GEXP_REG_DEVICE_STATUS                                   0x0004
+#define SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS                         0x000D
+#define SUNI1x10GEXP_REG_GLOBAL_INTERRUPT_ENABLE                         0x000E
+#define SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_ENABLE                    0x0102
+#define SUNI1x10GEXP_REG_SERDES_3125_INTERRUPT_STATUS                    0x0104
+#define SUNI1x10GEXP_REG_RXXG_CONFIG_1                                   0x2040
+#define SUNI1x10GEXP_REG_RXXG_CONFIG_3                                   0x2042
+#define SUNI1x10GEXP_REG_RXXG_INTERRUPT                                  0x2043
+#define SUNI1x10GEXP_REG_RXXG_MAX_FRAME_LENGTH                           0x2045
+#define SUNI1x10GEXP_REG_RXXG_SA_15_0                                    0x2046
+#define SUNI1x10GEXP_REG_RXXG_SA_31_16                                   0x2047
+#define SUNI1x10GEXP_REG_RXXG_SA_47_32                                   0x2048
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_LOW                     0x204D
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_MID                     0x204E
+#define SUNI1x10GEXP_REG_RXXG_EXACT_MATCH_ADDR_1_HIGH                    0x204F
+#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW                         0x206A
+#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDLOW                      0x206B
+#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_MIDHIGH                     0x206C
+#define SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_HIGH                        0x206D
+#define SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_0                   0x206E
+#define SUNI1x10GEXP_REG_RXXG_ADDRESS_FILTER_CONTROL_2                   0x2070
+#define SUNI1x10GEXP_REG_XRF_INTERRUPT_ENABLE                            0x2088
+#define SUNI1x10GEXP_REG_XRF_INTERRUPT_STATUS                            0x2089
+#define SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_ENABLE                       0x208B
+#define SUNI1x10GEXP_REG_XRF_DIAG_INTERRUPT_STATUS                       0x208C
+#define SUNI1x10GEXP_REG_RXOAM_INTERRUPT_ENABLE                          0x20C7
+#define SUNI1x10GEXP_REG_RXOAM_INTERRUPT_STATUS                          0x20C8
+#define SUNI1x10GEXP_REG_MSTAT_CONTROL                                   0x2100
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_0                        0x2101
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_1                        0x2102
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_2                        0x2103
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_ROLLOVER_3                        0x2104
+#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_0                          0x2105
+#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_1                          0x2106
+#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_2                          0x2107
+#define SUNI1x10GEXP_REG_MSTAT_INTERRUPT_MASK_3                          0x2108
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_0_LOW                             0x2110
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_1_LOW                             0x2114
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_4_LOW                             0x2120
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_5_LOW                             0x2124
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_6_LOW                             0x2128
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_8_LOW                             0x2130
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_10_LOW                            0x2138
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_11_LOW                            0x213C
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_12_LOW                            0x2140
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_13_LOW                            0x2144
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_15_LOW                            0x214C
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_16_LOW                            0x2150
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_17_LOW                            0x2154
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_18_LOW                            0x2158
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_33_LOW                            0x2194
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_35_LOW                            0x219C
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_36_LOW                            0x21A0
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_38_LOW                            0x21A8
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_40_LOW                            0x21B0
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_42_LOW                            0x21B8
+#define SUNI1x10GEXP_REG_MSTAT_COUNTER_43_LOW                            0x21BC
+#define SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_ENABLE                       0x2209
+#define SUNI1x10GEXP_REG_IFLX_FIFO_OVERFLOW_INTERRUPT                    0x220A
+#define SUNI1x10GEXP_REG_PL4ODP_INTERRUPT_MASK                           0x2282
+#define SUNI1x10GEXP_REG_PL4ODP_INTERRUPT                                0x2283
+#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_STATUS                        0x2300
+#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_CHANGE                        0x2301
+#define SUNI1x10GEXP_REG_PL4IO_LOCK_DETECT_MASK                          0x2302
+#define SUNI1x10GEXP_REG_TXXG_CONFIG_1                                   0x3040
+#define SUNI1x10GEXP_REG_TXXG_CONFIG_3                                   0x3042
+#define SUNI1x10GEXP_REG_TXXG_INTERRUPT                                  0x3043
+#define SUNI1x10GEXP_REG_TXXG_MAX_FRAME_SIZE                             0x3045
+#define SUNI1x10GEXP_REG_TXXG_SA_15_0                                    0x3047
+#define SUNI1x10GEXP_REG_TXXG_SA_31_16                                   0x3048
+#define SUNI1x10GEXP_REG_TXXG_SA_47_32                                   0x3049
+#define SUNI1x10GEXP_REG_XTEF_INTERRUPT_STATUS                           0x3084
+#define SUNI1x10GEXP_REG_XTEF_INTERRUPT_ENABLE                           0x3085
+#define SUNI1x10GEXP_REG_TXOAM_INTERRUPT_ENABLE                          0x30C6
+#define SUNI1x10GEXP_REG_TXOAM_INTERRUPT_STATUS                          0x30C7
+#define SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_ENABLE                 0x320C
+#define SUNI1x10GEXP_REG_EFLX_FIFO_OVERFLOW_ERROR_INDICATION             0x320D
+#define SUNI1x10GEXP_REG_PL4IDU_INTERRUPT_MASK                           0x3282
+#define SUNI1x10GEXP_REG_PL4IDU_INTERRUPT                                0x3283
+
+/******************************************************************************/
+/*                 -- End register offset definitions --                      */
+/******************************************************************************/
+
+/******************************************************************************/
+/** SUNI-1x10GE-XP REGISTER BIT MASKS                                        **/
+/******************************************************************************/
+
+/*----------------------------------------------------------------------------
+ * Register 0x0004: S/UNI-1x10GE-XP Device Status
+ *    Bit 9 TOP_SXRA_EXPIRED
+ *    Bit 8 TOP_MDIO_BUSY
+ *    Bit 7 TOP_DTRB
+ *    Bit 6 TOP_EXPIRED
+ *    Bit 5 TOP_PAUSED
+ *    Bit 4 TOP_PL4_ID_DOOL
+ *    Bit 3 TOP_PL4_IS_DOOL
+ *    Bit 2 TOP_PL4_ID_ROOL
+ *    Bit 1 TOP_PL4_IS_ROOL
+ *    Bit 0 TOP_PL4_OUT_ROOL
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TOP_SXRA_EXPIRED  0x0200
+#define SUNI1x10GEXP_BITMSK_TOP_EXPIRED       0x0040
+#define SUNI1x10GEXP_BITMSK_TOP_PL4_ID_DOOL   0x0010
+#define SUNI1x10GEXP_BITMSK_TOP_PL4_IS_DOOL   0x0008
+#define SUNI1x10GEXP_BITMSK_TOP_PL4_ID_ROOL   0x0004
+#define SUNI1x10GEXP_BITMSK_TOP_PL4_IS_ROOL   0x0002
+#define SUNI1x10GEXP_BITMSK_TOP_PL4_OUT_ROOL  0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x000E: PM3393 Global Interrupt Enable
+ *    Bit 15 TOP_INTE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TOP_INTE  0x8000
+
+/*----------------------------------------------------------------------------
+ * Register 0x2040: RXXG Configuration 1
+ *    Bit 15  RXXG_RXEN
+ *    Bit 14  RXXG_ROCF
+ *    Bit 13  RXXG_PAD_STRIP
+ *    Bit 10  RXXG_PUREP
+ *    Bit 9   RXXG_LONGP
+ *    Bit 8   RXXG_PARF
+ *    Bit 7   RXXG_FLCHK
+ *    Bit 5   RXXG_PASS_CTRL
+ *    Bit 3   RXXG_CRC_STRIP
+ *    Bit 2-0 RXXG_MIFG
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXXG_RXEN       0x8000
+#define SUNI1x10GEXP_BITMSK_RXXG_PUREP      0x0400
+#define SUNI1x10GEXP_BITMSK_RXXG_FLCHK      0x0080
+#define SUNI1x10GEXP_BITMSK_RXXG_CRC_STRIP  0x0008
+
+/*----------------------------------------------------------------------------
+ * Register 0x2070: RXXG Address Filter Control 2
+ *    Bit 1 RXXG_PMODE
+ *    Bit 0 RXXG_MHASH_EN
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_RXXG_PMODE     0x0002
+#define SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN  0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x2100: MSTAT Control
+ *    Bit 2 MSTAT_WRITE
+ *    Bit 1 MSTAT_CLEAR
+ *    Bit 0 MSTAT_SNAP
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_MSTAT_CLEAR  0x0002
+#define SUNI1x10GEXP_BITMSK_MSTAT_SNAP   0x0001
+
+/*----------------------------------------------------------------------------
+ * Register 0x3040: TXXG Configuration Register 1
+ *    Bit 15   TXXG_TXEN0
+ *    Bit 13   TXXG_HOSTPAUSE
+ *    Bit 12-7 TXXG_IPGT
+ *    Bit 5    TXXG_32BIT_ALIGN
+ *    Bit 4    TXXG_CRCEN
+ *    Bit 3    TXXG_FCTX
+ *    Bit 2    TXXG_FCRX
+ *    Bit 1    TXXG_PADEN
+ *    Bit 0    TXXG_SPRE
+ *----------------------------------------------------------------------------*/
+#define SUNI1x10GEXP_BITMSK_TXXG_TXEN0        0x8000
+#define SUNI1x10GEXP_BITOFF_TXXG_IPGT         7
+#define SUNI1x10GEXP_BITMSK_TXXG_32BIT_ALIGN  0x0020
+#define SUNI1x10GEXP_BITMSK_TXXG_CRCEN        0x0010
+#define SUNI1x10GEXP_BITMSK_TXXG_FCTX         0x0008
+#define SUNI1x10GEXP_BITMSK_TXXG_FCRX         0x0004
+#define SUNI1x10GEXP_BITMSK_TXXG_PADEN        0x0002
+
+#endif /* _CXGB_SUNI1x10GEXP_REGS_H_ */
+
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index d0fa244..25cc20e 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   
-  Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
+  Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
   
   This program is free software; you can redistribute it and/or modify it 
   under the terms of the GNU General Public License as published by the Free 
@@ -156,7 +156,7 @@
 
 #define DRV_NAME		"e100"
 #define DRV_EXT		"-NAPI"
-#define DRV_VERSION		"3.4.8-k2"DRV_EXT
+#define DRV_VERSION		"3.4.14-k2"DRV_EXT
 #define DRV_DESCRIPTION		"Intel(R) PRO/100 Network Driver"
 #define DRV_COPYRIGHT		"Copyright(c) 1999-2005 Intel Corporation"
 #define PFX			DRV_NAME ": "
@@ -785,6 +785,7 @@
 }
 
 #define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */
+#define E100_WAIT_SCB_FAST 20       /* delay like the old code */
 static inline int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
 {
 	unsigned long flags;
@@ -798,7 +799,7 @@
 		if(likely(!readb(&nic->csr->scb.cmd_lo)))
 			break;
 		cpu_relax();
-		if(unlikely(i > (E100_WAIT_SCB_TIMEOUT >> 1)))
+		if(unlikely(i > E100_WAIT_SCB_FAST))
 			udelay(5);
 	}
 	if(unlikely(i == E100_WAIT_SCB_TIMEOUT)) {
@@ -902,8 +903,8 @@
 
 static void e100_get_defaults(struct nic *nic)
 {
-	struct param_range rfds = { .min = 16, .max = 256, .count = 64 };
-	struct param_range cbs  = { .min = 64, .max = 256, .count = 64 };
+	struct param_range rfds = { .min = 16, .max = 256, .count = 256 };
+	struct param_range cbs  = { .min = 64, .max = 256, .count = 128 };
 
 	pci_read_config_byte(nic->pdev, PCI_REVISION_ID, &nic->rev_id);
 	/* MAC type is encoded as rev ID; exception: ICH is treated as 82559 */
@@ -1006,25 +1007,213 @@
 		c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
 }
 
+/********************************************************/
+/*  Micro code for 8086:1229 Rev 8                      */
+/********************************************************/
+
+/*  Parameter values for the D101M B-step  */
+#define D101M_CPUSAVER_TIMER_DWORD		78
+#define D101M_CPUSAVER_BUNDLE_DWORD		65
+#define D101M_CPUSAVER_MIN_SIZE_DWORD		126
+
+#define D101M_B_RCVBUNDLE_UCODE \
+{\
+0x00550215, 0xFFFF0437, 0xFFFFFFFF, 0x06A70789, 0xFFFFFFFF, 0x0558FFFF, \
+0x000C0001, 0x00101312, 0x000C0008, 0x00380216, \
+0x0010009C, 0x00204056, 0x002380CC, 0x00380056, \
+0x0010009C, 0x00244C0B, 0x00000800, 0x00124818, \
+0x00380438, 0x00000000, 0x00140000, 0x00380555, \
+0x00308000, 0x00100662, 0x00100561, 0x000E0408, \
+0x00134861, 0x000C0002, 0x00103093, 0x00308000, \
+0x00100624, 0x00100561, 0x000E0408, 0x00100861, \
+0x000C007E, 0x00222C21, 0x000C0002, 0x00103093, \
+0x00380C7A, 0x00080000, 0x00103090, 0x00380C7A, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x0010009C, 0x00244C2D, 0x00010004, 0x00041000, \
+0x003A0437, 0x00044010, 0x0038078A, 0x00000000, \
+0x00100099, 0x00206C7A, 0x0010009C, 0x00244C48, \
+0x00130824, 0x000C0001, 0x00101213, 0x00260C75, \
+0x00041000, 0x00010004, 0x00130826, 0x000C0006, \
+0x002206A8, 0x0013C926, 0x00101313, 0x003806A8, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00080600, 0x00101B10, 0x00050004, 0x00100826, \
+0x00101210, 0x00380C34, 0x00000000, 0x00000000, \
+0x0021155B, 0x00100099, 0x00206559, 0x0010009C, \
+0x00244559, 0x00130836, 0x000C0000, 0x00220C62, \
+0x000C0001, 0x00101B13, 0x00229C0E, 0x00210C0E, \
+0x00226C0E, 0x00216C0E, 0x0022FC0E, 0x00215C0E, \
+0x00214C0E, 0x00380555, 0x00010004, 0x00041000, \
+0x00278C67, 0x00040800, 0x00018100, 0x003A0437, \
+0x00130826, 0x000C0001, 0x00220559, 0x00101313, \
+0x00380559, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00130831, 0x0010090B, 0x00124813, \
+0x000CFF80, 0x002606AB, 0x00041000, 0x00010004, \
+0x003806A8, 0x00000000, 0x00000000, 0x00000000, \
+}
+
+/********************************************************/
+/*  Micro code for 8086:1229 Rev 9                      */
+/********************************************************/
+
+/*  Parameter values for the D101S  */
+#define D101S_CPUSAVER_TIMER_DWORD		78
+#define D101S_CPUSAVER_BUNDLE_DWORD		67
+#define D101S_CPUSAVER_MIN_SIZE_DWORD		128
+
+#define D101S_RCVBUNDLE_UCODE \
+{\
+0x00550242, 0xFFFF047E, 0xFFFFFFFF, 0x06FF0818, 0xFFFFFFFF, 0x05A6FFFF, \
+0x000C0001, 0x00101312, 0x000C0008, 0x00380243, \
+0x0010009C, 0x00204056, 0x002380D0, 0x00380056, \
+0x0010009C, 0x00244F8B, 0x00000800, 0x00124818, \
+0x0038047F, 0x00000000, 0x00140000, 0x003805A3, \
+0x00308000, 0x00100610, 0x00100561, 0x000E0408, \
+0x00134861, 0x000C0002, 0x00103093, 0x00308000, \
+0x00100624, 0x00100561, 0x000E0408, 0x00100861, \
+0x000C007E, 0x00222FA1, 0x000C0002, 0x00103093, \
+0x00380F90, 0x00080000, 0x00103090, 0x00380F90, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x0010009C, 0x00244FAD, 0x00010004, 0x00041000, \
+0x003A047E, 0x00044010, 0x00380819, 0x00000000, \
+0x00100099, 0x00206FFD, 0x0010009A, 0x0020AFFD, \
+0x0010009C, 0x00244FC8, 0x00130824, 0x000C0001, \
+0x00101213, 0x00260FF7, 0x00041000, 0x00010004, \
+0x00130826, 0x000C0006, 0x00220700, 0x0013C926, \
+0x00101313, 0x00380700, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00080600, 0x00101B10, 0x00050004, 0x00100826, \
+0x00101210, 0x00380FB6, 0x00000000, 0x00000000, \
+0x002115A9, 0x00100099, 0x002065A7, 0x0010009A, \
+0x0020A5A7, 0x0010009C, 0x002445A7, 0x00130836, \
+0x000C0000, 0x00220FE4, 0x000C0001, 0x00101B13, \
+0x00229F8E, 0x00210F8E, 0x00226F8E, 0x00216F8E, \
+0x0022FF8E, 0x00215F8E, 0x00214F8E, 0x003805A3, \
+0x00010004, 0x00041000, 0x00278FE9, 0x00040800, \
+0x00018100, 0x003A047E, 0x00130826, 0x000C0001, \
+0x002205A7, 0x00101313, 0x003805A7, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00130831, \
+0x0010090B, 0x00124813, 0x000CFF80, 0x00260703, \
+0x00041000, 0x00010004, 0x00380700  \
+}
+
+/********************************************************/
+/*  Micro code for the 8086:1229 Rev F/10               */
+/********************************************************/
+
+/*  Parameter values for the D102 E-step  */
+#define D102_E_CPUSAVER_TIMER_DWORD		42
+#define D102_E_CPUSAVER_BUNDLE_DWORD		54
+#define D102_E_CPUSAVER_MIN_SIZE_DWORD		46
+
+#define     D102_E_RCVBUNDLE_UCODE \
+{\
+0x007D028F, 0x0E4204F9, 0x14ED0C85, 0x14FA14E9, 0x0EF70E36, 0x1FFF1FFF, \
+0x00E014B9, 0x00000000, 0x00000000, 0x00000000, \
+0x00E014BD, 0x00000000, 0x00000000, 0x00000000, \
+0x00E014D5, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00E014C1, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00E014C8, 0x00000000, 0x00000000, 0x00000000, \
+0x00200600, 0x00E014EE, 0x00000000, 0x00000000, \
+0x0030FF80, 0x00940E46, 0x00038200, 0x00102000, \
+0x00E00E43, 0x00000000, 0x00000000, 0x00000000, \
+0x00300006, 0x00E014FB, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00906E41, 0x00800E3C, 0x00E00E39, 0x00000000, \
+0x00906EFD, 0x00900EFD, 0x00E00EF8, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+0x00000000, 0x00000000, 0x00000000, 0x00000000, \
+}
+
 static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
 {
-	int i;
-	static const u32 ucode[UCODE_SIZE] = {
-		/* NFS packets are misinterpreted as TCO packets and
-		 * incorrectly routed to the BMC over SMBus.  This
-		 * microcode patch checks the fragmented IP bit in the
-		 * NFS/UDP header to distinguish between NFS and TCO. */
-		0x0EF70E36, 0x1FFF1FFF, 0x1FFF1FFF, 0x1FFF1FFF, 0x1FFF1FFF,
-		0x1FFF1FFF, 0x00906E41, 0x00800E3C, 0x00E00E39, 0x00000000,
-		0x00906EFD, 0x00900EFD,	0x00E00EF8,
-	};
+/* *INDENT-OFF* */
+	static struct {
+		u32 ucode[UCODE_SIZE + 1];
+		u8 mac;
+		u8 timer_dword;
+		u8 bundle_dword;
+		u8 min_size_dword;
+	} ucode_opts[] = {
+		{ D101M_B_RCVBUNDLE_UCODE,
+		  mac_82559_D101M,
+		  D101M_CPUSAVER_TIMER_DWORD,
+		  D101M_CPUSAVER_BUNDLE_DWORD,
+		  D101M_CPUSAVER_MIN_SIZE_DWORD },
+		{ D101S_RCVBUNDLE_UCODE,
+		  mac_82559_D101S,
+		  D101S_CPUSAVER_TIMER_DWORD,
+		  D101S_CPUSAVER_BUNDLE_DWORD,
+		  D101S_CPUSAVER_MIN_SIZE_DWORD },
+		{ D102_E_RCVBUNDLE_UCODE,
+		  mac_82551_F,
+		  D102_E_CPUSAVER_TIMER_DWORD,
+		  D102_E_CPUSAVER_BUNDLE_DWORD,
+		  D102_E_CPUSAVER_MIN_SIZE_DWORD },
+		{ D102_E_RCVBUNDLE_UCODE,
+		  mac_82551_10,
+		  D102_E_CPUSAVER_TIMER_DWORD,
+		  D102_E_CPUSAVER_BUNDLE_DWORD,
+		  D102_E_CPUSAVER_MIN_SIZE_DWORD },
+		{ {0}, 0, 0, 0, 0}
+	}, *opts;
+/* *INDENT-ON* */
 
-	if(nic->mac == mac_82551_F || nic->mac == mac_82551_10) {
-		for(i = 0; i < UCODE_SIZE; i++)
-			cb->u.ucode[i] = cpu_to_le32(ucode[i]);
-		cb->command = cpu_to_le16(cb_ucode);
-	} else
-		cb->command = cpu_to_le16(cb_nop);
+#define BUNDLESMALL 1
+#define BUNDLEMAX 50
+#define INTDELAY 15000
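+
+/*
+ * The three values above are patched into the CPU-saver microcode below:
+ * INTDELAY goes into the timer dword (receive interrupt delay), BUNDLEMAX
+ * into the bundle dword (maximum frames coalesced per interrupt), and
+ * BUNDLESMALL selects via the min-size dword whether small frames are
+ * bundled as well.
+ */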
+
+	opts = ucode_opts;
+
+	/* do not load u-code for ICH devices */
+	if (nic->flags & ich)
+		return;
+
+	/* Search for ucode match against h/w rev_id */
+	while (opts->mac) {
+		if (nic->mac == opts->mac) {
+			int i;
+			u32 *ucode = opts->ucode;
+
+			/* Insert user-tunable settings */
+			ucode[opts->timer_dword] &= 0xFFFF0000;
+			ucode[opts->timer_dword] |=
+				(u16) INTDELAY;
+			ucode[opts->bundle_dword] &= 0xFFFF0000;
+			ucode[opts->bundle_dword] |= (u16) BUNDLEMAX;
+			ucode[opts->min_size_dword] &= 0xFFFF0000;
+			ucode[opts->min_size_dword] |=
+				(BUNDLESMALL) ?  0xFFFF : 0xFF80;
+
+			for(i = 0; i < UCODE_SIZE; i++)
+				cb->u.ucode[i] = cpu_to_le32(ucode[i]);
+			cb->command = cpu_to_le16(cb_ucode);
+			return;
+		}
+		opts++;
+	}
+
+	cb->command = cpu_to_le16(cb_nop);
 }
 
 static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
@@ -1307,14 +1496,15 @@
 {
 	cb->command = nic->tx_command;
 	/* interrupt every 16 packets regardless of delay */
-	if((nic->cbs_avail & ~15) == nic->cbs_avail) cb->command |= cb_i;
+	if((nic->cbs_avail & ~15) == nic->cbs_avail)
+		cb->command |= cpu_to_le16(cb_i);
 	cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
 	cb->u.tcb.tcb_byte_count = 0;
 	cb->u.tcb.threshold = nic->tx_threshold;
 	cb->u.tcb.tbd_count = 1;
 	cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
 		skb->data, skb->len, PCI_DMA_TODEVICE));
-	// check for mapping failure?
+	/* check for mapping failure? */
 	cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
 }
 
@@ -1539,7 +1729,7 @@
 		/* Don't indicate if hardware indicates errors */
 		nic->net_stats.rx_dropped++;
 		dev_kfree_skb_any(skb);
-	} else if(actual_size > nic->netdev->mtu + VLAN_ETH_HLEN) {
+	} else if(actual_size > ETH_DATA_LEN + VLAN_ETH_HLEN) {
 		/* Don't indicate oversized frames */
 		nic->rx_over_length_errors++;
 		nic->net_stats.rx_dropped++;
@@ -1706,6 +1896,7 @@
 static void e100_netpoll(struct net_device *netdev)
 {
 	struct nic *nic = netdev_priv(netdev);
+
 	e100_disable_irq(nic);
 	e100_intr(nic->pdev->irq, netdev, NULL);
 	e100_tx_clean(nic);
@@ -2108,6 +2299,8 @@
 	}
 	for(i = 0; i < E100_TEST_LEN; i++)
 		test->flags |= data[i] ? ETH_TEST_FL_FAILED : 0;
+
+	msleep_interruptible(4 * 1000);
 }
 
 static int e100_phys_id(struct net_device *netdev, u32 data)
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
new file mode 100644
index 0000000..bf3440a
--- /dev/null
+++ b/drivers/net/sis190.c
@@ -0,0 +1,1843 @@
+/*
+   sis190.c: Silicon Integrated Systems SiS190 ethernet driver
+
+   Copyright (c) 2003 K.M. Liu <kmliu@sis.com>
+   Copyright (c) 2003, 2004 Jeff Garzik <jgarzik@pobox.com>
+   Copyright (c) 2003, 2004, 2005 Francois Romieu <romieu@fr.zoreil.com>
+
+   Based on r8169.c, tg3.c, 8139cp.c, skge.c, epic100.c and SiS 190/191
+   genuine driver.
+
+   This software may be used and distributed according to the terms of
+   the GNU General Public License (GPL), incorporated herein by reference.
+   Drivers based on or derived from this code fall under the GPL and must
+   retain the authorship, copyright and license notice.  This file is not
+   a complete program and may only be used when the entire operating
+   system is licensed under the GPL.
+
+   See the file COPYING in this distribution for more information.
+
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/pci.h>
+#include <linux/mii.h>
+#include <linux/delay.h>
+#include <linux/crc32.h>
+#include <linux/dma-mapping.h>
+#include <asm/irq.h>
+
+#define net_drv(p, arg...)	if (netif_msg_drv(p)) \
+					printk(arg)
+#define net_probe(p, arg...)	if (netif_msg_probe(p)) \
+					printk(arg)
+#define net_link(p, arg...)	if (netif_msg_link(p)) \
+					printk(arg)
+#define net_intr(p, arg...)	if (netif_msg_intr(p)) \
+					printk(arg)
+#define net_tx_err(p, arg...)	if (netif_msg_tx_err(p)) \
+					printk(arg)
+
+#define PHY_MAX_ADDR		32
+#define PHY_ID_ANY		0x1f
+#define MII_REG_ANY		0x1f
+
+#ifdef CONFIG_SIS190_NAPI
+#define NAPI_SUFFIX	"-NAPI"
+#else
+#define NAPI_SUFFIX	""
+#endif
+
+#define DRV_VERSION		"1.2" NAPI_SUFFIX
+#define DRV_NAME		"sis190"
+#define SIS190_DRIVER_NAME	DRV_NAME " Gigabit Ethernet driver " DRV_VERSION
+#define PFX DRV_NAME ": "
+
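+/*
+ * With CONFIG_SIS190_NAPI the receive path hands packets to
+ * netif_receive_skb() and honors the poll quota; otherwise it falls back
+ * to netif_rx() and the quota argument is ignored.
+ */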
+#ifdef CONFIG_SIS190_NAPI
+#define sis190_rx_skb			netif_receive_skb
+#define sis190_rx_quota(count, quota)	min(count, quota)
+#else
+#define sis190_rx_skb			netif_rx
+#define sis190_rx_quota(count, quota)	count
+#endif
+
+#define MAC_ADDR_LEN		6
+
+#define NUM_TX_DESC		64	/* [8..1024] */
+#define NUM_RX_DESC		64	/* [8..8192] */
+#define TX_RING_BYTES		(NUM_TX_DESC * sizeof(struct TxDesc))
+#define RX_RING_BYTES		(NUM_RX_DESC * sizeof(struct RxDesc))
+#define RX_BUF_SIZE		1536
+#define RX_BUF_MASK		0xfff8
+
+#define SIS190_REGS_SIZE	0x80
+#define SIS190_TX_TIMEOUT	(6*HZ)
+#define SIS190_PHY_TIMEOUT	(10*HZ)
+#define SIS190_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
+				 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
+				 NETIF_MSG_IFDOWN)
+
+/* Enhanced PHY access register bit definitions */
+#define EhnMIIread		0x0000
+#define EhnMIIwrite		0x0020
+#define EhnMIIdataShift		16
+#define EhnMIIpmdShift		6	/* 7016 only */
+#define EhnMIIregShift		11
+#define EhnMIIreq		0x0010
+#define EhnMIInotDone		0x0010
+
+/* Write/read MMIO register */
+#define SIS_W8(reg, val)	writeb ((val), ioaddr + (reg))
+#define SIS_W16(reg, val)	writew ((val), ioaddr + (reg))
+#define SIS_W32(reg, val)	writel ((val), ioaddr + (reg))
+#define SIS_R8(reg)		readb (ioaddr + (reg))
+#define SIS_R16(reg)		readw (ioaddr + (reg))
+#define SIS_R32(reg)		readl (ioaddr + (reg))
+
+#define SIS_PCI_COMMIT()	SIS_R32(IntrControl)
+
+enum sis190_registers {
+	TxControl		= 0x00,
+	TxDescStartAddr		= 0x04,
+	rsv0			= 0x08,	// reserved
+	TxSts			= 0x0c,	// unused (Control/Status)
+	RxControl		= 0x10,
+	RxDescStartAddr		= 0x14,
+	rsv1			= 0x18,	// reserved
+	RxSts			= 0x1c,	// unused
+	IntrStatus		= 0x20,
+	IntrMask		= 0x24,
+	IntrControl		= 0x28,
+	IntrTimer		= 0x2c,	// unused (Interrupt Timer)
+	PMControl		= 0x30,	// unused (Power Mgmt Control/Status)
+	rsv2			= 0x34,	// reserved
+	ROMControl		= 0x38,
+	ROMInterface		= 0x3c,
+	StationControl		= 0x40,
+	GMIIControl		= 0x44,
+	GIoCR			= 0x48, // unused (GMAC IO Compensation)
+	GIoCtrl			= 0x4c, // unused (GMAC IO Control)
+	TxMacControl		= 0x50,
+	TxLimit			= 0x54, // unused (Tx MAC Timer/TryLimit)
+	RGDelay			= 0x58, // unused (RGMII Tx Internal Delay)
+	rsv3			= 0x5c, // reserved
+	RxMacControl		= 0x60,
+	RxMacAddr		= 0x62,
+	RxHashTable		= 0x68,
+	// Undocumented		= 0x6c,
+	RxWolCtrl		= 0x70,
+	RxWolData		= 0x74, // unused (Rx WOL Data Access)
+	RxMPSControl		= 0x78,	// unused (Rx MPS Control)
+	rsv4			= 0x7c, // reserved
+};
+
+enum sis190_register_content {
+	/* IntrStatus */
+	SoftInt			= 0x40000000,	// unused
+	Timeup			= 0x20000000,	// unused
+	PauseFrame		= 0x00080000,	// unused
+	MagicPacket		= 0x00040000,	// unused
+	WakeupFrame		= 0x00020000,	// unused
+	LinkChange		= 0x00010000,
+	RxQEmpty		= 0x00000080,
+	RxQInt			= 0x00000040,
+	TxQ1Empty		= 0x00000020,	// unused
+	TxQ1Int			= 0x00000010,
+	TxQ0Empty		= 0x00000008,	// unused
+	TxQ0Int			= 0x00000004,
+	RxHalt			= 0x00000002,
+	TxHalt			= 0x00000001,
+
+	/* {Rx/Tx}CmdBits */
+	CmdReset		= 0x10,
+	CmdRxEnb		= 0x08,		// unused
+	CmdTxEnb		= 0x01,
+	RxBufEmpty		= 0x01,		// unused
+
+	/* Cfg9346Bits */
+	Cfg9346_Lock		= 0x00,		// unused
+	Cfg9346_Unlock		= 0xc0,		// unused
+
+	/* RxMacControl */
+	AcceptErr		= 0x20,		// unused
+	AcceptRunt		= 0x10,		// unused
+	AcceptBroadcast		= 0x0800,
+	AcceptMulticast		= 0x0400,
+	AcceptMyPhys		= 0x0200,
+	AcceptAllPhys		= 0x0100,
+
+	/* RxConfigBits */
+	RxCfgFIFOShift		= 13,
+	RxCfgDMAShift		= 8,		// 0x1a in RxControl ?
+
+	/* TxConfigBits */
+	TxInterFrameGapShift	= 24,
+	TxDMAShift		= 8, /* DMA burst value (0-7) is shift this many bits */
+
+	/* StationControl */
+	_1000bpsF		= 0x1c00,
+	_1000bpsH		= 0x0c00,
+	_100bpsF		= 0x1800,
+	_100bpsH		= 0x0800,
+	_10bpsF			= 0x1400,
+	_10bpsH			= 0x0400,
+
+	LinkStatus		= 0x02,		// unused
+	FullDup			= 0x01,		// unused
+
+	/* TBICSRBit */
+	TBILinkOK		= 0x02000000,	// unused
+};
+
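+/*
+ * Descriptors are shared with the NIC in little-endian order;
+ * setting OWNbit in 'status' hands a descriptor to the chip.
+ */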
+struct TxDesc {
+	__le32 PSize;
+	__le32 status;
+	__le32 addr;
+	__le32 size;
+};
+
+struct RxDesc {
+	__le32 PSize;
+	__le32 status;
+	__le32 addr;
+	__le32 size;
+};
+
+enum _DescStatusBit {
+	/* _Desc.status */
+	OWNbit		= 0x80000000, // RXOWN/TXOWN
+	INTbit		= 0x40000000, // RXINT/TXINT
+	CRCbit		= 0x00020000, // CRCOFF/CRCEN
+	PADbit		= 0x00010000, // PREADD/PADEN
+	/* _Desc.size */
+	RingEnd		= 0x80000000,
+	/* TxDesc.status */
+	LSEN		= 0x08000000, // TSO ? -- FR
+	IPCS		= 0x04000000,
+	TCPCS		= 0x02000000,
+	UDPCS		= 0x01000000,
+	BSTEN		= 0x00800000,
+	EXTEN		= 0x00400000,
+	DEFEN		= 0x00200000,
+	BKFEN		= 0x00100000,
+	CRSEN		= 0x00080000,
+	COLEN		= 0x00040000,
+	THOL3		= 0x30000000,
+	THOL2		= 0x20000000,
+	THOL1		= 0x10000000,
+	THOL0		= 0x00000000,
+	/* RxDesc.status */
+	IPON		= 0x20000000,
+	TCPON		= 0x10000000,
+	UDPON		= 0x08000000,
+	Wakup		= 0x00400000,
+	Magic		= 0x00200000,
+	Pause		= 0x00100000,
+	DEFbit		= 0x00200000,
+	BCAST		= 0x000c0000,
+	MCAST		= 0x00080000,
+	UCAST		= 0x00040000,
+	/* RxDesc.PSize */
+	TAGON		= 0x80000000,
+	RxDescCountMask	= 0x7f000000, // multi-desc pkt when > 1 ? -- FR
+	ABORT		= 0x00800000,
+	SHORT		= 0x00400000,
+	LIMIT		= 0x00200000,
+	MIIER		= 0x00100000,
+	OVRUN		= 0x00080000,
+	NIBON		= 0x00040000,
+	COLON		= 0x00020000,
+	CRCOK		= 0x00010000,
+	RxSizeMask	= 0x0000ffff
+	/*
+	 * The asic could apparently do vlan, TSO, jumbo (sis191 only) and
+	 * provide two (unused with Linux) Tx queues. No publicly
+	 * available documentation, alas.
+	 */
+};
+
+enum sis190_eeprom_access_register_bits {
+	EECS	= 0x00000001,	// unused
+	EECLK	= 0x00000002,	// unused
+	EEDO	= 0x00000008,	// unused
+	EEDI	= 0x00000004,	// unused
+	EEREQ	= 0x00000080,
+	EEROP	= 0x00000200,
+	EEWOP	= 0x00000100	// unused
+};
+
+/* EEPROM Addresses */
+enum sis190_eeprom_address {
+	EEPROMSignature	= 0x00,
+	EEPROMCLK	= 0x01,	// unused
+	EEPROMInfo	= 0x02,
+	EEPROMMACAddr	= 0x03
+};
+
+struct sis190_private {
+	void __iomem *mmio_addr;
+	struct pci_dev *pci_dev;
+	struct net_device_stats stats;
+	spinlock_t lock;
+	u32 rx_buf_sz;
+	u32 cur_rx;
+	u32 cur_tx;
+	u32 dirty_rx;
+	u32 dirty_tx;
+	dma_addr_t rx_dma;
+	dma_addr_t tx_dma;
+	struct RxDesc *RxDescRing;
+	struct TxDesc *TxDescRing;
+	struct sk_buff *Rx_skbuff[NUM_RX_DESC];
+	struct sk_buff *Tx_skbuff[NUM_TX_DESC];
+	struct work_struct phy_task;
+	struct timer_list timer;
+	u32 msg_enable;
+	struct mii_if_info mii_if;
+	struct list_head first_phy;
+};
+
+struct sis190_phy {
+	struct list_head list;
+	int phy_id;
+	u16 id[2];
+	u16 status;
+	u8  type;
+};
+
+enum sis190_phy_type {
+	UNKNOWN	= 0x00,
+	HOME	= 0x01,
+	LAN	= 0x02,
+	MIX	= 0x03
+};
+
+static struct mii_chip_info {
+	const char *name;
+	u16 id[2];
+	unsigned int type;
+} mii_chip_table[] = {
+	{ "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN },
+	{ "Agere PHY ET1101B",    { 0x0282, 0xf010 }, LAN },
+	{ "Marvell PHY 88E1111",  { 0x0141, 0x0cc0 }, LAN },
+	{ "Realtek PHY RTL8201",  { 0x0000, 0x8200 }, LAN },
+	{ NULL, }
+};
+
+static const struct {
+	const char *name;
+	u8 version;		/* depend on docs */
+	u32 RxConfigMask;	/* clear the bits supported by this chip */
+} sis_chip_info[] = {
+	{ DRV_NAME, 0x00, 0xff7e1880, },
+};
+
+static struct pci_device_id sis190_pci_tbl[] __devinitdata = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
+	{ 0, },
+};
+
+MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);
+
+static int rx_copybreak = 200;
+
+static struct {
+	u32 msg_enable;
+} debug = { -1 };
+
+MODULE_DESCRIPTION("SiS sis190 Gigabit Ethernet driver");
+module_param(rx_copybreak, int, 0);
+MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
+module_param_named(debug, debug.msg_enable, int, 0);
+MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
+MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>");
+MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE("GPL");
+
+static const u32 sis190_intr_mask =
+	RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt;
+
+/*
+ * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
+ * The chips use a 64 element hash table based on the Ethernet CRC.
+ */
+static int multicast_filter_limit = 32;
+
+static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
+{
+	unsigned int i;
+
+	SIS_W32(GMIIControl, ctl);
+
+	msleep(1);
+
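+	/* Poll until the MAC clears EhnMIInotDone (PHY access complete). */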
+	for (i = 0; i < 100; i++) {
+		if (!(SIS_R32(GMIIControl) & EhnMIInotDone))
+			break;
+		msleep(1);
+	}
+
+	if (i > 99)
+		printk(KERN_ERR PFX "PHY command failed !\n");
+}
+
+static void mdio_write(void __iomem *ioaddr, int phy_id, int reg, int val)
+{
+	__mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite |
+		(((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift) |
+		(((u32) val) << EhnMIIdataShift));
+}
+
+static int mdio_read(void __iomem *ioaddr, int phy_id, int reg)
+{
+	__mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread |
+		(((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift));
+
+	return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift);
+}
+
+static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+
+	mdio_write(tp->mmio_addr, phy_id, reg, val);
+}
+
+static int __mdio_read(struct net_device *dev, int phy_id, int reg)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+
+	return mdio_read(tp->mmio_addr, phy_id, reg);
+}
+
+static u16 mdio_read_latched(void __iomem *ioaddr, int phy_id, int reg)
+{
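+	/*
+	 * Some status bits (e.g. BMSR link status) are latched: read
+	 * twice so the second read reports the current state.
+	 */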
+	mdio_read(ioaddr, phy_id, reg);
+	return mdio_read(ioaddr, phy_id, reg);
+}
+
+static u16 __devinit sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
+{
+	u16 data = 0xffff;
+	unsigned int i;
+
+	if (!(SIS_R32(ROMControl) & 0x0002))
+		return 0;
+
+	SIS_W32(ROMInterface, EEREQ | EEROP | (reg << 10));
+
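+	/*
+	 * Wait for the chip to clear EEREQ; the 16 bit word then sits
+	 * in the upper half of ROMInterface.
+	 */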
+	for (i = 0; i < 200; i++) {
+		if (!(SIS_R32(ROMInterface) & EEREQ)) {
+			data = (SIS_R32(ROMInterface) & 0xffff0000) >> 16;
+			break;
+		}
+		msleep(1);
+	}
+
+	return data;
+}
+
+static void sis190_irq_mask_and_ack(void __iomem *ioaddr)
+{
+	SIS_W32(IntrMask, 0x00);
+	SIS_W32(IntrStatus, 0xffffffff);
+	SIS_PCI_COMMIT();
+}
+
+static void sis190_asic_down(void __iomem *ioaddr)
+{
+	/* Stop the chip's Tx and Rx DMA processes. */
+
+	SIS_W32(TxControl, 0x1a00);
+	SIS_W32(RxControl, 0x1a00);
+
+	sis190_irq_mask_and_ack(ioaddr);
+}
+
+static void sis190_mark_as_last_descriptor(struct RxDesc *desc)
+{
+	desc->size |= cpu_to_le32(RingEnd);
+}
+
+static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
+{
+	u32 eor = le32_to_cpu(desc->size) & RingEnd;
+
+	desc->PSize = 0x0;
+	desc->size = cpu_to_le32((rx_buf_sz & RX_BUF_MASK) | eor);
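+	/* Make the size update visible before ownership goes back to the NIC. */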
+	wmb();
+	desc->status = cpu_to_le32(OWNbit | INTbit);
+}
+
+static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
+				      u32 rx_buf_sz)
+{
+	desc->addr = cpu_to_le32(mapping);
+	sis190_give_to_asic(desc, rx_buf_sz);
+}
+
+static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
+{
+	desc->PSize = 0x0;
+	desc->addr = cpu_to_le32(0xdeadbeef);
+	desc->size &= cpu_to_le32(RingEnd);
+	wmb();
+	desc->status = 0x0;
+}
+
+static int sis190_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
+			       struct RxDesc *desc, u32 rx_buf_sz)
+{
+	struct sk_buff *skb;
+	dma_addr_t mapping;
+	int ret = 0;
+
+	skb = dev_alloc_skb(rx_buf_sz);
+	if (!skb)
+		goto err_out;
+
+	*sk_buff = skb;
+
+	mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
+				 PCI_DMA_FROMDEVICE);
+
+	sis190_map_to_asic(desc, mapping, rx_buf_sz);
+out:
+	return ret;
+
+err_out:
+	ret = -ENOMEM;
+	sis190_make_unusable_by_asic(desc);
+	goto out;
+}
+
+static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
+			  u32 start, u32 end)
+{
+	u32 cur;
+
+	for (cur = start; cur < end; cur++) {
+		int ret, i = cur % NUM_RX_DESC;
+
+		if (tp->Rx_skbuff[i])
+			continue;
+
+		ret = sis190_alloc_rx_skb(tp->pci_dev, tp->Rx_skbuff + i,
+					  tp->RxDescRing + i, tp->rx_buf_sz);
+		if (ret < 0)
+			break;
+	}
+	return cur - start;
+}
+
+static inline int sis190_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
+				     struct RxDesc *desc, int rx_buf_sz)
+{
+	int ret = -1;
+
+	if (pkt_size < rx_copybreak) {
+		struct sk_buff *skb;
+
+		skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN);
+		if (skb) {
+			skb_reserve(skb, NET_IP_ALIGN);
+			eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0);
+			*sk_buff = skb;
+			sis190_give_to_asic(desc, rx_buf_sz);
+			ret = 0;
+		}
+	}
+	return ret;
+}
+
+static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats)
+{
+#define ErrMask	(OVRUN | SHORT | LIMIT | MIIER | NIBON | COLON | ABORT)
+
+	if ((status & CRCOK) && !(status & ErrMask))
+		return 0;
+
+	if (!(status & CRCOK))
+		stats->rx_crc_errors++;
+	else if (status & OVRUN)
+		stats->rx_over_errors++;
+	else if (status & (SHORT | LIMIT))
+		stats->rx_length_errors++;
+	else if (status & (MIIER | NIBON | COLON))
+		stats->rx_frame_errors++;
+
+	stats->rx_errors++;
+	return -1;
+}
+
+static int sis190_rx_interrupt(struct net_device *dev,
+			       struct sis190_private *tp, void __iomem *ioaddr)
+{
+	struct net_device_stats *stats = &tp->stats;
+	u32 rx_left, cur_rx = tp->cur_rx;
+	u32 delta, count;
+
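+	/* Only ring slots that still have a buffer attached can hold packets. */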
+	rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
+	rx_left = sis190_rx_quota(rx_left, (u32) dev->quota);
+
+	for (; rx_left > 0; rx_left--, cur_rx++) {
+		unsigned int entry = cur_rx % NUM_RX_DESC;
+		struct RxDesc *desc = tp->RxDescRing + entry;
+		u32 status;
+
+		if (le32_to_cpu(desc->status) & OWNbit)
+			break;
+
+		status = le32_to_cpu(desc->PSize);
+
+		// net_intr(tp, KERN_INFO "%s: Rx PSize = %08x.\n", dev->name,
+		//	 status);
+
+		if (sis190_rx_pkt_err(status, stats) < 0)
+			sis190_give_to_asic(desc, tp->rx_buf_sz);
+		else {
+			struct sk_buff *skb = tp->Rx_skbuff[entry];
+			int pkt_size = (status & RxSizeMask) - 4;
+			void (*pci_action)(struct pci_dev *, dma_addr_t,
+				size_t, int) = pci_dma_sync_single_for_device;
+
+			if (unlikely(pkt_size > tp->rx_buf_sz)) {
+				net_intr(tp, KERN_INFO
+					 "%s: (frag) status = %08x.\n",
+					 dev->name, status);
+				stats->rx_dropped++;
+				stats->rx_length_errors++;
+				sis190_give_to_asic(desc, tp->rx_buf_sz);
+				continue;
+			}
+
+			pci_dma_sync_single_for_cpu(tp->pci_dev,
+				le32_to_cpu(desc->addr), tp->rx_buf_sz,
+				PCI_DMA_FROMDEVICE);
+
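+			/*
+			 * Small packets are copied into a fresh skb and the Rx
+			 * buffer is recycled (synced back to the device);
+			 * otherwise the buffer is unmapped and handed up as-is.
+			 */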
+			if (sis190_try_rx_copy(&skb, pkt_size, desc,
+					       tp->rx_buf_sz)) {
+				pci_action = pci_unmap_single;
+				tp->Rx_skbuff[entry] = NULL;
+				sis190_make_unusable_by_asic(desc);
+			}
+
+			pci_action(tp->pci_dev, le32_to_cpu(desc->addr),
+				   tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+
+			skb->dev = dev;
+			skb_put(skb, pkt_size);
+			skb->protocol = eth_type_trans(skb, dev);
+
+			sis190_rx_skb(skb);
+
+			dev->last_rx = jiffies;
+			stats->rx_packets++;
+			stats->rx_bytes += pkt_size;
+			if ((status & BCAST) == MCAST)
+				stats->multicast++;
+		}
+	}
+	count = cur_rx - tp->cur_rx;
+	tp->cur_rx = cur_rx;
+
+	delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
+	if (!delta && count && netif_msg_intr(tp))
+		printk(KERN_INFO "%s: no Rx buffer allocated.\n", dev->name);
+	tp->dirty_rx += delta;
+
+	if (((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx) && netif_msg_intr(tp))
+		printk(KERN_EMERG "%s: Rx buffers exhausted.\n", dev->name);
+
+	return count;
+}
+
+static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
+				struct TxDesc *desc)
+{
+	unsigned int len;
+
+	len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
+
+	pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);
+
+	memset(desc, 0x00, sizeof(*desc));
+}
+
+static void sis190_tx_interrupt(struct net_device *dev,
+				struct sis190_private *tp, void __iomem *ioaddr)
+{
+	u32 pending, dirty_tx = tp->dirty_tx;
+	/*
+	 * Remember whether the queue was stopped: it must not be woken
+	 * again too early (hint: think preempt and unclocked smp systems).
+	 */
+	unsigned int queue_stopped;
+
+	smp_rmb();
+	pending = tp->cur_tx - dirty_tx;
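+	/* The xmit path stops the queue only when the whole ring is in flight. */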
+	queue_stopped = (pending == NUM_TX_DESC);
+
+	for (; pending; pending--, dirty_tx++) {
+		unsigned int entry = dirty_tx % NUM_TX_DESC;
+		struct TxDesc *txd = tp->TxDescRing + entry;
+		struct sk_buff *skb;
+
+		if (le32_to_cpu(txd->status) & OWNbit)
+			break;
+
+		skb = tp->Tx_skbuff[entry];
+
+		tp->stats.tx_packets++;
+		tp->stats.tx_bytes += skb->len;
+
+		sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
+		tp->Tx_skbuff[entry] = NULL;
+		dev_kfree_skb_irq(skb);
+	}
+
+	if (tp->dirty_tx != dirty_tx) {
+		tp->dirty_tx = dirty_tx;
+		smp_wmb();
+		if (queue_stopped)
+			netif_wake_queue(dev);
+	}
+}
+
+/*
+ * The interrupt handler does all of the Rx thread work and cleans up after
+ * the Tx thread.
+ */
+static irqreturn_t sis190_interrupt(int irq, void *__dev, struct pt_regs *regs)
+{
+	struct net_device *dev = __dev;
+	struct sis190_private *tp = netdev_priv(dev);
+	void __iomem *ioaddr = tp->mmio_addr;
+	unsigned int handled = 0;
+	u32 status;
+
+	status = SIS_R32(IntrStatus);
+
+	if ((status == 0xffffffff) || !status)
+		goto out;
+
+	handled = 1;
+
+	if (unlikely(!netif_running(dev))) {
+		sis190_asic_down(ioaddr);
+		goto out;
+	}
+
+	SIS_W32(IntrStatus, status);
+
+	// net_intr(tp, KERN_INFO "%s: status = %08x.\n", dev->name, status);
+
+	if (status & LinkChange) {
+		net_intr(tp, KERN_INFO "%s: link change.\n", dev->name);
+		schedule_work(&tp->phy_task);
+	}
+
+	if (status & RxQInt)
+		sis190_rx_interrupt(dev, tp, ioaddr);
+
+	if (status & TxQ0Int)
+		sis190_tx_interrupt(dev, tp, ioaddr);
+out:
+	return IRQ_RETVAL(handled);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void sis190_netpoll(struct net_device *dev)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+	struct pci_dev *pdev = tp->pci_dev;
+
+	disable_irq(pdev->irq);
+	sis190_interrupt(pdev->irq, dev, NULL);
+	enable_irq(pdev->irq);
+}
+#endif
+
+static void sis190_free_rx_skb(struct sis190_private *tp,
+			       struct sk_buff **sk_buff, struct RxDesc *desc)
+{
+	struct pci_dev *pdev = tp->pci_dev;
+
+	pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
+			 PCI_DMA_FROMDEVICE);
+	dev_kfree_skb(*sk_buff);
+	*sk_buff = NULL;
+	sis190_make_unusable_by_asic(desc);
+}
+
+static void sis190_rx_clear(struct sis190_private *tp)
+{
+	unsigned int i;
+
+	for (i = 0; i < NUM_RX_DESC; i++) {
+		if (!tp->Rx_skbuff[i])
+			continue;
+		sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
+	}
+}
+
+static void sis190_init_ring_indexes(struct sis190_private *tp)
+{
+	tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
+}
+
+static int sis190_init_ring(struct net_device *dev)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+
+	sis190_init_ring_indexes(tp);
+
+	memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
+	memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));
+
+	if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
+		goto err_rx_clear;
+
+	sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);
+
+	return 0;
+
+err_rx_clear:
+	sis190_rx_clear(tp);
+	return -ENOMEM;
+}
+
+static void sis190_set_rx_mode(struct net_device *dev)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+	void __iomem *ioaddr = tp->mmio_addr;
+	unsigned long flags;
+	u32 mc_filter[2];	/* Multicast hash filter */
+	u16 rx_mode;
+
+	if (dev->flags & IFF_PROMISC) {
+		/* Unconditionally log net taps. */
+		net_drv(tp, KERN_NOTICE "%s: Promiscuous mode enabled.\n",
+			dev->name);
+		rx_mode =
+			AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
+			AcceptAllPhys;
+		mc_filter[1] = mc_filter[0] = 0xffffffff;
+	} else if ((dev->mc_count > multicast_filter_limit) ||
+		   (dev->flags & IFF_ALLMULTI)) {
+		/* Too many to filter perfectly -- accept all multicasts. */
+		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+		mc_filter[1] = mc_filter[0] = 0xffffffff;
+	} else {
+		struct dev_mc_list *mclist;
+		unsigned int i;
+
+		rx_mode = AcceptBroadcast | AcceptMyPhys;
+		mc_filter[1] = mc_filter[0] = 0;
+		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
+		     i++, mclist = mclist->next) {
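+			/* Top 6 bits of the CRC select one of the 64 hash bits. */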
+			int bit_nr =
+				ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
+			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+			rx_mode |= AcceptMulticast;
+		}
+	}
+
+	spin_lock_irqsave(&tp->lock, flags);
+
+	SIS_W16(RxMacControl, rx_mode | 0x2);
+	SIS_W32(RxHashTable, mc_filter[0]);
+	SIS_W32(RxHashTable + 4, mc_filter[1]);
+
+	spin_unlock_irqrestore(&tp->lock, flags);
+}
+
+static void sis190_soft_reset(void __iomem *ioaddr)
+{
+	SIS_W32(IntrControl, 0x8000);
+	SIS_PCI_COMMIT();
+	msleep(1);
+	SIS_W32(IntrControl, 0x0);
+	sis190_asic_down(ioaddr);
+	msleep(1);
+}
+
+static void sis190_hw_start(struct net_device *dev)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+	void __iomem *ioaddr = tp->mmio_addr;
+
+	sis190_soft_reset(ioaddr);
+
+	SIS_W32(TxDescStartAddr, tp->tx_dma);
+	SIS_W32(RxDescStartAddr, tp->rx_dma);
+
+	SIS_W32(IntrStatus, 0xffffffff);
+	SIS_W32(IntrMask, 0x0);
+	/*
+	 * Default is 100Mbps.
+	 * A bit strange: 100Mbps is 0x1801 elsewhere -- FR 2005/06/09
+	 */
+	SIS_W16(StationControl, 0x1901);
+	SIS_W32(GMIIControl, 0x0);
+	SIS_W32(TxMacControl, 0x60);
+	SIS_W16(RxMacControl, 0x02);
+	SIS_W32(RxHashTable, 0x0);
+	SIS_W32(0x6c, 0x0);
+	SIS_W32(RxWolCtrl, 0x0);
+	SIS_W32(RxWolData, 0x0);
+
+	SIS_PCI_COMMIT();
+
+	sis190_set_rx_mode(dev);
+
+	/* Enable all known interrupts by setting the interrupt mask. */
+	SIS_W32(IntrMask, sis190_intr_mask);
+
+	SIS_W32(TxControl, 0x1a00 | CmdTxEnb);
+	SIS_W32(RxControl, 0x1a1d);
+
+	netif_start_queue(dev);
+}
+
+static void sis190_phy_task(void * data)
+{
+	struct net_device *dev = data;
+	struct sis190_private *tp = netdev_priv(dev);
+	void __iomem *ioaddr = tp->mmio_addr;
+	int phy_id = tp->mii_if.phy_id;
+	u16 val;
+
+	rtnl_lock();
+
+	val = mdio_read(ioaddr, phy_id, MII_BMCR);
+	if (val & BMCR_RESET) {
+		// FIXME: needlessly high ?  -- FR 02/07/2005
+		mod_timer(&tp->timer, jiffies + HZ/10);
+	} else if (!(mdio_read_latched(ioaddr, phy_id, MII_BMSR) &
+		     BMSR_ANEGCOMPLETE)) {
+		net_link(tp, KERN_WARNING "%s: PHY reset until link up.\n",
+			 dev->name);
+		mdio_write(ioaddr, phy_id, MII_BMCR, val | BMCR_RESET);
+		mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
+	} else {
+		/* Rejoice ! */
+		struct {
+			int val;
+			const char *msg;
+			u16 ctl;
+		} reg31[] = {
+			{ LPA_1000XFULL | LPA_SLCT,
+				"1000 Mbps Full Duplex",
+				0x01 | _1000bpsF },
+			{ LPA_1000XHALF | LPA_SLCT,
+				"1000 Mbps Half Duplex",
+				0x01 | _1000bpsH },
+			{ LPA_100FULL,
+				"100 Mbps Full Duplex",
+				0x01 | _100bpsF },
+			{ LPA_100HALF,
+				"100 Mbps Half Duplex",
+				0x01 | _100bpsH },
+			{ LPA_10FULL,
+				"10 Mbps Full Duplex",
+				0x01 | _10bpsF },
+			{ LPA_10HALF,
+				"10 Mbps Half Duplex",
+				0x01 | _10bpsH },
+			{ 0, "unknown", 0x0000 }
+		}, *p;
+		u16 adv;
+
+		val = mdio_read(ioaddr, phy_id, 0x1f);
+		net_link(tp, KERN_INFO "%s: mii ext = %04x.\n", dev->name, val);
+
+		val = mdio_read(ioaddr, phy_id, MII_LPA);
+		adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
+		net_link(tp, KERN_INFO "%s: mii lpa = %04x adv = %04x.\n",
+			 dev->name, val, adv);
+
+		val &= adv;
+
+		for (p = reg31; p->ctl; p++) {
+			if ((val & p->val) == p->val)
+				break;
+		}
+		if (p->ctl)
+			SIS_W16(StationControl, p->ctl);
+		net_link(tp, KERN_INFO "%s: link on %s mode.\n", dev->name,
+			 p->msg);
+		netif_carrier_on(dev);
+	}
+
+	rtnl_unlock();
+}
+
+static void sis190_phy_timer(unsigned long __opaque)
+{
+	struct net_device *dev = (struct net_device *)__opaque;
+	struct sis190_private *tp = netdev_priv(dev);
+
+	if (likely(netif_running(dev)))
+		schedule_work(&tp->phy_task);
+}
+
+static inline void sis190_delete_timer(struct net_device *dev)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+
+	del_timer_sync(&tp->timer);
+}
+
+static inline void sis190_request_timer(struct net_device *dev)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+	struct timer_list *timer = &tp->timer;
+
+	init_timer(timer);
+	timer->expires = jiffies + SIS190_PHY_TIMEOUT;
+	timer->data = (unsigned long)dev;
+	timer->function = sis190_phy_timer;
+	add_timer(timer);
+}
+
+static void sis190_set_rxbufsize(struct sis190_private *tp,
+				 struct net_device *dev)
+{
+	unsigned int mtu = dev->mtu;
+
+	tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
+	/* RxDesc->size has a licence to kill the lower bits */
+	if (tp->rx_buf_sz & 0x07) {
+		tp->rx_buf_sz += 8;
+		tp->rx_buf_sz &= RX_BUF_MASK;
+	}
+}
+
+static int sis190_open(struct net_device *dev)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+	struct pci_dev *pdev = tp->pci_dev;
+	int rc = -ENOMEM;
+
+	sis190_set_rxbufsize(tp, dev);
+
+	/*
+	 * Rx and Tx descriptors need 256-byte alignment.
+	 * pci_alloc_consistent() guarantees a stronger alignment.
+	 */
+	tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
+	if (!tp->TxDescRing)
+		goto out;
+
+	tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
+	if (!tp->RxDescRing)
+		goto err_free_tx_0;
+
+	rc = sis190_init_ring(dev);
+	if (rc < 0)
+		goto err_free_rx_1;
+
+	INIT_WORK(&tp->phy_task, sis190_phy_task, dev);
+
+	sis190_request_timer(dev);
+
+	rc = request_irq(dev->irq, sis190_interrupt, SA_SHIRQ, dev->name, dev);
+	if (rc < 0)
+		goto err_release_timer_2;
+
+	sis190_hw_start(dev);
+out:
+	return rc;
+
+err_release_timer_2:
+	sis190_delete_timer(dev);
+	sis190_rx_clear(tp);
+err_free_rx_1:
+	pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing,
+		tp->rx_dma);
+err_free_tx_0:
+	pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing,
+		tp->tx_dma);
+	goto out;
+}
+
+static void sis190_tx_clear(struct sis190_private *tp)
+{
+	unsigned int i;
+
+	for (i = 0; i < NUM_TX_DESC; i++) {
+		struct sk_buff *skb = tp->Tx_skbuff[i];
+
+		if (!skb)
+			continue;
+
+		sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
+		tp->Tx_skbuff[i] = NULL;
+		dev_kfree_skb(skb);
+
+		tp->stats.tx_dropped++;
+	}
+	tp->cur_tx = tp->dirty_tx = 0;
+}
+
+static void sis190_down(struct net_device *dev)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+	void __iomem *ioaddr = tp->mmio_addr;
+	unsigned int poll_locked = 0;
+
+	sis190_delete_timer(dev);
+
+	netif_stop_queue(dev);
+
+	flush_scheduled_work();
+
+	do {
+		spin_lock_irq(&tp->lock);
+
+		sis190_asic_down(ioaddr);
+
+		spin_unlock_irq(&tp->lock);
+
+		synchronize_irq(dev->irq);
+
+		if (!poll_locked) {
+			netif_poll_disable(dev);
+			poll_locked++;
+		}
+
+		synchronize_sched();
+
+	} while (SIS_R32(IntrMask));
+
+	sis190_tx_clear(tp);
+	sis190_rx_clear(tp);
+}
+
+static int sis190_close(struct net_device *dev)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+	struct pci_dev *pdev = tp->pci_dev;
+
+	sis190_down(dev);
+
+	free_irq(dev->irq, dev);
+
+	netif_poll_enable(dev);
+
+	pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
+	pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);
+
+	tp->TxDescRing = NULL;
+	tp->RxDescRing = NULL;
+
+	return 0;
+}
+
+static int sis190_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+	void __iomem *ioaddr = tp->mmio_addr;
+	u32 len, entry, dirty_tx;
+	struct TxDesc *desc;
+	dma_addr_t mapping;
+
+	if (unlikely(skb->len < ETH_ZLEN)) {
+		skb = skb_padto(skb, ETH_ZLEN);
+		if (!skb) {
+			tp->stats.tx_dropped++;
+			goto out;
+		}
+		len = ETH_ZLEN;
+	} else {
+		len = skb->len;
+	}
+
+	entry = tp->cur_tx % NUM_TX_DESC;
+	desc = tp->TxDescRing + entry;
+
+	if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
+		netif_stop_queue(dev);
+		net_tx_err(tp, KERN_ERR PFX
+			   "%s: BUG! Tx Ring full when queue awake!\n",
+			   dev->name);
+		return NETDEV_TX_BUSY;
+	}
+
+	mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
+
+	tp->Tx_skbuff[entry] = skb;
+
+	desc->PSize = cpu_to_le32(len);
+	desc->addr = cpu_to_le32(mapping);
+
+	desc->size = cpu_to_le32(len);
+	if (entry == (NUM_TX_DESC - 1))
+		desc->size |= cpu_to_le32(RingEnd);
+
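+	/* Commit the descriptor contents before granting ownership to the chip. */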
+	wmb();
+
+	desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);
+
+	tp->cur_tx++;
+
+	smp_wmb();
+
+	SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);
+
+	dev->trans_start = jiffies;
+
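+	/*
+	 * If the ring just filled up, stop the queue, then re-check dirty_tx:
+	 * the Tx interrupt may have freed slots meanwhile and must not leave
+	 * the queue stopped for good.
+	 */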
+	dirty_tx = tp->dirty_tx;
+	if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
+		netif_stop_queue(dev);
+		smp_rmb();
+		if (dirty_tx != tp->dirty_tx)
+			netif_wake_queue(dev);
+	}
+out:
+	return NETDEV_TX_OK;
+}
+
+static struct net_device_stats *sis190_get_stats(struct net_device *dev)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+
+	return &tp->stats;
+}
+
+static void sis190_free_phy(struct list_head *first_phy)
+{
+	struct sis190_phy *cur, *next;
+
+	list_for_each_entry_safe(cur, next, first_phy, list) {
+		kfree(cur);
+	}
+}
+
+/**
+ *	sis190_default_phy - Select default PHY for sis190 mac.
+ *	@dev: the net device to probe for
+ *
+ *	Select the first detected PHY with link up as default.
+ *	If none has link up, select the PHY whose type is HOME as default.
+ *	If no HOME PHY exists, select a LAN one.
+ */
+static u16 sis190_default_phy(struct net_device *dev)
+{
+	struct sis190_phy *phy, *phy_home, *phy_default, *phy_lan;
+	struct sis190_private *tp = netdev_priv(dev);
+	struct mii_if_info *mii_if = &tp->mii_if;
+	void __iomem *ioaddr = tp->mmio_addr;
+	u16 status;
+
+	phy_home = phy_default = phy_lan = NULL;
+
+	list_for_each_entry(phy, &tp->first_phy, list) {
+		status = mdio_read_latched(ioaddr, phy->phy_id, MII_BMSR);
+
+		// Link is up, no default PHY selected yet and not a ghost PHY.
+		if ((status & BMSR_LSTATUS) &&
+		    !phy_default &&
+		    (phy->type != UNKNOWN)) {
+			phy_default = phy;
+		} else {
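+			/* Not the default: isolate it (un-isolated later if chosen). */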
+			status = mdio_read(ioaddr, phy->phy_id, MII_BMCR);
+			mdio_write(ioaddr, phy->phy_id, MII_BMCR,
+				   status | BMCR_ANENABLE | BMCR_ISOLATE);
+			if (phy->type == HOME)
+				phy_home = phy;
+			else if (phy->type == LAN)
+				phy_lan = phy;
+		}
+	}
+
+	if (!phy_default) {
+		if (phy_home)
+			phy_default = phy_home;
+		else if (phy_lan)
+			phy_default = phy_lan;
+		else
+			phy_default = list_entry(tp->first_phy.next,
+						 struct sis190_phy, list);
+	}
+
+	if (mii_if->phy_id != phy_default->phy_id) {
+		mii_if->phy_id = phy_default->phy_id;
+		net_probe(tp, KERN_INFO
+		       "%s: Using transceiver at address %d as default.\n",
+		       pci_name(tp->pci_dev), mii_if->phy_id);
+	}
+
+	status = mdio_read(ioaddr, mii_if->phy_id, MII_BMCR);
+	status &= (~BMCR_ISOLATE);
+
+	mdio_write(ioaddr, mii_if->phy_id, MII_BMCR, status);
+	status = mdio_read_latched(ioaddr, mii_if->phy_id, MII_BMSR);
+
+	return status;
+}
+
+static void sis190_init_phy(struct net_device *dev, struct sis190_private *tp,
+			    struct sis190_phy *phy, unsigned int phy_id,
+			    u16 mii_status)
+{
+	void __iomem *ioaddr = tp->mmio_addr;
+	struct mii_chip_info *p;
+
+	INIT_LIST_HEAD(&phy->list);
+	phy->status = mii_status;
+	phy->phy_id = phy_id;
+
+	phy->id[0] = mdio_read(ioaddr, phy_id, MII_PHYSID1);
+	phy->id[1] = mdio_read(ioaddr, phy_id, MII_PHYSID2);
+
+	for (p = mii_chip_table; p->type; p++) {
+		if ((p->id[0] == phy->id[0]) &&
+		    (p->id[1] == (phy->id[1] & 0xfff0))) {
+			break;
+		}
+	}
+
+	if (p->id[1]) {
+		phy->type = (p->type == MIX) ?
+			((mii_status & (BMSR_100FULL | BMSR_100HALF)) ?
+				LAN : HOME) : p->type;
+	} else
+		phy->type = UNKNOWN;
+
+	net_probe(tp, KERN_INFO "%s: %s transceiver at address %d.\n",
+		  pci_name(tp->pci_dev),
+		  (phy->type == UNKNOWN) ? "Unknown PHY" : p->name, phy_id);
+}
+
+/**
+ *	sis190_mii_probe - Probe MII PHY for sis190
+ *	@dev: the net device to probe for
+ *
+ *	Search all 32 possible MII PHY addresses.
+ *	Identify and set the current PHY if one is found,
+ *	return an error if none is found.
+ */
+static int __devinit sis190_mii_probe(struct net_device *dev)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+	struct mii_if_info *mii_if = &tp->mii_if;
+	void __iomem *ioaddr = tp->mmio_addr;
+	int phy_id;
+	int rc = 0;
+
+	INIT_LIST_HEAD(&tp->first_phy);
+
+	for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
+		struct sis190_phy *phy;
+		u16 status;
+
+		status = mdio_read_latched(ioaddr, phy_id, MII_BMSR);
+
+		// Try next mii if the current one is not accessible.
+		if (status == 0xffff || status == 0x0000)
+			continue;
+
+		phy = kmalloc(sizeof(*phy), GFP_KERNEL);
+		if (!phy) {
+			sis190_free_phy(&tp->first_phy);
+			rc = -ENOMEM;
+			goto out;
+		}
+
+		sis190_init_phy(dev, tp, phy, phy_id, status);
+
+		list_add(&tp->first_phy, &phy->list);
+	}
+
+	if (list_empty(&tp->first_phy)) {
+		net_probe(tp, KERN_INFO "%s: No MII transceivers found!\n",
+			  pci_name(tp->pci_dev));
+		rc = -EIO;
+		goto out;
+	}
+
+	/* Select default PHY for mac */
+	sis190_default_phy(dev);
+
+	mii_if->dev = dev;
+	mii_if->mdio_read = __mdio_read;
+	mii_if->mdio_write = __mdio_write;
+	mii_if->phy_id_mask = PHY_ID_ANY;
+	mii_if->reg_num_mask = MII_REG_ANY;
+out:
+	return rc;
+}
+
+static void __devexit sis190_mii_remove(struct net_device *dev)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+
+	sis190_free_phy(&tp->first_phy);
+}
+
+static void sis190_release_board(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct sis190_private *tp = netdev_priv(dev);
+
+	iounmap(tp->mmio_addr);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	free_netdev(dev);
+}
+
+static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
+{
+	struct sis190_private *tp;
+	struct net_device *dev;
+	void __iomem *ioaddr;
+	int rc;
+
+	dev = alloc_etherdev(sizeof(*tp));
+	if (!dev) {
+		net_drv(&debug, KERN_ERR PFX "unable to alloc new ethernet\n");
+		rc = -ENOMEM;
+		goto err_out_0;
+	}
+
+	SET_MODULE_OWNER(dev);
+	SET_NETDEV_DEV(dev, &pdev->dev);
+
+	tp = netdev_priv(dev);
+	tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT);
+
+	rc = pci_enable_device(pdev);
+	if (rc < 0) {
+		net_probe(tp, KERN_ERR "%s: enable failure\n", pci_name(pdev));
+		goto err_free_dev_1;
+	}
+
+	rc = -ENODEV;
+
+	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+		net_probe(tp, KERN_ERR "%s: region #0 is not an MMIO resource.\n",
+			  pci_name(pdev));
+		goto err_pci_disable_2;
+	}
+	if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
+		net_probe(tp, KERN_ERR "%s: invalid PCI region size(s).\n",
+			  pci_name(pdev));
+		goto err_pci_disable_2;
+	}
+
+	rc = pci_request_regions(pdev, DRV_NAME);
+	if (rc < 0) {
+		net_probe(tp, KERN_ERR PFX "%s: could not request regions.\n",
+			  pci_name(pdev));
+		goto err_pci_disable_2;
+	}
+
+	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
+	if (rc < 0) {
+		net_probe(tp, KERN_ERR "%s: DMA configuration failed.\n",
+			  pci_name(pdev));
+		goto err_free_res_3;
+	}
+
+	pci_set_master(pdev);
+
+	ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
+	if (!ioaddr) {
+		net_probe(tp, KERN_ERR "%s: cannot remap MMIO, aborting\n",
+			  pci_name(pdev));
+		rc = -EIO;
+		goto err_free_res_3;
+	}
+
+	tp->pci_dev = pdev;
+	tp->mmio_addr = ioaddr;
+
+	sis190_irq_mask_and_ack(ioaddr);
+
+	sis190_soft_reset(ioaddr);
+out:
+	return dev;
+
+err_free_res_3:
+	pci_release_regions(pdev);
+err_pci_disable_2:
+	pci_disable_device(pdev);
+err_free_dev_1:
+	free_netdev(dev);
+err_out_0:
+	dev = ERR_PTR(rc);
+	goto out;
+}
+
+static void sis190_tx_timeout(struct net_device *dev)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+	void __iomem *ioaddr = tp->mmio_addr;
+	u8 tmp8;
+
+	/* Disable Tx, if not already */
+	tmp8 = SIS_R8(TxControl);
+	if (tmp8 & CmdTxEnb)
+		SIS_W8(TxControl, tmp8 & ~CmdTxEnb);
+
+
+	net_tx_err(tp, KERN_INFO "%s: Transmit timeout, status %08x %08x.\n",
+		   dev->name, SIS_R32(TxControl), SIS_R32(TxSts));
+
+	/* Disable interrupts by clearing the interrupt mask. */
+	SIS_W32(IntrMask, 0x0000);
+
+	/* Stop a shared interrupt from scavenging while we are. */
+	spin_lock_irq(&tp->lock);
+	sis190_tx_clear(tp);
+	spin_unlock_irq(&tp->lock);
+
+	/* ...and finally, reset everything. */
+	sis190_hw_start(dev);
+
+	netif_wake_queue(dev);
+}
+
+static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
+						     struct net_device *dev)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+	void __iomem *ioaddr = tp->mmio_addr;
+	u16 sig;
+	int i;
+
+	net_probe(tp, KERN_INFO "%s: Read MAC address from EEPROM\n",
+		  pci_name(pdev));
+
+	/* Check to see if there is a sane EEPROM */
+	sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature);
+
+	if ((sig == 0xffff) || (sig == 0x0000)) {
+		net_probe(tp, KERN_INFO "%s: Error EEPROM read %x.\n",
+			  pci_name(pdev), sig);
+		return -EIO;
+	}
+
+	/* Get MAC address from EEPROM */
+	for (i = 0; i < MAC_ADDR_LEN / 2; i++) {
+		__le16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i);
+
+		((u16 *)dev->dev_addr)[i] = le16_to_cpu(w);
+	}
+
+	return 0;
+}
+
+/**
+ *	sis190_get_mac_addr_from_apc - Get MAC address for SiS965 model
+ *	@pdev: PCI device
+ *	@dev:  network device to get address for
+ *
+ *	The SiS965 model stores the MAC address in APC CMOS RAM,
+ *	which is accessed through the ISA bridge.
+ *	The MAC address is read into @dev->dev_addr.
+ */
+static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
+						  struct net_device *dev)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+	struct pci_dev *isa_bridge;
+	u8 reg, tmp8;
+	int i;
+
+	net_probe(tp, KERN_INFO "%s: Read MAC address from APC.\n",
+		  pci_name(pdev));
+
+	isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, 0x0965, NULL);
+	if (!isa_bridge) {
+		net_probe(tp, KERN_INFO "%s: Can not find ISA bridge.\n",
+			  pci_name(pdev));
+		return -EIO;
+	}
+
+	/* Enable port 78h & 79h to access APC Registers. */
+	pci_read_config_byte(isa_bridge, 0x48, &tmp8);
+	reg = (tmp8 & ~0x02);
+	pci_write_config_byte(isa_bridge, 0x48, reg);
+	udelay(50);
+	pci_read_config_byte(isa_bridge, 0x48, &reg);
+
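+	/*
+	 * The six address bytes sit at APC CMOS offsets 0x09-0x0e: write
+	 * the offset to port 0x78, read the byte back from port 0x79.
+	 */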
+	for (i = 0; i < MAC_ADDR_LEN; i++) {
+		outb(0x9 + i, 0x78);
+		dev->dev_addr[i] = inb(0x79);
+	}
+
+	outb(0x12, 0x78);
+	reg = inb(0x79);
+
+	/* Restore the value to ISA Bridge */
+	pci_write_config_byte(isa_bridge, 0x48, tmp8);
+	pci_dev_put(isa_bridge);
+
+	return 0;
+}
+
+/**
+ *      sis190_init_rxfilter - Initialize the Rx filter
+ *      @dev: network device to initialize
+ *
+ *      Set receive filter address to our MAC address
+ *      and enable packet filtering.
+ */
+static inline void sis190_init_rxfilter(struct net_device *dev)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+	void __iomem *ioaddr = tp->mmio_addr;
+	u16 ctl;
+	int i;
+
+	ctl = SIS_R16(RxMacControl);
+	/*
+	 * Disable packet filtering before setting filter.
+	 * Note: SiS's driver writes 32 bits but RxMacControl is 16 bits
+	 * only and followed by RxMacAddr (6 bytes). Strange. -- FR
+	 */
+	SIS_W16(RxMacControl, ctl & ~0x0f00);
+
+	for (i = 0; i < MAC_ADDR_LEN; i++)
+		SIS_W8(RxMacAddr + i, dev->dev_addr[i]);
+
+	SIS_W16(RxMacControl, ctl);
+	SIS_PCI_COMMIT();
+}
+
+static int sis190_get_mac_addr(struct pci_dev *pdev, struct net_device *dev)
+{
+	u8 from;
+
+	pci_read_config_byte(pdev, 0x73, &from);
+
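+	/*
+	 * Bit 0 of PCI config register 0x73 tells whether the MAC address
+	 * lives in APC CMOS RAM (SiS965 bridge) or in the EEPROM.
+	 */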
+	return (from & 0x00000001) ?
+		sis190_get_mac_addr_from_apc(pdev, dev) :
+		sis190_get_mac_addr_from_eeprom(pdev, dev);
+}
+
+static void sis190_set_speed_auto(struct net_device *dev)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+	void __iomem *ioaddr = tp->mmio_addr;
+	int phy_id = tp->mii_if.phy_id;
+	int val;
+
+	net_link(tp, KERN_INFO "%s: Enabling Auto-negotiation.\n", dev->name);
+
+	val = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
+
+	// Enable 10/100 Full/Half Mode, leave MII_ADVERTISE bit4:0
+	// unchanged.
+	mdio_write(ioaddr, phy_id, MII_ADVERTISE, (val & ADVERTISE_SLCT) |
+		   ADVERTISE_100FULL | ADVERTISE_10FULL |
+		   ADVERTISE_100HALF | ADVERTISE_10HALF);
+
+	// Enable 1000 Full Mode.
+	mdio_write(ioaddr, phy_id, MII_CTRL1000, ADVERTISE_1000FULL);
+
+	// Enable auto-negotiation and restart auto-negotiation.
+	mdio_write(ioaddr, phy_id, MII_BMCR,
+		   BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET);
+}
+
+static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+
+	return mii_ethtool_gset(&tp->mii_if, cmd);
+}
+
+static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+
+	return mii_ethtool_sset(&tp->mii_if, cmd);
+}
+
+static void sis190_get_drvinfo(struct net_device *dev,
+			       struct ethtool_drvinfo *info)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+
+	strcpy(info->driver, DRV_NAME);
+	strcpy(info->version, DRV_VERSION);
+	strcpy(info->bus_info, pci_name(tp->pci_dev));
+}
+
+static int sis190_get_regs_len(struct net_device *dev)
+{
+	return SIS190_REGS_SIZE;
+}
+
+static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+			    void *p)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+	unsigned long flags;
+
+	if (regs->len > SIS190_REGS_SIZE)
+		regs->len = SIS190_REGS_SIZE;
+
+	spin_lock_irqsave(&tp->lock, flags);
+	memcpy_fromio(p, tp->mmio_addr, regs->len);
+	spin_unlock_irqrestore(&tp->lock, flags);
+}
+
+static int sis190_nway_reset(struct net_device *dev)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+
+	return mii_nway_restart(&tp->mii_if);
+}
+
+static u32 sis190_get_msglevel(struct net_device *dev)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+
+	return tp->msg_enable;
+}
+
+static void sis190_set_msglevel(struct net_device *dev, u32 value)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+
+	tp->msg_enable = value;
+}
+
+static struct ethtool_ops sis190_ethtool_ops = {
+	.get_settings	= sis190_get_settings,
+	.set_settings	= sis190_set_settings,
+	.get_drvinfo	= sis190_get_drvinfo,
+	.get_regs_len	= sis190_get_regs_len,
+	.get_regs	= sis190_get_regs,
+	.get_link	= ethtool_op_get_link,
+	.get_msglevel	= sis190_get_msglevel,
+	.set_msglevel	= sis190_set_msglevel,
+	.nway_reset	= sis190_nway_reset,
+};
+
+static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+	struct sis190_private *tp = netdev_priv(dev);
+
+	return !netif_running(dev) ? -EINVAL :
+		generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL);
+}
+
+static int __devinit sis190_init_one(struct pci_dev *pdev,
+				     const struct pci_device_id *ent)
+{
+	static int printed_version = 0;
+	struct sis190_private *tp;
+	struct net_device *dev;
+	void __iomem *ioaddr;
+	int rc;
+
+	if (!printed_version) {
+		net_drv(&debug, KERN_INFO SIS190_DRIVER_NAME " loaded.\n");
+		printed_version = 1;
+	}
+
+	dev = sis190_init_board(pdev);
+	if (IS_ERR(dev)) {
+		rc = PTR_ERR(dev);
+		goto out;
+	}
+
+	tp = netdev_priv(dev);
+	ioaddr = tp->mmio_addr;
+
+	rc = sis190_get_mac_addr(pdev, dev);
+	if (rc < 0)
+		goto err_release_board;
+
+	sis190_init_rxfilter(dev);
+
+	INIT_WORK(&tp->phy_task, sis190_phy_task, dev);
+
+	dev->open = sis190_open;
+	dev->stop = sis190_close;
+	dev->do_ioctl = sis190_ioctl;
+	dev->get_stats = sis190_get_stats;
+	dev->tx_timeout = sis190_tx_timeout;
+	dev->watchdog_timeo = SIS190_TX_TIMEOUT;
+	dev->hard_start_xmit = sis190_start_xmit;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	dev->poll_controller = sis190_netpoll;
+#endif
+	dev->set_multicast_list = sis190_set_rx_mode;
+	SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
+	dev->irq = pdev->irq;
+	dev->base_addr = (unsigned long) 0xdead;
+
+	spin_lock_init(&tp->lock);
+
+	rc = sis190_mii_probe(dev);
+	if (rc < 0)
+		goto err_release_board;
+
+	rc = register_netdev(dev);
+	if (rc < 0)
+		goto err_remove_mii;
+
+	pci_set_drvdata(pdev, dev);
+
+	net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), "
+	       "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
+	       pci_name(pdev), sis_chip_info[ent->driver_data].name,
+	       ioaddr, dev->irq,
+	       dev->dev_addr[0], dev->dev_addr[1],
+	       dev->dev_addr[2], dev->dev_addr[3],
+	       dev->dev_addr[4], dev->dev_addr[5]);
+
+	netif_carrier_off(dev);
+
+	sis190_set_speed_auto(dev);
+out:
+	return rc;
+
+err_remove_mii:
+	sis190_mii_remove(dev);
+err_release_board:
+	sis190_release_board(pdev);
+	goto out;
+}
+
+static void __devexit sis190_remove_one(struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+
+	sis190_mii_remove(dev);
+	unregister_netdev(dev);
+	sis190_release_board(pdev);
+	pci_set_drvdata(pdev, NULL);
+}
+
+static struct pci_driver sis190_pci_driver = {
+	.name		= DRV_NAME,
+	.id_table	= sis190_pci_tbl,
+	.probe		= sis190_init_one,
+	.remove		= __devexit_p(sis190_remove_one),
+};
+
+static int __init sis190_init_module(void)
+{
+	return pci_module_init(&sis190_pci_driver);
+}
+
+static void __exit sis190_cleanup_module(void)
+{
+	pci_unregister_driver(&sis190_pci_driver);
+}
+
+module_init(sis190_init_module);
+module_exit(sis190_cleanup_module);
diff --git a/drivers/net/tulip/Kconfig b/drivers/net/tulip/Kconfig
index e2cdaf8..8c9634a 100644
--- a/drivers/net/tulip/Kconfig
+++ b/drivers/net/tulip/Kconfig
@@ -135,6 +135,18 @@
 	  <file:Documentation/networking/net-modules.txt>.  The module will
 	  be called dmfe.
 
+config ULI526X
+	tristate "ULi M526x controller support"
+	depends on NET_TULIP && PCI
+	select CRC32
+	---help---
+	  This driver is for the ULi M5261/M5263 10/100M Ethernet Controller
+	  (<http://www.uli.com.tw/>).
+
+	  To compile this driver as a module, choose M here and read
+	  <file:Documentation/networking/net-modules.txt>.  The module will
+	  be called uli526x.
+
 config PCMCIA_XIRCOM
 	tristate "Xircom CardBus support (new driver)"
 	depends on NET_TULIP && CARDBUS
diff --git a/drivers/net/tulip/Makefile b/drivers/net/tulip/Makefile
index 8bb9b46..451090d 100644
--- a/drivers/net/tulip/Makefile
+++ b/drivers/net/tulip/Makefile
@@ -9,6 +9,7 @@
 obj-$(CONFIG_DE2104X)		+= de2104x.o
 obj-$(CONFIG_TULIP)		+= tulip.o
 obj-$(CONFIG_DE4X5)		+= de4x5.o
+obj-$(CONFIG_ULI526X)		+= uli526x.o
 
 # Declare multi-part drivers.
 
diff --git a/drivers/net/tulip/media.c b/drivers/net/tulip/media.c
index e26c31f..f53396f 100644
--- a/drivers/net/tulip/media.c
+++ b/drivers/net/tulip/media.c
@@ -81,25 +81,6 @@
 		return retval & 0xffff;
 	}
 
-	if(tp->chip_id == ULI526X && tp->revision >= 0x40) {
-		int value;
-		int i = 1000;
-		
-		value = ioread32(ioaddr + CSR9);
-		iowrite32(value & 0xFFEFFFFF, ioaddr + CSR9);
-		
-		value = (phy_id << 21) | (location << 16) | 0x08000000;
-		iowrite32(value, ioaddr + CSR10);
-		
-		while(--i > 0) {
-			mdio_delay();
-			if(ioread32(ioaddr + CSR10) & 0x10000000)
-				break;
-		}
-		retval = ioread32(ioaddr + CSR10);
-		spin_unlock_irqrestore(&tp->mii_lock, flags);
-		return retval & 0xFFFF;
-	}
 	/* Establish sync by sending at least 32 logic ones. */
 	for (i = 32; i >= 0; i--) {
 		iowrite32(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
@@ -159,23 +140,6 @@
 		spin_unlock_irqrestore(&tp->mii_lock, flags);
 		return;
 	}
-	if (tp->chip_id == ULI526X && tp->revision >= 0x40) {
-		int value;
-		int i = 1000;
-		
-		value = ioread32(ioaddr + CSR9);
-		iowrite32(value & 0xFFEFFFFF, ioaddr + CSR9);
-		
-		value = (phy_id << 21) | (location << 16) | 0x04000000 | (val & 0xFFFF);
-		iowrite32(value, ioaddr + CSR10);
-		
-		while(--i > 0) {
-			if (ioread32(ioaddr + CSR10) & 0x10000000)
-				break;
-		}
-		spin_unlock_irqrestore(&tp->mii_lock, flags);
-		return;
-	}
 		
 	/* Establish sync by sending 32 logic ones. */
 	for (i = 32; i >= 0; i--) {
diff --git a/drivers/net/tulip/timer.c b/drivers/net/tulip/timer.c
index 6915682..e058a9f 100644
--- a/drivers/net/tulip/timer.c
+++ b/drivers/net/tulip/timer.c
@@ -39,7 +39,6 @@
 	case MX98713:
 	case COMPEX9881:
 	case DM910X:
-	case ULI526X:
 	default: {
 		struct medialeaf *mleaf;
 		unsigned char *p;
diff --git a/drivers/net/tulip/tulip.h b/drivers/net/tulip/tulip.h
index 20346d8..05d2d96 100644
--- a/drivers/net/tulip/tulip.h
+++ b/drivers/net/tulip/tulip.h
@@ -88,7 +88,6 @@
 	I21145,
 	DM910X,
 	CONEXANT,
-	ULI526X
 };
 
 
@@ -482,11 +481,8 @@
 
 static inline void tulip_restart_rxtx(struct tulip_private *tp)
 {
-	if(!(tp->chip_id == ULI526X && 
-		(tp->revision == 0x40 || tp->revision == 0x50))) {
-		tulip_stop_rxtx(tp);
-		udelay(5);
-	}
+	tulip_stop_rxtx(tp);
+	udelay(5);
 	tulip_start_rxtx(tp);
 }
 
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index d45d8f5..05da5be 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -199,9 +199,6 @@
   { "Conexant LANfinity", 256, 0x0001ebef,
 	HAS_MII | HAS_ACPI, tulip_timer },
 
-   /* ULi526X */
-   { "ULi M5261/M5263", 128, 0x0001ebef,
-        HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI, tulip_timer },
 };
 
 
@@ -239,8 +236,6 @@
 	{ 0x1737, 0xAB09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
 	{ 0x1737, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
 	{ 0x17B3, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
- 	{ 0x10b9, 0x5261, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ULI526X },	/* ALi 1563 integrated ethernet */
- 	{ 0x10b9, 0x5263, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ULI526X },	/* ALi 1563 integrated ethernet */
 	{ 0x10b7, 0x9300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* 3Com 3CSOHO100B-TX */
 	{ 0x14ea, 0xab08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, /* Planex FNW-3602-TX */
 	{ } /* terminate list */
@@ -522,7 +517,7 @@
 				   dev->name);
 	} else if (tp->chip_id == DC21140 || tp->chip_id == DC21142
 			   || tp->chip_id == MX98713 || tp->chip_id == COMPEX9881
-			   || tp->chip_id == DM910X || tp->chip_id == ULI526X) {
+			   || tp->chip_id == DM910X) {
 		printk(KERN_WARNING "%s: 21140 transmit timed out, status %8.8x, "
 			   "SIA %8.8x %8.8x %8.8x %8.8x, resetting...\n",
 			   dev->name, ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12),
@@ -1103,18 +1098,16 @@
 			entry = tp->cur_tx++ % TX_RING_SIZE;
 
 			if (entry != 0) {
-				/* Avoid a chip errata by prefixing a dummy entry. Don't do
-				   this on the ULI526X as it triggers a different problem */
-				if (!(tp->chip_id == ULI526X && (tp->revision == 0x40 || tp->revision == 0x50))) {
-					tp->tx_buffers[entry].skb = NULL;
-					tp->tx_buffers[entry].mapping = 0;
-					tp->tx_ring[entry].length =
-						(entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0;
-					tp->tx_ring[entry].buffer1 = 0;
-					/* Must set DescOwned later to avoid race with chip */
-					dummy = entry;
-					entry = tp->cur_tx++ % TX_RING_SIZE;
-				}
+				/* Avoid a chip errata by prefixing a dummy entry. */
+				tp->tx_buffers[entry].skb = NULL;
+				tp->tx_buffers[entry].mapping = 0;
+				tp->tx_ring[entry].length =
+					(entry == TX_RING_SIZE-1) ? cpu_to_le32(DESC_RING_WRAP) : 0;
+				tp->tx_ring[entry].buffer1 = 0;
+				/* Must set DescOwned later to avoid race with chip */
+				dummy = entry;
+				entry = tp->cur_tx++ % TX_RING_SIZE;
+
 			}
 
 			tp->tx_buffers[entry].skb = NULL;
@@ -1235,10 +1228,6 @@
 {
 	if (pdev->vendor == 0x1282 && pdev->device == 0x9102)
 		return 1;
-	if (pdev->vendor == 0x10b9 && pdev->device == 0x5261)
-		return 1;
-	if (pdev->vendor == 0x10b9 && pdev->device == 0x5263)
-		return 1;
 	return 0;
 }
 
@@ -1680,7 +1669,6 @@
 	switch (chip_idx) {
 	case DC21140:
 	case DM910X:
-	case ULI526X:
 	default:
 		if (tp->mtable)
 			iowrite32(tp->mtable->csr12dir | 0x100, ioaddr + CSR12);
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
new file mode 100644
index 0000000..5ae22b7
--- /dev/null
+++ b/drivers/net/tulip/uli526x.c
@@ -0,0 +1,1749 @@
+/*
+    This program is free software; you can redistribute it and/or
+    modify it under the terms of the GNU General Public License
+    as published by the Free Software Foundation; either version 2
+    of the License, or (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    
+*/
+
+#define DRV_NAME	"uli526x"
+#define DRV_VERSION	"0.9.3"
+#define DRV_RELDATE	"2005-7-29"
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+
+#include <asm/processor.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/uaccess.h>
+
+
+/* Board/System/Debug information/definition ---------------- */
+#define PCI_ULI5261_ID  0x526110B9	/* ULi M5261 ID*/
+#define PCI_ULI5263_ID  0x526310B9	/* ULi M5263 ID*/
+
+#define ULI526X_IO_SIZE 0x100
+#define TX_DESC_CNT     0x20            /* Allocated Tx descriptors */
+#define RX_DESC_CNT     0x30            /* Allocated Rx descriptors */
+#define TX_FREE_DESC_CNT (TX_DESC_CNT - 2)	/* Max TX packet count */
+#define TX_WAKE_DESC_CNT (TX_DESC_CNT - 3)	/* TX wakeup count */
+#define DESC_ALL_CNT    (TX_DESC_CNT + RX_DESC_CNT)
+#define TX_BUF_ALLOC    0x600
+#define RX_ALLOC_SIZE   0x620
+#define ULI526X_RESET    1
+#define CR0_DEFAULT     0
+#define CR6_DEFAULT     0x22200000
+#define CR7_DEFAULT     0x180c1
+#define CR15_DEFAULT    0x06            /* TxJabber RxWatchdog */
+#define TDES0_ERR_MASK  0x4302          /* TXJT, LC, EC, FUE */
+#define MAX_PACKET_SIZE 1514
+#define ULI5261_MAX_MULTICAST 14
+#define RX_COPY_SIZE	100
+#define MAX_CHECK_PACKET 0x8000
+
+#define ULI526X_10MHF      0
+#define ULI526X_100MHF     1
+#define ULI526X_10MFD      4
+#define ULI526X_100MFD     5
+#define ULI526X_AUTO       8
+
+#define ULI526X_TXTH_72	0x400000	/* TX TH 72 byte */
+#define ULI526X_TXTH_96	0x404000	/* TX TH 96 byte */
+#define ULI526X_TXTH_128	0x0000		/* TX TH 128 byte */
+#define ULI526X_TXTH_256	0x4000		/* TX TH 256 byte */
+#define ULI526X_TXTH_512	0x8000		/* TX TH 512 byte */
+#define ULI526X_TXTH_1K	0xC000		/* TX TH 1K  byte */
+
+#define ULI526X_TIMER_WUT  (jiffies + HZ * 1)	/* timer wakeup time: 1 second */
+#define ULI526X_TX_TIMEOUT ((16*HZ)/2)	/* tx packet time-out time: 8 s */
+#define ULI526X_TX_KICK 	(4*HZ/2)	/* tx packet kick-out time: 2 s */
+
+#define ULI526X_DBUG(dbug_now, msg, value) \
+	do { if (uli526x_debug || (dbug_now)) \
+		printk(KERN_ERR DRV_NAME ": %s %lx\n", (msg), (long) (value)); } while (0)
+
+#define SHOW_MEDIA_TYPE(mode) \
+	printk(KERN_ERR DRV_NAME ": Change Speed to %sMbps %s duplex\n", (mode) & 1 ? "100" : "10", (mode) & 4 ? "full" : "half");
+
+
+/* CR9 definition: SROM/MII */
+#define CR9_SROM_READ   0x4800
+#define CR9_SRCS        0x1
+#define CR9_SRCLK       0x2
+#define CR9_CRDOUT      0x8
+#define SROM_DATA_0     0x0
+#define SROM_DATA_1     0x4
+#define PHY_DATA_1      0x20000
+#define PHY_DATA_0      0x00000
+#define MDCLKH          0x10000
+
+#define PHY_POWER_DOWN	0x800
+
+#define SROM_V41_CODE   0x14
+
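+/*
+ * Toggle the SROM clock (CR9_SRCLK) once while the data bit is held,
+ * with a 5us delay per phase.
+ */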
+#define SROM_CLK_WRITE(data, ioaddr)					\
+		outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr);		\
+		udelay(5);						\
+		outl(data|CR9_SROM_READ|CR9_SRCS|CR9_SRCLK,ioaddr);	\
+		udelay(5);						\
+		outl(data|CR9_SROM_READ|CR9_SRCS,ioaddr);		\
+		udelay(5);
+
+/* Structure/enum declaration ------------------------------- */
+struct tx_desc {
+        u32 tdes0, tdes1, tdes2, tdes3; /* Data for the card */
+        char *tx_buf_ptr;               /* Data for us */
+        struct tx_desc *next_tx_desc;
+} __attribute__(( aligned(32) ));
+
+struct rx_desc {
+	u32 rdes0, rdes1, rdes2, rdes3; /* Data for the card */
+	struct sk_buff *rx_skb_ptr;	/* Data for us */
+	struct rx_desc *next_rx_desc;
+} __attribute__(( aligned(32) ));
+
+struct uli526x_board_info {
+	u32 chip_id;			/* Chip vendor/Device ID */
+	struct net_device *next_dev;	/* next device */
+	struct pci_dev *pdev;		/* PCI device */
+	spinlock_t lock;
+
+	long ioaddr;			/* I/O base address */
+	u32 cr0_data;
+	u32 cr5_data;
+	u32 cr6_data;
+	u32 cr7_data;
+	u32 cr15_data;
+
+	/* pointer for memory physical address */
+	dma_addr_t buf_pool_dma_ptr;	/* Tx buffer pool memory */
+	dma_addr_t buf_pool_dma_start;	/* Tx buffer pool align dword */
+	dma_addr_t desc_pool_dma_ptr;	/* descriptor pool memory */
+	dma_addr_t first_tx_desc_dma;
+	dma_addr_t first_rx_desc_dma;
+
+	/* descriptor pointer */
+	unsigned char *buf_pool_ptr;	/* Tx buffer pool memory */
+	unsigned char *buf_pool_start;	/* Tx buffer pool align dword */
+	unsigned char *desc_pool_ptr;	/* descriptor pool memory */
+	struct tx_desc *first_tx_desc;
+	struct tx_desc *tx_insert_ptr;
+	struct tx_desc *tx_remove_ptr;
+	struct rx_desc *first_rx_desc;
+	struct rx_desc *rx_insert_ptr;
+	struct rx_desc *rx_ready_ptr;	/* next received packet to process */
+	unsigned long tx_packet_cnt;	/* transmitted packet count */
+	unsigned long rx_avail_cnt;	/* available rx descriptor count */
+	unsigned long interval_rx_cnt;	/* rx packets seen per timer interval */
+
+	u16 dbug_cnt;
+	u16 NIC_capability;		/* NIC media capability */
+	u16 PHY_reg4;			/* Saved Phyxcer register 4 value */
+
+	u8 media_mode;			/* user specify media mode */
+	u8 op_mode;			/* real work media mode */
+	u8 phy_addr;
+	u8 link_failed;			/* Ever link failed */
+	u8 wait_reset;			/* Hardware failed, need to reset */
+	struct timer_list timer;
+
+	/* System defined statistic counter */
+	struct net_device_stats stats;
+
+	/* Driver defined statistic counter */
+	unsigned long tx_fifo_underrun;
+	unsigned long tx_loss_carrier;
+	unsigned long tx_no_carrier;
+	unsigned long tx_late_collision;
+	unsigned long tx_excessive_collision;
+	unsigned long tx_jabber_timeout;
+	unsigned long reset_count;
+	unsigned long reset_cr8;
+	unsigned long reset_fatal;
+	unsigned long reset_TXtimeout;
+
+	/* NIC SROM data */
+	unsigned char srom[128];
+	u8 init;	
+};
+
+enum uli526x_offsets {
+	DCR0 = 0x00, DCR1 = 0x08, DCR2 = 0x10, DCR3 = 0x18, DCR4 = 0x20,
+	DCR5 = 0x28, DCR6 = 0x30, DCR7 = 0x38, DCR8 = 0x40, DCR9 = 0x48,
+	DCR10 = 0x50, DCR11 = 0x58, DCR12 = 0x60, DCR13 = 0x68, DCR14 = 0x70,
+	DCR15 = 0x78
+};
+
+enum uli526x_CR6_bits {
+	CR6_RXSC = 0x2, CR6_PBF = 0x8, CR6_PM = 0x40, CR6_PAM = 0x80,
+	CR6_FDM = 0x200, CR6_TXSC = 0x2000, CR6_STI = 0x100000,
+	CR6_SFT = 0x200000, CR6_RXA = 0x40000000, CR6_NO_PURGE = 0x20000000
+};
+
+/* Global variable declaration ----------------------------- */
+static int __devinitdata printed_version;
+static char version[] __devinitdata =
+	KERN_INFO DRV_NAME ": ULi M5261/M5263 net driver, version "
+	DRV_VERSION " (" DRV_RELDATE ")\n";
+
+static int uli526x_debug;
+static unsigned char uli526x_media_mode = ULI526X_AUTO;
+static u32 uli526x_cr6_user_set;
+
+/* For module input parameter */
+static int debug;
+static u32 cr6set;
+static unsigned char mode = 8;
+
+/* function declaration ------------------------------------- */
+static int uli526x_open(struct net_device *);
+static int uli526x_start_xmit(struct sk_buff *, struct net_device *);
+static int uli526x_stop(struct net_device *);
+static struct net_device_stats * uli526x_get_stats(struct net_device *);
+static void uli526x_set_filter_mode(struct net_device *);
+static struct ethtool_ops netdev_ethtool_ops;
+static u16 read_srom_word(long, int);
+static irqreturn_t uli526x_interrupt(int, void *, struct pt_regs *);
+static void uli526x_descriptor_init(struct uli526x_board_info *, unsigned long);
+static void allocate_rx_buffer(struct uli526x_board_info *);
+static void update_cr6(u32, unsigned long);
+static void send_filter_frame(struct net_device *, int);
+static u16 phy_read(unsigned long, u8, u8, u32);
+static u16 phy_readby_cr10(unsigned long, u8, u8);
+static void phy_write(unsigned long, u8, u8, u16, u32);
+static void phy_writeby_cr10(unsigned long, u8, u8, u16);
+static void phy_write_1bit(unsigned long, u32, u32);
+static u16 phy_read_1bit(unsigned long, u32);
+static u8 uli526x_sense_speed(struct uli526x_board_info *);
+static void uli526x_process_mode(struct uli526x_board_info *);
+static void uli526x_timer(unsigned long);
+static void uli526x_rx_packet(struct net_device *, struct uli526x_board_info *);
+static void uli526x_free_tx_pkt(struct net_device *, struct uli526x_board_info *);
+static void uli526x_reuse_skb(struct uli526x_board_info *, struct sk_buff *);
+static void uli526x_dynamic_reset(struct net_device *);
+static void uli526x_free_rxbuffer(struct uli526x_board_info *);
+static void uli526x_init(struct net_device *);
+static void uli526x_set_phyxcer(struct uli526x_board_info *);
+
+/* ULI526X network board routine ---------------------------- */
+
+/*
+ *	Search ULI526X board, allocate space and register it
+ */
+
+static int __devinit uli526x_init_one (struct pci_dev *pdev,
+				    const struct pci_device_id *ent)
+{
+	struct uli526x_board_info *db;	/* board information structure */
+	struct net_device *dev;
+	int i, err;
+	
+	ULI526X_DBUG(0, "uli526x_init_one()", 0);
+
+	if (!printed_version++)
+		printk(version);
+
+	/* Init network device */
+	dev = alloc_etherdev(sizeof(*db));
+	if (dev == NULL)
+		return -ENOMEM;
+	SET_MODULE_OWNER(dev);
+	SET_NETDEV_DEV(dev, &pdev->dev);
+
+	if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
+		printk(KERN_WARNING DRV_NAME ": 32-bit PCI DMA not available.\n");
+		err = -ENODEV;
+		goto err_out_free;
+	}
+
+	/* Enable Master/IO access, Disable memory access */
+	err = pci_enable_device(pdev);
+	if (err)
+		goto err_out_free;
+
+	if (!pci_resource_start(pdev, 0)) {
+		printk(KERN_ERR DRV_NAME ": I/O base is zero\n");
+		err = -ENODEV;
+		goto err_out_disable;
+	}
+
+	if (pci_resource_len(pdev, 0) < (ULI526X_IO_SIZE) ) {
+		printk(KERN_ERR DRV_NAME ": Allocated I/O size too small\n");
+		err = -ENODEV;
+		goto err_out_disable;
+	}
+
+	if (pci_request_regions(pdev, DRV_NAME)) {
+		printk(KERN_ERR DRV_NAME ": Failed to request PCI regions\n");
+		err = -ENODEV;
+		goto err_out_disable;
+	}
+
+	/* Init system & device */
+	db = netdev_priv(dev);
+
+	/* Allocate Tx/Rx descriptor memory */
+	db->desc_pool_ptr = pci_alloc_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20, &db->desc_pool_dma_ptr);
+	if(db->desc_pool_ptr == NULL)
+	{
+		err = -ENOMEM;
+		goto err_out_nomem;
+	}
+	db->buf_pool_ptr = pci_alloc_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4, &db->buf_pool_dma_ptr);
+	if(db->buf_pool_ptr == NULL)
+	{
+		err = -ENOMEM;
+		goto err_out_nomem;
+	}
+	
+	db->first_tx_desc = (struct tx_desc *) db->desc_pool_ptr;
+	db->first_tx_desc_dma = db->desc_pool_dma_ptr;
+	db->buf_pool_start = db->buf_pool_ptr;
+	db->buf_pool_dma_start = db->buf_pool_dma_ptr;
+
+	db->chip_id = ent->driver_data;
+	db->ioaddr = pci_resource_start(pdev, 0);
+	
+	db->pdev = pdev;
+	db->init = 1;
+	
+	dev->base_addr = db->ioaddr;
+	dev->irq = pdev->irq;
+	pci_set_drvdata(pdev, dev);
+	
+	/* Register some necessary functions */
+	dev->open = &uli526x_open;
+	dev->hard_start_xmit = &uli526x_start_xmit;
+	dev->stop = &uli526x_stop;
+	dev->get_stats = &uli526x_get_stats;
+	dev->set_multicast_list = &uli526x_set_filter_mode;
+	dev->ethtool_ops = &netdev_ethtool_ops;
+	spin_lock_init(&db->lock);
+
+		
+	/* read 64 word srom data */
+	for (i = 0; i < 64; i++)
+		((u16 *) db->srom)[i] = cpu_to_le16(read_srom_word(db->ioaddr, i));
+
+	/* Set Node address */
+	if(((u16 *) db->srom)[0] == 0xffff || ((u16 *) db->srom)[0] == 0)		/* SROM absent, so read MAC address from ID Table */
+	{
+		outl(0x10000, db->ioaddr + DCR0);	//Diagnosis mode
+	outl(0x1c0, db->ioaddr + DCR13);	//Reset diagnostic pointer port
+		outl(0, db->ioaddr + DCR14);		//Clear reset port
+		outl(0x10, db->ioaddr + DCR14);		//Reset ID Table pointer
+		outl(0, db->ioaddr + DCR14);		//Clear reset port
+		outl(0, db->ioaddr + DCR13);		//Clear CR13
+		outl(0x1b0, db->ioaddr + DCR13);	//Select ID Table access port
+		//Read MAC address from CR14
+		for (i = 0; i < 6; i++)
+			dev->dev_addr[i] = inl(db->ioaddr + DCR14);
+		//Read end
+		outl(0, db->ioaddr + DCR13);	//Clear CR13
+		outl(0, db->ioaddr + DCR0);		//Clear CR0
+		udelay(10);
+	}
+	else		/* SROM exists */
+	{
+		for (i = 0; i < 6; i++)
+			dev->dev_addr[i] = db->srom[20 + i];
+	}
+	err = register_netdev (dev);
+	if (err)
+		goto err_out_res;
+
+	printk(KERN_INFO "%s: ULi M%04lx at pci%s,",dev->name,ent->driver_data >> 16,pci_name(pdev));
+	
+	for (i = 0; i < 6; i++)
+		printk("%c%02x", i ? ':' : ' ', dev->dev_addr[i]);
+	printk(", irq %d.\n", dev->irq);
+
+	pci_set_master(pdev);
+
+	return 0;
+
+err_out_res:
+	pci_release_regions(pdev);
+err_out_nomem:
+	if(db->desc_pool_ptr)
+		pci_free_consistent(pdev, sizeof(struct tx_desc) * DESC_ALL_CNT + 0x20,
+			db->desc_pool_ptr, db->desc_pool_dma_ptr);
+			
+	if(db->buf_pool_ptr != NULL)
+		pci_free_consistent(pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
+			db->buf_pool_ptr, db->buf_pool_dma_ptr);
+err_out_disable:
+	pci_disable_device(pdev);
+err_out_free:
+	pci_set_drvdata(pdev, NULL);
+	free_netdev(dev);
+
+	return err;
+}
+
+
+static void __devexit uli526x_remove_one (struct pci_dev *pdev)
+{
+	struct net_device *dev = pci_get_drvdata(pdev);
+	struct uli526x_board_info *db = netdev_priv(dev);
+
+	ULI526X_DBUG(0, "uli526x_remove_one()", 0);
+
+	pci_free_consistent(db->pdev, sizeof(struct tx_desc) *
+				DESC_ALL_CNT + 0x20, db->desc_pool_ptr,
+ 				db->desc_pool_dma_ptr);
+	pci_free_consistent(db->pdev, TX_BUF_ALLOC * TX_DESC_CNT + 4,
+				db->buf_pool_ptr, db->buf_pool_dma_ptr);
+	unregister_netdev(dev);
+	pci_release_regions(pdev);
+	free_netdev(dev);	/* free board information */
+	pci_set_drvdata(pdev, NULL);
+	pci_disable_device(pdev);
+	ULI526X_DBUG(0, "uli526x_remove_one() exit", 0);
+}
+
+
+/*
+ *	Open the interface.
+ *	The interface is opened whenever "ifconfig" activates it.
+ */
+
+static int uli526x_open(struct net_device *dev)
+{
+	int ret;
+	struct uli526x_board_info *db = netdev_priv(dev);
+	
+	ULI526X_DBUG(0, "uli526x_open", 0);
+
+	ret = request_irq(dev->irq, &uli526x_interrupt, SA_SHIRQ, dev->name, dev);
+	if (ret)
+		return ret;
+
+	/* system variable init */
+	db->cr6_data = CR6_DEFAULT | uli526x_cr6_user_set;
+	db->tx_packet_cnt = 0;
+	db->rx_avail_cnt = 0;
+	db->link_failed = 1;
+	netif_carrier_off(dev);
+	db->wait_reset = 0;
+
+	db->NIC_capability = 0xf;	/* All capabilities */
+	db->PHY_reg4 = 0x1e0;
+
+	/* CR6 operation mode decision */
+	db->cr6_data |= ULI526X_TXTH_256;
+	db->cr0_data = CR0_DEFAULT;
+	
+	/* Initialize ULI526X board */
+	uli526x_init(dev);
+
+	/* Activate system interface */
+	netif_wake_queue(dev);
+
+	/* set and activate the timer process */
+	init_timer(&db->timer);
+	db->timer.expires = ULI526X_TIMER_WUT + HZ * 2;
+	db->timer.data = (unsigned long)dev;
+	db->timer.function = &uli526x_timer;
+	add_timer(&db->timer);
+
+	return 0;
+}
+
+
+/*	Initialize ULI526X board
+ *	Reset ULI526X board
+ *	Initialize TX/Rx descriptor chain structure
+ *	Send the set-up frame
+ *	Enable Tx/Rx machine
+ */
+
+static void uli526x_init(struct net_device *dev)
+{
+	struct uli526x_board_info *db = netdev_priv(dev);
+	unsigned long ioaddr = db->ioaddr;
+	u8	phy_tmp;
+	u16	phy_value;
+	u16 phy_reg_reset;
+
+	ULI526X_DBUG(0, "uli526x_init()", 0);
+
+	/* Reset M526x MAC controller */
+	outl(ULI526X_RESET, ioaddr + DCR0);	/* RESET MAC */
+	udelay(100);
+	outl(db->cr0_data, ioaddr + DCR0);
+	udelay(5);
+
+	/* Phy addr: on some boards the M5261/M5263 phy address != 1 */
+	db->phy_addr = 1;
+	for(phy_tmp=0;phy_tmp<32;phy_tmp++)
+	{
+		phy_value=phy_read(db->ioaddr,phy_tmp,3,db->chip_id);//peer add
+		if(phy_value != 0xffff&&phy_value!=0)
+		{
+			db->phy_addr = phy_tmp;
+			break;
+		}
+	}
+	if(phy_tmp == 32)
+		printk(KERN_WARNING "Cannot find the phy address!\n");
+	/* Parser SROM and media mode */
+	db->media_mode = uli526x_media_mode;
+
+	/* Phyxcer capability setting */
+	phy_reg_reset = phy_read(db->ioaddr, db->phy_addr, 0, db->chip_id);
+	phy_reg_reset = (phy_reg_reset | 0x8000);
+	phy_write(db->ioaddr, db->phy_addr, 0, phy_reg_reset, db->chip_id);
+	udelay(500);
+
+	/* Process Phyxcer Media Mode */
+	uli526x_set_phyxcer(db);
+
+	/* Media Mode Process */
+	if ( !(db->media_mode & ULI526X_AUTO) )
+		db->op_mode = db->media_mode; 	/* Force Mode */
+
+	/* Initialize Transmit/Receive descriptor and CR3/4 */
+	uli526x_descriptor_init(db, ioaddr);
+
+	/* Init CR6 to program M526X operation */
+	update_cr6(db->cr6_data, ioaddr);
+
+	/* Send setup frame */
+	send_filter_frame(dev, dev->mc_count);	/* M5261/M5263 */
+
+	/* Init CR7, interrupt active bit */
+	db->cr7_data = CR7_DEFAULT;
+	outl(db->cr7_data, ioaddr + DCR7);
+
+	/* Init CR15, Tx jabber and Rx watchdog timer */
+	outl(db->cr15_data, ioaddr + DCR15);
+
+	/* Enable ULI526X Tx/Rx function */
+	db->cr6_data |= CR6_RXSC | CR6_TXSC;
+	update_cr6(db->cr6_data, ioaddr);
+}
+
+
+/*
+ *	Hardware start transmission.
+ *	Send a packet to media from the upper layer.
+ */
+
+static int uli526x_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct uli526x_board_info *db = netdev_priv(dev);
+	struct tx_desc *txptr;
+	unsigned long flags;
+
+	ULI526X_DBUG(0, "uli526x_start_xmit", 0);
+
+	/* Resource flag check */
+	netif_stop_queue(dev);
+
+	/* Too large packet check */
+	if (skb->len > MAX_PACKET_SIZE) {
+		printk(KERN_ERR DRV_NAME ": big packet = %d\n", (u16)skb->len);
+		dev_kfree_skb(skb);
+		return 0;
+	}
+
+	spin_lock_irqsave(&db->lock, flags);
+
+	/* No Tx resource check; this should never happen normally */
+	if (db->tx_packet_cnt >= TX_FREE_DESC_CNT) {
+		spin_unlock_irqrestore(&db->lock, flags);
+		printk(KERN_ERR DRV_NAME ": No Tx resource %ld\n", db->tx_packet_cnt);
+		return 1;
+	}
+
+	/* Disable NIC interrupt */
+	outl(0, dev->base_addr + DCR7);
+
+	/* transmit this packet */
+	txptr = db->tx_insert_ptr;
+	memcpy(txptr->tx_buf_ptr, skb->data, skb->len);
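+	/*
+	 * The 0xe1000000 constant below appears to set the tulip-style TDES1
+	 * bits for interrupt-on-completion, last/first segment and chained
+	 * descriptor; the low bits carry the frame length (inferred from use).
+	 */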
+	txptr->tdes1 = cpu_to_le32(0xe1000000 | skb->len);
+
+	/* Point to next transmit free descriptor */
+	db->tx_insert_ptr = txptr->next_tx_desc;
+
+	/* Transmit Packet Process */
+	if ( (db->tx_packet_cnt < TX_DESC_CNT) ) {
+		txptr->tdes0 = cpu_to_le32(0x80000000);	/* Set owner bit */
+		db->tx_packet_cnt++;			/* Ready to send */
+		outl(0x1, dev->base_addr + DCR1);	/* Issue Tx polling */
+		dev->trans_start = jiffies;		/* saved time stamp */
+	}
+
+	/* Tx resource check */
+	if ( db->tx_packet_cnt < TX_FREE_DESC_CNT )
+		netif_wake_queue(dev);
+
+	/* Restore CR7 to enable interrupt */
+	spin_unlock_irqrestore(&db->lock, flags);
+	outl(db->cr7_data, dev->base_addr + DCR7);
+	
+	/* free this SKB */
+	dev_kfree_skb(skb);
+
+	return 0;
+}
+
+
+/*
+ *	Stop the interface.
+ *	The interface is stopped when it is brought down.
+ */
+
+static int uli526x_stop(struct net_device *dev)
+{
+	struct uli526x_board_info *db = netdev_priv(dev);
+	unsigned long ioaddr = dev->base_addr;
+
+	ULI526X_DBUG(0, "uli526x_stop", 0);
+
+	/* disable system */
+	netif_stop_queue(dev);
+
+	/* deleted timer */
+	del_timer_sync(&db->timer);
+
+	/* Reset & stop ULI526X board */
+	outl(ULI526X_RESET, ioaddr + DCR0);
+	udelay(5);
+	phy_write(db->ioaddr, db->phy_addr, 0, 0x8000, db->chip_id);
+
+	/* free interrupt */
+	free_irq(dev->irq, dev);
+
+	/* free allocated rx buffer */
+	uli526x_free_rxbuffer(db);
+
+#if 0
+	/* show statistic counter */
+	printk(DRV_NAME ": FU:%lx EC:%lx LC:%lx NC:%lx LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
+		db->tx_fifo_underrun, db->tx_excessive_collision,
+		db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier,
+		db->tx_jabber_timeout, db->reset_count, db->reset_cr8,
+		db->reset_fatal, db->reset_TXtimeout);
+#endif
+
+	return 0;
+}
+
+
+/*
+ *	M5261/M5263 interrupt handler
+ *	Pass received packets to the upper layer, free transmitted packets
+ */
+
+static irqreturn_t uli526x_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct net_device *dev = dev_id;
+	struct uli526x_board_info *db = netdev_priv(dev);
+	unsigned long ioaddr = dev->base_addr;
+	unsigned long flags;
+
+	if (!dev) {
+		ULI526X_DBUG(1, "uli526x_interrupt() without DEVICE arg", 0);
+		return IRQ_NONE;
+	}
+
+	spin_lock_irqsave(&db->lock, flags);
+	outl(0, ioaddr + DCR7);
+
+	/* Got ULI526X status */
+	db->cr5_data = inl(ioaddr + DCR5);
+	outl(db->cr5_data, ioaddr + DCR5);
+	if ( !(db->cr5_data & 0x180c1) ) {
+		spin_unlock_irqrestore(&db->lock, flags);
+		outl(db->cr7_data, ioaddr + DCR7);
+		return IRQ_HANDLED;
+	}
+
+	/* Check system status */
+	if (db->cr5_data & 0x2000) {
+		/* system bus error happen */
+		ULI526X_DBUG(1, "System bus error happen. CR5=", db->cr5_data);
+		db->reset_fatal++;
+		db->wait_reset = 1;	/* Need to RESET */
+		spin_unlock_irqrestore(&db->lock, flags);
+		return IRQ_HANDLED;
+	}
+
+	/* Receive the incoming packet */
+	if ( (db->cr5_data & 0x40) && db->rx_avail_cnt )
+		uli526x_rx_packet(dev, db);
+
+	/* reallocate rx descriptor buffer */
+	if (db->rx_avail_cnt<RX_DESC_CNT)
+		allocate_rx_buffer(db);
+
+	/* Free the transmitted descriptor */
+	if ( db->cr5_data & 0x01)
+		uli526x_free_tx_pkt(dev, db);
+
+	/* Restore CR7 to enable interrupt mask */
+	outl(db->cr7_data, ioaddr + DCR7);
+
+	spin_unlock_irqrestore(&db->lock, flags);
+	return IRQ_HANDLED;
+}
+
+
+/*
+ *	Free TX resource after TX complete
+ */
+
+static void uli526x_free_tx_pkt(struct net_device *dev, struct uli526x_board_info * db)
+{
+	struct tx_desc *txptr;
+	u32 tdes0;
+
+	txptr = db->tx_remove_ptr;
+	while(db->tx_packet_cnt) {
+		tdes0 = le32_to_cpu(txptr->tdes0);
+		/* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
+		if (tdes0 & 0x80000000)
+			break;
+
+		/* A packet sent completed */
+		db->tx_packet_cnt--;
+		db->stats.tx_packets++;
+
+		/* Transmit statistic counter */
+		if ( tdes0 != 0x7fffffff ) {
+			/* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
+			db->stats.collisions += (tdes0 >> 3) & 0xf;
+			db->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
+			if (tdes0 & TDES0_ERR_MASK) {
+				db->stats.tx_errors++;
+				if (tdes0 & 0x0002) {	/* UnderRun */
+					db->tx_fifo_underrun++;
+					if ( !(db->cr6_data & CR6_SFT) ) {
+						db->cr6_data = db->cr6_data | CR6_SFT;
+						update_cr6(db->cr6_data, db->ioaddr);
+					}
+				}
+				if (tdes0 & 0x0100)
+					db->tx_excessive_collision++;
+				if (tdes0 & 0x0200)
+					db->tx_late_collision++;
+				if (tdes0 & 0x0400)
+					db->tx_no_carrier++;
+				if (tdes0 & 0x0800)
+					db->tx_loss_carrier++;
+				if (tdes0 & 0x4000)
+					db->tx_jabber_timeout++;
+			}
+		}
+
+    		txptr = txptr->next_tx_desc;
+	}/* End of while */
+
+	/* Update TX remove pointer to next */
+	db->tx_remove_ptr = txptr;
+
+	/* Resource available check */
+	if ( db->tx_packet_cnt < TX_WAKE_DESC_CNT )
+		netif_wake_queue(dev);	/* Active upper layer, send again */
+}
+
+
+/*
+ *	Receive the incoming packet and pass it to the upper layer
+ */
+
+static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info * db)
+{
+	struct rx_desc *rxptr;
+	struct sk_buff *skb;
+	int rxlen;
+	u32 rdes0;
+	
+	rxptr = db->rx_ready_ptr;
+
+	while(db->rx_avail_cnt) {
+		rdes0 = le32_to_cpu(rxptr->rdes0);
+		if (rdes0 & 0x80000000)	/* packet owner check */
+		{
+			break;
+		}
+
+		db->rx_avail_cnt--;
+		db->interval_rx_cnt++;
+
+		pci_unmap_single(db->pdev, le32_to_cpu(rxptr->rdes2), RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE);
+		if ( (rdes0 & 0x300) != 0x300) {
+			/* A packet without First/Last flag */
+			/* reuse this SKB */
+			ULI526X_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
+			uli526x_reuse_skb(db, rxptr->rx_skb_ptr);
+		} else {
+			/* A packet with First/Last flag */
+			rxlen = ( (rdes0 >> 16) & 0x3fff) - 4;
+
+			/* error summary bit check */
+			if (rdes0 & 0x8000) {
+				/* This is an error packet */
+				//printk(DRV_NAME ": rdes0: %lx\n", rdes0);
+				db->stats.rx_errors++;
+				if (rdes0 & 1)
+					db->stats.rx_fifo_errors++;
+				if (rdes0 & 2)
+					db->stats.rx_crc_errors++;
+				if (rdes0 & 0x80)
+					db->stats.rx_length_errors++;
+			}
+
+			if ( !(rdes0 & 0x8000) ||
+				((db->cr6_data & CR6_PM) && (rxlen>6)) ) {
+				skb = rxptr->rx_skb_ptr;
+		
+				/* Good packet, send to upper layer */
+				/* Short packets are copied into a new SKB */
+				if ( (rxlen < RX_COPY_SIZE) &&
+					( (skb = dev_alloc_skb(rxlen + 2) )
+					!= NULL) ) {
+					/* size less than COPY_SIZE, allocate a rxlen SKB */
+					skb->dev = dev;
+					skb_reserve(skb, 2); /* 16byte align */
+					memcpy(skb_put(skb, rxlen), rxptr->rx_skb_ptr->tail, rxlen);
+					uli526x_reuse_skb(db, rxptr->rx_skb_ptr);
+				} else {
+					skb->dev = dev;
+					skb_put(skb, rxlen);
+				}
+				skb->protocol = eth_type_trans(skb, dev);
+				netif_rx(skb);
+				dev->last_rx = jiffies;
+				db->stats.rx_packets++;
+				db->stats.rx_bytes += rxlen;
+				
+			} else {
+				/* Reuse SKB buffer when the packet is error */
+				ULI526X_DBUG(0, "Reuse SK buffer, rdes0", rdes0);
+				uli526x_reuse_skb(db, rxptr->rx_skb_ptr);
+			}
+		}
+
+		rxptr = rxptr->next_rx_desc;
+	}
+
+	db->rx_ready_ptr = rxptr;
+}
+
+
+/*
+ *	Get statistics from driver.
+ */
+
+static struct net_device_stats * uli526x_get_stats(struct net_device *dev)
+{
+	struct uli526x_board_info *db = netdev_priv(dev);
+
+	ULI526X_DBUG(0, "uli526x_get_stats", 0);
+	return &db->stats;
+}
+
+
+/*
+ * Set ULI526X multicast address
+ */
+
+static void uli526x_set_filter_mode(struct net_device * dev)
+{
+	struct uli526x_board_info *db = netdev_priv(dev);
+	unsigned long flags;
+
+	ULI526X_DBUG(0, "uli526x_set_filter_mode()", 0);
+	spin_lock_irqsave(&db->lock, flags);
+
+	if (dev->flags & IFF_PROMISC) {
+		ULI526X_DBUG(0, "Enable PROM Mode", 0);
+		db->cr6_data |= CR6_PM | CR6_PBF;
+		update_cr6(db->cr6_data, db->ioaddr);
+		spin_unlock_irqrestore(&db->lock, flags);
+		return;
+	}
+
+	if (dev->flags & IFF_ALLMULTI || dev->mc_count > ULI5261_MAX_MULTICAST) {
+		ULI526X_DBUG(0, "Pass all multicast address", dev->mc_count);
+		db->cr6_data &= ~(CR6_PM | CR6_PBF);
+		db->cr6_data |= CR6_PAM;
+		spin_unlock_irqrestore(&db->lock, flags);
+		return;
+	}
+
+	ULI526X_DBUG(0, "Set multicast address", dev->mc_count);
+	send_filter_frame(dev, dev->mc_count); 	/* M5261/M5263 */
+	spin_unlock_irqrestore(&db->lock, flags);
+}
+
+static void
+ULi_ethtool_gset(struct uli526x_board_info *db, struct ethtool_cmd *ecmd)
+{
+	ecmd->supported = (SUPPORTED_10baseT_Half |
+	                   SUPPORTED_10baseT_Full |
+	                   SUPPORTED_100baseT_Half |
+	                   SUPPORTED_100baseT_Full |
+	                   SUPPORTED_Autoneg |
+	                   SUPPORTED_MII);
+		
+	ecmd->advertising = (ADVERTISED_10baseT_Half |
+	                   ADVERTISED_10baseT_Full |
+	                   ADVERTISED_100baseT_Half |
+	                   ADVERTISED_100baseT_Full |
+	                   ADVERTISED_Autoneg |
+	                   ADVERTISED_MII);
+
+
+	ecmd->port = PORT_MII;
+	ecmd->phy_address = db->phy_addr;
+
+	ecmd->transceiver = XCVR_EXTERNAL;
+		
+	ecmd->speed = 10;
+	ecmd->duplex = DUPLEX_HALF;
+	
+	if(db->op_mode==ULI526X_100MHF || db->op_mode==ULI526X_100MFD)
+	{
+		ecmd->speed = 100;               
+	}
+	if(db->op_mode==ULI526X_10MFD || db->op_mode==ULI526X_100MFD)
+	{
+		ecmd->duplex = DUPLEX_FULL;
+	}
+	if(db->link_failed)
+	{
+		ecmd->speed = -1;
+		ecmd->duplex = -1;	
+	}
+	
+	if (db->media_mode & ULI526X_AUTO)
+	{	
+		ecmd->autoneg = AUTONEG_ENABLE;
+	}
+}
+
+static void netdev_get_drvinfo(struct net_device *dev,
+			       struct ethtool_drvinfo *info)
+{
+	struct uli526x_board_info *np = netdev_priv(dev);
+
+	strcpy(info->driver, DRV_NAME);
+	strcpy(info->version, DRV_VERSION);
+	if (np->pdev)
+		strcpy(info->bus_info, pci_name(np->pdev));
+	else
+		sprintf(info->bus_info, "EISA 0x%lx %d",
+			dev->base_addr, dev->irq);
+}
+
+static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) {
+	struct uli526x_board_info *np = netdev_priv(dev);
+	
+	ULi_ethtool_gset(np, cmd);
+	
+	return 0;
+}
+
+static u32 netdev_get_link(struct net_device *dev) {
+	struct uli526x_board_info *np = netdev_priv(dev);
+		
+	if(np->link_failed)
+		return 0;
+	else
+		return 1;
+}
+
+static void uli526x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	wol->supported = WAKE_PHY | WAKE_MAGIC;
+	wol->wolopts = 0;
+}
+
+static struct ethtool_ops netdev_ethtool_ops = {
+	.get_drvinfo		= netdev_get_drvinfo,
+	.get_settings		= netdev_get_settings,
+	.get_link		= netdev_get_link,
+	.get_wol		= uli526x_get_wol,
+};
+
+/*
+ *	A periodic timer routine
+ *	Dynamic media sense, allocate Rx buffer...
+ */
+
+static void uli526x_timer(unsigned long data)
+{
+	u32 tmp_cr8;
+	unsigned char tmp_cr12=0;
+	struct net_device *dev = (struct net_device *) data;
+	struct uli526x_board_info *db = netdev_priv(dev);
+ 	unsigned long flags;
+	u8 TmpSpeed=10;
+	
+	//ULI526X_DBUG(0, "uli526x_timer()", 0);
+	spin_lock_irqsave(&db->lock, flags);
+
+	
+	/* Dynamic reset ULI526X : system error or transmit time-out */
+	tmp_cr8 = inl(db->ioaddr + DCR8);
+	if ( (db->interval_rx_cnt==0) && (tmp_cr8) ) {
+		db->reset_cr8++;
+		db->wait_reset = 1;
+	}
+	db->interval_rx_cnt = 0;
+
+	/* TX polling kick monitor */
+	if ( db->tx_packet_cnt &&
+	     time_after(jiffies, dev->trans_start + ULI526X_TX_KICK) ) {
+		outl(0x1, dev->base_addr + DCR1);   // Tx polling again 
+
+		// TX Timeout 
+		if ( time_after(jiffies, dev->trans_start + ULI526X_TX_TIMEOUT) ) {
+			db->reset_TXtimeout++;
+			db->wait_reset = 1;
+			printk(KERN_WARNING "%s: Tx timeout - resetting\n",
+			       dev->name);
+		}
+	}
+
+	if (db->wait_reset) {
+		ULI526X_DBUG(0, "Dynamic Reset device", db->tx_packet_cnt);
+		db->reset_count++;
+		uli526x_dynamic_reset(dev);
+		db->timer.expires = ULI526X_TIMER_WUT;
+		add_timer(&db->timer);
+		spin_unlock_irqrestore(&db->lock, flags);
+		return;
+	}
+
+	/* Link status check, Dynamic media type change */
+	if((phy_read(db->ioaddr, db->phy_addr, 5, db->chip_id) & 0x01e0)!=0)
+		tmp_cr12 = 3;
+
+	if ( !(tmp_cr12 & 0x3) && !db->link_failed ) {
+		/* Link Failed */
+		ULI526X_DBUG(0, "Link Failed", tmp_cr12);
+		netif_carrier_off(dev);
+		printk(KERN_INFO "uli526x: %s NIC Link is Down\n",dev->name);
+		db->link_failed = 1;
+
+		/* For forced 10/100M Half/Full mode: re-enable auto-negotiation */
+		/* (not needed in AUTO mode) */
+		if ( !(db->media_mode & 0x8) )
+			phy_write(db->ioaddr, db->phy_addr, 0, 0x1000, db->chip_id);
+
+		/* AUTO mode, if INT phyxcer link failed, select EXT device */
+		if (db->media_mode & ULI526X_AUTO) {
+			db->cr6_data&=~0x00000200;	/* bit9=0, HD mode */
+			update_cr6(db->cr6_data, db->ioaddr);
+		}
+	} else
+		if ((tmp_cr12 & 0x3) && db->link_failed) {
+			ULI526X_DBUG(0, "Link OK", tmp_cr12);
+			db->link_failed = 0;
+
+			/* Auto Sense Speed */
+			if ( (db->media_mode & ULI526X_AUTO) &&
+				uli526x_sense_speed(db) )
+				db->link_failed = 1;
+			uli526x_process_mode(db);
+			
+			if(db->link_failed==0)
+			{
+				if(db->op_mode==ULI526X_100MHF || db->op_mode==ULI526X_100MFD)
+				{
+					TmpSpeed = 100;
+				}
+				if(db->op_mode==ULI526X_10MFD || db->op_mode==ULI526X_100MFD)
+				{
+					printk(KERN_INFO "uli526x: %s NIC Link is Up %d Mbps Full duplex\n",dev->name,TmpSpeed);
+				}
+				else
+				{
+					printk(KERN_INFO "uli526x: %s NIC Link is Up %d Mbps Half duplex\n",dev->name,TmpSpeed);
+				}
+				netif_carrier_on(dev);
+			}
+			/* SHOW_MEDIA_TYPE(db->op_mode); */
+		}
+		else if(!(tmp_cr12 & 0x3) && db->link_failed)
+		{
+			if(db->init==1)
+			{
+				printk(KERN_INFO "uli526x: %s NIC Link is Down\n",dev->name);
+				netif_carrier_off(dev);
+			}
+		}
+	db->init = 0;
+
+	/* Timer active again */
+	db->timer.expires = ULI526X_TIMER_WUT;
+	add_timer(&db->timer);
+	spin_unlock_irqrestore(&db->lock, flags);
+}
+
+
+/*
+ *	Dynamic reset the ULI526X board
+ *	Stop ULI526X board
+ *	Free Tx/Rx allocated memory
+ *	Reset ULI526X board
+ *	Re-initialize ULI526X board
+ */
+
+static void uli526x_dynamic_reset(struct net_device *dev)
+{
+	struct uli526x_board_info *db = netdev_priv(dev);
+
+	ULI526X_DBUG(0, "uli526x_dynamic_reset()", 0);
+
+	/* Stop MAC controller */
+	db->cr6_data &= ~(CR6_RXSC | CR6_TXSC);	/* Disable Tx/Rx */
+	update_cr6(db->cr6_data, dev->base_addr);
+	outl(0, dev->base_addr + DCR7);		/* Disable Interrupt */
+	outl(inl(dev->base_addr + DCR5), dev->base_addr + DCR5);
+
+	/* Disable upper layer interface */
+	netif_stop_queue(dev);
+
+	/* Free Rx Allocate buffer */
+	uli526x_free_rxbuffer(db);
+
+	/* system variable init */
+	db->tx_packet_cnt = 0;
+	db->rx_avail_cnt = 0;
+	db->link_failed = 1;
+	db->init=1;
+	db->wait_reset = 0;
+
+	/* Re-initialize ULI526X board */
+	uli526x_init(dev);
+
+	/* Restart upper layer interface */
+	netif_wake_queue(dev);
+}
+
+
+/*
+ *	free all allocated rx buffer
+ */
+
+static void uli526x_free_rxbuffer(struct uli526x_board_info * db)
+{
+	ULI526X_DBUG(0, "uli526x_free_rxbuffer()", 0);
+
+	/* free allocated rx buffer */
+	while (db->rx_avail_cnt) {
+		dev_kfree_skb(db->rx_ready_ptr->rx_skb_ptr);
+		db->rx_ready_ptr = db->rx_ready_ptr->next_rx_desc;
+		db->rx_avail_cnt--;
+	}
+}
+
+
+/*
+ *	Reuse the SK buffer
+ */
+
+static void uli526x_reuse_skb(struct uli526x_board_info *db, struct sk_buff * skb)
+{
+	struct rx_desc *rxptr = db->rx_insert_ptr;
+
+	if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
+		rxptr->rx_skb_ptr = skb;
+		rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->tail, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
+		wmb();
+		rxptr->rdes0 = cpu_to_le32(0x80000000);
+		db->rx_avail_cnt++;
+		db->rx_insert_ptr = rxptr->next_rx_desc;
+	} else
+		ULI526X_DBUG(0, "SK Buffer reuse method error", db->rx_avail_cnt);
+}
+
+
+/*
+ *	Initialize transmit/Receive descriptor
+ *	Using Chain structure, and allocate Tx/Rx buffer
+ */
+
+static void uli526x_descriptor_init(struct uli526x_board_info *db, unsigned long ioaddr)
+{
+	struct tx_desc *tmp_tx;
+	struct rx_desc *tmp_rx;
+	unsigned char *tmp_buf;
+	dma_addr_t tmp_tx_dma, tmp_rx_dma;
+	dma_addr_t tmp_buf_dma;
+	int i;
+
+	ULI526X_DBUG(0, "uli526x_descriptor_init()", 0);
+
+	/* tx descriptor start pointer */
+	db->tx_insert_ptr = db->first_tx_desc;
+	db->tx_remove_ptr = db->first_tx_desc;
+	outl(db->first_tx_desc_dma, ioaddr + DCR4);     /* TX DESC address */
+
+	/* rx descriptor start pointer */
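+	/* Rx descriptors follow the Tx descriptors in the single DMA pool allocated at probe time */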
+	db->first_rx_desc = (void *)db->first_tx_desc + sizeof(struct tx_desc) * TX_DESC_CNT;
+	db->first_rx_desc_dma =  db->first_tx_desc_dma + sizeof(struct tx_desc) * TX_DESC_CNT;
+	db->rx_insert_ptr = db->first_rx_desc;
+	db->rx_ready_ptr = db->first_rx_desc;
+	outl(db->first_rx_desc_dma, ioaddr + DCR3);	/* RX DESC address */
+
+	/* Init Transmit chain */
+	tmp_buf = db->buf_pool_start;
+	tmp_buf_dma = db->buf_pool_dma_start;
+	tmp_tx_dma = db->first_tx_desc_dma;
+	for (tmp_tx = db->first_tx_desc, i = 0; i < TX_DESC_CNT; i++, tmp_tx++) {
+		tmp_tx->tx_buf_ptr = tmp_buf;
+		tmp_tx->tdes0 = cpu_to_le32(0);
+		tmp_tx->tdes1 = cpu_to_le32(0x81000000);	/* IC, chain */
+		tmp_tx->tdes2 = cpu_to_le32(tmp_buf_dma);
+		tmp_tx_dma += sizeof(struct tx_desc);
+		tmp_tx->tdes3 = cpu_to_le32(tmp_tx_dma);
+		tmp_tx->next_tx_desc = tmp_tx + 1;
+		tmp_buf = tmp_buf + TX_BUF_ALLOC;
+		tmp_buf_dma = tmp_buf_dma + TX_BUF_ALLOC;
+	}
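+	/* Close the Tx ring: point the last descriptor back to the first */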
+	(--tmp_tx)->tdes3 = cpu_to_le32(db->first_tx_desc_dma);
+	tmp_tx->next_tx_desc = db->first_tx_desc;
+
+	 /* Init Receive descriptor chain */
+	tmp_rx_dma=db->first_rx_desc_dma;
+	for (tmp_rx = db->first_rx_desc, i = 0; i < RX_DESC_CNT; i++, tmp_rx++) {
+		tmp_rx->rdes0 = cpu_to_le32(0);
+		tmp_rx->rdes1 = cpu_to_le32(0x01000600);
+		tmp_rx_dma += sizeof(struct rx_desc);
+		tmp_rx->rdes3 = cpu_to_le32(tmp_rx_dma);
+		tmp_rx->next_rx_desc = tmp_rx + 1;
+	}
+	(--tmp_rx)->rdes3 = cpu_to_le32(db->first_rx_desc_dma);
+	tmp_rx->next_rx_desc = db->first_rx_desc;
+
+	/* pre-allocate Rx buffer */
+	allocate_rx_buffer(db);
+}
+
+
+/*
+ *	Update CR6 value
+ *	Write the new value to CR6 and allow it to settle
+ */
+
+static void update_cr6(u32 cr6_data, unsigned long ioaddr)
+{
+
+	outl(cr6_data, ioaddr + DCR6);
+	udelay(5);
+}
+
+
+/*
+ *	Send a setup frame for M5261/M5263
+ *	This setup frame initializes the ULI526X address filter
+ */
+
+static void send_filter_frame(struct net_device *dev, int mc_cnt)
+{
+	struct uli526x_board_info *db = netdev_priv(dev);
+	struct dev_mc_list *mcptr;
+	struct tx_desc *txptr;
+	u16 * addrptr;
+	u32 * suptr;
+	int i;
+
+	ULI526X_DBUG(0, "send_filter_frame()", 0);
+
+	txptr = db->tx_insert_ptr;
+	suptr = (u32 *) txptr->tx_buf_ptr;
+
+	/* Node address */
+	addrptr = (u16 *) dev->dev_addr;
+	*suptr++ = addrptr[0];
+	*suptr++ = addrptr[1];
+	*suptr++ = addrptr[2];
+
+	/* broadcast address */
+	*suptr++ = 0xffff;
+	*suptr++ = 0xffff;
+	*suptr++ = 0xffff;
+
+	/* fit the multicast address */
+	for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
+		addrptr = (u16 *) mcptr->dmi_addr;
+		*suptr++ = addrptr[0];
+		*suptr++ = addrptr[1];
+		*suptr++ = addrptr[2];
+	}
+
+	for (; i<14; i++) {
+		*suptr++ = 0xffff;
+		*suptr++ = 0xffff;
+		*suptr++ = 0xffff;
+	}
+
+	/* prepare the setup frame */
+	db->tx_insert_ptr = txptr->next_tx_desc;
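+	/*
+	 * 0x890000c0: the low byte (0xc0 = 192) is the setup-buffer length; the
+	 * upper bits are assumed to flag a setup frame with a chained descriptor,
+	 * following the tulip-style TDES1 layout.
+	 */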
+	txptr->tdes1 = cpu_to_le32(0x890000c0);
+
+	/* Resource Check and Send the setup packet */
+	if (db->tx_packet_cnt < TX_DESC_CNT) {
+		/* Tx descriptor available */
+		db->tx_packet_cnt++;
+		txptr->tdes0 = cpu_to_le32(0x80000000);
+		update_cr6(db->cr6_data | 0x2000, dev->base_addr);
+		outl(0x1, dev->base_addr + DCR1);	/* Issue Tx polling */
+		update_cr6(db->cr6_data, dev->base_addr);
+		dev->trans_start = jiffies;
+	} else
+		printk(KERN_ERR DRV_NAME ": No Tx resource - Send_filter_frame!\n");
+}
+
+
+/*
+ *	Allocate rx buffers,
+ *	refilling as many Rx descriptors as possible
+ */
+
+static void allocate_rx_buffer(struct uli526x_board_info *db)
+{
+	struct rx_desc *rxptr;
+	struct sk_buff *skb;
+
+	rxptr = db->rx_insert_ptr;
+
+	while(db->rx_avail_cnt < RX_DESC_CNT) {
+		if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL )
+			break;
+		rxptr->rx_skb_ptr = skb; /* FIXME (?) */
+		rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->tail, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
+		wmb();
+		rxptr->rdes0 = cpu_to_le32(0x80000000);
+		rxptr = rxptr->next_rx_desc;
+		db->rx_avail_cnt++;
+	}
+
+	db->rx_insert_ptr = rxptr;
+}
+
+
+/*
+ *	Read one word of data from the serial ROM
+ */
+
+static u16 read_srom_word(long ioaddr, int offset)
+{
+	int i;
+	u16 srom_data = 0;
+	long cr9_ioaddr = ioaddr + DCR9;
+
+	outl(CR9_SROM_READ, cr9_ioaddr);
+	outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
+
+	/* Send the Read Command 110b */
+	SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
+	SROM_CLK_WRITE(SROM_DATA_1, cr9_ioaddr);
+	SROM_CLK_WRITE(SROM_DATA_0, cr9_ioaddr);
+
+	/* Send the offset */
+	for (i = 5; i >= 0; i--) {
+		srom_data = (offset & (1 << i)) ? SROM_DATA_1 : SROM_DATA_0;
+		SROM_CLK_WRITE(srom_data, cr9_ioaddr);
+	}
+
+	outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
+
+	for (i = 16; i > 0; i--) {
+		outl(CR9_SROM_READ | CR9_SRCS | CR9_SRCLK, cr9_ioaddr);
+		udelay(5);
+		srom_data = (srom_data << 1) | ((inl(cr9_ioaddr) & CR9_CRDOUT) ? 1 : 0);
+		outl(CR9_SROM_READ | CR9_SRCS, cr9_ioaddr);
+		udelay(5);
+	}
+
+	outl(CR9_SROM_READ, cr9_ioaddr);
+	return srom_data;
+}
+
+
+/*
+ *	Auto sense the media mode
+ */
+
+static u8 uli526x_sense_speed(struct uli526x_board_info * db)
+{
+	u8 ErrFlag = 0;
+	u16 phy_mode;
+
+	phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
+	phy_mode = phy_read(db->ioaddr, db->phy_addr, 1, db->chip_id);
+
+	if ( (phy_mode & 0x24) == 0x24 ) {
+		
+		phy_mode = ((phy_read(db->ioaddr, db->phy_addr, 5, db->chip_id) & 0x01e0)<<7);
+		if(phy_mode&0x8000)
+			phy_mode = 0x8000;
+		else if(phy_mode&0x4000)
+			phy_mode = 0x4000;
+		else if(phy_mode&0x2000)
+			phy_mode = 0x2000;
+		else
+			phy_mode = 0x1000;
+		
+		/* printk(DRV_NAME ": Phy_mode %x ",phy_mode); */
+		switch (phy_mode) {
+		case 0x1000: db->op_mode = ULI526X_10MHF; break;
+		case 0x2000: db->op_mode = ULI526X_10MFD; break;
+		case 0x4000: db->op_mode = ULI526X_100MHF; break;
+		case 0x8000: db->op_mode = ULI526X_100MFD; break;
+		default: db->op_mode = ULI526X_10MHF; ErrFlag = 1; break;
+		}
+	} else {
+		db->op_mode = ULI526X_10MHF;
+		ULI526X_DBUG(0, "Link Failed :", phy_mode);
+		ErrFlag = 1;
+	}
+
+	return ErrFlag;
+}
+
+
+/*
+ *	Set 10/100 phyxcer capability
+ *	AUTO mode : phyxcer register4 is NIC capability
+ *	Force mode: phyxcer register4 is the force media
+ */
+
+static void uli526x_set_phyxcer(struct uli526x_board_info *db)
+{
+	u16 phy_reg;
+	
+	/* Phyxcer capability setting */
+	phy_reg = phy_read(db->ioaddr, db->phy_addr, 4, db->chip_id) & ~0x01e0;
+
+	if (db->media_mode & ULI526X_AUTO) {
+		/* AUTO Mode */
+		phy_reg |= db->PHY_reg4;
+	} else {
+		/* Force Mode */
+		switch(db->media_mode) {
+		case ULI526X_10MHF: phy_reg |= 0x20; break;
+		case ULI526X_10MFD: phy_reg |= 0x40; break;
+		case ULI526X_100MHF: phy_reg |= 0x80; break;
+		case ULI526X_100MFD: phy_reg |= 0x100; break;
+		}
+		
+	}
+
+  	/* Write new capability to Phyxcer Reg4 */
+	if ( !(phy_reg & 0x01e0)) {
+		phy_reg|=db->PHY_reg4;
+		db->media_mode|=ULI526X_AUTO;
+	}
+	phy_write(db->ioaddr, db->phy_addr, 4, phy_reg, db->chip_id);
+
+ 	/* Restart Auto-Negotiation */
+	phy_write(db->ioaddr, db->phy_addr, 0, 0x1200, db->chip_id);
+	udelay(50);
+}
+
+
+/*
+ *	Process op-mode
+ *	AUTO mode : PHY controller in Auto-negotiation Mode
+ *	Force mode: PHY controller in force mode with HUB
+ *			N-way force capability with SWITCH
+ */
+
+static void uli526x_process_mode(struct uli526x_board_info *db)
+{
+	u16 phy_reg;
+
+	/* Full Duplex Mode Check */
+	if (db->op_mode & 0x4)
+		db->cr6_data |= CR6_FDM;	/* Set Full Duplex Bit */
+	else
+		db->cr6_data &= ~CR6_FDM;	/* Clear Full Duplex Bit */
+
+	update_cr6(db->cr6_data, db->ioaddr);
+
+	/* 10/100M phyxcer force mode handling */
+	if ( !(db->media_mode & 0x8)) {
+		/* Force Mode */
+		phy_reg = phy_read(db->ioaddr, db->phy_addr, 6, db->chip_id);
+		if ( !(phy_reg & 0x1) ) {
+			/* partner without N-Way capability */
+			phy_reg = 0x0;
+			switch(db->op_mode) {
+			case ULI526X_10MHF: phy_reg = 0x0; break;
+			case ULI526X_10MFD: phy_reg = 0x100; break;
+			case ULI526X_100MHF: phy_reg = 0x2000; break;
+			case ULI526X_100MFD: phy_reg = 0x2100; break;
+			}
+			phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id);
+			phy_write(db->ioaddr, db->phy_addr, 0, phy_reg, db->chip_id);
+		}
+	}
+}
+
+
+/*
+ *	Write a word to Phy register
+ */
+
+static void phy_write(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data, u32 chip_id)
+{
+	u16 i;
+	unsigned long ioaddr;
+
+	if(chip_id == PCI_ULI5263_ID)
+	{
+		phy_writeby_cr10(iobase, phy_addr, offset, phy_data);
+		return;
+	}
+	/* M5261/M5263 Chip */
+	ioaddr = iobase + DCR9;
+
+	/* Send synchronization clocks (preamble) to the Phy controller */
+	for (i = 0; i < 35; i++)
+		phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
+
+	/* Send start command(01) to Phy */
+	phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
+	phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
+
+	/* Send write command(01) to Phy */
+	phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
+	phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
+
+	/* Send Phy address */
+	for (i = 0x10; i > 0; i = i >> 1)
+		phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
+
+	/* Send register address */
+	for (i = 0x10; i > 0; i = i >> 1)
+		phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
+
+	/* write transition (turnaround) */
+	phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
+	phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
+
+	/* Write a word data to PHY controller */
+	for ( i = 0x8000; i > 0; i >>= 1)
+		phy_write_1bit(ioaddr, phy_data & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
+	
+}
+
+
+/*
+ *	Read a word of data from the phy register
+ */
+
+static u16 phy_read(unsigned long iobase, u8 phy_addr, u8 offset, u32 chip_id)
+{
+	int i;
+	u16 phy_data;
+	unsigned long ioaddr;
+
+	if(chip_id == PCI_ULI5263_ID)
+		return phy_readby_cr10(iobase, phy_addr, offset);
+	/* M5261/M5263 Chip */
+	ioaddr = iobase + DCR9;
+	
+	/* Send synchronization clocks (preamble) to the Phy controller */
+	for (i = 0; i < 35; i++)
+		phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
+
+	/* Send start command(01) to Phy */
+	phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
+	phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
+
+	/* Send read command(10) to Phy */
+	phy_write_1bit(ioaddr, PHY_DATA_1, chip_id);
+	phy_write_1bit(ioaddr, PHY_DATA_0, chip_id);
+
+	/* Send Phy address */
+	for (i = 0x10; i > 0; i = i >> 1)
+		phy_write_1bit(ioaddr, phy_addr & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
+
+	/* Send register address */
+	for (i = 0x10; i > 0; i = i >> 1)
+		phy_write_1bit(ioaddr, offset & i ? PHY_DATA_1 : PHY_DATA_0, chip_id);
+
+	/* Skip transition state */
+	phy_read_1bit(ioaddr, chip_id);
+
+	/* read 16bit data */
+	for (phy_data = 0, i = 0; i < 16; i++) {
+		phy_data <<= 1;
+		phy_data |= phy_read_1bit(ioaddr, chip_id);
+	}
+
+	return phy_data;
+}
+
+static u16 phy_readby_cr10(unsigned long iobase, u8 phy_addr, u8 offset)
+{
+	unsigned long ioaddr,cr10_value;
+	
+	ioaddr = iobase + DCR10;
+	cr10_value = phy_addr;
+	cr10_value = (cr10_value<<5) + offset;
+	cr10_value = (cr10_value<<16) + 0x08000000;
+	outl(cr10_value,ioaddr);
+	udelay(1);
+	while(1)
+	{
+		cr10_value = inl(ioaddr);
+		if(cr10_value&0x10000000)
+			break;
+	}
+	return (cr10_value&0x0ffff);
+}
+
+static void phy_writeby_cr10(unsigned long iobase, u8 phy_addr, u8 offset, u16 phy_data)
+{
+	unsigned long ioaddr,cr10_value;
+	
+	ioaddr = iobase + DCR10;
+	cr10_value = phy_addr;
+	cr10_value = (cr10_value<<5) + offset;
+	cr10_value = (cr10_value<<16) + 0x04000000 + phy_data;
+	outl(cr10_value,ioaddr);
+	udelay(1);
+}
+/*
+ *	Write one bit data to Phy Controller
+ */
+
+static void phy_write_1bit(unsigned long ioaddr, u32 phy_data, u32 chip_id)
+{
+	outl(phy_data , ioaddr);			/* MII Clock Low */
+	udelay(1);
+	outl(phy_data  | MDCLKH, ioaddr);	/* MII Clock High */
+	udelay(1);
+	outl(phy_data , ioaddr);			/* MII Clock Low */
+	udelay(1);
+}
+
+
+/*
+ *	Read one bit phy data from PHY controller
+ */
+
+static u16 phy_read_1bit(unsigned long ioaddr, u32 chip_id)
+{
+	u16 phy_data;
+	
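+	/*
+	 * Bit-bang one MDIO read cycle: judging by the constants, bit 16 of this
+	 * register is the MII clock (raised then lowered) and the data-in bit is
+	 * sampled from bit position 19.
+	 */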
+	outl(0x50000 , ioaddr);
+	udelay(1);
+	phy_data = ( inl(ioaddr) >> 19 ) & 0x1;
+	outl(0x40000 , ioaddr);
+	udelay(1);
+
+	return phy_data;
+}
+
+
+static struct pci_device_id uli526x_pci_tbl[] = {
+	{ 0x10B9, 0x5261, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5261_ID },
+	{ 0x10B9, 0x5263, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5263_ID },
+	{ 0, }
+};
+MODULE_DEVICE_TABLE(pci, uli526x_pci_tbl);
+
+
+static struct pci_driver uli526x_driver = {
+	.name		= "uli526x",
+	.id_table	= uli526x_pci_tbl,
+	.probe		= uli526x_init_one,
+	.remove		= __devexit_p(uli526x_remove_one),
+};
+
+MODULE_AUTHOR("Peer Chen, peer.chen@uli.com.tw");
+MODULE_DESCRIPTION("ULi M5261/M5263 fast ethernet driver");
+MODULE_LICENSE("GPL");
+
+MODULE_PARM(debug, "i");
+MODULE_PARM(mode, "i");
+MODULE_PARM(cr6set, "i");
+MODULE_PARM_DESC(debug, "ULi M5261/M5263 enable debugging (0-1)");
+MODULE_PARM_DESC(mode, "ULi M5261/M5263: Bit 0: 10/100Mbps, bit 2: duplex, bit 8: HomePNA");
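+MODULE_PARM_DESC(cr6set, "ULi M5261/M5263: user-supplied CR6 register value");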
+
+/*	Description:
+ *	When the user loads the module with insmod, the system invokes init_module()
+ *	to register the services.
+ */
+
+static int __init uli526x_init_module(void)
+{
+	int rc;
+
+	printk(version);
+	printed_version = 1;
+
+	ULI526X_DBUG(0, "init_module() ", debug);
+
+	if (debug)
+		uli526x_debug = debug;	/* set debug flag */
+	if (cr6set)
+		uli526x_cr6_user_set = cr6set;
+
+ 	switch(mode) {
+   	case ULI526X_10MHF:
+	case ULI526X_100MHF:
+	case ULI526X_10MFD:
+	case ULI526X_100MFD:
+		uli526x_media_mode = mode;
+		break;
+	default:uli526x_media_mode = ULI526X_AUTO;
+		break;
+	}
+
+	rc = pci_module_init(&uli526x_driver);
+	if (rc < 0)
+		return rc;
+
+	return 0;
+}
+
+
+/*
+ *	Description:
+ *	When the user removes the module with rmmod, the system invokes cleanup_module()
+ *	to unregister all registered services.
+ */
+
+static void __exit uli526x_cleanup_module(void)
+{
+	ULI526X_DBUG(0, "uli526x_clean_module() ", debug);
+	pci_unregister_driver(&uli526x_driver);
+}
+
+module_init(uli526x_init_module);
+module_exit(uli526x_cleanup_module);
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 6d864c5..6b0e646 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -40,7 +40,7 @@
  * FIXME: IO should be max 256 bytes.  However, since we may
  * have a P2P bridge below a cardbus bridge, we need 4K.
  */
-#define CARDBUS_IO_SIZE		(256)
+#define CARDBUS_IO_SIZE		(4*1024)
 #define CARDBUS_MEM_SIZE	(32*1024*1024)
 
 static void __devinit
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index 179c95c..3106526 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -189,7 +189,6 @@
 static void ahci_eng_timeout(struct ata_port *ap);
 static int ahci_port_start(struct ata_port *ap);
 static void ahci_port_stop(struct ata_port *ap);
-static void ahci_host_stop(struct ata_host_set *host_set);
 static void ahci_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
 static void ahci_qc_prep(struct ata_queued_cmd *qc);
 static u8 ahci_check_status(struct ata_port *ap);
@@ -242,7 +241,6 @@
 
 	.port_start		= ahci_port_start,
 	.port_stop		= ahci_port_stop,
-	.host_stop		= ahci_host_stop,
 };
 
 static struct ata_port_info ahci_port_info[] = {
@@ -296,17 +294,9 @@
 	return base + 0x100 + (port * 0x80);
 }
 
-static inline void *ahci_port_base (void *base, unsigned int port)
+static inline void __iomem *ahci_port_base (void __iomem *base, unsigned int port)
 {
-	return (void *) ahci_port_base_ul((unsigned long)base, port);
-}
-
-static void ahci_host_stop(struct ata_host_set *host_set)
-{
-	struct ahci_host_priv *hpriv = host_set->private_data;
-	kfree(hpriv);
-
-	ata_host_stop(host_set);
+	return (void __iomem *) ahci_port_base_ul((unsigned long)base, port);
 }
 
 static int ahci_port_start(struct ata_port *ap)
@@ -314,8 +304,9 @@
 	struct device *dev = ap->host_set->dev;
 	struct ahci_host_priv *hpriv = ap->host_set->private_data;
 	struct ahci_port_priv *pp;
-	void *mem, *mmio = ap->host_set->mmio_base;
-	void *port_mmio = ahci_port_base(mmio, ap->port_no);
+	void __iomem *mmio = ap->host_set->mmio_base;
+	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
+	void *mem;
 	dma_addr_t mem_dma;
 
 	pp = kmalloc(sizeof(*pp), GFP_KERNEL);
@@ -383,8 +374,8 @@
 {
 	struct device *dev = ap->host_set->dev;
 	struct ahci_port_priv *pp = ap->private_data;
-	void *mmio = ap->host_set->mmio_base;
-	void *port_mmio = ahci_port_base(mmio, ap->port_no);
+	void __iomem *mmio = ap->host_set->mmio_base;
+	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
 	u32 tmp;
 
 	tmp = readl(port_mmio + PORT_CMD);
@@ -546,8 +537,8 @@
 
 static void ahci_intr_error(struct ata_port *ap, u32 irq_stat)
 {
-	void *mmio = ap->host_set->mmio_base;
-	void *port_mmio = ahci_port_base(mmio, ap->port_no);
+	void __iomem *mmio = ap->host_set->mmio_base;
+	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
 	u32 tmp;
 	int work;
 
@@ -595,8 +586,8 @@
 static void ahci_eng_timeout(struct ata_port *ap)
 {
 	struct ata_host_set *host_set = ap->host_set;
-	void *mmio = host_set->mmio_base;
-	void *port_mmio = ahci_port_base(mmio, ap->port_no);
+	void __iomem *mmio = host_set->mmio_base;
+	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
 	struct ata_queued_cmd *qc;
 	unsigned long flags;
 
@@ -626,8 +617,8 @@
 
 static inline int ahci_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
 {
-	void *mmio = ap->host_set->mmio_base;
-	void *port_mmio = ahci_port_base(mmio, ap->port_no);
+	void __iomem *mmio = ap->host_set->mmio_base;
+	void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
 	u32 status, serr, ci;
 
 	serr = readl(port_mmio + PORT_SCR_ERR);
@@ -663,7 +654,7 @@
 	struct ata_host_set *host_set = dev_instance;
 	struct ahci_host_priv *hpriv;
 	unsigned int i, handled = 0;
-	void *mmio;
+	void __iomem *mmio;
 	u32 irq_stat, irq_ack = 0;
 
 	VPRINTK("ENTER\n");
@@ -709,7 +700,7 @@
 static int ahci_qc_issue(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
-	void *port_mmio = (void *) ap->ioaddr.cmd_addr;
+	void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
 
 	writel(1, port_mmio + PORT_CMD_ISSUE);
 	readl(port_mmio + PORT_CMD_ISSUE);	/* flush */
@@ -894,7 +885,7 @@
 {
 	struct ahci_host_priv *hpriv = probe_ent->private_data;
 	struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
-	void *mmio = probe_ent->mmio_base;
+	void __iomem *mmio = probe_ent->mmio_base;
 	u32 vers, cap, impl, speed;
 	const char *speed_s;
 	u16 cc;
@@ -967,7 +958,7 @@
 	struct ata_probe_ent *probe_ent = NULL;
 	struct ahci_host_priv *hpriv;
 	unsigned long base;
-	void *mmio_base;
+	void __iomem *mmio_base;
 	unsigned int board_idx = (unsigned int) ent->driver_data;
 	int have_msi, pci_dev_busy = 0;
 	int rc;
@@ -1004,8 +995,7 @@
 	probe_ent->dev = pci_dev_to_dev(pdev);
 	INIT_LIST_HEAD(&probe_ent->node);
 
-	mmio_base = ioremap(pci_resource_start(pdev, AHCI_PCI_BAR),
-		            pci_resource_len(pdev, AHCI_PCI_BAR));
+	mmio_base = pci_iomap(pdev, AHCI_PCI_BAR, 0);
 	if (mmio_base == NULL) {
 		rc = -ENOMEM;
 		goto err_out_free_ent;
@@ -1049,7 +1039,7 @@
 err_out_hpriv:
 	kfree(hpriv);
 err_out_iounmap:
-	iounmap(mmio_base);
+	pci_iounmap(pdev, mmio_base);
 err_out_free_ent:
 	kfree(probe_ent);
 err_out_msi:
@@ -1089,7 +1079,8 @@
 		scsi_host_put(ap->host);
 	}
 
-	host_set->ops->host_stop(host_set);
+	kfree(hpriv);
+	pci_iounmap(pdev, host_set->mmio_base);
 	kfree(host_set);
 
 	if (have_msi)
@@ -1106,7 +1097,6 @@
 	return pci_module_init(&ahci_pci_driver);
 }
 
-
 static void __exit ahci_exit(void)
 {
 	pci_unregister_driver(&ahci_pci_driver);
diff --git a/drivers/scsi/ata_piix.c b/drivers/scsi/ata_piix.c
index fb28c12..deec0ce 100644
--- a/drivers/scsi/ata_piix.c
+++ b/drivers/scsi/ata_piix.c
@@ -583,8 +583,7 @@
 #define AHCI_ENABLE (1 << 31)
 static int piix_disable_ahci(struct pci_dev *pdev)
 {
-	void *mmio;
-	unsigned long addr;
+	void __iomem *mmio;
 	u32 tmp;
 	int rc = 0;
 
@@ -592,11 +591,11 @@
 	 * works because this device is usually set up by BIOS.
 	 */
 
-	addr = pci_resource_start(pdev, AHCI_PCI_BAR);
-	if (!addr || !pci_resource_len(pdev, AHCI_PCI_BAR))
+	if (!pci_resource_start(pdev, AHCI_PCI_BAR) ||
+	    !pci_resource_len(pdev, AHCI_PCI_BAR))
 		return 0;
 
-	mmio = ioremap(addr, 64);
+	mmio = pci_iomap(pdev, AHCI_PCI_BAR, 64);
 	if (!mmio)
 		return -ENOMEM;
 
@@ -610,7 +609,7 @@
 			rc = -EIO;
 	}
 
-	iounmap(mmio);
+	pci_iounmap(pdev, mmio);
 	return rc;
 }
 
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index d824938..9fb9814 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -4204,6 +4204,15 @@
 
 
 
+#ifdef CONFIG_PCI
+
+void ata_pci_host_stop (struct ata_host_set *host_set)
+{
+	struct pci_dev *pdev = to_pci_dev(host_set->dev);
+
+	pci_iounmap(pdev, host_set->mmio_base);
+}
+
 /**
  *	ata_pci_init_native_mode - Initialize native-mode driver
  *	@pdev:  pci device to be initialized
@@ -4216,7 +4225,6 @@
  *	ata_probe_ent structure should then be freed with kfree().
  */
 
-#ifdef CONFIG_PCI
 struct ata_probe_ent *
 ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port)
 {
@@ -4599,6 +4607,7 @@
 
 #ifdef CONFIG_PCI
 EXPORT_SYMBOL_GPL(pci_test_config_bits);
+EXPORT_SYMBOL_GPL(ata_pci_host_stop);
 EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
 EXPORT_SYMBOL_GPL(ata_pci_init_one);
 EXPORT_SYMBOL_GPL(ata_pci_remove_one);
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c
index 03d9bc6..a1d62de 100644
--- a/drivers/scsi/sata_nv.c
+++ b/drivers/scsi/sata_nv.c
@@ -351,6 +351,7 @@
 static void nv_host_stop (struct ata_host_set *host_set)
 {
 	struct nv_host *host = host_set->private_data;
+	struct pci_dev *pdev = to_pci_dev(host_set->dev);
 
 	// Disable hotplug event interrupts.
 	if (host->host_desc->disable_hotplug)
@@ -358,7 +359,8 @@
 
 	kfree(host);
 
-	ata_host_stop(host_set);
+	if (host_set->mmio_base)
+		pci_iounmap(pdev, host_set->mmio_base);
 }
 
 static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -420,8 +422,7 @@
 	if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO) {
 		unsigned long base;
 
-		probe_ent->mmio_base = ioremap(pci_resource_start(pdev, 5),
-				pci_resource_len(pdev, 5));
+		probe_ent->mmio_base = pci_iomap(pdev, 5, 0);
 		if (probe_ent->mmio_base == NULL) {
 			rc = -EIO;
 			goto err_out_free_host;
@@ -457,7 +458,7 @@
 
 err_out_iounmap:
 	if (host->host_flags & NV_HOST_FLAGS_SCR_MMIO)
-		iounmap(probe_ent->mmio_base);
+		pci_iounmap(pdev, probe_ent->mmio_base);
 err_out_free_host:
 	kfree(host);
 err_out_free_ent:
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c
index 7c4f6ec..538ad72 100644
--- a/drivers/scsi/sata_promise.c
+++ b/drivers/scsi/sata_promise.c
@@ -92,6 +92,7 @@
 static void pdc_irq_clear(struct ata_port *ap);
 static int pdc_qc_issue_prot(struct ata_queued_cmd *qc);
 
+
 static Scsi_Host_Template pdc_ata_sht = {
 	.module			= THIS_MODULE,
 	.name			= DRV_NAME,
@@ -132,7 +133,7 @@
 	.scr_write		= pdc_sata_scr_write,
 	.port_start		= pdc_port_start,
 	.port_stop		= pdc_port_stop,
-	.host_stop		= ata_host_stop,
+	.host_stop		= ata_pci_host_stop,
 };
 
 static struct ata_port_operations pdc_pata_ops = {
@@ -153,7 +154,7 @@
 
 	.port_start		= pdc_port_start,
 	.port_stop		= pdc_port_stop,
-	.host_stop		= ata_host_stop,
+	.host_stop		= ata_pci_host_stop,
 };
 
 static struct ata_port_info pdc_port_info[] = {
@@ -282,7 +283,7 @@
 
 static void pdc_reset_port(struct ata_port *ap)
 {
-	void *mmio = (void *) ap->ioaddr.cmd_addr + PDC_CTLSTAT;
+	void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_CTLSTAT;
 	unsigned int i;
 	u32 tmp;
 
@@ -418,7 +419,7 @@
 	u8 status;
 	unsigned int handled = 0, have_err = 0;
 	u32 tmp;
-	void *mmio = (void *) ap->ioaddr.cmd_addr + PDC_GLOBAL_CTL;
+	void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_GLOBAL_CTL;
 
 	tmp = readl(mmio);
 	if (tmp & PDC_ERR_MASK) {
@@ -447,7 +448,7 @@
 static void pdc_irq_clear(struct ata_port *ap)
 {
 	struct ata_host_set *host_set = ap->host_set;
-	void *mmio = host_set->mmio_base;
+	void __iomem *mmio = host_set->mmio_base;
 
 	readl(mmio + PDC_INT_SEQMASK);
 }
@@ -459,7 +460,7 @@
 	u32 mask = 0;
 	unsigned int i, tmp;
 	unsigned int handled = 0;
-	void *mmio_base;
+	void __iomem *mmio_base;
 
 	VPRINTK("ENTER\n");
 
@@ -581,7 +582,7 @@
 
 static void pdc_host_init(unsigned int chip_id, struct ata_probe_ent *pe)
 {
-	void *mmio = pe->mmio_base;
+	void __iomem *mmio = pe->mmio_base;
 	u32 tmp;
 
 	/*
@@ -624,7 +625,7 @@
 	static int printed_version;
 	struct ata_probe_ent *probe_ent = NULL;
 	unsigned long base;
-	void *mmio_base;
+	void __iomem *mmio_base;
 	unsigned int board_idx = (unsigned int) ent->driver_data;
 	int pci_dev_busy = 0;
 	int rc;
@@ -663,8 +664,7 @@
 	probe_ent->dev = pci_dev_to_dev(pdev);
 	INIT_LIST_HEAD(&probe_ent->node);
 
-	mmio_base = ioremap(pci_resource_start(pdev, 3),
-		            pci_resource_len(pdev, 3));
+	mmio_base = pci_iomap(pdev, 3, 0);
 	if (mmio_base == NULL) {
 		rc = -ENOMEM;
 		goto err_out_free_ent;
diff --git a/drivers/scsi/sata_qstor.c b/drivers/scsi/sata_qstor.c
index 9c99ab4..029c248 100644
--- a/drivers/scsi/sata_qstor.c
+++ b/drivers/scsi/sata_qstor.c
@@ -538,11 +538,12 @@
 static void qs_host_stop(struct ata_host_set *host_set)
 {
 	void __iomem *mmio_base = host_set->mmio_base;
+	struct pci_dev *pdev = to_pci_dev(host_set->dev);
 
 	writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
 	writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */
 
-	ata_host_stop(host_set);
+	pci_iounmap(pdev, mmio_base);
 }
 
 static void qs_host_init(unsigned int chip_id, struct ata_probe_ent *pe)
@@ -646,8 +647,7 @@
 		goto err_out_regions;
 	}
 
-	mmio_base = ioremap(pci_resource_start(pdev, 4),
-		            pci_resource_len(pdev, 4));
+	mmio_base = pci_iomap(pdev, 4, 0);
 	if (mmio_base == NULL) {
 		rc = -ENOMEM;
 		goto err_out_regions;
@@ -697,7 +697,7 @@
 	return 0;
 
 err_out_iounmap:
-	iounmap(mmio_base);
+	pci_iounmap(pdev, mmio_base);
 err_out_regions:
 	pci_release_regions(pdev);
 err_out:
diff --git a/drivers/scsi/sata_sil.c b/drivers/scsi/sata_sil.c
index 71d4954..ba98a17 100644
--- a/drivers/scsi/sata_sil.c
+++ b/drivers/scsi/sata_sil.c
@@ -86,6 +86,7 @@
 static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val);
 static void sil_post_set_mode (struct ata_port *ap);
 
+
 static struct pci_device_id sil_pci_tbl[] = {
 	{ 0x1095, 0x3112, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w },
 	{ 0x1095, 0x0240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112_m15w },
@@ -172,7 +173,7 @@
 	.scr_write		= sil_scr_write,
 	.port_start		= ata_port_start,
 	.port_stop		= ata_port_stop,
-	.host_stop		= ata_host_stop,
+	.host_stop		= ata_pci_host_stop,
 };
 
 static struct ata_port_info sil_port_info[] = {
@@ -231,6 +232,7 @@
 MODULE_DEVICE_TABLE(pci, sil_pci_tbl);
 MODULE_VERSION(DRV_VERSION);
 
+
 static unsigned char sil_get_device_cache_line(struct pci_dev *pdev)
 {
 	u8 cache_line = 0;
@@ -242,7 +244,8 @@
 {
 	struct ata_host_set *host_set = ap->host_set;
 	struct ata_device *dev;
-	void *addr = host_set->mmio_base + sil_port[ap->port_no].xfer_mode;
+	void __iomem *addr =
+		host_set->mmio_base + sil_port[ap->port_no].xfer_mode;
 	u32 tmp, dev_mode[2];
 	unsigned int i;
 
@@ -375,7 +378,7 @@
 	static int printed_version;
 	struct ata_probe_ent *probe_ent = NULL;
 	unsigned long base;
-	void *mmio_base;
+	void __iomem *mmio_base;
 	int rc;
 	unsigned int i;
 	int pci_dev_busy = 0;
@@ -425,8 +428,7 @@
        	probe_ent->irq_flags = SA_SHIRQ;
 	probe_ent->host_flags = sil_port_info[ent->driver_data].host_flags;
 
-	mmio_base = ioremap(pci_resource_start(pdev, 5),
-		            pci_resource_len(pdev, 5));
+	mmio_base = pci_iomap(pdev, 5, 0);
 	if (mmio_base == NULL) {
 		rc = -ENOMEM;
 		goto err_out_free_ent;
diff --git a/drivers/scsi/sata_svw.c b/drivers/scsi/sata_svw.c
index 19d3bb3..d89d968 100644
--- a/drivers/scsi/sata_svw.c
+++ b/drivers/scsi/sata_svw.c
@@ -318,7 +318,7 @@
 	.scr_write		= k2_sata_scr_write,
 	.port_start		= ata_port_start,
 	.port_stop		= ata_port_stop,
-	.host_stop		= ata_host_stop,
+	.host_stop		= ata_pci_host_stop,
 };
 
 static void k2_sata_setup_port(struct ata_ioports *port, unsigned long base)
@@ -346,7 +346,7 @@
 	static int printed_version;
 	struct ata_probe_ent *probe_ent = NULL;
 	unsigned long base;
-	void *mmio_base;
+	void __iomem *mmio_base;
 	int pci_dev_busy = 0;
 	int rc;
 	int i;
@@ -392,8 +392,7 @@
 	probe_ent->dev = pci_dev_to_dev(pdev);
 	INIT_LIST_HEAD(&probe_ent->node);
 
-	mmio_base = ioremap(pci_resource_start(pdev, 5),
-		            pci_resource_len(pdev, 5));
+	mmio_base = pci_iomap(pdev, 5, 0);
 	if (mmio_base == NULL) {
 		rc = -ENOMEM;
 		goto err_out_free_ent;
diff --git a/drivers/scsi/sata_sx4.c b/drivers/scsi/sata_sx4.c
index c72fcc4..540a851 100644
--- a/drivers/scsi/sata_sx4.c
+++ b/drivers/scsi/sata_sx4.c
@@ -245,13 +245,14 @@
 
 static void pdc20621_host_stop(struct ata_host_set *host_set)
 {
+	struct pci_dev *pdev = to_pci_dev(host_set->dev);
 	struct pdc_host_priv *hpriv = host_set->private_data;
 	void *dimm_mmio = hpriv->dimm_mmio;
 
-	iounmap(dimm_mmio);
+	pci_iounmap(pdev, dimm_mmio);
 	kfree(hpriv);
 
-	ata_host_stop(host_set);
+	pci_iounmap(pdev, host_set->mmio_base);
 }
 
 static int pdc_port_start(struct ata_port *ap)
@@ -451,9 +452,9 @@
 	struct scatterlist *sg = qc->sg;
 	struct ata_port *ap = qc->ap;
 	struct pdc_port_priv *pp = ap->private_data;
-	void *mmio = ap->host_set->mmio_base;
+	void __iomem *mmio = ap->host_set->mmio_base;
 	struct pdc_host_priv *hpriv = ap->host_set->private_data;
-	void *dimm_mmio = hpriv->dimm_mmio;
+	void __iomem *dimm_mmio = hpriv->dimm_mmio;
 	unsigned int portno = ap->port_no;
 	unsigned int i, last, idx, total_len = 0, sgt_len;
 	u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];
@@ -513,9 +514,9 @@
 {
 	struct ata_port *ap = qc->ap;
 	struct pdc_port_priv *pp = ap->private_data;
-	void *mmio = ap->host_set->mmio_base;
+	void __iomem *mmio = ap->host_set->mmio_base;
 	struct pdc_host_priv *hpriv = ap->host_set->private_data;
-	void *dimm_mmio = hpriv->dimm_mmio;
+	void __iomem *dimm_mmio = hpriv->dimm_mmio;
 	unsigned int portno = ap->port_no;
 	unsigned int i;
 
@@ -565,7 +566,7 @@
 {
 	struct ata_port *ap = qc->ap;
 	struct ata_host_set *host_set = ap->host_set;
-	void *mmio = host_set->mmio_base;
+	void __iomem *mmio = host_set->mmio_base;
 
 	/* hard-code chip #0 */
 	mmio += PDC_CHIP0_OFS;
@@ -639,7 +640,7 @@
 	struct ata_port *ap = qc->ap;
 	struct ata_host_set *host_set = ap->host_set;
 	unsigned int port_no = ap->port_no;
-	void *mmio = host_set->mmio_base;
+	void __iomem *mmio = host_set->mmio_base;
 	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
 	u8 seq = (u8) (port_no + 1);
 	unsigned int port_ofs;
@@ -699,7 +700,7 @@
 static inline unsigned int pdc20621_host_intr( struct ata_port *ap,
                                           struct ata_queued_cmd *qc,
 					  unsigned int doing_hdma,
-					  void *mmio)
+					  void __iomem *mmio)
 {
 	unsigned int port_no = ap->port_no;
 	unsigned int port_ofs =
@@ -778,7 +779,7 @@
 static void pdc20621_irq_clear(struct ata_port *ap)
 {
 	struct ata_host_set *host_set = ap->host_set;
-	void *mmio = host_set->mmio_base;
+	void __iomem *mmio = host_set->mmio_base;
 
 	mmio += PDC_CHIP0_OFS;
 
@@ -792,7 +793,7 @@
 	u32 mask = 0;
 	unsigned int i, tmp, port_no;
 	unsigned int handled = 0;
-	void *mmio_base;
+	void __iomem *mmio_base;
 
 	VPRINTK("ENTER\n");
 
@@ -940,9 +941,9 @@
 	u16 idx;
 	u8 page_mask;
 	long dist;
-	void *mmio = pe->mmio_base;
+	void __iomem *mmio = pe->mmio_base;
 	struct pdc_host_priv *hpriv = pe->private_data;
-	void *dimm_mmio = hpriv->dimm_mmio;
+	void __iomem *dimm_mmio = hpriv->dimm_mmio;
 
 	/* hard-code chip #0 */
 	mmio += PDC_CHIP0_OFS;
@@ -996,9 +997,9 @@
 	u16 idx;
 	u8 page_mask;
 	long dist;
-	void *mmio = pe->mmio_base;
+	void __iomem *mmio = pe->mmio_base;
 	struct pdc_host_priv *hpriv = pe->private_data;
-	void *dimm_mmio = hpriv->dimm_mmio;
+	void __iomem *dimm_mmio = hpriv->dimm_mmio;
 
 	/* hard-code chip #0 */
 	mmio += PDC_CHIP0_OFS;
@@ -1044,7 +1045,7 @@
 static unsigned int pdc20621_i2c_read(struct ata_probe_ent *pe, u32 device,
 				      u32 subaddr, u32 *pdata)
 {
-	void *mmio = pe->mmio_base;
+	void __iomem *mmio = pe->mmio_base;
 	u32 i2creg  = 0;
 	u32 status;
 	u32 count =0;
@@ -1103,7 +1104,7 @@
 	u32 data = 0;
    	int size, i;
    	u8 bdimmsize;
-   	void *mmio = pe->mmio_base;
+   	void __iomem *mmio = pe->mmio_base;
 	static const struct {
 		unsigned int reg;
 		unsigned int ofs;
@@ -1166,7 +1167,7 @@
 {
 	u32 data, spd0;
    	int error, i;
-   	void *mmio = pe->mmio_base;
+   	void __iomem *mmio = pe->mmio_base;
 
 	/* hard-code chip #0 */
    	mmio += PDC_CHIP0_OFS;
@@ -1220,7 +1221,7 @@
 	u32 ticks=0;
 	u32 clock=0;
 	u32 fparam=0;
-   	void *mmio = pe->mmio_base;
+   	void __iomem *mmio = pe->mmio_base;
 
 	/* hard-code chip #0 */
    	mmio += PDC_CHIP0_OFS;
@@ -1344,7 +1345,7 @@
 static void pdc_20621_init(struct ata_probe_ent *pe)
 {
 	u32 tmp;
-	void *mmio = pe->mmio_base;
+	void __iomem *mmio = pe->mmio_base;
 
 	/* hard-code chip #0 */
 	mmio += PDC_CHIP0_OFS;
@@ -1377,7 +1378,8 @@
 	static int printed_version;
 	struct ata_probe_ent *probe_ent = NULL;
 	unsigned long base;
-	void *mmio_base, *dimm_mmio = NULL;
+	void __iomem *mmio_base;
+	void __iomem *dimm_mmio = NULL;
 	struct pdc_host_priv *hpriv = NULL;
 	unsigned int board_idx = (unsigned int) ent->driver_data;
 	int pci_dev_busy = 0;
@@ -1417,8 +1419,7 @@
 	probe_ent->dev = pci_dev_to_dev(pdev);
 	INIT_LIST_HEAD(&probe_ent->node);
 
-	mmio_base = ioremap(pci_resource_start(pdev, 3),
-		            pci_resource_len(pdev, 3));
+	mmio_base = pci_iomap(pdev, 3, 0);
 	if (mmio_base == NULL) {
 		rc = -ENOMEM;
 		goto err_out_free_ent;
@@ -1432,8 +1433,7 @@
 	}
 	memset(hpriv, 0, sizeof(*hpriv));
 
-	dimm_mmio = ioremap(pci_resource_start(pdev, 4),
-			    pci_resource_len(pdev, 4));
+	dimm_mmio = pci_iomap(pdev, 4, 0);
 	if (!dimm_mmio) {
 		kfree(hpriv);
 		rc = -ENOMEM;
@@ -1480,9 +1480,9 @@
 
 err_out_iounmap_dimm:		/* only get to this label if 20621 */
 	kfree(hpriv);
-	iounmap(dimm_mmio);
+	pci_iounmap(pdev, dimm_mmio);
 err_out_iounmap:
-	iounmap(mmio_base);
+	pci_iounmap(pdev, mmio_base);
 err_out_free_ent:
 	kfree(probe_ent);
 err_out_regions:
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c
index 3985f34..cf94e01 100644
--- a/drivers/scsi/sata_vsc.c
+++ b/drivers/scsi/sata_vsc.c
@@ -252,7 +252,7 @@
 	.scr_write		= vsc_sata_scr_write,
 	.port_start		= ata_port_start,
 	.port_stop		= ata_port_stop,
-	.host_stop		= ata_host_stop,
+	.host_stop		= ata_pci_host_stop,
 };
 
 static void __devinit vsc_sata_setup_port(struct ata_ioports *port, unsigned long base)
@@ -326,8 +326,7 @@
 	probe_ent->dev = pci_dev_to_dev(pdev);
 	INIT_LIST_HEAD(&probe_ent->node);
 
-	mmio_base = ioremap(pci_resource_start(pdev, 0),
-		            pci_resource_len(pdev, 0));
+	mmio_base = pci_iomap(pdev, 0, 0);
 	if (mmio_base == NULL) {
 		rc = -ENOMEM;
 		goto err_out_free_ent;
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index 1cae14e..49ccde3 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -1390,6 +1390,8 @@
 
 	jfs_info("jfs_lookup: name = %s", name);
 
+	if (JFS_SBI(dip->i_sb)->mntflag & JFS_OS2)
+		dentry->d_op = &jfs_ci_dentry_operations;
 
 	if ((name[0] == '.') && (len == 1))
 		inum = dip->i_ino;
@@ -1417,9 +1419,6 @@
 		return ERR_PTR(-EACCES);
 	}
 
-	if (JFS_SBI(dip->i_sb)->mntflag & JFS_OS2)
-		dentry->d_op = &jfs_ci_dentry_operations;
-
 	dentry = d_splice_alias(ip, dentry);
 
 	if (dentry && (JFS_SBI(dip->i_sb)->mntflag & JFS_OS2))
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 1eaba40..022105c 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -449,6 +449,7 @@
 	unsigned long		val;
 };
 
+extern void ata_pci_host_stop (struct ata_host_set *host_set);
 extern struct ata_probe_ent *
 ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port);
 extern int pci_test_config_bits(struct pci_dev *pdev, struct pci_bits *bits);
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 499a532..d513c16 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2145,6 +2145,7 @@
 #define PCI_DEVICE_ID_ENE_1225		0x1225
 #define PCI_DEVICE_ID_ENE_1410		0x1410
 #define PCI_DEVICE_ID_ENE_1420		0x1420
+#define PCI_VENDOR_ID_CHELSIO		0x1425
 
 #define PCI_VENDOR_ID_SYBA		0x1592
 #define PCI_DEVICE_ID_SYBA_2P_EPP	0x0782
diff --git a/include/linux/sound.h b/include/linux/sound.h
index 428f597..72b9af4 100644
--- a/include/linux/sound.h
+++ b/include/linux/sound.h
@@ -29,7 +29,9 @@
  *	Sound core interface functions
  */
  
+struct device;
 extern int register_sound_special(struct file_operations *fops, int unit);
+extern int register_sound_special_device(struct file_operations *fops, int unit, struct device *dev);
 extern int register_sound_mixer(struct file_operations *fops, int dev);
 extern int register_sound_midi(struct file_operations *fops, int dev);
 extern int register_sound_dsp(struct file_operations *fops, int dev);
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 8a87a3a..651f824 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -147,7 +147,7 @@
 	}
 #ifdef INET_CSK_DEBUG
 	else {
-		pr_debug(inet_csk_timer_bug_msg);
+		pr_debug("%s", inet_csk_timer_bug_msg);
 	}
 #endif
 }
@@ -180,7 +180,7 @@
 	}
 #ifdef INET_CSK_DEBUG
 	else {
-		pr_debug(inet_csk_timer_bug_msg);
+		pr_debug("%s", inet_csk_timer_bug_msg);
 	}
 #endif
 }
diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
index 1309c12..2857cf0 100644
--- a/include/sound/ac97_codec.h
+++ b/include/sound/ac97_codec.h
@@ -26,6 +26,7 @@
  */
 
 #include <linux/bitops.h>
+#include <linux/device.h>
 #include "pcm.h"
 #include "control.h"
 #include "info.h"
@@ -374,6 +375,9 @@
 #define AC97_HAS_NO_PC_BEEP	(1<<12) /* no PC Beep volume */
 #define AC97_HAS_NO_VIDEO	(1<<13) /* no Video volume */
 #define AC97_HAS_NO_CD		(1<<14) /* no CD volume */
+#define AC97_HAS_NO_MIC	(1<<15) /* no MIC volume */
+#define AC97_HAS_NO_TONE	(1<<16) /* no Tone volume */
+#define AC97_HAS_NO_STD_PCM	(1<<17)	/* no standard AC97 PCM volume and mute */
 
 /* rates indexes */
 #define AC97_RATES_FRONT_DAC	0
@@ -520,6 +524,7 @@
 	/* jack-sharing info */
 	unsigned char indep_surround;
 	unsigned char channel_mode;
+	struct device dev;
 };
 
 /* conditions */
@@ -599,4 +604,8 @@
 	unsigned short mask;
 	const char **texts;
 };
+
+/* ad hoc AC97 device driver access */
+extern struct bus_type ac97_bus_type;
+
 #endif /* __SOUND_AC97_CODEC_H */
diff --git a/include/sound/ad1816a.h b/include/sound/ad1816a.h
index 395978e..ca2e0e4 100644
--- a/include/sound/ad1816a.h
+++ b/include/sound/ad1816a.h
@@ -138,6 +138,7 @@
 	spinlock_t lock;
 
 	unsigned short mode;
+	unsigned int clock_freq;
 
 	snd_card_t *card;
 	snd_pcm_t *pcm;
diff --git a/include/sound/asound.h b/include/sound/asound.h
index 9974f83..8e552d6 100644
--- a/include/sound/asound.h
+++ b/include/sound/asound.h
@@ -560,7 +560,7 @@
  *  Timer section - /dev/snd/timer
  */
 
-#define SNDRV_TIMER_VERSION		SNDRV_PROTOCOL_VERSION(2, 0, 4)
+#define SNDRV_TIMER_VERSION		SNDRV_PROTOCOL_VERSION(2, 0, 5)
 
 enum sndrv_timer_class {
 	SNDRV_TIMER_CLASS_NONE = -1,
@@ -693,11 +693,15 @@
 	SNDRV_TIMER_EVENT_CONTINUE,		/* val = resolution in ns */
 	SNDRV_TIMER_EVENT_PAUSE,		/* val = 0 */
 	SNDRV_TIMER_EVENT_EARLY,		/* val = 0, early event */
+	SNDRV_TIMER_EVENT_SUSPEND,		/* val = 0 */
+	SNDRV_TIMER_EVENT_RESUME,		/* val = resolution in ns */
 	/* master timer events for slave timer instances */
 	SNDRV_TIMER_EVENT_MSTART = SNDRV_TIMER_EVENT_START + 10,
 	SNDRV_TIMER_EVENT_MSTOP = SNDRV_TIMER_EVENT_STOP + 10,
 	SNDRV_TIMER_EVENT_MCONTINUE = SNDRV_TIMER_EVENT_CONTINUE + 10,
 	SNDRV_TIMER_EVENT_MPAUSE = SNDRV_TIMER_EVENT_PAUSE + 10,
+	SNDRV_TIMER_EVENT_MSUSPEND = SNDRV_TIMER_EVENT_SUSPEND + 10,
+	SNDRV_TIMER_EVENT_MRESUME = SNDRV_TIMER_EVENT_RESUME + 10,
 };
 
 struct sndrv_timer_tread {
diff --git a/include/sound/cs46xx.h b/include/sound/cs46xx.h
index 182dd27..9b94510 100644
--- a/include/sound/cs46xx.h
+++ b/include/sound/cs46xx.h
@@ -1748,7 +1748,7 @@
 int snd_cs46xx_pcm_rear(cs46xx_t *chip, int device, snd_pcm_t **rpcm);
 int snd_cs46xx_pcm_iec958(cs46xx_t *chip, int device, snd_pcm_t **rpcm);
 int snd_cs46xx_pcm_center_lfe(cs46xx_t *chip, int device, snd_pcm_t **rpcm);
-int snd_cs46xx_mixer(cs46xx_t *chip);
+int snd_cs46xx_mixer(cs46xx_t *chip, int spdif_device);
 int snd_cs46xx_midi(cs46xx_t *chip, int device, snd_rawmidi_t **rmidi);
 int snd_cs46xx_start_dsp(cs46xx_t *chip);
 int snd_cs46xx_gameport(cs46xx_t *chip);
diff --git a/include/sound/emu10k1.h b/include/sound/emu10k1.h
index c2ef3f0..4e3993d 100644
--- a/include/sound/emu10k1.h
+++ b/include/sound/emu10k1.h
@@ -1178,7 +1178,7 @@
 int snd_p16v_mixer(emu10k1_t * emu);
 int snd_emu10k1_pcm_multi(emu10k1_t * emu, int device, snd_pcm_t ** rpcm);
 int snd_emu10k1_fx8010_pcm(emu10k1_t * emu, int device, snd_pcm_t ** rpcm);
-int snd_emu10k1_mixer(emu10k1_t * emu);
+int snd_emu10k1_mixer(emu10k1_t * emu, int pcm_device, int multi_device);
 int snd_emu10k1_timer(emu10k1_t * emu, int device);
 int snd_emu10k1_fx8010_new(emu10k1_t *emu, int device, snd_hwdep_t ** rhwdep);
 
diff --git a/include/sound/gus.h b/include/sound/gus.h
index b4b461c..7000d9d 100644
--- a/include/sound/gus.h
+++ b/include/sound/gus.h
@@ -512,13 +512,13 @@
 
 extern void snd_gf1_write8(snd_gus_card_t * gus, unsigned char reg, unsigned char data);
 extern unsigned char snd_gf1_look8(snd_gus_card_t * gus, unsigned char reg);
-extern inline unsigned char snd_gf1_read8(snd_gus_card_t * gus, unsigned char reg)
+static inline unsigned char snd_gf1_read8(snd_gus_card_t * gus, unsigned char reg)
 {
 	return snd_gf1_look8(gus, reg | 0x80);
 }
 extern void snd_gf1_write16(snd_gus_card_t * gus, unsigned char reg, unsigned int data);
 extern unsigned short snd_gf1_look16(snd_gus_card_t * gus, unsigned char reg);
-extern inline unsigned short snd_gf1_read16(snd_gus_card_t * gus, unsigned char reg)
+static inline unsigned short snd_gf1_read16(snd_gus_card_t * gus, unsigned char reg)
 {
 	return snd_gf1_look16(gus, reg | 0x80);
 }
@@ -532,12 +532,12 @@
 extern void snd_gf1_i_write8(snd_gus_card_t * gus, unsigned char reg, unsigned char data);
 extern unsigned char snd_gf1_i_look8(snd_gus_card_t * gus, unsigned char reg);
 extern void snd_gf1_i_write16(snd_gus_card_t * gus, unsigned char reg, unsigned int data);
-extern inline unsigned char snd_gf1_i_read8(snd_gus_card_t * gus, unsigned char reg)
+static inline unsigned char snd_gf1_i_read8(snd_gus_card_t * gus, unsigned char reg)
 {
 	return snd_gf1_i_look8(gus, reg | 0x80);
 }
 extern unsigned short snd_gf1_i_look16(snd_gus_card_t * gus, unsigned char reg);
-extern inline unsigned short snd_gf1_i_read16(snd_gus_card_t * gus, unsigned char reg)
+static inline unsigned short snd_gf1_i_read16(snd_gus_card_t * gus, unsigned char reg)
 {
 	return snd_gf1_i_look16(gus, reg | 0x80);
 }
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index d935417..fa23ebf 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -379,7 +379,6 @@
 	unsigned int dma_buf_id;
 	size_t dma_max;
 	/* -- hardware operations -- */
-	unsigned int open_flag: 1;	/* lowlevel device has been opened */
 	snd_pcm_ops_t *ops;
 	/* -- runtime information -- */
 	snd_pcm_runtime_t *runtime;
diff --git a/include/sound/version.h b/include/sound/version.h
index c085136..8d19bfa 100644
--- a/include/sound/version.h
+++ b/include/sound/version.h
@@ -1,3 +1,3 @@
 /* include/version.h.  Generated by configure.  */
-#define CONFIG_SND_VERSION "1.0.9b"
-#define CONFIG_SND_DATE " (Thu Jul 28 12:20:13 2005 UTC)"
+#define CONFIG_SND_VERSION "1.0.10rc1"
+#define CONFIG_SND_DATE " (Tue Aug 30 05:31:08 2005 UTC)"
diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
index 4b57068..9a3c1e6 100644
--- a/include/sound/ymfpci.h
+++ b/include/sound/ymfpci.h
@@ -295,6 +295,7 @@
 	unsigned int running: 1;
 	unsigned int output_front: 1;
 	unsigned int output_rear: 1;
+	unsigned int update_pcm_vol;
 	u32 period_size;		/* cached from runtime->period_size */
 	u32 buffer_size;		/* cached from runtime->buffer_size */
 	u32 period_pos;
@@ -367,6 +368,11 @@
 	int mode_dup4ch;
 	int rear_opened;
 	int spdif_opened;
+	struct {
+		u16 left;
+		u16 right;
+		snd_kcontrol_t *ctl;
+	} pcm_mixer[32];
 
 	spinlock_t reg_lock;
 	spinlock_t voice_lock;
diff --git a/sound/arm/pxa2xx-ac97.c b/sound/arm/pxa2xx-ac97.c
index 4605230..29450be 100644
--- a/sound/arm/pxa2xx-ac97.c
+++ b/sound/arm/pxa2xx-ac97.c
@@ -132,9 +132,9 @@
 		udelay(10);
 		GCR |= GCR_WARM_RST;
 		pxa_gpio_mode(113 | GPIO_ALT_FN_2_OUT);
-		udelay(50);
+		udelay(500);
 #else
-		GCR |= GCR_WARM_RST|GCR_PRIRDY_IEN|GCR_SECRDY_IEN;;
+		GCR |= GCR_WARM_RST|GCR_PRIRDY_IEN|GCR_SECRDY_IEN;
 		wait_event_timeout(gsr_wq, gsr_bits & (GSR_PCR | GSR_SCR), 1);
 #endif			
 
@@ -261,7 +261,7 @@
 	return 0;
 }
 
-static int pxa2xx_ac97_do_resume(snd_card_t *card, unsigned int state)
+static int pxa2xx_ac97_do_resume(snd_card_t *card)
 {
 	if (card->power_state != SNDRV_CTL_POWER_D0) {
 		pxa2xx_audio_ops_t *platform_ops = card->dev->platform_data;
@@ -275,13 +275,13 @@
 	return 0;
 }
 
-static int pxa2xx_ac97_suspend(struct device *_dev, u32 state, u32 level)
+static int pxa2xx_ac97_suspend(struct device *_dev, pm_message_t state, u32 level)
 {
 	snd_card_t *card = dev_get_drvdata(_dev);
 	int ret = 0;
 
 	if (card && level == SUSPEND_DISABLE)
-		ret = pxa2xx_ac97_do_suspend(card, SNDRV_CTL_POWER_D3cold);
+		ret = pxa2xx_ac97_do_suspend(card, PMSG_SUSPEND);
 
 	return ret;
 }
@@ -292,7 +292,7 @@
 	int ret = 0;
 
 	if (card && level == RESUME_ENABLE)
-		ret = pxa2xx_ac97_do_resume(card, SNDRV_CTL_POWER_D0);
+		ret = pxa2xx_ac97_do_resume(card);
 
 	return ret;
 }
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c
index 0213256..39a54a4 100644
--- a/sound/core/memalloc.c
+++ b/sound/core/memalloc.c
@@ -512,7 +512,7 @@
  * proc file interface
  */
 #define SND_MEM_PROC_FILE	"driver/snd-page-alloc"
-struct proc_dir_entry *snd_mem_proc;
+static struct proc_dir_entry *snd_mem_proc;
 
 static int snd_mem_proc_read(char *page, char **start, off_t off,
 			     int count, int *eof, void *data)
@@ -655,8 +655,7 @@
 
 static void __exit snd_mem_exit(void)
 {
-	if (snd_mem_proc)
-		remove_proc_entry(SND_MEM_PROC_FILE, NULL);
+	remove_proc_entry(SND_MEM_PROC_FILE, NULL);
 	free_all_reserved_pages();
 	if (snd_allocated_pages > 0)
 		printk(KERN_ERR "snd-malloc: Memory leak?  pages not freed = %li\n", snd_allocated_pages);
diff --git a/sound/core/memory.c b/sound/core/memory.c
index f689557..1622893 100644
--- a/sound/core/memory.c
+++ b/sound/core/memory.c
@@ -56,7 +56,7 @@
 #define VMALLOC_MAGIC 0x87654320
 static snd_info_entry_t *snd_memory_info_entry;
 
-void snd_memory_init(void)
+void __init snd_memory_init(void)
 {
 	snd_alloc_kmalloc = 0;
 	snd_alloc_vmalloc = 0;
diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
index de7444c..a13bd7b 100644
--- a/sound/core/oss/pcm_oss.c
+++ b/sound/core/oss/pcm_oss.c
@@ -1705,13 +1705,12 @@
 		if (snd_pcm_running(substream))
 			snd_pcm_stop(substream, SNDRV_PCM_STATE_SETUP);
 		snd_pcm_stream_unlock_irq(substream);
-		if (substream->open_flag) {
+		if (substream->ffile != NULL) {
 			if (substream->ops->hw_free != NULL)
 				substream->ops->hw_free(substream);
 			substream->ops->close(substream);
-			substream->open_flag = 0;
+			substream->ffile = NULL;
 		}
-		substream->ffile = NULL;
 		snd_pcm_oss_release_substream(substream);
 		snd_pcm_release_substream(substream);
 	}
@@ -1778,14 +1777,13 @@
 			snd_pcm_oss_release_file(pcm_oss_file);
 			return err;
 		}
-		psubstream->open_flag = 1;
+		psubstream->ffile = file;
 		err = snd_pcm_hw_constraints_complete(psubstream);
 		if (err < 0) {
 			snd_printd("snd_pcm_hw_constraint_complete failed\n");
 			snd_pcm_oss_release_file(pcm_oss_file);
 			return err;
 		}
-		psubstream->ffile = file;
 		snd_pcm_oss_init_substream(psubstream, psetup, minor);
 	}
 	if (csubstream != NULL) {
@@ -1800,14 +1798,13 @@
 			snd_pcm_oss_release_file(pcm_oss_file);
 			return err;
 		}
-		csubstream->open_flag = 1;
+		csubstream->ffile = file;
 		err = snd_pcm_hw_constraints_complete(csubstream);
 		if (err < 0) {
 			snd_printd("snd_pcm_hw_constraint_complete failed\n");
 			snd_pcm_oss_release_file(pcm_oss_file);
 			return err;
 		}
-		csubstream->ffile = file;
 		snd_pcm_oss_init_substream(csubstream, csetup, minor);
 	}
 
diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
index 3920bf0..4b6307d 100644
--- a/sound/core/pcm_compat.c
+++ b/sound/core/pcm_compat.c
@@ -103,10 +103,24 @@
 	unsigned char reserved[64];
 };
 
+/* recalculate the boundary within 32bit */
+static snd_pcm_uframes_t recalculate_boundary(snd_pcm_runtime_t *runtime)
+{
+	snd_pcm_uframes_t boundary;
+
+	if (! runtime->buffer_size)
+		return 0;
+	boundary = runtime->buffer_size;
+	while (boundary * 2 <= 0x7fffffffUL - runtime->buffer_size)
+		boundary *= 2;
+	return boundary;
+}
+
 static int snd_pcm_ioctl_sw_params_compat(snd_pcm_substream_t *substream,
 					  struct sndrv_pcm_sw_params32 __user *src)
 {
 	snd_pcm_sw_params_t params;
+	snd_pcm_uframes_t boundary;
 	int err;
 
 	memset(&params, 0, sizeof(params));
@@ -120,10 +134,17 @@
 	    get_user(params.silence_threshold, &src->silence_threshold) ||
 	    get_user(params.silence_size, &src->silence_size))
 		return -EFAULT;
+	/*
+	 * Check the silence_size parameter.  Since we have a 64bit boundary,
+	 * silence_size must be compared with the 32bit boundary.
+	 */
+	boundary = recalculate_boundary(substream->runtime);
+	if (boundary && params.silence_size >= boundary)
+		params.silence_size = substream->runtime->boundary;
 	err = snd_pcm_sw_params(substream, &params);
 	if (err < 0)
 		return err;
-	if (put_user(params.boundary, &src->boundary))
+	if (boundary && put_user(boundary, &src->boundary))
 		return -EFAULT;
 	return err;
 }
@@ -199,16 +220,6 @@
 	return err;
 }
 
-/* recalcuate the boundary within 32bit */
-static void recalculate_boundary(snd_pcm_runtime_t *runtime)
-{
-	if (! runtime->buffer_size)
-		return;
-	runtime->boundary = runtime->buffer_size;
-	while (runtime->boundary * 2 <= 0x7fffffffUL - runtime->buffer_size)
-		runtime->boundary *= 2;
-}
-
 /* both for HW_PARAMS and HW_REFINE */
 static int snd_pcm_ioctl_hw_params_compat(snd_pcm_substream_t *substream,
 					  int refine, 
@@ -241,8 +252,11 @@
 		goto error;
 	}
 
-	if (! refine)
-		recalculate_boundary(runtime);
+	if (! refine) {
+		unsigned int new_boundary = recalculate_boundary(runtime);
+		if (new_boundary)
+			runtime->boundary = new_boundary;
+	}
  error:
 	kfree(data);
 	return err;
@@ -380,6 +394,7 @@
 	u32 sflags;
 	struct sndrv_pcm_mmap_control scontrol;
 	struct sndrv_pcm_mmap_status sstatus;
+	snd_pcm_uframes_t boundary;
 	int err;
 
 	snd_assert(runtime, return -EINVAL);
@@ -395,17 +410,21 @@
 	}
 	status = runtime->status;
 	control = runtime->control;
+	boundary = recalculate_boundary(runtime);
+	if (! boundary)
+		boundary = 0x7fffffff;
 	snd_pcm_stream_lock_irq(substream);
+	/* FIXME: we should consider the boundary for the sync from app */
 	if (!(sflags & SNDRV_PCM_SYNC_PTR_APPL))
 		control->appl_ptr = scontrol.appl_ptr;
 	else
-		scontrol.appl_ptr = control->appl_ptr;
+		scontrol.appl_ptr = control->appl_ptr % boundary;
 	if (!(sflags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN))
 		control->avail_min = scontrol.avail_min;
 	else
 		scontrol.avail_min = control->avail_min;
 	sstatus.state = status->state;
-	sstatus.hw_ptr = status->hw_ptr;
+	sstatus.hw_ptr = status->hw_ptr % boundary;
 	sstatus.tstamp = status->tstamp;
 	sstatus.suspended_state = status->suspended_state;
 	snd_pcm_stream_unlock_irq(substream);
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index c5bfd09..0082914 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -1584,8 +1584,8 @@
 	return snd_pcm_hw_param_value(params, var, NULL);
 }
 
-int _snd_pcm_hw_param_mask(snd_pcm_hw_params_t *params,
-			   snd_pcm_hw_param_t var, const snd_mask_t *val)
+static int _snd_pcm_hw_param_mask(snd_pcm_hw_params_t *params,
+				  snd_pcm_hw_param_t var, const snd_mask_t *val)
 {
 	int changed;
 	assert(hw_is_mask(var));
@@ -2063,7 +2063,7 @@
 		if (((avail < runtime->control->avail_min && size > avail) ||
 		   (size >= runtime->xfer_align && avail < runtime->xfer_align))) {
 			wait_queue_t wait;
-			enum { READY, SIGNALED, ERROR, SUSPENDED, EXPIRED } state;
+			enum { READY, SIGNALED, ERROR, SUSPENDED, EXPIRED, DROPPED } state;
 			long tout;
 
 			if (nonblock) {
@@ -2097,6 +2097,9 @@
 				case SNDRV_PCM_STATE_SUSPENDED:
 					state = SUSPENDED;
 					goto _end_loop;
+				case SNDRV_PCM_STATE_SETUP:
+					state = DROPPED;
+					goto _end_loop;
 				default:
 					break;
 				}
@@ -2123,6 +2126,9 @@
 				snd_printd("playback write error (DMA or IRQ trouble?)\n");
 				err = -EIO;
 				goto _end_unlock;
+			case DROPPED:
+				err = -EBADFD;
+				goto _end_unlock;
 			default:
 				break;
 			}
@@ -2359,7 +2365,7 @@
 		} else if ((avail < runtime->control->avail_min && size > avail) ||
 			   (size >= runtime->xfer_align && avail < runtime->xfer_align)) {
 			wait_queue_t wait;
-			enum { READY, SIGNALED, ERROR, SUSPENDED, EXPIRED } state;
+			enum { READY, SIGNALED, ERROR, SUSPENDED, EXPIRED, DROPPED } state;
 			long tout;
 
 			if (nonblock) {
@@ -2394,6 +2400,9 @@
 					goto _end_loop;
 				case SNDRV_PCM_STATE_DRAINING:
 					goto __draining;
+				case SNDRV_PCM_STATE_SETUP:
+					state = DROPPED;
+					goto _end_loop;
 				default:
 					break;
 				}
@@ -2420,6 +2429,9 @@
 				snd_printd("capture read error (DMA or IRQ trouble?)\n");
 				err = -EIO;
 				goto _end_unlock;
+			case DROPPED:
+				err = -EBADFD;
+				goto _end_unlock;
 			default:
 				break;
 			}
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 10c2c98..03c1715 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -1025,7 +1025,7 @@
 	snd_pcm_runtime_t *runtime = substream->runtime;
 	snd_pcm_trigger_tstamp(substream);
 	if (substream->timer)
-		snd_timer_notify(substream->timer, SNDRV_TIMER_EVENT_MPAUSE, &runtime->trigger_tstamp);
+		snd_timer_notify(substream->timer, SNDRV_TIMER_EVENT_MSUSPEND, &runtime->trigger_tstamp);
 	runtime->status->suspended_state = runtime->status->state;
 	runtime->status->state = SNDRV_PCM_STATE_SUSPENDED;
 	snd_pcm_tick_set(substream, 0);
@@ -1115,7 +1115,7 @@
 	snd_pcm_runtime_t *runtime = substream->runtime;
 	snd_pcm_trigger_tstamp(substream);
 	if (substream->timer)
-		snd_timer_notify(substream->timer, SNDRV_TIMER_EVENT_MCONTINUE, &runtime->trigger_tstamp);
+		snd_timer_notify(substream->timer, SNDRV_TIMER_EVENT_MRESUME, &runtime->trigger_tstamp);
 	runtime->status->state = runtime->status->suspended_state;
 	if (runtime->sleep_min)
 		snd_pcm_tick_prepare(substream);
@@ -1967,13 +1967,12 @@
 	runtime = substream->runtime;
 	str = substream->pstr;
 	snd_pcm_unlink(substream);
-	if (substream->open_flag) {
+	if (substream->ffile != NULL) {
 		if (substream->ops->hw_free != NULL)
 			substream->ops->hw_free(substream);
 		substream->ops->close(substream);
-		substream->open_flag = 0;
+		substream->ffile = NULL;
 	}
-	substream->ffile = NULL;
 	snd_pcm_remove_file(str, pcm_file);
 	snd_pcm_release_substream(substream);
 	kfree(pcm_file);
@@ -2022,18 +2021,15 @@
 		snd_pcm_release_file(pcm_file);
 		return err;
 	}
-	substream->open_flag = 1;
+	substream->ffile = file;
 
 	err = snd_pcm_hw_constraints_complete(substream);
 	if (err < 0) {
 		snd_printd("snd_pcm_hw_constraints_complete failed\n");
-		substream->ops->close(substream);
 		snd_pcm_release_file(pcm_file);
 		return err;
 	}
 
-	substream->ffile = file;
-
 	file->private_data = pcm_file;
 	*rpcm_file = pcm_file;
 	return 0;
diff --git a/sound/core/sound_oss.c b/sound/core/sound_oss.c
index de39d21..e401c67 100644
--- a/sound/core/sound_oss.c
+++ b/sound/core/sound_oss.c
@@ -98,6 +98,7 @@
 	int cidx = SNDRV_MINOR_OSS_CARD(minor);
 	int track2 = -1;
 	int register1 = -1, register2 = -1;
+	struct device *carddev = NULL;
 
 	if (minor < 0)
 		return minor;
@@ -121,11 +122,13 @@
 		track2 = SNDRV_MINOR_OSS(cidx, SNDRV_MINOR_OSS_DMMIDI1);
 		break;
 	}
-	register1 = register_sound_special(reg->f_ops, minor);
+	if (card)
+		carddev = card->dev;
+	register1 = register_sound_special_device(reg->f_ops, minor, carddev);
 	if (register1 != minor)
 		goto __end;
 	if (track2 >= 0) {
-		register2 = register_sound_special(reg->f_ops, track2);
+		register2 = register_sound_special_device(reg->f_ops, track2, carddev);
 		if (register2 != track2)
 			goto __end;
 	}
diff --git a/sound/core/timer.c b/sound/core/timer.c
index cfaccd4..4104f6e 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -799,13 +799,13 @@
 	return 0;
 }
 
-int snd_timer_dev_free(snd_device_t *device)
+static int snd_timer_dev_free(snd_device_t *device)
 {
 	snd_timer_t *timer = device->device_data;
 	return snd_timer_free(timer);
 }
 
-int snd_timer_dev_register(snd_device_t *dev)
+static int snd_timer_dev_register(snd_device_t *dev)
 {
 	snd_timer_t *timer = dev->device_data;
 	snd_timer_t *timer1;
@@ -880,9 +880,11 @@
 	struct list_head *p, *n;
 
 	snd_runtime_check(timer->hw.flags & SNDRV_TIMER_HW_SLAVE, return);	
-	snd_assert(event >= SNDRV_TIMER_EVENT_MSTART && event <= SNDRV_TIMER_EVENT_MPAUSE, return);
+	snd_assert(event >= SNDRV_TIMER_EVENT_MSTART && event <= SNDRV_TIMER_EVENT_MRESUME, return);
 	spin_lock_irqsave(&timer->lock, flags);
-	if (event == SNDRV_TIMER_EVENT_MSTART || event == SNDRV_TIMER_EVENT_MCONTINUE) {
+	if (event == SNDRV_TIMER_EVENT_MSTART ||
+	    event == SNDRV_TIMER_EVENT_MCONTINUE ||
+	    event == SNDRV_TIMER_EVENT_MRESUME) {
 		if (timer->hw.c_resolution)
 			resolution = timer->hw.c_resolution(timer);
 		else
@@ -1555,10 +1557,14 @@
 			      (1<<SNDRV_TIMER_EVENT_STOP)|
 			      (1<<SNDRV_TIMER_EVENT_CONTINUE)|
 			      (1<<SNDRV_TIMER_EVENT_PAUSE)|
+			      (1<<SNDRV_TIMER_EVENT_SUSPEND)|
+			      (1<<SNDRV_TIMER_EVENT_RESUME)|
 			      (1<<SNDRV_TIMER_EVENT_MSTART)|
 			      (1<<SNDRV_TIMER_EVENT_MSTOP)|
 			      (1<<SNDRV_TIMER_EVENT_MCONTINUE)|
-			      (1<<SNDRV_TIMER_EVENT_MPAUSE))) {
+			      (1<<SNDRV_TIMER_EVENT_MPAUSE)|
+			      (1<<SNDRV_TIMER_EVENT_MSUSPEND)|
+			      (1<<SNDRV_TIMER_EVENT_MRESUME))) {
 		err = -EINVAL;
 		goto _end;
 	}
diff --git a/sound/drivers/vx/vx_mixer.c b/sound/drivers/vx/vx_mixer.c
index f00c888..19fc68c 100644
--- a/sound/drivers/vx/vx_mixer.c
+++ b/sound/drivers/vx/vx_mixer.c
@@ -796,14 +796,14 @@
 
 static snd_kcontrol_new_t vx_control_iec958_mask = {
 	.access =	SNDRV_CTL_ELEM_ACCESS_READ,
-	.iface =	SNDRV_CTL_ELEM_IFACE_MIXER,
+	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
 	.name =		SNDRV_CTL_NAME_IEC958("",PLAYBACK,MASK),
 	.info =		vx_iec958_info,	/* shared */
 	.get =		vx_iec958_mask_get,
 };
 
 static snd_kcontrol_new_t vx_control_iec958 = {
-	.iface =	SNDRV_CTL_ELEM_IFACE_MIXER,
+	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
 	.name =         SNDRV_CTL_NAME_IEC958("",PLAYBACK,DEFAULT),
 	.info =         vx_iec958_info,
 	.get =          vx_iec958_get,
diff --git a/sound/drivers/vx/vx_pcm.c b/sound/drivers/vx/vx_pcm.c
index af381b1..d4becf4 100644
--- a/sound/drivers/vx/vx_pcm.c
+++ b/sound/drivers/vx/vx_pcm.c
@@ -549,8 +549,8 @@
 
 static snd_pcm_hardware_t vx_pcm_playback_hw = {
 	.info =			(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
-				 SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_MMAP_VALID |
-				 SNDRV_PCM_INFO_RESUME),
+				 SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_MMAP_VALID /*|*/
+				 /*SNDRV_PCM_INFO_RESUME*/),
 	.formats =		/*SNDRV_PCM_FMTBIT_U8 |*/ SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_3LE,
 	.rates =		SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000,
 	.rate_min =		5000,
@@ -949,8 +949,8 @@
 
 static snd_pcm_hardware_t vx_pcm_capture_hw = {
 	.info =			(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
-				 SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_MMAP_VALID |
-				 SNDRV_PCM_INFO_RESUME),
+				 SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_MMAP_VALID /*|*/
+				 /*SNDRV_PCM_INFO_RESUME*/),
 	.formats =		/*SNDRV_PCM_FMTBIT_U8 |*/ SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_3LE,
 	.rates =		SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000,
 	.rate_min =		5000,
diff --git a/sound/isa/ad1816a/ad1816a.c b/sound/isa/ad1816a/ad1816a.c
index 563296d..0eb442c 100644
--- a/sound/isa/ad1816a/ad1816a.c
+++ b/sound/isa/ad1816a/ad1816a.c
@@ -53,6 +53,7 @@
 static int mpu_irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ;	/* Pnp setup */
 static int dma1[SNDRV_CARDS] = SNDRV_DEFAULT_DMA;	/* PnP setup */
 static int dma2[SNDRV_CARDS] = SNDRV_DEFAULT_DMA;	/* PnP setup */
+static int clockfreq[SNDRV_CARDS];
 
 module_param_array(index, int, NULL, 0444);
 MODULE_PARM_DESC(index, "Index value for ad1816a based soundcard.");
@@ -74,6 +75,8 @@
 MODULE_PARM_DESC(dma1, "1st DMA # for ad1816a driver.");
 module_param_array(dma2, int, NULL, 0444);
 MODULE_PARM_DESC(dma2, "2nd DMA # for ad1816a driver.");
+module_param_array(clockfreq, int, NULL, 0444);
+MODULE_PARM_DESC(clockfreq, "Clock frequency for ad1816a driver (default = 0).");
 
 struct snd_card_ad1816a {
 	struct pnp_dev *dev;
@@ -209,6 +212,8 @@
 		snd_card_free(card);
 		return error;
 	}
+	if (clockfreq[dev] >= 5000 && clockfreq[dev] <= 100000)
+		chip->clock_freq = clockfreq[dev];
 
 	strcpy(card->driver, "AD1816A");
 	strcpy(card->shortname, "ADI SoundPort AD1816A");
diff --git a/sound/isa/ad1816a/ad1816a_lib.c b/sound/isa/ad1816a/ad1816a_lib.c
index 625b2eff..ae86036 100644
--- a/sound/isa/ad1816a/ad1816a_lib.c
+++ b/sound/isa/ad1816a/ad1816a_lib.c
@@ -234,7 +234,7 @@
 	ad1816a_t *chip = snd_pcm_substream_chip(substream);
 	unsigned long flags;
 	snd_pcm_runtime_t *runtime = substream->runtime;
-	unsigned int size;
+	unsigned int size, rate;
 
 	spin_lock_irqsave(&chip->lock, flags);
 
@@ -245,7 +245,10 @@
 	snd_dma_program(chip->dma1, runtime->dma_addr, size,
 			DMA_MODE_WRITE | DMA_AUTOINIT);
 
-	snd_ad1816a_write(chip, AD1816A_PLAYBACK_SAMPLE_RATE, runtime->rate);
+	rate = runtime->rate;
+	if (chip->clock_freq)
+		rate = (rate * 33000) / chip->clock_freq;
+	snd_ad1816a_write(chip, AD1816A_PLAYBACK_SAMPLE_RATE, rate);
 	snd_ad1816a_out_mask(chip, AD1816A_PLAYBACK_CONFIG,
 		AD1816A_FMT_ALL | AD1816A_FMT_STEREO,
 		snd_ad1816a_get_format(chip, runtime->format,
@@ -263,7 +266,7 @@
 	ad1816a_t *chip = snd_pcm_substream_chip(substream);
 	unsigned long flags;
 	snd_pcm_runtime_t *runtime = substream->runtime;
-	unsigned int size;
+	unsigned int size, rate;
 
 	spin_lock_irqsave(&chip->lock, flags);
 
@@ -274,7 +277,10 @@
 	snd_dma_program(chip->dma2, runtime->dma_addr, size,
 			DMA_MODE_READ | DMA_AUTOINIT);
 
-	snd_ad1816a_write(chip, AD1816A_CAPTURE_SAMPLE_RATE, runtime->rate);
+	rate = runtime->rate;
+	if (chip->clock_freq)
+		rate = (rate * 33000) / chip->clock_freq;
+	snd_ad1816a_write(chip, AD1816A_CAPTURE_SAMPLE_RATE, rate);
 	snd_ad1816a_out_mask(chip, AD1816A_CAPTURE_CONFIG,
 		AD1816A_FMT_ALL | AD1816A_FMT_STEREO,
 		snd_ad1816a_get_format(chip, runtime->format,
diff --git a/sound/isa/ad1848/ad1848_lib.c b/sound/isa/ad1848/ad1848_lib.c
index 8fb3db1..bc642dc 100644
--- a/sound/isa/ad1848/ad1848_lib.c
+++ b/sound/isa/ad1848/ad1848_lib.c
@@ -1196,6 +1196,7 @@
 			.put = snd_ad1848_put_double,
 		},
 		[AD1848_MIX_CAPTURE] = {
+			.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
 			.info = snd_ad1848_info_mux,
 			.get = snd_ad1848_get_mux,
 			.put = snd_ad1848_put_mux,
diff --git a/sound/isa/cmi8330.c b/sound/isa/cmi8330.c
index 46776cc..1fce8b9 100644
--- a/sound/isa/cmi8330.c
+++ b/sound/isa/cmi8330.c
@@ -194,8 +194,8 @@
 AD1848_SINGLE("3D Control - Switch", 0, CMI8330_RMUX3D, 5, 1, 1),
 AD1848_SINGLE("PC Speaker Playback Volume", 0, CMI8330_OUTPUTVOL, 3, 3, 0),
 AD1848_SINGLE("FM Playback Switch", 0, CMI8330_RECMUX, 3, 1, 1),
-AD1848_SINGLE("IEC958 Input Capture Switch", 0, CMI8330_RMUX3D, 7, 1, 1),
-AD1848_SINGLE("IEC958 Input Playback Switch", 0, CMI8330_MUTEMUX, 7, 1, 1),
+AD1848_SINGLE(SNDRV_CTL_NAME_IEC958("Input ",CAPTURE,SWITCH), 0, CMI8330_RMUX3D, 7, 1, 1),
+AD1848_SINGLE(SNDRV_CTL_NAME_IEC958("Input ",PLAYBACK,SWITCH), 0, CMI8330_MUTEMUX, 7, 1, 1),
 };
 
 #ifdef ENABLE_SB_MIXER
diff --git a/sound/isa/cs423x/cs4231_lib.c b/sound/isa/cs423x/cs4231_lib.c
index 3e7a2a3..3199941 100644
--- a/sound/isa/cs423x/cs4231_lib.c
+++ b/sound/isa/cs423x/cs4231_lib.c
@@ -1346,6 +1346,8 @@
 	int reg;
 	unsigned long flags;
 	
+	if (chip->pcm)
+		snd_pcm_suspend_all(chip->pcm);
 	spin_lock_irqsave(&chip->reg_lock, flags);
 	for (reg = 0; reg < 32; reg++)
 		chip->image[reg] = snd_cs4231_in(chip, reg);
diff --git a/sound/isa/gus/gus_io.c b/sound/isa/gus/gus_io.c
index 337b0e2..23e1b5f 100644
--- a/sound/isa/gus/gus_io.c
+++ b/sound/isa/gus/gus_io.c
@@ -269,8 +269,9 @@
 
 #endif  /*  0  */
 
-unsigned int snd_gf1_i_read_addr(snd_gus_card_t * gus,
-				 unsigned char reg, short w_16bit)
+#ifdef CONFIG_SND_DEBUG
+static unsigned int snd_gf1_i_read_addr(snd_gus_card_t * gus,
+					unsigned char reg, short w_16bit)
 {
 	unsigned int res;
 	unsigned long flags;
@@ -280,6 +281,7 @@
 	spin_unlock_irqrestore(&gus->reg_lock, flags);
 	return res;
 }
+#endif
 
 /*
 
diff --git a/sound/isa/opl3sa2.c b/sound/isa/opl3sa2.c
index 95c7b3e..75bd6ec 100644
--- a/sound/isa/opl3sa2.c
+++ b/sound/isa/opl3sa2.c
@@ -145,6 +145,14 @@
 
 #ifdef CONFIG_PNP
 
+static struct pnp_device_id snd_opl3sa2_pnpbiosids[] = {
+	{ .id = "YMH0021" },
+	{ .id = "NMX2210" },	/* Gateway Solo 2500 */
+	{ .id = "" }		/* end */
+};
+
+MODULE_DEVICE_TABLE(pnp, snd_opl3sa2_pnpbiosids);
+
 static struct pnp_card_device_id snd_opl3sa2_pnpids[] = {
 	/* Yamaha YMF719E-S (Genius Sound Maker 3DX) */
 	{ .id = "YMH0020", .devs = { { "YMH0021" } } },
@@ -568,20 +576,18 @@
 
 #ifdef CONFIG_PNP
 static int __init snd_opl3sa2_pnp(int dev, opl3sa2_t *chip,
-				  struct pnp_card_link *card,
-				  const struct pnp_card_device_id *id)
+				  struct pnp_dev *pdev,
+				  int isapnp)
 {
-	struct pnp_dev *pdev;
-	struct pnp_resource_table * cfg = kmalloc(sizeof(struct pnp_resource_table), GFP_KERNEL);
+	struct pnp_resource_table * cfg;
 	int err;
 
+	if (!isapnp && pnp_device_is_isapnp(pdev))
+		return -ENOENT;	/* we have another procedure - card */
+
+	cfg = kmalloc(sizeof(struct pnp_resource_table), GFP_KERNEL);
 	if (!cfg)
 		return -ENOMEM;
-	pdev = chip->dev = pnp_request_card_device(card, id->devs[0].id, NULL);
-	if (chip->dev == NULL) {
-		kfree(cfg);
-		return -EBUSY;
-	}
 	/* PnP initialization */
 	pnp_init_resource_table(cfg);
 	if (sb_port[dev] != SNDRV_AUTO_PORT)
@@ -601,7 +607,7 @@
 	if (irq[dev] != SNDRV_AUTO_IRQ)
 		pnp_resource_change(&cfg->irq_resource[0], irq[dev], 1);
 	err = pnp_manual_config_dev(pdev, cfg, 0);
-	if (err < 0)
+	if (err < 0 && isapnp)
 		snd_printk(KERN_ERR "PnP manual resources are invalid, using auto config\n");
 	err = pnp_activate_dev(pdev);
 	if (err < 0) {
@@ -617,13 +623,31 @@
 	dma1[dev] = pnp_dma(pdev, 0);
 	dma2[dev] = pnp_dma(pdev, 1);
 	irq[dev] = pnp_irq(pdev, 0);
-	snd_printdd("PnP OPL3-SA: sb port=0x%lx, wss port=0x%lx, fm port=0x%lx, midi port=0x%lx\n",
-		sb_port[dev], wss_port[dev], fm_port[dev], midi_port[dev]);
-	snd_printdd("PnP OPL3-SA: control port=0x%lx, dma1=%i, dma2=%i, irq=%i\n",
-		port[dev], dma1[dev], dma2[dev], irq[dev]);
+	snd_printdd("%sPnP OPL3-SA: sb port=0x%lx, wss port=0x%lx, fm port=0x%lx, midi port=0x%lx\n",
+		pnp_device_is_pnpbios(pdev) ? "BIOS" : "ISA", sb_port[dev], wss_port[dev], fm_port[dev], midi_port[dev]);
+	snd_printdd("%sPnP OPL3-SA: control port=0x%lx, dma1=%i, dma2=%i, irq=%i\n",
+		pnp_device_is_pnpbios(pdev) ? "BIOS" : "ISA", port[dev], dma1[dev], dma2[dev], irq[dev]);
 	kfree(cfg);
+	chip->dev = pdev;
 	return 0;
 }
+
+static int __init snd_opl3sa2_cpnp(int dev, opl3sa2_t *chip,
+				   struct pnp_card_link *card,
+				   const struct pnp_card_device_id *id)
+{
+	struct pnp_dev *pdev;
+	struct pnp_resource_table * cfg = kmalloc(sizeof(struct pnp_resource_table), GFP_KERNEL);
+
+	if (!cfg)
+		return -ENOMEM;
+	pdev = pnp_request_card_device(card, id->devs[0].id, NULL);
+	if (pdev == NULL) {
+		kfree(cfg);
+		return -EBUSY;
+	}
+	return snd_opl3sa2_pnp(dev, chip, pdev, 1);
+}
 #endif /* CONFIG_PNP */
 
 static int snd_opl3sa2_free(opl3sa2_t *chip)
@@ -645,6 +669,7 @@
 }
 
 static int __devinit snd_opl3sa2_probe(int dev,
+				       struct pnp_dev *pdev,
 				       struct pnp_card_link *pcard,
 				       const struct pnp_card_device_id *pid)
 {
@@ -695,8 +720,13 @@
 	if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0)
 		goto __error;
 #ifdef CONFIG_PNP
-	if (isapnp[dev]) {
-		if ((err = snd_opl3sa2_pnp(dev, chip, pcard, pid)) < 0)
+	if (pdev) {
+		if ((err = snd_opl3sa2_pnp(dev, chip, pdev, 0)) < 0)
+			goto __error;
+		snd_card_set_dev(card, &pdev->dev);
+	}
+	if (pcard) {
+		if ((err = snd_opl3sa2_cpnp(dev, chip, pcard, pid)) < 0)
 			goto __error;
 		snd_card_set_dev(card, &pcard->card->dev);
 	}
@@ -768,7 +798,9 @@
 	if ((err = snd_card_register(card)) < 0)
 		goto __error;
 
-	if (pcard)
+	if (pdev)
+		pnp_set_drvdata(pdev, card);
+	else if (pcard)
 		pnp_set_card_drvdata(pcard, card);
 	else
 		snd_opl3sa2_legacy[dev] = card;
@@ -780,8 +812,8 @@
 }
 
 #ifdef CONFIG_PNP
-static int __devinit snd_opl3sa2_pnp_detect(struct pnp_card_link *card,
-					    const struct pnp_card_device_id *id)
+static int __devinit snd_opl3sa2_pnp_detect(struct pnp_dev *pdev,
+					    const struct pnp_device_id *id)
 {
         static int dev;
         int res;
@@ -789,7 +821,7 @@
         for ( ; dev < SNDRV_CARDS; dev++) {
                 if (!enable[dev] || !isapnp[dev])
                         continue;
-                res = snd_opl3sa2_probe(dev, card, id);
+                res = snd_opl3sa2_probe(dev, pdev, NULL, NULL);
                 if (res < 0)
                         return res;
                 dev++;
@@ -798,7 +830,40 @@
         return -ENODEV;
 }
 
-static void __devexit snd_opl3sa2_pnp_remove(struct pnp_card_link * pcard)
+static void __devexit snd_opl3sa2_pnp_remove(struct pnp_dev * pdev)
+{
+	snd_card_t *card = (snd_card_t *) pnp_get_drvdata(pdev);
+        
+	snd_card_disconnect(card);
+	snd_card_free_in_thread(card);
+}
+
+static struct pnp_driver opl3sa2_pnp_driver = {
+	.name = "opl3sa2-pnpbios",
+	.id_table = snd_opl3sa2_pnpbiosids,
+	.probe = snd_opl3sa2_pnp_detect,
+	.remove = __devexit_p(snd_opl3sa2_pnp_remove),
+};
+
+static int __devinit snd_opl3sa2_pnp_cdetect(struct pnp_card_link *card,
+					     const struct pnp_card_device_id *id)
+{
+        static int dev;
+        int res;
+
+        for ( ; dev < SNDRV_CARDS; dev++) {
+                if (!enable[dev] || !isapnp[dev])
+                        continue;
+                res = snd_opl3sa2_probe(dev, NULL, card, id);
+                if (res < 0)
+                        return res;
+                dev++;
+                return 0;
+        }
+        return -ENODEV;
+}
+
+static void __devexit snd_opl3sa2_pnp_cremove(struct pnp_card_link * pcard)
 {
 	snd_card_t *card = (snd_card_t *) pnp_get_card_drvdata(pcard);
         
@@ -810,8 +875,8 @@
 	.flags = PNP_DRIVER_RES_DISABLE,
 	.name = "opl3sa2",
 	.id_table = snd_opl3sa2_pnpids,
-	.probe = snd_opl3sa2_pnp_detect,
-	.remove = __devexit_p(snd_opl3sa2_pnp_remove),
+	.probe = snd_opl3sa2_pnp_cdetect,
+	.remove = __devexit_p(snd_opl3sa2_pnp_cremove),
 };
 #endif /* CONFIG_PNP */
 
@@ -826,10 +891,11 @@
 		if (isapnp[dev])
 			continue;
 #endif
-		if (snd_opl3sa2_probe(dev, NULL, NULL) >= 0)
+		if (snd_opl3sa2_probe(dev, NULL, NULL, NULL) >= 0)
 			cards++;
 	}
 #ifdef CONFIG_PNP
+	cards += pnp_register_driver(&opl3sa2_pnp_driver);
 	cards += pnp_register_card_driver(&opl3sa2_pnpc_driver);
 #endif
 	if (!cards) {
diff --git a/sound/isa/sb/sb16_main.c b/sound/isa/sb/sb16_main.c
index a6a0fa5..a99e642 100644
--- a/sound/isa/sb/sb16_main.c
+++ b/sound/isa/sb/sb16_main.c
@@ -729,7 +729,7 @@
 }
 
 static snd_kcontrol_new_t snd_sb16_dma_control = {
-	.iface = SNDRV_CTL_ELEM_IFACE_PCM,
+	.iface = SNDRV_CTL_ELEM_IFACE_CARD,
 	.name = "16-bit DMA Allocation",
 	.info = snd_sb16_dma_control_info,
 	.get = snd_sb16_dma_control_get,
diff --git a/sound/pci/Kconfig b/sound/pci/Kconfig
index 26b42bb..1e45891 100644
--- a/sound/pci/Kconfig
+++ b/sound/pci/Kconfig
@@ -1,11 +1,15 @@
 # ALSA PCI drivers
 
-menu "PCI devices"
-	depends on SND!=n && PCI
-
 config SND_AC97_CODEC
 	tristate
 	select SND_PCM
+	select SND_AC97_BUS
+
+config SND_AC97_BUS
+	tristate
+
+menu "PCI devices"
+	depends on SND!=n && PCI
 
 config SND_ALI5451
 	tristate "ALi M5451 PCI Audio Controller"
diff --git a/sound/pci/ac97/Makefile b/sound/pci/ac97/Makefile
index 3c32221..77b3482 100644
--- a/sound/pci/ac97/Makefile
+++ b/sound/pci/ac97/Makefile
@@ -10,9 +10,11 @@
 endif
 
 snd-ak4531-codec-objs := ak4531_codec.o
+snd-ac97-bus-objs := ac97_bus.o
 
 # Toplevel Module Dependency
 obj-$(CONFIG_SND_AC97_CODEC) += snd-ac97-codec.o
 obj-$(CONFIG_SND_ENS1370) += snd-ak4531-codec.o
+obj-$(CONFIG_SND_AC97_BUS) += snd-ac97-bus.o
 
 obj-m := $(sort $(obj-m))
diff --git a/sound/pci/ac97/ac97_bus.c b/sound/pci/ac97/ac97_bus.c
new file mode 100644
index 0000000..227f8b9
--- /dev/null
+++ b/sound/pci/ac97/ac97_bus.c
@@ -0,0 +1,79 @@
+/*
+ * Linux driver model AC97 bus interface
+ *
+ * Author:	Nicolas Pitre
+ * Created:	Jan 14, 2005
+ * Copyright:	(C) MontaVista Software Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/string.h>
+
+/*
+ * Codec families have names separated by commas, so we search for an
+ * individual codec name within the family string.
+ */
+static int ac97_bus_match(struct device *dev, struct device_driver *drv)
+{
+	return (strstr(dev->bus_id, drv->name) != NULL);
+}
+
+static int ac97_bus_suspend(struct device *dev, pm_message_t state)
+{
+	int ret = 0;
+
+	if (dev->driver && dev->driver->suspend) {
+		ret = dev->driver->suspend(dev, state, SUSPEND_DISABLE);
+		if (ret == 0)
+			ret = dev->driver->suspend(dev, state, SUSPEND_SAVE_STATE);
+		if (ret == 0)
+			ret = dev->driver->suspend(dev, state, SUSPEND_POWER_DOWN);
+	}
+	return ret;
+}
+
+static int ac97_bus_resume(struct device *dev)
+{
+	int ret = 0;
+
+	if (dev->driver && dev->driver->resume) {
+		ret = dev->driver->resume(dev, RESUME_POWER_ON);
+		if (ret == 0)
+			ret = dev->driver->resume(dev, RESUME_RESTORE_STATE);
+		if (ret == 0)
+			ret = dev->driver->resume(dev, RESUME_ENABLE);
+	}
+	return ret;
+}
+
+struct bus_type ac97_bus_type = {
+	.name		= "ac97",
+	.match		= ac97_bus_match,
+	.suspend	= ac97_bus_suspend,
+	.resume		= ac97_bus_resume,
+};
+
+static int __init ac97_bus_init(void)
+{
+	return bus_register(&ac97_bus_type);
+}
+
+subsys_initcall(ac97_bus_init);
+
+static void __exit ac97_bus_exit(void)
+{
+	bus_unregister(&ac97_bus_type);
+}
+
+module_exit(ac97_bus_exit);
+
+EXPORT_SYMBOL(ac97_bus_type);
+
+MODULE_LICENSE("GPL");
diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c
index 6983eea..5501f44 100644
--- a/sound/pci/ac97/ac97_codec.c
+++ b/sound/pci/ac97/ac97_codec.c
@@ -157,6 +157,7 @@
 { 0x54524123, 0xffffffff, "TR28602",		NULL,		NULL }, // only guess --jk [TR28023 = eMicro EM28023 (new CT1297)]
 { 0x54584e20, 0xffffffff, "TLC320AD9xC",	NULL,		NULL },
 { 0x56494161, 0xffffffff, "VIA1612A",		NULL,		NULL }, // modified ICE1232 with S/PDIF
+{ 0x56494170, 0xffffffff, "VIA1617A",		patch_vt1617a,	NULL }, // modified VT1616 with S/PDIF
 { 0x57454301, 0xffffffff, "W83971D",		NULL,		NULL },
 { 0x574d4c00, 0xffffffff, "WM9701A",		NULL,		NULL },
 { 0x574d4C03, 0xffffffff, "WM9703,WM9707,WM9708,WM9717", patch_wolfson03, NULL},
@@ -1307,16 +1308,18 @@
 	}
 	
 	/* build master tone controls */
-	if (snd_ac97_try_volume_mix(ac97, AC97_MASTER_TONE)) {
-		for (idx = 0; idx < 2; idx++) {
-			if ((err = snd_ctl_add(card, kctl = snd_ac97_cnew(&snd_ac97_controls_tone[idx], ac97))) < 0)
-				return err;
-			if (ac97->id == AC97_ID_YMF753) {
-				kctl->private_value &= ~(0xff << 16);
-				kctl->private_value |= 7 << 16;
+	if (!(ac97->flags & AC97_HAS_NO_TONE)) {
+		if (snd_ac97_try_volume_mix(ac97, AC97_MASTER_TONE)) {
+			for (idx = 0; idx < 2; idx++) {
+				if ((err = snd_ctl_add(card, kctl = snd_ac97_cnew(&snd_ac97_controls_tone[idx], ac97))) < 0)
+					return err;
+				if (ac97->id == AC97_ID_YMF753) {
+					kctl->private_value &= ~(0xff << 16);
+					kctl->private_value |= 7 << 16;
+				}
 			}
+			snd_ac97_write_cache(ac97, AC97_MASTER_TONE, 0x0f0f);
 		}
-		snd_ac97_write_cache(ac97, AC97_MASTER_TONE, 0x0f0f);
 	}
 	
 	/* build PC Speaker controls */
@@ -1339,11 +1342,13 @@
 	}
 	
 	/* build MIC controls */
-	if (snd_ac97_try_volume_mix(ac97, AC97_MIC)) {
-		if ((err = snd_ac97_cmix_new(card, "Mic Playback", AC97_MIC, ac97)) < 0)
-			return err;
-		if ((err = snd_ctl_add(card, snd_ac97_cnew(&snd_ac97_controls_mic_boost, ac97))) < 0)
-			return err;
+	if (!(ac97->flags & AC97_HAS_NO_MIC)) {
+		if (snd_ac97_try_volume_mix(ac97, AC97_MIC)) {
+			if ((err = snd_ac97_cmix_new(card, "Mic Playback", AC97_MIC, ac97)) < 0)
+				return err;
+			if ((err = snd_ctl_add(card, snd_ac97_cnew(&snd_ac97_controls_mic_boost, ac97))) < 0)
+				return err;
+		}
 	}
 
 	/* build Line controls */
@@ -1402,12 +1407,14 @@
 		}
 		snd_ac97_write_cache(ac97, AC97_PCM, init_val);
 	} else {
-		if (ac97->flags & AC97_HAS_NO_PCM_VOL)
-			err = snd_ac97_cmute_new(card, "PCM Playback Switch", AC97_PCM, ac97);
-		else
-			err = snd_ac97_cmix_new(card, "PCM Playback", AC97_PCM, ac97);
-		if (err < 0)
-			return err;
+		if (!(ac97->flags & AC97_HAS_NO_STD_PCM)) {
+			if (ac97->flags & AC97_HAS_NO_PCM_VOL)
+				err = snd_ac97_cmute_new(card, "PCM Playback Switch", AC97_PCM, ac97);
+			else
+				err = snd_ac97_cmix_new(card, "PCM Playback", AC97_PCM, ac97);
+			if (err < 0)
+				return err;
+		}
 	}
 
 	/* build Capture controls */
@@ -1807,6 +1814,39 @@
 	return 0;
 }
 
+/* empty release function to suppress the "device has no release" warning */
+static void ac97_device_release(struct device * dev)
+{
+}
+
+/* register ac97 codec to bus */
+static int snd_ac97_dev_register(snd_device_t *device)
+{
+	ac97_t *ac97 = device->device_data;
+	int err;
+
+	ac97->dev.bus = &ac97_bus_type;
+	ac97->dev.parent = ac97->bus->card->dev;
+	ac97->dev.platform_data = ac97;
+	ac97->dev.release = ac97_device_release;
+	snprintf(ac97->dev.bus_id, BUS_ID_SIZE, "card%d-%d", ac97->bus->card->number, ac97->num);
+	if ((err = device_register(&ac97->dev)) < 0) {
+		snd_printk(KERN_ERR "Can't register ac97 bus\n");
+		ac97->dev.bus = NULL;
+		return err;
+	}
+	return 0;
+}
+
+/* unregister ac97 codec */
+static int snd_ac97_dev_unregister(snd_device_t *device)
+{
+	ac97_t *ac97 = device->device_data;
+	if (ac97->dev.bus)
+		device_unregister(&ac97->dev);
+	return snd_ac97_free(ac97);
+}
+
 /* build_ops to do nothing */
 static struct snd_ac97_build_ops null_build_ops;
 
@@ -1840,6 +1880,8 @@
 	const ac97_codec_id_t *pid;
 	static snd_device_ops_t ops = {
 		.dev_free =	snd_ac97_dev_free,
+		.dev_register =	snd_ac97_dev_register,
+		.dev_unregister =	snd_ac97_dev_unregister,
 	};
 
 	snd_assert(rac97 != NULL, return -EINVAL);
@@ -2539,8 +2581,6 @@
 {
 	int result;
 
-	snd_assert(quirk, return -EINVAL);
-
 	/* quirk overriden? */
 	if (override && strcmp(override, "-1") && strcmp(override, "default")) {
 		result = apply_quirk_str(ac97, override);
@@ -2549,6 +2589,9 @@
 		return result;
 	}
 
+	if (! quirk)
+		return -EINVAL;
+
 	for (; quirk->subvendor; quirk++) {
 		if (quirk->subvendor != ac97->subsystem_vendor)
 			continue;
diff --git a/sound/pci/ac97/ac97_patch.c b/sound/pci/ac97/ac97_patch.c
index 66edc85..b584172 100644
--- a/sound/pci/ac97/ac97_patch.c
+++ b/sound/pci/ac97/ac97_patch.c
@@ -370,141 +370,387 @@
  *  added support for WM9705,WM9708,WM9709,WM9710,WM9711,WM9712 and WM9717.
  */
 
-int patch_wolfson03(ac97_t * ac97)
+static const snd_kcontrol_new_t wm97xx_snd_ac97_controls[] = {
+AC97_DOUBLE("Front Playback Volume", AC97_WM97XX_FMIXER_VOL, 8, 0, 31, 1),
+AC97_SINGLE("Front Playback Switch", AC97_WM97XX_FMIXER_VOL, 15, 1, 1),
+};
+
+static int patch_wolfson_wm9703_specific(ac97_t * ac97)
 {
 	/* This is known to work for the ViewSonic ViewPad 1000
-	   Randolph Bentson <bentson@holmsjoen.com> */
-
-	// WM9703/9707/9708/9717
-	snd_ac97_write_cache(ac97, AC97_WM97XX_FMIXER_VOL, 0x0808);
-	snd_ac97_write_cache(ac97, AC97_GENERAL_PURPOSE, 0x8000);
+	 * Randolph Bentson <bentson@holmsjoen.com>
+	 * WM9703/9707/9708/9717 
+	 */
+	int err, i;
+	
+	for (i = 0; i < ARRAY_SIZE(wm97xx_snd_ac97_controls); i++) {
+		if ((err = snd_ctl_add(ac97->bus->card, snd_ac97_cnew(&wm97xx_snd_ac97_controls[i], ac97))) < 0)
+			return err;
+	}
+	snd_ac97_write_cache(ac97,  AC97_WM97XX_FMIXER_VOL, 0x0808);
 	return 0;
 }
-  
+
+static struct snd_ac97_build_ops patch_wolfson_wm9703_ops = {
+	.build_specific = patch_wolfson_wm9703_specific,
+};
+
+int patch_wolfson03(ac97_t * ac97)
+{
+	ac97->build_ops = &patch_wolfson_wm9703_ops;
+	return 0;
+}
+
+static const snd_kcontrol_new_t wm9704_snd_ac97_controls[] = {
+AC97_DOUBLE("Front Playback Volume", AC97_WM97XX_FMIXER_VOL, 8, 0, 31, 1),
+AC97_SINGLE("Front Playback Switch", AC97_WM97XX_FMIXER_VOL, 15, 1, 1),
+AC97_DOUBLE("Rear Playback Volume", AC97_WM9704_RMIXER_VOL, 8, 0, 31, 1),
+AC97_SINGLE("Rear Playback Switch", AC97_WM9704_RMIXER_VOL, 15, 1, 1),
+AC97_DOUBLE("Rear DAC Volume", AC97_WM9704_RPCM_VOL, 8, 0, 31, 1),
+AC97_DOUBLE("Surround Volume", AC97_SURROUND_MASTER, 8, 0, 31, 1),
+};
+
+static int patch_wolfson_wm9704_specific(ac97_t * ac97)
+{
+	int err, i;
+	for (i = 0; i < ARRAY_SIZE(wm9704_snd_ac97_controls); i++) {
+		if ((err = snd_ctl_add(ac97->bus->card, snd_ac97_cnew(&wm9704_snd_ac97_controls[i], ac97))) < 0)
+			return err;
+	}
+	/* patch for DVD noise */
+	snd_ac97_write_cache(ac97, AC97_WM9704_TEST, 0x0200);
+	return 0;
+}
+
+static struct snd_ac97_build_ops patch_wolfson_wm9704_ops = {
+	.build_specific = patch_wolfson_wm9704_specific,
+};
+
 int patch_wolfson04(ac97_t * ac97)
 {
-	// WM9704M/9704Q
-	// set front and rear mixer volume
-	snd_ac97_write_cache(ac97, AC97_WM97XX_FMIXER_VOL, 0x0808);
-	snd_ac97_write_cache(ac97, AC97_WM9704_RMIXER_VOL, 0x0808);
-	
-	// patch for DVD noise
-	snd_ac97_write_cache(ac97, AC97_WM9704_TEST, 0x0200);
- 
-	// init vol
-	snd_ac97_write_cache(ac97, AC97_WM9704_RPCM_VOL, 0x0808);
- 
-	// set rear surround volume
-	snd_ac97_write_cache(ac97, AC97_SURROUND_MASTER, 0x0000);
+	/* WM9704M/9704Q */
+	ac97->build_ops = &patch_wolfson_wm9704_ops;
 	return 0;
 }
-  
+
+static int patch_wolfson_wm9705_specific(ac97_t * ac97)
+{
+	int err, i;
+	for (i = 0; i < ARRAY_SIZE(wm97xx_snd_ac97_controls); i++) {
+		if ((err = snd_ctl_add(ac97->bus->card, snd_ac97_cnew(&wm97xx_snd_ac97_controls[i], ac97))) < 0)
+			return err;
+	}
+	snd_ac97_write_cache(ac97,  0x72, 0x0808);
+	return 0;
+}
+
+static struct snd_ac97_build_ops patch_wolfson_wm9705_ops = {
+	.build_specific = patch_wolfson_wm9705_specific,
+};
+
 int patch_wolfson05(ac97_t * ac97)
 {
-	// WM9705, WM9710
-	// set front mixer volume
-	snd_ac97_write_cache(ac97, AC97_WM97XX_FMIXER_VOL, 0x0808);
+	/* WM9705, WM9710 */
+	ac97->build_ops = &patch_wolfson_wm9705_ops;
 	return 0;
 }
 
+static const char* wm9711_alc_select[] = {"None", "Left", "Right", "Stereo"};
+static const char* wm9711_alc_mix[] = {"Stereo", "Right", "Left", "None"};
+static const char* wm9711_out3_src[] = {"Left", "VREF", "Left + Right", "Mono"};
+static const char* wm9711_out3_lrsrc[] = {"Master Mix", "Headphone Mix"};
+static const char* wm9711_rec_adc[] = {"Stereo", "Left", "Right", "Mute"};
+static const char* wm9711_base[] = {"Linear Control", "Adaptive Boost"};
+static const char* wm9711_rec_gain[] = {"+1.5dB Steps", "+0.75dB Steps"};
+static const char* wm9711_mic[] = {"Mic 1", "Differential", "Mic 2", "Stereo"};
+static const char* wm9711_rec_sel[] = 
+	{"Mic 1", "NC", "NC", "Master Mix", "Line", "Headphone Mix", "Phone Mix", "Phone"};
+static const char* wm9711_ng_type[] = {"Constant Gain", "Mute"};
+
+static const struct ac97_enum wm9711_enum[] = {
+AC97_ENUM_SINGLE(AC97_PCI_SVID, 14, 4, wm9711_alc_select),
+AC97_ENUM_SINGLE(AC97_VIDEO, 10, 4, wm9711_alc_mix),
+AC97_ENUM_SINGLE(AC97_AUX, 9, 4, wm9711_out3_src),
+AC97_ENUM_SINGLE(AC97_AUX, 8, 2, wm9711_out3_lrsrc),
+AC97_ENUM_SINGLE(AC97_REC_SEL, 12, 4, wm9711_rec_adc),
+AC97_ENUM_SINGLE(AC97_MASTER_TONE, 15, 2, wm9711_base),
+AC97_ENUM_DOUBLE(AC97_REC_GAIN, 14, 6, 2, wm9711_rec_gain),
+AC97_ENUM_SINGLE(AC97_MIC, 5, 4, wm9711_mic),
+AC97_ENUM_DOUBLE(AC97_REC_SEL, 8, 0, 8, wm9711_rec_sel),
+AC97_ENUM_SINGLE(AC97_PCI_SVID, 5, 2, wm9711_ng_type),
+};
+
+static const snd_kcontrol_new_t wm9711_snd_ac97_controls[] = {
+AC97_SINGLE("ALC Target Volume", AC97_CODEC_CLASS_REV, 12, 15, 0),
+AC97_SINGLE("ALC Hold Time", AC97_CODEC_CLASS_REV, 8, 15, 0),
+AC97_SINGLE("ALC Decay Time", AC97_CODEC_CLASS_REV, 4, 15, 0),
+AC97_SINGLE("ALC Attack Time", AC97_CODEC_CLASS_REV, 0, 15, 0),
+AC97_ENUM("ALC Function", wm9711_enum[0]),
+AC97_SINGLE("ALC Max Volume", AC97_PCI_SVID, 11, 7, 1),
+AC97_SINGLE("ALC ZC Timeout", AC97_PCI_SVID, 9, 3, 1),
+AC97_SINGLE("ALC ZC Switch", AC97_PCI_SVID, 8, 1, 0),
+AC97_SINGLE("ALC NG Switch", AC97_PCI_SVID, 7, 1, 0),
+AC97_ENUM("ALC NG Type", wm9711_enum[9]),
+AC97_SINGLE("ALC NG Threshold", AC97_PCI_SVID, 0, 31, 1),
+
+AC97_SINGLE("Side Tone Switch", AC97_VIDEO, 15, 1, 1),
+AC97_SINGLE("Side Tone Volume", AC97_VIDEO, 12, 7, 1),
+AC97_ENUM("ALC Headphone Mux", wm9711_enum[1]),
+AC97_SINGLE("ALC Headphone Volume", AC97_VIDEO, 7, 7, 1),
+
+AC97_SINGLE("Out3 Switch", AC97_AUX, 15, 1, 1),
+AC97_SINGLE("Out3 ZC Switch", AC97_AUX, 7, 1, 1),
+AC97_ENUM("Out3 Mux", wm9711_enum[2]),
+AC97_ENUM("Out3 LR Mux", wm9711_enum[3]),
+AC97_SINGLE("Out3 Volume", AC97_AUX, 0, 31, 1),
+
+AC97_SINGLE("Beep to Headphone Switch", AC97_PC_BEEP, 15, 1, 1),
+AC97_SINGLE("Beep to Headphone Volume", AC97_PC_BEEP, 12, 7, 1),
+AC97_SINGLE("Beep to Side Tone Switch", AC97_PC_BEEP, 11, 1, 1),
+AC97_SINGLE("Beep to Side Tone Volume", AC97_PC_BEEP, 8, 7, 1),
+AC97_SINGLE("Beep to Phone Switch", AC97_PC_BEEP, 7, 1, 1),
+AC97_SINGLE("Beep to Phone Volume", AC97_PC_BEEP, 4, 7, 1),
+
+AC97_SINGLE("Aux to Headphone Switch", AC97_CD, 15, 1, 1),
+AC97_SINGLE("Aux to Headphone Volume", AC97_CD, 12, 7, 1),
+AC97_SINGLE("Aux to Side Tone Switch", AC97_CD, 11, 1, 1),
+AC97_SINGLE("Aux to Side Tone Volume", AC97_CD, 8, 7, 1),
+AC97_SINGLE("Aux to Phone Switch", AC97_CD, 7, 1, 1),
+AC97_SINGLE("Aux to Phone Volume", AC97_CD, 4, 7, 1),
+
+AC97_SINGLE("Phone to Headphone Switch", AC97_PHONE, 15, 1, 1),
+AC97_SINGLE("Phone to Master Switch", AC97_PHONE, 14, 1, 1),
+
+AC97_SINGLE("Line to Headphone Switch", AC97_LINE, 15, 1, 1),
+AC97_SINGLE("Line to Master Switch", AC97_LINE, 14, 1, 1),
+AC97_SINGLE("Line to Phone Switch", AC97_LINE, 13, 1, 1),
+
+AC97_SINGLE("PCM Playback to Headphone Switch", AC97_PCM, 15, 1, 1),
+AC97_SINGLE("PCM Playback to Master Switch", AC97_PCM, 14, 1, 1),
+AC97_SINGLE("PCM Playback to Phone Switch", AC97_PCM, 13, 1, 1),
+
+AC97_SINGLE("Capture 20dB Boost Switch", AC97_REC_SEL, 14, 1, 0),
+AC97_ENUM("Capture to Phone Mux", wm9711_enum[4]),
+AC97_SINGLE("Capture to Phone 20dB Boost Switch", AC97_REC_SEL, 11, 1, 1),
+AC97_ENUM("Capture Select", wm9711_enum[8]),
+
+AC97_SINGLE("3D Upper Cut-off Switch", AC97_3D_CONTROL, 5, 1, 1),
+AC97_SINGLE("3D Lower Cut-off Switch", AC97_3D_CONTROL, 4, 1, 1),
+
+AC97_ENUM("Bass Control", wm9711_enum[5]),
+AC97_SINGLE("Bass Cut-off Switch", AC97_MASTER_TONE, 12, 1, 1),
+AC97_SINGLE("Tone Cut-off Switch", AC97_MASTER_TONE, 4, 1, 1),
+AC97_SINGLE("Playback Attenuate (-6dB) Switch", AC97_MASTER_TONE, 6, 1, 0),
+
+AC97_SINGLE("ADC Switch", AC97_REC_GAIN, 15, 1, 1),
+AC97_ENUM("Capture Volume Steps", wm9711_enum[6]),
+AC97_DOUBLE("Capture Volume", AC97_REC_GAIN, 8, 0, 15, 1),
+AC97_SINGLE("Capture ZC Switch", AC97_REC_GAIN, 7, 1, 0),
+
+AC97_SINGLE("Mic 1 to Phone Switch", AC97_MIC, 14, 1, 1),
+AC97_SINGLE("Mic 2 to Phone Switch", AC97_MIC, 13, 1, 1),
+AC97_ENUM("Mic Select Source", wm9711_enum[7]),
+AC97_SINGLE("Mic 1 Volume", AC97_MIC, 8, 32, 1),
+AC97_SINGLE("Mic 20dB Boost Switch", AC97_MIC, 7, 1, 0),
+
+AC97_SINGLE("Master ZC Switch", AC97_MASTER, 7, 1, 0),
+AC97_SINGLE("Headphone ZC Switch", AC97_HEADPHONE, 7, 1, 0),
+AC97_SINGLE("Mono ZC Switch", AC97_MASTER_MONO, 7, 1, 0),
+};
+
+static int patch_wolfson_wm9711_specific(ac97_t * ac97)
+{
+	int err, i;
+	
+	for (i = 0; i < ARRAY_SIZE(wm9711_snd_ac97_controls); i++) {
+		if ((err = snd_ctl_add(ac97->bus->card, snd_ac97_cnew(&wm9711_snd_ac97_controls[i], ac97))) < 0)
+			return err;
+	}
+	snd_ac97_write_cache(ac97,  AC97_CODEC_CLASS_REV, 0x0808);
+	snd_ac97_write_cache(ac97,  AC97_PCI_SVID, 0x0808);
+	snd_ac97_write_cache(ac97,  AC97_VIDEO, 0x0808);
+	snd_ac97_write_cache(ac97,  AC97_AUX, 0x0808);
+	snd_ac97_write_cache(ac97,  AC97_PC_BEEP, 0x0808);
+	snd_ac97_write_cache(ac97,  AC97_CD, 0x0000);
+	return 0;
+}
+
+static struct snd_ac97_build_ops patch_wolfson_wm9711_ops = {
+	.build_specific = patch_wolfson_wm9711_specific,
+};
+
 int patch_wolfson11(ac97_t * ac97)
 {
-	// WM9711, WM9712
-	// set out3 volume
-	snd_ac97_write_cache(ac97, AC97_WM9711_OUT3VOL, 0x0808);
+	/* WM9711, WM9712 */
+	ac97->build_ops = &patch_wolfson_wm9711_ops;
+
+	ac97->flags |= AC97_HAS_NO_REC_GAIN | AC97_STEREO_MUTES | AC97_HAS_NO_MIC |
+		AC97_HAS_NO_PC_BEEP | AC97_HAS_NO_VIDEO | AC97_HAS_NO_CD;
+	
 	return 0;
 }
 
-static const char* wm9713_mic_mixer[] = {"Stereo", "Mic1", "Mic2", "Mute"};
+static const char* wm9713_mic_mixer[] = {"Stereo", "Mic 1", "Mic 2", "Mute"};
 static const char* wm9713_rec_mux[] = {"Stereo", "Left", "Right", "Mute"};
-static const char* wm9713_rec_src_l[] = {"Mic1", "Mic2", "Line L", "Mono In", "HP Mix L", "Spk Mix", "Mono Mix", "Zh"};
-static const char* wm9713_rec_src_r[] = {"Mic1", "Mic2", "Line R", "Mono In", "HP Mix R", "Spk Mix", "Mono Mix", "Zh"};
+static const char* wm9713_rec_src[] = 
+	{"Mic 1", "Mic 2", "Line", "Mono In", "Headphone Mix", "Master Mix", 
+	"Mono Mix", "Zh"};
+static const char* wm9713_rec_gain[] = {"+1.5dB Steps", "+0.75dB Steps"};
+static const char* wm9713_alc_select[] = {"None", "Left", "Right", "Stereo"};
+static const char* wm9713_mono_pga[] = {"Vmid", "Zh", "Mono Mix", "Inv 1"};
+static const char* wm9713_spk_pga[] = 
+	{"Vmid", "Zh", "Headphone Mix", "Master Mix", "Inv", "NC", "NC", "NC"};
+static const char* wm9713_hp_pga[] = {"Vmid", "Zh", "Headphone Mix", "NC"};
+static const char* wm9713_out3_pga[] = {"Vmid", "Zh", "Inv 1", "NC"};
+static const char* wm9713_out4_pga[] = {"Vmid", "Zh", "Inv 2", "NC"};
+static const char* wm9713_dac_inv[] = 
+	{"Off", "Mono Mix", "Master Mix", "Headphone Mix L", "Headphone Mix R", 
+	"Headphone Mix Mono", "NC", "Vmid"};
+static const char* wm9713_base[] = {"Linear Control", "Adaptive Boost"};
+static const char* wm9713_ng_type[] = {"Constant Gain", "Mute"};
 
 static const struct ac97_enum wm9713_enum[] = {
 AC97_ENUM_SINGLE(AC97_LINE, 3, 4, wm9713_mic_mixer),
 AC97_ENUM_SINGLE(AC97_VIDEO, 14, 4, wm9713_rec_mux),
 AC97_ENUM_SINGLE(AC97_VIDEO, 9, 4, wm9713_rec_mux),
-AC97_ENUM_SINGLE(AC97_VIDEO, 3, 8, wm9713_rec_src_l),
-AC97_ENUM_SINGLE(AC97_VIDEO, 0, 8, wm9713_rec_src_r),
+AC97_ENUM_DOUBLE(AC97_VIDEO, 3, 0, 8, wm9713_rec_src),
+AC97_ENUM_DOUBLE(AC97_CD, 14, 6, 2, wm9713_rec_gain),
+AC97_ENUM_SINGLE(AC97_PCI_SVID, 14, 4, wm9713_alc_select),
+AC97_ENUM_SINGLE(AC97_REC_GAIN, 14, 4, wm9713_mono_pga),
+AC97_ENUM_DOUBLE(AC97_REC_GAIN, 11, 8, 8, wm9713_spk_pga),
+AC97_ENUM_DOUBLE(AC97_REC_GAIN, 6, 4, 4, wm9713_hp_pga),
+AC97_ENUM_SINGLE(AC97_REC_GAIN, 2, 4, wm9713_out3_pga),
+AC97_ENUM_SINGLE(AC97_REC_GAIN, 0, 4, wm9713_out4_pga),
+AC97_ENUM_DOUBLE(AC97_REC_GAIN_MIC, 13, 10, 8, wm9713_dac_inv),
+AC97_ENUM_SINGLE(AC97_GENERAL_PURPOSE, 15, 2, wm9713_base),
+AC97_ENUM_SINGLE(AC97_PCI_SVID, 5, 2, wm9713_ng_type),
 };
 
-static const snd_kcontrol_new_t wm13_snd_ac97_controls_line_in[] = {
+static const snd_kcontrol_new_t wm13_snd_ac97_controls[] = {
 AC97_DOUBLE("Line In Volume", AC97_PC_BEEP, 8, 0, 31, 1),
-AC97_SINGLE("Line In to Headphone Mute", AC97_PC_BEEP, 15, 1, 1),
-AC97_SINGLE("Line In to Speaker Mute", AC97_PC_BEEP, 14, 1, 1),
-AC97_SINGLE("Line In to Mono Mute", AC97_PC_BEEP, 13, 1, 1),
+AC97_SINGLE("Line In to Headphone Switch", AC97_PC_BEEP, 15, 1, 1),
+AC97_SINGLE("Line In to Master Switch", AC97_PC_BEEP, 14, 1, 1),
+AC97_SINGLE("Line In to Mono Switch", AC97_PC_BEEP, 13, 1, 1),
+
+AC97_DOUBLE("PCM Playback Volume", AC97_PHONE, 8, 0, 31, 1),
+AC97_SINGLE("PCM Playback to Headphone Switch", AC97_PHONE, 15, 1, 1),
+AC97_SINGLE("PCM Playback to Master Switch", AC97_PHONE, 14, 1, 1),
+AC97_SINGLE("PCM Playback to Mono Switch", AC97_PHONE, 13, 1, 1),
+
+AC97_SINGLE("Mic 1 Volume", AC97_MIC, 8, 31, 1),
+AC97_SINGLE("Mic 2 Volume", AC97_MIC, 0, 31, 1),
+AC97_SINGLE("Mic 1 to Mono Switch", AC97_LINE, 7, 1, 1),
+AC97_SINGLE("Mic 2 to Mono Switch", AC97_LINE, 6, 1, 1),
+AC97_SINGLE("Mic Boost (+20dB) Switch", AC97_LINE, 5, 1, 0),
+AC97_ENUM("Mic to Headphone Mux", wm9713_enum[0]),
+AC97_SINGLE("Mic Headphone Mixer Volume", AC97_LINE, 0, 7, 1),
+
+AC97_SINGLE("Capture Switch", AC97_CD, 15, 1, 1),
+AC97_ENUM("Capture Volume Steps", wm9713_enum[4]),
+AC97_DOUBLE("Capture Volume", AC97_CD, 8, 0, 15, 0),
+AC97_SINGLE("Capture ZC Switch", AC97_CD, 7, 1, 0),
+
+AC97_ENUM("Capture to Headphone Mux", wm9713_enum[1]),
+AC97_SINGLE("Capture to Headphone Volume", AC97_VIDEO, 11, 7, 1),
+AC97_ENUM("Capture to Mono Mux", wm9713_enum[2]),
+AC97_SINGLE("Capture to Mono Boost (+20dB) Switch", AC97_VIDEO, 8, 1, 0),
+AC97_SINGLE("Capture ADC Boost (+20dB) Switch", AC97_VIDEO, 6, 1, 0),
+AC97_ENUM("Capture Select", wm9713_enum[3]),
+
+AC97_SINGLE("ALC Target Volume", AC97_CODEC_CLASS_REV, 12, 15, 0),
+AC97_SINGLE("ALC Hold Time", AC97_CODEC_CLASS_REV, 8, 15, 0),
+AC97_SINGLE("ALC Decay Time ", AC97_CODEC_CLASS_REV, 4, 15, 0),
+AC97_SINGLE("ALC Attack Time", AC97_CODEC_CLASS_REV, 0, 15, 0),
+AC97_ENUM("ALC Function", wm9713_enum[5]),
+AC97_SINGLE("ALC Max Volume", AC97_PCI_SVID, 11, 7, 0),
+AC97_SINGLE("ALC ZC Timeout", AC97_PCI_SVID, 9, 3, 0),
+AC97_SINGLE("ALC ZC Switch", AC97_PCI_SVID, 8, 1, 0),
+AC97_SINGLE("ALC NG Switch", AC97_PCI_SVID, 7, 1, 0),
+AC97_ENUM("ALC NG Type", wm9713_enum[13]),
+AC97_SINGLE("ALC NG Threshold", AC97_PCI_SVID, 0, 31, 0),
+
+AC97_DOUBLE("Master ZC Switch", AC97_MASTER, 14, 6, 1, 0),
+AC97_DOUBLE("Headphone ZC Switch", AC97_HEADPHONE, 14, 6, 1, 0),
+AC97_DOUBLE("Out3/4 ZC Switch", AC97_MASTER_MONO, 14, 6, 1, 0),
+AC97_SINGLE("Master Right Switch", AC97_MASTER, 7, 1, 1),
+AC97_SINGLE("Headphone Right Switch", AC97_HEADPHONE, 7, 1, 1),
+AC97_SINGLE("Out3/4 Right Switch", AC97_MASTER_MONO, 7, 1, 1),
+
+AC97_SINGLE("Mono In to Headphone Switch", AC97_MASTER_TONE, 15, 1, 1),
+AC97_SINGLE("Mono In to Master Switch", AC97_MASTER_TONE, 14, 1, 1),
+AC97_SINGLE("Mono In Volume", AC97_MASTER_TONE, 8, 31, 1),
+AC97_SINGLE("Mono Switch", AC97_MASTER_TONE, 7, 1, 1),
+AC97_SINGLE("Mono ZC Switch", AC97_MASTER_TONE, 6, 1, 0),
+AC97_SINGLE("Mono Volume", AC97_MASTER_TONE, 0, 31, 1),
+
+AC97_SINGLE("PC Beep to Headphone Switch", AC97_AUX, 15, 1, 1),
+AC97_SINGLE("PC Beep to Headphone Volume", AC97_AUX, 12, 7, 1),
+AC97_SINGLE("PC Beep to Master Switch", AC97_AUX, 11, 1, 1),
+AC97_SINGLE("PC Beep to Master Volume", AC97_AUX, 8, 7, 1),
+AC97_SINGLE("PC Beep to Mono Switch", AC97_AUX, 7, 1, 1),
+AC97_SINGLE("PC Beep to Mono Volume", AC97_AUX, 4, 7, 1),
+
+AC97_SINGLE("Voice to Headphone Switch", AC97_PCM, 15, 1, 1),
+AC97_SINGLE("Voice to Headphone Volume", AC97_PCM, 12, 7, 1),
+AC97_SINGLE("Voice to Master Switch", AC97_PCM, 11, 1, 1),
+AC97_SINGLE("Voice to Master Volume", AC97_PCM, 8, 7, 1),
+AC97_SINGLE("Voice to Mono Switch", AC97_PCM, 7, 1, 1),
+AC97_SINGLE("Voice to Mono Volume", AC97_PCM, 4, 7, 1),
+
+AC97_SINGLE("Aux to Headphone Switch", AC97_REC_SEL, 15, 1, 1),
+AC97_SINGLE("Aux to Headphone Volume", AC97_REC_SEL, 12, 7, 1),
+AC97_SINGLE("Aux to Master Switch", AC97_REC_SEL, 11, 1, 1),
+AC97_SINGLE("Aux to Master Volume", AC97_REC_SEL, 8, 7, 1),
+AC97_SINGLE("Aux to Mono Switch", AC97_REC_SEL, 7, 1, 1),
+AC97_SINGLE("Aux to Mono Volume", AC97_REC_SEL, 4, 7, 1),
+
+AC97_ENUM("Mono Input Mux", wm9713_enum[6]),
+AC97_ENUM("Master Input Mux", wm9713_enum[7]),
+AC97_ENUM("Headphone Input Mux", wm9713_enum[8]),
+AC97_ENUM("Out 3 Input Mux", wm9713_enum[9]),
+AC97_ENUM("Out 4 Input Mux", wm9713_enum[10]),
+
+AC97_ENUM("Bass Control", wm9713_enum[12]),
+AC97_SINGLE("Bass Cut-off Switch", AC97_GENERAL_PURPOSE, 12, 1, 1),
+AC97_SINGLE("Tone Cut-off Switch", AC97_GENERAL_PURPOSE, 4, 1, 1),
+AC97_SINGLE("Playback Attenuate (-6dB) Switch", AC97_GENERAL_PURPOSE, 6, 1, 0),
+AC97_SINGLE("Bass Volume", AC97_GENERAL_PURPOSE, 8, 15, 1),
+AC97_SINGLE("Tone Volume", AC97_GENERAL_PURPOSE, 0, 15, 1),
 };
 
-static const snd_kcontrol_new_t wm13_snd_ac97_controls_dac[] = {
-AC97_DOUBLE("DAC Volume", AC97_PHONE, 8, 0, 31, 1),
-AC97_SINGLE("DAC to Headphone Mute", AC97_PHONE, 15, 1, 1),
-AC97_SINGLE("DAC to Speaker Mute", AC97_PHONE, 14, 1, 1),
-AC97_SINGLE("DAC to Mono Mute", AC97_PHONE, 13, 1, 1),
+static const snd_kcontrol_new_t wm13_snd_ac97_controls_3d[] = {
+AC97_ENUM("Inv Input Mux", wm9713_enum[11]),
+AC97_SINGLE("3D Upper Cut-off Switch", AC97_REC_GAIN_MIC, 5, 1, 0),
+AC97_SINGLE("3D Lower Cut-off Switch", AC97_REC_GAIN_MIC, 4, 1, 0),
+AC97_SINGLE("3D Depth", AC97_REC_GAIN_MIC, 0, 15, 1),
 };
 
-static const snd_kcontrol_new_t wm13_snd_ac97_controls_mic[] = {
-AC97_SINGLE("MICA Volume", AC97_MIC, 8, 31, 1),
-AC97_SINGLE("MICB Volume", AC97_MIC, 0, 31, 1),
-AC97_SINGLE("MICA to Mono Mute", AC97_LINE, 7, 1, 1),
-AC97_SINGLE("MICB to Mono Mute", AC97_LINE, 6, 1, 1),
-AC97_SINGLE("MIC Boost (+20dB)", AC97_LINE, 5, 1, 1),
-AC97_ENUM("MIC Headphone Routing", wm9713_enum[0]),
-AC97_SINGLE("MIC Headphone Mixer Volume", AC97_LINE, 0, 7, 1)
-};
-
-static const snd_kcontrol_new_t wm13_snd_ac97_controls_adc[] = {
-AC97_SINGLE("ADC Mute", AC97_CD, 15, 1, 1),
-AC97_DOUBLE("Gain Step Size (1.5dB/0.75dB)", AC97_CD, 14, 6, 1, 1),
-AC97_DOUBLE("ADC Volume",AC97_CD, 8, 0, 15, 0),
-AC97_SINGLE("ADC Zero Cross", AC97_CD, 7, 1, 1),
-};
-
-static const snd_kcontrol_new_t wm13_snd_ac97_controls_recsel[] = {
-AC97_ENUM("Record to Headphone Path", wm9713_enum[1]),
-AC97_SINGLE("Record to Headphone Volume", AC97_VIDEO, 11, 7, 0),
-AC97_ENUM("Record to Mono Path", wm9713_enum[2]),
-AC97_SINGLE("Record to Mono Boost (+20dB)", AC97_VIDEO, 8, 1, 0),
-AC97_SINGLE("Record ADC Boost (+20dB)", AC97_VIDEO, 6, 1, 0),
-AC97_ENUM("Record Select Left", wm9713_enum[3]),
-AC97_ENUM("Record Select Right", wm9713_enum[4]),
-};
+static int patch_wolfson_wm9713_3d(ac97_t * ac97)
+{
+	int err, i;
+
+	for (i = 0; i < ARRAY_SIZE(wm13_snd_ac97_controls_3d); i++) {
+		if ((err = snd_ctl_add(ac97->bus->card, snd_ac97_cnew(&wm13_snd_ac97_controls_3d[i], ac97))) < 0)
+			return err;
+	}
+	return 0;
+}
 
 static int patch_wolfson_wm9713_specific(ac97_t * ac97)
 {
 	int err, i;
 	
-	for (i = 0; i < ARRAY_SIZE(wm13_snd_ac97_controls_line_in); i++) {
-		if ((err = snd_ctl_add(ac97->bus->card, snd_ac97_cnew(&wm13_snd_ac97_controls_line_in[i], ac97))) < 0)
+	for (i = 0; i < ARRAY_SIZE(wm13_snd_ac97_controls); i++) {
+		if ((err = snd_ctl_add(ac97->bus->card, snd_ac97_cnew(&wm13_snd_ac97_controls[i], ac97))) < 0)
 			return err;
 	}
 	snd_ac97_write_cache(ac97, AC97_PC_BEEP, 0x0808);
-	
-	for (i = 0; i < ARRAY_SIZE(wm13_snd_ac97_controls_dac); i++) {
-		if ((err = snd_ctl_add(ac97->bus->card, snd_ac97_cnew(&wm13_snd_ac97_controls_dac[i], ac97))) < 0)
-			return err;
-	}
 	snd_ac97_write_cache(ac97, AC97_PHONE, 0x0808);
-	
-	for (i = 0; i < ARRAY_SIZE(wm13_snd_ac97_controls_mic); i++) {
-		if ((err = snd_ctl_add(ac97->bus->card, snd_ac97_cnew(&wm13_snd_ac97_controls_mic[i], ac97))) < 0)
-			return err;
-	}
 	snd_ac97_write_cache(ac97, AC97_MIC, 0x0808);
 	snd_ac97_write_cache(ac97, AC97_LINE, 0x00da);
-	
-	for (i = 0; i < ARRAY_SIZE(wm13_snd_ac97_controls_adc); i++) {
-		if ((err = snd_ctl_add(ac97->bus->card, snd_ac97_cnew(&wm13_snd_ac97_controls_adc[i], ac97))) < 0)
-			return err;
-	}
 	snd_ac97_write_cache(ac97, AC97_CD, 0x0808);
-	
-	for (i = 0; i < ARRAY_SIZE(wm13_snd_ac97_controls_recsel); i++) {
-		if ((err = snd_ctl_add(ac97->bus->card, snd_ac97_cnew(&wm13_snd_ac97_controls_recsel[i], ac97))) < 0)
-			return err;
-	}
 	snd_ac97_write_cache(ac97, AC97_VIDEO, 0xd612);
 	snd_ac97_write_cache(ac97, AC97_REC_GAIN, 0x1ba0);
-	
 	return 0;
 }
 
@@ -525,6 +771,7 @@
 
 static struct snd_ac97_build_ops patch_wolfson_wm9713_ops = {
 	.build_specific = patch_wolfson_wm9713_specific,
+	.build_3d = patch_wolfson_wm9713_3d,
 #ifdef CONFIG_PM	
 	.suspend = patch_wolfson_wm9713_suspend,
 	.resume = patch_wolfson_wm9713_resume
@@ -533,10 +780,13 @@
 
 int patch_wolfson13(ac97_t * ac97)
 {
+	/* WM9713, WM9714 */
 	ac97->build_ops = &patch_wolfson_wm9713_ops;
 
 	ac97->flags |= AC97_HAS_NO_REC_GAIN | AC97_STEREO_MUTES | AC97_HAS_NO_PHONE |
-		AC97_HAS_NO_PC_BEEP | AC97_HAS_NO_VIDEO | AC97_HAS_NO_CD;
+		AC97_HAS_NO_PC_BEEP | AC97_HAS_NO_VIDEO | AC97_HAS_NO_CD | AC97_HAS_NO_TONE |
+		AC97_HAS_NO_STD_PCM;
+	ac97->scaps &= ~AC97_SCAP_MODEM;
 
 	snd_ac97_write_cache(ac97, AC97_EXTENDED_MID, 0xda00);
 	snd_ac97_write_cache(ac97, AC97_EXTENDED_MSTATUS, 0x3810);
@@ -1379,6 +1629,7 @@
 	u32 subid = ((u32)ac97->subsystem_vendor << 16) | ac97->subsystem_device;
 	switch (subid) {
 	case 0x103c0890: /* HP nc6000 */
+	case 0x103c099c: /* HP nx6110 */
 	case 0x103c006d: /* HP nx9105 */
 	case 0x17340088: /* FSC Scenic-W */
 		/* enable headphone jack sense */
@@ -1706,7 +1957,7 @@
 };
 
 static const snd_kcontrol_new_t snd_ac97_spdif_controls_alc650[] = {
-        AC97_SINGLE("IEC958 Capture Switch", AC97_ALC650_MULTICH, 11, 1, 0),
+        AC97_SINGLE(SNDRV_CTL_NAME_IEC958("",CAPTURE,SWITCH), AC97_ALC650_MULTICH, 11, 1, 0),
         AC97_SINGLE("Analog to IEC958 Output", AC97_ALC650_MULTICH, 12, 1, 0),
 	/* disable this control since it doesn't work as expected */
 	/* AC97_SINGLE("IEC958 Input Monitor", AC97_ALC650_MULTICH, 13, 1, 0), */
@@ -1849,12 +2100,12 @@
 }
 
 static const snd_kcontrol_new_t snd_ac97_spdif_controls_alc655[] = {
-        AC97_PAGE_SINGLE("IEC958 Capture Switch", AC97_ALC650_MULTICH, 11, 1, 0, 0),
+        AC97_PAGE_SINGLE(SNDRV_CTL_NAME_IEC958("",CAPTURE,SWITCH), AC97_ALC650_MULTICH, 11, 1, 0, 0),
 	/* disable this control since it doesn't work as expected */
         /* AC97_PAGE_SINGLE("IEC958 Input Monitor", AC97_ALC650_MULTICH, 14, 1, 0, 0), */
 	{
 		.iface  = SNDRV_CTL_ELEM_IFACE_MIXER,
-		.name   = "IEC958 Playback Route",
+		.name   = SNDRV_CTL_NAME_IEC958("",PLAYBACK,NONE) "Route",
 		.info   = alc655_iec958_route_info,
 		.get    = alc655_iec958_route_get,
 		.put    = alc655_iec958_route_put,
@@ -2416,6 +2667,16 @@
 }
 
 /*
+ * VT1617A codec
+ */
+int patch_vt1617a(ac97_t * ac97)
+{
+	ac97->ext_id |= AC97_EI_SPDIF;	/* force the detection of spdif */
+	ac97->rates[AC97_RATES_SPDIF] = SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000;
+	return 0;
+}
+
+/*
  */
 static void it2646_update_jacks(ac97_t *ac97)
 {
@@ -2433,7 +2694,7 @@
 };
 
 static const snd_kcontrol_new_t snd_ac97_spdif_controls_it2646[] = {
-	AC97_SINGLE("IEC958 Capture Switch", 0x76, 11, 1, 0),
+	AC97_SINGLE(SNDRV_CTL_NAME_IEC958("",CAPTURE,SWITCH), 0x76, 11, 1, 0),
 	AC97_SINGLE("Analog to IEC958 Output", 0x76, 12, 1, 0),
 	AC97_SINGLE("IEC958 Input Monitor", 0x76, 13, 1, 0),
 };
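
All of the control tables above are built from the AC97_SINGLE()/AC97_DOUBLE()/AC97_ENUM() helpers, whose arguments are (name, register, shift, max, invert): each entry describes a bit field inside one AC'97 codec register instead of open-coding info/get/put callbacks per control. As a rough, self-contained illustration of what one tuple means (the struct and names below are hypothetical and only mimic the idea; the real macros live elsewhere in sound/pci/ac97):

#include <sound/driver.h>
#include <sound/ac97_codec.h>		/* AC97_AUX and friends */

/* Illustrative only -- not part of the patch.  AC97_SINGLE(name, reg,
 * shift, max, invert) describes one mixer control as a bit field in an
 * AC'97 register: 'max' is the largest raw value (so the field width is
 * the number of bits needed to hold it) and 'invert' means larger raw
 * values are quieter, which is typical for attenuation registers.
 */
struct example_ac97_field {		/* hypothetical helper type */
	unsigned short reg;		/* AC'97 register offset   */
	unsigned char shift;		/* lowest bit of the field */
	unsigned char max;		/* maximum raw value       */
	unsigned char invert;		/* 1: larger raw = quieter */
};

/* e.g. AC97_SINGLE("Out3 Volume", AC97_AUX, 0, 31, 1) above is a 5-bit,
 * inverted volume field in bits 4..0 of the AC97_AUX register.
 */
static const struct example_ac97_field out3_volume_example = {
	.reg = AC97_AUX, .shift = 0, .max = 31, .invert = 1,
};
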
diff --git a/sound/pci/ac97/ac97_patch.h b/sound/pci/ac97/ac97_patch.h
index 7b7377d..ec18113 100644
--- a/sound/pci/ac97/ac97_patch.h
+++ b/sound/pci/ac97/ac97_patch.h
@@ -56,5 +56,6 @@
 int patch_cm9761(ac97_t * ac97);
 int patch_cm9780(ac97_t * ac97);
 int patch_vt1616(ac97_t * ac97);
+int patch_vt1617a(ac97_t * ac97);
 int patch_it2646(ac97_t * ac97);
 int mpatch_si3036(ac97_t * ac97);
diff --git a/sound/pci/ali5451/ali5451.c b/sound/pci/ali5451/ali5451.c
index f08ae71f..ce6c9fa 100644
--- a/sound/pci/ali5451/ali5451.c
+++ b/sound/pci/ali5451/ali5451.c
@@ -1842,7 +1842,7 @@
 	return 0;
 }
 
-struct ali_pcm_description ali_pcms[] = {
+static struct ali_pcm_description ali_pcms[] = {
 	{ "ALI 5451", ALI_CHANNELS, 1, &snd_ali_playback_ops, &snd_ali_capture_ops },
 	{ "ALI 5451 modem", 1, 1, &snd_ali_modem_playback_ops, &snd_ali_modem_capture_ops }
 };
@@ -1959,9 +1959,9 @@
 static snd_kcontrol_new_t snd_ali5451_mixer_spdif[] __devinitdata = {
 	/* spdif aplayback switch */
 	/* FIXME: "IEC958 Playback Switch" may conflict with one on ac97_codec */
-	ALI5451_SPDIF("IEC958 Output switch", 0, 0),
+	ALI5451_SPDIF(SNDRV_CTL_NAME_IEC958("Output ",NONE,SWITCH), 0, 0),
 	/* spdif out to spdif channel */
-	ALI5451_SPDIF("IEC958 Channel Output Switch", 0, 1),
+	ALI5451_SPDIF(SNDRV_CTL_NAME_IEC958("Channel Output ",NONE,SWITCH), 0, 1),
 	/* spdif in from spdif channel */
 	ALI5451_SPDIF(SNDRV_CTL_NAME_IEC958("",CAPTURE,SWITCH), 0, 2)
 };
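
This driver, like the ac97 hunks above and several drivers below, stops spelling IEC958 control names by hand and goes through SNDRV_CTL_NAME_IEC958(), so every driver ends up with identical, convention-conformant names. The helper is plain compile-time string pasting; a hypothetical re-creation of the idea (the EX_* names are made up, the real definitions live in the ALSA core headers):

/* Hypothetical re-creation of the naming helper, for illustration only. */
#define EX_NAME_NONE		""
#define EX_NAME_PLAYBACK	"Playback "
#define EX_NAME_CAPTURE		"Capture "
#define EX_IEC958_SWITCH	"Switch"
#define EX_IEC958_VOLUME	"Volume"
#define EX_NAME_IEC958(expl, direction, what) \
	"IEC958 " expl EX_NAME_##direction EX_IEC958_##what

/* So, matching the renames above:
 *   EX_NAME_IEC958("", CAPTURE, SWITCH)      -> "IEC958 Capture Switch"
 *   EX_NAME_IEC958("Output ", NONE, SWITCH)  -> "IEC958 Output Switch"
 */
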
diff --git a/sound/pci/atiixp.c b/sound/pci/atiixp.c
index cafab4a..904d173 100644
--- a/sound/pci/atiixp.c
+++ b/sound/pci/atiixp.c
@@ -248,6 +248,7 @@
 	unsigned int period_bytes, periods;
 	int opened;
 	int running;
+	int suspended;
 	int pcm_open_flag;
 	int ac97_pcm_type;	/* index # of ac97_pcm to access, -1 = not used */
 	unsigned int saved_curptr;
@@ -699,12 +700,18 @@
 	spin_lock(&chip->reg_lock);
 	switch (cmd) {
 	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+	case SNDRV_PCM_TRIGGER_RESUME:
 		dma->ops->enable_transfer(chip, 1);
 		dma->running = 1;
+		dma->suspended = 0;
 		break;
 	case SNDRV_PCM_TRIGGER_STOP:
+	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
 		dma->ops->enable_transfer(chip, 0);
 		dma->running = 0;
+		dma->suspended = cmd == SNDRV_PCM_TRIGGER_SUSPEND;
 		break;
 	default:
 		err = -EINVAL;
@@ -975,6 +982,7 @@
 {
 	.info =			(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
 				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
+				 SNDRV_PCM_INFO_PAUSE |
 				 SNDRV_PCM_INFO_RESUME |
 				 SNDRV_PCM_INFO_MMAP_VALID),
 	.formats =		SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE,
@@ -1443,7 +1451,7 @@
 	for (i = 0; i < NUM_ATI_PCMDEVS; i++)
 		if (chip->pcmdevs[i]) {
 			atiixp_dma_t *dma = &chip->dmas[i];
-			if (dma->substream && dma->running) {
+			if (dma->substream && dma->suspended) {
 				dma->ops->enable_dma(chip, 1);
 				writel((u32)dma->desc_buf.addr | ATI_REG_LINKPTR_EN,
 				       chip->remap_addr + dma->ops->llp_offset);
diff --git a/sound/pci/au88x0/au88x0_pcm.c b/sound/pci/au88x0/au88x0_pcm.c
index 04dcefd..38bd2b5 100644
--- a/sound/pci/au88x0/au88x0_pcm.c
+++ b/sound/pci/au88x0/au88x0_pcm.c
@@ -33,7 +33,7 @@
 /* hardware definition */
 static snd_pcm_hardware_t snd_vortex_playback_hw_adb = {
 	.info =
-	    (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_RESUME |
+	    (SNDRV_PCM_INFO_MMAP | /* SNDRV_PCM_INFO_RESUME | */
 	     SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_INTERLEAVED |
 	     SNDRV_PCM_INFO_MMAP_VALID),
 	.formats =
@@ -58,7 +58,7 @@
 #ifndef CHIP_AU8820
 static snd_pcm_hardware_t snd_vortex_playback_hw_a3d = {
 	.info =
-	    (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_RESUME |
+	    (SNDRV_PCM_INFO_MMAP | /* SNDRV_PCM_INFO_RESUME | */
 	     SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_INTERLEAVED |
 	     SNDRV_PCM_INFO_MMAP_VALID),
 	.formats =
@@ -78,7 +78,7 @@
 #endif
 static snd_pcm_hardware_t snd_vortex_playback_hw_spdif = {
 	.info =
-	    (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_RESUME |
+	    (SNDRV_PCM_INFO_MMAP | /* SNDRV_PCM_INFO_RESUME | */
 	     SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_INTERLEAVED |
 	     SNDRV_PCM_INFO_MMAP_VALID),
 	.formats =
@@ -220,8 +220,10 @@
 		    vortex_adb_allocroute(chip, -1,
 					  params_channels(hw_params),
 					  substream->stream, type);
-		if (dma < 0)
+		if (dma < 0) {
+			spin_unlock_irq(&chip->lock);
 			return dma;
+		}
 		stream = substream->runtime->private_data = &chip->dma_adb[dma];
 		stream->substream = substream;
 		/* Setup Buffers. */
diff --git a/sound/pci/ca0106/ca0106_main.c b/sound/pci/ca0106/ca0106_main.c
index 95c2892..7e27bfc 100644
--- a/sound/pci/ca0106/ca0106_main.c
+++ b/sound/pci/ca0106/ca0106_main.c
@@ -188,6 +188,14 @@
 	   .name   = "MSI K8N Diamond MB [SB0438]",
 	   .gpio_type = 1,
 	   .i2c_adc = 1 } ,
+	 /* Shuttle XPC SD31P, which has an onboard Creative Labs Sound Blaster
+	  * Live! 24-bit EAX high-definition 7.1 audio processor.
+	  * Added using info from andrewvegan in alsa bug #1298
+	  */
+	 { .serial = 0x30381297,
+	   .name   = "Shuttle XPC SD31P [SD31P]",
+	   .gpio_type = 1,
+	   .i2c_adc = 1 } ,
 	 { .serial = 0,
 	   .name   = "AudigyLS [Unknown]" }
 };
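
New boards such as the Shuttle SD31P are added to this table keyed on the 32-bit PCI subsystem ID (.serial); the probe code walks the list and falls back to the zero-serial "AudigyLS [Unknown]" terminator when nothing matches. Roughly, assuming the serial packs the subsystem device in the high half and the subsystem vendor in the low half (my_board_quirk and my_find_board are illustrative names, not the driver's own):

#include <linux/pci.h>

struct my_board_quirk {			/* stand-in for the driver's table entry */
	u32 serial;			/* subsystem device << 16 | subsystem vendor */
	const char *name;
	int gpio_type;
	int i2c_adc;
};

/* Return the matching quirk entry, or the serial == 0 default terminator. */
static const struct my_board_quirk *
my_find_board(const struct my_board_quirk *tbl, struct pci_dev *pci)
{
	u32 serial = ((u32)pci->subsystem_device << 16) | pci->subsystem_vendor;

	for (; tbl->serial; tbl++)
		if (tbl->serial == serial)
			break;
	return tbl;
}
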
diff --git a/sound/pci/ca0106/ca0106_mixer.c b/sound/pci/ca0106/ca0106_mixer.c
index 0e5e9ce..b6b8882 100644
--- a/sound/pci/ca0106/ca0106_mixer.c
+++ b/sound/pci/ca0106/ca0106_mixer.c
@@ -297,7 +297,7 @@
 static snd_kcontrol_new_t snd_ca0106_spdif_mask_control =
 {
 	.access =	SNDRV_CTL_ELEM_ACCESS_READ,
-        .iface =        SNDRV_CTL_ELEM_IFACE_MIXER,
+        .iface =        SNDRV_CTL_ELEM_IFACE_PCM,
         .name =         SNDRV_CTL_NAME_IEC958("",PLAYBACK,MASK),
 	.count =	4,
         .info =         snd_ca0106_spdif_info,
@@ -306,7 +306,7 @@
 
 static snd_kcontrol_new_t snd_ca0106_spdif_control =
 {
-        .iface =	SNDRV_CTL_ELEM_IFACE_MIXER,
+        .iface =	SNDRV_CTL_ELEM_IFACE_PCM,
         .name =         SNDRV_CTL_NAME_IEC958("",PLAYBACK,DEFAULT),
 	.count =	4,
         .info =         snd_ca0106_spdif_info,
diff --git a/sound/pci/cmipci.c b/sound/pci/cmipci.c
index f5a4ac1..b098b51 100644
--- a/sound/pci/cmipci.c
+++ b/sound/pci/cmipci.c
@@ -1029,7 +1029,7 @@
 static snd_kcontrol_new_t snd_cmipci_spdif_mask __devinitdata =
 {
 	.access =	SNDRV_CTL_ELEM_ACCESS_READ,
-	.iface =	SNDRV_CTL_ELEM_IFACE_MIXER,
+	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
 	.name =		SNDRV_CTL_NAME_IEC958("",PLAYBACK,CON_MASK),
 	.info =		snd_cmipci_spdif_mask_info,
 	.get =		snd_cmipci_spdif_mask_get,
diff --git a/sound/pci/cs46xx/cs46xx.c b/sound/pci/cs46xx/cs46xx.c
index db212ec..b9fff4e 100644
--- a/sound/pci/cs46xx/cs46xx.c
+++ b/sound/pci/cs46xx/cs46xx.c
@@ -113,7 +113,7 @@
 		return err;
 	}
 #endif
-	if ((err = snd_cs46xx_mixer(chip)) < 0) {
+	if ((err = snd_cs46xx_mixer(chip, 2)) < 0) {
 		snd_card_free(card);
 		return err;
 	}
diff --git a/sound/pci/cs46xx/cs46xx_lib.c b/sound/pci/cs46xx/cs46xx_lib.c
index ff28af1..4b05215 100644
--- a/sound/pci/cs46xx/cs46xx_lib.c
+++ b/sound/pci/cs46xx/cs46xx_lib.c
@@ -1243,8 +1243,8 @@
 {
 	.info =			(SNDRV_PCM_INFO_MMAP |
 				 SNDRV_PCM_INFO_INTERLEAVED | 
-				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
-				 SNDRV_PCM_INFO_RESUME),
+				 SNDRV_PCM_INFO_BLOCK_TRANSFER /*|*/
+				 /*SNDRV_PCM_INFO_RESUME*/),
 	.formats =		(SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_U8 |
 				 SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S16_BE |
 				 SNDRV_PCM_FMTBIT_U16_LE | SNDRV_PCM_FMTBIT_U16_BE),
@@ -1265,8 +1265,8 @@
 {
 	.info =			(SNDRV_PCM_INFO_MMAP |
 				 SNDRV_PCM_INFO_INTERLEAVED |
-				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
-				 SNDRV_PCM_INFO_RESUME),
+				 SNDRV_PCM_INFO_BLOCK_TRANSFER /*|*/
+				 /*SNDRV_PCM_INFO_RESUME*/),
 	.formats =		SNDRV_PCM_FMTBIT_S16_LE,
 	.rates =		SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000,
 	.rate_min =		5500,
@@ -2231,7 +2231,7 @@
 },
 {
 	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-	.name = "IEC958 Output Switch",
+	.name = SNDRV_CTL_NAME_IEC958("Output ",NONE,SWITCH),
 	.info = snd_mixer_boolean_info,
 	.get = snd_cs46xx_iec958_get,
 	.put = snd_cs46xx_iec958_put,
@@ -2239,7 +2239,7 @@
 },
 {
 	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-	.name = "IEC958 Input Switch",
+	.name = SNDRV_CTL_NAME_IEC958("Input ",NONE,SWITCH),
 	.info = snd_mixer_boolean_info,
 	.get = snd_cs46xx_iec958_get,
 	.put = snd_cs46xx_iec958_put,
@@ -2249,7 +2249,7 @@
 /* Input IEC958 volume does not work for the moment. (Benny) */
 {
 	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-	.name = "IEC958 Input Volume",
+	.name = SNDRV_CTL_NAME_IEC958("Input ",NONE,VOLUME),
 	.info = snd_cs46xx_vol_info,
 	.get = snd_cs46xx_vol_iec958_get,
 	.put = snd_cs46xx_vol_iec958_put,
@@ -2440,7 +2440,7 @@
 	return -ENXIO;
 }
 
-int __devinit snd_cs46xx_mixer(cs46xx_t *chip)
+int __devinit snd_cs46xx_mixer(cs46xx_t *chip, int spdif_device)
 {
 	snd_card_t *card = chip->card;
 	snd_ctl_elem_id_t id;
@@ -2476,6 +2476,8 @@
 	for (idx = 0; idx < ARRAY_SIZE(snd_cs46xx_controls); idx++) {
 		snd_kcontrol_t *kctl;
 		kctl = snd_ctl_new1(&snd_cs46xx_controls[idx], chip);
+		if (kctl && kctl->id.iface == SNDRV_CTL_ELEM_IFACE_PCM)
+			kctl->id.device = spdif_device;
 		if ((err = snd_ctl_add(card, kctl)) < 0)
 			return err;
 	}
diff --git a/sound/pci/emu10k1/emu10k1.c b/sound/pci/emu10k1/emu10k1.c
index b17142c..fc377c4 100644
--- a/sound/pci/emu10k1/emu10k1.c
+++ b/sound/pci/emu10k1/emu10k1.c
@@ -149,7 +149,7 @@
 		}
 	}
 
-	if ((err = snd_emu10k1_mixer(emu)) < 0) {
+	if ((err = snd_emu10k1_mixer(emu, 0, 3)) < 0) {
 		snd_card_free(card);
 		return err;
 	}
diff --git a/sound/pci/emu10k1/emu10k1_main.c b/sound/pci/emu10k1/emu10k1_main.c
index 746b51e..e69d5b7 100644
--- a/sound/pci/emu10k1/emu10k1_main.c
+++ b/sound/pci/emu10k1/emu10k1_main.c
@@ -741,12 +741,20 @@
 	 .emu10k1_chip = 1,
 	 .ac97_chip = 1,
 	 .sblive51 = 1} ,
+	/* Tested by Thomas Zehetbauer 27th Aug 2005 */
+	{.vendor = 0x1102, .device = 0x0002, .subsystem = 0x80651102,
+	 .driver = "EMU10K1", .name = "SB Live 5.1 [SB0220]", 
+	 .id = "Live",
+	 .emu10k1_chip = 1,
+	 .ac97_chip = 1,
+	 .sblive51 = 1} ,
 	{.vendor = 0x1102, .device = 0x0002, .subsystem = 0x80641102,
 	 .driver = "EMU10K1", .name = "SB Live 5.1", 
 	 .id = "Live",
 	 .emu10k1_chip = 1,
 	 .ac97_chip = 1,
 	 .sblive51 = 1} ,
+	/* Tested by alsa bugtrack user "hus" 12th Sept 2005 */
 	{.vendor = 0x1102, .device = 0x0002, .subsystem = 0x80611102,
 	 .driver = "EMU10K1", .name = "SBLive! Player 5.1 [SB0060]", 
 	 .id = "Live",
diff --git a/sound/pci/emu10k1/emu10k1x.c b/sound/pci/emu10k1/emu10k1x.c
index e90c5ddd..52c7826 100644
--- a/sound/pci/emu10k1/emu10k1x.c
+++ b/sound/pci/emu10k1/emu10k1x.c
@@ -1183,7 +1183,7 @@
 static snd_kcontrol_new_t snd_emu10k1x_spdif_mask_control =
 {
 	.access =	SNDRV_CTL_ELEM_ACCESS_READ,
-	.iface =        SNDRV_CTL_ELEM_IFACE_MIXER,
+	.iface =        SNDRV_CTL_ELEM_IFACE_PCM,
 	.name =         SNDRV_CTL_NAME_IEC958("",PLAYBACK,MASK),
 	.count =	3,
 	.info =         snd_emu10k1x_spdif_info,
@@ -1192,7 +1192,7 @@
 
 static snd_kcontrol_new_t snd_emu10k1x_spdif_control =
 {
-	.iface =	SNDRV_CTL_ELEM_IFACE_MIXER,
+	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
 	.name =         SNDRV_CTL_NAME_IEC958("",PLAYBACK,DEFAULT),
 	.count =	3,
 	.info =         snd_emu10k1x_spdif_info,
diff --git a/sound/pci/emu10k1/emufx.c b/sound/pci/emu10k1/emufx.c
index 0529fb2..637c555 100644
--- a/sound/pci/emu10k1/emufx.c
+++ b/sound/pci/emu10k1/emufx.c
@@ -1159,12 +1159,12 @@
  	/* Optical SPDIF Playback Volume */
 	A_ADD_VOLUME_IN(stereo_mix, gpr, A_EXTIN_OPT_SPDIF_L);
 	A_ADD_VOLUME_IN(stereo_mix+1, gpr+1, A_EXTIN_OPT_SPDIF_R);
-	snd_emu10k1_init_stereo_control(&controls[nctl++], "IEC958 Optical Playback Volume", gpr, 0);
+	snd_emu10k1_init_stereo_control(&controls[nctl++], SNDRV_CTL_NAME_IEC958("Optical ",PLAYBACK,VOLUME), gpr, 0);
 	gpr += 2;
 	/* Optical SPDIF Capture Volume */
 	A_ADD_VOLUME_IN(capture, gpr, A_EXTIN_OPT_SPDIF_L);
 	A_ADD_VOLUME_IN(capture+1, gpr+1, A_EXTIN_OPT_SPDIF_R);
-	snd_emu10k1_init_stereo_control(&controls[nctl++], "IEC958 Optical Capture Volume", gpr, 0);
+	snd_emu10k1_init_stereo_control(&controls[nctl++], SNDRV_CTL_NAME_IEC958("Optical ",CAPTURE,VOLUME), gpr, 0);
 	gpr += 2;
 
 	/* Line2 Playback Volume */
@@ -1389,7 +1389,7 @@
 			A_OP(icode, &ptr, iACC3, A_EXTOUT(A_EXTOUT_FRONT_L + z), A_GPR(tmp + 0), A_GPR(tmp + 1), A_C_00000000);
 		}
 	}
-	snd_emu10k1_init_stereo_onoff_control(controls + nctl++, "IEC958 Optical Raw Playback Switch", gpr, 0);
+	snd_emu10k1_init_stereo_onoff_control(controls + nctl++, SNDRV_CTL_NAME_IEC958("Optical Raw ",PLAYBACK,SWITCH), gpr, 0);
 	gpr += 2;
 	
 	A_PUT_STEREO_OUTPUT(A_EXTOUT_REAR_L, A_EXTOUT_REAR_R, playback+2 + SND_EMU10K1_PLAYBACK_CHANNELS);
@@ -1716,7 +1716,7 @@
 		/* IEC958 TTL Playback Volume */
 		for (z = 0; z < 2; z++)
 			VOLUME_ADDIN(icode, &ptr, playback + z, EXTIN_SPDIF_CD_L + z, gpr + z);
-		snd_emu10k1_init_stereo_control(controls + i++, "IEC958 TTL Playback Volume", gpr, 0);
+		snd_emu10k1_init_stereo_control(controls + i++, SNDRV_CTL_NAME_IEC958("TTL ",PLAYBACK,VOLUME), gpr, 0);
 		gpr += 2;
 	
 		/* IEC958 TTL Capture Volume + Switch */
@@ -1724,8 +1724,8 @@
 			SWITCH_IN(icode, &ptr, tmp + 0, EXTIN_SPDIF_CD_L + z, gpr + 2 + z);
 			VOLUME_ADD(icode, &ptr, capture + z, tmp + 0, gpr + z);
 		}
-		snd_emu10k1_init_stereo_control(controls + i++, "IEC958 TTL Capture Volume", gpr, 0);
-		snd_emu10k1_init_stereo_onoff_control(controls + i++, "IEC958 TTL Capture Switch", gpr + 2, 0);
+		snd_emu10k1_init_stereo_control(controls + i++, SNDRV_CTL_NAME_IEC958("TTL ",CAPTURE,VOLUME), gpr, 0);
+		snd_emu10k1_init_stereo_onoff_control(controls + i++, SNDRV_CTL_NAME_IEC958("TTL ",CAPTURE,SWITCH), gpr + 2, 0);
 		gpr += 4;
 	}
 	
@@ -1750,7 +1750,7 @@
 		/* IEC958 Optical Playback Volume */
 		for (z = 0; z < 2; z++)
 			VOLUME_ADDIN(icode, &ptr, playback + z, EXTIN_TOSLINK_L + z, gpr + z);
-		snd_emu10k1_init_stereo_control(controls + i++, "IEC958 LiveDrive Playback Volume", gpr, 0);
+		snd_emu10k1_init_stereo_control(controls + i++, SNDRV_CTL_NAME_IEC958("LiveDrive ",PLAYBACK,VOLUME), gpr, 0);
 		gpr += 2;
 	
 		/* IEC958 Optical Capture Volume */
@@ -1758,8 +1758,8 @@
 			SWITCH_IN(icode, &ptr, tmp + 0, EXTIN_TOSLINK_L + z, gpr + 2 + z);
 			VOLUME_ADD(icode, &ptr, capture + z, tmp + 0, gpr + z);
 		}
-		snd_emu10k1_init_stereo_control(controls + i++, "IEC958 LiveDrive Capture Volume", gpr, 0);
-		snd_emu10k1_init_stereo_onoff_control(controls + i++, "IEC958 LiveDrive Capture Switch", gpr + 2, 0);
+		snd_emu10k1_init_stereo_control(controls + i++, SNDRV_CTL_NAME_IEC958("LiveDrive ",CAPTURE,VOLUME), gpr, 0);
+		snd_emu10k1_init_stereo_onoff_control(controls + i++, SNDRV_CTL_NAME_IEC958("LiveDrive ",CAPTURE,SWITCH), gpr + 2, 0);
 		gpr += 4;
 	}
 	
@@ -1784,7 +1784,7 @@
 		/* IEC958 Coax Playback Volume */
 		for (z = 0; z < 2; z++)
 			VOLUME_ADDIN(icode, &ptr, playback + z, EXTIN_COAX_SPDIF_L + z, gpr + z);
-		snd_emu10k1_init_stereo_control(controls + i++, "IEC958 Coaxial Playback Volume", gpr, 0);
+		snd_emu10k1_init_stereo_control(controls + i++, SNDRV_CTL_NAME_IEC958("Coaxial ",PLAYBACK,VOLUME), gpr, 0);
 		gpr += 2;
 	
 		/* IEC958 Coax Capture Volume + Switch */
@@ -1792,8 +1792,8 @@
 			SWITCH_IN(icode, &ptr, tmp + 0, EXTIN_COAX_SPDIF_L + z, gpr + 2 + z);
 			VOLUME_ADD(icode, &ptr, capture + z, tmp + 0, gpr + z);
 		}
-		snd_emu10k1_init_stereo_control(controls + i++, "IEC958 Coaxial Capture Volume", gpr, 0);
-		snd_emu10k1_init_stereo_onoff_control(controls + i++, "IEC958 Coaxial Capture Switch", gpr + 2, 0);
+		snd_emu10k1_init_stereo_control(controls + i++, SNDRV_CTL_NAME_IEC958("Coaxial ",CAPTURE,VOLUME), gpr, 0);
+		snd_emu10k1_init_stereo_onoff_control(controls + i++, SNDRV_CTL_NAME_IEC958("Coaxial ",CAPTURE,SWITCH), gpr + 2, 0);
 		gpr += 4;
 	}
 	
@@ -1920,7 +1920,7 @@
 #endif
 		}
 
-		snd_emu10k1_init_stereo_onoff_control(controls + i++, "IEC958 Optical Raw Playback Switch", gpr, 0);
+		snd_emu10k1_init_stereo_onoff_control(controls + i++, SNDRV_CTL_NAME_IEC958("Optical Raw ",PLAYBACK,SWITCH), gpr, 0);
 		gpr += 2;
 	}
 
diff --git a/sound/pci/emu10k1/emumixer.c b/sound/pci/emu10k1/emumixer.c
index 6be82c5..d71a72e 100644
--- a/sound/pci/emu10k1/emumixer.c
+++ b/sound/pci/emu10k1/emumixer.c
@@ -181,7 +181,7 @@
 static snd_kcontrol_new_t snd_emu10k1_spdif_mask_control =
 {
 	.access =	SNDRV_CTL_ELEM_ACCESS_READ,
-	.iface =        SNDRV_CTL_ELEM_IFACE_MIXER,
+	.iface =        SNDRV_CTL_ELEM_IFACE_PCM,
 	.name =         SNDRV_CTL_NAME_IEC958("",PLAYBACK,MASK),
 	.count =	4,
 	.info =         snd_emu10k1_spdif_info,
@@ -190,7 +190,7 @@
 
 static snd_kcontrol_new_t snd_emu10k1_spdif_control =
 {
-	.iface =	SNDRV_CTL_ELEM_IFACE_MIXER,
+	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
 	.name =         SNDRV_CTL_NAME_IEC958("",PLAYBACK,DEFAULT),
 	.count =	4,
 	.info =         snd_emu10k1_spdif_info,
@@ -295,7 +295,7 @@
 static snd_kcontrol_new_t snd_emu10k1_send_routing_control =
 {
 	.access =	SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_INACTIVE,
-	.iface =        SNDRV_CTL_ELEM_IFACE_MIXER,
+	.iface =        SNDRV_CTL_ELEM_IFACE_PCM,
 	.name =         "EMU10K1 PCM Send Routing",
 	.count =	32,
 	.info =         snd_emu10k1_send_routing_info,
@@ -364,7 +364,7 @@
 static snd_kcontrol_new_t snd_emu10k1_send_volume_control =
 {
 	.access =	SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_INACTIVE,
-	.iface =        SNDRV_CTL_ELEM_IFACE_MIXER,
+	.iface =        SNDRV_CTL_ELEM_IFACE_PCM,
 	.name =         "EMU10K1 PCM Send Volume",
 	.count =	32,
 	.info =         snd_emu10k1_send_volume_info,
@@ -427,7 +427,7 @@
 static snd_kcontrol_new_t snd_emu10k1_attn_control =
 {
 	.access =	SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_INACTIVE,
-	.iface =        SNDRV_CTL_ELEM_IFACE_MIXER,
+	.iface =        SNDRV_CTL_ELEM_IFACE_PCM,
 	.name =         "EMU10K1 PCM Volume",
 	.count =	32,
 	.info =         snd_emu10k1_attn_info,
@@ -737,7 +737,8 @@
 	return -ENOENT;
 }
 
-int __devinit snd_emu10k1_mixer(emu10k1_t *emu)
+int __devinit snd_emu10k1_mixer(emu10k1_t *emu,
+				int pcm_device, int multi_device)
 {
 	int err, pcm;
 	snd_kcontrol_t *kctl;
@@ -852,29 +853,35 @@
 
 	if ((kctl = emu->ctl_send_routing = snd_ctl_new1(&snd_emu10k1_send_routing_control, emu)) == NULL)
 		return -ENOMEM;
+	kctl->id.device = pcm_device;
 	if ((err = snd_ctl_add(card, kctl)))
 		return err;
 	if ((kctl = emu->ctl_send_volume = snd_ctl_new1(&snd_emu10k1_send_volume_control, emu)) == NULL)
 		return -ENOMEM;
+	kctl->id.device = pcm_device;
 	if ((err = snd_ctl_add(card, kctl)))
 		return err;
 	if ((kctl = emu->ctl_attn = snd_ctl_new1(&snd_emu10k1_attn_control, emu)) == NULL)
 		return -ENOMEM;
+	kctl->id.device = pcm_device;
 	if ((err = snd_ctl_add(card, kctl)))
 		return err;
 
 	if ((kctl = emu->ctl_efx_send_routing = snd_ctl_new1(&snd_emu10k1_efx_send_routing_control, emu)) == NULL)
 		return -ENOMEM;
+	kctl->id.device = multi_device;
 	if ((err = snd_ctl_add(card, kctl)))
 		return err;
 	
 	if ((kctl = emu->ctl_efx_send_volume = snd_ctl_new1(&snd_emu10k1_efx_send_volume_control, emu)) == NULL)
 		return -ENOMEM;
+	kctl->id.device = multi_device;
 	if ((err = snd_ctl_add(card, kctl)))
 		return err;
 	
 	if ((kctl = emu->ctl_efx_attn = snd_ctl_new1(&snd_emu10k1_efx_attn_control, emu)) == NULL)
 		return -ENOMEM;
+	kctl->id.device = multi_device;
 	if ((err = snd_ctl_add(card, kctl)))
 		return err;
 
@@ -924,10 +931,14 @@
 		/* sb live! and audigy */
 		if ((kctl = snd_ctl_new1(&snd_emu10k1_spdif_mask_control, emu)) == NULL)
 			return -ENOMEM;
+		if (!emu->audigy)
+			kctl->id.device = emu->pcm_efx->device;
 		if ((err = snd_ctl_add(card, kctl)))
 			return err;
 		if ((kctl = snd_ctl_new1(&snd_emu10k1_spdif_control, emu)) == NULL)
 			return -ENOMEM;
+		if (!emu->audigy)
+			kctl->id.device = emu->pcm_efx->device;
 		if ((err = snd_ctl_add(card, kctl)))
 			return err;
 	}
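
The recurring pattern in this file (and in the cs46xx, ca0106 and emu10k1x hunks) is: move a control to SNDRV_CTL_ELEM_IFACE_PCM and stamp kctl->id.device with the PCM device number before snd_ctl_add(), so the control is enumerated against the right PCM device instead of the card-global mixer. As a standalone sketch (my_add_pcm_control is an illustrative helper, not an ALSA API):

#include <sound/driver.h>
#include <sound/core.h>
#include <sound/control.h>

static int my_add_pcm_control(snd_card_t *card, snd_kcontrol_new_t *tmpl,
			      void *private_data, int pcm_device)
{
	snd_kcontrol_t *kctl;

	kctl = snd_ctl_new1(tmpl, private_data);
	if (kctl == NULL)
		return -ENOMEM;
	kctl->id.device = pcm_device;	/* attach to PCM device N, not the mixer */
	return snd_ctl_add(card, kctl);
}
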
diff --git a/sound/pci/emu10k1/emupcm.c b/sound/pci/emu10k1/emupcm.c
index 520b99a..9c35f6d 100644
--- a/sound/pci/emu10k1/emupcm.c
+++ b/sound/pci/emu10k1/emupcm.c
@@ -1682,6 +1682,7 @@
 int __devinit snd_emu10k1_pcm_efx(emu10k1_t * emu, int device, snd_pcm_t ** rpcm)
 {
 	snd_pcm_t *pcm;
+	snd_kcontrol_t *kctl;
 	int err;
 
 	if (rpcm)
@@ -1714,7 +1715,11 @@
 		emu->efx_voices_mask[0] = 0xffff0000;
 		emu->efx_voices_mask[1] = 0;
 	}
-	snd_ctl_add(emu->card, snd_ctl_new1(&snd_emu10k1_pcm_efx_voices_mask, emu));
+	kctl = snd_ctl_new1(&snd_emu10k1_pcm_efx_voices_mask, emu);
+	if (!kctl)
+		return -ENOMEM;
+	kctl->id.device = device;
+	snd_ctl_add(emu->card, kctl);
 
 	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(emu->pci), 64*1024, 64*1024);
 
diff --git a/sound/pci/ens1370.c b/sound/pci/ens1370.c
index 78a81f3..f06b95f 100644
--- a/sound/pci/ens1370.c
+++ b/sound/pci/ens1370.c
@@ -1444,7 +1444,7 @@
 
 /* spdif controls */
 static snd_kcontrol_new_t snd_es1371_mixer_spdif[] __devinitdata = {
-	ES1371_SPDIF("IEC958 Playback Switch"),
+	ES1371_SPDIF(SNDRV_CTL_NAME_IEC958("",PLAYBACK,SWITCH)),
 	{
 		.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
 		.name =		SNDRV_CTL_NAME_IEC958("",PLAYBACK,DEFAULT),
diff --git a/sound/pci/fm801.c b/sound/pci/fm801.c
index ff10e63..36b2f62 100644
--- a/sound/pci/fm801.c
+++ b/sound/pci/fm801.c
@@ -1155,10 +1155,10 @@
 static snd_kcontrol_new_t snd_fm801_controls_multi[] __devinitdata = {
 FM801_SINGLE("AC97 2ch->4ch Copy Switch", FM801_CODEC_CTRL, 7, 1, 0),
 FM801_SINGLE("AC97 18-bit Switch", FM801_CODEC_CTRL, 10, 1, 0),
-FM801_SINGLE("IEC958 Capture Switch", FM801_I2S_MODE, 8, 1, 0),
-FM801_SINGLE("IEC958 Raw Data Playback Switch", FM801_I2S_MODE, 9, 1, 0),
-FM801_SINGLE("IEC958 Raw Data Capture Switch", FM801_I2S_MODE, 10, 1, 0),
-FM801_SINGLE("IEC958 Playback Switch", FM801_GEN_CTRL, 2, 1, 0),
+FM801_SINGLE(SNDRV_CTL_NAME_IEC958("",CAPTURE,SWITCH), FM801_I2S_MODE, 8, 1, 0),
+FM801_SINGLE(SNDRV_CTL_NAME_IEC958("Raw Data ",PLAYBACK,SWITCH), FM801_I2S_MODE, 9, 1, 0),
+FM801_SINGLE(SNDRV_CTL_NAME_IEC958("Raw Data ",CAPTURE,SWITCH), FM801_I2S_MODE, 10, 1, 0),
+FM801_SINGLE(SNDRV_CTL_NAME_IEC958("",PLAYBACK,SWITCH), FM801_GEN_CTRL, 2, 1, 0),
 };
 
 static void snd_fm801_mixer_free_ac97_bus(ac97_bus_t *bus)
diff --git a/sound/pci/hda/Makefile b/sound/pci/hda/Makefile
index bd8cb33..ddfb5ff 100644
--- a/sound/pci/hda/Makefile
+++ b/sound/pci/hda/Makefile
@@ -1,5 +1,5 @@
 snd-hda-intel-objs := hda_intel.o
-snd-hda-codec-objs := hda_codec.o hda_generic.o patch_realtek.o patch_cmedia.o patch_analog.o patch_sigmatel.o
+snd-hda-codec-objs := hda_codec.o hda_generic.o patch_realtek.o patch_cmedia.o patch_analog.o patch_sigmatel.o patch_si3054.o
 ifdef CONFIG_PROC_FS
 snd-hda-codec-objs += hda_proc.o
 endif
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index e2cf023..20f7762 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -432,22 +432,26 @@
 }
 
 /*
- * look for an AFG node
- *
- * return 0 if not found
+ * look for AFG and MFG nodes
  */
-static int look_for_afg_node(struct hda_codec *codec)
+static void setup_fg_nodes(struct hda_codec *codec)
 {
 	int i, total_nodes;
 	hda_nid_t nid;
 
 	total_nodes = snd_hda_get_sub_nodes(codec, AC_NODE_ROOT, &nid);
 	for (i = 0; i < total_nodes; i++, nid++) {
-		if ((snd_hda_param_read(codec, nid, AC_PAR_FUNCTION_TYPE) & 0xff) ==
-		    AC_GRP_AUDIO_FUNCTION)
-			return nid;
+		switch (snd_hda_param_read(codec, nid, AC_PAR_FUNCTION_TYPE) & 0xff) {
+		case AC_GRP_AUDIO_FUNCTION:
+			codec->afg = nid;
+			break;
+		case AC_GRP_MODEM_FUNCTION:
+			codec->mfg = nid;
+			break;
+		default:
+			break;
+		}
 	}
-	return 0;
 }
 
 /*
@@ -507,10 +511,9 @@
 	codec->subsystem_id = snd_hda_param_read(codec, AC_NODE_ROOT, AC_PAR_SUBSYSTEM_ID);
 	codec->revision_id = snd_hda_param_read(codec, AC_NODE_ROOT, AC_PAR_REV_ID);
 
-	/* FIXME: support for multiple AFGs? */
-	codec->afg = look_for_afg_node(codec);
-	if (! codec->afg) {
-		snd_printdd("hda_codec: no AFG node found\n");
+	setup_fg_nodes(codec);
+	if (! codec->afg && ! codec->mfg) {
+		snd_printdd("hda_codec: no AFG or MFG node found\n");
 		snd_hda_codec_free(codec);
 		return -ENODEV;
 	}
@@ -749,12 +752,14 @@
 	long *valp = ucontrol->value.integer.value;
 	int change = 0;
 
-	if (chs & 1)
+	if (chs & 1) {
 		change = snd_hda_codec_amp_update(codec, nid, 0, dir, idx,
 						  0x7f, *valp);
+		valp++;
+	}
 	if (chs & 2)
 		change |= snd_hda_codec_amp_update(codec, nid, 1, dir, idx,
-						   0x7f, valp[1]);
+						   0x7f, *valp);
 	return change;
 }
 
@@ -796,12 +801,15 @@
 	long *valp = ucontrol->value.integer.value;
 	int change = 0;
 
-	if (chs & 1)
+	if (chs & 1) {
 		change = snd_hda_codec_amp_update(codec, nid, 0, dir, idx,
 						  0x80, *valp ? 0 : 0x80);
+		valp++;
+	}
 	if (chs & 2)
 		change |= snd_hda_codec_amp_update(codec, nid, 1, dir, idx,
-						   0x80, valp[1] ? 0 : 0x80);
+						   0x80, *valp ? 0 : 0x80);
+	
 	return change;
 }
 
@@ -1155,8 +1163,16 @@
 /*
  * stream formats
  */
-static unsigned int rate_bits[][3] = {
+struct hda_rate_tbl {
+	unsigned int hz;
+	unsigned int alsa_bits;
+	unsigned int hda_fmt;
+};
+
+static struct hda_rate_tbl rate_bits[] = {
 	/* rate in Hz, ALSA rate bitmask, HDA format value */
+
+	/* autodetected value used in snd_hda_query_supported_pcm */
 	{ 8000, SNDRV_PCM_RATE_8000, 0x0500 }, /* 1/6 x 48 */
 	{ 11025, SNDRV_PCM_RATE_11025, 0x4300 }, /* 1/4 x 44 */
 	{ 16000, SNDRV_PCM_RATE_16000, 0x0200 }, /* 1/3 x 48 */
@@ -1168,7 +1184,11 @@
 	{ 96000, SNDRV_PCM_RATE_96000, 0x0800 }, /* 2 x 48 */
 	{ 176400, SNDRV_PCM_RATE_176400, 0x5800 },/* 4 x 44 */
 	{ 192000, SNDRV_PCM_RATE_192000, 0x1800 }, /* 4 x 48 */
-	{ 0 }
+
+	/* rate not autodetected by snd_hda_query_supported_pcm */
+	{ 9600, SNDRV_PCM_RATE_KNOT, 0x0400 }, /* 1/5 x 48 */
+
+	{ 0 } /* terminator */
 };
 
 /**
@@ -1190,12 +1210,12 @@
 	int i;
 	unsigned int val = 0;
 
-	for (i = 0; rate_bits[i][0]; i++)
-		if (rate_bits[i][0] == rate) {
-			val = rate_bits[i][2];
+	for (i = 0; rate_bits[i].hz; i++)
+		if (rate_bits[i].hz == rate) {
+			val = rate_bits[i].hda_fmt;
 			break;
 		}
-	if (! rate_bits[i][0]) {
+	if (! rate_bits[i].hz) {
 		snd_printdd("invalid rate %d\n", rate);
 		return 0;
 	}
@@ -1258,9 +1278,9 @@
 
 	if (ratesp) {
 		u32 rates = 0;
-		for (i = 0; rate_bits[i][0]; i++) {
+		for (i = 0; rate_bits[i].hz; i++) {
 			if (val & (1 << i))
-				rates |= rate_bits[i][1];
+				rates |= rate_bits[i].alsa_bits;
 		}
 		*ratesp = rates;
 	}
@@ -1352,13 +1372,13 @@
 	}
 
 	rate = format & 0xff00;
-	for (i = 0; rate_bits[i][0]; i++)
-		if (rate_bits[i][2] == rate) {
+	for (i = 0; rate_bits[i].hz; i++)
+		if (rate_bits[i].hda_fmt == rate) {
 			if (val & (1 << i))
 				break;
 			return 0;
 		}
-	if (! rate_bits[i][0])
+	if (! rate_bits[i].hz)
 		return 0;
 
 	stream = snd_hda_param_read(codec, nid, AC_PAR_STREAM);
@@ -1541,8 +1561,11 @@
 		for (c = tbl; c->modelname || c->pci_subvendor; c++) {
 			if (c->pci_subvendor == subsystem_vendor &&
 			    (! c->pci_subdevice /* all match */||
-			     (c->pci_subdevice == subsystem_device)))
+			     (c->pci_subdevice == subsystem_device))) {
+				snd_printdd(KERN_INFO "hda_codec: PCI %x:%x, codec config %d is selected\n",
+					    subsystem_vendor, subsystem_device, c->config);
 				return c->config;
+			}
 		}
 	}
 	return -1;
@@ -1803,11 +1826,25 @@
 				cfg->line_out_pins[j] = nid;
 			}
 
-	/* Swap surround and CLFE: the association order is front/CLFE/surr/back */
-	if (cfg->line_outs >= 3) {
+	/* Reorder the surround channels
+	 * ALSA sequence is front/surr/clfe/side
+	 * HDA sequence is:
+	 *    4-ch: front/surr  =>  OK as it is
+	 *    6-ch: front/clfe/surr
+	 *    8-ch: front/clfe/side/surr
+	 */
+	switch (cfg->line_outs) {
+	case 3:
 		nid = cfg->line_out_pins[1];
 		cfg->line_out_pins[1] = cfg->line_out_pins[2];
 		cfg->line_out_pins[2] = nid;
+		break;
+	case 4:
+		nid = cfg->line_out_pins[1];
+		cfg->line_out_pins[1] = cfg->line_out_pins[3];
+		cfg->line_out_pins[3] = cfg->line_out_pins[2];
+		cfg->line_out_pins[2] = nid;
+		break;
 	}
 
 	return 0;
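
The 9600 Hz entry is flagged SNDRV_PCM_RATE_KNOT because it is not one of ALSA's standard rate bits; a PCM that advertises such a rate is expected to pin down the exact values with a rate constraint list from its open callback. A hedged sketch of that side of the contract (the rates and names here are only an example, not taken from this patch):

#include <linux/kernel.h>
#include <sound/driver.h>
#include <sound/pcm.h>

static unsigned int my_rates[] = { 8000, 9600, 16000 };

static snd_pcm_hw_constraint_list_t my_rate_constraints = {
	.count	= ARRAY_SIZE(my_rates),
	.list	= my_rates,
	.mask	= 0,
};

static int my_pcm_open(snd_pcm_substream_t *substream)
{
	snd_pcm_runtime_t *runtime = substream->runtime;

	/* SNDRV_PCM_RATE_KNOT: rates outside the standard set follow */
	runtime->hw.rates |= SNDRV_PCM_RATE_KNOT;
	return snd_pcm_hw_constraint_list(runtime, 0,
					  SNDRV_PCM_HW_PARAM_RATE,
					  &my_rate_constraints);
}
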
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index dd0d99d..63a29a8 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -514,6 +514,7 @@
 	struct list_head list;	/* list point */
 
 	hda_nid_t afg;	/* AFG node id */
+	hda_nid_t mfg;	/* MFG node id */
 
 	/* ids */
 	u32 vendor_id;
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 2d046ab..1229227 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -881,6 +881,11 @@
 	struct hda_gspec *spec;
 	int err;
 
+	if (!codec->afg) {
+		snd_printdd("hda_generic: no generic modem yet\n");
+		return -ENODEV;
+	}
+
 	spec = kcalloc(1, sizeof(*spec), GFP_KERNEL);
 	if (spec == NULL) {
 		printk(KERN_ERR "hda_generic: can't allocate spec\n");
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 288ab07..15107df 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -71,7 +71,9 @@
 			 "{Intel, ESB2},"
 			 "{ATI, SB450},"
 			 "{VIA, VT8251},"
-			 "{VIA, VT8237A}}");
+			 "{VIA, VT8237A},"
+			 "{SiS, SIS966},"
+			 "{ULI, M5461}}");
 MODULE_DESCRIPTION("Intel HDA driver");
 
 #define SFX	"hda-intel: "
@@ -141,9 +143,24 @@
  */
 
 /* max number of SDs */
-#define MAX_ICH6_DEV		8
+/* ICH, ATI and VIA have 4 playback and 4 capture */
+#define ICH6_CAPTURE_INDEX	0
+#define ICH6_NUM_CAPTURE	4
+#define ICH6_PLAYBACK_INDEX	4
+#define ICH6_NUM_PLAYBACK	4
+
+/* ULI has 6 playback and 5 capture */
+#define ULI_CAPTURE_INDEX	0
+#define ULI_NUM_CAPTURE		5
+#define ULI_PLAYBACK_INDEX	5
+#define ULI_NUM_PLAYBACK	6
+
+/* this number is statically defined for simplicity */
+#define MAX_AZX_DEV		16
+
 /* max number of fragments - we may use more if allocating more pages for BDL */
-#define AZX_MAX_FRAG		(PAGE_SIZE / (MAX_ICH6_DEV * 16))
+#define BDL_SIZE		PAGE_ALIGN(8192)
+#define AZX_MAX_FRAG		(BDL_SIZE / (MAX_AZX_DEV * 16))
 /* max buffer size - no h/w limit, you can increase as you like */
 #define AZX_MAX_BUF_SIZE	(1024*1024*1024)
 /* max number of PCM devics per card */
@@ -200,7 +217,6 @@
 };
 
 /* Defines for ATI HD Audio support in SB450 south bridge */
-#define ATI_SB450_HDAUDIO_PCI_DEVICE_ID     0x437b
 #define ATI_SB450_HDAUDIO_MISC_CNTR2_ADDR   0x42
 #define ATI_SB450_HDAUDIO_ENABLE_SNOOP      0x02
 
@@ -258,6 +274,14 @@
 	snd_card_t *card;
 	struct pci_dev *pci;
 
+	/* chip type specific */
+	int driver_type;
+	int playback_streams;
+	int playback_index_offset;
+	int capture_streams;
+	int capture_index_offset;
+	int num_streams;
+
 	/* pci resources */
 	unsigned long addr;
 	void __iomem *remap_addr;
@@ -267,8 +291,8 @@
 	spinlock_t reg_lock;
 	struct semaphore open_mutex;
 
-	/* streams */
-	azx_dev_t azx_dev[MAX_ICH6_DEV];
+	/* streams (x num_streams) */
+	azx_dev_t *azx_dev;
 
 	/* PCM */
 	unsigned int pcm_devs;
@@ -292,6 +316,23 @@
 	unsigned int initialized: 1;
 };
 
+/* driver types */
+enum {
+	AZX_DRIVER_ICH,
+	AZX_DRIVER_ATI,
+	AZX_DRIVER_VIA,
+	AZX_DRIVER_SIS,
+	AZX_DRIVER_ULI,
+};
+
+static char *driver_short_names[] __devinitdata = {
+	[AZX_DRIVER_ICH] = "HDA Intel",
+	[AZX_DRIVER_ATI] = "HDA ATI SB",
+	[AZX_DRIVER_VIA] = "HDA VIA VT82xx",
+	[AZX_DRIVER_SIS] = "HDA SIS966",
+	[AZX_DRIVER_ULI] = "HDA ULI M5461"
+};
+
 /*
  * macros for easy use
  */
@@ -360,6 +401,8 @@
 	azx_writel(chip, CORBLBASE, (u32)chip->corb.addr);
 	azx_writel(chip, CORBUBASE, upper_32bit(chip->corb.addr));
 
+	/* set the corb size to 256 entries (ULI requires this to be set explicitly) */
+	azx_writeb(chip, CORBSIZE, 0x02);
 	/* set the corb write pointer to 0 */
 	azx_writew(chip, CORBWP, 0);
 	/* reset the corb hw read pointer */
@@ -373,6 +416,8 @@
 	azx_writel(chip, RIRBLBASE, (u32)chip->rirb.addr);
 	azx_writel(chip, RIRBUBASE, upper_32bit(chip->rirb.addr));
 
+	/* set the rirb size to 256 entries (ULI requires this to be set explicitly) */
+	azx_writeb(chip, RIRBSIZE, 0x02);
 	/* reset the rirb hw write pointer */
 	azx_writew(chip, RIRBWP, ICH6_RBRWP_CLR);
 	/* set N=1, get RIRB response interrupt for new entry */
@@ -596,7 +641,7 @@
 	int i;
 
 	/* disable interrupts in stream descriptor */
-	for (i = 0; i < MAX_ICH6_DEV; i++) {
+	for (i = 0; i < chip->num_streams; i++) {
 		azx_dev_t *azx_dev = &chip->azx_dev[i];
 		azx_sd_writeb(azx_dev, SD_CTL,
 			      azx_sd_readb(azx_dev, SD_CTL) & ~SD_INT_MASK);
@@ -616,7 +661,7 @@
 	int i;
 
 	/* clear stream status */
-	for (i = 0; i < MAX_ICH6_DEV; i++) {
+	for (i = 0; i < chip->num_streams; i++) {
 		azx_dev_t *azx_dev = &chip->azx_dev[i];
 		azx_sd_writeb(azx_dev, SD_STS, SD_INT_MASK);
 	}
@@ -686,8 +731,7 @@
 	}
 
 	/* For ATI SB450 azalia HD audio, we need to enable snoop */
-	if (chip->pci->vendor == PCI_VENDOR_ID_ATI && 
-	    chip->pci->device == ATI_SB450_HDAUDIO_PCI_DEVICE_ID) {
+	if (chip->driver_type == AZX_DRIVER_ATI) {
 		pci_read_config_byte(chip->pci, ATI_SB450_HDAUDIO_MISC_CNTR2_ADDR, 
 				     &ati_misc_cntl2);
 		pci_write_config_byte(chip->pci, ATI_SB450_HDAUDIO_MISC_CNTR2_ADDR, 
@@ -714,7 +758,7 @@
 		return IRQ_NONE;
 	}
 	
-	for (i = 0; i < MAX_ICH6_DEV; i++) {
+	for (i = 0; i < chip->num_streams; i++) {
 		azx_dev = &chip->azx_dev[i];
 		if (status & azx_dev->sd_int_sta_mask) {
 			azx_sd_writeb(azx_dev, SD_STS, SD_INT_MASK);
@@ -879,9 +923,15 @@
 /* assign a stream for the PCM */
 static inline azx_dev_t *azx_assign_device(azx_t *chip, int stream)
 {
-	int dev, i;
-	dev = stream == SNDRV_PCM_STREAM_PLAYBACK ? 4 : 0;
-	for (i = 0; i < 4; i++, dev++)
+	int dev, i, nums;
+	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
+		dev = chip->playback_index_offset;
+		nums = chip->playback_streams;
+	} else {
+		dev = chip->capture_index_offset;
+		nums = chip->capture_streams;
+	}
+	for (i = 0; i < nums; i++, dev++)
 		if (! chip->azx_dev[dev].opened) {
 			chip->azx_dev[dev].opened = 1;
 			return &chip->azx_dev[dev];
@@ -899,8 +949,8 @@
 	.info =			(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
 				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
 				 SNDRV_PCM_INFO_MMAP_VALID |
-				 SNDRV_PCM_INFO_PAUSE |
-				 SNDRV_PCM_INFO_RESUME),
+				 SNDRV_PCM_INFO_PAUSE /*|*/
+				 /*SNDRV_PCM_INFO_RESUME*/),
 	.formats =		SNDRV_PCM_FMTBIT_S16_LE,
 	.rates =		SNDRV_PCM_RATE_48000,
 	.rate_min =		48000,
@@ -1049,6 +1099,7 @@
 		azx_dev->running = 1;
 		break;
 	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
 	case SNDRV_PCM_TRIGGER_STOP:
 		azx_stream_stop(chip, azx_dev);
 		azx_dev->running = 0;
@@ -1058,6 +1109,7 @@
 	}
 	spin_unlock(&chip->reg_lock);
 	if (cmd == SNDRV_PCM_TRIGGER_PAUSE_PUSH ||
+	    cmd == SNDRV_PCM_TRIGGER_SUSPEND ||
 	    cmd == SNDRV_PCM_TRIGGER_STOP) {
 		int timeout = 5000;
 		while (azx_sd_readb(azx_dev, SD_CTL) & SD_CTL_DMA_START && --timeout)
@@ -1136,6 +1188,7 @@
 					      snd_dma_pci_data(chip->pci),
 					      1024 * 64, 1024 * 128);
 	chip->pcm[pcm_dev] = pcm;
+	chip->pcm_devs = pcm_dev + 1;
 
 	return 0;
 }
@@ -1186,7 +1239,7 @@
 	/* initialize each stream (aka device)
 	 * assign the starting bdl address to each stream (device) and initialize
 	 */
-	for (i = 0; i < MAX_ICH6_DEV; i++) {
+	for (i = 0; i < chip->num_streams; i++) {
 		unsigned int off = sizeof(u32) * (i * AZX_MAX_FRAG * 4);
 		azx_dev_t *azx_dev = &chip->azx_dev[i];
 		azx_dev->bdl = (u32 *)(chip->bdl.area + off);
@@ -1245,7 +1298,7 @@
 	if (chip->initialized) {
 		int i;
 
-		for (i = 0; i < MAX_ICH6_DEV; i++)
+		for (i = 0; i < chip->num_streams; i++)
 			azx_stream_stop(chip, &chip->azx_dev[i]);
 
 		/* disable interrupts */
@@ -1261,10 +1314,10 @@
 
 		/* wait a little for interrupts to finish */
 		msleep(1);
-
-		iounmap(chip->remap_addr);
 	}
 
+	if (chip->remap_addr)
+		iounmap(chip->remap_addr);
 	if (chip->irq >= 0)
 		free_irq(chip->irq, (void*)chip);
 
@@ -1276,6 +1329,7 @@
 		snd_dma_free_pages(&chip->posbuf);
 	pci_release_regions(chip->pci);
 	pci_disable_device(chip->pci);
+	kfree(chip->azx_dev);
 	kfree(chip);
 
 	return 0;
@@ -1290,7 +1344,8 @@
  * constructor
  */
 static int __devinit azx_create(snd_card_t *card, struct pci_dev *pci,
-				int posfix, azx_t **rchip)
+				int posfix, int driver_type,
+				azx_t **rchip)
 {
 	azx_t *chip;
 	int err = 0;
@@ -1316,9 +1371,20 @@
 	chip->card = card;
 	chip->pci = pci;
 	chip->irq = -1;
+	chip->driver_type = driver_type;
 
 	chip->position_fix = posfix;
 
+#if BITS_PER_LONG != 64
+	/* Fix up base address on ULI M5461 */
+	if (chip->driver_type == AZX_DRIVER_ULI) {
+		u16 tmp3;
+		pci_read_config_word(pci, 0x40, &tmp3);
+		pci_write_config_word(pci, 0x40, tmp3 | 0x10);
+		pci_write_config_dword(pci, PCI_BASE_ADDRESS_1, 0);
+	}
+#endif
+
 	if ((err = pci_request_regions(pci, "ICH HD audio")) < 0) {
 		kfree(chip);
 		pci_disable_device(pci);
@@ -1344,16 +1410,37 @@
 	pci_set_master(pci);
 	synchronize_irq(chip->irq);
 
+	switch (chip->driver_type) {
+	case AZX_DRIVER_ULI:
+		chip->playback_streams = ULI_NUM_PLAYBACK;
+		chip->capture_streams = ULI_NUM_CAPTURE;
+		chip->playback_index_offset = ULI_PLAYBACK_INDEX;
+		chip->capture_index_offset = ULI_CAPTURE_INDEX;
+		break;
+	default:
+		chip->playback_streams = ICH6_NUM_PLAYBACK;
+		chip->capture_streams = ICH6_NUM_CAPTURE;
+		chip->playback_index_offset = ICH6_PLAYBACK_INDEX;
+		chip->capture_index_offset = ICH6_CAPTURE_INDEX;
+		break;
+	}
+	chip->num_streams = chip->playback_streams + chip->capture_streams;
+	chip->azx_dev = kcalloc(chip->num_streams, sizeof(*chip->azx_dev), GFP_KERNEL);
+	if (! chip->azx_dev) {
+		snd_printk(KERN_ERR "cannot malloc azx_dev\n");
+		goto errout;
+	}
+
 	/* allocate memory for the BDL for each stream */
 	if ((err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci),
-				       PAGE_SIZE, &chip->bdl)) < 0) {
+				       BDL_SIZE, &chip->bdl)) < 0) {
 		snd_printk(KERN_ERR SFX "cannot allocate BDL\n");
 		goto errout;
 	}
 	if (chip->position_fix == POS_FIX_POSBUF) {
 		/* allocate memory for the position buffer */
 		if ((err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci),
-					       MAX_ICH6_DEV * 8, &chip->posbuf)) < 0) {
+					       chip->num_streams * 8, &chip->posbuf)) < 0) {
 			snd_printk(KERN_ERR SFX "cannot allocate posbuf\n");
 			goto errout;
 		}
@@ -1382,6 +1469,10 @@
 		goto errout;
 	}
 
+	strcpy(card->driver, "HDA-Intel");
+	strcpy(card->shortname, driver_short_names[chip->driver_type]);
+	sprintf(card->longname, "%s at 0x%lx irq %i", card->shortname, chip->addr, chip->irq);
+
 	*rchip = chip;
 	return 0;
 
@@ -1410,15 +1501,12 @@
 		return -ENOMEM;
 	}
 
-	if ((err = azx_create(card, pci, position_fix[dev], &chip)) < 0) {
+	if ((err = azx_create(card, pci, position_fix[dev], pci_id->driver_data,
+			      &chip)) < 0) {
 		snd_card_free(card);
 		return err;
 	}
 
-	strcpy(card->driver, "HDA-Intel");
-	strcpy(card->shortname, "HDA Intel");
-	sprintf(card->longname, "%s at 0x%lx irq %i", card->shortname, chip->addr, chip->irq);
-
 	/* create codec instances */
 	if ((err = azx_codec_create(chip, model[dev])) < 0) {
 		snd_card_free(card);
@@ -1459,12 +1547,13 @@
 
 /* PCI IDs */
 static struct pci_device_id azx_ids[] = {
-	{ 0x8086, 0x2668, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* ICH6 */
-	{ 0x8086, 0x27d8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* ICH7 */
-	{ 0x8086, 0x269a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* ESB2 */
-	{ 0x1002, 0x437b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* ATI SB450 */
-	{ 0x1106, 0x3288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* VIA VT8251/VT8237A */
-	{ 0x10b9, 0x5461, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, /* ALI 5461? */
+	{ 0x8086, 0x2668, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ICH }, /* ICH6 */
+	{ 0x8086, 0x27d8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ICH }, /* ICH7 */
+	{ 0x8086, 0x269a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ICH }, /* ESB2 */
+	{ 0x1002, 0x437b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ATI }, /* ATI SB450 */
+	{ 0x1106, 0x3288, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_VIA }, /* VIA VT8251/VT8237A */
+	{ 0x1039, 0x7502, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_SIS }, /* SIS966 */
+	{ 0x10b9, 0x5461, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AZX_DRIVER_ULI }, /* ULI M5461 */
 	{ 0, }
 };
 MODULE_DEVICE_TABLE(pci, azx_ids);
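
The table above now carries the controller family in the driver_data slot of each pci_device_id, and the probe routine simply forwards pci_id->driver_data into azx_create() instead of re-deriving the type from vendor/device IDs. This is the usual driver_data idiom; a minimal generic sketch (the my_* names are placeholders):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>

enum { MY_TYPE_ICH, MY_TYPE_ULI };	/* mirrors the AZX_DRIVER_* idea */

static struct pci_device_id my_ids[] = {
	{ 0x8086, 0x2668, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MY_TYPE_ICH },
	{ 0x10b9, 0x5461, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MY_TYPE_ULI },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, my_ids);

static int __devinit my_probe(struct pci_dev *pci,
			      const struct pci_device_id *pci_id)
{
	int type = pci_id->driver_data;	/* last field of the matching entry */

	/* select stream counts, names, workarounds, ... based on 'type'
	 * rather than re-checking pci->vendor / pci->device here
	 */
	return 0;
}
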
diff --git a/sound/pci/hda/hda_patch.h b/sound/pci/hda/hda_patch.h
index a5de684..acaef3c 100644
--- a/sound/pci/hda/hda_patch.h
+++ b/sound/pci/hda/hda_patch.h
@@ -10,11 +10,14 @@
 extern struct hda_codec_preset snd_hda_preset_analog[];
 /* SigmaTel codecs */
 extern struct hda_codec_preset snd_hda_preset_sigmatel[];
+/* SiLabs 3054/3055 modem codecs */
+extern struct hda_codec_preset snd_hda_preset_si3054[];
 
 static const struct hda_codec_preset *hda_preset_tables[] = {
 	snd_hda_preset_realtek,
 	snd_hda_preset_cmedia,
 	snd_hda_preset_analog,
 	snd_hda_preset_sigmatel,
+	snd_hda_preset_si3054,
 	NULL
 };
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index 2fd05bb..bceb83a 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -572,7 +572,7 @@
 	},
 	{
 		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-		.name = "IEC958 Playback Route",
+		.name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,NONE) "Route",
 		.info = ad1983_spdif_route_info,
 		.get = ad1983_spdif_route_get,
 		.put = ad1983_spdif_route_put,
@@ -705,7 +705,7 @@
 	/* identical with AD1983 */
 	{
 		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-		.name = "IEC958 Playback Route",
+		.name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,NONE) "Route",
 		.info = ad1983_spdif_route_info,
 		.get = ad1983_spdif_route_get,
 		.put = ad1983_spdif_route_put,
diff --git a/sound/pci/hda/patch_cmedia.c b/sound/pci/hda/patch_cmedia.c
index 86f195f..07fb4f5 100644
--- a/sound/pci/hda/patch_cmedia.c
+++ b/sound/pci/hda/patch_cmedia.c
@@ -647,6 +647,7 @@
 	{ .modelname = "min_fp", .config = CMI_MIN_FP },
 	{ .modelname = "full", .config = CMI_FULL },
 	{ .modelname = "full_dig", .config = CMI_FULL_DIG },
+	{ .pci_subvendor = 0x1043, .pci_subdevice = 0x813d, .config = CMI_FULL_DIG }, /* ASUS P5AD2 */
 	{ .modelname = "allout", .config = CMI_ALLOUT },
 	{ .modelname = "auto", .config = CMI_AUTO },
 	{} /* terminator */
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 9b85699..eeb900a 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -687,6 +687,12 @@
 	{ } /* end */
 };
 
+/* additional mixers to alc880_asus_mixer */
+static snd_kcontrol_new_t alc880_pcbeep_mixer[] = {
+	HDA_CODEC_VOLUME("PC Speaker Playback Volume", 0x0b, 0x05, HDA_INPUT),
+	HDA_CODEC_MUTE("PC Speaker Playback Switch", 0x0b, 0x05, HDA_INPUT),
+	{ } /* end */
+};
 
 /*
  * build control elements
@@ -1524,6 +1530,7 @@
 	/* Back 3 jack plus 1 SPDIF out jack, front 2 jack */
 	{ .modelname = "3stack-digout", .config = ALC880_3ST_DIG },
 	{ .pci_subvendor = 0x8086, .pci_subdevice = 0xe308, .config = ALC880_3ST_DIG },
+	{ .pci_subvendor = 0x1025, .pci_subdevice = 0x0070, .config = ALC880_3ST_DIG },
 
 	/* Back 3 jack plus 1 SPDIF out jack, front 2 jack (Internal add Aux-In)*/
 	{ .pci_subvendor = 0x8086, .pci_subdevice = 0xe305, .config = ALC880_3ST_DIG },
@@ -1734,7 +1741,7 @@
 		.input_mux = &alc880_capture_source,
 	},
 	[ALC880_UNIWILL_DIG] = {
-		.mixers = { alc880_asus_mixer },
+		.mixers = { alc880_asus_mixer, alc880_pcbeep_mixer },
 		.init_verbs = { alc880_volume_init_verbs, alc880_pin_asus_init_verbs },
 		.num_dacs = ARRAY_SIZE(alc880_asus_dac_nids),
 		.dac_nids = alc880_asus_dac_nids,
diff --git a/sound/pci/hda/patch_si3054.c b/sound/pci/hda/patch_si3054.c
new file mode 100644
index 0000000..b0270d1
--- /dev/null
+++ b/sound/pci/hda/patch_si3054.c
@@ -0,0 +1,300 @@
+/*
+ * Universal Interface for Intel High Definition Audio Codec
+ *
+ * HD audio interface patch for Silicon Labs 3054/5 modem codec
+ *
+ * Copyright (c) 2005 Sasha Khapyorsky <sashak@smlink.com>
+ *                    Takashi Iwai <tiwai@suse.de>
+ *
+ *
+ *  This driver is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This driver is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ */
+
+#include <sound/driver.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <sound/core.h>
+#include "hda_codec.h"
+#include "hda_local.h"
+
+
+/* si3054 verbs */
+#define SI3054_VERB_READ_NODE  0x900
+#define SI3054_VERB_WRITE_NODE 0x100
+
+/* si3054 nodes (registers) */
+#define SI3054_EXTENDED_MID    2
+#define SI3054_LINE_RATE       3
+#define SI3054_LINE_LEVEL      4
+#define SI3054_GPIO_CFG        5
+#define SI3054_GPIO_POLARITY   6
+#define SI3054_GPIO_STICKY     7
+#define SI3054_GPIO_WAKEUP     8
+#define SI3054_GPIO_STATUS     9
+#define SI3054_GPIO_CONTROL   10
+#define SI3054_MISC_AFE       11
+#define SI3054_CHIPID         12
+#define SI3054_LINE_CFG1      13
+#define SI3054_LINE_STATUS    14
+#define SI3054_DC_TERMINATION 15
+#define SI3054_LINE_CONFIG    16
+#define SI3054_CALLPROG_ATT   17
+#define SI3054_SQ_CONTROL     18
+#define SI3054_MISC_CONTROL   19
+#define SI3054_RING_CTRL1     20
+#define SI3054_RING_CTRL2     21
+
+/* extended MID */
+#define SI3054_MEI_READY 0xf
+
+/* line level */
+#define SI3054_ATAG_MASK 0x00f0
+#define SI3054_DTAG_MASK 0xf000
+
+/* GPIO bits */
+#define SI3054_GPIO_OH    0x0001
+#define SI3054_GPIO_CID   0x0002
+
+/* chipid and revisions */
+#define SI3054_CHIPID_CODEC_REV_MASK 0x000f
+#define SI3054_CHIPID_DAA_REV_MASK   0x00f0
+#define SI3054_CHIPID_INTERNATIONAL  0x0100
+#define SI3054_CHIPID_DAA_ID         0x0f00
+#define SI3054_CHIPID_CODEC_ID      (1<<12)
+
+/* si3054 codec registers (nodes) access macros */
+#define GET_REG(codec,reg) (snd_hda_codec_read(codec,reg,0,SI3054_VERB_READ_NODE,0))
+#define SET_REG(codec,reg,val) (snd_hda_codec_write(codec,reg,0,SI3054_VERB_WRITE_NODE,val))
+
+
+struct si3054_spec {
+	unsigned international;
+	struct hda_pcm pcm;
+};
+
+
+/*
+ * Modem mixer
+ */
+
+#define PRIVATE_VALUE(reg,mask) ((reg<<16)|(mask&0xffff))
+#define PRIVATE_REG(val) ((val>>16)&0xffff)
+#define PRIVATE_MASK(val) (val&0xffff)
+
+static int si3054_switch_info(snd_kcontrol_t *kcontrol,
+		               snd_ctl_elem_info_t *uinfo)
+{
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
+	uinfo->count = 1;
+	uinfo->value.integer.min = 0;
+	uinfo->value.integer.max = 1;
+	return 0;
+}
+
+static int si3054_switch_get(snd_kcontrol_t *kcontrol,
+		               snd_ctl_elem_value_t *uvalue)
+{
+	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
+	u16 reg  = PRIVATE_REG(kcontrol->private_value);
+	u16 mask = PRIVATE_MASK(kcontrol->private_value);
+	uvalue->value.integer.value[0] = (GET_REG(codec, reg) & mask) ? 1 : 0;
+	return 0;
+}
+
+static int si3054_switch_put(snd_kcontrol_t *kcontrol,
+		               snd_ctl_elem_value_t *uvalue)
+{
+	struct hda_codec *codec = snd_kcontrol_chip(kcontrol);
+	u16 reg  = PRIVATE_REG(kcontrol->private_value);
+	u16 mask = PRIVATE_MASK(kcontrol->private_value);
+	if (uvalue->value.integer.value[0])
+		SET_REG(codec, reg, (GET_REG(codec, reg)) | mask);
+	else
+		SET_REG(codec, reg, (GET_REG(codec, reg)) & ~mask);
+	return 0;
+}
+
+#define SI3054_KCONTROL(kname,reg,mask) { \
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
+	.name = kname, \
+	.info = si3054_switch_info, \
+	.get  = si3054_switch_get, \
+	.put  = si3054_switch_put, \
+	.private_value = PRIVATE_VALUE(reg,mask), \
+}
+		
+
+static snd_kcontrol_new_t si3054_modem_mixer[] = {
+	SI3054_KCONTROL("Off-hook Switch", SI3054_GPIO_CONTROL, SI3054_GPIO_OH),
+	SI3054_KCONTROL("Caller ID Switch", SI3054_GPIO_CONTROL, SI3054_GPIO_CID),
+	{}
+};
+
+static int si3054_build_controls(struct hda_codec *codec)
+{
+	return snd_hda_add_new_ctls(codec, si3054_modem_mixer);
+}
+
+
+/*
+ * PCM callbacks
+ */
+
+static int si3054_pcm_prepare(struct hda_pcm_stream *hinfo,
+			      struct hda_codec *codec,
+			      unsigned int stream_tag,
+			      unsigned int format,
+			      snd_pcm_substream_t *substream)
+{
+	u16 val;
+
+	SET_REG(codec, SI3054_LINE_RATE, substream->runtime->rate);
+	val = GET_REG(codec, SI3054_LINE_LEVEL);
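+	/* keep the other direction's tag: the playback tag goes into
+	 * DTAG (bits 12-15), the capture tag into ATAG (bits 4-7) */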
+	val &= 0xff << (8 * (substream->stream != SNDRV_PCM_STREAM_PLAYBACK));
+	val |= ((stream_tag & 0xf) << 4) << (8 * (substream->stream == SNDRV_PCM_STREAM_PLAYBACK));
+	SET_REG(codec, SI3054_LINE_LEVEL, val);
+
+	snd_hda_codec_setup_stream(codec, hinfo->nid,
+				   stream_tag, 0, format);
+	return 0;
+}
+
+static int si3054_pcm_open(struct hda_pcm_stream *hinfo,
+			   struct hda_codec *codec,
+			    snd_pcm_substream_t *substream)
+{
+	static unsigned int rates[] = { 8000, 9600, 16000 };
+	static snd_pcm_hw_constraint_list_t hw_constraints_rates = {
+		.count = ARRAY_SIZE(rates),
+		.list = rates,
+		.mask = 0,
+	};
+	substream->runtime->hw.period_bytes_min = 80;
+	return snd_pcm_hw_constraint_list(substream->runtime, 0,
+			SNDRV_PCM_HW_PARAM_RATE, &hw_constraints_rates);
+}
+
+
+static struct hda_pcm_stream si3054_pcm = {
+	.substreams = 1,
+	.channels_min = 1,
+	.channels_max = 1,
+	.nid = 0x1,
+	.rates = SNDRV_PCM_RATE_8000|SNDRV_PCM_RATE_16000|SNDRV_PCM_RATE_KNOT,
+	.formats = SNDRV_PCM_FMTBIT_S16_LE,
+	.maxbps = 16,
+	.ops = {
+		.open = si3054_pcm_open,
+		.prepare = si3054_pcm_prepare,
+	},
+};
+
+
+static int si3054_build_pcms(struct hda_codec *codec)
+{
+	struct si3054_spec *spec = codec->spec;
+	struct hda_pcm *info = &spec->pcm;
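+	/* the modem stream runs on the modem function group node */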
+	si3054_pcm.nid = codec->mfg;
+	codec->num_pcms = 1;
+	codec->pcm_info = info;
+	info->name = "Si3054 Modem";
+	info->stream[SNDRV_PCM_STREAM_PLAYBACK] = si3054_pcm;
+	info->stream[SNDRV_PCM_STREAM_CAPTURE]  = si3054_pcm;
+	return 0;
+}
+
+
+/*
+ * Init part
+ */
+
+static int si3054_init(struct hda_codec *codec)
+{
+	struct si3054_spec *spec = codec->spec;
+	unsigned wait_count;
+	u16 val;
+
+	snd_hda_codec_write(codec, AC_NODE_ROOT, 0, AC_VERB_SET_CODEC_RESET, 0);
+	snd_hda_codec_write(codec, codec->mfg, 0, AC_VERB_SET_STREAM_FORMAT, 0);
+	SET_REG(codec, SI3054_LINE_RATE, 9600);
+	SET_REG(codec, SI3054_LINE_LEVEL, SI3054_DTAG_MASK|SI3054_ATAG_MASK);
+	SET_REG(codec, SI3054_EXTENDED_MID, 0);
+
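+	/* poll the Extended MID register until the MEI ready bits come up
+	 * (up to roughly 20ms) */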
+	wait_count = 10;
+	do {
+		msleep(2);
+		val = GET_REG(codec, SI3054_EXTENDED_MID);
+	} while ((val & SI3054_MEI_READY) != SI3054_MEI_READY && wait_count--);
+
+	if ((val & SI3054_MEI_READY) != SI3054_MEI_READY) {
+		snd_printk(KERN_ERR "si3054: cannot initialize. EXT MID = %04x\n", val);
+		return -EACCES;
+	}
+
+	SET_REG(codec, SI3054_GPIO_POLARITY, 0xffff);
+	SET_REG(codec, SI3054_GPIO_CFG, 0x0);
+	SET_REG(codec, SI3054_MISC_AFE, 0);
+	SET_REG(codec, SI3054_LINE_CFG1, 0x200);
+
+	if ((GET_REG(codec, SI3054_LINE_STATUS) & (1 << 6)) == 0) {
+		snd_printd("Link Frame Detect(FDT) is not ready (line status: %04x)\n",
+				GET_REG(codec, SI3054_LINE_STATUS));
+	}
+
+	spec->international = GET_REG(codec, SI3054_CHIPID) & SI3054_CHIPID_INTERNATIONAL;
+
+	return 0;
+}
+
+static void si3054_free(struct hda_codec *codec)
+{
+	kfree(codec->spec);
+}
+
+
+/*
+ */
+
+static struct hda_codec_ops si3054_patch_ops = {
+	.build_controls = si3054_build_controls,
+	.build_pcms = si3054_build_pcms,
+	.init = si3054_init,
+	.free = si3054_free,
+#ifdef CONFIG_PM
+	//.suspend = si3054_suspend,
+	.resume = si3054_init,
+#endif
+};
+
+static int patch_si3054(struct hda_codec *codec)
+{
+	struct si3054_spec *spec = kcalloc(1, sizeof(*spec), GFP_KERNEL);
+	if (spec == NULL)
+		return -ENOMEM;
+	codec->spec = spec;
+	codec->patch_ops = si3054_patch_ops;
+	return 0;
+}
+
+/*
+ * patch entries
+ */
+struct hda_codec_preset snd_hda_preset_si3054[] = {
+ 	{ .id = 0x163c3155, .name = "Si3054", .patch = patch_si3054 },
+	{}
+};
+
diff --git a/sound/pci/ice1712/delta.c b/sound/pci/ice1712/delta.c
index eb20f73..39fbe66 100644
--- a/sound/pci/ice1712/delta.c
+++ b/sound/pci/ice1712/delta.c
@@ -618,15 +618,15 @@
  */
 
 static snd_kcontrol_new_t snd_ice1712_delta1010_wordclock_select __devinitdata =
-ICE1712_GPIO(SNDRV_CTL_ELEM_IFACE_PCM, "Word Clock Sync", 0, ICE1712_DELTA_WORD_CLOCK_SELECT, 1, 0);
+ICE1712_GPIO(SNDRV_CTL_ELEM_IFACE_MIXER, "Word Clock Sync", 0, ICE1712_DELTA_WORD_CLOCK_SELECT, 1, 0);
 static snd_kcontrol_new_t snd_ice1712_delta1010lt_wordclock_select __devinitdata =
-ICE1712_GPIO(SNDRV_CTL_ELEM_IFACE_PCM, "Word Clock Sync", 0, ICE1712_DELTA_1010LT_WORDCLOCK, 1, 0);
+ICE1712_GPIO(SNDRV_CTL_ELEM_IFACE_MIXER, "Word Clock Sync", 0, ICE1712_DELTA_1010LT_WORDCLOCK, 1, 0);
 static snd_kcontrol_new_t snd_ice1712_delta1010_wordclock_status __devinitdata =
-ICE1712_GPIO(SNDRV_CTL_ELEM_IFACE_PCM, "Word Clock Status", 0, ICE1712_DELTA_WORD_CLOCK_STATUS, 1, SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE);
+ICE1712_GPIO(SNDRV_CTL_ELEM_IFACE_MIXER, "Word Clock Status", 0, ICE1712_DELTA_WORD_CLOCK_STATUS, 1, SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE);
 static snd_kcontrol_new_t snd_ice1712_deltadio2496_spdif_in_select __devinitdata =
-ICE1712_GPIO(SNDRV_CTL_ELEM_IFACE_PCM, "IEC958 Input Optical", 0, ICE1712_DELTA_SPDIF_INPUT_SELECT, 0, 0);
+ICE1712_GPIO(SNDRV_CTL_ELEM_IFACE_MIXER, "IEC958 Input Optical", 0, ICE1712_DELTA_SPDIF_INPUT_SELECT, 0, 0);
 static snd_kcontrol_new_t snd_ice1712_delta_spdif_in_status __devinitdata =
-ICE1712_GPIO(SNDRV_CTL_ELEM_IFACE_PCM, "Delta IEC958 Input Status", 0, ICE1712_DELTA_SPDIF_IN_STAT, 1, SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE);
+ICE1712_GPIO(SNDRV_CTL_ELEM_IFACE_MIXER, "Delta IEC958 Input Status", 0, ICE1712_DELTA_SPDIF_IN_STAT, 1, SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE);
 
 
 static int __devinit snd_ice1712_delta_add_controls(ice1712_t *ice)
diff --git a/sound/pci/ice1712/ice1712.c b/sound/pci/ice1712/ice1712.c
index a2545a5..b97f50d 100644
--- a/sound/pci/ice1712/ice1712.c
+++ b/sound/pci/ice1712/ice1712.c
@@ -1422,7 +1422,7 @@
 
 static snd_kcontrol_new_t snd_ice1712_multi_capture_spdif_switch __devinitdata = {
 	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-	.name = "IEC958 Multi Capture Switch",
+	.name = SNDRV_CTL_NAME_IEC958("Multi ",CAPTURE,SWITCH),
 	.info = snd_ice1712_pro_mixer_switch_info,
 	.get = snd_ice1712_pro_mixer_switch_get,
 	.put = snd_ice1712_pro_mixer_switch_put,
@@ -1441,7 +1441,7 @@
 
 static snd_kcontrol_new_t snd_ice1712_multi_capture_spdif_volume __devinitdata = {
 	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-	.name = "IEC958 Multi Capture Volume",
+	.name = SNDRV_CTL_NAME_IEC958("Multi ",CAPTURE,VOLUME),
 	.info = snd_ice1712_pro_mixer_volume_info,
 	.get = snd_ice1712_pro_mixer_volume_get,
 	.put = snd_ice1712_pro_mixer_volume_put,
@@ -1715,7 +1715,7 @@
 static snd_kcontrol_new_t snd_ice1712_spdif_maskc __devinitdata =
 {
 	.access =	SNDRV_CTL_ELEM_ACCESS_READ,
-	.iface =	SNDRV_CTL_ELEM_IFACE_MIXER,
+	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
 	.name =         SNDRV_CTL_NAME_IEC958("",PLAYBACK,CON_MASK),
 	.info =		snd_ice1712_spdif_info,
 	.get =		snd_ice1712_spdif_maskc_get,
@@ -1724,7 +1724,7 @@
 static snd_kcontrol_new_t snd_ice1712_spdif_maskp __devinitdata =
 {
 	.access =	SNDRV_CTL_ELEM_ACCESS_READ,
-	.iface =	SNDRV_CTL_ELEM_IFACE_MIXER,
+	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
 	.name =         SNDRV_CTL_NAME_IEC958("",PLAYBACK,PRO_MASK),
 	.info =		snd_ice1712_spdif_info,
 	.get =		snd_ice1712_spdif_maskp_get,
@@ -2203,7 +2203,7 @@
 
 static snd_kcontrol_new_t snd_ice1712_mixer_pro_spdif_route __devinitdata = {
 	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-	.name = "IEC958 Playback Route",
+	.name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,NONE) "Route",
 	.info = snd_ice1712_pro_route_info,
 	.get = snd_ice1712_pro_route_spdif_get,
 	.put = snd_ice1712_pro_route_spdif_put,
diff --git a/sound/pci/ice1712/ice1724.c b/sound/pci/ice1712/ice1724.c
index 79b5f12..c7af5e5 100644
--- a/sound/pci/ice1712/ice1724.c
+++ b/sound/pci/ice1712/ice1724.c
@@ -1414,7 +1414,7 @@
 static snd_kcontrol_new_t snd_vt1724_spdif_maskc __devinitdata =
 {
 	.access =	SNDRV_CTL_ELEM_ACCESS_READ,
-	.iface =	SNDRV_CTL_ELEM_IFACE_MIXER,
+	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
 	.name =         SNDRV_CTL_NAME_IEC958("",PLAYBACK,CON_MASK),
 	.info =		snd_vt1724_spdif_info,
 	.get =		snd_vt1724_spdif_maskc_get,
@@ -1423,7 +1423,7 @@
 static snd_kcontrol_new_t snd_vt1724_spdif_maskp __devinitdata =
 {
 	.access =	SNDRV_CTL_ELEM_ACCESS_READ,
-	.iface =	SNDRV_CTL_ELEM_IFACE_MIXER,
+	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
 	.name =         SNDRV_CTL_NAME_IEC958("",PLAYBACK,PRO_MASK),
 	.info =		snd_vt1724_spdif_info,
 	.get =		snd_vt1724_spdif_maskp_get,
@@ -1466,7 +1466,7 @@
 	.iface =	SNDRV_CTL_ELEM_IFACE_MIXER,
 	/* FIXME: the following conflict with IEC958 Playback Route */
 	// .name =         SNDRV_CTL_NAME_IEC958("",PLAYBACK,SWITCH),
-	.name =         "IEC958 Output Switch",
+	.name =         SNDRV_CTL_NAME_IEC958("Output ",NONE,SWITCH),
 	.info =		snd_vt1724_spdif_sw_info,
 	.get =		snd_vt1724_spdif_sw_get,
 	.put =		snd_vt1724_spdif_sw_put
diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
index d7af3e4..7b54841 100644
--- a/sound/pci/intel8x0.c
+++ b/sound/pci/intel8x0.c
@@ -389,6 +389,7 @@
 	struct ac97_pcm *pcm;
 	int pcm_open_flag;
 	unsigned int page_attr_changed: 1;
+	unsigned int suspended: 1;
 } ichdev_t;
 
 typedef struct _snd_intel8x0 intel8x0_t;
@@ -862,12 +863,16 @@
 	unsigned long port = ichdev->reg_offset;
 
 	switch (cmd) {
-	case SNDRV_PCM_TRIGGER_START:
 	case SNDRV_PCM_TRIGGER_RESUME:
+		ichdev->suspended = 0;
+		/* fallthru */
+	case SNDRV_PCM_TRIGGER_START:
 		val = ICH_IOCE | ICH_STARTBM;
 		break;
-	case SNDRV_PCM_TRIGGER_STOP:
 	case SNDRV_PCM_TRIGGER_SUSPEND:
+		ichdev->suspended = 1;
+		/* fallthru */
+	case SNDRV_PCM_TRIGGER_STOP:
 		val = 0;
 		break;
 	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
@@ -899,9 +904,11 @@
 
 	val = igetdword(chip, ICHREG(ALI_DMACR));
 	switch (cmd) {
+	case SNDRV_PCM_TRIGGER_RESUME:
+		ichdev->suspended = 0;
+		/* fallthru */
 	case SNDRV_PCM_TRIGGER_START:
 	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
-	case SNDRV_PCM_TRIGGER_RESUME:
 		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
 			/* clear FIFO for synchronization of channels */
 			fifo = igetdword(chip, fiforeg[ichdev->ali_slot / 4]);
@@ -913,9 +920,11 @@
 		val &= ~(1 << (ichdev->ali_slot + 16)); /* clear PAUSE flag */
 		iputdword(chip, ICHREG(ALI_DMACR), val | (1 << ichdev->ali_slot)); /* start DMA */
 		break;
+	case SNDRV_PCM_TRIGGER_SUSPEND:
+		ichdev->suspended = 1;
+		/* fallthru */
 	case SNDRV_PCM_TRIGGER_STOP:
 	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
-	case SNDRV_PCM_TRIGGER_SUSPEND:
 		iputdword(chip, ICHREG(ALI_DMACR), val | (1 << (ichdev->ali_slot + 16))); /* pause */
 		iputbyte(chip, port + ICH_REG_OFF_CR, 0);
 		while (igetbyte(chip, port + ICH_REG_OFF_CR))
@@ -994,6 +1003,8 @@
 {
 	unsigned int cnt;
 	int dbl = runtime->rate > 48000;
+
+	spin_lock_irq(&chip->reg_lock);
 	switch (chip->device_type) {
 	case DEVICE_ALI:
 		cnt = igetdword(chip, ICHREG(ALI_SCR));
@@ -1037,6 +1048,7 @@
 		iputdword(chip, ICHREG(GLOB_CNT), cnt);
 		break;
 	}
+	spin_unlock_irq(&chip->reg_lock);
 }
 
 static int snd_intel8x0_pcm_prepare(snd_pcm_substream_t * substream)
@@ -1048,15 +1060,12 @@
 	ichdev->physbuf = runtime->dma_addr;
 	ichdev->size = snd_pcm_lib_buffer_bytes(substream);
 	ichdev->fragsize = snd_pcm_lib_period_bytes(substream);
-	spin_lock_irq(&chip->reg_lock);
 	if (ichdev->ichd == ICHD_PCMOUT) {
 		snd_intel8x0_setup_pcm_out(chip, runtime);
-		if (chip->device_type == DEVICE_INTEL_ICH4) {
+		if (chip->device_type == DEVICE_INTEL_ICH4)
 			ichdev->pos_shift = (runtime->sample_bits > 16) ? 2 : 1;
-		}
 	}
 	snd_intel8x0_setup_periods(chip, ichdev);
-	spin_unlock_irq(&chip->reg_lock);
 	return 0;
 }
 
@@ -1817,6 +1826,18 @@
 	},
 	{
 		.subvendor = 0x103c,
+		.subdevice = 0x0934,
+		.name = "HP nx8220",
+		.type = AC97_TUNE_MUTE_LED
+	},
+	{
+		.subvendor = 0x103c,
+		.subdevice = 0x099c,
+		.name = "HP nx6110",	/* AD1981B */
+		.type = AC97_TUNE_HP_ONLY
+	},
+	{
+		.subvendor = 0x103c,
 		.subdevice = 0x129d,
 		.name = "HP xw8000",
 		.type = AC97_TUNE_HP_ONLY
@@ -1870,6 +1891,12 @@
 		.type = AC97_TUNE_HP_ONLY
 	},
 	{
+		.subvendor = 0x10cf,
+		.subdevice = 0x12ec,
+		.name = "Fujitsu-Siemens 4010",
+		.type = AC97_TUNE_HP_ONLY
+	},
+	{
 		.subvendor = 0x10f1,
 		.subdevice = 0x2665,
 		.name = "Fujitsu-Siemens Celsius",	/* AD1981? */
@@ -2424,6 +2451,20 @@
 		}
 	}
 
+	/* resume status */
+	for (i = 0; i < chip->bdbars_count; i++) {
+		ichdev_t *ichdev = &chip->ichd[i];
+		unsigned long port = ichdev->reg_offset;
+		if (! ichdev->substream || ! ichdev->suspended)
+			continue;
+		if (ichdev->ichd == ICHD_PCMOUT)
+			snd_intel8x0_setup_pcm_out(chip, ichdev->substream->runtime);
+		iputdword(chip, port + ICH_REG_OFF_BDBAR, ichdev->bdbar_addr);
+		iputbyte(chip, port + ICH_REG_OFF_LVI, ichdev->lvi);
+		iputbyte(chip, port + ICH_REG_OFF_CIV, ichdev->civ);
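+		/* status bits are write-one-to-clear: discard any stale
+		 * FIFO error or interrupt state left from before suspend */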
+		iputbyte(chip, port + ichdev->roff_sr, ICH_FIFOE | ICH_BCIS | ICH_LVBCI);
+	}
+
 	return 0;
 }
 #endif /* CONFIG_PM */
diff --git a/sound/pci/korg1212/korg1212.c b/sound/pci/korg1212/korg1212.c
index 79d8eda..d2aa9c8 100644
--- a/sound/pci/korg1212/korg1212.c
+++ b/sound/pci/korg1212/korg1212.c
@@ -2067,7 +2067,7 @@
         },                                                                                      \
         {											\
                 .access =	SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_WRITE,	\
-                .iface =        SNDRV_CTL_ELEM_IFACE_PCM,					\
+                .iface =        SNDRV_CTL_ELEM_IFACE_MIXER,					\
                 .name =		c_name " Monitor Phase Invert",					\
                 .info =		snd_korg1212_control_phase_info,				\
                 .get =		snd_korg1212_control_phase_get,					\
@@ -2082,7 +2082,7 @@
         MON_MIXER(4, "ADAT-5"), MON_MIXER(5, "ADAT-6"), MON_MIXER(6, "ADAT-7"), MON_MIXER(7, "ADAT-8"),
 	{
                 .access =	SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_WRITE,
-                .iface =        SNDRV_CTL_ELEM_IFACE_PCM,
+                .iface =        SNDRV_CTL_ELEM_IFACE_MIXER,
                 .name =		"Sync Source",
                 .info =		snd_korg1212_control_sync_info,
                 .get =		snd_korg1212_control_sync_get,
diff --git a/sound/pci/nm256/nm256.c b/sound/pci/nm256/nm256.c
index 7eb20b8..2bbeb10 100644
--- a/sound/pci/nm256/nm256.c
+++ b/sound/pci/nm256/nm256.c
@@ -189,6 +189,7 @@
 	nm256_t *chip;
 	snd_pcm_substream_t *substream;
 	int running;
+	int suspended;
 	
 	u32 buf;	/* offset from chip->buffer */
 	int bufsize;	/* buffer size in bytes */
@@ -231,8 +232,10 @@
 	int mixer_status_mask;		/* bit mask to test the mixer status */
 
 	int irq;
+	int irq_acks;
 	irqreturn_t (*interrupt)(int, void *, struct pt_regs *);
 	int badintrcount;		/* counter to check bogus interrupts */
+	struct semaphore irq_mutex;
 
 	nm256_stream_t streams[2];
 
@@ -464,6 +467,37 @@
 	}
 }
 
+/* acquire interrupt */
+static int snd_nm256_acquire_irq(nm256_t *chip)
+{
+	down(&chip->irq_mutex);
+	if (chip->irq < 0) {
+		if (request_irq(chip->pci->irq, chip->interrupt, SA_INTERRUPT|SA_SHIRQ,
+				chip->card->driver, (void*)chip)) {
+			snd_printk("unable to grab IRQ %d\n", chip->pci->irq);
+			up(&chip->irq_mutex);
+			return -EBUSY;
+		}
+		chip->irq = chip->pci->irq;
+	}
+	chip->irq_acks++;
+	up(&chip->irq_mutex);
+	return 0;
+}
+
+/* release interrupt */
+static void snd_nm256_release_irq(nm256_t *chip)
+{
+	down(&chip->irq_mutex);
+	if (chip->irq_acks > 0)
+		chip->irq_acks--;
+	if (chip->irq_acks == 0 && chip->irq >= 0) {
+		free_irq(chip->irq, (void*)chip);
+		chip->irq = -1;
+	}
+	up(&chip->irq_mutex);
+}
+
 /*
  * start / stop
  */
@@ -538,15 +572,19 @@
 
 	spin_lock(&chip->reg_lock);
 	switch (cmd) {
-	case SNDRV_PCM_TRIGGER_START:
 	case SNDRV_PCM_TRIGGER_RESUME:
+		s->suspended = 0;
+		/* fallthru */
+	case SNDRV_PCM_TRIGGER_START:
 		if (! s->running) {
 			snd_nm256_playback_start(chip, s, substream);
 			s->running = 1;
 		}
 		break;
-	case SNDRV_PCM_TRIGGER_STOP:
 	case SNDRV_PCM_TRIGGER_SUSPEND:
+		s->suspended = 1;
+		/* fallthru */
+	case SNDRV_PCM_TRIGGER_STOP:
 		if (s->running) {
 			snd_nm256_playback_stop(chip);
 			s->running = 0;
@@ -818,6 +856,8 @@
 {
 	nm256_t *chip = snd_pcm_substream_chip(substream);
 
+	if (snd_nm256_acquire_irq(chip) < 0)
+		return -EBUSY;
 	snd_nm256_setup_stream(chip, &chip->streams[SNDRV_PCM_STREAM_PLAYBACK],
 			       substream, &snd_nm256_playback);
 	return 0;
@@ -828,6 +868,8 @@
 {
 	nm256_t *chip = snd_pcm_substream_chip(substream);
 
+	if (snd_nm256_acquire_irq(chip) < 0)
+		return -EBUSY;
 	snd_nm256_setup_stream(chip, &chip->streams[SNDRV_PCM_STREAM_CAPTURE],
 			       substream, &snd_nm256_capture);
 	return 0;
@@ -839,6 +881,9 @@
 static int
 snd_nm256_playback_close(snd_pcm_substream_t *substream)
 {
+	nm256_t *chip = snd_pcm_substream_chip(substream);
+
+	snd_nm256_release_irq(chip);
 	return 0;
 }
 
@@ -846,6 +891,9 @@
 static int
 snd_nm256_capture_close(snd_pcm_substream_t *substream)
 {
+	nm256_t *chip = snd_pcm_substream_chip(substream);
+
+	snd_nm256_release_irq(chip);
 	return 0;
 }
 
@@ -915,18 +963,16 @@
 static void
 snd_nm256_init_chip(nm256_t *chip)
 {
-	spin_lock_irq(&chip->reg_lock);
 	/* Reset everything. */
 	snd_nm256_writeb(chip, 0x0, 0x11);
 	snd_nm256_writew(chip, 0x214, 0);
 	/* stop sounds.. */
 	//snd_nm256_playback_stop(chip);
 	//snd_nm256_capture_stop(chip);
-	spin_unlock_irq(&chip->reg_lock);
 }
 
 
-static inline void
+static irqreturn_t
 snd_nm256_intr_check(nm256_t *chip)
 {
 	if (chip->badintrcount++ > 1000) {
@@ -947,7 +993,9 @@
 		if (chip->streams[SNDRV_PCM_STREAM_CAPTURE].running)
 			snd_nm256_capture_stop(chip);
 		chip->badintrcount = 0;
+		return IRQ_HANDLED;
 	}
+	return IRQ_NONE;
 }
 
 /* 
@@ -969,10 +1017,8 @@
 	status = snd_nm256_readw(chip, NM_INT_REG);
 
 	/* Not ours. */
-	if (status == 0) {
-		snd_nm256_intr_check(chip);
-		return IRQ_NONE;
-	}
+	if (status == 0)
+		return snd_nm256_intr_check(chip);
 
 	chip->badintrcount = 0;
 
@@ -1036,10 +1082,8 @@
 	status = snd_nm256_readl(chip, NM_INT_REG);
 
 	/* Not ours. */
-	if (status == 0) {
-		snd_nm256_intr_check(chip);
-		return IRQ_NONE;
-	}
+	if (status == 0)
+		return snd_nm256_intr_check(chip);
 
 	chip->badintrcount = 0;
 
@@ -1192,7 +1236,7 @@
 		AC97_PC_BEEP, AC97_PHONE, AC97_MIC, AC97_LINE, AC97_CD,
 		AC97_VIDEO, AC97_AUX, AC97_PCM, AC97_REC_SEL,
 		AC97_REC_GAIN, AC97_GENERAL_PURPOSE, AC97_3D_CONTROL,
-		AC97_EXTENDED_ID,
+		/*AC97_EXTENDED_ID,*/
 		AC97_VENDOR_ID1, AC97_VENDOR_ID2,
 		-1
 	};
@@ -1206,6 +1250,7 @@
 	for (i = 0; mixer_regs[i] >= 0; i++)
 		set_bit(mixer_regs[i], ac97.reg_accessed);
 	ac97.private_data = chip;
+	pbus->no_vra = 1;
 	err = snd_ac97_mixer(pbus, &ac97, &chip->ac97);
 	if (err < 0)
 		return err;
@@ -1281,6 +1326,7 @@
 static int nm256_resume(snd_card_t *card)
 {
 	nm256_t *chip = card->pm_private_data;
+	int i;
 
 	/* Perform a full reset on the hardware */
 	pci_enable_device(chip->pci);
@@ -1289,6 +1335,15 @@
 	/* restore ac97 */
 	snd_ac97_resume(chip->ac97);
 
+	for (i = 0; i < 2; i++) {
+		nm256_stream_t *s = &chip->streams[i];
+		if (s->substream && s->suspended) {
+			spin_lock_irq(&chip->reg_lock);
+			snd_nm256_set_format(chip, s, s->substream);
+			spin_unlock_irq(&chip->reg_lock);
+		}
+	}
+
 	return 0;
 }
 #endif /* CONFIG_PM */
@@ -1360,6 +1415,7 @@
 	chip->use_cache = usecache;
 	spin_lock_init(&chip->reg_lock);
 	chip->irq = -1;
+	init_MUTEX(&chip->irq_mutex);
 
 	chip->streams[SNDRV_PCM_STREAM_PLAYBACK].bufsize = play_bufsize;
 	chip->streams[SNDRV_PCM_STREAM_CAPTURE].bufsize = capt_bufsize;
@@ -1470,15 +1526,6 @@
 		chip->coeff_buf[SNDRV_PCM_STREAM_CAPTURE] = addr;
 	}
 
-	/* acquire interrupt */
-	if (request_irq(pci->irq, chip->interrupt, SA_INTERRUPT|SA_SHIRQ,
-			card->driver, (void*)chip)) {
-		err = -EBUSY;
-		snd_printk("unable to grab IRQ %d\n", pci->irq);
-		goto __error;
-	}
-	chip->irq = pci->irq;
-
 	/* Fixed setting. */
 	chip->mixer_base = NM_MIXER_OFFSET;
 
diff --git a/sound/pci/rme32.c b/sound/pci/rme32.c
index b7b554d..456be39 100644
--- a/sound/pci/rme32.c
+++ b/sound/pci/rme32.c
@@ -1900,7 +1900,7 @@
 	},
 	{
 		.access = SNDRV_CTL_ELEM_ACCESS_READ,
-		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.iface = SNDRV_CTL_ELEM_IFACE_PCM,
 		.name =	SNDRV_CTL_NAME_IEC958("", PLAYBACK, CON_MASK),
 		.info =	snd_rme32_control_spdif_mask_info,
 		.get =	snd_rme32_control_spdif_mask_get,
@@ -1908,7 +1908,7 @@
 	},
 	{
 		.access = SNDRV_CTL_ELEM_ACCESS_READ,
-		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+		.iface = SNDRV_CTL_ELEM_IFACE_PCM,
 		.name =	SNDRV_CTL_NAME_IEC958("", PLAYBACK, PRO_MASK),
 		.info =	snd_rme32_control_spdif_mask_info,
 		.get =	snd_rme32_control_spdif_mask_get,
diff --git a/sound/pci/rme96.c b/sound/pci/rme96.c
index 10c4f45..9645e90 100644
--- a/sound/pci/rme96.c
+++ b/sound/pci/rme96.c
@@ -2266,7 +2266,7 @@
 },
 {
 	.access =	SNDRV_CTL_ELEM_ACCESS_READ,
-	.iface =	SNDRV_CTL_ELEM_IFACE_MIXER,
+	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
 	.name =		SNDRV_CTL_NAME_IEC958("",PLAYBACK,CON_MASK),
 	.info =		snd_rme96_control_spdif_mask_info,
 	.get =		snd_rme96_control_spdif_mask_get,
@@ -2276,7 +2276,7 @@
 },
 {
 	.access =	SNDRV_CTL_ELEM_ACCESS_READ,
-	.iface =	SNDRV_CTL_ELEM_IFACE_MIXER,
+	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
 	.name =		SNDRV_CTL_NAME_IEC958("",PLAYBACK,PRO_MASK),
 	.info =		snd_rme96_control_spdif_mask_info,
 	.get =		snd_rme96_control_spdif_mask_get,
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
index 796621d..6694866 100644
--- a/sound/pci/rme9652/hdsp.c
+++ b/sound/pci/rme9652/hdsp.c
@@ -1524,7 +1524,7 @@
 }
 
 #define HDSP_SPDIF_IN(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_PCM,  \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
   .name = xname, \
   .index = xindex, \
   .info = snd_hdsp_info_spdif_in, \
@@ -1584,7 +1584,7 @@
 }
 
 #define HDSP_SPDIF_OUT(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, .name = xname, .index = xindex, \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
   .info = snd_hdsp_info_spdif_bits, \
   .get = snd_hdsp_get_spdif_out, .put = snd_hdsp_put_spdif_out }
 
@@ -1638,7 +1638,7 @@
 }
 
 #define HDSP_SPDIF_PROFESSIONAL(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, .name = xname, .index = xindex, \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
   .info = snd_hdsp_info_spdif_bits, \
   .get = snd_hdsp_get_spdif_professional, .put = snd_hdsp_put_spdif_professional }
 
@@ -1683,7 +1683,7 @@
 }
 
 #define HDSP_SPDIF_EMPHASIS(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, .name = xname, .index = xindex, \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
   .info = snd_hdsp_info_spdif_bits, \
   .get = snd_hdsp_get_spdif_emphasis, .put = snd_hdsp_put_spdif_emphasis }
 
@@ -1728,7 +1728,7 @@
 }
 
 #define HDSP_SPDIF_NON_AUDIO(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, .name = xname, .index = xindex, \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
   .info = snd_hdsp_info_spdif_bits, \
   .get = snd_hdsp_get_spdif_nonaudio, .put = snd_hdsp_put_spdif_nonaudio }
 
@@ -1773,7 +1773,7 @@
 }
 
 #define HDSP_SPDIF_SAMPLE_RATE(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
   .name = xname, \
   .index = xindex, \
   .access = SNDRV_CTL_ELEM_ACCESS_READ, \
@@ -1834,7 +1834,7 @@
 }
 
 #define HDSP_SYSTEM_SAMPLE_RATE(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
   .name = xname, \
   .index = xindex, \
   .access = SNDRV_CTL_ELEM_ACCESS_READ, \
@@ -1858,7 +1858,7 @@
 }
 
 #define HDSP_AUTOSYNC_SAMPLE_RATE(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_PCM, \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
   .name = xname, \
   .index = xindex, \
   .access = SNDRV_CTL_ELEM_ACCESS_READ, \
@@ -1918,7 +1918,7 @@
 }
 
 #define HDSP_SYSTEM_CLOCK_MODE(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
   .name = xname, \
   .index = xindex, \
   .access = SNDRV_CTL_ELEM_ACCESS_READ, \
@@ -1958,7 +1958,7 @@
 }
 
 #define HDSP_CLOCK_SOURCE(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_PCM, \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
   .name = xname, \
   .index = xindex, \
   .info = snd_hdsp_info_clock_source, \
@@ -2124,7 +2124,7 @@
 }
 
 #define HDSP_DA_GAIN(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
   .name = xname, \
   .index = xindex, \
   .info = snd_hdsp_info_da_gain, \
@@ -2210,7 +2210,7 @@
 }
 
 #define HDSP_AD_GAIN(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
   .name = xname, \
   .index = xindex, \
   .info = snd_hdsp_info_ad_gain, \
@@ -2296,7 +2296,7 @@
 }
 
 #define HDSP_PHONE_GAIN(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
   .name = xname, \
   .index = xindex, \
   .info = snd_hdsp_info_phone_gain, \
@@ -2382,7 +2382,7 @@
 }
 
 #define HDSP_XLR_BREAKOUT_CABLE(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
   .name = xname, \
   .index = xindex, \
   .info = snd_hdsp_info_xlr_breakout_cable, \
@@ -2447,7 +2447,7 @@
   Switching this on deactivates external ADAT
 */
 #define HDSP_AEB(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
   .name = xname, \
   .index = xindex, \
   .info = snd_hdsp_info_aeb, \
@@ -2508,7 +2508,7 @@
 }
 
 #define HDSP_PREF_SYNC_REF(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
   .name = xname, \
   .index = xindex, \
   .info = snd_hdsp_info_pref_sync_ref, \
@@ -2641,7 +2641,7 @@
 }
 
 #define HDSP_AUTOSYNC_REF(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
   .name = xname, \
   .index = xindex, \
   .access = SNDRV_CTL_ELEM_ACCESS_READ, \
@@ -2697,7 +2697,7 @@
 }
 
 #define HDSP_LINE_OUT(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
   .name = xname, \
   .index = xindex, \
   .info = snd_hdsp_info_line_out, \
@@ -2757,7 +2757,7 @@
 }
 
 #define HDSP_PRECISE_POINTER(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \
+{ .iface = SNDRV_CTL_ELEM_IFACE_CARD, \
   .name = xname, \
   .index = xindex, \
   .info = snd_hdsp_info_precise_pointer, \
@@ -2811,7 +2811,7 @@
 }
 
 #define HDSP_USE_MIDI_TASKLET(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \
+{ .iface = SNDRV_CTL_ELEM_IFACE_CARD, \
   .name = xname, \
   .index = xindex, \
   .info = snd_hdsp_info_use_midi_tasklet, \
@@ -2868,6 +2868,7 @@
 { .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \
   .name = xname, \
   .index = xindex, \
+  .device = 0, \
   .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | \
 		 SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
   .info = snd_hdsp_info_mixer, \
@@ -2939,7 +2940,7 @@
 }
 
 #define HDSP_WC_SYNC_CHECK(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
   .name = xname, \
   .index = xindex, \
   .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
@@ -2983,7 +2984,7 @@
 }
 
 #define HDSP_SPDIF_SYNC_CHECK(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
   .name = xname, \
   .index = xindex, \
   .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
@@ -3015,7 +3016,7 @@
 }
 
 #define HDSP_ADATSYNC_SYNC_CHECK(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
   .name = xname, \
   .index = xindex, \
   .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
@@ -3046,7 +3047,7 @@
 }
 
 #define HDSP_ADAT_SYNC_CHECK \
-{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
   .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
   .info = snd_hdsp_info_sync_check, \
   .get = snd_hdsp_get_adat_sync_check \
@@ -3119,7 +3120,7 @@
 },
 {
 	.access =	SNDRV_CTL_ELEM_ACCESS_READ,
-	.iface =	SNDRV_CTL_ELEM_IFACE_MIXER,
+	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
 	.name =		SNDRV_CTL_NAME_IEC958("",PLAYBACK,CON_MASK),
 	.info =		snd_hdsp_control_spdif_mask_info,
 	.get =		snd_hdsp_control_spdif_mask_get,
@@ -3129,7 +3130,7 @@
 },
 {
 	.access =	SNDRV_CTL_ELEM_ACCESS_READ,
-	.iface =	SNDRV_CTL_ELEM_IFACE_MIXER,
+	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
 	.name =		SNDRV_CTL_NAME_IEC958("",PLAYBACK,PRO_MASK),
 	.info =		snd_hdsp_control_spdif_mask_info,
 	.get =		snd_hdsp_control_spdif_mask_get,
@@ -3146,8 +3147,6 @@
 /* 'Sample Clock Source' complies with the alsa control naming scheme */ 
 HDSP_CLOCK_SOURCE("Sample Clock Source", 0),
 {
-	/* FIXME: should be PCM or MIXER? */
-	/* .iface = SNDRV_CTL_ELEM_IFACE_PCM, */
 	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
 	.name = "Sample Clock Source Locking",
 	.info = snd_hdsp_info_clock_source_lock,
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index 9e86d0e..5d786d1 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -65,7 +65,7 @@
 MODULE_PARM_DESC(enable, "Enable/disable specific HDSPM soundcards.");
 
 module_param_array(precise_ptr, bool, NULL, 0444);
-MODULE_PARM_DESC(precise_ptr, "Enable precise pointer, or disable.");
+MODULE_PARM_DESC(precise_ptr, "Enable or disable precise pointer.");
 
 module_param_array(line_outs_monitor, bool, NULL, 0444);
 MODULE_PARM_DESC(line_outs_monitor,
@@ -1104,14 +1104,14 @@
 	return 0;
 }
 
-snd_rawmidi_ops_t snd_hdspm_midi_output =
+static snd_rawmidi_ops_t snd_hdspm_midi_output =
 {
 	.open =		snd_hdspm_midi_output_open,
 	.close =	snd_hdspm_midi_output_close,
 	.trigger =	snd_hdspm_midi_output_trigger,
 };
 
-snd_rawmidi_ops_t snd_hdspm_midi_input =
+static snd_rawmidi_ops_t snd_hdspm_midi_input =
 {
 	.open =		snd_hdspm_midi_input_open,
 	.close =	snd_hdspm_midi_input_close,
@@ -1168,7 +1168,7 @@
 /* get the system sample rate which is set */
 
 #define HDSPM_SYSTEM_SAMPLE_RATE(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
   .name = xname, \
   .index = xindex, \
   .access = SNDRV_CTL_ELEM_ACCESS_READ, \
@@ -1195,7 +1195,7 @@
 }
 
 #define HDSPM_AUTOSYNC_SAMPLE_RATE(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_PCM, \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
   .name = xname, \
   .index = xindex, \
   .access = SNDRV_CTL_ELEM_ACCESS_READ, \
@@ -1264,7 +1264,7 @@
 }
 
 #define HDSPM_SYSTEM_CLOCK_MODE(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
   .name = xname, \
   .index = xindex, \
   .access = SNDRV_CTL_ELEM_ACCESS_READ, \
@@ -1310,7 +1310,7 @@
 }
 
 #define HDSPM_CLOCK_SOURCE(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_PCM, \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
   .name = xname, \
   .index = xindex, \
   .info = snd_hdspm_info_clock_source, \
@@ -1457,7 +1457,7 @@
 }
 
 #define HDSPM_PREF_SYNC_REF(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
   .name = xname, \
   .index = xindex, \
   .info = snd_hdspm_info_pref_sync_ref, \
@@ -1547,7 +1547,7 @@
 }
 
 #define HDSPM_AUTOSYNC_REF(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
   .name = xname, \
   .index = xindex, \
   .access = SNDRV_CTL_ELEM_ACCESS_READ, \
@@ -1604,7 +1604,7 @@
 }
 
 #define HDSPM_LINE_OUT(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
   .name = xname, \
   .index = xindex, \
   .info = snd_hdspm_info_line_out, \
@@ -1668,7 +1668,7 @@
 }
 
 #define HDSPM_TX_64(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
   .name = xname, \
   .index = xindex, \
   .info = snd_hdspm_info_tx_64, \
@@ -1731,7 +1731,7 @@
 }
 
 #define HDSPM_C_TMS(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
   .name = xname, \
   .index = xindex, \
   .info = snd_hdspm_info_c_tms, \
@@ -1794,7 +1794,7 @@
 }
 
 #define HDSPM_SAFE_MODE(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
   .name = xname, \
   .index = xindex, \
   .info = snd_hdspm_info_safe_mode, \
@@ -1857,7 +1857,7 @@
 }
 
 #define HDSPM_INPUT_SELECT(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
   .name = xname, \
   .index = xindex, \
   .info = snd_hdspm_info_input_select, \
@@ -1941,6 +1941,7 @@
 { .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \
   .name = xname, \
   .index = xindex, \
+  .device = 0, \
   .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | \
 		 SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
   .info = snd_hdspm_info_mixer, \
@@ -2124,7 +2125,7 @@
 }
 
 #define HDSPM_WC_SYNC_CHECK(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
   .name = xname, \
   .index = xindex, \
   .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
@@ -2170,7 +2171,7 @@
 
 
 #define HDSPM_MADI_SYNC_CHECK(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_HWDEP, \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \
   .name = xname, \
   .index = xindex, \
   .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
diff --git a/sound/pci/rme9652/rme9652.c b/sound/pci/rme9652/rme9652.c
index 1bc9d0d..8ee4d6f 100644
--- a/sound/pci/rme9652/rme9652.c
+++ b/sound/pci/rme9652/rme9652.c
@@ -893,7 +893,7 @@
 }
 
 #define RME9652_ADAT1_IN(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = xname, .index = xindex, \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
   .info = snd_rme9652_info_adat1_in, \
   .get = snd_rme9652_get_adat1_in, \
   .put = snd_rme9652_put_adat1_in }
@@ -971,7 +971,7 @@
 }
 
 #define RME9652_SPDIF_IN(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = xname, .index = xindex, \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
   .info = snd_rme9652_info_spdif_in, \
   .get = snd_rme9652_get_spdif_in, .put = snd_rme9652_put_spdif_in }
 
@@ -1042,7 +1042,7 @@
 }
 
 #define RME9652_SPDIF_OUT(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = xname, .index = xindex, \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
   .info = snd_rme9652_info_spdif_out, \
   .get = snd_rme9652_get_spdif_out, .put = snd_rme9652_put_spdif_out }
 
@@ -1110,7 +1110,7 @@
 }
 
 #define RME9652_SYNC_MODE(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = xname, .index = xindex, \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
   .info = snd_rme9652_info_sync_mode, \
   .get = snd_rme9652_get_sync_mode, .put = snd_rme9652_put_sync_mode }
 
@@ -1195,7 +1195,7 @@
 }
 
 #define RME9652_SYNC_PREF(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = xname, .index = xindex, \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
   .info = snd_rme9652_info_sync_pref, \
   .get = snd_rme9652_get_sync_pref, .put = snd_rme9652_put_sync_pref }
 
@@ -1340,7 +1340,7 @@
 }
 
 #define RME9652_PASSTHRU(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = xname, .index = xindex, \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
   .info = snd_rme9652_info_passthru, \
   .put = snd_rme9652_put_passthru, \
   .get = snd_rme9652_get_passthru }
@@ -1386,7 +1386,7 @@
 /* Read-only switches */
 
 #define RME9652_SPDIF_RATE(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = xname, .index = xindex, \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
   .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
   .info = snd_rme9652_info_spdif_rate, \
   .get = snd_rme9652_get_spdif_rate }
@@ -1411,7 +1411,7 @@
 }
 
 #define RME9652_ADAT_SYNC(xname, xindex, xidx) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = xname, .index = xindex, \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
   .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
   .info = snd_rme9652_info_adat_sync, \
   .get = snd_rme9652_get_adat_sync, .private_value = xidx }
@@ -1447,7 +1447,7 @@
 }
 
 #define RME9652_TC_VALID(xname, xindex) \
-{ .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = xname, .index = xindex, \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \
   .access = SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_VOLATILE, \
   .info = snd_rme9652_info_tc_valid, \
   .get = snd_rme9652_get_tc_valid }
@@ -1545,7 +1545,7 @@
 },
 {
 	.access =	SNDRV_CTL_ELEM_ACCESS_READ,
-	.iface =	SNDRV_CTL_ELEM_IFACE_MIXER,
+	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
 	.name =		SNDRV_CTL_NAME_IEC958("",PLAYBACK,CON_MASK),
 	.info =		snd_rme9652_control_spdif_mask_info,
 	.get =		snd_rme9652_control_spdif_mask_get,
@@ -1555,7 +1555,7 @@
 },
 {
 	.access =	SNDRV_CTL_ELEM_ACCESS_READ,
-	.iface =	SNDRV_CTL_ELEM_IFACE_MIXER,
+	.iface =	SNDRV_CTL_ELEM_IFACE_PCM,
 	.name =		SNDRV_CTL_NAME_IEC958("",PLAYBACK,PRO_MASK),
 	.info =		snd_rme9652_control_spdif_mask_info,
 	.get =		snd_rme9652_control_spdif_mask_get,
@@ -1568,7 +1568,7 @@
 RME9652_SYNC_MODE("Sync Mode", 0),
 RME9652_SYNC_PREF("Preferred Sync Source", 0),
 {
-	.iface = SNDRV_CTL_ELEM_IFACE_PCM,
+	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
 	.name = "Channels Thru",
 	.index = 0,
 	.info = snd_rme9652_info_thru,
diff --git a/sound/pci/trident/trident_main.c b/sound/pci/trident/trident_main.c
index 29d89bfb..f30d9d9 100644
--- a/sound/pci/trident/trident_main.c
+++ b/sound/pci/trident/trident_main.c
@@ -1689,7 +1689,7 @@
 	.info =			(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
 				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
 				 SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_SYNC_START |
-				 SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME),
+				 SNDRV_PCM_INFO_PAUSE /* | SNDRV_PCM_INFO_RESUME */),
 	.formats =		(SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE |
 				 SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_U16_LE),
 	.rates =		SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000,
@@ -1714,7 +1714,7 @@
 	.info =			(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
 				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
 				 SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_SYNC_START |
-				 SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME),
+				 SNDRV_PCM_INFO_PAUSE /* | SNDRV_PCM_INFO_RESUME */),
 	.formats =		(SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE |
 				 SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_U16_LE),
 	.rates =		SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000,
@@ -1739,7 +1739,7 @@
 	.info =			(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
 				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
 				 SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_SYNC_START |
-				 SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME),
+				 SNDRV_PCM_INFO_PAUSE /* | SNDRV_PCM_INFO_RESUME */),
 	.formats =		SNDRV_PCM_FMTBIT_S16_LE,
 	.rates =		SNDRV_PCM_RATE_48000,
 	.rate_min =		48000,
@@ -1763,7 +1763,7 @@
 	.info =			(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
 				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
 				 SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_SYNC_START |
-				 SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME),
+				 SNDRV_PCM_INFO_PAUSE /* | SNDRV_PCM_INFO_RESUME */),
 	.formats =		SNDRV_PCM_FMTBIT_S16_LE,
 	.rates =		(SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 |
 				 SNDRV_PCM_RATE_48000),
@@ -1784,7 +1784,7 @@
 	.info =			(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
 				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
 				 SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_SYNC_START |
-				 SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME),
+				 SNDRV_PCM_INFO_PAUSE /* | SNDRV_PCM_INFO_RESUME */),
 	.formats =		SNDRV_PCM_FMTBIT_S16_LE,
 	.rates =		SNDRV_PCM_RATE_48000,
 	.rate_min =		48000,
diff --git a/sound/pci/via82xx.c b/sound/pci/via82xx.c
index 4889600..56c6e52 100644
--- a/sound/pci/via82xx.c
+++ b/sound/pci/via82xx.c
@@ -663,10 +663,12 @@
 		val = 0;
 	switch (cmd) {
 	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_RESUME:
 		val |= VIA_REG_CTRL_START;
 		viadev->running = 1;
 		break;
 	case SNDRV_PCM_TRIGGER_STOP:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
 		val = VIA_REG_CTRL_TERMINATE;
 		viadev->running = 0;
 		break;
@@ -929,12 +931,12 @@
 
 	if ((rate_changed = via_lock_rate(&chip->rates[0], ac97_rate)) < 0)
 		return rate_changed;
-	if (rate_changed) {
+	if (rate_changed)
 		snd_ac97_set_rate(chip->ac97, AC97_PCM_FRONT_DAC_RATE,
 				  chip->no_vra ? 48000 : runtime->rate);
-		snd_ac97_set_rate(chip->ac97, AC97_SPDIF,
-				  chip->no_vra ? 48000 : runtime->rate);
-	}
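+	/* only the DXS3 channel (register offset 0x30) can be routed to
+	 * the S/PDIF output */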
+	if (chip->spdif_on && viadev->reg_offset == 0x30)
+		snd_ac97_set_rate(chip->ac97, AC97_SPDIF, runtime->rate);
+
 	if (runtime->rate == 48000)
 		rbits = 0xfffff;
 	else
@@ -1035,7 +1037,7 @@
 	.info =			(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
 				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
 				 SNDRV_PCM_INFO_MMAP_VALID |
-				 SNDRV_PCM_INFO_RESUME |
+				 /* SNDRV_PCM_INFO_RESUME | */
 				 SNDRV_PCM_INFO_PAUSE),
 	.formats =		SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE,
 	.rates =		SNDRV_PCM_RATE_48000,
@@ -1484,7 +1486,7 @@
 }
 
 static snd_kcontrol_new_t snd_via8233_dxs3_spdif_control __devinitdata = {
-	.name = "IEC958 Output Switch",
+	.name = SNDRV_CTL_NAME_IEC958("Output ",NONE,SWITCH),
 	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
 	.info = snd_via8233_dxs3_spdif_info,
 	.get = snd_via8233_dxs3_spdif_get,
@@ -2153,6 +2155,7 @@
 		{ .subvendor = 0x1019, .subdevice = 0x0a81, .action = VIA_DXS_NO_VRA }, /* ECS K7VTA3 v8.0 */
 		{ .subvendor = 0x1019, .subdevice = 0x0a85, .action = VIA_DXS_NO_VRA }, /* ECS L7VMM2 */
 		{ .subvendor = 0x1025, .subdevice = 0x0033, .action = VIA_DXS_NO_VRA }, /* Acer Inspire 1353LM */
+		{ .subvendor = 0x1025, .subdevice = 0x0046, .action = VIA_DXS_SRC }, /* Acer Aspire 1524 WLMi */
 		{ .subvendor = 0x1043, .subdevice = 0x8095, .action = VIA_DXS_NO_VRA }, /* ASUS A7V8X (FIXME: possibly VIA_DXS_ENABLE?)*/
 		{ .subvendor = 0x1043, .subdevice = 0x80a1, .action = VIA_DXS_NO_VRA }, /* ASUS A7V8-X */
 		{ .subvendor = 0x1043, .subdevice = 0x80b0, .action = VIA_DXS_NO_VRA }, /* ASUS A7V600 & K8V*/ 
@@ -2168,10 +2171,12 @@
 		{ .subvendor = 0x1297, .subdevice = 0xc160, .action = VIA_DXS_ENABLE }, /* Shuttle SK41G */
 		{ .subvendor = 0x1458, .subdevice = 0xa002, .action = VIA_DXS_ENABLE }, /* Gigabyte GA-7VAXP */
 		{ .subvendor = 0x1462, .subdevice = 0x0080, .action = VIA_DXS_SRC }, /* MSI K8T Neo-FIS2R */
+		{ .subvendor = 0x1462, .subdevice = 0x0430, .action = VIA_DXS_SRC }, /* MSI 7142 (K8MM-V) */
 		{ .subvendor = 0x1462, .subdevice = 0x3800, .action = VIA_DXS_ENABLE }, /* MSI KT266 */
 		{ .subvendor = 0x1462, .subdevice = 0x5901, .action = VIA_DXS_NO_VRA }, /* MSI KT6 Delta-SR */
 		{ .subvendor = 0x1462, .subdevice = 0x7023, .action = VIA_DXS_NO_VRA }, /* MSI K8T Neo2-FI */
 		{ .subvendor = 0x1462, .subdevice = 0x7120, .action = VIA_DXS_ENABLE }, /* MSI KT4V */
+		{ .subvendor = 0x1462, .subdevice = 0x7142, .action = VIA_DXS_ENABLE }, /* MSI K8MM-V */
 		{ .subvendor = 0x147b, .subdevice = 0x1401, .action = VIA_DXS_ENABLE }, /* ABIT KD7(-RAID) */
 		{ .subvendor = 0x147b, .subdevice = 0x1411, .action = VIA_DXS_ENABLE }, /* ABIT VA-20 */
 		{ .subvendor = 0x147b, .subdevice = 0x1413, .action = VIA_DXS_ENABLE }, /* ABIT KV8 Pro */
diff --git a/sound/pci/via82xx_modem.c b/sound/pci/via82xx_modem.c
index 4a9779c..5872d43 100644
--- a/sound/pci/via82xx_modem.c
+++ b/sound/pci/via82xx_modem.c
@@ -521,6 +521,7 @@
 
 	switch (cmd) {
 	case SNDRV_PCM_TRIGGER_START:
+	case SNDRV_PCM_TRIGGER_SUSPEND:
 		val |= VIA_REG_CTRL_START;
 		viadev->running = 1;
 		break;
@@ -697,7 +698,7 @@
 	.info =			(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
 				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
 				 SNDRV_PCM_INFO_MMAP_VALID |
-				 SNDRV_PCM_INFO_RESUME |
+				 /* SNDRV_PCM_INFO_RESUME | */
 				 SNDRV_PCM_INFO_PAUSE),
 	.formats =		SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE,
 	.rates =		SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_KNOT,
diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
index d54f88a..0548364 100644
--- a/sound/pci/ymfpci/ymfpci_main.c
+++ b/sound/pci/ymfpci/ymfpci_main.c
@@ -321,6 +321,26 @@
 			snd_pcm_period_elapsed(ypcm->substream);
 			spin_lock(&chip->reg_lock);
 		}
+
+		if (unlikely(ypcm->update_pcm_vol)) {
+			unsigned int subs = ypcm->substream->number;
+			unsigned int next_bank = 1 - chip->active_bank;
+			snd_ymfpci_playback_bank_t *bank;
+			u32 volume;
+			
+			bank = &voice->bank[next_bank];
+			volume = cpu_to_le32(chip->pcm_mixer[subs].left << 15);
+			bank->left_gain_end = volume;
+			if (ypcm->output_rear)
+				bank->eff2_gain_end = volume;
+			if (ypcm->voices[1])
+				bank = &ypcm->voices[1]->bank[next_bank];
+			volume = cpu_to_le32(chip->pcm_mixer[subs].right << 15);
+			bank->right_gain_end = volume;
+			if (ypcm->output_rear)
+				bank->eff3_gain_end = volume;
+			ypcm->update_pcm_vol--;
+		}
 	}
 	spin_unlock(&chip->reg_lock);
 }
@@ -451,87 +471,74 @@
 	return 0;
 }
 
-static void snd_ymfpci_pcm_init_voice(ymfpci_voice_t *voice, int stereo,
-				      int rate, int w_16, unsigned long addr,
-				      unsigned int end,
-				      int output_front, int output_rear)
+static void snd_ymfpci_pcm_init_voice(ymfpci_pcm_t *ypcm, unsigned int voiceidx,
+				      snd_pcm_runtime_t *runtime,
+				      int has_pcm_volume)
 {
+	ymfpci_voice_t *voice = ypcm->voices[voiceidx];
 	u32 format;
-	u32 delta = snd_ymfpci_calc_delta(rate);
-	u32 lpfQ = snd_ymfpci_calc_lpfQ(rate);
-	u32 lpfK = snd_ymfpci_calc_lpfK(rate);
+	u32 delta = snd_ymfpci_calc_delta(runtime->rate);
+	u32 lpfQ = snd_ymfpci_calc_lpfQ(runtime->rate);
+	u32 lpfK = snd_ymfpci_calc_lpfK(runtime->rate);
 	snd_ymfpci_playback_bank_t *bank;
 	unsigned int nbank;
+	u32 vol_left, vol_right;
+	u8 use_left, use_right;
 
 	snd_assert(voice != NULL, return);
-	format = (stereo ? 0x00010000 : 0) | (w_16 ? 0 : 0x80000000);
+	if (runtime->channels == 1) {
+		use_left = 1;
+		use_right = 1;
+	} else {
+		use_left = (voiceidx & 1) == 0;
+		use_right = !use_left;
+	}
+	if (has_pcm_volume) {
+		vol_left = cpu_to_le32(ypcm->chip->pcm_mixer
+				       [ypcm->substream->number].left << 15);
+		vol_right = cpu_to_le32(ypcm->chip->pcm_mixer
+					[ypcm->substream->number].right << 15);
+	} else {
+		vol_left = cpu_to_le32(0x40000000);
+		vol_right = cpu_to_le32(0x40000000);
+	}
+	format = runtime->channels == 2 ? 0x00010000 : 0;
+	if (snd_pcm_format_width(runtime->format) == 8)
+		format |= 0x80000000;
+	if (runtime->channels == 2 && (voiceidx & 1) != 0)
+		format |= 1;
 	for (nbank = 0; nbank < 2; nbank++) {
 		bank = &voice->bank[nbank];
+		memset(bank, 0, sizeof(*bank));
 		bank->format = cpu_to_le32(format);
-		bank->loop_default = 0;
-		bank->base = cpu_to_le32(addr);
-		bank->loop_start = 0;
-		bank->loop_end = cpu_to_le32(end);
-		bank->loop_frac = 0;
-		bank->eg_gain_end = cpu_to_le32(0x40000000);
+		bank->base = cpu_to_le32(runtime->dma_addr);
+		bank->loop_end = cpu_to_le32(ypcm->buffer_size);
 		bank->lpfQ = cpu_to_le32(lpfQ);
-		bank->status = 0;
-		bank->num_of_frames = 0;
-		bank->loop_count = 0;
-		bank->start = 0;
-		bank->start_frac = 0;
 		bank->delta =
 		bank->delta_end = cpu_to_le32(delta);
 		bank->lpfK =
 		bank->lpfK_end = cpu_to_le32(lpfK);
-		bank->eg_gain = cpu_to_le32(0x40000000);
-		bank->lpfD1 =
-		bank->lpfD2 = 0;
+		bank->eg_gain =
+		bank->eg_gain_end = cpu_to_le32(0x40000000);
 
-		bank->left_gain = 
-		bank->right_gain =
-		bank->left_gain_end =
-		bank->right_gain_end =
-		bank->eff1_gain =
-		bank->eff2_gain =
-		bank->eff3_gain =
-		bank->eff1_gain_end =
-		bank->eff2_gain_end =
-		bank->eff3_gain_end = 0;
-
-		if (!stereo) {
-			if (output_front) {
-				bank->left_gain = 
+		if (ypcm->output_front) {
+			if (use_left) {
+				bank->left_gain =
+				bank->left_gain_end = vol_left;
+			}
+			if (use_right) {
 				bank->right_gain =
-				bank->left_gain_end =
-				bank->right_gain_end = cpu_to_le32(0x40000000);
+				bank->right_gain_end = vol_right;
 			}
-			if (output_rear) {
+		}
+		if (ypcm->output_rear) {
+			if (use_left) {
 				bank->eff2_gain =
-				bank->eff2_gain_end =
+				bank->eff2_gain_end = vol_left;
+			}
+			if (use_right) {
 				bank->eff3_gain =
-				bank->eff3_gain_end = cpu_to_le32(0x40000000);
-			}
-		} else {
-			if (output_front) {
-				if ((voice->number & 1) == 0) {
-					bank->left_gain =
-					bank->left_gain_end = cpu_to_le32(0x40000000);
-				} else {
-					bank->format |= cpu_to_le32(1);
-					bank->right_gain =
-					bank->right_gain_end = cpu_to_le32(0x40000000);
-				}
-			}
-			if (output_rear) {
-				if ((voice->number & 1) == 0) {
-					bank->eff3_gain =
-					bank->eff3_gain_end = cpu_to_le32(0x40000000);
-				} else {
-					bank->format |= cpu_to_le32(1);
-					bank->eff2_gain =
-					bank->eff2_gain_end = cpu_to_le32(0x40000000);
-				}
+				bank->eff3_gain_end = vol_right;
 			}
 		}
 	}
@@ -613,7 +620,7 @@
 
 static int snd_ymfpci_playback_prepare(snd_pcm_substream_t * substream)
 {
-	// ymfpci_t *chip = snd_pcm_substream_chip(substream);
+	ymfpci_t *chip = snd_pcm_substream_chip(substream);
 	snd_pcm_runtime_t *runtime = substream->runtime;
 	ymfpci_pcm_t *ypcm = runtime->private_data;
 	unsigned int nvoice;
@@ -623,14 +630,8 @@
 	ypcm->period_pos = 0;
 	ypcm->last_pos = 0;
 	for (nvoice = 0; nvoice < runtime->channels; nvoice++)
-		snd_ymfpci_pcm_init_voice(ypcm->voices[nvoice],
-					  runtime->channels == 2,
-					  runtime->rate,
-					  snd_pcm_format_width(runtime->format) == 16,
-					  runtime->dma_addr,
-					  ypcm->buffer_size,
-					  ypcm->output_front,
-					  ypcm->output_rear);
+		snd_ymfpci_pcm_init_voice(ypcm, nvoice, runtime,
+					  substream->pcm == chip->pcm);
 	return 0;
 }
 
@@ -882,6 +883,7 @@
 	ymfpci_t *chip = snd_pcm_substream_chip(substream);
 	snd_pcm_runtime_t *runtime = substream->runtime;
 	ymfpci_pcm_t *ypcm;
+	snd_kcontrol_t *kctl;
 	int err;
 	
 	if ((err = snd_ymfpci_playback_open_1(substream)) < 0)
@@ -895,6 +897,10 @@
 		chip->rear_opened++;
 	}
 	spin_unlock_irq(&chip->reg_lock);
+
+	kctl = chip->pcm_mixer[substream->number].ctl;
+	kctl->vd[0].access &= ~SNDRV_CTL_ELEM_ACCESS_INACTIVE;
+	snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_INFO, &kctl->id);
 	return 0;
 }
 
@@ -987,6 +993,7 @@
 {
 	ymfpci_t *chip = snd_pcm_substream_chip(substream);
 	ymfpci_pcm_t *ypcm = substream->runtime->private_data;
+	snd_kcontrol_t *kctl;
 
 	spin_lock_irq(&chip->reg_lock);
 	if (ypcm->output_rear && chip->rear_opened > 0) {
@@ -994,6 +1001,9 @@
 		ymfpci_close_extension(chip);
 	}
 	spin_unlock_irq(&chip->reg_lock);
+	kctl = chip->pcm_mixer[substream->number].ctl;
+	kctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE;
+	snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_INFO, &kctl->id);
 	return snd_ymfpci_playback_close_1(substream);
 }
 
@@ -1665,6 +1675,66 @@
 	.private_value = 2,
 };
 
+/*
+ * PCM voice volume
+ */
+
+static int snd_ymfpci_pcm_vol_info(snd_kcontrol_t *kcontrol,
+				   snd_ctl_elem_info_t *uinfo)
+{
+	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
+	uinfo->count = 2;
+	uinfo->value.integer.min = 0;
+	uinfo->value.integer.max = 0x8000;
+	return 0;
+}
+
+static int snd_ymfpci_pcm_vol_get(snd_kcontrol_t *kcontrol,
+				  snd_ctl_elem_value_t *ucontrol)
+{
+	ymfpci_t *chip = snd_kcontrol_chip(kcontrol);
+	unsigned int subs = kcontrol->id.subdevice;
+
+	ucontrol->value.integer.value[0] = chip->pcm_mixer[subs].left;
+	ucontrol->value.integer.value[1] = chip->pcm_mixer[subs].right;
+	return 0;
+}
+
+static int snd_ymfpci_pcm_vol_put(snd_kcontrol_t *kcontrol,
+				  snd_ctl_elem_value_t *ucontrol)
+{
+	ymfpci_t *chip = snd_kcontrol_chip(kcontrol);
+	unsigned int subs = kcontrol->id.subdevice;
+	snd_pcm_substream_t *substream;
+	unsigned long flags;
+
+	if (ucontrol->value.integer.value[0] != chip->pcm_mixer[subs].left ||
+	    ucontrol->value.integer.value[1] != chip->pcm_mixer[subs].right) {
+		chip->pcm_mixer[subs].left = ucontrol->value.integer.value[0];
+		chip->pcm_mixer[subs].right = ucontrol->value.integer.value[1];
+
+		substream = (snd_pcm_substream_t *)kcontrol->private_value;
+		spin_lock_irqsave(&chip->voice_lock, flags);
+		if (substream->runtime && substream->runtime->private_data) {
+			ymfpci_pcm_t *ypcm = substream->runtime->private_data;
+			ypcm->update_pcm_vol = 2;
+		}
+		spin_unlock_irqrestore(&chip->voice_lock, flags);
+		return 1;
+	}
+	return 0;
+}
+
+static snd_kcontrol_new_t snd_ymfpci_pcm_volume __devinitdata = {
+	.iface = SNDRV_CTL_ELEM_IFACE_PCM,
+	.name = "PCM Playback Volume",
+	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
+		SNDRV_CTL_ELEM_ACCESS_INACTIVE,
+	.info = snd_ymfpci_pcm_vol_info,
+	.get = snd_ymfpci_pcm_vol_get,
+	.put = snd_ymfpci_pcm_vol_put,
+};
+
 
 /*
  *  Mixer routines
@@ -1686,6 +1756,7 @@
 {
 	ac97_template_t ac97;
 	snd_kcontrol_t *kctl;
+	snd_pcm_substream_t *substream;
 	unsigned int idx;
 	int err;
 	static ac97_bus_ops_t ops = {
@@ -1739,6 +1810,23 @@
 			return err;
 	}
 
+	/* per-voice volume */
+	substream = chip->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
+	for (idx = 0; idx < 32; ++idx) {
+		kctl = snd_ctl_new1(&snd_ymfpci_pcm_volume, chip);
+		if (!kctl)
+			return -ENOMEM;
+		kctl->id.device = chip->pcm->device;
+		kctl->id.subdevice = idx;
+		kctl->private_value = (unsigned long)substream;
+		if ((err = snd_ctl_add(chip->card, kctl)) < 0)
+			return err;
+		chip->pcm_mixer[idx].left = 0x8000;
+		chip->pcm_mixer[idx].right = 0x8000;
+		chip->pcm_mixer[idx].ctl = kctl;
+		substream = substream->next;
+	}
+
 	return 0;
 }
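
The 32 controls registered in the mixer hunk above are addressed by the PCM interface, the PCM device number, and the substream index as the subdevice, and they stay inactive until snd_ymfpci_playback_open() clears the access bit. A user-space sketch of setting one of them through alsa-lib (card, device, and subdevice numbers as well as the helper name are illustrative, not part of this patch):

    #include <alsa/asoundlib.h>

    static int set_pcm_vol(int card, int device, int subdevice,
                           long left, long right)
    {
        snd_ctl_t *ctl;
        snd_ctl_elem_id_t *id;
        snd_ctl_elem_value_t *val;
        char name[16];
        int err;

        snprintf(name, sizeof(name), "hw:%d", card);
        if ((err = snd_ctl_open(&ctl, name, 0)) < 0)
            return err;
        snd_ctl_elem_id_alloca(&id);
        snd_ctl_elem_id_set_interface(id, SND_CTL_ELEM_IFACE_PCM);
        snd_ctl_elem_id_set_name(id, "PCM Playback Volume");
        snd_ctl_elem_id_set_device(id, device);
        snd_ctl_elem_id_set_subdevice(id, subdevice);
        snd_ctl_elem_value_alloca(&val);
        snd_ctl_elem_value_set_id(val, id);
        snd_ctl_elem_value_set_integer(val, 0, left);   /* 0..0x8000 */
        snd_ctl_elem_value_set_integer(val, 1, right);
        err = snd_ctl_elem_write(ctl, val);
        snd_ctl_close(ctl);
        return err;
    }

Values follow snd_ymfpci_pcm_vol_info(): two integer channels in the range 0..0x8000.
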
 
diff --git a/sound/pcmcia/vx/vxpocket.c b/sound/pcmcia/vx/vxpocket.c
index 3a82161..1e8f16b 100644
--- a/sound/pcmcia/vx/vxpocket.c
+++ b/sound/pcmcia/vx/vxpocket.c
@@ -297,6 +297,7 @@
 	CS_CHECK(RequestConfiguration, pcmcia_request_configuration(link->handle, &link->conf));
 
 	chip->dev = &handle_to_dev(link->handle);
+	snd_card_set_dev(chip->card, chip->dev);
 
 	if (snd_vxpocket_assign_resources(chip, link->io.BasePort1, link->irq.AssignedIRQ) < 0)
 		goto failed;
@@ -376,7 +377,7 @@
 
 /*
  */
-static dev_link_t *vxp_attach(void)
+static dev_link_t *vxpocket_attach(void)
 {
 	snd_card_t *card;
 	struct snd_vxpocket *vxp;
@@ -407,7 +408,7 @@
 		return NULL;
 	}
 
-	vxp->index = index[i];
+	vxp->index = i;
 	card_alloc |= 1 << i;
 
 	/* Chain drivers */
@@ -417,7 +418,7 @@
 	return &vxp->link;
 }
 
-static void vxp_detach(dev_link_t *link)
+static void vxpocket_detach(dev_link_t *link)
 {
 	struct snd_vxpocket *vxp;
 	vx_core_t *chip;
@@ -458,8 +459,9 @@
 	.drv		= {
 		.name	= "snd-vxpocket",
 	},
-	.attach		= vxp_attach,
-	.detach		= vxp_detach,
+	.attach		= vxpocket_attach,
+	.detach		= vxpocket_detach,
+	.event		= vxpocket_event,
 	.id_table	= vxp_ids,
 };
 
diff --git a/sound/sound_core.c b/sound/sound_core.c
index 21a69e0..954f994 100644
--- a/sound/sound_core.c
+++ b/sound/sound_core.c
@@ -153,7 +153,7 @@
  *	list. Acquires locks as needed
  */
 
-static int sound_insert_unit(struct sound_unit **list, struct file_operations *fops, int index, int low, int top, const char *name, umode_t mode)
+static int sound_insert_unit(struct sound_unit **list, struct file_operations *fops, int index, int low, int top, const char *name, umode_t mode, struct device *dev)
 {
 	struct sound_unit *s = kmalloc(sizeof(*s), GFP_KERNEL);
 	int r;
@@ -175,7 +175,7 @@
 	devfs_mk_cdev(MKDEV(SOUND_MAJOR, s->unit_minor),
 			S_IFCHR | mode, s->name);
 	class_device_create(sound_class, MKDEV(SOUND_MAJOR, s->unit_minor),
-				NULL, s->name+6);
+			    dev, s->name+6);
 	return r;
 
  fail:
@@ -227,16 +227,18 @@
 static struct sound_unit *chains[SOUND_STEP];
 
 /**
- *	register_sound_special - register a special sound node
+ *	register_sound_special_device - register a special sound node
  *	@fops: File operations for the driver
  *	@unit: Unit number to allocate
+ *	@dev: device pointer
  *
  *	Allocate a special sound device by minor number from the sound
 *	subsystem. The allocated number is returned on success. On failure
  *	a negative error code is returned.
  */
  
-int register_sound_special(struct file_operations *fops, int unit)
+int register_sound_special_device(struct file_operations *fops, int unit,
+				  struct device *dev)
 {
 	const int chain = unit % SOUND_STEP;
 	int max_unit = 128 + chain;
@@ -294,9 +296,16 @@
 		break;
 	}
 	return sound_insert_unit(&chains[chain], fops, -1, unit, max_unit,
-				 name, S_IRUSR | S_IWUSR);
+				 name, S_IRUSR | S_IWUSR, dev);
 }
  
+EXPORT_SYMBOL(register_sound_special_device);
+
+int register_sound_special(struct file_operations *fops, int unit)
+{
+	return register_sound_special_device(fops, unit, NULL);
+}
+
 EXPORT_SYMBOL(register_sound_special);
 
 /**
@@ -312,7 +321,7 @@
 int register_sound_mixer(struct file_operations *fops, int dev)
 {
 	return sound_insert_unit(&chains[0], fops, dev, 0, 128,
-				 "mixer", S_IRUSR | S_IWUSR);
+				 "mixer", S_IRUSR | S_IWUSR, NULL);
 }
 
 EXPORT_SYMBOL(register_sound_mixer);
@@ -330,7 +339,7 @@
 int register_sound_midi(struct file_operations *fops, int dev)
 {
 	return sound_insert_unit(&chains[2], fops, dev, 2, 130,
-				 "midi", S_IRUSR | S_IWUSR);
+				 "midi", S_IRUSR | S_IWUSR, NULL);
 }
 
 EXPORT_SYMBOL(register_sound_midi);
@@ -356,7 +365,7 @@
 int register_sound_dsp(struct file_operations *fops, int dev)
 {
 	return sound_insert_unit(&chains[3], fops, dev, 3, 131,
-				 "dsp", S_IWUSR | S_IRUSR);
+				 "dsp", S_IWUSR | S_IRUSR, NULL);
 }
 
 EXPORT_SYMBOL(register_sound_dsp);
@@ -375,7 +384,7 @@
 int register_sound_synth(struct file_operations *fops, int dev)
 {
 	return sound_insert_unit(&chains[9], fops, dev, 9, 137,
-				 "synth", S_IRUSR | S_IWUSR);
+				 "synth", S_IRUSR | S_IWUSR, NULL);
 }
 
 EXPORT_SYMBOL(register_sound_synth);
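
With the extra dev argument threaded through sound_insert_unit(), an OSS minor can now be tied to its parent device in sysfs. A hedged sketch of a caller of the new entry point (placeholder fops and function names; it assumes the matching declaration lands in <linux/sound.h> elsewhere in this series):

    #include <linux/fs.h>
    #include <linux/pci.h>
    #include <linux/sound.h>

    static struct file_operations my_fops;      /* placeholder fops */

    static int my_register(struct pci_dev *pci)
    {
        /* unit 1 is only an example; a negative return is an error code */
        int minor = register_sound_special_device(&my_fops, 1, &pci->dev);

        if (minor < 0)
            return minor;
        /* tear down later with unregister_sound_special(minor) */
        return 0;
    }
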
diff --git a/sound/synth/emux/emux_synth.c b/sound/synth/emux/emux_synth.c
index f13b038..751bf12 100644
--- a/sound/synth/emux/emux_synth.c
+++ b/sound/synth/emux/emux_synth.c
@@ -98,7 +98,6 @@
 		vp = emu->ops.get_voice(emu, port);
 		if (vp == NULL || vp->ch < 0)
 			continue;
-		snd_assert(vp->emu != NULL && vp->hw != NULL, return);
 		if (STATE_IS_PLAYING(vp->state))
 			emu->ops.terminate(vp);
 
diff --git a/sound/usb/usbaudio.c b/sound/usb/usbaudio.c
index 8298c46..5aa5fe6 100644
--- a/sound/usb/usbaudio.c
+++ b/sound/usb/usbaudio.c
@@ -41,10 +41,12 @@
 #include <sound/driver.h>
 #include <linux/bitops.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/list.h>
 #include <linux/slab.h>
 #include <linux/string.h>
 #include <linux/usb.h>
+#include <linux/vmalloc.h>
 #include <linux/moduleparam.h>
 #include <sound/core.h>
 #include <sound/info.h>
@@ -79,7 +81,7 @@
 MODULE_PARM_DESC(vid, "Vendor ID for the USB audio device.");
 module_param_array(pid, int, NULL, 0444);
 MODULE_PARM_DESC(pid, "Product ID for the USB audio device.");
-module_param(nrpacks, int, 0444);
+module_param(nrpacks, int, 0644);
 MODULE_PARM_DESC(nrpacks, "Max. number of packets per URB.");
 module_param(async_unlink, bool, 0444);
 MODULE_PARM_DESC(async_unlink, "Use async unlink mode.");
@@ -97,7 +99,7 @@
 
 #define MAX_PACKS	10
 #define MAX_PACKS_HS	(MAX_PACKS * 8)	/* in high speed mode */
-#define MAX_URBS	5	/* max. 20ms long packets */
+#define MAX_URBS	8
 #define SYNC_URBS	4	/* always four urbs for sync */
 #define MIN_PACKS_URB	1	/* minimum 1 packet per urb */
 
@@ -126,11 +128,10 @@
 
 struct snd_urb_ctx {
 	struct urb *urb;
+	unsigned int buffer_size;	/* size of data buffer, if data URB */
 	snd_usb_substream_t *subs;
 	int index;	/* index for urb array */
 	int packets;	/* number of packets per urb */
-	int transfer;	/* transferred size */
-	char *buf;	/* buffer for capture */
 };
 
 struct snd_urb_ops {
@@ -165,12 +166,11 @@
 	unsigned int curframesize;	/* current packet size in frames (for capture) */
 	unsigned int fill_max: 1;	/* fill max packet size always */
 	unsigned int fmt_type;		/* USB audio format type (1-3) */
+	unsigned int packs_per_ms;	/* packets per millisecond (for playback) */
 
 	unsigned int running: 1;	/* running status */
 
-	unsigned int hwptr;			/* free frame position in the buffer (only for playback) */
 	unsigned int hwptr_done;			/* processed frame position in the buffer */
-	unsigned int transfer_sched;		/* scheduled frames since last period (for playback) */
 	unsigned int transfer_done;		/* processed frames since last period update */
 	unsigned long active_mask;	/* bitmask of active urbs */
 	unsigned long unlink_mask;	/* bitmask of unlinked urbs */
@@ -178,13 +178,14 @@
 	unsigned int nurbs;			/* # urbs */
 	snd_urb_ctx_t dataurb[MAX_URBS];	/* data urb table */
 	snd_urb_ctx_t syncurb[SYNC_URBS];	/* sync urb table */
-	char syncbuf[SYNC_URBS * 4];	/* sync buffer; it's so small - let's get static */
-	char *tmpbuf;			/* temporary buffer for playback */
+	char *syncbuf;				/* sync buffer for all sync URBs */
+	dma_addr_t sync_dma;			/* DMA address of syncbuf */
 
 	u64 formats;			/* format bitmasks (all or'ed) */
 	unsigned int num_formats;		/* number of supported audio formats (list) */
 	struct list_head fmt_list;	/* format list */
 	spinlock_t lock;
+	struct tasklet_struct start_period_elapsed;	/* for start trigger */
 
 	struct snd_urb_ops ops;		/* callbacks (must be filled at init) */
 };
@@ -311,27 +312,17 @@
 			       struct urb *urb)
 {
 	int i, offs;
-	unsigned long flags;
 	snd_urb_ctx_t *ctx = (snd_urb_ctx_t *)urb->context;
 
 	offs = 0;
 	urb->dev = ctx->subs->dev; /* we need to set this at each time */
-	urb->number_of_packets = 0;
-	spin_lock_irqsave(&subs->lock, flags);
 	for (i = 0; i < ctx->packets; i++) {
 		urb->iso_frame_desc[i].offset = offs;
 		urb->iso_frame_desc[i].length = subs->curpacksize;
 		offs += subs->curpacksize;
-		urb->number_of_packets++;
-		subs->transfer_sched += subs->curframesize;
-		if (subs->transfer_sched >= runtime->period_size) {
-			subs->transfer_sched -= runtime->period_size;
-			break;
-		}
 	}
-	spin_unlock_irqrestore(&subs->lock, flags);
-	urb->transfer_buffer = ctx->buf;
 	urb->transfer_buffer_length = offs;
+	urb->number_of_packets = ctx->packets;
 #if 0 // for check
 	if (! urb->bandwidth) {
 		int bustime;
@@ -359,6 +350,7 @@
 	unsigned char *cp;
 	int i;
 	unsigned int stride, len, oldptr;
+	int period_elapsed = 0;
 
 	stride = runtime->frame_bits >> 3;
 
@@ -378,6 +370,10 @@
 		if (subs->hwptr_done >= runtime->buffer_size)
 			subs->hwptr_done -= runtime->buffer_size;
 		subs->transfer_done += len;
+		if (subs->transfer_done >= runtime->period_size) {
+			subs->transfer_done -= runtime->period_size;
+			period_elapsed = 1;
+		}
 		spin_unlock_irqrestore(&subs->lock, flags);
 		/* copy a data chunk */
 		if (oldptr + len > runtime->buffer_size) {
@@ -388,15 +384,9 @@
 		} else {
 			memcpy(runtime->dma_area + oldptr * stride, cp, len * stride);
 		}
-		/* update the pointer, call callback if necessary */
-		spin_lock_irqsave(&subs->lock, flags);
-		if (subs->transfer_done >= runtime->period_size) {
-			subs->transfer_done -= runtime->period_size;
-			spin_unlock_irqrestore(&subs->lock, flags);
-			snd_pcm_period_elapsed(subs->pcm_substream);
-		} else
-			spin_unlock_irqrestore(&subs->lock, flags);
 	}
+	if (period_elapsed)
+		snd_pcm_period_elapsed(subs->pcm_substream);
 	return 0;
 }
 
@@ -492,12 +482,10 @@
 /*
  * prepare urb for playback data pipe
  *
- * we copy the data directly from the pcm buffer.
- * the current position to be copied is held in hwptr field.
- * since a urb can handle only a single linear buffer, if the total
- * transferred area overflows the buffer boundary, we cannot send
- * it directly from the buffer.  thus the data is once copied to
- * a temporary buffer and urb points to that.
+ * Since a URB can handle only a single linear buffer, we must use double
+ * buffering when the data to be transferred overflows the buffer boundary.
+ * To avoid inconsistencies when updating hwptr_done, we use double buffering
+ * for all URBs.
  */
 static int prepare_playback_urb(snd_usb_substream_t *subs,
 				snd_pcm_runtime_t *runtime,
@@ -506,6 +494,7 @@
 	int i, stride, offs;
 	unsigned int counts;
 	unsigned long flags;
+	int period_elapsed = 0;
 	snd_urb_ctx_t *ctx = (snd_urb_ctx_t *)urb->context;
 
 	stride = runtime->frame_bits >> 3;
@@ -530,80 +519,85 @@
 		urb->iso_frame_desc[i].length = counts * stride;
 		offs += counts;
 		urb->number_of_packets++;
-		subs->transfer_sched += counts;
-		if (subs->transfer_sched >= runtime->period_size) {
-			subs->transfer_sched -= runtime->period_size;
+		subs->transfer_done += counts;
+		if (subs->transfer_done >= runtime->period_size) {
+			subs->transfer_done -= runtime->period_size;
+			period_elapsed = 1;
 			if (subs->fmt_type == USB_FORMAT_TYPE_II) {
-				if (subs->transfer_sched > 0) {
-					/* FIXME: fill-max mode is not supported yet */
-					offs -= subs->transfer_sched;
-					counts -= subs->transfer_sched;
-					urb->iso_frame_desc[i].length = counts * stride;
-					subs->transfer_sched = 0;
+				if (subs->transfer_done > 0) {
+					/* FIXME: fill-max mode is not
+					 * supported yet */
+					offs -= subs->transfer_done;
+					counts -= subs->transfer_done;
+					urb->iso_frame_desc[i].length =
+						counts * stride;
+					subs->transfer_done = 0;
 				}
 				i++;
 				if (i < ctx->packets) {
 					/* add a transfer delimiter */
-					urb->iso_frame_desc[i].offset = offs * stride;
+					urb->iso_frame_desc[i].offset =
+						offs * stride;
 					urb->iso_frame_desc[i].length = 0;
 					urb->number_of_packets++;
 				}
+				break;
 			}
-			break;
  		}
+		/* finish at the frame boundary at/after the period boundary */
+		if (period_elapsed &&
+		    (i & (subs->packs_per_ms - 1)) == subs->packs_per_ms - 1)
+			break;
 	}
-	if (subs->hwptr + offs > runtime->buffer_size) {
-		/* err, the transferred area goes over buffer boundary.
-		 * copy the data to the temp buffer.
-		 */
-		int len;
-		len = runtime->buffer_size - subs->hwptr;
-		urb->transfer_buffer = subs->tmpbuf;
-		memcpy(subs->tmpbuf, runtime->dma_area + subs->hwptr * stride, len * stride);
-		memcpy(subs->tmpbuf + len * stride, runtime->dma_area, (offs - len) * stride);
-		subs->hwptr += offs;
-		subs->hwptr -= runtime->buffer_size;
+	if (subs->hwptr_done + offs > runtime->buffer_size) {
+		/* err, the transferred area goes over buffer boundary. */
+		unsigned int len = runtime->buffer_size - subs->hwptr_done;
+		memcpy(urb->transfer_buffer,
+		       runtime->dma_area + subs->hwptr_done * stride,
+		       len * stride);
+		memcpy(urb->transfer_buffer + len * stride,
+		       runtime->dma_area,
+		       (offs - len) * stride);
 	} else {
-		/* set the buffer pointer */
-		urb->transfer_buffer = runtime->dma_area + subs->hwptr * stride;
-		subs->hwptr += offs;
-		if (subs->hwptr == runtime->buffer_size)
-			subs->hwptr = 0;
+		memcpy(urb->transfer_buffer,
+		       runtime->dma_area + subs->hwptr_done * stride,
+		       offs * stride);
 	}
+	subs->hwptr_done += offs;
+	if (subs->hwptr_done >= runtime->buffer_size)
+		subs->hwptr_done -= runtime->buffer_size;
 	spin_unlock_irqrestore(&subs->lock, flags);
 	urb->transfer_buffer_length = offs * stride;
-	ctx->transfer = offs;
-
+	if (period_elapsed) {
+		if (likely(subs->running))
+			snd_pcm_period_elapsed(subs->pcm_substream);
+		else
+			tasklet_hi_schedule(&subs->start_period_elapsed);
+	}
 	return 0;
 }
 
 /*
  * process after playback data complete
- *
- * update the current position and call callback if a period is processed.
+ * - nothing to do
  */
 static int retire_playback_urb(snd_usb_substream_t *subs,
 			       snd_pcm_runtime_t *runtime,
 			       struct urb *urb)
 {
-	unsigned long flags;
-	snd_urb_ctx_t *ctx = (snd_urb_ctx_t *)urb->context;
-
-	spin_lock_irqsave(&subs->lock, flags);
-	subs->transfer_done += ctx->transfer;
-	subs->hwptr_done += ctx->transfer;
-	ctx->transfer = 0;
-	if (subs->hwptr_done >= runtime->buffer_size)
-		subs->hwptr_done -= runtime->buffer_size;
-	if (subs->transfer_done >= runtime->period_size) {
-		subs->transfer_done -= runtime->period_size;
-		spin_unlock_irqrestore(&subs->lock, flags);
-		snd_pcm_period_elapsed(subs->pcm_substream);
-	} else
-		spin_unlock_irqrestore(&subs->lock, flags);
 	return 0;
 }
 
+/*
+ * Delay the snd_pcm_period_elapsed() call until after the start trigger
+ * callback so that we're no longer in the substream's lock.
+ */
+static void start_period_elapsed(unsigned long data)
+{
+	snd_usb_substream_t *subs = (snd_usb_substream_t *)data;
+	snd_pcm_period_elapsed(subs->pcm_substream);
+}
+
 
 /*
  */
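
The copy into urb->transfer_buffer above is the usual ring-to-linear copy, split in two when the PCM buffer wraps. Isolated from the driver (a generic sketch with made-up names; stride is bytes per frame):

    #include <string.h>

    /*
     * Copy 'frames' frames starting at frame position 'pos' out of a ring
     * buffer of 'buffer_size' frames into a linear destination, splitting
     * the memcpy() when the region wraps around the end of the ring.
     */
    static void copy_from_ring(char *dst, const char *ring,
                               unsigned int pos, unsigned int frames,
                               unsigned int buffer_size, unsigned int stride)
    {
        if (pos + frames > buffer_size) {
            unsigned int head = buffer_size - pos;

            memcpy(dst, ring + pos * stride, head * stride);
            memcpy(dst + head * stride, ring, (frames - head) * stride);
        } else {
            memcpy(dst, ring + pos * stride, frames * stride);
        }
    }
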
@@ -683,6 +677,42 @@
 }
 
 
+/* get the physical page pointer at the given offset */
+static struct page *snd_pcm_get_vmalloc_page(snd_pcm_substream_t *subs,
+					     unsigned long offset)
+{
+	void *pageptr = subs->runtime->dma_area + offset;
+	return vmalloc_to_page(pageptr);
+}
+
+/* allocate virtual buffer; may be called more than once */
+static int snd_pcm_alloc_vmalloc_buffer(snd_pcm_substream_t *subs, size_t size)
+{
+	snd_pcm_runtime_t *runtime = subs->runtime;
+	if (runtime->dma_area) {
+		if (runtime->dma_bytes >= size)
+			return 0; /* already large enough */
+		vfree_nocheck(runtime->dma_area);
+	}
+	runtime->dma_area = vmalloc_nocheck(size);
+	if (! runtime->dma_area)
+		return -ENOMEM;
+	runtime->dma_bytes = size;
+	return 0;
+}
+
+/* free virtual buffer; may be called more than once */
+static int snd_pcm_free_vmalloc_buffer(snd_pcm_substream_t *subs)
+{
+	snd_pcm_runtime_t *runtime = subs->runtime;
+	if (runtime->dma_area) {
+		vfree_nocheck(runtime->dma_area);
+		runtime->dma_area = NULL;
+	}
+	return 0;
+}
+
+
 /*
  * unlink active urbs.
  */
@@ -824,8 +854,14 @@
  */
 static snd_pcm_uframes_t snd_usb_pcm_pointer(snd_pcm_substream_t *substream)
 {
-	snd_usb_substream_t *subs = (snd_usb_substream_t *)substream->runtime->private_data;
-	return subs->hwptr_done;
+	snd_usb_substream_t *subs;
+	snd_pcm_uframes_t hwptr_done;
+
+	subs = (snd_usb_substream_t *)substream->runtime->private_data;
+	spin_lock(&subs->lock);
+	hwptr_done = subs->hwptr_done;
+	spin_unlock(&subs->lock);
+	return hwptr_done;
 }
 
 
@@ -858,11 +894,13 @@
 static void release_urb_ctx(snd_urb_ctx_t *u)
 {
 	if (u->urb) {
+		if (u->buffer_size)
+			usb_buffer_free(u->subs->dev, u->buffer_size,
+					u->urb->transfer_buffer,
+					u->urb->transfer_dma);
 		usb_free_urb(u->urb);
 		u->urb = NULL;
 	}
-	kfree(u->buf);
-	u->buf = NULL;
 }
 
 /*
@@ -880,8 +918,9 @@
 		release_urb_ctx(&subs->dataurb[i]);
 	for (i = 0; i < SYNC_URBS; i++)
 		release_urb_ctx(&subs->syncurb[i]);
-	kfree(subs->tmpbuf);
-	subs->tmpbuf = NULL;
+	usb_buffer_free(subs->dev, SYNC_URBS * 4,
+			subs->syncbuf, subs->sync_dma);
+	subs->syncbuf = NULL;
 	subs->nurbs = 0;
 }
 
@@ -893,7 +932,7 @@
 {
 	unsigned int maxsize, n, i;
 	int is_playback = subs->direction == SNDRV_PCM_STREAM_PLAYBACK;
-	unsigned int npacks[MAX_URBS], urb_packs, total_packs;
+	unsigned int npacks[MAX_URBS], urb_packs, total_packs, packs_per_ms;
 
 	/* calculate the frequency in 16.16 format */
 	if (snd_usb_get_speed(subs->dev) == USB_SPEED_FULL)
@@ -920,24 +959,40 @@
 	else
 		subs->curpacksize = maxsize;
 
-	if (snd_usb_get_speed(subs->dev) == USB_SPEED_FULL)
-		urb_packs = nrpacks;
+	if (snd_usb_get_speed(subs->dev) == USB_SPEED_HIGH)
+		packs_per_ms = 8 >> subs->datainterval;
 	else
-		urb_packs = (nrpacks * 8) >> subs->datainterval;
+		packs_per_ms = 1;
+	subs->packs_per_ms = packs_per_ms;
 
-	/* allocate a temporary buffer for playback */
 	if (is_playback) {
-		subs->tmpbuf = kmalloc(maxsize * urb_packs, GFP_KERNEL);
-		if (! subs->tmpbuf) {
-			snd_printk(KERN_ERR "cannot malloc tmpbuf\n");
-			return -ENOMEM;
-		}
-	}
+		urb_packs = nrpacks;
+		urb_packs = max(urb_packs, (unsigned int)MIN_PACKS_URB);
+		urb_packs = min(urb_packs, (unsigned int)MAX_PACKS);
+	} else
+		urb_packs = 1;
+	urb_packs *= packs_per_ms;
 
 	/* decide how many packets to be used */
-	total_packs = (period_bytes + maxsize - 1) / maxsize;
-	if (total_packs < 2 * MIN_PACKS_URB)
-		total_packs = 2 * MIN_PACKS_URB;
+	if (is_playback) {
+		unsigned int minsize;
+		/* determine how small a packet can be */
+		minsize = (subs->freqn >> (16 - subs->datainterval))
+			  * (frame_bits >> 3);
+		/* with sync from device, assume it can be 12.5% lower */
+		if (subs->syncpipe)
+			minsize -= minsize >> 3;
+		minsize = max(minsize, 1u);
+		total_packs = (period_bytes + minsize - 1) / minsize;
+		/* round up to multiple of packs_per_ms */
+		total_packs = (total_packs + packs_per_ms - 1)
+				& ~(packs_per_ms - 1);
+		/* we need at least two URBs for queueing */
+		if (total_packs < 2 * MIN_PACKS_URB * packs_per_ms)
+			total_packs = 2 * MIN_PACKS_URB * packs_per_ms;
+	} else {
+		total_packs = MAX_URBS * urb_packs;
+	}
 	subs->nurbs = (total_packs + urb_packs - 1) / urb_packs;
 	if (subs->nurbs > MAX_URBS) {
 		/* too much... */
@@ -956,7 +1011,7 @@
 		subs->nurbs = 2;
 		npacks[0] = (total_packs + 1) / 2;
 		npacks[1] = total_packs - npacks[0];
-	} else if (npacks[subs->nurbs-1] < MIN_PACKS_URB) {
+	} else if (npacks[subs->nurbs-1] < MIN_PACKS_URB * packs_per_ms) {
 		/* the last packet is too small.. */
 		if (subs->nurbs > 2) {
 			/* merge to the first one */
@@ -975,27 +1030,20 @@
 		snd_urb_ctx_t *u = &subs->dataurb[i];
 		u->index = i;
 		u->subs = subs;
-		u->transfer = 0;
 		u->packets = npacks[i];
+		u->buffer_size = maxsize * u->packets;
 		if (subs->fmt_type == USB_FORMAT_TYPE_II)
 			u->packets++; /* for transfer delimiter */
-		if (! is_playback) {
-			/* allocate a capture buffer per urb */
-			u->buf = kmalloc(maxsize * u->packets, GFP_KERNEL);
-			if (! u->buf) {
-				release_substream_urbs(subs, 0);
-				return -ENOMEM;
-			}
-		}
 		u->urb = usb_alloc_urb(u->packets, GFP_KERNEL);
-		if (! u->urb) {
-			release_substream_urbs(subs, 0);
-			return -ENOMEM;
-		}
-		u->urb->dev = subs->dev;
+		if (! u->urb)
+			goto out_of_memory;
+		u->urb->transfer_buffer =
+			usb_buffer_alloc(subs->dev, u->buffer_size, GFP_KERNEL,
+					 &u->urb->transfer_dma);
+		if (! u->urb->transfer_buffer)
+			goto out_of_memory;
 		u->urb->pipe = subs->datapipe;
-		u->urb->transfer_flags = URB_ISO_ASAP;
-		u->urb->number_of_packets = u->packets;
+		u->urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
 		u->urb->interval = 1 << subs->datainterval;
 		u->urb->context = u;
 		u->urb->complete = snd_usb_complete_callback(snd_complete_urb);
@@ -1003,21 +1051,24 @@
 
 	if (subs->syncpipe) {
 		/* allocate and initialize sync urbs */
+		subs->syncbuf = usb_buffer_alloc(subs->dev, SYNC_URBS * 4,
+						 GFP_KERNEL, &subs->sync_dma);
+		if (! subs->syncbuf)
+			goto out_of_memory;
 		for (i = 0; i < SYNC_URBS; i++) {
 			snd_urb_ctx_t *u = &subs->syncurb[i];
 			u->index = i;
 			u->subs = subs;
 			u->packets = 1;
 			u->urb = usb_alloc_urb(1, GFP_KERNEL);
-			if (! u->urb) {
-				release_substream_urbs(subs, 0);
-				return -ENOMEM;
-			}
+			if (! u->urb)
+				goto out_of_memory;
 			u->urb->transfer_buffer = subs->syncbuf + i * 4;
+			u->urb->transfer_dma = subs->sync_dma + i * 4;
 			u->urb->transfer_buffer_length = 4;
-			u->urb->dev = subs->dev;
 			u->urb->pipe = subs->syncpipe;
-			u->urb->transfer_flags = URB_ISO_ASAP;
+			u->urb->transfer_flags = URB_ISO_ASAP |
+						 URB_NO_TRANSFER_DMA_MAP;
 			u->urb->number_of_packets = 1;
 			u->urb->interval = 1 << subs->syncinterval;
 			u->urb->context = u;
@@ -1025,6 +1076,10 @@
 		}
 	}
 	return 0;
+
+out_of_memory:
+	release_substream_urbs(subs, 0);
+	return -ENOMEM;
 }
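
The allocation pattern used for both the data and sync URBs above, shown on its own (a generic sketch against the USB API of this kernel generation; the helper name is made up):

    #include <linux/usb.h>

    /*
     * Allocate an isochronous URB together with a DMA-consistent transfer
     * buffer; URB_NO_TRANSFER_DMA_MAP tells the USB core to use the
     * transfer_dma handle instead of mapping the buffer on each submission.
     */
    static struct urb *alloc_iso_urb(struct usb_device *dev, int packets,
                                     size_t bytes, unsigned int pipe)
    {
        struct urb *urb = usb_alloc_urb(packets, GFP_KERNEL);

        if (!urb)
            return NULL;
        urb->transfer_buffer = usb_buffer_alloc(dev, bytes, GFP_KERNEL,
                                                &urb->transfer_dma);
        if (!urb->transfer_buffer) {
            usb_free_urb(urb);
            return NULL;
        }
        urb->pipe = pipe;
        urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
        return urb;
    }

The matching teardown is usb_buffer_free() with the same size and DMA handle, as release_urb_ctx() and release_substream_urbs() now do.
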
 
 
@@ -1293,7 +1348,8 @@
 	unsigned int channels, rate, format;
 	int ret, changed;
 
-	ret = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
+	ret = snd_pcm_alloc_vmalloc_buffer(substream,
+					   params_buffer_bytes(hw_params));
 	if (ret < 0)
 		return ret;
 
@@ -1349,7 +1405,7 @@
 	subs->cur_rate = 0;
 	subs->period_bytes = 0;
 	release_substream_urbs(subs, 0);
-	return snd_pcm_lib_free_pages(substream);
+	return snd_pcm_free_vmalloc_buffer(substream);
 }
 
 /*
@@ -1372,9 +1428,7 @@
 	subs->curframesize = bytes_to_frames(runtime, subs->curpacksize);
 
 	/* reset the pointer */
-	subs->hwptr = 0;
 	subs->hwptr_done = 0;
-	subs->transfer_sched = 0;
 	subs->transfer_done = 0;
 	subs->phase = 0;
 
@@ -1390,7 +1444,7 @@
 	.info =			(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
 				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
 				 SNDRV_PCM_INFO_MMAP_VALID),
-	.buffer_bytes_max =	(128*1024),
+	.buffer_bytes_max =	(256*1024),
 	.period_bytes_min =	64,
 	.period_bytes_max =	(128*1024),
 	.periods_min =		2,
@@ -1402,7 +1456,7 @@
 	.info =			(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
 				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
 				 SNDRV_PCM_INFO_MMAP_VALID),
-	.buffer_bytes_max =	(128*1024),
+	.buffer_bytes_max =	(256*1024),
 	.period_bytes_min =	64,
 	.period_bytes_max =	(128*1024),
 	.periods_min =		2,
@@ -1794,6 +1848,7 @@
 	.prepare =	snd_usb_pcm_prepare,
 	.trigger =	snd_usb_pcm_trigger,
 	.pointer =	snd_usb_pcm_pointer,
+	.page =		snd_pcm_get_vmalloc_page,
 };
 
 static snd_pcm_ops_t snd_usb_capture_ops = {
@@ -1805,6 +1860,7 @@
 	.prepare =	snd_usb_pcm_prepare,
 	.trigger =	snd_usb_pcm_trigger,
 	.pointer =	snd_usb_pcm_pointer,
+	.page =		snd_pcm_get_vmalloc_page,
 };
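
Taken together, the vmalloc-buffer changes in this file come down to three hooks: allocate in hw_params, free in hw_free, and hand pages to the core through the .page op. Condensed into one place (a sketch that reuses the helpers defined earlier in this patch; the rest of the ops struct is elided):

    /* the three hooks a driver needs for a vmalloc()ed PCM buffer */
    static int my_hw_params(snd_pcm_substream_t *subs, snd_pcm_hw_params_t *hw)
    {
        return snd_pcm_alloc_vmalloc_buffer(subs, params_buffer_bytes(hw));
    }

    static int my_hw_free(snd_pcm_substream_t *subs)
    {
        return snd_pcm_free_vmalloc_buffer(subs);
    }

    static snd_pcm_ops_t my_pcm_ops = {
        .hw_params = my_hw_params,
        .hw_free   = my_hw_free,
        .page      = snd_pcm_get_vmalloc_page, /* lets mmap work without DMA pages */
        /* .open/.close/.ioctl/.prepare/.trigger/.pointer as usual */
    };
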
 
 
@@ -2021,6 +2077,9 @@
 
 	INIT_LIST_HEAD(&subs->fmt_list);
 	spin_lock_init(&subs->lock);
+	if (stream == SNDRV_PCM_STREAM_PLAYBACK)
+		tasklet_init(&subs->start_period_elapsed, start_period_elapsed,
+			     (unsigned long)subs);
 
 	subs->stream = as;
 	subs->direction = stream;
@@ -2029,10 +2088,6 @@
 		subs->ops = audio_urb_ops[stream];
 	else
 		subs->ops = audio_urb_ops_high_speed[stream];
-	snd_pcm_lib_preallocate_pages(as->pcm->streams[stream].substream,
-				      SNDRV_DMA_TYPE_CONTINUOUS,
-				      snd_dma_continuous_data(GFP_KERNEL),
-				      64 * 1024, 128 * 1024);
 	snd_pcm_set_ops(as->pcm, stream,
 			stream == SNDRV_PCM_STREAM_PLAYBACK ?
 			&snd_usb_playback_ops : &snd_usb_capture_ops);
@@ -2078,7 +2133,6 @@
 	snd_usb_stream_t *stream = pcm->private_data;
 	if (stream) {
 		stream->pcm = NULL;
-		snd_pcm_lib_preallocate_free_for_all(pcm);
 		snd_usb_audio_stream_free(stream);
 	}
 }
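
To put numbers on the URB-sizing logic above (all parameters are assumptions for illustration, not taken from this patch): a full-speed device at 48 kHz, 16-bit stereo, datainterval 0, no sync endpoint, a 4096-byte period, and nrpacks set to 4 work out as follows:

    #include <stdio.h>

    int main(void)
    {
        unsigned int freqn = 48 << 16;       /* 48 frames/ms in 16.16 fixed point */
        unsigned int frame_bits = 32;        /* 16-bit stereo */
        unsigned int period_bytes = 4096;
        unsigned int packs_per_ms = 1;       /* full-speed bus */
        unsigned int urb_packs = 4 * packs_per_ms;   /* nrpacks = 4 */
        unsigned int minsize, total_packs, nurbs;

        minsize = (freqn >> 16) * (frame_bits >> 3);          /* 192 bytes/packet */
        total_packs = (period_bytes + minsize - 1) / minsize; /* 22 packets */
        total_packs = (total_packs + packs_per_ms - 1) & ~(packs_per_ms - 1);
        nurbs = (total_packs + urb_packs - 1) / urb_packs;    /* 6 URBs */

        printf("minsize=%u total_packs=%u nurbs=%u\n",
               minsize, total_packs, nurbs);
        return 0;
    }
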
diff --git a/sound/usb/usbmidi.c b/sound/usb/usbmidi.c
index 5778a9b..93dedde 100644
--- a/sound/usb/usbmidi.c
+++ b/sound/usb/usbmidi.c
@@ -44,6 +44,7 @@
 #include <linux/string.h>
 #include <linux/init.h>
 #include <linux/slab.h>
+#include <linux/timer.h>
 #include <linux/usb.h>
 #include <sound/core.h>
 #include <sound/minors.h>
@@ -56,6 +57,12 @@
  */
 /* #define DUMP_PACKETS */
 
+/*
+ * how long to wait after some USB errors, so that khubd can disconnect() us
+ * without too many spurious errors
+ */
+#define ERROR_DELAY_JIFFIES (HZ / 10)
+
 
 MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
 MODULE_DESCRIPTION("USB Audio/MIDI helper module");
@@ -100,6 +107,7 @@
 	snd_rawmidi_t* rmidi;
 	struct usb_protocol_ops* usb_protocol_ops;
 	struct list_head list;
+	struct timer_list error_timer;
 
 	struct snd_usb_midi_endpoint {
 		snd_usb_midi_out_endpoint_t *out;
@@ -141,7 +149,8 @@
 	struct usbmidi_in_port {
 		snd_rawmidi_substream_t* substream;
 	} ports[0x10];
-	int seen_f5;
+	u8 seen_f5;
+	u8 error_resubmit;
 	int current_port;
 };
 
@@ -167,14 +176,22 @@
  */
 static int snd_usbmidi_urb_error(int status)
 {
-	if (status == -ENOENT)
-		return status; /* killed */
-	if (status == -EILSEQ ||
-	    status == -ECONNRESET ||
-	    status == -ETIMEDOUT)
-		return -ENODEV; /* device removed/shutdown */
-	snd_printk(KERN_ERR "urb status %d\n", status);
-	return 0; /* continue */
+	switch (status) {
+	/* manually unlinked, or device gone */
+	case -ENOENT:
+	case -ECONNRESET:
+	case -ESHUTDOWN:
+	case -ENODEV:
+		return -ENODEV;
+	/* errors that might occur during unplugging */
+	case -EPROTO:    /* EHCI */
+	case -ETIMEDOUT: /* OHCI */
+	case -EILSEQ:    /* UHCI */
+		return -EIO;
+	default:
+		snd_printk(KERN_ERR "urb status %d\n", status);
+		return 0; /* continue */
+	}
 }
 
 /*
@@ -218,8 +235,15 @@
 		ep->umidi->usb_protocol_ops->input(ep, urb->transfer_buffer,
 						   urb->actual_length);
 	} else {
-		if (snd_usbmidi_urb_error(urb->status) < 0)
+		int err = snd_usbmidi_urb_error(urb->status);
+		if (err < 0) {
+			if (err != -ENODEV) {
+				ep->error_resubmit = 1;
+				mod_timer(&ep->umidi->error_timer,
+					  jiffies + ERROR_DELAY_JIFFIES);
+			}
 			return;
+		}
 	}
 
 	if (usb_pipe_needs_resubmit(urb->pipe)) {
@@ -236,8 +260,13 @@
 	ep->urb_active = 0;
 	spin_unlock(&ep->buffer_lock);
 	if (urb->status < 0) {
-		if (snd_usbmidi_urb_error(urb->status) < 0)
+		int err = snd_usbmidi_urb_error(urb->status);
+		if (err < 0) {
+			if (err != -ENODEV)
+				mod_timer(&ep->umidi->error_timer,
+					  jiffies + ERROR_DELAY_JIFFIES);
 			return;
+		}
 	}
 	snd_usbmidi_do_output(ep);
 }
@@ -276,6 +305,24 @@
 	snd_usbmidi_do_output(ep);
 }
 
+/* called after transfers have been interrupted due to some USB error */
+static void snd_usbmidi_error_timer(unsigned long data)
+{
+	snd_usb_midi_t *umidi = (snd_usb_midi_t *)data;
+	int i;
+
+	for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) {
+		snd_usb_midi_in_endpoint_t *in = umidi->endpoints[i].in;
+		if (in && in->error_resubmit) {
+			in->error_resubmit = 0;
+			in->urb->dev = umidi->chip->dev;
+			snd_usbmidi_submit_urb(in->urb, GFP_ATOMIC);
+		}
+		if (umidi->endpoints[i].out)
+			snd_usbmidi_do_output(umidi->endpoints[i].out);
+	}
+}
+
 /* helper function to send static data that may not be DMA-able */
 static int send_bulk_static_data(snd_usb_midi_out_endpoint_t* ep,
 				 const void *data, int len)
@@ -594,17 +641,20 @@
 static void snd_usbmidi_emagic_input(snd_usb_midi_in_endpoint_t* ep,
 				     uint8_t* buffer, int buffer_length)
 {
-	/* ignore padding bytes at end of buffer */
-	while (buffer_length > 0 && buffer[buffer_length - 1] == 0xff)
-		--buffer_length;
+	int i;
+
+	/* FF indicates end of valid data */
+	for (i = 0; i < buffer_length; ++i)
+		if (buffer[i] == 0xff) {
+			buffer_length = i;
+			break;
+		}
 
 	/* handle F5 at end of last buffer */
 	if (ep->seen_f5)
 		goto switch_port;
 
 	while (buffer_length > 0) {
-		int i;
-
 		/* determine size of data until next F5 */
 		for (i = 0; i < buffer_length; ++i)
 			if (buffer[i] == 0xf5)
@@ -671,6 +721,10 @@
 				break;
 		}
 	}
+	if (buf_free < ep->max_transfer && buf_free > 0) {
+		*buf = 0xff;
+		--buf_free;
+	}
 	ep->urb->transfer_buffer_length = ep->max_transfer - buf_free;
 }
 
@@ -765,7 +819,10 @@
 static void snd_usbmidi_in_endpoint_delete(snd_usb_midi_in_endpoint_t* ep)
 {
 	if (ep->urb) {
-		kfree(ep->urb->transfer_buffer);
+		usb_buffer_free(ep->umidi->chip->dev,
+				ep->urb->transfer_buffer_length,
+				ep->urb->transfer_buffer,
+				ep->urb->transfer_dma);
 		usb_free_urb(ep->urb);
 	}
 	kfree(ep);
@@ -799,7 +856,8 @@
 	else
 		pipe = usb_rcvbulkpipe(umidi->chip->dev, ep_info->in_ep);
 	length = usb_maxpacket(umidi->chip->dev, pipe, 0);
-	buffer = kmalloc(length, GFP_KERNEL);
+	buffer = usb_buffer_alloc(umidi->chip->dev, length, GFP_KERNEL,
+				  &ep->urb->transfer_dma);
 	if (!buffer) {
 		snd_usbmidi_in_endpoint_delete(ep);
 		return -ENOMEM;
@@ -812,6 +870,7 @@
 		usb_fill_bulk_urb(ep->urb, umidi->chip->dev, pipe, buffer, length,
 				  snd_usb_complete_callback(snd_usbmidi_in_urb_complete),
 				  ep);
+	ep->urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
 
 	rep->in = ep;
 	return 0;
@@ -832,10 +891,10 @@
  */
 static void snd_usbmidi_out_endpoint_delete(snd_usb_midi_out_endpoint_t* ep)
 {
-	if (ep->tasklet.func)
-		tasklet_kill(&ep->tasklet);
 	if (ep->urb) {
-		kfree(ep->urb->transfer_buffer);
+		usb_buffer_free(ep->umidi->chip->dev, ep->max_transfer,
+				ep->urb->transfer_buffer,
+				ep->urb->transfer_dma);
 		usb_free_urb(ep->urb);
 	}
 	kfree(ep);
@@ -867,7 +926,8 @@
 	/* we never use interrupt output pipes */
 	pipe = usb_sndbulkpipe(umidi->chip->dev, ep_info->out_ep);
 	ep->max_transfer = usb_maxpacket(umidi->chip->dev, pipe, 1);
-	buffer = kmalloc(ep->max_transfer, GFP_KERNEL);
+	buffer = usb_buffer_alloc(umidi->chip->dev, ep->max_transfer,
+				  GFP_KERNEL, &ep->urb->transfer_dma);
 	if (!buffer) {
 		snd_usbmidi_out_endpoint_delete(ep);
 		return -ENOMEM;
@@ -875,6 +935,7 @@
 	usb_fill_bulk_urb(ep->urb, umidi->chip->dev, pipe, buffer,
 			  ep->max_transfer,
 			  snd_usb_complete_callback(snd_usbmidi_out_urb_complete), ep);
+	ep->urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
 
 	spin_lock_init(&ep->buffer_lock);
 	tasklet_init(&ep->tasklet, snd_usbmidi_out_tasklet, (unsigned long)ep);
@@ -918,8 +979,11 @@
 	int i;
 
 	umidi = list_entry(p, snd_usb_midi_t, list);
+	del_timer_sync(&umidi->error_timer);
 	for (i = 0; i < MIDI_MAX_ENDPOINTS; ++i) {
 		snd_usb_midi_endpoint_t* ep = &umidi->endpoints[i];
+		if (ep->out)
+			tasklet_kill(&ep->out->tasklet);
 		if (ep->out && ep->out->urb) {
 			usb_kill_urb(ep->out->urb);
 			if (umidi->usb_protocol_ops->finish_out_endpoint)
@@ -1480,6 +1544,9 @@
 	umidi->iface = iface;
 	umidi->quirk = quirk;
 	umidi->usb_protocol_ops = &snd_usbmidi_standard_ops;
+	init_timer(&umidi->error_timer);
+	umidi->error_timer.function = snd_usbmidi_error_timer;
+	umidi->error_timer.data = (unsigned long)umidi;
 
 	/* detect the endpoint(s) to use */
 	memset(endpoints, 0, sizeof(endpoints));
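
The error handling added to usbmidi.c defers URB resubmission by ERROR_DELAY_JIFFIES so that khubd can disconnect a misbehaving device before the driver retries. Stripped of the MIDI specifics, the pattern is roughly this (a sketch only; all names are invented and the completion signature is the one used by this kernel generation):

    #include <linux/errno.h>
    #include <linux/jiffies.h>
    #include <linux/timer.h>
    #include <linux/usb.h>

    #define ERROR_DELAY_JIFFIES (HZ / 10)

    struct my_ep {
        struct urb *urb;
        struct timer_list error_timer;  /* set up with init_timer() + function/data */
        int need_resubmit;
    };

    /* timer callback: retry the transfer well after the error happened */
    static void my_error_timer(unsigned long data)
    {
        struct my_ep *ep = (struct my_ep *)data;

        if (ep->need_resubmit) {
            ep->need_resubmit = 0;
            usb_submit_urb(ep->urb, GFP_ATOMIC);
        }
    }

    /* URB completion: on a transient error, defer instead of resubmitting */
    static void my_complete(struct urb *urb, struct pt_regs *regs)
    {
        struct my_ep *ep = urb->context;

        if (urb->status == -EPROTO || urb->status == -ETIMEDOUT ||
            urb->status == -EILSEQ) {
            /* transient error: let the timer resubmit a bit later */
            ep->need_resubmit = 1;
            mod_timer(&ep->error_timer, jiffies + ERROR_DELAY_JIFFIES);
            return;
        }
        if (urb->status == 0)
            usb_submit_urb(urb, GFP_ATOMIC);
        /* other errors: give up (the real driver also handles shutdown) */
    }
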
diff --git a/sound/usb/usx2y/usx2yhwdeppcm.c b/sound/usb/usx2y/usx2yhwdeppcm.c
index ef280612..d0199c4 100644
--- a/sound/usb/usx2y/usx2yhwdeppcm.c
+++ b/sound/usb/usx2y/usx2yhwdeppcm.c
@@ -624,7 +624,7 @@
 		for (s = 0; s < 2; ++s) {
 			snd_pcm_substream_t *substream;
 			substream = pcm->streams[s].substream;
-			if (substream && substream->open_flag)
+			if (substream && substream->ffile != NULL)
 				err = -EBUSY;
 		}
 	}