Linux-2.6.12-rc2

Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.

Let it rip!
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
new file mode 100644
index 0000000..423bbf2
--- /dev/null
+++ b/drivers/block/DAC960.c
@@ -0,0 +1,7099 @@
+/*
+
+  Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
+
+  Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
+
+  This program is free software; you may redistribute and/or modify it under
+  the terms of the GNU General Public License Version 2 as published by the
+  Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+  or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+  for complete details.
+
+*/
+
+
+#define DAC960_DriverVersion			"2.5.47"
+#define DAC960_DriverDate			"14 November 2002"
+
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/miscdevice.h>
+#include <linux/blkdev.h>
+#include <linux/bio.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/genhd.h>
+#include <linux/hdreg.h>
+#include <linux/blkpg.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/proc_fs.h>
+#include <linux/reboot.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include "DAC960.h"
+
+#define DAC960_GAM_MINOR	252
+
+
+static DAC960_Controller_T *DAC960_Controllers[DAC960_MaxControllers];
+static int DAC960_ControllerCount;
+static struct proc_dir_entry *DAC960_ProcDirectoryEntry;
+
+static long disk_size(DAC960_Controller_T *p, int drive_nr)
+{
+	if (p->FirmwareType == DAC960_V1_Controller) {
+		if (drive_nr >= p->LogicalDriveCount)
+			return 0;
+		return p->V1.LogicalDriveInformation[drive_nr].
+			LogicalDriveSize;
+	} else {
+		DAC960_V2_LogicalDeviceInfo_T *i =
+			p->V2.LogicalDeviceInformation[drive_nr];
+		if (i == NULL)
+			return 0;
+		return i->ConfigurableDeviceSize;
+	}
+}
+
+static int DAC960_open(struct inode *inode, struct file *file)
+{
+	struct gendisk *disk = inode->i_bdev->bd_disk;
+	DAC960_Controller_T *p = disk->queue->queuedata;
+	int drive_nr = (long)disk->private_data;
+
+	if (p->FirmwareType == DAC960_V1_Controller) {
+		if (p->V1.LogicalDriveInformation[drive_nr].
+		    LogicalDriveState == DAC960_V1_LogicalDrive_Offline)
+			return -ENXIO;
+	} else {
+		DAC960_V2_LogicalDeviceInfo_T *i =
+			p->V2.LogicalDeviceInformation[drive_nr];
+		if (!i || i->LogicalDeviceState == DAC960_V2_LogicalDevice_Offline)
+			return -ENXIO;
+	}
+
+	check_disk_change(inode->i_bdev);
+
+	if (!get_capacity(p->disks[drive_nr]))
+		return -ENXIO;
+	return 0;
+}
+
+static int DAC960_ioctl(struct inode *inode, struct file *file,
+			unsigned int cmd, unsigned long arg)
+{
+	struct gendisk *disk = inode->i_bdev->bd_disk;
+	DAC960_Controller_T *p = disk->queue->queuedata;
+	int drive_nr = (long)disk->private_data;
+	struct hd_geometry g;
+	struct hd_geometry __user *loc = (struct hd_geometry __user *)arg;
+
+	if (cmd != HDIO_GETGEO || !loc)
+		return -EINVAL;
+
+	if (p->FirmwareType == DAC960_V1_Controller) {
+		g.heads = p->V1.GeometryTranslationHeads;
+		g.sectors = p->V1.GeometryTranslationSectors;
+		g.cylinders = p->V1.LogicalDriveInformation[drive_nr].
+			LogicalDriveSize / (g.heads * g.sectors);
+	} else {
+		DAC960_V2_LogicalDeviceInfo_T *i =
+			p->V2.LogicalDeviceInformation[drive_nr];
+		switch (i->DriveGeometry) {
+		case DAC960_V2_Geometry_128_32:
+			g.heads = 128;
+			g.sectors = 32;
+			break;
+		case DAC960_V2_Geometry_255_63:
+			g.heads = 255;
+			g.sectors = 63;
+			break;
+		default:
+			DAC960_Error("Illegal Logical Device Geometry %d\n",
+					p, i->DriveGeometry);
+			return -EINVAL;
+		}
+
+		g.cylinders = i->ConfigurableDeviceSize / (g.heads * g.sectors);
+	}
+
+	g.start = get_start_sect(inode->i_bdev);
+
+	return copy_to_user(loc, &g, sizeof g) ? -EFAULT : 0;
+}
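+
+/*
+  For reference, a minimal user-space sketch exercising the HDIO_GETGEO path
+  above (illustrative only, not part of the driver; the device node path is
+  an assumption -- DAC960 logical drives appear under /dev/rd/):
+*/
+#if 0
+#include <fcntl.h>
+#include <stdio.h>
+#include <sys/ioctl.h>
+#include <linux/hdreg.h>
+
+int main(void)
+{
+	struct hd_geometry g;
+	int fd = open("/dev/rd/c0d0", O_RDONLY);
+
+	if (fd < 0 || ioctl(fd, HDIO_GETGEO, &g) < 0)
+		return 1;
+	printf("heads=%u sectors=%u cylinders=%u start=%lu\n",
+	       g.heads, g.sectors, g.cylinders, g.start);
+	return 0;
+}
+#endif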
+
+static int DAC960_media_changed(struct gendisk *disk)
+{
+	DAC960_Controller_T *p = disk->queue->queuedata;
+	int drive_nr = (long)disk->private_data;
+
+	if (!p->LogicalDriveInitiallyAccessible[drive_nr])
+		return 1;
+	return 0;
+}
+
+static int DAC960_revalidate_disk(struct gendisk *disk)
+{
+	DAC960_Controller_T *p = disk->queue->queuedata;
+	int unit = (long)disk->private_data;
+
+	set_capacity(disk, disk_size(p, unit));
+	return 0;
+}
+
+static struct block_device_operations DAC960_BlockDeviceOperations = {
+	.owner			= THIS_MODULE,
+	.open			= DAC960_open,
+	.ioctl			= DAC960_ioctl,
+	.media_changed		= DAC960_media_changed,
+	.revalidate_disk	= DAC960_revalidate_disk,
+};
+
+
+/*
+  DAC960_AnnounceDriver announces the Driver Version and Date, Author's Name,
+  Copyright Notice, and Electronic Mail Address.
+*/
+
+static void DAC960_AnnounceDriver(DAC960_Controller_T *Controller)
+{
+  DAC960_Announce("***** DAC960 RAID Driver Version "
+		  DAC960_DriverVersion " of "
+		  DAC960_DriverDate " *****\n", Controller);
+  DAC960_Announce("Copyright 1998-2001 by Leonard N. Zubkoff "
+		  "<lnz@dandelion.com>\n", Controller);
+}
+
+
+/*
+  DAC960_Failure prints a standardized error message, and then returns false.
+*/
+
+static boolean DAC960_Failure(DAC960_Controller_T *Controller,
+			      unsigned char *ErrorMessage)
+{
+  DAC960_Error("While configuring DAC960 PCI RAID Controller at\n",
+	       Controller);
+  if (Controller->IO_Address == 0)
+    DAC960_Error("PCI Bus %d Device %d Function %d I/O Address N/A "
+		 "PCI Address 0x%X\n", Controller,
+		 Controller->Bus, Controller->Device,
+		 Controller->Function, Controller->PCI_Address);
+  else DAC960_Error("PCI Bus %d Device %d Function %d I/O Address "
+		    "0x%X PCI Address 0x%X\n", Controller,
+		    Controller->Bus, Controller->Device,
+		    Controller->Function, Controller->IO_Address,
+		    Controller->PCI_Address);
+  DAC960_Error("%s FAILED - DETACHING\n", Controller, ErrorMessage);
+  return false;
+}
+
+/*
+  init_dma_loaf() and slice_dma_loaf() are helper functions for
+  aggregating the dma-mapped memory for a well-known collection of
+  data structures that are of different lengths.
+
+  These routines don't guarantee any alignment.  The caller must
+  include any space needed for alignment in the sizes of the structures
+  that are passed in.
+ */
+
+static boolean init_dma_loaf(struct pci_dev *dev, struct dma_loaf *loaf,
+								 size_t len)
+{
+	void *cpu_addr;
+	dma_addr_t dma_handle;
+
+	cpu_addr = pci_alloc_consistent(dev, len, &dma_handle);
+	if (cpu_addr == NULL)
+		return false;
+
+	loaf->cpu_free = loaf->cpu_base = cpu_addr;
+	loaf->dma_free = loaf->dma_base = dma_handle;
+	loaf->length = len;
+	memset(cpu_addr, 0, len);
+	return true;
+}
+
+static void *slice_dma_loaf(struct dma_loaf *loaf, size_t len,
+					dma_addr_t *dma_handle)
+{
+	void *cpu_end = loaf->cpu_free + len;
+	void *cpu_addr = loaf->cpu_free;
+
+	if (cpu_end > loaf->cpu_base + loaf->length)
+		BUG();
+	*dma_handle = loaf->dma_free;
+	loaf->cpu_free = cpu_end;
+	loaf->dma_free += len;
+	return cpu_addr;
+}
+
+static void free_dma_loaf(struct pci_dev *dev, struct dma_loaf *loaf_handle)
+{
+	if (loaf_handle->cpu_base != NULL)
+		pci_free_consistent(dev, loaf_handle->length,
+			loaf_handle->cpu_base, loaf_handle->dma_base);
+}
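+
+/*
+  A hedged usage sketch of the three helpers above (illustrative fragment;
+  it mirrors what DAC960_V1_ReadControllerConfiguration does later in this
+  file): size the loaf for every structure up front, slice out one CPU/DMA
+  address pair per structure, and release everything with a single free.
+*/
+#if 0
+	struct dma_loaf local_dma;
+	DAC960_V1_Enquiry2_T *Enquiry2;
+	DAC960_V1_Config2_T *Config2;
+	dma_addr_t Enquiry2DMA, Config2DMA;
+
+	if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
+		sizeof(DAC960_V1_Enquiry2_T) + sizeof(DAC960_V1_Config2_T)))
+		return DAC960_Failure(Controller, "LOGICAL DEVICE ALLOCATION");
+	Enquiry2 = slice_dma_loaf(&local_dma, sizeof(DAC960_V1_Enquiry2_T),
+				  &Enquiry2DMA);
+	Config2 = slice_dma_loaf(&local_dma, sizeof(DAC960_V1_Config2_T),
+				 &Config2DMA);
+	/* ... hand Enquiry2DMA and Config2DMA to the controller, read the
+	   results through Enquiry2 and Config2 ... */
+	free_dma_loaf(Controller->PCIDevice, &local_dma);
+#endif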
+
+
+/*
+  DAC960_CreateAuxiliaryStructures allocates and initializes the auxiliary
+  data structures for Controller.  It returns true on success and false on
+  failure.
+*/
+
+static boolean DAC960_CreateAuxiliaryStructures(DAC960_Controller_T *Controller)
+{
+  int CommandAllocationLength, CommandAllocationGroupSize;
+  int CommandsRemaining = 0, CommandIdentifier, CommandGroupByteCount;
+  void *AllocationPointer = NULL;
+  void *ScatterGatherCPU = NULL;
+  dma_addr_t ScatterGatherDMA;
+  struct pci_pool *ScatterGatherPool;
+  void *RequestSenseCPU = NULL;
+  dma_addr_t RequestSenseDMA;
+  struct pci_pool *RequestSensePool = NULL;
+
+  if (Controller->FirmwareType == DAC960_V1_Controller)
+    {
+      CommandAllocationLength = offsetof(DAC960_Command_T, V1.EndMarker);
+      CommandAllocationGroupSize = DAC960_V1_CommandAllocationGroupSize;
+      ScatterGatherPool = pci_pool_create("DAC960_V1_ScatterGather",
+		Controller->PCIDevice,
+	DAC960_V1_ScatterGatherLimit * sizeof(DAC960_V1_ScatterGatherSegment_T),
+	sizeof(DAC960_V1_ScatterGatherSegment_T), 0);
+      if (ScatterGatherPool == NULL)
+	    return DAC960_Failure(Controller,
+			"AUXILIARY STRUCTURE CREATION (SG)");
+      Controller->ScatterGatherPool = ScatterGatherPool;
+    }
+  else
+    {
+      CommandAllocationLength = offsetof(DAC960_Command_T, V2.EndMarker);
+      CommandAllocationGroupSize = DAC960_V2_CommandAllocationGroupSize;
+      ScatterGatherPool = pci_pool_create("DAC960_V2_ScatterGather",
+		Controller->PCIDevice,
+	DAC960_V2_ScatterGatherLimit * sizeof(DAC960_V2_ScatterGatherSegment_T),
+	sizeof(DAC960_V2_ScatterGatherSegment_T), 0);
+      if (ScatterGatherPool == NULL)
+	    return DAC960_Failure(Controller,
+			"AUXILIARY STRUCTURE CREATION (SG)");
+      RequestSensePool = pci_pool_create("DAC960_V2_RequestSense",
+		Controller->PCIDevice, sizeof(DAC960_SCSI_RequestSense_T),
+		sizeof(int), 0);
+      if (RequestSensePool == NULL) {
+	    pci_pool_destroy(ScatterGatherPool);
+	    return DAC960_Failure(Controller,
+			"AUXILIARY STRUCTURE CREATION (SG)");
+      }
+      Controller->ScatterGatherPool = ScatterGatherPool;
+      Controller->V2.RequestSensePool = RequestSensePool;
+    }
+  Controller->CommandAllocationGroupSize = CommandAllocationGroupSize;
+  Controller->FreeCommands = NULL;
+  for (CommandIdentifier = 1;
+       CommandIdentifier <= Controller->DriverQueueDepth;
+       CommandIdentifier++)
+    {
+      DAC960_Command_T *Command;
+      if (--CommandsRemaining <= 0)
+	{
+	  CommandsRemaining =
+		Controller->DriverQueueDepth - CommandIdentifier + 1;
+	  if (CommandsRemaining > CommandAllocationGroupSize)
+		CommandsRemaining = CommandAllocationGroupSize;
+	  CommandGroupByteCount =
+		CommandsRemaining * CommandAllocationLength;
+	  AllocationPointer = kmalloc(CommandGroupByteCount, GFP_ATOMIC);
+	  if (AllocationPointer == NULL)
+		return DAC960_Failure(Controller,
+					"AUXILIARY STRUCTURE CREATION");
+	  memset(AllocationPointer, 0, CommandGroupByteCount);
+	}
+      Command = (DAC960_Command_T *) AllocationPointer;
+      AllocationPointer += CommandAllocationLength;
+      Command->CommandIdentifier = CommandIdentifier;
+      Command->Controller = Controller;
+      Command->Next = Controller->FreeCommands;
+      Controller->FreeCommands = Command;
+      Controller->Commands[CommandIdentifier-1] = Command;
+      ScatterGatherCPU = pci_pool_alloc(ScatterGatherPool, SLAB_ATOMIC,
+							&ScatterGatherDMA);
+      if (ScatterGatherCPU == NULL)
+	  return DAC960_Failure(Controller, "AUXILIARY STRUCTURE CREATION");
+
+      if (RequestSensePool != NULL) {
+	  RequestSenseCPU = pci_pool_alloc(RequestSensePool, SLAB_ATOMIC,
+					   &RequestSenseDMA);
+	  if (RequestSenseCPU == NULL) {
+		pci_pool_free(ScatterGatherPool, ScatterGatherCPU,
+			      ScatterGatherDMA);
+		return DAC960_Failure(Controller,
+				      "AUXILIARY STRUCTURE CREATION");
+	  }
+      }
+      if (Controller->FirmwareType == DAC960_V1_Controller) {
+	Command->cmd_sglist = Command->V1.ScatterList;
+	Command->V1.ScatterGatherList =
+		(DAC960_V1_ScatterGatherSegment_T *)ScatterGatherCPU;
+	Command->V1.ScatterGatherListDMA = ScatterGatherDMA;
+      } else {
+	Command->cmd_sglist = Command->V2.ScatterList;
+	Command->V2.ScatterGatherList =
+		(DAC960_V2_ScatterGatherSegment_T *)ScatterGatherCPU;
+	Command->V2.ScatterGatherListDMA = ScatterGatherDMA;
+	Command->V2.RequestSense =
+		(DAC960_SCSI_RequestSense_T *)RequestSenseCPU;
+	Command->V2.RequestSenseDMA = RequestSenseDMA;
+      }
+    }
+  return true;
+}
+
+
+/*
+  DAC960_DestroyAuxiliaryStructures deallocates the auxiliary data
+  structures for Controller.
+*/
+
+static void DAC960_DestroyAuxiliaryStructures(DAC960_Controller_T *Controller)
+{
+  int i;
+  struct pci_pool *ScatterGatherPool = Controller->ScatterGatherPool;
+  struct pci_pool *RequestSensePool = NULL;
+  void *ScatterGatherCPU;
+  dma_addr_t ScatterGatherDMA;
+  void *RequestSenseCPU;
+  dma_addr_t RequestSenseDMA;
+  DAC960_Command_T *CommandGroup = NULL;
+
+  if (Controller->FirmwareType == DAC960_V2_Controller)
+        RequestSensePool = Controller->V2.RequestSensePool;
+
+  Controller->FreeCommands = NULL;
+  for (i = 0; i < Controller->DriverQueueDepth; i++)
+    {
+      DAC960_Command_T *Command = Controller->Commands[i];
+
+      if (Command == NULL)
+	  continue;
+
+      if (Controller->FirmwareType == DAC960_V1_Controller) {
+	  ScatterGatherCPU = (void *)Command->V1.ScatterGatherList;
+	  ScatterGatherDMA = Command->V1.ScatterGatherListDMA;
+	  RequestSenseCPU = NULL;
+	  RequestSenseDMA = (dma_addr_t)0;
+      } else {
+          ScatterGatherCPU = (void *)Command->V2.ScatterGatherList;
+	  ScatterGatherDMA = Command->V2.ScatterGatherListDMA;
+	  RequestSenseCPU = (void *)Command->V2.RequestSense;
+	  RequestSenseDMA = Command->V2.RequestSenseDMA;
+      }
+      if (ScatterGatherCPU != NULL)
+          pci_pool_free(ScatterGatherPool, ScatterGatherCPU, ScatterGatherDMA);
+      if (RequestSenseCPU != NULL)
+          pci_pool_free(RequestSensePool, RequestSenseCPU, RequestSenseDMA);
+
+      if ((Command->CommandIdentifier
+	   % Controller->CommandAllocationGroupSize) == 1) {
+	   /*
+	    * We can't free the group of commands until all of the
+	    * request sense and scatter gather dma structures are free.
+	    * Remember the beginning of the group, but don't free it
+	    * until we've reached the beginning of the next group.
+	    */
+	   if (CommandGroup != NULL)
+		kfree(CommandGroup);
+	   CommandGroup = Command;
+      }
+      Controller->Commands[i] = NULL;
+    }
+  if (CommandGroup != NULL)
+      kfree(CommandGroup);
+
+  if (Controller->CombinedStatusBuffer != NULL)
+    {
+      kfree(Controller->CombinedStatusBuffer);
+      Controller->CombinedStatusBuffer = NULL;
+      Controller->CurrentStatusBuffer = NULL;
+    }
+
+  if (ScatterGatherPool != NULL)
+  	pci_pool_destroy(ScatterGatherPool);
+  if (Controller->FirmwareType == DAC960_V1_Controller) return;
+
+  if (RequestSensePool != NULL)
+	pci_pool_destroy(RequestSensePool);
+
+  for (i = 0; i < DAC960_MaxLogicalDrives; i++)
+    if (Controller->V2.LogicalDeviceInformation[i] != NULL)
+      {
+	kfree(Controller->V2.LogicalDeviceInformation[i]);
+	Controller->V2.LogicalDeviceInformation[i] = NULL;
+      }
+
+  for (i = 0; i < DAC960_V2_MaxPhysicalDevices; i++)
+    {
+      if (Controller->V2.PhysicalDeviceInformation[i] != NULL)
+	{
+	  kfree(Controller->V2.PhysicalDeviceInformation[i]);
+	  Controller->V2.PhysicalDeviceInformation[i] = NULL;
+	}
+      if (Controller->V2.InquiryUnitSerialNumber[i] != NULL)
+	{
+	  kfree(Controller->V2.InquiryUnitSerialNumber[i]);
+	  Controller->V2.InquiryUnitSerialNumber[i] = NULL;
+	}
+    }
+}
+
+
+/*
+  DAC960_V1_ClearCommand clears critical fields of Command for DAC960 V1
+  Firmware Controllers.
+*/
+
+static inline void DAC960_V1_ClearCommand(DAC960_Command_T *Command)
+{
+  DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
+  memset(CommandMailbox, 0, sizeof(DAC960_V1_CommandMailbox_T));
+  Command->V1.CommandStatus = 0;
+}
+
+
+/*
+  DAC960_V2_ClearCommand clears critical fields of Command for DAC960 V2
+  Firmware Controllers.
+*/
+
+static inline void DAC960_V2_ClearCommand(DAC960_Command_T *Command)
+{
+  DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox;
+  memset(CommandMailbox, 0, sizeof(DAC960_V2_CommandMailbox_T));
+  Command->V2.CommandStatus = 0;
+}
+
+
+/*
+  DAC960_AllocateCommand allocates a Command structure from Controller's
+  free list.  During driver initialization, a special initialization command
+  has been placed on the free list to guarantee that command allocation can
+  never fail.
+*/
+
+static inline DAC960_Command_T *DAC960_AllocateCommand(DAC960_Controller_T
+						       *Controller)
+{
+  DAC960_Command_T *Command = Controller->FreeCommands;
+  if (Command == NULL) return NULL;
+  Controller->FreeCommands = Command->Next;
+  Command->Next = NULL;
+  return Command;
+}
+
+
+/*
+  DAC960_DeallocateCommand deallocates Command, returning it to Controller's
+  free list.
+*/
+
+static inline void DAC960_DeallocateCommand(DAC960_Command_T *Command)
+{
+  DAC960_Controller_T *Controller = Command->Controller;
+
+  Command->Request = NULL;
+  Command->Next = Controller->FreeCommands;
+  Controller->FreeCommands = Command;
+}
+
+
+/*
+  DAC960_WaitForCommand waits for a wake_up on Controller's Command Wait Queue.
+*/
+
+static void DAC960_WaitForCommand(DAC960_Controller_T *Controller)
+{
+  spin_unlock_irq(&Controller->queue_lock);
+  __wait_event(Controller->CommandWaitQueue, Controller->FreeCommands);
+  spin_lock_irq(&Controller->queue_lock);
+}
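+
+/*
+  A sketch of the canonical allocation pattern built from the helpers above
+  (illustrative fragment): callers that are allowed to sleep take the queue
+  lock, loop until a command is available, and rely on DAC960_WaitForCommand
+  dropping and retaking the lock around the sleep.
+*/
+#if 0
+	DAC960_Command_T *Command;
+
+	spin_lock_irq(&Controller->queue_lock);
+	while ((Command = DAC960_AllocateCommand(Controller)) == NULL)
+		DAC960_WaitForCommand(Controller);
+	spin_unlock_irq(&Controller->queue_lock);
+	/* ... initialize and queue Command; DAC960_DeallocateCommand returns
+	   it to the free list once the completion arrives ... */
+#endif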
+
+
+/*
+  DAC960_BA_QueueCommand queues Command for DAC960 BA Series Controllers.
+*/
+
+static void DAC960_BA_QueueCommand(DAC960_Command_T *Command)
+{
+  DAC960_Controller_T *Controller = Command->Controller;
+  void __iomem *ControllerBaseAddress = Controller->BaseAddress;
+  DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox;
+  DAC960_V2_CommandMailbox_T *NextCommandMailbox =
+    Controller->V2.NextCommandMailbox;
+  CommandMailbox->Common.CommandIdentifier = Command->CommandIdentifier;
+  DAC960_BA_WriteCommandMailbox(NextCommandMailbox, CommandMailbox);
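+  /*
+    Words[0] of a posted mailbox is presumably cleared once the controller
+    has consumed that command, so if either of the two previously posted
+    mailboxes is already clear the controller may have gone idle and the
+    doorbell below is rung to make it rescan the memory mailbox ring.
+  */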
+  if (Controller->V2.PreviousCommandMailbox1->Words[0] == 0 ||
+      Controller->V2.PreviousCommandMailbox2->Words[0] == 0)
+    DAC960_BA_MemoryMailboxNewCommand(ControllerBaseAddress);
+  Controller->V2.PreviousCommandMailbox2 =
+    Controller->V2.PreviousCommandMailbox1;
+  Controller->V2.PreviousCommandMailbox1 = NextCommandMailbox;
+  if (++NextCommandMailbox > Controller->V2.LastCommandMailbox)
+    NextCommandMailbox = Controller->V2.FirstCommandMailbox;
+  Controller->V2.NextCommandMailbox = NextCommandMailbox;
+}
+
+
+/*
+  DAC960_LP_QueueCommand queues Command for DAC960 LP Series Controllers.
+*/
+
+static void DAC960_LP_QueueCommand(DAC960_Command_T *Command)
+{
+  DAC960_Controller_T *Controller = Command->Controller;
+  void __iomem *ControllerBaseAddress = Controller->BaseAddress;
+  DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox;
+  DAC960_V2_CommandMailbox_T *NextCommandMailbox =
+    Controller->V2.NextCommandMailbox;
+  CommandMailbox->Common.CommandIdentifier = Command->CommandIdentifier;
+  DAC960_LP_WriteCommandMailbox(NextCommandMailbox, CommandMailbox);
+  if (Controller->V2.PreviousCommandMailbox1->Words[0] == 0 ||
+      Controller->V2.PreviousCommandMailbox2->Words[0] == 0)
+    DAC960_LP_MemoryMailboxNewCommand(ControllerBaseAddress);
+  Controller->V2.PreviousCommandMailbox2 =
+    Controller->V2.PreviousCommandMailbox1;
+  Controller->V2.PreviousCommandMailbox1 = NextCommandMailbox;
+  if (++NextCommandMailbox > Controller->V2.LastCommandMailbox)
+    NextCommandMailbox = Controller->V2.FirstCommandMailbox;
+  Controller->V2.NextCommandMailbox = NextCommandMailbox;
+}
+
+
+/*
+  DAC960_LA_QueueCommandDualMode queues Command for DAC960 LA Series
+  Controllers with Dual Mode Firmware.
+*/
+
+static void DAC960_LA_QueueCommandDualMode(DAC960_Command_T *Command)
+{
+  DAC960_Controller_T *Controller = Command->Controller;
+  void __iomem *ControllerBaseAddress = Controller->BaseAddress;
+  DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
+  DAC960_V1_CommandMailbox_T *NextCommandMailbox =
+    Controller->V1.NextCommandMailbox;
+  CommandMailbox->Common.CommandIdentifier = Command->CommandIdentifier;
+  DAC960_LA_WriteCommandMailbox(NextCommandMailbox, CommandMailbox);
+  if (Controller->V1.PreviousCommandMailbox1->Words[0] == 0 ||
+      Controller->V1.PreviousCommandMailbox2->Words[0] == 0)
+    DAC960_LA_MemoryMailboxNewCommand(ControllerBaseAddress);
+  Controller->V1.PreviousCommandMailbox2 =
+    Controller->V1.PreviousCommandMailbox1;
+  Controller->V1.PreviousCommandMailbox1 = NextCommandMailbox;
+  if (++NextCommandMailbox > Controller->V1.LastCommandMailbox)
+    NextCommandMailbox = Controller->V1.FirstCommandMailbox;
+  Controller->V1.NextCommandMailbox = NextCommandMailbox;
+}
+
+
+/*
+  DAC960_LA_QueueCommandSingleMode queues Command for DAC960 LA Series
+  Controllers with Single Mode Firmware.
+*/
+
+static void DAC960_LA_QueueCommandSingleMode(DAC960_Command_T *Command)
+{
+  DAC960_Controller_T *Controller = Command->Controller;
+  void __iomem *ControllerBaseAddress = Controller->BaseAddress;
+  DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
+  DAC960_V1_CommandMailbox_T *NextCommandMailbox =
+    Controller->V1.NextCommandMailbox;
+  CommandMailbox->Common.CommandIdentifier = Command->CommandIdentifier;
+  DAC960_LA_WriteCommandMailbox(NextCommandMailbox, CommandMailbox);
+  if (Controller->V1.PreviousCommandMailbox1->Words[0] == 0 ||
+      Controller->V1.PreviousCommandMailbox2->Words[0] == 0)
+    DAC960_LA_HardwareMailboxNewCommand(ControllerBaseAddress);
+  Controller->V1.PreviousCommandMailbox2 =
+    Controller->V1.PreviousCommandMailbox1;
+  Controller->V1.PreviousCommandMailbox1 = NextCommandMailbox;
+  if (++NextCommandMailbox > Controller->V1.LastCommandMailbox)
+    NextCommandMailbox = Controller->V1.FirstCommandMailbox;
+  Controller->V1.NextCommandMailbox = NextCommandMailbox;
+}
+
+
+/*
+  DAC960_PG_QueueCommandDualMode queues Command for DAC960 PG Series
+  Controllers with Dual Mode Firmware.
+*/
+
+static void DAC960_PG_QueueCommandDualMode(DAC960_Command_T *Command)
+{
+  DAC960_Controller_T *Controller = Command->Controller;
+  void __iomem *ControllerBaseAddress = Controller->BaseAddress;
+  DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
+  DAC960_V1_CommandMailbox_T *NextCommandMailbox =
+    Controller->V1.NextCommandMailbox;
+  CommandMailbox->Common.CommandIdentifier = Command->CommandIdentifier;
+  DAC960_PG_WriteCommandMailbox(NextCommandMailbox, CommandMailbox);
+  if (Controller->V1.PreviousCommandMailbox1->Words[0] == 0 ||
+      Controller->V1.PreviousCommandMailbox2->Words[0] == 0)
+    DAC960_PG_MemoryMailboxNewCommand(ControllerBaseAddress);
+  Controller->V1.PreviousCommandMailbox2 =
+    Controller->V1.PreviousCommandMailbox1;
+  Controller->V1.PreviousCommandMailbox1 = NextCommandMailbox;
+  if (++NextCommandMailbox > Controller->V1.LastCommandMailbox)
+    NextCommandMailbox = Controller->V1.FirstCommandMailbox;
+  Controller->V1.NextCommandMailbox = NextCommandMailbox;
+}
+
+
+/*
+  DAC960_PG_QueueCommandSingleMode queues Command for DAC960 PG Series
+  Controllers with Single Mode Firmware.
+*/
+
+static void DAC960_PG_QueueCommandSingleMode(DAC960_Command_T *Command)
+{
+  DAC960_Controller_T *Controller = Command->Controller;
+  void __iomem *ControllerBaseAddress = Controller->BaseAddress;
+  DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
+  DAC960_V1_CommandMailbox_T *NextCommandMailbox =
+    Controller->V1.NextCommandMailbox;
+  CommandMailbox->Common.CommandIdentifier = Command->CommandIdentifier;
+  DAC960_PG_WriteCommandMailbox(NextCommandMailbox, CommandMailbox);
+  if (Controller->V1.PreviousCommandMailbox1->Words[0] == 0 ||
+      Controller->V1.PreviousCommandMailbox2->Words[0] == 0)
+    DAC960_PG_HardwareMailboxNewCommand(ControllerBaseAddress);
+  Controller->V1.PreviousCommandMailbox2 =
+    Controller->V1.PreviousCommandMailbox1;
+  Controller->V1.PreviousCommandMailbox1 = NextCommandMailbox;
+  if (++NextCommandMailbox > Controller->V1.LastCommandMailbox)
+    NextCommandMailbox = Controller->V1.FirstCommandMailbox;
+  Controller->V1.NextCommandMailbox = NextCommandMailbox;
+}
+
+
+/*
+  DAC960_PD_QueueCommand queues Command for DAC960 PD Series Controllers.
+*/
+
+static void DAC960_PD_QueueCommand(DAC960_Command_T *Command)
+{
+  DAC960_Controller_T *Controller = Command->Controller;
+  void __iomem *ControllerBaseAddress = Controller->BaseAddress;
+  DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
+  CommandMailbox->Common.CommandIdentifier = Command->CommandIdentifier;
+  while (DAC960_PD_MailboxFullP(ControllerBaseAddress))
+    udelay(1);
+  DAC960_PD_WriteCommandMailbox(ControllerBaseAddress, CommandMailbox);
+  DAC960_PD_NewCommand(ControllerBaseAddress);
+}
+
+
+/*
+  DAC960_P_QueueCommand queues Command for DAC960 P Series Controllers.
+*/
+
+static void DAC960_P_QueueCommand(DAC960_Command_T *Command)
+{
+  DAC960_Controller_T *Controller = Command->Controller;
+  void __iomem *ControllerBaseAddress = Controller->BaseAddress;
+  DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
+  CommandMailbox->Common.CommandIdentifier = Command->CommandIdentifier;
+  switch (CommandMailbox->Common.CommandOpcode)
+    {
+    case DAC960_V1_Enquiry:
+      CommandMailbox->Common.CommandOpcode = DAC960_V1_Enquiry_Old;
+      break;
+    case DAC960_V1_GetDeviceState:
+      CommandMailbox->Common.CommandOpcode = DAC960_V1_GetDeviceState_Old;
+      break;
+    case DAC960_V1_Read:
+      CommandMailbox->Common.CommandOpcode = DAC960_V1_Read_Old;
+      DAC960_PD_To_P_TranslateReadWriteCommand(CommandMailbox);
+      break;
+    case DAC960_V1_Write:
+      CommandMailbox->Common.CommandOpcode = DAC960_V1_Write_Old;
+      DAC960_PD_To_P_TranslateReadWriteCommand(CommandMailbox);
+      break;
+    case DAC960_V1_ReadWithScatterGather:
+      CommandMailbox->Common.CommandOpcode =
+	DAC960_V1_ReadWithScatterGather_Old;
+      DAC960_PD_To_P_TranslateReadWriteCommand(CommandMailbox);
+      break;
+    case DAC960_V1_WriteWithScatterGather:
+      CommandMailbox->Common.CommandOpcode =
+	DAC960_V1_WriteWithScatterGather_Old;
+      DAC960_PD_To_P_TranslateReadWriteCommand(CommandMailbox);
+      break;
+    default:
+      break;
+    }
+  while (DAC960_PD_MailboxFullP(ControllerBaseAddress))
+    udelay(1);
+  DAC960_PD_WriteCommandMailbox(ControllerBaseAddress, CommandMailbox);
+  DAC960_PD_NewCommand(ControllerBaseAddress);
+}
+
+
+/*
+  DAC960_ExecuteCommand executes Command and waits for completion.
+*/
+
+static void DAC960_ExecuteCommand(DAC960_Command_T *Command)
+{
+  DAC960_Controller_T *Controller = Command->Controller;
+  DECLARE_COMPLETION(Completion);
+  unsigned long flags;
+  Command->Completion = &Completion;
+
+  spin_lock_irqsave(&Controller->queue_lock, flags);
+  DAC960_QueueCommand(Command);
+  spin_unlock_irqrestore(&Controller->queue_lock, flags);
+ 
+  if (in_interrupt())
+	  return;
+  wait_for_completion(&Completion);
+}
+
+
+/*
+  DAC960_V1_ExecuteType3 executes a DAC960 V1 Firmware Controller Type 3
+  Command and waits for completion.  It returns true on success and false
+  on failure.
+*/
+
+static boolean DAC960_V1_ExecuteType3(DAC960_Controller_T *Controller,
+				      DAC960_V1_CommandOpcode_T CommandOpcode,
+				      dma_addr_t DataDMA)
+{
+  DAC960_Command_T *Command = DAC960_AllocateCommand(Controller);
+  DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
+  DAC960_V1_CommandStatus_T CommandStatus;
+  DAC960_V1_ClearCommand(Command);
+  Command->CommandType = DAC960_ImmediateCommand;
+  CommandMailbox->Type3.CommandOpcode = CommandOpcode;
+  CommandMailbox->Type3.BusAddress = DataDMA;
+  DAC960_ExecuteCommand(Command);
+  CommandStatus = Command->V1.CommandStatus;
+  DAC960_DeallocateCommand(Command);
+  return (CommandStatus == DAC960_V1_NormalCompletion);
+}
+
+
+/*
+  DAC960_V1_ExecuteType3B executes a DAC960 V1 Firmware Controller Type 3B
+  Command and waits for completion.  It returns true on success and false
+  on failure.
+*/
+
+static boolean DAC960_V1_ExecuteType3B(DAC960_Controller_T *Controller,
+				       DAC960_V1_CommandOpcode_T CommandOpcode,
+				       unsigned char CommandOpcode2,
+				       dma_addr_t DataDMA)
+{
+  DAC960_Command_T *Command = DAC960_AllocateCommand(Controller);
+  DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
+  DAC960_V1_CommandStatus_T CommandStatus;
+  DAC960_V1_ClearCommand(Command);
+  Command->CommandType = DAC960_ImmediateCommand;
+  CommandMailbox->Type3B.CommandOpcode = CommandOpcode;
+  CommandMailbox->Type3B.CommandOpcode2 = CommandOpcode2;
+  CommandMailbox->Type3B.BusAddress = DataDMA;
+  DAC960_ExecuteCommand(Command);
+  CommandStatus = Command->V1.CommandStatus;
+  DAC960_DeallocateCommand(Command);
+  return (CommandStatus == DAC960_V1_NormalCompletion);
+}
+
+
+/*
+  DAC960_V1_ExecuteType3D executes a DAC960 V1 Firmware Controller Type 3D
+  Command and waits for completion.  It returns true on success and false
+  on failure.
+*/
+
+static boolean DAC960_V1_ExecuteType3D(DAC960_Controller_T *Controller,
+				       DAC960_V1_CommandOpcode_T CommandOpcode,
+				       unsigned char Channel,
+				       unsigned char TargetID,
+				       dma_addr_t DataDMA)
+{
+  DAC960_Command_T *Command = DAC960_AllocateCommand(Controller);
+  DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
+  DAC960_V1_CommandStatus_T CommandStatus;
+  DAC960_V1_ClearCommand(Command);
+  Command->CommandType = DAC960_ImmediateCommand;
+  CommandMailbox->Type3D.CommandOpcode = CommandOpcode;
+  CommandMailbox->Type3D.Channel = Channel;
+  CommandMailbox->Type3D.TargetID = TargetID;
+  CommandMailbox->Type3D.BusAddress = DataDMA;
+  DAC960_ExecuteCommand(Command);
+  CommandStatus = Command->V1.CommandStatus;
+  DAC960_DeallocateCommand(Command);
+  return (CommandStatus == DAC960_V1_NormalCompletion);
+}
+
+
+/*
+  DAC960_V2_GeneralInfo executes a DAC960 V2 Firmware General Information
+  Reading IOCTL Command and waits for completion.  It returns true on success
+  and false on failure.
+
+  Data is returned in the controller's HealthStatusBuffer, which is dma-able
+  memory.
+*/
+
+static boolean DAC960_V2_GeneralInfo(DAC960_Controller_T *Controller)
+{
+  DAC960_Command_T *Command = DAC960_AllocateCommand(Controller);
+  DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox;
+  DAC960_V2_CommandStatus_T CommandStatus;
+  DAC960_V2_ClearCommand(Command);
+  Command->CommandType = DAC960_ImmediateCommand;
+  CommandMailbox->Common.CommandOpcode = DAC960_V2_IOCTL;
+  CommandMailbox->Common.CommandControlBits
+			.DataTransferControllerToHost = true;
+  CommandMailbox->Common.CommandControlBits
+			.NoAutoRequestSense = true;
+  CommandMailbox->Common.DataTransferSize = sizeof(DAC960_V2_HealthStatusBuffer_T);
+  CommandMailbox->Common.IOCTL_Opcode = DAC960_V2_GetHealthStatus;
+  CommandMailbox->Common.DataTransferMemoryAddress
+			.ScatterGatherSegments[0]
+			.SegmentDataPointer =
+    Controller->V2.HealthStatusBufferDMA;
+  CommandMailbox->Common.DataTransferMemoryAddress
+			.ScatterGatherSegments[0]
+			.SegmentByteCount =
+    CommandMailbox->Common.DataTransferSize;
+  DAC960_ExecuteCommand(Command);
+  CommandStatus = Command->V2.CommandStatus;
+  DAC960_DeallocateCommand(Command);
+  return (CommandStatus == DAC960_V2_NormalCompletion);
+}
+
+
+/*
+  DAC960_V2_NewControllerInfo executes a DAC960 V2 Firmware Controller
+  Information Reading IOCTL Command and waits for completion.  It returns
+  true on success and false on failure.
+
+  Data is returned in the controller's V2.NewControllerInformation dma-able
+  memory buffer.
+*/
+
+static boolean DAC960_V2_NewControllerInfo(DAC960_Controller_T *Controller)
+{
+  DAC960_Command_T *Command = DAC960_AllocateCommand(Controller);
+  DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox;
+  DAC960_V2_CommandStatus_T CommandStatus;
+  DAC960_V2_ClearCommand(Command);
+  Command->CommandType = DAC960_ImmediateCommand;
+  CommandMailbox->ControllerInfo.CommandOpcode = DAC960_V2_IOCTL;
+  CommandMailbox->ControllerInfo.CommandControlBits
+				.DataTransferControllerToHost = true;
+  CommandMailbox->ControllerInfo.CommandControlBits
+				.NoAutoRequestSense = true;
+  CommandMailbox->ControllerInfo.DataTransferSize = sizeof(DAC960_V2_ControllerInfo_T);
+  CommandMailbox->ControllerInfo.ControllerNumber = 0;
+  CommandMailbox->ControllerInfo.IOCTL_Opcode = DAC960_V2_GetControllerInfo;
+  CommandMailbox->ControllerInfo.DataTransferMemoryAddress
+				.ScatterGatherSegments[0]
+				.SegmentDataPointer =
+    	Controller->V2.NewControllerInformationDMA;
+  CommandMailbox->ControllerInfo.DataTransferMemoryAddress
+				.ScatterGatherSegments[0]
+				.SegmentByteCount =
+    CommandMailbox->ControllerInfo.DataTransferSize;
+  DAC960_ExecuteCommand(Command);
+  CommandStatus = Command->V2.CommandStatus;
+  DAC960_DeallocateCommand(Command);
+  return (CommandStatus == DAC960_V2_NormalCompletion);
+}
+
+
+/*
+  DAC960_V2_NewLogicalDeviceInfo executes a DAC960 V2 Firmware Controller Logical
+  Device Information Reading IOCTL Command and waits for completion.  It
+  returns true on success and false on failure.
+
+  Data is returned in the controller's V2.NewLogicalDeviceInformation dma-able
+  memory buffer.
+*/
+
+static boolean DAC960_V2_NewLogicalDeviceInfo(DAC960_Controller_T *Controller,
+					   unsigned short LogicalDeviceNumber)
+{
+  DAC960_Command_T *Command = DAC960_AllocateCommand(Controller);
+  DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox;
+  DAC960_V2_CommandStatus_T CommandStatus;
+
+  DAC960_V2_ClearCommand(Command);
+  Command->CommandType = DAC960_ImmediateCommand;
+  CommandMailbox->LogicalDeviceInfo.CommandOpcode =
+				DAC960_V2_IOCTL;
+  CommandMailbox->LogicalDeviceInfo.CommandControlBits
+				   .DataTransferControllerToHost = true;
+  CommandMailbox->LogicalDeviceInfo.CommandControlBits
+				   .NoAutoRequestSense = true;
+  CommandMailbox->LogicalDeviceInfo.DataTransferSize = 
+				sizeof(DAC960_V2_LogicalDeviceInfo_T);
+  CommandMailbox->LogicalDeviceInfo.LogicalDevice.LogicalDeviceNumber =
+    LogicalDeviceNumber;
+  CommandMailbox->LogicalDeviceInfo.IOCTL_Opcode = DAC960_V2_GetLogicalDeviceInfoValid;
+  CommandMailbox->LogicalDeviceInfo.DataTransferMemoryAddress
+				   .ScatterGatherSegments[0]
+				   .SegmentDataPointer =
+    	Controller->V2.NewLogicalDeviceInformationDMA;
+  CommandMailbox->LogicalDeviceInfo.DataTransferMemoryAddress
+				   .ScatterGatherSegments[0]
+				   .SegmentByteCount =
+    CommandMailbox->LogicalDeviceInfo.DataTransferSize;
+  DAC960_ExecuteCommand(Command);
+  CommandStatus = Command->V2.CommandStatus;
+  DAC960_DeallocateCommand(Command);
+  return (CommandStatus == DAC960_V2_NormalCompletion);
+}
+
+
+/*
+  DAC960_V2_NewPhysicalDeviceInfo executes a DAC960 V2 Firmware Controller
+  "Read Physical Device Information" IOCTL Command and waits for completion.
+  It returns true on success and false on failure.
+
+  The Channel, TargetID, and LogicalUnit arguments should each be 0 the first
+  time this function is called for a given controller.  This will return data
+  for the "first" device on that controller.  The returned data includes a
+  Channel, TargetID, and LogicalUnit that can be passed in to this routine to
+  get data for the NEXT device on that controller.
+
+  Data is stored in the controller's V2.NewPhysicalDeviceInformation dma-able
+  memory buffer.
+*/
+
+static boolean DAC960_V2_NewPhysicalDeviceInfo(DAC960_Controller_T *Controller,
+					    unsigned char Channel,
+					    unsigned char TargetID,
+					    unsigned char LogicalUnit)
+{
+  DAC960_Command_T *Command = DAC960_AllocateCommand(Controller);
+  DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox;
+  DAC960_V2_CommandStatus_T CommandStatus;
+
+  DAC960_V2_ClearCommand(Command);
+  Command->CommandType = DAC960_ImmediateCommand;
+  CommandMailbox->PhysicalDeviceInfo.CommandOpcode = DAC960_V2_IOCTL;
+  CommandMailbox->PhysicalDeviceInfo.CommandControlBits
+				    .DataTransferControllerToHost = true;
+  CommandMailbox->PhysicalDeviceInfo.CommandControlBits
+				    .NoAutoRequestSense = true;
+  CommandMailbox->PhysicalDeviceInfo.DataTransferSize =
+				sizeof(DAC960_V2_PhysicalDeviceInfo_T);
+  CommandMailbox->PhysicalDeviceInfo.PhysicalDevice.LogicalUnit = LogicalUnit;
+  CommandMailbox->PhysicalDeviceInfo.PhysicalDevice.TargetID = TargetID;
+  CommandMailbox->PhysicalDeviceInfo.PhysicalDevice.Channel = Channel;
+  CommandMailbox->PhysicalDeviceInfo.IOCTL_Opcode =
+					DAC960_V2_GetPhysicalDeviceInfoValid;
+  CommandMailbox->PhysicalDeviceInfo.DataTransferMemoryAddress
+				    .ScatterGatherSegments[0]
+				    .SegmentDataPointer =
+    					Controller->V2.NewPhysicalDeviceInformationDMA;
+  CommandMailbox->PhysicalDeviceInfo.DataTransferMemoryAddress
+				    .ScatterGatherSegments[0]
+				    .SegmentByteCount =
+    CommandMailbox->PhysicalDeviceInfo.DataTransferSize;
+  DAC960_ExecuteCommand(Command);
+  CommandStatus = Command->V2.CommandStatus;
+  DAC960_DeallocateCommand(Command);
+  return (CommandStatus == DAC960_V2_NormalCompletion);
+}
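+
+/*
+  A hedged sketch of the enumeration loop this routine supports
+  (illustrative fragment; the device configuration code later in this file
+  walks the physical devices in essentially this way):
+*/
+#if 0
+	unsigned char Channel = 0, TargetID = 0, LogicalUnit = 0;
+
+	while (DAC960_V2_NewPhysicalDeviceInfo(Controller, Channel,
+					       TargetID, LogicalUnit)) {
+		DAC960_V2_PhysicalDeviceInfo_T *Info =
+			Controller->V2.NewPhysicalDeviceInformation;
+		/* ... consume *Info ... */
+		/* Resume the scan just past the device that was returned. */
+		Channel = Info->Channel;
+		TargetID = Info->TargetID;
+		LogicalUnit = Info->LogicalUnit + 1;
+	}
+#endif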
+
+
+static void DAC960_V2_ConstructNewUnitSerialNumber(
+	DAC960_Controller_T *Controller,
+	DAC960_V2_CommandMailbox_T *CommandMailbox, int Channel, int TargetID,
+	int LogicalUnit)
+{
+      CommandMailbox->SCSI_10.CommandOpcode = DAC960_V2_SCSI_10_Passthru;
+      CommandMailbox->SCSI_10.CommandControlBits
+			     .DataTransferControllerToHost = true;
+      CommandMailbox->SCSI_10.CommandControlBits
+			     .NoAutoRequestSense = true;
+      CommandMailbox->SCSI_10.DataTransferSize =
+	sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T);
+      CommandMailbox->SCSI_10.PhysicalDevice.LogicalUnit = LogicalUnit;
+      CommandMailbox->SCSI_10.PhysicalDevice.TargetID = TargetID;
+      CommandMailbox->SCSI_10.PhysicalDevice.Channel = Channel;
+      CommandMailbox->SCSI_10.CDBLength = 6;
+      CommandMailbox->SCSI_10.SCSI_CDB[0] = 0x12; /* INQUIRY */
+      CommandMailbox->SCSI_10.SCSI_CDB[1] = 1; /* EVPD = 1 */
+      CommandMailbox->SCSI_10.SCSI_CDB[2] = 0x80; /* Page Code */
+      CommandMailbox->SCSI_10.SCSI_CDB[3] = 0; /* Reserved */
+      CommandMailbox->SCSI_10.SCSI_CDB[4] =
+	sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T);
+      CommandMailbox->SCSI_10.SCSI_CDB[5] = 0; /* Control */
+      CommandMailbox->SCSI_10.DataTransferMemoryAddress
+			     .ScatterGatherSegments[0]
+			     .SegmentDataPointer =
+		Controller->V2.NewInquiryUnitSerialNumberDMA;
+      CommandMailbox->SCSI_10.DataTransferMemoryAddress
+			     .ScatterGatherSegments[0]
+			     .SegmentByteCount =
+		CommandMailbox->SCSI_10.DataTransferSize;
+}
+
+
+/*
+  DAC960_V2_NewInquiryUnitSerialNumber executes a SCSI pass-through Inquiry
+  command to the SCSI device identified by Channel number, Target ID, and
+  Logical Unit Number, and waits for completion of the command.
+
+  The return data includes Unit Serial Number information for the
+  specified device.
+
+  Data is stored in the controller's V2.NewInquiryUnitSerialNumber dma-able
+  memory buffer.
+*/
+
+static boolean DAC960_V2_NewInquiryUnitSerialNumber(DAC960_Controller_T *Controller,
+			int Channel, int TargetID, int LogicalUnit)
+{
+      DAC960_Command_T *Command;
+      DAC960_V2_CommandMailbox_T *CommandMailbox;
+      DAC960_V2_CommandStatus_T CommandStatus;
+
+      Command = DAC960_AllocateCommand(Controller);
+      CommandMailbox = &Command->V2.CommandMailbox;
+      DAC960_V2_ClearCommand(Command);
+      Command->CommandType = DAC960_ImmediateCommand;
+
+      DAC960_V2_ConstructNewUnitSerialNumber(Controller, CommandMailbox,
+			Channel, TargetID, LogicalUnit);
+
+      DAC960_ExecuteCommand(Command);
+      CommandStatus = Command->V2.CommandStatus;
+      DAC960_DeallocateCommand(Command);
+      return (CommandStatus == DAC960_V2_NormalCompletion);
+}
+
+
+/*
+  DAC960_V2_DeviceOperation executes a DAC960 V2 Firmware Controller Device
+  Operation IOCTL Command and waits for completion.  It returns true on
+  success and false on failure.
+*/
+
+static boolean DAC960_V2_DeviceOperation(DAC960_Controller_T *Controller,
+					 DAC960_V2_IOCTL_Opcode_T IOCTL_Opcode,
+					 DAC960_V2_OperationDevice_T
+					   OperationDevice)
+{
+  DAC960_Command_T *Command = DAC960_AllocateCommand(Controller);
+  DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox;
+  DAC960_V2_CommandStatus_T CommandStatus;
+  DAC960_V2_ClearCommand(Command);
+  Command->CommandType = DAC960_ImmediateCommand;
+  CommandMailbox->DeviceOperation.CommandOpcode = DAC960_V2_IOCTL;
+  CommandMailbox->DeviceOperation.CommandControlBits
+				 .DataTransferControllerToHost = true;
+  CommandMailbox->DeviceOperation.CommandControlBits
+    				 .NoAutoRequestSense = true;
+  CommandMailbox->DeviceOperation.IOCTL_Opcode = IOCTL_Opcode;
+  CommandMailbox->DeviceOperation.OperationDevice = OperationDevice;
+  DAC960_ExecuteCommand(Command);
+  CommandStatus = Command->V2.CommandStatus;
+  DAC960_DeallocateCommand(Command);
+  return (CommandStatus == DAC960_V2_NormalCompletion);
+}
+
+
+/*
+  DAC960_V1_EnableMemoryMailboxInterface enables the Memory Mailbox Interface
+  for DAC960 V1 Firmware Controllers.
+
+  PD and P controller types have no memory mailbox, but still need the
+  other dma mapped memory.
+*/
+
+static boolean DAC960_V1_EnableMemoryMailboxInterface(DAC960_Controller_T
+						      *Controller)
+{
+  void __iomem *ControllerBaseAddress = Controller->BaseAddress;
+  DAC960_HardwareType_T hw_type = Controller->HardwareType;
+  struct pci_dev *PCI_Device = Controller->PCIDevice;
+  struct dma_loaf *DmaPages = &Controller->DmaPages;
+  size_t DmaPagesSize;
+  size_t CommandMailboxesSize;
+  size_t StatusMailboxesSize;
+
+  DAC960_V1_CommandMailbox_T *CommandMailboxesMemory;
+  dma_addr_t CommandMailboxesMemoryDMA;
+
+  DAC960_V1_StatusMailbox_T *StatusMailboxesMemory;
+  dma_addr_t StatusMailboxesMemoryDMA;
+
+  DAC960_V1_CommandMailbox_T CommandMailbox;
+  DAC960_V1_CommandStatus_T CommandStatus;
+  int TimeoutCounter;
+  int i;
+
+  
+  if (pci_set_dma_mask(Controller->PCIDevice, DAC690_V1_PciDmaMask))
+	return DAC960_Failure(Controller, "DMA mask out of range");
+  Controller->BounceBufferLimit = DAC690_V1_PciDmaMask;
+
+  if ((hw_type == DAC960_PD_Controller) || (hw_type == DAC960_P_Controller)) {
+    CommandMailboxesSize =  0;
+    StatusMailboxesSize = 0;
+  } else {
+    CommandMailboxesSize =  DAC960_V1_CommandMailboxCount * sizeof(DAC960_V1_CommandMailbox_T);
+    StatusMailboxesSize = DAC960_V1_StatusMailboxCount * sizeof(DAC960_V1_StatusMailbox_T);
+  }
+  DmaPagesSize = CommandMailboxesSize + StatusMailboxesSize + 
+	sizeof(DAC960_V1_DCDB_T) + sizeof(DAC960_V1_Enquiry_T) +
+	sizeof(DAC960_V1_ErrorTable_T) + sizeof(DAC960_V1_EventLogEntry_T) +
+	sizeof(DAC960_V1_RebuildProgress_T) +
+	sizeof(DAC960_V1_LogicalDriveInformationArray_T) +
+	sizeof(DAC960_V1_BackgroundInitializationStatus_T) +
+	sizeof(DAC960_V1_DeviceState_T) + sizeof(DAC960_SCSI_Inquiry_T) +
+	sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T);
+
+  if (!init_dma_loaf(PCI_Device, DmaPages, DmaPagesSize))
+	return false;
+
+
+  if ((hw_type == DAC960_PD_Controller) || (hw_type == DAC960_P_Controller)) 
+	goto skip_mailboxes;
+
+  CommandMailboxesMemory = slice_dma_loaf(DmaPages,
+                CommandMailboxesSize, &CommandMailboxesMemoryDMA);
+  
+  /* These are the base addresses for the command memory mailbox array */
+  Controller->V1.FirstCommandMailbox = CommandMailboxesMemory;
+  Controller->V1.FirstCommandMailboxDMA = CommandMailboxesMemoryDMA;
+
+  CommandMailboxesMemory += DAC960_V1_CommandMailboxCount - 1;
+  Controller->V1.LastCommandMailbox = CommandMailboxesMemory;
+  Controller->V1.NextCommandMailbox = Controller->V1.FirstCommandMailbox;
+  Controller->V1.PreviousCommandMailbox1 = Controller->V1.LastCommandMailbox;
+  Controller->V1.PreviousCommandMailbox2 =
+	  				Controller->V1.LastCommandMailbox - 1;
+
+  /* These are the base addresses for the status memory mailbox array */
+  StatusMailboxesMemory = slice_dma_loaf(DmaPages,
+                StatusMailboxesSize, &StatusMailboxesMemoryDMA);
+
+  Controller->V1.FirstStatusMailbox = StatusMailboxesMemory;
+  Controller->V1.FirstStatusMailboxDMA = StatusMailboxesMemoryDMA;
+  StatusMailboxesMemory += DAC960_V1_StatusMailboxCount - 1;
+  Controller->V1.LastStatusMailbox = StatusMailboxesMemory;
+  Controller->V1.NextStatusMailbox = Controller->V1.FirstStatusMailbox;
+
+skip_mailboxes:
+  Controller->V1.MonitoringDCDB = slice_dma_loaf(DmaPages,
+                sizeof(DAC960_V1_DCDB_T),
+                &Controller->V1.MonitoringDCDB_DMA);
+
+  Controller->V1.NewEnquiry = slice_dma_loaf(DmaPages,
+                sizeof(DAC960_V1_Enquiry_T),
+                &Controller->V1.NewEnquiryDMA);
+
+  Controller->V1.NewErrorTable = slice_dma_loaf(DmaPages,
+                sizeof(DAC960_V1_ErrorTable_T),
+                &Controller->V1.NewErrorTableDMA);
+
+  Controller->V1.EventLogEntry = slice_dma_loaf(DmaPages,
+                sizeof(DAC960_V1_EventLogEntry_T),
+                &Controller->V1.EventLogEntryDMA);
+
+  Controller->V1.RebuildProgress = slice_dma_loaf(DmaPages,
+                sizeof(DAC960_V1_RebuildProgress_T),
+                &Controller->V1.RebuildProgressDMA);
+
+  Controller->V1.NewLogicalDriveInformation = slice_dma_loaf(DmaPages,
+                sizeof(DAC960_V1_LogicalDriveInformationArray_T),
+                &Controller->V1.NewLogicalDriveInformationDMA);
+
+  Controller->V1.BackgroundInitializationStatus = slice_dma_loaf(DmaPages,
+                sizeof(DAC960_V1_BackgroundInitializationStatus_T),
+                &Controller->V1.BackgroundInitializationStatusDMA);
+
+  Controller->V1.NewDeviceState = slice_dma_loaf(DmaPages,
+                sizeof(DAC960_V1_DeviceState_T),
+                &Controller->V1.NewDeviceStateDMA);
+
+  Controller->V1.NewInquiryStandardData = slice_dma_loaf(DmaPages,
+                sizeof(DAC960_SCSI_Inquiry_T),
+                &Controller->V1.NewInquiryStandardDataDMA);
+
+  Controller->V1.NewInquiryUnitSerialNumber = slice_dma_loaf(DmaPages,
+                sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T),
+                &Controller->V1.NewInquiryUnitSerialNumberDMA);
+
+  if ((hw_type == DAC960_PD_Controller) || (hw_type == DAC960_P_Controller))
+	return true;
+ 
+  /* Enable the Memory Mailbox Interface. */
+  Controller->V1.DualModeMemoryMailboxInterface = true;
+  CommandMailbox.TypeX.CommandOpcode = 0x2B;
+  CommandMailbox.TypeX.CommandIdentifier = 0;
+  CommandMailbox.TypeX.CommandOpcode2 = 0x14;
+  CommandMailbox.TypeX.CommandMailboxesBusAddress =
+    				Controller->V1.FirstCommandMailboxDMA;
+  CommandMailbox.TypeX.StatusMailboxesBusAddress =
+    				Controller->V1.FirstStatusMailboxDMA;
+#define TIMEOUT_COUNT 1000000
+
+  for (i = 0; i < 2; i++)
+    switch (Controller->HardwareType)
+      {
+      case DAC960_LA_Controller:
+	TimeoutCounter = TIMEOUT_COUNT;
+	while (--TimeoutCounter >= 0)
+	  {
+	    if (!DAC960_LA_HardwareMailboxFullP(ControllerBaseAddress))
+	      break;
+	    udelay(10);
+	  }
+	if (TimeoutCounter < 0) return false;
+	DAC960_LA_WriteHardwareMailbox(ControllerBaseAddress, &CommandMailbox);
+	DAC960_LA_HardwareMailboxNewCommand(ControllerBaseAddress);
+	TimeoutCounter = TIMEOUT_COUNT;
+	while (--TimeoutCounter >= 0)
+	  {
+	    if (DAC960_LA_HardwareMailboxStatusAvailableP(
+		  ControllerBaseAddress))
+	      break;
+	    udelay(10);
+	  }
+	if (TimeoutCounter < 0) return false;
+	CommandStatus = DAC960_LA_ReadStatusRegister(ControllerBaseAddress);
+	DAC960_LA_AcknowledgeHardwareMailboxInterrupt(ControllerBaseAddress);
+	DAC960_LA_AcknowledgeHardwareMailboxStatus(ControllerBaseAddress);
+	if (CommandStatus == DAC960_V1_NormalCompletion) return true;
+	Controller->V1.DualModeMemoryMailboxInterface = false;
+	CommandMailbox.TypeX.CommandOpcode2 = 0x10;
+	break;
+      case DAC960_PG_Controller:
+	TimeoutCounter = TIMEOUT_COUNT;
+	while (--TimeoutCounter >= 0)
+	  {
+	    if (!DAC960_PG_HardwareMailboxFullP(ControllerBaseAddress))
+	      break;
+	    udelay(10);
+	  }
+	if (TimeoutCounter < 0) return false;
+	DAC960_PG_WriteHardwareMailbox(ControllerBaseAddress, &CommandMailbox);
+	DAC960_PG_HardwareMailboxNewCommand(ControllerBaseAddress);
+
+	TimeoutCounter = TIMEOUT_COUNT;
+	while (--TimeoutCounter >= 0)
+	  {
+	    if (DAC960_PG_HardwareMailboxStatusAvailableP(
+		  ControllerBaseAddress))
+	      break;
+	    udelay(10);
+	  }
+	if (TimeoutCounter < 0) return false;
+	CommandStatus = DAC960_PG_ReadStatusRegister(ControllerBaseAddress);
+	DAC960_PG_AcknowledgeHardwareMailboxInterrupt(ControllerBaseAddress);
+	DAC960_PG_AcknowledgeHardwareMailboxStatus(ControllerBaseAddress);
+	if (CommandStatus == DAC960_V1_NormalCompletion) return true;
+	Controller->V1.DualModeMemoryMailboxInterface = false;
+	CommandMailbox.TypeX.CommandOpcode2 = 0x10;
+	break;
+      default:
+        DAC960_Failure(Controller, "Unknown Controller Type");
+	break;
+      }
+  return false;
+}
+
+
+/*
+  DAC960_V2_EnableMemoryMailboxInterface enables the Memory Mailbox Interface
+  for DAC960 V2 Firmware Controllers.
+
+  Aggregate the space needed for the controller's memory mailbox and
+  the other data structures that will be targets of dma transfers with
+  the controller.  Allocate a dma-mapped region of memory to hold these
+  structures.  Then, save CPU pointers and dma_addr_t values to reference
+  the structures that are contained in that region.
+*/
+
+static boolean DAC960_V2_EnableMemoryMailboxInterface(DAC960_Controller_T
+						      *Controller)
+{
+  void __iomem *ControllerBaseAddress = Controller->BaseAddress;
+  struct pci_dev *PCI_Device = Controller->PCIDevice;
+  struct dma_loaf *DmaPages = &Controller->DmaPages;
+  size_t DmaPagesSize;
+  size_t CommandMailboxesSize;
+  size_t StatusMailboxesSize;
+
+  DAC960_V2_CommandMailbox_T *CommandMailboxesMemory;
+  dma_addr_t CommandMailboxesMemoryDMA;
+
+  DAC960_V2_StatusMailbox_T *StatusMailboxesMemory;
+  dma_addr_t StatusMailboxesMemoryDMA;
+
+  DAC960_V2_CommandMailbox_T *CommandMailbox;
+  dma_addr_t	CommandMailboxDMA;
+  DAC960_V2_CommandStatus_T CommandStatus;
+
+  if (pci_set_dma_mask(Controller->PCIDevice, DAC690_V2_PciDmaMask))
+	return DAC960_Failure(Controller, "DMA mask out of range");
+  Controller->BounceBufferLimit = DAC690_V2_PciDmaMask;
+
+  /* This is a temporary dma mapping, used only in the scope of this function */
+  CommandMailbox =
+	  (DAC960_V2_CommandMailbox_T *)pci_alloc_consistent( PCI_Device,
+		sizeof(DAC960_V2_CommandMailbox_T), &CommandMailboxDMA);
+  if (CommandMailbox == NULL)
+	  return false;
+
+  CommandMailboxesSize = DAC960_V2_CommandMailboxCount * sizeof(DAC960_V2_CommandMailbox_T);
+  StatusMailboxesSize = DAC960_V2_StatusMailboxCount * sizeof(DAC960_V2_StatusMailbox_T);
+  DmaPagesSize =
+    CommandMailboxesSize + StatusMailboxesSize +
+    sizeof(DAC960_V2_HealthStatusBuffer_T) +
+    sizeof(DAC960_V2_ControllerInfo_T) +
+    sizeof(DAC960_V2_LogicalDeviceInfo_T) +
+    sizeof(DAC960_V2_PhysicalDeviceInfo_T) +
+    sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T) +
+    sizeof(DAC960_V2_Event_T) +
+    sizeof(DAC960_V2_PhysicalToLogicalDevice_T);
+
+  if (!init_dma_loaf(PCI_Device, DmaPages, DmaPagesSize)) {
+  	pci_free_consistent(PCI_Device, sizeof(DAC960_V2_CommandMailbox_T),
+					CommandMailbox, CommandMailboxDMA);
+	return false;
+  }
+
+  CommandMailboxesMemory = slice_dma_loaf(DmaPages,
+		CommandMailboxesSize, &CommandMailboxesMemoryDMA);
+
+  /* These are the base addresses for the command memory mailbox array */
+  Controller->V2.FirstCommandMailbox = CommandMailboxesMemory;
+  Controller->V2.FirstCommandMailboxDMA = CommandMailboxesMemoryDMA;
+
+  CommandMailboxesMemory += DAC960_V2_CommandMailboxCount - 1;
+  Controller->V2.LastCommandMailbox = CommandMailboxesMemory;
+  Controller->V2.NextCommandMailbox = Controller->V2.FirstCommandMailbox;
+  Controller->V2.PreviousCommandMailbox1 = Controller->V2.LastCommandMailbox;
+  Controller->V2.PreviousCommandMailbox2 =
+    					Controller->V2.LastCommandMailbox - 1;
+
+  /* These are the base addresses for the status memory mailbox array */
+  StatusMailboxesMemory = slice_dma_loaf(DmaPages,
+		StatusMailboxesSize, &StatusMailboxesMemoryDMA);
+
+  Controller->V2.FirstStatusMailbox = StatusMailboxesMemory;
+  Controller->V2.FirstStatusMailboxDMA = StatusMailboxesMemoryDMA;
+  StatusMailboxesMemory += DAC960_V2_StatusMailboxCount - 1;
+  Controller->V2.LastStatusMailbox = StatusMailboxesMemory;
+  Controller->V2.NextStatusMailbox = Controller->V2.FirstStatusMailbox;
+
+  Controller->V2.HealthStatusBuffer = slice_dma_loaf(DmaPages,
+		sizeof(DAC960_V2_HealthStatusBuffer_T),
+		&Controller->V2.HealthStatusBufferDMA);
+
+  Controller->V2.NewControllerInformation = slice_dma_loaf(DmaPages,
+                sizeof(DAC960_V2_ControllerInfo_T), 
+                &Controller->V2.NewControllerInformationDMA);
+
+  Controller->V2.NewLogicalDeviceInformation =  slice_dma_loaf(DmaPages,
+                sizeof(DAC960_V2_LogicalDeviceInfo_T),
+                &Controller->V2.NewLogicalDeviceInformationDMA);
+
+  Controller->V2.NewPhysicalDeviceInformation = slice_dma_loaf(DmaPages,
+                sizeof(DAC960_V2_PhysicalDeviceInfo_T),
+                &Controller->V2.NewPhysicalDeviceInformationDMA);
+
+  Controller->V2.NewInquiryUnitSerialNumber = slice_dma_loaf(DmaPages,
+                sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T),
+                &Controller->V2.NewInquiryUnitSerialNumberDMA);
+
+  Controller->V2.Event = slice_dma_loaf(DmaPages,
+                sizeof(DAC960_V2_Event_T),
+                &Controller->V2.EventDMA);
+
+  Controller->V2.PhysicalToLogicalDevice = slice_dma_loaf(DmaPages,
+                sizeof(DAC960_V2_PhysicalToLogicalDevice_T),
+                &Controller->V2.PhysicalToLogicalDeviceDMA);
+
+  /*
+    Enable the Memory Mailbox Interface.
+    
+    I don't know why we can't just use one of the memory mailboxes
+    we just allocated to do this, instead of using this temporary one.
+    Try this change later.
+  */
+  memset(CommandMailbox, 0, sizeof(DAC960_V2_CommandMailbox_T));
+  CommandMailbox->SetMemoryMailbox.CommandIdentifier = 1;
+  CommandMailbox->SetMemoryMailbox.CommandOpcode = DAC960_V2_IOCTL;
+  CommandMailbox->SetMemoryMailbox.CommandControlBits.NoAutoRequestSense = true;
+  CommandMailbox->SetMemoryMailbox.FirstCommandMailboxSizeKB =
+    (DAC960_V2_CommandMailboxCount * sizeof(DAC960_V2_CommandMailbox_T)) >> 10;
+  CommandMailbox->SetMemoryMailbox.FirstStatusMailboxSizeKB =
+    (DAC960_V2_StatusMailboxCount * sizeof(DAC960_V2_StatusMailbox_T)) >> 10;
+  CommandMailbox->SetMemoryMailbox.SecondCommandMailboxSizeKB = 0;
+  CommandMailbox->SetMemoryMailbox.SecondStatusMailboxSizeKB = 0;
+  CommandMailbox->SetMemoryMailbox.RequestSenseSize = 0;
+  CommandMailbox->SetMemoryMailbox.IOCTL_Opcode = DAC960_V2_SetMemoryMailbox;
+  CommandMailbox->SetMemoryMailbox.HealthStatusBufferSizeKB = 1;
+  CommandMailbox->SetMemoryMailbox.HealthStatusBufferBusAddress =
+    					Controller->V2.HealthStatusBufferDMA;
+  CommandMailbox->SetMemoryMailbox.FirstCommandMailboxBusAddress =
+    					Controller->V2.FirstCommandMailboxDMA;
+  CommandMailbox->SetMemoryMailbox.FirstStatusMailboxBusAddress =
+    					Controller->V2.FirstStatusMailboxDMA;
+  switch (Controller->HardwareType)
+    {
+    case DAC960_BA_Controller:
+      while (DAC960_BA_HardwareMailboxFullP(ControllerBaseAddress))
+	udelay(1);
+      DAC960_BA_WriteHardwareMailbox(ControllerBaseAddress, CommandMailboxDMA);
+      DAC960_BA_HardwareMailboxNewCommand(ControllerBaseAddress);
+      while (!DAC960_BA_HardwareMailboxStatusAvailableP(ControllerBaseAddress))
+	udelay(1);
+      CommandStatus = DAC960_BA_ReadCommandStatus(ControllerBaseAddress);
+      DAC960_BA_AcknowledgeHardwareMailboxInterrupt(ControllerBaseAddress);
+      DAC960_BA_AcknowledgeHardwareMailboxStatus(ControllerBaseAddress);
+      break;
+    case DAC960_LP_Controller:
+      while (DAC960_LP_HardwareMailboxFullP(ControllerBaseAddress))
+	udelay(1);
+      DAC960_LP_WriteHardwareMailbox(ControllerBaseAddress, CommandMailboxDMA);
+      DAC960_LP_HardwareMailboxNewCommand(ControllerBaseAddress);
+      while (!DAC960_LP_HardwareMailboxStatusAvailableP(ControllerBaseAddress))
+	udelay(1);
+      CommandStatus = DAC960_LP_ReadCommandStatus(ControllerBaseAddress);
+      DAC960_LP_AcknowledgeHardwareMailboxInterrupt(ControllerBaseAddress);
+      DAC960_LP_AcknowledgeHardwareMailboxStatus(ControllerBaseAddress);
+      break;
+    default:
+      DAC960_Failure(Controller, "Unknown Controller Type\n");
+      CommandStatus = DAC960_V2_AbormalCompletion;
+      break;
+    }
+  pci_free_consistent(PCI_Device, sizeof(DAC960_V2_CommandMailbox_T),
+					CommandMailbox, CommandMailboxDMA);
+  return (CommandStatus == DAC960_V2_NormalCompletion);
+}
+
+
+/*
+  DAC960_V1_ReadControllerConfiguration reads the Configuration Information
+  from DAC960 V1 Firmware Controllers and initializes the Controller structure.
+*/
+
+static boolean DAC960_V1_ReadControllerConfiguration(DAC960_Controller_T
+						     *Controller)
+{
+  DAC960_V1_Enquiry2_T *Enquiry2;
+  dma_addr_t Enquiry2DMA;
+  DAC960_V1_Config2_T *Config2;
+  dma_addr_t Config2DMA;
+  int LogicalDriveNumber, Channel, TargetID;
+  struct dma_loaf local_dma;
+
+  if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
+		sizeof(DAC960_V1_Enquiry2_T) + sizeof(DAC960_V1_Config2_T)))
+	return DAC960_Failure(Controller, "DMA ALLOCATION");
+
+  Enquiry2 = slice_dma_loaf(&local_dma, sizeof(DAC960_V1_Enquiry2_T), &Enquiry2DMA);
+  Config2 = slice_dma_loaf(&local_dma, sizeof(DAC960_V1_Config2_T), &Config2DMA);
+
+  if (!DAC960_V1_ExecuteType3(Controller, DAC960_V1_Enquiry,
+			      Controller->V1.NewEnquiryDMA)) {
+    free_dma_loaf(Controller->PCIDevice, &local_dma);
+    return DAC960_Failure(Controller, "ENQUIRY");
+  }
+  memcpy(&Controller->V1.Enquiry, Controller->V1.NewEnquiry,
+						sizeof(DAC960_V1_Enquiry_T));
+
+  if (!DAC960_V1_ExecuteType3(Controller, DAC960_V1_Enquiry2, Enquiry2DMA)) {
+    free_dma_loaf(Controller->PCIDevice, &local_dma);
+    return DAC960_Failure(Controller, "ENQUIRY2");
+  }
+
+  if (!DAC960_V1_ExecuteType3(Controller, DAC960_V1_ReadConfig2, Config2DMA)) {
+    free_dma_loaf(Controller->PCIDevice, &local_dma);
+    return DAC960_Failure(Controller, "READ CONFIG2");
+  }
+
+  if (!DAC960_V1_ExecuteType3(Controller, DAC960_V1_GetLogicalDriveInformation,
+			      Controller->V1.NewLogicalDriveInformationDMA)) {
+    free_dma_loaf(Controller->PCIDevice, &local_dma);
+    return DAC960_Failure(Controller, "GET LOGICAL DRIVE INFORMATION");
+  }
+  memcpy(&Controller->V1.LogicalDriveInformation,
+		Controller->V1.NewLogicalDriveInformation,
+		sizeof(DAC960_V1_LogicalDriveInformationArray_T));
+
+  for (Channel = 0; Channel < Enquiry2->ActualChannels; Channel++)
+    for (TargetID = 0; TargetID < Enquiry2->MaxTargets; TargetID++) {
+      if (!DAC960_V1_ExecuteType3D(Controller, DAC960_V1_GetDeviceState,
+				   Channel, TargetID,
+				   Controller->V1.NewDeviceStateDMA)) {
+	free_dma_loaf(Controller->PCIDevice, &local_dma);
+	return DAC960_Failure(Controller, "GET DEVICE STATE");
+      }
+      memcpy(&Controller->V1.DeviceState[Channel][TargetID],
+	     Controller->V1.NewDeviceState, sizeof(DAC960_V1_DeviceState_T));
+    }
+  /*
+    Initialize the Controller Model Name and Full Model Name fields.
+  */
+  switch (Enquiry2->HardwareID.SubModel)
+    {
+    case DAC960_V1_P_PD_PU:
+      if (Enquiry2->SCSICapability.BusSpeed == DAC960_V1_Ultra)
+	strcpy(Controller->ModelName, "DAC960PU");
+      else strcpy(Controller->ModelName, "DAC960PD");
+      break;
+    case DAC960_V1_PL:
+      strcpy(Controller->ModelName, "DAC960PL");
+      break;
+    case DAC960_V1_PG:
+      strcpy(Controller->ModelName, "DAC960PG");
+      break;
+    case DAC960_V1_PJ:
+      strcpy(Controller->ModelName, "DAC960PJ");
+      break;
+    case DAC960_V1_PR:
+      strcpy(Controller->ModelName, "DAC960PR");
+      break;
+    case DAC960_V1_PT:
+      strcpy(Controller->ModelName, "DAC960PT");
+      break;
+    case DAC960_V1_PTL0:
+      strcpy(Controller->ModelName, "DAC960PTL0");
+      break;
+    case DAC960_V1_PRL:
+      strcpy(Controller->ModelName, "DAC960PRL");
+      break;
+    case DAC960_V1_PTL1:
+      strcpy(Controller->ModelName, "DAC960PTL1");
+      break;
+    case DAC960_V1_1164P:
+      strcpy(Controller->ModelName, "DAC1164P");
+      break;
+    default:
+      free_dma_loaf(Controller->PCIDevice, &local_dma);
+      return DAC960_Failure(Controller, "MODEL VERIFICATION");
+    }
+  strcpy(Controller->FullModelName, "Mylex ");
+  strcat(Controller->FullModelName, Controller->ModelName);
+  /*
+    Initialize the Controller Firmware Version field and verify that it
+    is a supported firmware version.  The supported firmware versions are:
+
+    DAC1164P		    5.06 and above
+    DAC960PTL/PRL/PJ/PG	    4.06 and above
+    DAC960PU/PD/PL	    3.51 and above
+    DAC960PU/PD/PL/P	    2.73 and above
+  */
+#if defined(CONFIG_ALPHA)
+  /*
+    DEC Alpha machines were often equipped with DAC960 cards that were
+    OEMed from Mylex, and had their own custom firmware. Version 2.70,
+    the last custom FW revision to be released by DEC for these older
+    controllers, appears to work quite well with this driver.
+
+    Cards tested successfully were several versions each of the PD and
+    PU, called by DEC the KZPSC and KZPAC, respectively, and having
+    the Manufacturer Numbers (from Mylex), usually on a sticker on the
+    back of the board, of:
+
+    KZPSC:  D040347 (1-channel) or D040348 (2-channel) or D040349 (3-channel)
+    KZPAC:  D040395 (1-channel) or D040396 (2-channel) or D040397 (3-channel)
+  */
+# define FIRMWARE_27X	"2.70"
+#else
+# define FIRMWARE_27X	"2.73"
+#endif
+
+  if (Enquiry2->FirmwareID.MajorVersion == 0)
+    {
+      Enquiry2->FirmwareID.MajorVersion =
+	Controller->V1.Enquiry.MajorFirmwareVersion;
+      Enquiry2->FirmwareID.MinorVersion =
+	Controller->V1.Enquiry.MinorFirmwareVersion;
+      Enquiry2->FirmwareID.FirmwareType = '0';
+      Enquiry2->FirmwareID.TurnID = 0;
+    }
+  sprintf(Controller->FirmwareVersion, "%d.%02d-%c-%02d",
+	  Enquiry2->FirmwareID.MajorVersion, Enquiry2->FirmwareID.MinorVersion,
+	  Enquiry2->FirmwareID.FirmwareType, Enquiry2->FirmwareID.TurnID);
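+  /*
+    The zero-padded "%d.%02d" prefix above makes lexicographic strcmp()
+    ordering agree with numeric ordering within a single major version,
+    which is what the comparisons below rely on.
+  */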
+  if (!((Controller->FirmwareVersion[0] == '5' &&
+	 strcmp(Controller->FirmwareVersion, "5.06") >= 0) ||
+	(Controller->FirmwareVersion[0] == '4' &&
+	 strcmp(Controller->FirmwareVersion, "4.06") >= 0) ||
+	(Controller->FirmwareVersion[0] == '3' &&
+	 strcmp(Controller->FirmwareVersion, "3.51") >= 0) ||
+	(Controller->FirmwareVersion[0] == '2' &&
+	 strcmp(Controller->FirmwareVersion, FIRMWARE_27X) >= 0)))
+    {
+      DAC960_Failure(Controller, "FIRMWARE VERSION VERIFICATION");
+      DAC960_Error("Firmware Version = '%s'\n", Controller,
+		   Controller->FirmwareVersion);
+      free_dma_loaf(Controller->PCIDevice, &local_dma);
+      return false;
+    }
+  /*
+    Initialize the Controller Channels, Targets, Memory Size, and SAF-TE
+    Enclosure Management Enabled fields.
+  */
+  Controller->Channels = Enquiry2->ActualChannels;
+  Controller->Targets = Enquiry2->MaxTargets;
+  Controller->MemorySize = Enquiry2->MemorySize >> 20;
+  Controller->V1.SAFTE_EnclosureManagementEnabled =
+    (Enquiry2->FaultManagementType == DAC960_V1_SAFTE);
+  /*
+    Initialize the Controller Queue Depth, Driver Queue Depth, Logical Drive
+    Count, Maximum Blocks per Command, Controller Scatter/Gather Limit, and
+    Driver Scatter/Gather Limit.  The Driver Queue Depth must be at most one
+    less than the Controller Queue Depth to allow for an automatic drive
+    rebuild operation.
+  */
+  Controller->ControllerQueueDepth = Controller->V1.Enquiry.MaxCommands;
+  Controller->DriverQueueDepth = Controller->ControllerQueueDepth - 1;
+  if (Controller->DriverQueueDepth > DAC960_MaxDriverQueueDepth)
+    Controller->DriverQueueDepth = DAC960_MaxDriverQueueDepth;
+  Controller->LogicalDriveCount =
+    Controller->V1.Enquiry.NumberOfLogicalDrives;
+  Controller->MaxBlocksPerCommand = Enquiry2->MaxBlocksPerCommand;
+  Controller->ControllerScatterGatherLimit = Enquiry2->MaxScatterGatherEntries;
+  Controller->DriverScatterGatherLimit =
+    Controller->ControllerScatterGatherLimit;
+  if (Controller->DriverScatterGatherLimit > DAC960_V1_ScatterGatherLimit)
+    Controller->DriverScatterGatherLimit = DAC960_V1_ScatterGatherLimit;
+  /*
+    Initialize the Stripe Size, Segment Size, and Geometry Translation.
+  */
+  Controller->V1.StripeSize = Config2->BlocksPerStripe * Config2->BlockFactor
+			      >> (10 - DAC960_BlockSizeBits);
+  Controller->V1.SegmentSize = Config2->BlocksPerCacheLine * Config2->BlockFactor
+			       >> (10 - DAC960_BlockSizeBits);
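+  /*
+    BlocksPerStripe and BlocksPerCacheLine are in units of BlockFactor
+    blocks; the shift by (10 - DAC960_BlockSizeBits) then converts the
+    resulting count of DAC960_BlockSize-byte blocks into kilobytes.
+  */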
+  switch (Config2->DriveGeometry)
+    {
+    case DAC960_V1_Geometry_128_32:
+      Controller->V1.GeometryTranslationHeads = 128;
+      Controller->V1.GeometryTranslationSectors = 32;
+      break;
+    case DAC960_V1_Geometry_255_63:
+      Controller->V1.GeometryTranslationHeads = 255;
+      Controller->V1.GeometryTranslationSectors = 63;
+      break;
+    default:
+      free_dma_loaf(Controller->PCIDevice, &local_dma);
+      return DAC960_Failure(Controller, "CONFIG2 DRIVE GEOMETRY");
+    }
+  /*
+    Initialize the Background Initialization Status.
+  */
+  if ((Controller->FirmwareVersion[0] == '4' &&
+      strcmp(Controller->FirmwareVersion, "4.08") >= 0) ||
+      (Controller->FirmwareVersion[0] == '5' &&
+       strcmp(Controller->FirmwareVersion, "5.08") >= 0))
+    {
+      Controller->V1.BackgroundInitializationStatusSupported = true;
+      DAC960_V1_ExecuteType3B(Controller,
+			      DAC960_V1_BackgroundInitializationControl, 0x20,
+			      Controller->V1.BackgroundInitializationStatusDMA);
+      memcpy(&Controller->V1.LastBackgroundInitializationStatus,
+		Controller->V1.BackgroundInitializationStatus,
+		sizeof(DAC960_V1_BackgroundInitializationStatus_T));
+    }
+  /*
+    Initialize the Logical Drive Initially Accessible flag.
+  */
+  for (LogicalDriveNumber = 0;
+       LogicalDriveNumber < Controller->LogicalDriveCount;
+       LogicalDriveNumber++)
+    if (Controller->V1.LogicalDriveInformation
+		       [LogicalDriveNumber].LogicalDriveState !=
+	DAC960_V1_LogicalDrive_Offline)
+      Controller->LogicalDriveInitiallyAccessible[LogicalDriveNumber] = true;
+  Controller->V1.LastRebuildStatus = DAC960_V1_NoRebuildOrCheckInProgress;
+  free_dma_loaf(Controller->PCIDevice, &local_dma);
+  return true;
+}
+
+
+/*
+  DAC960_V2_ReadControllerConfiguration reads the Configuration Information
+  from DAC960 V2 Firmware Controllers and initializes the Controller structure.
+*/
+
+static boolean DAC960_V2_ReadControllerConfiguration(DAC960_Controller_T
+						     *Controller)
+{
+  DAC960_V2_ControllerInfo_T *ControllerInfo =
+    		&Controller->V2.ControllerInformation;
+  unsigned short LogicalDeviceNumber = 0;
+  int ModelNameLength;
+
+  /* Get data into dma-able area, then copy into its permanent location */
+  if (!DAC960_V2_NewControllerInfo(Controller))
+    return DAC960_Failure(Controller, "GET CONTROLLER INFO");
+  memcpy(ControllerInfo, Controller->V2.NewControllerInformation,
+			sizeof(DAC960_V2_ControllerInfo_T));
+
+  if (!DAC960_V2_GeneralInfo(Controller))
+    return DAC960_Failure(Controller, "GET HEALTH STATUS");
+
+  /*
+    Initialize the Controller Model Name and Full Model Name fields.
+  */
+  ModelNameLength = sizeof(ControllerInfo->ControllerName);
+  if (ModelNameLength > sizeof(Controller->ModelName)-1)
+    ModelNameLength = sizeof(Controller->ModelName)-1;
+  memcpy(Controller->ModelName, ControllerInfo->ControllerName,
+	 ModelNameLength);
+  ModelNameLength--;
+  while (Controller->ModelName[ModelNameLength] == ' ' ||
+	 Controller->ModelName[ModelNameLength] == '\0')
+    ModelNameLength--;
+  Controller->ModelName[++ModelNameLength] = '\0';
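+  /* This trimming assumes the name contains at least one printable,
+     non-blank character; an all-blank name would walk the index
+     negative. */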
+  strcpy(Controller->FullModelName, "Mylex ");
+  strcat(Controller->FullModelName, Controller->ModelName);
+  /*
+    Initialize the Controller Firmware Version field.
+  */
+  sprintf(Controller->FirmwareVersion, "%d.%02d-%02d",
+	  ControllerInfo->FirmwareMajorVersion,
+	  ControllerInfo->FirmwareMinorVersion,
+	  ControllerInfo->FirmwareTurnNumber);
+  if (ControllerInfo->FirmwareMajorVersion == 6 &&
+      ControllerInfo->FirmwareMinorVersion == 0 &&
+      ControllerInfo->FirmwareTurnNumber < 1)
+    {
+      DAC960_Info("FIRMWARE VERSION %s DOES NOT PROVIDE THE CONTROLLER\n",
+		  Controller, Controller->FirmwareVersion);
+      DAC960_Info("STATUS MONITORING FUNCTIONALITY NEEDED BY THIS DRIVER.\n",
+		  Controller);
+      DAC960_Info("PLEASE UPGRADE TO VERSION 6.00-01 OR ABOVE.\n",
+		  Controller);
+    }
+  /*
+    Initialize the Controller Channels, Targets, and Memory Size.
+  */
+  Controller->Channels = ControllerInfo->NumberOfPhysicalChannelsPresent;
+  Controller->Targets =
+    ControllerInfo->MaximumTargetsPerChannel
+		    [ControllerInfo->NumberOfPhysicalChannelsPresent-1];
+  Controller->MemorySize = ControllerInfo->MemorySizeMB;
+  /*
+    Initialize the Controller Queue Depth, Driver Queue Depth, Logical Drive
+    Count, Maximum Blocks per Command, Controller Scatter/Gather Limit, and
+    Driver Scatter/Gather Limit.  The Driver Queue Depth must be at most one
+    less than the Controller Queue Depth to allow for an automatic drive
+    rebuild operation.
+  */
+  Controller->ControllerQueueDepth = ControllerInfo->MaximumParallelCommands;
+  Controller->DriverQueueDepth = Controller->ControllerQueueDepth - 1;
+  if (Controller->DriverQueueDepth > DAC960_MaxDriverQueueDepth)
+    Controller->DriverQueueDepth = DAC960_MaxDriverQueueDepth;
+  Controller->LogicalDriveCount = ControllerInfo->LogicalDevicesPresent;
+  Controller->MaxBlocksPerCommand =
+    ControllerInfo->MaximumDataTransferSizeInBlocks;
+  Controller->ControllerScatterGatherLimit =
+    ControllerInfo->MaximumScatterGatherEntries;
+  Controller->DriverScatterGatherLimit =
+    Controller->ControllerScatterGatherLimit;
+  if (Controller->DriverScatterGatherLimit > DAC960_V2_ScatterGatherLimit)
+    Controller->DriverScatterGatherLimit = DAC960_V2_ScatterGatherLimit;
+  /*
+    Initialize the Logical Device Information.
+  */
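+  /*
+    Each DAC960_V2_NewLogicalDeviceInfo() call is expected to return the
+    next configured logical device at or above the requested number, so
+    the reply's LogicalDeviceNumber is read back and incremented before
+    the next request; the loop ends when the request fails.
+  */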
+  while (true)
+    {
+      DAC960_V2_LogicalDeviceInfo_T *NewLogicalDeviceInfo =
+	Controller->V2.NewLogicalDeviceInformation;
+      DAC960_V2_LogicalDeviceInfo_T *LogicalDeviceInfo;
+      DAC960_V2_PhysicalDevice_T PhysicalDevice;
+
+      if (!DAC960_V2_NewLogicalDeviceInfo(Controller, LogicalDeviceNumber))
+	break;
+      LogicalDeviceNumber = NewLogicalDeviceInfo->LogicalDeviceNumber;
+      if (LogicalDeviceNumber >= DAC960_MaxLogicalDrives) {
+	DAC960_Error("DAC960: Logical Drive Number %d not supported\n",
+		     Controller, LogicalDeviceNumber);
+	break;
+      }
+      if (NewLogicalDeviceInfo->DeviceBlockSizeInBytes != DAC960_BlockSize) {
+	DAC960_Error("DAC960: Logical Drive Block Size %d not supported\n",
+	      Controller, NewLogicalDeviceInfo->DeviceBlockSizeInBytes);
+        LogicalDeviceNumber++;
+        continue;
+      }
+      PhysicalDevice.Controller = 0;
+      PhysicalDevice.Channel = NewLogicalDeviceInfo->Channel;
+      PhysicalDevice.TargetID = NewLogicalDeviceInfo->TargetID;
+      PhysicalDevice.LogicalUnit = NewLogicalDeviceInfo->LogicalUnit;
+      Controller->V2.LogicalDriveToVirtualDevice[LogicalDeviceNumber] =
+	PhysicalDevice;
+      if (NewLogicalDeviceInfo->LogicalDeviceState !=
+	  DAC960_V2_LogicalDevice_Offline)
+	Controller->LogicalDriveInitiallyAccessible[LogicalDeviceNumber] = true;
+      LogicalDeviceInfo = (DAC960_V2_LogicalDeviceInfo_T *)
+	kmalloc(sizeof(DAC960_V2_LogicalDeviceInfo_T), GFP_ATOMIC);
+      if (LogicalDeviceInfo == NULL)
+	return DAC960_Failure(Controller, "LOGICAL DEVICE ALLOCATION");
+      Controller->V2.LogicalDeviceInformation[LogicalDeviceNumber] =
+	LogicalDeviceInfo;
+      memcpy(LogicalDeviceInfo, NewLogicalDeviceInfo,
+	     sizeof(DAC960_V2_LogicalDeviceInfo_T));
+      LogicalDeviceNumber++;
+    }
+  return true;
+}
+
+
+/*
+  DAC960_ReportControllerConfiguration reports the Configuration Information
+  for Controller.
+*/
+
+static boolean DAC960_ReportControllerConfiguration(DAC960_Controller_T
+						    *Controller)
+{
+  DAC960_Info("Configuring Mylex %s PCI RAID Controller\n",
+	      Controller, Controller->ModelName);
+  DAC960_Info("  Firmware Version: %s, Channels: %d, Memory Size: %dMB\n",
+	      Controller, Controller->FirmwareVersion,
+	      Controller->Channels, Controller->MemorySize);
+  DAC960_Info("  PCI Bus: %d, Device: %d, Function: %d, I/O Address: ",
+	      Controller, Controller->Bus,
+	      Controller->Device, Controller->Function);
+  if (Controller->IO_Address == 0)
+    DAC960_Info("Unassigned\n", Controller);
+  else DAC960_Info("0x%X\n", Controller, Controller->IO_Address);
+  DAC960_Info("  PCI Address: 0x%X mapped at 0x%lX, IRQ Channel: %d\n",
+	      Controller, Controller->PCI_Address,
+	      (unsigned long) Controller->BaseAddress,
+	      Controller->IRQ_Channel);
+  DAC960_Info("  Controller Queue Depth: %d, "
+	      "Maximum Blocks per Command: %d\n",
+	      Controller, Controller->ControllerQueueDepth,
+	      Controller->MaxBlocksPerCommand);
+  DAC960_Info("  Driver Queue Depth: %d, "
+	      "Scatter/Gather Limit: %d of %d Segments\n",
+	      Controller, Controller->DriverQueueDepth,
+	      Controller->DriverScatterGatherLimit,
+	      Controller->ControllerScatterGatherLimit);
+  if (Controller->FirmwareType == DAC960_V1_Controller)
+    {
+      DAC960_Info("  Stripe Size: %dKB, Segment Size: %dKB, "
+		  "BIOS Geometry: %d/%d\n", Controller,
+		  Controller->V1.StripeSize,
+		  Controller->V1.SegmentSize,
+		  Controller->V1.GeometryTranslationHeads,
+		  Controller->V1.GeometryTranslationSectors);
+      if (Controller->V1.SAFTE_EnclosureManagementEnabled)
+	DAC960_Info("  SAF-TE Enclosure Management Enabled\n", Controller);
+    }
+  return true;
+}
+
+
+/*
+  DAC960_V1_ReadDeviceConfiguration reads the Device Configuration Information
+  for DAC960 V1 Firmware Controllers by requesting the SCSI Inquiry and SCSI
+  Inquiry Unit Serial Number information for each device connected to
+  Controller.
+*/
+
+static boolean DAC960_V1_ReadDeviceConfiguration(DAC960_Controller_T
+						 *Controller)
+{
+  struct dma_loaf local_dma;
+
+  dma_addr_t DCDBs_dma[DAC960_V1_MaxChannels];
+  DAC960_V1_DCDB_T *DCDBs_cpu[DAC960_V1_MaxChannels];
+
+  dma_addr_t SCSI_Inquiry_dma[DAC960_V1_MaxChannels];
+  DAC960_SCSI_Inquiry_T *SCSI_Inquiry_cpu[DAC960_V1_MaxChannels];
+
+  dma_addr_t SCSI_NewInquiryUnitSerialNumberDMA[DAC960_V1_MaxChannels];
+  DAC960_SCSI_Inquiry_UnitSerialNumber_T *SCSI_NewInquiryUnitSerialNumberCPU[DAC960_V1_MaxChannels];
+
+  struct completion Completions[DAC960_V1_MaxChannels];
+  unsigned long flags;
+  int Channel, TargetID;
+
+  if (!init_dma_loaf(Controller->PCIDevice, &local_dma, 
+		DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
+			sizeof(DAC960_SCSI_Inquiry_T) +
+			sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T))))
+	return DAC960_Failure(Controller,
+			"DMA ALLOCATION FAILED IN ReadDeviceConfiguration");
+
+  for (Channel = 0; Channel < Controller->Channels; Channel++) {
+	DCDBs_cpu[Channel] = slice_dma_loaf(&local_dma,
+			sizeof(DAC960_V1_DCDB_T), DCDBs_dma + Channel);
+	SCSI_Inquiry_cpu[Channel] = slice_dma_loaf(&local_dma,
+			sizeof(DAC960_SCSI_Inquiry_T),
+			SCSI_Inquiry_dma + Channel);
+	SCSI_NewInquiryUnitSerialNumberCPU[Channel] = slice_dma_loaf(&local_dma,
+			sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T),
+			SCSI_NewInquiryUnitSerialNumberDMA + Channel);
+  }
+		
+  for (TargetID = 0; TargetID < Controller->Targets; TargetID++)
+    {
+      /*
+       * For each channel, submit a probe for a device on that channel.
+       * The timeout interval for a device that is present is 10 seconds.
+       * With this approach, the timeout periods can elapse in parallel
+       * on each channel.
+       */
+      for (Channel = 0; Channel < Controller->Channels; Channel++)
+	{
+	  dma_addr_t NewInquiryStandardDataDMA = SCSI_Inquiry_dma[Channel];
+  	  DAC960_V1_DCDB_T *DCDB = DCDBs_cpu[Channel];
+  	  dma_addr_t DCDB_dma = DCDBs_dma[Channel];
+	  DAC960_Command_T *Command = Controller->Commands[Channel];
+          struct completion *Completion = &Completions[Channel];
+
+	  init_completion(Completion);
+	  DAC960_V1_ClearCommand(Command);
+	  Command->CommandType = DAC960_ImmediateCommand;
+	  Command->Completion = Completion;
+	  Command->V1.CommandMailbox.Type3.CommandOpcode = DAC960_V1_DCDB;
+	  Command->V1.CommandMailbox.Type3.BusAddress = DCDB_dma;
+	  DCDB->Channel = Channel;
+	  DCDB->TargetID = TargetID;
+	  DCDB->Direction = DAC960_V1_DCDB_DataTransferDeviceToSystem;
+	  DCDB->EarlyStatus = false;
+	  DCDB->Timeout = DAC960_V1_DCDB_Timeout_10_seconds;
+	  DCDB->NoAutomaticRequestSense = false;
+	  DCDB->DisconnectPermitted = true;
+	  DCDB->TransferLength = sizeof(DAC960_SCSI_Inquiry_T);
+	  DCDB->BusAddress = NewInquiryStandardDataDMA;
+	  DCDB->CDBLength = 6;
+	  DCDB->TransferLengthHigh4 = 0;
+	  DCDB->SenseLength = sizeof(DCDB->SenseData);
+	  DCDB->CDB[0] = 0x12; /* INQUIRY */
+	  DCDB->CDB[1] = 0; /* EVPD = 0 */
+	  DCDB->CDB[2] = 0; /* Page Code */
+	  DCDB->CDB[3] = 0; /* Reserved */
+	  DCDB->CDB[4] = sizeof(DAC960_SCSI_Inquiry_T);
+	  DCDB->CDB[5] = 0; /* Control */
+
+	  spin_lock_irqsave(&Controller->queue_lock, flags);
+	  DAC960_QueueCommand(Command);
+	  spin_unlock_irqrestore(&Controller->queue_lock, flags);
+	}
+      /*
+       * Wait for the probes submitted in the previous loop
+       * to complete.  On the probes that are successful,
+       * get the serial number of the device that was found.
+       */
+      for (Channel = 0; Channel < Controller->Channels; Channel++)
+	{
+	  DAC960_SCSI_Inquiry_T *InquiryStandardData =
+	    &Controller->V1.InquiryStandardData[Channel][TargetID];
+	  DAC960_SCSI_Inquiry_T *NewInquiryStandardData = SCSI_Inquiry_cpu[Channel];
+	  dma_addr_t NewInquiryUnitSerialNumberDMA =
+			SCSI_NewInquiryUnitSerialNumberDMA[Channel];
+	  DAC960_SCSI_Inquiry_UnitSerialNumber_T *NewInquiryUnitSerialNumber =
+	    		SCSI_NewInquiryUnitSerialNumberCPU[Channel];
+	  DAC960_SCSI_Inquiry_UnitSerialNumber_T *InquiryUnitSerialNumber =
+	    &Controller->V1.InquiryUnitSerialNumber[Channel][TargetID];
+	  DAC960_Command_T *Command = Controller->Commands[Channel];
+  	  DAC960_V1_DCDB_T *DCDB = DCDBs_cpu[Channel];
+          struct completion *Completion = &Completions[Channel];
+
+	  wait_for_completion(Completion);
+
+	  if (Command->V1.CommandStatus != DAC960_V1_NormalCompletion) {
+	    memset(InquiryStandardData, 0, sizeof(DAC960_SCSI_Inquiry_T));
+	    InquiryStandardData->PeripheralDeviceType = 0x1F;
+	    continue;
+	  }
+	  memcpy(InquiryStandardData, NewInquiryStandardData,
+		 sizeof(DAC960_SCSI_Inquiry_T));
+
+	  /* Preserve Channel and TargetID values from the previous loop */
+	  Command->Completion = Completion;
+	  DCDB->TransferLength = sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T);
+	  DCDB->BusAddress = NewInquiryUnitSerialNumberDMA;
+	  DCDB->SenseLength = sizeof(DCDB->SenseData);
+	  DCDB->CDB[0] = 0x12; /* INQUIRY */
+	  DCDB->CDB[1] = 1; /* EVPD = 1 */
+	  DCDB->CDB[2] = 0x80; /* Page Code */
+	  DCDB->CDB[3] = 0; /* Reserved */
+	  DCDB->CDB[4] = sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T);
+	  DCDB->CDB[5] = 0; /* Control */
+
+	  spin_lock_irqsave(&Controller->queue_lock, flags);
+	  DAC960_QueueCommand(Command);
+	  spin_unlock_irqrestore(&Controller->queue_lock, flags);
+	  wait_for_completion(Completion);
+
+	  if (Command->V1.CommandStatus != DAC960_V1_NormalCompletion) {
+	  	memset(InquiryUnitSerialNumber, 0,
+			sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T));
+	  	InquiryUnitSerialNumber->PeripheralDeviceType = 0x1F;
+	  } else
+	  	memcpy(InquiryUnitSerialNumber, NewInquiryUnitSerialNumber,
+			sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T));
+	}
+    }
+  free_dma_loaf(Controller->PCIDevice, &local_dma);
+  return true;
+}
+
+
+/*
+  DAC960_V2_ReadDeviceConfiguration reads the Device Configuration Information
+  for DAC960 V2 Firmware Controllers by requesting the Physical Device
+  Information and SCSI Inquiry Unit Serial Number information for each
+  device connected to Controller.
+*/
+
+static boolean DAC960_V2_ReadDeviceConfiguration(DAC960_Controller_T
+						 *Controller)
+{
+  unsigned char Channel = 0, TargetID = 0, LogicalUnit = 0;
+  unsigned short PhysicalDeviceIndex = 0;
+
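+  /*
+    The (Channel, TargetID, LogicalUnit) triple names the last device
+    seen; each DAC960_V2_NewPhysicalDeviceInfo() call is expected to
+    return the next physical device at or beyond that address, and
+    incrementing LogicalUnit at the bottom of the loop keeps the scan
+    moving forward.  The loop ends when no further device is reported.
+  */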
+  while (true)
+    {
+      DAC960_V2_PhysicalDeviceInfo_T *NewPhysicalDeviceInfo =
+		Controller->V2.NewPhysicalDeviceInformation;
+      DAC960_V2_PhysicalDeviceInfo_T *PhysicalDeviceInfo;
+      DAC960_SCSI_Inquiry_UnitSerialNumber_T *NewInquiryUnitSerialNumber =
+		Controller->V2.NewInquiryUnitSerialNumber;
+      DAC960_SCSI_Inquiry_UnitSerialNumber_T *InquiryUnitSerialNumber;
+
+      if (!DAC960_V2_NewPhysicalDeviceInfo(Controller, Channel, TargetID, LogicalUnit))
+	  break;
+
+      PhysicalDeviceInfo = (DAC960_V2_PhysicalDeviceInfo_T *)
+		kmalloc(sizeof(DAC960_V2_PhysicalDeviceInfo_T), GFP_ATOMIC);
+      if (PhysicalDeviceInfo == NULL)
+		return DAC960_Failure(Controller, "PHYSICAL DEVICE ALLOCATION");
+      Controller->V2.PhysicalDeviceInformation[PhysicalDeviceIndex] =
+		PhysicalDeviceInfo;
+      memcpy(PhysicalDeviceInfo, NewPhysicalDeviceInfo,
+		sizeof(DAC960_V2_PhysicalDeviceInfo_T));
+
+      InquiryUnitSerialNumber = (DAC960_SCSI_Inquiry_UnitSerialNumber_T *)
+	kmalloc(sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T), GFP_ATOMIC);
+      if (InquiryUnitSerialNumber == NULL) {
+	kfree(PhysicalDeviceInfo);
+	return DAC960_Failure(Controller, "SERIAL NUMBER ALLOCATION");
+      }
+      Controller->V2.InquiryUnitSerialNumber[PhysicalDeviceIndex] =
+		InquiryUnitSerialNumber;
+
+      Channel = NewPhysicalDeviceInfo->Channel;
+      TargetID = NewPhysicalDeviceInfo->TargetID;
+      LogicalUnit = NewPhysicalDeviceInfo->LogicalUnit;
+
+      /*
+	 Some devices do NOT have Unit Serial Numbers, and this command
+	 fails for them.  We still want to remember that those devices
+	 are present, so construct a UnitSerialNumber structure for the
+	 failure case.
+      */
+      if (!DAC960_V2_NewInquiryUnitSerialNumber(Controller, Channel, TargetID, LogicalUnit)) {
+      	memset(InquiryUnitSerialNumber, 0,
+             sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T));
+     	InquiryUnitSerialNumber->PeripheralDeviceType = 0x1F;
+      } else
+      	memcpy(InquiryUnitSerialNumber, NewInquiryUnitSerialNumber,
+		sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T));
+
+      PhysicalDeviceIndex++;
+      LogicalUnit++;
+    }
+  return true;
+}
+
+
+/*
+  DAC960_SanitizeInquiryData sanitizes the Vendor, Model, Revision, and
+  Product Serial Number fields of the Inquiry Standard Data and Inquiry
+  Unit Serial Number structures by replacing unprintable characters with
+  spaces and NUL-terminating the results.  Each output buffer must be one
+  byte larger than the corresponding source field; a Peripheral Device
+  Type of 0x1F means no device is present.
+*/
+
+static void DAC960_SanitizeInquiryData(DAC960_SCSI_Inquiry_T
+					 *InquiryStandardData,
+				       DAC960_SCSI_Inquiry_UnitSerialNumber_T
+					 *InquiryUnitSerialNumber,
+				       unsigned char *Vendor,
+				       unsigned char *Model,
+				       unsigned char *Revision,
+				       unsigned char *SerialNumber)
+{
+  int SerialNumberLength, i;
+  if (InquiryStandardData->PeripheralDeviceType == 0x1F) return;
+  for (i = 0; i < sizeof(InquiryStandardData->VendorIdentification); i++)
+    {
+      unsigned char VendorCharacter =
+	InquiryStandardData->VendorIdentification[i];
+      Vendor[i] = (VendorCharacter >= ' ' && VendorCharacter <= '~'
+		   ? VendorCharacter : ' ');
+    }
+  Vendor[sizeof(InquiryStandardData->VendorIdentification)] = '\0';
+  for (i = 0; i < sizeof(InquiryStandardData->ProductIdentification); i++)
+    {
+      unsigned char ModelCharacter =
+	InquiryStandardData->ProductIdentification[i];
+      Model[i] = (ModelCharacter >= ' ' && ModelCharacter <= '~'
+		  ? ModelCharacter : ' ');
+    }
+  Model[sizeof(InquiryStandardData->ProductIdentification)] = '\0';
+  for (i = 0; i < sizeof(InquiryStandardData->ProductRevisionLevel); i++)
+    {
+      unsigned char RevisionCharacter =
+	InquiryStandardData->ProductRevisionLevel[i];
+      Revision[i] = (RevisionCharacter >= ' ' && RevisionCharacter <= '~'
+		     ? RevisionCharacter : ' ');
+    }
+  Revision[sizeof(InquiryStandardData->ProductRevisionLevel)] = '\0';
+  if (InquiryUnitSerialNumber->PeripheralDeviceType == 0x1F) return;
+  SerialNumberLength = InquiryUnitSerialNumber->PageLength;
+  if (SerialNumberLength >
+      sizeof(InquiryUnitSerialNumber->ProductSerialNumber))
+    SerialNumberLength = sizeof(InquiryUnitSerialNumber->ProductSerialNumber);
+  for (i = 0; i < SerialNumberLength; i++)
+    {
+      unsigned char SerialNumberCharacter =
+	InquiryUnitSerialNumber->ProductSerialNumber[i];
+      SerialNumber[i] =
+	(SerialNumberCharacter >= ' ' && SerialNumberCharacter <= '~'
+	 ? SerialNumberCharacter : ' ');
+    }
+  SerialNumber[SerialNumberLength] = '\0';
+}
+
+
+/*
+  DAC960_V1_ReportDeviceConfiguration reports the Device Configuration
+  Information for DAC960 V1 Firmware Controllers.
+*/
+
+static boolean DAC960_V1_ReportDeviceConfiguration(DAC960_Controller_T
+						   *Controller)
+{
+  int LogicalDriveNumber, Channel, TargetID;
+  DAC960_Info("  Physical Devices:\n", Controller);
+  for (Channel = 0; Channel < Controller->Channels; Channel++)
+    for (TargetID = 0; TargetID < Controller->Targets; TargetID++)
+      {
+	DAC960_SCSI_Inquiry_T *InquiryStandardData =
+	  &Controller->V1.InquiryStandardData[Channel][TargetID];
+	DAC960_SCSI_Inquiry_UnitSerialNumber_T *InquiryUnitSerialNumber =
+	  &Controller->V1.InquiryUnitSerialNumber[Channel][TargetID];
+	DAC960_V1_DeviceState_T *DeviceState =
+	  &Controller->V1.DeviceState[Channel][TargetID];
+	DAC960_V1_ErrorTableEntry_T *ErrorEntry =
+	  &Controller->V1.ErrorTable.ErrorTableEntries[Channel][TargetID];
+	char Vendor[1+sizeof(InquiryStandardData->VendorIdentification)];
+	char Model[1+sizeof(InquiryStandardData->ProductIdentification)];
+	char Revision[1+sizeof(InquiryStandardData->ProductRevisionLevel)];
+	char SerialNumber[1+sizeof(InquiryUnitSerialNumber
+				   ->ProductSerialNumber)];
+	if (InquiryStandardData->PeripheralDeviceType == 0x1F) continue;
+	DAC960_SanitizeInquiryData(InquiryStandardData, InquiryUnitSerialNumber,
+				   Vendor, Model, Revision, SerialNumber);
+	DAC960_Info("    %d:%d%s Vendor: %s  Model: %s  Revision: %s\n",
+		    Controller, Channel, TargetID, (TargetID < 10 ? " " : ""),
+		    Vendor, Model, Revision);
+	if (InquiryUnitSerialNumber->PeripheralDeviceType != 0x1F)
+	  DAC960_Info("         Serial Number: %s\n", Controller, SerialNumber);
+	if (DeviceState->Present &&
+	    DeviceState->DeviceType == DAC960_V1_DiskType)
+	  {
+	    if (Controller->V1.DeviceResetCount[Channel][TargetID] > 0)
+	      DAC960_Info("         Disk Status: %s, %u blocks, %d resets\n",
+			  Controller,
+			  (DeviceState->DeviceState == DAC960_V1_Device_Dead
+			   ? "Dead"
+			   : DeviceState->DeviceState
+			     == DAC960_V1_Device_WriteOnly
+			     ? "Write-Only"
+			     : DeviceState->DeviceState
+			       == DAC960_V1_Device_Online
+			       ? "Online" : "Standby"),
+			  DeviceState->DiskSize,
+			  Controller->V1.DeviceResetCount[Channel][TargetID]);
+	    else
+	      DAC960_Info("         Disk Status: %s, %u blocks\n", Controller,
+			  (DeviceState->DeviceState == DAC960_V1_Device_Dead
+			   ? "Dead"
+			   : DeviceState->DeviceState
+			     == DAC960_V1_Device_WriteOnly
+			     ? "Write-Only"
+			     : DeviceState->DeviceState
+			       == DAC960_V1_Device_Online
+			       ? "Online" : "Standby"),
+			  DeviceState->DiskSize);
+	  }
+	if (ErrorEntry->ParityErrorCount > 0 ||
+	    ErrorEntry->SoftErrorCount > 0 ||
+	    ErrorEntry->HardErrorCount > 0 ||
+	    ErrorEntry->MiscErrorCount > 0)
+	  DAC960_Info("         Errors - Parity: %d, Soft: %d, "
+		      "Hard: %d, Misc: %d\n", Controller,
+		      ErrorEntry->ParityErrorCount,
+		      ErrorEntry->SoftErrorCount,
+		      ErrorEntry->HardErrorCount,
+		      ErrorEntry->MiscErrorCount);
+      }
+  DAC960_Info("  Logical Drives:\n", Controller);
+  for (LogicalDriveNumber = 0;
+       LogicalDriveNumber < Controller->LogicalDriveCount;
+       LogicalDriveNumber++)
+    {
+      DAC960_V1_LogicalDriveInformation_T *LogicalDriveInformation =
+	&Controller->V1.LogicalDriveInformation[LogicalDriveNumber];
+      DAC960_Info("    /dev/rd/c%dd%d: RAID-%d, %s, %u blocks, %s\n",
+		  Controller, Controller->ControllerNumber, LogicalDriveNumber,
+		  LogicalDriveInformation->RAIDLevel,
+		  (LogicalDriveInformation->LogicalDriveState
+		   == DAC960_V1_LogicalDrive_Online
+		   ? "Online"
+		   : LogicalDriveInformation->LogicalDriveState
+		     == DAC960_V1_LogicalDrive_Critical
+		     ? "Critical" : "Offline"),
+		  LogicalDriveInformation->LogicalDriveSize,
+		  (LogicalDriveInformation->WriteBack
+		   ? "Write Back" : "Write Thru"));
+    }
+  return true;
+}
+
+
+/*
+  DAC960_V2_ReportDeviceConfiguration reports the Device Configuration
+  Information for DAC960 V2 Firmware Controllers.
+*/
+
+static boolean DAC960_V2_ReportDeviceConfiguration(DAC960_Controller_T
+						   *Controller)
+{
+  int PhysicalDeviceIndex, LogicalDriveNumber;
+  DAC960_Info("  Physical Devices:\n", Controller);
+  for (PhysicalDeviceIndex = 0;
+       PhysicalDeviceIndex < DAC960_V2_MaxPhysicalDevices;
+       PhysicalDeviceIndex++)
+    {
+      DAC960_V2_PhysicalDeviceInfo_T *PhysicalDeviceInfo =
+	Controller->V2.PhysicalDeviceInformation[PhysicalDeviceIndex];
+      DAC960_SCSI_Inquiry_T *InquiryStandardData;
+      DAC960_SCSI_Inquiry_UnitSerialNumber_T *InquiryUnitSerialNumber =
+	Controller->V2.InquiryUnitSerialNumber[PhysicalDeviceIndex];
+      char Vendor[1+sizeof(InquiryStandardData->VendorIdentification)];
+      char Model[1+sizeof(InquiryStandardData->ProductIdentification)];
+      char Revision[1+sizeof(InquiryStandardData->ProductRevisionLevel)];
+      char SerialNumber[1+sizeof(InquiryUnitSerialNumber->ProductSerialNumber)];
+      if (PhysicalDeviceInfo == NULL) break;
+      /* Only derive addresses from PhysicalDeviceInfo after the NULL
+	 check; the sizeof() uses above do not evaluate their operands. */
+      InquiryStandardData =
+	(DAC960_SCSI_Inquiry_T *) &PhysicalDeviceInfo->SCSI_InquiryData;
+      DAC960_SanitizeInquiryData(InquiryStandardData, InquiryUnitSerialNumber,
+				 Vendor, Model, Revision, SerialNumber);
+      DAC960_Info("    %d:%d%s Vendor: %s  Model: %s  Revision: %s\n",
+		  Controller,
+		  PhysicalDeviceInfo->Channel,
+		  PhysicalDeviceInfo->TargetID,
+		  (PhysicalDeviceInfo->TargetID < 10 ? " " : ""),
+		  Vendor, Model, Revision);
+      if (PhysicalDeviceInfo->NegotiatedSynchronousMegaTransfers == 0)
+	DAC960_Info("         %sAsynchronous\n", Controller,
+		    (PhysicalDeviceInfo->NegotiatedDataWidthBits == 16
+		     ? "Wide " :""));
+      else
+	DAC960_Info("         %sSynchronous at %d MB/sec\n", Controller,
+		    (PhysicalDeviceInfo->NegotiatedDataWidthBits == 16
+		     ? "Wide " :""),
+		    (PhysicalDeviceInfo->NegotiatedSynchronousMegaTransfers
+		     * PhysicalDeviceInfo->NegotiatedDataWidthBits/8));
+      if (InquiryUnitSerialNumber->PeripheralDeviceType != 0x1F)
+	DAC960_Info("         Serial Number: %s\n", Controller, SerialNumber);
+      if (PhysicalDeviceInfo->PhysicalDeviceState ==
+	  DAC960_V2_Device_Unconfigured)
+	continue;
+      DAC960_Info("         Disk Status: %s, %u blocks\n", Controller,
+		  (PhysicalDeviceInfo->PhysicalDeviceState
+		   == DAC960_V2_Device_Online
+		   ? "Online"
+		   : PhysicalDeviceInfo->PhysicalDeviceState
+		     == DAC960_V2_Device_Rebuild
+		     ? "Rebuild"
+		     : PhysicalDeviceInfo->PhysicalDeviceState
+		       == DAC960_V2_Device_Missing
+		       ? "Missing"
+		       : PhysicalDeviceInfo->PhysicalDeviceState
+			 == DAC960_V2_Device_Critical
+			 ? "Critical"
+			 : PhysicalDeviceInfo->PhysicalDeviceState
+			   == DAC960_V2_Device_Dead
+			   ? "Dead"
+			   : PhysicalDeviceInfo->PhysicalDeviceState
+			     == DAC960_V2_Device_SuspectedDead
+			     ? "Suspected-Dead"
+			     : PhysicalDeviceInfo->PhysicalDeviceState
+			       == DAC960_V2_Device_CommandedOffline
+			       ? "Commanded-Offline"
+			       : PhysicalDeviceInfo->PhysicalDeviceState
+				 == DAC960_V2_Device_Standby
+				 ? "Standby" : "Unknown"),
+		  PhysicalDeviceInfo->ConfigurableDeviceSize);
+      if (PhysicalDeviceInfo->ParityErrors == 0 &&
+	  PhysicalDeviceInfo->SoftErrors == 0 &&
+	  PhysicalDeviceInfo->HardErrors == 0 &&
+	  PhysicalDeviceInfo->MiscellaneousErrors == 0 &&
+	  PhysicalDeviceInfo->CommandTimeouts == 0 &&
+	  PhysicalDeviceInfo->Retries == 0 &&
+	  PhysicalDeviceInfo->Aborts == 0 &&
+	  PhysicalDeviceInfo->PredictedFailuresDetected == 0)
+	continue;
+      DAC960_Info("         Errors - Parity: %d, Soft: %d, "
+		  "Hard: %d, Misc: %d\n", Controller,
+		  PhysicalDeviceInfo->ParityErrors,
+		  PhysicalDeviceInfo->SoftErrors,
+		  PhysicalDeviceInfo->HardErrors,
+		  PhysicalDeviceInfo->MiscellaneousErrors);
+      DAC960_Info("                  Timeouts: %d, Retries: %d, "
+		  "Aborts: %d, Predicted: %d\n", Controller,
+		  PhysicalDeviceInfo->CommandTimeouts,
+		  PhysicalDeviceInfo->Retries,
+		  PhysicalDeviceInfo->Aborts,
+		  PhysicalDeviceInfo->PredictedFailuresDetected);
+    }
+  DAC960_Info("  Logical Drives:\n", Controller);
+  for (LogicalDriveNumber = 0;
+       LogicalDriveNumber < DAC960_MaxLogicalDrives;
+       LogicalDriveNumber++)
+    {
+      DAC960_V2_LogicalDeviceInfo_T *LogicalDeviceInfo =
+	Controller->V2.LogicalDeviceInformation[LogicalDriveNumber];
+      static char *ReadCacheStatus[] = { "Read Cache Disabled",
+					 "Read Cache Enabled",
+					 "Read Ahead Enabled",
+					 "Intelligent Read Ahead Enabled",
+					 "-", "-", "-", "-" };
+      static char *WriteCacheStatus[] = { "Write Cache Disabled",
+					  "Logical Device Read Only",
+					  "Write Cache Enabled",
+					  "Intelligent Write Cache Enabled",
+					  "-", "-", "-", "-" };
+      char *GeometryTranslation;
+      if (LogicalDeviceInfo == NULL) continue;
+      switch (LogicalDeviceInfo->DriveGeometry)
+	{
+	case DAC960_V2_Geometry_128_32:
+	  GeometryTranslation = "128/32";
+	  break;
+	case DAC960_V2_Geometry_255_63:
+	  GeometryTranslation = "255/63";
+	  break;
+	default:
+	  GeometryTranslation = "Invalid";
+	  DAC960_Error("Illegal Logical Device Geometry %d\n",
+		       Controller, LogicalDeviceInfo->DriveGeometry);
+	  break;
+	}
+      DAC960_Info("    /dev/rd/c%dd%d: RAID-%d, %s, %u blocks\n",
+		  Controller, Controller->ControllerNumber, LogicalDriveNumber,
+		  LogicalDeviceInfo->RAIDLevel,
+		  (LogicalDeviceInfo->LogicalDeviceState
+		   == DAC960_V2_LogicalDevice_Online
+		   ? "Online"
+		   : LogicalDeviceInfo->LogicalDeviceState
+		     == DAC960_V2_LogicalDevice_Critical
+		     ? "Critical" : "Offline"),
+		  LogicalDeviceInfo->ConfigurableDeviceSize);
+      DAC960_Info("                  Logical Device %s, BIOS Geometry: %s\n",
+		  Controller,
+		  (LogicalDeviceInfo->LogicalDeviceControl
+				     .LogicalDeviceInitialized
+		   ? "Initialized" : "Uninitialized"),
+		  GeometryTranslation);
+      if (LogicalDeviceInfo->StripeSize == 0)
+	{
+	  if (LogicalDeviceInfo->CacheLineSize == 0)
+	    DAC960_Info("                  Stripe Size: N/A, "
+			"Segment Size: N/A\n", Controller);
+	  else
+	    DAC960_Info("                  Stripe Size: N/A, "
+			"Segment Size: %dKB\n", Controller,
+			1 << (LogicalDeviceInfo->CacheLineSize - 2));
+	}
+      else
+	{
+	  if (LogicalDeviceInfo->CacheLineSize == 0)
+	    DAC960_Info("                  Stripe Size: %dKB, "
+			"Segment Size: N/A\n", Controller,
+			1 << (LogicalDeviceInfo->StripeSize - 2));
+	  else
+	    DAC960_Info("                  Stripe Size: %dKB, "
+			"Segment Size: %dKB\n", Controller,
+			1 << (LogicalDeviceInfo->StripeSize - 2),
+			1 << (LogicalDeviceInfo->CacheLineSize - 2));
+	}
+      DAC960_Info("                  %s, %s\n", Controller,
+		  ReadCacheStatus[
+		    LogicalDeviceInfo->LogicalDeviceControl.ReadCache],
+		  WriteCacheStatus[
+		    LogicalDeviceInfo->LogicalDeviceControl.WriteCache]);
+      if (LogicalDeviceInfo->SoftErrors > 0 ||
+	  LogicalDeviceInfo->CommandsFailed > 0 ||
+	  LogicalDeviceInfo->DeferredWriteErrors)
+	DAC960_Info("                  Errors - Soft: %d, Failed: %d, "
+		    "Deferred Write: %d\n", Controller,
+		    LogicalDeviceInfo->SoftErrors,
+		    LogicalDeviceInfo->CommandsFailed,
+		    LogicalDeviceInfo->DeferredWriteErrors);
+
+    }
+  return true;
+}
+
+/*
+  DAC960_RegisterBlockDevice registers the Block Device structures
+  associated with Controller.
+*/
+
+static boolean DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller)
+{
+  int MajorNumber = DAC960_MAJOR + Controller->ControllerNumber;
+  int n;
+
+  /*
+    Register the Block Device Major Number for this DAC960 Controller.
+  */
+  if (register_blkdev(MajorNumber, "dac960") < 0)
+      return false;
+
+  for (n = 0; n < DAC960_MaxLogicalDrives; n++) {
+	struct gendisk *disk = Controller->disks[n];
+  	struct request_queue *RequestQueue;
+
+	/* For now, let all request queues share the controller's lock */
+  	RequestQueue = blk_init_queue(DAC960_RequestFunction,&Controller->queue_lock);
+  	if (!RequestQueue) {
+		printk("DAC960: failed to allocate a request queue\n");
+		continue;
+  	}
+  	Controller->RequestQueue[n] = RequestQueue;
+  	blk_queue_bounce_limit(RequestQueue, Controller->BounceBufferLimit);
+  	RequestQueue->queuedata = Controller;
+  	blk_queue_max_hw_segments(RequestQueue, Controller->DriverScatterGatherLimit);
+	blk_queue_max_phys_segments(RequestQueue, Controller->DriverScatterGatherLimit);
+	blk_queue_max_sectors(RequestQueue, Controller->MaxBlocksPerCommand);
+	disk->queue = RequestQueue;
+	sprintf(disk->disk_name, "rd/c%dd%d", Controller->ControllerNumber, n);
+	sprintf(disk->devfs_name, "rd/host%d/target%d", Controller->ControllerNumber, n);
+	disk->major = MajorNumber;
+	disk->first_minor = n << DAC960_MaxPartitionsBits;
+	disk->fops = &DAC960_BlockDeviceOperations;
+   }
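+  /*
+    Note that add_disk() is not called here; the disks are expected to
+    be activated later, once the Logical Drive capacities are known.
+  */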
+  /*
+    Indicate that the Block Device Registration completed successfully.
+  */
+  return true;
+}
+
+
+/*
+  DAC960_UnregisterBlockDevice unregisters the Block Device structures
+  associated with Controller.
+*/
+
+static void DAC960_UnregisterBlockDevice(DAC960_Controller_T *Controller)
+{
+  int MajorNumber = DAC960_MAJOR + Controller->ControllerNumber;
+  int disk;
+
+  /* Does the order of del_gendisk() vs. blk_cleanup_queue() matter here? */
+  for (disk = 0; disk < DAC960_MaxLogicalDrives; disk++) {
+	del_gendisk(Controller->disks[disk]);
+	blk_cleanup_queue(Controller->RequestQueue[disk]);
+	Controller->RequestQueue[disk] = NULL;
+  }
+
+  /*
+    Unregister the Block Device Major Number for this DAC960 Controller.
+  */
+  unregister_blkdev(MajorNumber, "dac960");
+}
+
+/*
+  DAC960_ComputeGenericDiskInfo sets the capacity of each Logical
+  Drive's gendisk.  DAC960_BlockSize matches the kernel's 512-byte
+  sector unit, so the controller's block counts can be passed to
+  set_capacity() directly.
+*/
+
+static void DAC960_ComputeGenericDiskInfo(DAC960_Controller_T *Controller)
+{
+	int disk;
+	for (disk = 0; disk < DAC960_MaxLogicalDrives; disk++)
+		set_capacity(Controller->disks[disk], disk_size(Controller, disk));
+}
+
+/*
+  DAC960_ReportErrorStatus reports Controller BIOS Messages passed through
+  the Error Status Register when the driver performs the BIOS handshaking.
+  It returns true for fatal errors and false otherwise.
+*/
+
+static boolean DAC960_ReportErrorStatus(DAC960_Controller_T *Controller,
+					unsigned char ErrorStatus,
+					unsigned char Parameter0,
+					unsigned char Parameter1)
+{
+  switch (ErrorStatus)
+    {
+    case 0x00:
+      DAC960_Notice("Physical Device %d:%d Not Responding\n",
+		    Controller, Parameter1, Parameter0);
+      break;
+    case 0x08:
+      if (Controller->DriveSpinUpMessageDisplayed) break;
+      DAC960_Notice("Spinning Up Drives\n", Controller);
+      Controller->DriveSpinUpMessageDisplayed = true;
+      break;
+    case 0x30:
+      DAC960_Notice("Configuration Checksum Error\n", Controller);
+      break;
+    case 0x60:
+      DAC960_Notice("Mirror Race Recovery Failed\n", Controller);
+      break;
+    case 0x70:
+      DAC960_Notice("Mirror Race Recovery In Progress\n", Controller);
+      break;
+    case 0x90:
+      DAC960_Notice("Physical Device %d:%d COD Mismatch\n",
+		    Controller, Parameter1, Parameter0);
+      break;
+    case 0xA0:
+      DAC960_Notice("Logical Drive Installation Aborted\n", Controller);
+      break;
+    case 0xB0:
+      DAC960_Notice("Mirror Race On A Critical Logical Drive\n", Controller);
+      break;
+    case 0xD0:
+      DAC960_Notice("New Controller Configuration Found\n", Controller);
+      break;
+    case 0xF0:
+      DAC960_Error("Fatal Memory Parity Error for Controller at\n", Controller);
+      return true;
+    default:
+      DAC960_Error("Unknown Initialization Error %02X for Controller at\n",
+		   Controller, ErrorStatus);
+      return true;
+    }
+  return false;
+}
+
+
+/*
+ * DAC960_DetectCleanup releases the resources that were allocated
+ * during DAC960_DetectController().  DAC960_DetectController has
+ * several internal failure points, so not ALL resources may have
+ * been allocated.  It's important to free only resources that HAVE
+ * been allocated.  The code below always tests that a resource was
+ * allocated before attempting to free it.
+ */
+static void DAC960_DetectCleanup(DAC960_Controller_T *Controller)
+{
+  int i;
+
+  /* Free the memory mailbox, status, and related structures */
+  free_dma_loaf(Controller->PCIDevice, &Controller->DmaPages);
+  if (Controller->MemoryMappedAddress) {
+  	switch(Controller->HardwareType)
+  	{
+		case DAC960_BA_Controller:
+			DAC960_BA_DisableInterrupts(Controller->BaseAddress);
+			break;
+		case DAC960_LP_Controller:
+			DAC960_LP_DisableInterrupts(Controller->BaseAddress);
+			break;
+		case DAC960_LA_Controller:
+			DAC960_LA_DisableInterrupts(Controller->BaseAddress);
+			break;
+		case DAC960_PG_Controller:
+			DAC960_PG_DisableInterrupts(Controller->BaseAddress);
+			break;
+		case DAC960_PD_Controller:
+		case DAC960_P_Controller:
+			DAC960_PD_DisableInterrupts(Controller->BaseAddress);
+			break;
+  	}
+  	iounmap(Controller->MemoryMappedAddress);
+  }
+  if (Controller->IRQ_Channel)
+  	free_irq(Controller->IRQ_Channel, Controller);
+  if (Controller->IO_Address)
+	release_region(Controller->IO_Address, 0x80);
+  pci_disable_device(Controller->PCIDevice);
+  for (i = 0; (i < DAC960_MaxLogicalDrives) && Controller->disks[i]; i++)
+       put_disk(Controller->disks[i]);
+  DAC960_Controllers[Controller->ControllerNumber] = NULL;
+  kfree(Controller);
+}
+
+
+/*
+  DAC960_DetectController detects Mylex DAC960/AcceleRAID/eXtremeRAID
+  PCI RAID Controllers by interrogating the PCI Configuration Space for
+  Controller Type.
+*/
+
+static DAC960_Controller_T * 
+DAC960_DetectController(struct pci_dev *PCI_Device,
+			const struct pci_device_id *entry)
+{
+  struct DAC960_privdata *privdata =
+	  	(struct DAC960_privdata *)entry->driver_data;
+  irqreturn_t (*InterruptHandler)(int, void *, struct pt_regs *) =
+	  	privdata->InterruptHandler;
+  unsigned int MemoryWindowSize = privdata->MemoryWindowSize;
+  DAC960_Controller_T *Controller = NULL;
+  unsigned char DeviceFunction = PCI_Device->devfn;
+  unsigned char ErrorStatus, Parameter0, Parameter1;
+  unsigned int IRQ_Channel;
+  void __iomem *BaseAddress;
+  int i;
+
+  Controller = (DAC960_Controller_T *)
+	kmalloc(sizeof(DAC960_Controller_T), GFP_ATOMIC);
+  if (Controller == NULL) {
+	DAC960_Error("Unable to allocate Controller structure for "
+                       "Controller at\n", NULL);
+	return NULL;
+  }
+  memset(Controller, 0, sizeof(DAC960_Controller_T));
+  Controller->ControllerNumber = DAC960_ControllerCount;
+  DAC960_Controllers[DAC960_ControllerCount++] = Controller;
+  Controller->Bus = PCI_Device->bus->number;
+  Controller->FirmwareType = privdata->FirmwareType;
+  Controller->HardwareType = privdata->HardwareType;
+  Controller->Device = DeviceFunction >> 3;
+  Controller->Function = DeviceFunction & 0x7;
+  Controller->PCIDevice = PCI_Device;
+  strcpy(Controller->FullModelName, "DAC960");
+
+  if (pci_enable_device(PCI_Device))
+	goto Failure;
+
+  switch (Controller->HardwareType)
+  {
+	case DAC960_BA_Controller:
+	case DAC960_LP_Controller:
+	case DAC960_LA_Controller:
+	case DAC960_PG_Controller:
+	  Controller->PCI_Address = pci_resource_start(PCI_Device, 0);
+	  break;
+	case DAC960_PD_Controller:
+	case DAC960_P_Controller:
+	  Controller->IO_Address = pci_resource_start(PCI_Device, 0);
+	  Controller->PCI_Address = pci_resource_start(PCI_Device, 1);
+	  break;
+  }
+
+  pci_set_drvdata(PCI_Device, (void *)((long)Controller->ControllerNumber));
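+  /* The drvdata field carries the controller index cast to a pointer,
+     not a real pointer; the remove path presumably casts it back the
+     same way. */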
+  for (i = 0; i < DAC960_MaxLogicalDrives; i++) {
+	Controller->disks[i] = alloc_disk(1<<DAC960_MaxPartitionsBits);
+	if (!Controller->disks[i])
+		goto Failure;
+	Controller->disks[i]->private_data = (void *)((long)i);
+  }
+  init_waitqueue_head(&Controller->CommandWaitQueue);
+  init_waitqueue_head(&Controller->HealthStatusWaitQueue);
+  spin_lock_init(&Controller->queue_lock);
+  DAC960_AnnounceDriver(Controller);
+  /*
+    Map the Controller Register Window.
+  */
+  if (MemoryWindowSize < PAGE_SIZE)
+	MemoryWindowSize = PAGE_SIZE;
+  Controller->MemoryMappedAddress =
+	ioremap_nocache(Controller->PCI_Address & PAGE_MASK, MemoryWindowSize);
+  if (Controller->MemoryMappedAddress == NULL)
+  {
+	  DAC960_Error("Unable to map Controller Register Window for "
+		       "Controller at\n", Controller);
+	  goto Failure;
+  }
+  /* Re-add the sub-page offset that the page-aligned mapping dropped. */
+  Controller->BaseAddress =
+	Controller->MemoryMappedAddress + (Controller->PCI_Address & ~PAGE_MASK);
+  BaseAddress = Controller->BaseAddress;
+  switch (Controller->HardwareType)
+  {
+	case DAC960_BA_Controller:
+	  DAC960_BA_DisableInterrupts(BaseAddress);
+	  DAC960_BA_AcknowledgeHardwareMailboxStatus(BaseAddress);
+	  udelay(1000);
+	  while (DAC960_BA_InitializationInProgressP(BaseAddress))
+	    {
+	      if (DAC960_BA_ReadErrorStatus(BaseAddress, &ErrorStatus,
+					    &Parameter0, &Parameter1) &&
+		  DAC960_ReportErrorStatus(Controller, ErrorStatus,
+					   Parameter0, Parameter1))
+		goto Failure;
+	      udelay(10);
+	    }
+	  if (!DAC960_V2_EnableMemoryMailboxInterface(Controller))
+	    {
+	      DAC960_Error("Unable to Enable Memory Mailbox Interface "
+			   "for Controller at\n", Controller);
+	      goto Failure;
+	    }
+	  DAC960_BA_EnableInterrupts(BaseAddress);
+	  Controller->QueueCommand = DAC960_BA_QueueCommand;
+	  Controller->ReadControllerConfiguration =
+	    DAC960_V2_ReadControllerConfiguration;
+	  Controller->ReadDeviceConfiguration =
+	    DAC960_V2_ReadDeviceConfiguration;
+	  Controller->ReportDeviceConfiguration =
+	    DAC960_V2_ReportDeviceConfiguration;
+	  Controller->QueueReadWriteCommand =
+	    DAC960_V2_QueueReadWriteCommand;
+	  break;
+	case DAC960_LP_Controller:
+	  DAC960_LP_DisableInterrupts(BaseAddress);
+	  DAC960_LP_AcknowledgeHardwareMailboxStatus(BaseAddress);
+	  udelay(1000);
+	  while (DAC960_LP_InitializationInProgressP(BaseAddress))
+	    {
+	      if (DAC960_LP_ReadErrorStatus(BaseAddress, &ErrorStatus,
+					    &Parameter0, &Parameter1) &&
+		  DAC960_ReportErrorStatus(Controller, ErrorStatus,
+					   Parameter0, Parameter1))
+		goto Failure;
+	      udelay(10);
+	    }
+	  if (!DAC960_V2_EnableMemoryMailboxInterface(Controller))
+	    {
+	      DAC960_Error("Unable to Enable Memory Mailbox Interface "
+			   "for Controller at\n", Controller);
+	      goto Failure;
+	    }
+	  DAC960_LP_EnableInterrupts(BaseAddress);
+	  Controller->QueueCommand = DAC960_LP_QueueCommand;
+	  Controller->ReadControllerConfiguration =
+	    DAC960_V2_ReadControllerConfiguration;
+	  Controller->ReadDeviceConfiguration =
+	    DAC960_V2_ReadDeviceConfiguration;
+	  Controller->ReportDeviceConfiguration =
+	    DAC960_V2_ReportDeviceConfiguration;
+	  Controller->QueueReadWriteCommand =
+	    DAC960_V2_QueueReadWriteCommand;
+	  break;
+	case DAC960_LA_Controller:
+	  DAC960_LA_DisableInterrupts(BaseAddress);
+	  DAC960_LA_AcknowledgeHardwareMailboxStatus(BaseAddress);
+	  udelay(1000);
+	  while (DAC960_LA_InitializationInProgressP(BaseAddress))
+	    {
+	      if (DAC960_LA_ReadErrorStatus(BaseAddress, &ErrorStatus,
+					    &Parameter0, &Parameter1) &&
+		  DAC960_ReportErrorStatus(Controller, ErrorStatus,
+					   Parameter0, Parameter1))
+		goto Failure;
+	      udelay(10);
+	    }
+	  if (!DAC960_V1_EnableMemoryMailboxInterface(Controller))
+	    {
+	      DAC960_Error("Unable to Enable Memory Mailbox Interface "
+			   "for Controller at\n", Controller);
+	      goto Failure;
+	    }
+	  DAC960_LA_EnableInterrupts(BaseAddress);
+	  if (Controller->V1.DualModeMemoryMailboxInterface)
+	    Controller->QueueCommand = DAC960_LA_QueueCommandDualMode;
+	  else Controller->QueueCommand = DAC960_LA_QueueCommandSingleMode;
+	  Controller->ReadControllerConfiguration =
+	    DAC960_V1_ReadControllerConfiguration;
+	  Controller->ReadDeviceConfiguration =
+	    DAC960_V1_ReadDeviceConfiguration;
+	  Controller->ReportDeviceConfiguration =
+	    DAC960_V1_ReportDeviceConfiguration;
+	  Controller->QueueReadWriteCommand =
+	    DAC960_V1_QueueReadWriteCommand;
+	  break;
+	case DAC960_PG_Controller:
+	  DAC960_PG_DisableInterrupts(BaseAddress);
+	  DAC960_PG_AcknowledgeHardwareMailboxStatus(BaseAddress);
+	  udelay(1000);
+	  while (DAC960_PG_InitializationInProgressP(BaseAddress))
+	    {
+	      if (DAC960_PG_ReadErrorStatus(BaseAddress, &ErrorStatus,
+					    &Parameter0, &Parameter1) &&
+		  DAC960_ReportErrorStatus(Controller, ErrorStatus,
+					   Parameter0, Parameter1))
+		goto Failure;
+	      udelay(10);
+	    }
+	  if (!DAC960_V1_EnableMemoryMailboxInterface(Controller))
+	    {
+	      DAC960_Error("Unable to Enable Memory Mailbox Interface "
+			   "for Controller at\n", Controller);
+	      goto Failure;
+	    }
+	  DAC960_PG_EnableInterrupts(BaseAddress);
+	  if (Controller->V1.DualModeMemoryMailboxInterface)
+	    Controller->QueueCommand = DAC960_PG_QueueCommandDualMode;
+	  else Controller->QueueCommand = DAC960_PG_QueueCommandSingleMode;
+	  Controller->ReadControllerConfiguration =
+	    DAC960_V1_ReadControllerConfiguration;
+	  Controller->ReadDeviceConfiguration =
+	    DAC960_V1_ReadDeviceConfiguration;
+	  Controller->ReportDeviceConfiguration =
+	    DAC960_V1_ReportDeviceConfiguration;
+	  Controller->QueueReadWriteCommand =
+	    DAC960_V1_QueueReadWriteCommand;
+	  break;
+	case DAC960_PD_Controller:
+	  if (!request_region(Controller->IO_Address, 0x80,
+			      Controller->FullModelName)) {
+		DAC960_Error("IO port 0x%X busy for Controller at\n",
+			     Controller, Controller->IO_Address);
+		goto Failure;
+	  }
+	  DAC960_PD_DisableInterrupts(BaseAddress);
+	  DAC960_PD_AcknowledgeStatus(BaseAddress);
+	  udelay(1000);
+	  while (DAC960_PD_InitializationInProgressP(BaseAddress))
+	    {
+	      if (DAC960_PD_ReadErrorStatus(BaseAddress, &ErrorStatus,
+					    &Parameter0, &Parameter1) &&
+		  DAC960_ReportErrorStatus(Controller, ErrorStatus,
+					   Parameter0, Parameter1))
+		goto Failure;
+	      udelay(10);
+	    }
+	  if (!DAC960_V1_EnableMemoryMailboxInterface(Controller))
+	    {
+	      DAC960_Error("Unable to allocate DMA mapped memory "
+			   "for Controller at\n", Controller);
+	      goto Failure;
+	    }
+	  DAC960_PD_EnableInterrupts(BaseAddress);
+	  Controller->QueueCommand = DAC960_PD_QueueCommand;
+	  Controller->ReadControllerConfiguration =
+	    DAC960_V1_ReadControllerConfiguration;
+	  Controller->ReadDeviceConfiguration =
+	    DAC960_V1_ReadDeviceConfiguration;
+	  Controller->ReportDeviceConfiguration =
+	    DAC960_V1_ReportDeviceConfiguration;
+	  Controller->QueueReadWriteCommand =
+	    DAC960_V1_QueueReadWriteCommand;
+	  break;
+	case DAC960_P_Controller:
+	  if (!request_region(Controller->IO_Address, 0x80,
+			      Controller->FullModelName)){
+		DAC960_Error("IO port 0x%X busy for Controller at\n",
+			     Controller, Controller->IO_Address);
+		goto Failure;
+	  }
+	  DAC960_PD_DisableInterrupts(BaseAddress);
+	  DAC960_PD_AcknowledgeStatus(BaseAddress);
+	  udelay(1000);
+	  while (DAC960_PD_InitializationInProgressP(BaseAddress))
+	    {
+	      if (DAC960_PD_ReadErrorStatus(BaseAddress, &ErrorStatus,
+					    &Parameter0, &Parameter1) &&
+		  DAC960_ReportErrorStatus(Controller, ErrorStatus,
+					   Parameter0, Parameter1))
+		goto Failure;
+	      udelay(10);
+	    }
+	  if (!DAC960_V1_EnableMemoryMailboxInterface(Controller))
+	    {
+	      DAC960_Error("Unable to allocate DMA mapped memory "
+			   "for Controller at\n", Controller);
+	      goto Failure;
+	    }
+	  DAC960_PD_EnableInterrupts(BaseAddress);
+	  Controller->QueueCommand = DAC960_P_QueueCommand;
+	  Controller->ReadControllerConfiguration =
+	    DAC960_V1_ReadControllerConfiguration;
+	  Controller->ReadDeviceConfiguration =
+	    DAC960_V1_ReadDeviceConfiguration;
+	  Controller->ReportDeviceConfiguration =
+	    DAC960_V1_ReportDeviceConfiguration;
+	  Controller->QueueReadWriteCommand =
+	    DAC960_V1_QueueReadWriteCommand;
+	  break;
+  }
+  /*
+     Acquire shared access to the IRQ Channel.
+  */
+  IRQ_Channel = PCI_Device->irq;
+  if (request_irq(IRQ_Channel, InterruptHandler, SA_SHIRQ,
+		      Controller->FullModelName, Controller) < 0)
+  {
+	DAC960_Error("Unable to acquire IRQ Channel %d for Controller at\n",
+		       Controller, IRQ_Channel);
+	goto Failure;
+  }
+  Controller->IRQ_Channel = IRQ_Channel;
+  Controller->InitialCommand.CommandIdentifier = 1;
+  Controller->InitialCommand.Controller = Controller;
+  Controller->Commands[0] = &Controller->InitialCommand;
+  Controller->FreeCommands = &Controller->InitialCommand;
+  return Controller;
+
+Failure:
+  if (Controller->IO_Address == 0)
+	DAC960_Error("PCI Bus %d Device %d Function %d I/O Address N/A "
+		     "PCI Address 0x%X\n", Controller,
+		     Controller->Bus, Controller->Device,
+		     Controller->Function, Controller->PCI_Address);
+  else
+	DAC960_Error("PCI Bus %d Device %d Function %d I/O Address "
+			"0x%X PCI Address 0x%X\n", Controller,
+			Controller->Bus, Controller->Device,
+			Controller->Function, Controller->IO_Address,
+			Controller->PCI_Address);
+  DAC960_DetectCleanup(Controller);
+  DAC960_ControllerCount--;
+  return NULL;
+}
+
+/*
+  DAC960_InitializeController initializes Controller.
+*/
+
+static boolean 
+DAC960_InitializeController(DAC960_Controller_T *Controller)
+{
+  if (DAC960_ReadControllerConfiguration(Controller) &&
+      DAC960_ReportControllerConfiguration(Controller) &&
+      DAC960_CreateAuxiliaryStructures(Controller) &&
+      DAC960_ReadDeviceConfiguration(Controller) &&
+      DAC960_ReportDeviceConfiguration(Controller) &&
+      DAC960_RegisterBlockDevice(Controller))
+    {
+      /*
+	Initialize the Monitoring Timer.
+      */
+      init_timer(&Controller->MonitoringTimer);
+      Controller->MonitoringTimer.expires =
+	jiffies + DAC960_MonitoringTimerInterval;
+      Controller->MonitoringTimer.data = (unsigned long) Controller;
+      Controller->MonitoringTimer.function = DAC960_MonitoringTimerFunction;
+      add_timer(&Controller->MonitoringTimer);
+      Controller->ControllerInitialized = true;
+      return true;
+    }
+  return false;
+}
+
+
+/*
+  DAC960_FinalizeController finalizes Controller.
+*/
+
+static void DAC960_FinalizeController(DAC960_Controller_T *Controller)
+{
+  if (Controller->ControllerInitialized)
+    {
+      unsigned long flags;
+
+      /*
+       * Acquiring and releasing lock here eliminates
+       * a very low probability race.
+       *
+       * The code below allocates controller command structures
+       * from the free list without holding the controller lock.
+       * This is safe assuming there is no other activity on
+       * the controller at the time.
+       * 
+       * But, there might be a monitoring command still
+       * in progress.  Setting the Shutdown flag while holding
+       * the lock ensures that there is no monitoring command
+       * in the interrupt handler currently, and any monitoring
+       * commands that complete from this time on will NOT return
+       * their command structure to the free list.
+       */
+
+      spin_lock_irqsave(&Controller->queue_lock, flags);
+      Controller->ShutdownMonitoringTimer = 1;
+      spin_unlock_irqrestore(&Controller->queue_lock, flags);
+
+      del_timer_sync(&Controller->MonitoringTimer);
+      if (Controller->FirmwareType == DAC960_V1_Controller)
+	{
+	  DAC960_Notice("Flushing Cache...", Controller);
+	  DAC960_V1_ExecuteType3(Controller, DAC960_V1_Flush, 0);
+	  DAC960_Notice("done\n", Controller);
+
+	  if (Controller->HardwareType == DAC960_PD_Controller)
+	      release_region(Controller->IO_Address, 0x80);
+	}
+      else
+	{
+	  DAC960_Notice("Flushing Cache...", Controller);
+	  DAC960_V2_DeviceOperation(Controller, DAC960_V2_PauseDevice,
+				    DAC960_V2_RAID_Controller);
+	  DAC960_Notice("done\n", Controller);
+	}
+    }
+  DAC960_UnregisterBlockDevice(Controller);
+  DAC960_DestroyAuxiliaryStructures(Controller);
+  DAC960_DestroyProcEntries(Controller);
+  DAC960_DetectCleanup(Controller);
+}
+
+
+/*
+  DAC960_Probe verifies controller's existence and
+  initializes the DAC960 Driver for that controller.
+*/
+
+static int 
+DAC960_Probe(struct pci_dev *dev, const struct pci_device_id *entry)
+{
+  int disk;
+  DAC960_Controller_T *Controller;
+
+  if (DAC960_ControllerCount == DAC960_MaxControllers)
+  {
+	DAC960_Error("More than %d DAC960 Controllers detected - "
+                       "ignoring from Controller at\n",
+                       NULL, DAC960_MaxControllers);
+	return -ENODEV;
+  }
+
+  Controller = DAC960_DetectController(dev, entry);
+  if (!Controller)
+	return -ENODEV;
+
+  if (!DAC960_InitializeController(Controller)) {
+  	DAC960_FinalizeController(Controller);
+	return -ENODEV;
+  }
+
+  for (disk = 0; disk < DAC960_MaxLogicalDrives; disk++) {
+        set_capacity(Controller->disks[disk], disk_size(Controller, disk));
+        add_disk(Controller->disks[disk]);
+  }
+  DAC960_CreateProcEntries(Controller);
+  return 0;
+}
+
+
+/*
+  DAC960_Remove finalizes the Controller associated with PCI_Device when
+  the device is removed or the driver is unloaded.
+*/
+
+static void DAC960_Remove(struct pci_dev *PCI_Device)
+{
+  int Controller_Number = (long)pci_get_drvdata(PCI_Device);
+  DAC960_Controller_T *Controller = DAC960_Controllers[Controller_Number];
+  if (Controller != NULL)
+      DAC960_FinalizeController(Controller);
+}
+
+
+/*
+  DAC960_V1_QueueReadWriteCommand prepares and queues a Read/Write Command for
+  DAC960 V1 Firmware Controllers.
+*/
+
+static void DAC960_V1_QueueReadWriteCommand(DAC960_Command_T *Command)
+{
+  DAC960_Controller_T *Controller = Command->Controller;
+  DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
+  DAC960_V1_ScatterGatherSegment_T *ScatterGatherList =
+					Command->V1.ScatterGatherList;
+  struct scatterlist *ScatterList = Command->V1.ScatterList;
+
+  DAC960_V1_ClearCommand(Command);
+
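+  /*
+    A single-segment transfer uses the direct Read/Write opcode and the
+    buffer's bus address; multi-segment transfers use the Scatter/Gather
+    opcodes and point the mailbox at the scatter/gather list instead.
+  */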
+  if (Command->SegmentCount == 1)
+    {
+      if (Command->DmaDirection == PCI_DMA_FROMDEVICE)
+	CommandMailbox->Type5.CommandOpcode = DAC960_V1_Read;
+      else 
+        CommandMailbox->Type5.CommandOpcode = DAC960_V1_Write;
+
+      CommandMailbox->Type5.LD.TransferLength = Command->BlockCount;
+      CommandMailbox->Type5.LD.LogicalDriveNumber = Command->LogicalDriveNumber;
+      CommandMailbox->Type5.LogicalBlockAddress = Command->BlockNumber;
+      CommandMailbox->Type5.BusAddress =
+			(DAC960_BusAddress32_T)sg_dma_address(ScatterList);	
+    }
+  else
+    {
+      int i;
+
+      if (Command->DmaDirection == PCI_DMA_FROMDEVICE)
+	CommandMailbox->Type5.CommandOpcode = DAC960_V1_ReadWithScatterGather;
+      else
+	CommandMailbox->Type5.CommandOpcode = DAC960_V1_WriteWithScatterGather;
+
+      CommandMailbox->Type5.LD.TransferLength = Command->BlockCount;
+      CommandMailbox->Type5.LD.LogicalDriveNumber = Command->LogicalDriveNumber;
+      CommandMailbox->Type5.LogicalBlockAddress = Command->BlockNumber;
+      CommandMailbox->Type5.BusAddress = Command->V1.ScatterGatherListDMA;
+
+      CommandMailbox->Type5.ScatterGatherCount = Command->SegmentCount;
+
+      for (i = 0; i < Command->SegmentCount; i++, ScatterList++, ScatterGatherList++) {
+		ScatterGatherList->SegmentDataPointer =
+			(DAC960_BusAddress32_T)sg_dma_address(ScatterList);
+		ScatterGatherList->SegmentByteCount =
+			(DAC960_ByteCount32_T)sg_dma_len(ScatterList);
+      }
+    }
+  DAC960_QueueCommand(Command);
+}
+
+
+/*
+  DAC960_V2_QueueReadWriteCommand prepares and queues a Read/Write Command for
+  DAC960 V2 Firmware Controllers.
+*/
+
+static void DAC960_V2_QueueReadWriteCommand(DAC960_Command_T *Command)
+{
+  DAC960_Controller_T *Controller = Command->Controller;
+  DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox;
+  struct scatterlist *ScatterList = Command->V2.ScatterList;
+
+  DAC960_V2_ClearCommand(Command);
+
+  CommandMailbox->SCSI_10.CommandOpcode = DAC960_V2_SCSI_10;
+  CommandMailbox->SCSI_10.CommandControlBits.DataTransferControllerToHost =
+    (Command->DmaDirection == PCI_DMA_FROMDEVICE);
+  CommandMailbox->SCSI_10.DataTransferSize =
+    Command->BlockCount << DAC960_BlockSizeBits;
+  CommandMailbox->SCSI_10.RequestSenseBusAddress = Command->V2.RequestSenseDMA;
+  CommandMailbox->SCSI_10.PhysicalDevice =
+    Controller->V2.LogicalDriveToVirtualDevice[Command->LogicalDriveNumber];
+  CommandMailbox->SCSI_10.RequestSenseSize = sizeof(DAC960_SCSI_RequestSense_T);
+  CommandMailbox->SCSI_10.CDBLength = 10;
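+  /*
+    Build the ten-byte READ(10) (0x28) or WRITE(10) (0x2A) CDB: the logical
+    block address occupies bytes 2-5 (big-endian) and the transfer length
+    in blocks occupies bytes 7-8.
+  */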
+  CommandMailbox->SCSI_10.SCSI_CDB[0] =
+    (Command->DmaDirection == PCI_DMA_FROMDEVICE ? 0x28 : 0x2A);
+  CommandMailbox->SCSI_10.SCSI_CDB[2] = Command->BlockNumber >> 24;
+  CommandMailbox->SCSI_10.SCSI_CDB[3] = Command->BlockNumber >> 16;
+  CommandMailbox->SCSI_10.SCSI_CDB[4] = Command->BlockNumber >> 8;
+  CommandMailbox->SCSI_10.SCSI_CDB[5] = Command->BlockNumber;
+  CommandMailbox->SCSI_10.SCSI_CDB[7] = Command->BlockCount >> 8;
+  CommandMailbox->SCSI_10.SCSI_CDB[8] = Command->BlockCount;
+
+  if (Command->SegmentCount == 1)
+    {
+      CommandMailbox->SCSI_10.DataTransferMemoryAddress
+			     .ScatterGatherSegments[0]
+			     .SegmentDataPointer =
+	(DAC960_BusAddress64_T)sg_dma_address(ScatterList);
+      CommandMailbox->SCSI_10.DataTransferMemoryAddress
+			     .ScatterGatherSegments[0]
+			     .SegmentByteCount =
+	CommandMailbox->SCSI_10.DataTransferSize;
+    }
+  else
+    {
+      DAC960_V2_ScatterGatherSegment_T *ScatterGatherList;
+      int i;
+
+      if (Command->SegmentCount > 2)
+	{
+          ScatterGatherList = Command->V2.ScatterGatherList;
+	  CommandMailbox->SCSI_10.CommandControlBits
+			 .AdditionalScatterGatherListMemory = true;
+	  CommandMailbox->SCSI_10.DataTransferMemoryAddress
+		.ExtendedScatterGather.ScatterGatherList0Length = Command->SegmentCount;
+	  CommandMailbox->SCSI_10.DataTransferMemoryAddress
+			 .ExtendedScatterGather.ScatterGatherList0Address =
+	    Command->V2.ScatterGatherListDMA;
+	}
+      else
+	ScatterGatherList = CommandMailbox->SCSI_10.DataTransferMemoryAddress
+				 .ScatterGatherSegments;
+
+      for (i = 0; i < Command->SegmentCount; i++, ScatterList++, ScatterGatherList++) {
+		ScatterGatherList->SegmentDataPointer =
+			(DAC960_BusAddress64_T)sg_dma_address(ScatterList);
+		ScatterGatherList->SegmentByteCount =
+			(DAC960_ByteCount64_T)sg_dma_len(ScatterList);
+      }
+    }
+  DAC960_QueueCommand(Command);
+}
+
+
+static int DAC960_process_queue(DAC960_Controller_T *Controller, struct request_queue *req_q)
+{
+	struct request *Request;
+	DAC960_Command_T *Command;
+
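+	/*
+	  Drain the request queue: return 1 once the queue is empty, or 0 if
+	  no free Command is available, so the caller can resume this queue
+	  later.
+	*/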
+	while (1) {
+	Request = elv_next_request(req_q);
+	if (!Request)
+		return 1;
+
+	Command = DAC960_AllocateCommand(Controller);
+	if (Command == NULL)
+		return 0;
+
+	if (rq_data_dir(Request) == READ) {
+		Command->DmaDirection = PCI_DMA_FROMDEVICE;
+		Command->CommandType = DAC960_ReadCommand;
+	} else {
+		Command->DmaDirection = PCI_DMA_TODEVICE;
+		Command->CommandType = DAC960_WriteCommand;
+	}
+	Command->Completion = Request->waiting;
+	Command->LogicalDriveNumber = (long)Request->rq_disk->private_data;
+	Command->BlockNumber = Request->sector;
+	Command->BlockCount = Request->nr_sectors;
+	Command->Request = Request;
+	blkdev_dequeue_request(Request);
+	Command->SegmentCount = blk_rq_map_sg(req_q,
+		  Command->Request, Command->cmd_sglist);
+	/* pci_map_sg may coalesce entries and so change SegmentCount */
+	Command->SegmentCount = pci_map_sg(Controller->PCIDevice, Command->cmd_sglist,
+		 Command->SegmentCount, Command->DmaDirection);
+
+	DAC960_QueueReadWriteCommand(Command);
+  }
+}
+
+/*
+  DAC960_ProcessRequest removes I/O Requests from Controller's Request
+  Queues and queues them to the Controller, resuming with the queue on
+  which it last ran out of free Commands.
+*/
+static void DAC960_ProcessRequest(DAC960_Controller_T *controller)
+{
+	int i;
+
+	if (!controller->ControllerInitialized)
+		return;
+
+	/* Do this better later! */
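+	/*
+	  Scan the per-drive queues round-robin: first from the saved index
+	  to the last queue, then wrap around from queue 0 back up to the
+	  starting point.
+	*/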
+	for (i = controller->req_q_index; i < DAC960_MaxLogicalDrives; i++) {
+		struct request_queue *req_q = controller->RequestQueue[i];
+
+		if (req_q == NULL)
+			continue;
+
+		if (!DAC960_process_queue(controller, req_q)) {
+			controller->req_q_index = i;
+			return;
+		}
+	}
+
+	if (controller->req_q_index == 0)
+		return;
+
+	for (i = 0; i < controller->req_q_index; i++) {
+		struct request_queue *req_q = controller->RequestQueue[i];
+
+		if (req_q == NULL)
+			continue;
+
+		if (!DAC960_process_queue(controller, req_q)) {
+			controller->req_q_index = i;
+			return;
+		}
+	}
+}
+
+
+/*
+  DAC960_queue_partial_rw extracts one bio from the request already
+  associated with argument command, constructs a new command block to retry
+  I/O only on that bio, and queues that command to the controller.
+
+  This function re-uses a previously-allocated Command, so there is no
+  failure mode from trying to allocate a command.
+*/
+
+static void DAC960_queue_partial_rw(DAC960_Command_T *Command)
+{
+  DAC960_Controller_T *Controller = Command->Controller;
+  struct request *Request = Command->Request;
+  struct request_queue *req_q = Controller->RequestQueue[Command->LogicalDriveNumber];
+
+  if (Command->DmaDirection == PCI_DMA_FROMDEVICE)
+    Command->CommandType = DAC960_ReadRetryCommand;
+  else
+    Command->CommandType = DAC960_WriteRetryCommand;
+
+  /*
+   * We could be more efficient with these mapping requests
+   * and map only the portions that we need.  But since this
+   * code should almost never be called, just go with a
+   * simple coding.
+   */
+  (void)blk_rq_map_sg(req_q, Command->Request, Command->cmd_sglist);
+
+  (void)pci_map_sg(Controller->PCIDevice, Command->cmd_sglist, 1, Command->DmaDirection);
+  /*
+   * Resubmitting the request sector at a time is really tedious.
+   * But, this should almost never happen.  So, we're willing to pay
+   * this price so that in the end, as much of the transfer is completed
+   * successfully as possible.
+   */
+  Command->SegmentCount = 1;
+  Command->BlockNumber = Request->sector;
+  Command->BlockCount = 1;
+  DAC960_QueueReadWriteCommand(Command);
+  return;
+}
+
+/*
+  DAC960_RequestFunction is the I/O Request Function for DAC960 Controllers.
+*/
+
+static void DAC960_RequestFunction(struct request_queue *RequestQueue)
+{
+	DAC960_ProcessRequest(RequestQueue->queuedata);
+}
+
+/*
+  DAC960_ProcessCompletedRequest performs completion processing for an
+  individual Request.
+*/
+
+static inline boolean DAC960_ProcessCompletedRequest(DAC960_Command_T *Command,
+						 boolean SuccessfulIO)
+{
+	struct request *Request = Command->Request;
+	int UpToDate;
+
+	UpToDate = 0;
+	if (SuccessfulIO)
+		UpToDate = 1;
+
+	pci_unmap_sg(Command->Controller->PCIDevice, Command->cmd_sglist,
+		Command->SegmentCount, Command->DmaDirection);
+
+	if (!end_that_request_first(Request, UpToDate, Command->BlockCount)) {
+		end_that_request_last(Request);
+
+		if (Command->Completion) {
+			complete(Command->Completion);
+			Command->Completion = NULL;
+		}
+		return true;
+	}
+	return false;
+}
+
+/*
+  DAC960_V1_ReadWriteError prints an appropriate error message for Command
+  when an error occurs on a Read or Write operation.
+*/
+
+static void DAC960_V1_ReadWriteError(DAC960_Command_T *Command)
+{
+  DAC960_Controller_T *Controller = Command->Controller;
+  unsigned char *CommandName = "UNKNOWN";
+  switch (Command->CommandType)
+    {
+    case DAC960_ReadCommand:
+    case DAC960_ReadRetryCommand:
+      CommandName = "READ";
+      break;
+    case DAC960_WriteCommand:
+    case DAC960_WriteRetryCommand:
+      CommandName = "WRITE";
+      break;
+    case DAC960_MonitoringCommand:
+    case DAC960_ImmediateCommand:
+    case DAC960_QueuedCommand:
+      break;
+    }
+  switch (Command->V1.CommandStatus)
+    {
+    case DAC960_V1_IrrecoverableDataError:
+      DAC960_Error("Irrecoverable Data Error on %s:\n",
+		   Controller, CommandName);
+      break;
+    case DAC960_V1_LogicalDriveNonexistentOrOffline:
+      DAC960_Error("Logical Drive Nonexistent or Offline on %s:\n",
+		   Controller, CommandName);
+      break;
+    case DAC960_V1_AccessBeyondEndOfLogicalDrive:
+      DAC960_Error("Attempt to Access Beyond End of Logical Drive "
+		   "on %s:\n", Controller, CommandName);
+      break;
+    case DAC960_V1_BadDataEncountered:
+      DAC960_Error("Bad Data Encountered on %s:\n", Controller, CommandName);
+      break;
+    default:
+      DAC960_Error("Unexpected Error Status %04X on %s:\n",
+		   Controller, Command->V1.CommandStatus, CommandName);
+      break;
+    }
+  DAC960_Error("  /dev/rd/c%dd%d:   absolute blocks %u..%u\n",
+	       Controller, Controller->ControllerNumber,
+	       Command->LogicalDriveNumber, Command->BlockNumber,
+	       Command->BlockNumber + Command->BlockCount - 1);
+}
+
+
+/*
+  DAC960_V1_ProcessCompletedCommand performs completion processing for Command
+  for DAC960 V1 Firmware Controllers.
+*/
+
+static void DAC960_V1_ProcessCompletedCommand(DAC960_Command_T *Command)
+{
+  DAC960_Controller_T *Controller = Command->Controller;
+  DAC960_CommandType_T CommandType = Command->CommandType;
+  DAC960_V1_CommandOpcode_T CommandOpcode =
+    Command->V1.CommandMailbox.Common.CommandOpcode;
+  DAC960_V1_CommandStatus_T CommandStatus = Command->V1.CommandStatus;
+
+  if (CommandType == DAC960_ReadCommand ||
+      CommandType == DAC960_WriteCommand)
+    {
+
+#ifdef FORCE_RETRY_DEBUG
+      CommandStatus = DAC960_V1_IrrecoverableDataError;
+#endif
+
+      if (CommandStatus == DAC960_V1_NormalCompletion) {
+
+		if (!DAC960_ProcessCompletedRequest(Command, true))
+			BUG();
+
+      } else if (CommandStatus == DAC960_V1_IrrecoverableDataError ||
+		CommandStatus == DAC960_V1_BadDataEncountered)
+	{
+	  /*
+	   * break the command down into pieces and resubmit each
+	   * piece, hoping that some of them will succeed.
+	   */
+	   DAC960_queue_partial_rw(Command);
+	   return;
+	}
+      else
+	{
+	  if (CommandStatus != DAC960_V1_LogicalDriveNonexistentOrOffline)
+	    DAC960_V1_ReadWriteError(Command);
+
+	 if (!DAC960_ProcessCompletedRequest(Command, false))
+		BUG();
+	}
+    }
+  else if (CommandType == DAC960_ReadRetryCommand ||
+	   CommandType == DAC960_WriteRetryCommand)
+    {
+      boolean normal_completion;
+#ifdef FORCE_RETRY_FAILURE_DEBUG
+      static int retry_count = 1;
+#endif
+      /*
+        Perform completion processing for the portion that was
+        retried, and submit the next portion, if any.
+      */
+      normal_completion = true;
+      if (CommandStatus != DAC960_V1_NormalCompletion) {
+        normal_completion = false;
+        if (CommandStatus != DAC960_V1_LogicalDriveNonexistentOrOffline)
+            DAC960_V1_ReadWriteError(Command);
+      }
+
+#ifdef FORCE_RETRY_FAILURE_DEBUG
+      if (!(++retry_count % 10000)) {
+	      printk("V1 error retry failure test\n");
+	      normal_completion = false;
+              DAC960_V1_ReadWriteError(Command);
+      }
+#endif
+
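+      /*
+        If part of the request remains uncompleted, resubmit the next
+        single-sector piece; otherwise the request has been completed in
+        full and the Command is released below.
+      */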
+      if (!DAC960_ProcessCompletedRequest(Command, normal_completion)) {
+        DAC960_queue_partial_rw(Command);
+        return;
+      }
+    }
+
+  else if (CommandType == DAC960_MonitoringCommand)
+    {
+      if (Controller->ShutdownMonitoringTimer)
+	      return;
+      if (CommandOpcode == DAC960_V1_Enquiry)
+	{
+	  DAC960_V1_Enquiry_T *OldEnquiry = &Controller->V1.Enquiry;
+	  DAC960_V1_Enquiry_T *NewEnquiry = Controller->V1.NewEnquiry;
+	  unsigned int OldCriticalLogicalDriveCount =
+	    OldEnquiry->CriticalLogicalDriveCount;
+	  unsigned int NewCriticalLogicalDriveCount =
+	    NewEnquiry->CriticalLogicalDriveCount;
+	  if (NewEnquiry->NumberOfLogicalDrives > Controller->LogicalDriveCount)
+	    {
+	      int LogicalDriveNumber = Controller->LogicalDriveCount - 1;
+	      while (++LogicalDriveNumber < NewEnquiry->NumberOfLogicalDrives)
+		DAC960_Critical("Logical Drive %d (/dev/rd/c%dd%d) "
+				"Now Exists\n", Controller,
+				LogicalDriveNumber,
+				Controller->ControllerNumber,
+				LogicalDriveNumber);
+	      Controller->LogicalDriveCount = NewEnquiry->NumberOfLogicalDrives;
+	      DAC960_ComputeGenericDiskInfo(Controller);
+	    }
+	  if (NewEnquiry->NumberOfLogicalDrives < Controller->LogicalDriveCount)
+	    {
+	      int LogicalDriveNumber = NewEnquiry->NumberOfLogicalDrives - 1;
+	      while (++LogicalDriveNumber < Controller->LogicalDriveCount)
+		DAC960_Critical("Logical Drive %d (/dev/rd/c%dd%d) "
+				"No Longer Exists\n", Controller,
+				LogicalDriveNumber,
+				Controller->ControllerNumber,
+				LogicalDriveNumber);
+	      Controller->LogicalDriveCount = NewEnquiry->NumberOfLogicalDrives;
+	      DAC960_ComputeGenericDiskInfo(Controller);
+	    }
+	  if (NewEnquiry->StatusFlags.DeferredWriteError !=
+	      OldEnquiry->StatusFlags.DeferredWriteError)
+	    DAC960_Critical("Deferred Write Error Flag is now %s\n", Controller,
+			    (NewEnquiry->StatusFlags.DeferredWriteError
+			     ? "TRUE" : "FALSE"));
+	  if ((NewCriticalLogicalDriveCount > 0 ||
+	       NewCriticalLogicalDriveCount != OldCriticalLogicalDriveCount) ||
+	      (NewEnquiry->OfflineLogicalDriveCount > 0 ||
+	       NewEnquiry->OfflineLogicalDriveCount !=
+	       OldEnquiry->OfflineLogicalDriveCount) ||
+	      (NewEnquiry->DeadDriveCount > 0 ||
+	       NewEnquiry->DeadDriveCount !=
+	       OldEnquiry->DeadDriveCount) ||
+	      (NewEnquiry->EventLogSequenceNumber !=
+	       OldEnquiry->EventLogSequenceNumber) ||
+	      Controller->MonitoringTimerCount == 0 ||
+	      (jiffies - Controller->SecondaryMonitoringTime
+	       >= DAC960_SecondaryMonitoringInterval))
+	    {
+	      Controller->V1.NeedLogicalDriveInformation = true;
+	      Controller->V1.NewEventLogSequenceNumber =
+		NewEnquiry->EventLogSequenceNumber;
+	      Controller->V1.NeedErrorTableInformation = true;
+	      Controller->V1.NeedDeviceStateInformation = true;
+	      Controller->V1.StartDeviceStateScan = true;
+	      Controller->V1.NeedBackgroundInitializationStatus =
+		Controller->V1.BackgroundInitializationStatusSupported;
+	      Controller->SecondaryMonitoringTime = jiffies;
+	    }
+	  if (NewEnquiry->RebuildFlag == DAC960_V1_StandbyRebuildInProgress ||
+	      NewEnquiry->RebuildFlag
+	      == DAC960_V1_BackgroundRebuildInProgress ||
+	      OldEnquiry->RebuildFlag == DAC960_V1_StandbyRebuildInProgress ||
+	      OldEnquiry->RebuildFlag == DAC960_V1_BackgroundRebuildInProgress)
+	    {
+	      Controller->V1.NeedRebuildProgress = true;
+	      Controller->V1.RebuildProgressFirst =
+		(NewEnquiry->CriticalLogicalDriveCount <
+		 OldEnquiry->CriticalLogicalDriveCount);
+	    }
+	  if (OldEnquiry->RebuildFlag == DAC960_V1_BackgroundCheckInProgress)
+	    switch (NewEnquiry->RebuildFlag)
+	      {
+	      case DAC960_V1_NoStandbyRebuildOrCheckInProgress:
+		DAC960_Progress("Consistency Check Completed Successfully\n",
+				Controller);
+		break;
+	      case DAC960_V1_StandbyRebuildInProgress:
+	      case DAC960_V1_BackgroundRebuildInProgress:
+		break;
+	      case DAC960_V1_BackgroundCheckInProgress:
+		Controller->V1.NeedConsistencyCheckProgress = true;
+		break;
+	      case DAC960_V1_StandbyRebuildCompletedWithError:
+		DAC960_Progress("Consistency Check Completed with Error\n",
+				Controller);
+		break;
+	      case DAC960_V1_BackgroundRebuildOrCheckFailed_DriveFailed:
+		DAC960_Progress("Consistency Check Failed - "
+				"Physical Device Failed\n", Controller);
+		break;
+	      case DAC960_V1_BackgroundRebuildOrCheckFailed_LogicalDriveFailed:
+		DAC960_Progress("Consistency Check Failed - "
+				"Logical Drive Failed\n", Controller);
+		break;
+	      case DAC960_V1_BackgroundRebuildOrCheckFailed_OtherCauses:
+		DAC960_Progress("Consistency Check Failed - Other Causes\n",
+				Controller);
+		break;
+	      case DAC960_V1_BackgroundRebuildOrCheckSuccessfullyTerminated:
+		DAC960_Progress("Consistency Check Successfully Terminated\n",
+				Controller);
+		break;
+	      }
+	  else if (NewEnquiry->RebuildFlag
+		   == DAC960_V1_BackgroundCheckInProgress)
+	    Controller->V1.NeedConsistencyCheckProgress = true;
+	  Controller->MonitoringAlertMode =
+	    (NewEnquiry->CriticalLogicalDriveCount > 0 ||
+	     NewEnquiry->OfflineLogicalDriveCount > 0 ||
+	     NewEnquiry->DeadDriveCount > 0);
+	  if (NewEnquiry->RebuildFlag > DAC960_V1_BackgroundCheckInProgress)
+	    {
+	      Controller->V1.PendingRebuildFlag = NewEnquiry->RebuildFlag;
+	      Controller->V1.RebuildFlagPending = true;
+	    }
+	  memcpy(&Controller->V1.Enquiry, Controller->V1.NewEnquiry,
+		 sizeof(DAC960_V1_Enquiry_T));
+	}
+      else if (CommandOpcode == DAC960_V1_PerformEventLogOperation)
+	{
+	  static char
+	    *DAC960_EventMessages[] =
+	       { "killed because write recovery failed",
+		 "killed because of SCSI bus reset failure",
+		 "killed because of double check condition",
+		 "killed because it was removed",
+		 "killed because of gross error on SCSI chip",
+		 "killed because of bad tag returned from drive",
+		 "killed because of timeout on SCSI command",
+		 "killed because of reset SCSI command issued from system",
+		 "killed because busy or parity error count exceeded limit",
+		 "killed because of 'kill drive' command from system",
+		 "killed because of selection timeout",
+		 "killed due to SCSI phase sequence error",
+		 "killed due to unknown status" };
+	  DAC960_V1_EventLogEntry_T *EventLogEntry =
+	    	Controller->V1.EventLogEntry;
+	  if (EventLogEntry->SequenceNumber ==
+	      Controller->V1.OldEventLogSequenceNumber)
+	    {
+	      unsigned char SenseKey = EventLogEntry->SenseKey;
+	      unsigned char AdditionalSenseCode =
+		EventLogEntry->AdditionalSenseCode;
+	      unsigned char AdditionalSenseCodeQualifier =
+		EventLogEntry->AdditionalSenseCodeQualifier;
+	      if (SenseKey == DAC960_SenseKey_VendorSpecific &&
+		  AdditionalSenseCode == 0x80 &&
+		  AdditionalSenseCodeQualifier <
+		  sizeof(DAC960_EventMessages) / sizeof(char *))
+		DAC960_Critical("Physical Device %d:%d %s\n", Controller,
+				EventLogEntry->Channel,
+				EventLogEntry->TargetID,
+				DAC960_EventMessages[
+				  AdditionalSenseCodeQualifier]);
+	      else if (SenseKey == DAC960_SenseKey_UnitAttention &&
+		       AdditionalSenseCode == 0x29)
+		{
+		  if (Controller->MonitoringTimerCount > 0)
+		    Controller->V1.DeviceResetCount[EventLogEntry->Channel]
+						   [EventLogEntry->TargetID]++;
+		}
+	      else if (!(SenseKey == DAC960_SenseKey_NoSense ||
+			 (SenseKey == DAC960_SenseKey_NotReady &&
+			  AdditionalSenseCode == 0x04 &&
+			  (AdditionalSenseCodeQualifier == 0x01 ||
+			   AdditionalSenseCodeQualifier == 0x02))))
+		{
+		  DAC960_Critical("Physical Device %d:%d Error Log: "
+				  "Sense Key = %X, ASC = %02X, ASCQ = %02X\n",
+				  Controller,
+				  EventLogEntry->Channel,
+				  EventLogEntry->TargetID,
+				  SenseKey,
+				  AdditionalSenseCode,
+				  AdditionalSenseCodeQualifier);
+		  DAC960_Critical("Physical Device %d:%d Error Log: "
+				  "Information = %02X%02X%02X%02X "
+				  "%02X%02X%02X%02X\n",
+				  Controller,
+				  EventLogEntry->Channel,
+				  EventLogEntry->TargetID,
+				  EventLogEntry->Information[0],
+				  EventLogEntry->Information[1],
+				  EventLogEntry->Information[2],
+				  EventLogEntry->Information[3],
+				  EventLogEntry->CommandSpecificInformation[0],
+				  EventLogEntry->CommandSpecificInformation[1],
+				  EventLogEntry->CommandSpecificInformation[2],
+				  EventLogEntry->CommandSpecificInformation[3]);
+		}
+	    }
+	  Controller->V1.OldEventLogSequenceNumber++;
+	}
+      else if (CommandOpcode == DAC960_V1_GetErrorTable)
+	{
+	  DAC960_V1_ErrorTable_T *OldErrorTable = &Controller->V1.ErrorTable;
+	  DAC960_V1_ErrorTable_T *NewErrorTable = Controller->V1.NewErrorTable;
+	  int Channel, TargetID;
+	  for (Channel = 0; Channel < Controller->Channels; Channel++)
+	    for (TargetID = 0; TargetID < Controller->Targets; TargetID++)
+	      {
+		DAC960_V1_ErrorTableEntry_T *NewErrorEntry =
+		  &NewErrorTable->ErrorTableEntries[Channel][TargetID];
+		DAC960_V1_ErrorTableEntry_T *OldErrorEntry =
+		  &OldErrorTable->ErrorTableEntries[Channel][TargetID];
+		if ((NewErrorEntry->ParityErrorCount !=
+		     OldErrorEntry->ParityErrorCount) ||
+		    (NewErrorEntry->SoftErrorCount !=
+		     OldErrorEntry->SoftErrorCount) ||
+		    (NewErrorEntry->HardErrorCount !=
+		     OldErrorEntry->HardErrorCount) ||
+		    (NewErrorEntry->MiscErrorCount !=
+		     OldErrorEntry->MiscErrorCount))
+		  DAC960_Critical("Physical Device %d:%d Errors: "
+				  "Parity = %d, Soft = %d, "
+				  "Hard = %d, Misc = %d\n",
+				  Controller, Channel, TargetID,
+				  NewErrorEntry->ParityErrorCount,
+				  NewErrorEntry->SoftErrorCount,
+				  NewErrorEntry->HardErrorCount,
+				  NewErrorEntry->MiscErrorCount);
+	      }
+	  memcpy(&Controller->V1.ErrorTable, Controller->V1.NewErrorTable,
+		 sizeof(DAC960_V1_ErrorTable_T));
+	}
+      else if (CommandOpcode == DAC960_V1_GetDeviceState)
+	{
+	  DAC960_V1_DeviceState_T *OldDeviceState =
+	    &Controller->V1.DeviceState[Controller->V1.DeviceStateChannel]
+				       [Controller->V1.DeviceStateTargetID];
+	  DAC960_V1_DeviceState_T *NewDeviceState =
+	    Controller->V1.NewDeviceState;
+	  if (NewDeviceState->DeviceState != OldDeviceState->DeviceState)
+	    DAC960_Critical("Physical Device %d:%d is now %s\n", Controller,
+			    Controller->V1.DeviceStateChannel,
+			    Controller->V1.DeviceStateTargetID,
+			    (NewDeviceState->DeviceState
+			     == DAC960_V1_Device_Dead
+			     ? "DEAD"
+			     : NewDeviceState->DeviceState
+			       == DAC960_V1_Device_WriteOnly
+			       ? "WRITE-ONLY"
+			       : NewDeviceState->DeviceState
+				 == DAC960_V1_Device_Online
+				 ? "ONLINE" : "STANDBY"));
+	  if (OldDeviceState->DeviceState == DAC960_V1_Device_Dead &&
+	      NewDeviceState->DeviceState != DAC960_V1_Device_Dead)
+	    {
+	      Controller->V1.NeedDeviceInquiryInformation = true;
+	      Controller->V1.NeedDeviceSerialNumberInformation = true;
+	      Controller->V1.DeviceResetCount
+			     [Controller->V1.DeviceStateChannel]
+			     [Controller->V1.DeviceStateTargetID] = 0;
+	    }
+	  memcpy(OldDeviceState, NewDeviceState,
+		 sizeof(DAC960_V1_DeviceState_T));
+	}
+      else if (CommandOpcode == DAC960_V1_GetLogicalDriveInformation)
+	{
+	  int LogicalDriveNumber;
+	  for (LogicalDriveNumber = 0;
+	       LogicalDriveNumber < Controller->LogicalDriveCount;
+	       LogicalDriveNumber++)
+	    {
+	      DAC960_V1_LogicalDriveInformation_T *OldLogicalDriveInformation =
+		&Controller->V1.LogicalDriveInformation[LogicalDriveNumber];
+	      DAC960_V1_LogicalDriveInformation_T *NewLogicalDriveInformation =
+		&(*Controller->V1.NewLogicalDriveInformation)[LogicalDriveNumber];
+	      if (NewLogicalDriveInformation->LogicalDriveState !=
+		  OldLogicalDriveInformation->LogicalDriveState)
+		DAC960_Critical("Logical Drive %d (/dev/rd/c%dd%d) "
+				"is now %s\n", Controller,
+				LogicalDriveNumber,
+				Controller->ControllerNumber,
+				LogicalDriveNumber,
+				(NewLogicalDriveInformation->LogicalDriveState
+				 == DAC960_V1_LogicalDrive_Online
+				 ? "ONLINE"
+				 : NewLogicalDriveInformation->LogicalDriveState
+				   == DAC960_V1_LogicalDrive_Critical
+				   ? "CRITICAL" : "OFFLINE"));
+	      if (NewLogicalDriveInformation->WriteBack !=
+		  OldLogicalDriveInformation->WriteBack)
+		DAC960_Critical("Logical Drive %d (/dev/rd/c%dd%d) "
+				"is now %s\n", Controller,
+				LogicalDriveNumber,
+				Controller->ControllerNumber,
+				LogicalDriveNumber,
+				(NewLogicalDriveInformation->WriteBack
+				 ? "WRITE BACK" : "WRITE THRU"));
+	    }
+	  memcpy(&Controller->V1.LogicalDriveInformation,
+		 Controller->V1.NewLogicalDriveInformation,
+		 sizeof(DAC960_V1_LogicalDriveInformationArray_T));
+	}
+      else if (CommandOpcode == DAC960_V1_GetRebuildProgress)
+	{
+	  unsigned int LogicalDriveNumber =
+	    Controller->V1.RebuildProgress->LogicalDriveNumber;
+	  unsigned int LogicalDriveSize =
+	    Controller->V1.RebuildProgress->LogicalDriveSize;
+	  unsigned int BlocksCompleted =
+	    LogicalDriveSize - Controller->V1.RebuildProgress->RemainingBlocks;
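+	  /*
+	    The >> 7 shifts below keep 100 * BlocksCompleted from
+	    overflowing 32 bits on large logical drives.
+	  */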
+	  if (CommandStatus == DAC960_V1_NoRebuildOrCheckInProgress &&
+	      Controller->V1.LastRebuildStatus == DAC960_V1_NormalCompletion)
+	    CommandStatus = DAC960_V1_RebuildSuccessful;
+	  switch (CommandStatus)
+	    {
+	    case DAC960_V1_NormalCompletion:
+	      Controller->EphemeralProgressMessage = true;
+	      DAC960_Progress("Rebuild in Progress: "
+			      "Logical Drive %d (/dev/rd/c%dd%d) "
+			      "%d%% completed\n",
+			      Controller, LogicalDriveNumber,
+			      Controller->ControllerNumber,
+			      LogicalDriveNumber,
+			      (100 * (BlocksCompleted >> 7))
+			      / (LogicalDriveSize >> 7));
+	      Controller->EphemeralProgressMessage = false;
+	      break;
+	    case DAC960_V1_RebuildFailed_LogicalDriveFailure:
+	      DAC960_Progress("Rebuild Failed due to "
+			      "Logical Drive Failure\n", Controller);
+	      break;
+	    case DAC960_V1_RebuildFailed_BadBlocksOnOther:
+	      DAC960_Progress("Rebuild Failed due to "
+			      "Bad Blocks on Other Drives\n", Controller);
+	      break;
+	    case DAC960_V1_RebuildFailed_NewDriveFailed:
+	      DAC960_Progress("Rebuild Failed due to "
+			      "Failure of Drive Being Rebuilt\n", Controller);
+	      break;
+	    case DAC960_V1_NoRebuildOrCheckInProgress:
+	      break;
+	    case DAC960_V1_RebuildSuccessful:
+	      DAC960_Progress("Rebuild Completed Successfully\n", Controller);
+	      break;
+	    case DAC960_V1_RebuildSuccessfullyTerminated:
+	      DAC960_Progress("Rebuild Successfully Terminated\n", Controller);
+	      break;
+	    }
+	  Controller->V1.LastRebuildStatus = CommandStatus;
+	  if (CommandType != DAC960_MonitoringCommand &&
+	      Controller->V1.RebuildStatusPending)
+	    {
+	      Command->V1.CommandStatus = Controller->V1.PendingRebuildStatus;
+	      Controller->V1.RebuildStatusPending = false;
+	    }
+	  else if (CommandType == DAC960_MonitoringCommand &&
+		   CommandStatus != DAC960_V1_NormalCompletion &&
+		   CommandStatus != DAC960_V1_NoRebuildOrCheckInProgress)
+	    {
+	      Controller->V1.PendingRebuildStatus = CommandStatus;
+	      Controller->V1.RebuildStatusPending = true;
+	    }
+	}
+      else if (CommandOpcode == DAC960_V1_RebuildStat)
+	{
+	  unsigned int LogicalDriveNumber =
+	    Controller->V1.RebuildProgress->LogicalDriveNumber;
+	  unsigned int LogicalDriveSize =
+	    Controller->V1.RebuildProgress->LogicalDriveSize;
+	  unsigned int BlocksCompleted =
+	    LogicalDriveSize - Controller->V1.RebuildProgress->RemainingBlocks;
+	  if (CommandStatus == DAC960_V1_NormalCompletion)
+	    {
+	      Controller->EphemeralProgressMessage = true;
+	      DAC960_Progress("Consistency Check in Progress: "
+			      "Logical Drive %d (/dev/rd/c%dd%d) "
+			      "%d%% completed\n",
+			      Controller, LogicalDriveNumber,
+			      Controller->ControllerNumber,
+			      LogicalDriveNumber,
+			      (100 * (BlocksCompleted >> 7))
+			      / (LogicalDriveSize >> 7));
+	      Controller->EphemeralProgressMessage = false;
+	    }
+	}
+      else if (CommandOpcode == DAC960_V1_BackgroundInitializationControl)
+	{
+	  unsigned int LogicalDriveNumber =
+	    Controller->V1.BackgroundInitializationStatus->LogicalDriveNumber;
+	  unsigned int LogicalDriveSize =
+	    Controller->V1.BackgroundInitializationStatus->LogicalDriveSize;
+	  unsigned int BlocksCompleted =
+	    Controller->V1.BackgroundInitializationStatus->BlocksCompleted;
+	  switch (CommandStatus)
+	    {
+	    case DAC960_V1_NormalCompletion:
+	      switch (Controller->V1.BackgroundInitializationStatus->Status)
+		{
+		case DAC960_V1_BackgroundInitializationInvalid:
+		  break;
+		case DAC960_V1_BackgroundInitializationStarted:
+		  DAC960_Progress("Background Initialization Started\n",
+				  Controller);
+		  break;
+		case DAC960_V1_BackgroundInitializationInProgress:
+		  if (BlocksCompleted ==
+		      Controller->V1.LastBackgroundInitializationStatus.
+				BlocksCompleted &&
+		      LogicalDriveNumber ==
+		      Controller->V1.LastBackgroundInitializationStatus.
+				LogicalDriveNumber)
+		    break;
+		  Controller->EphemeralProgressMessage = true;
+		  DAC960_Progress("Background Initialization in Progress: "
+				  "Logical Drive %d (/dev/rd/c%dd%d) "
+				  "%d%% completed\n",
+				  Controller, LogicalDriveNumber,
+				  Controller->ControllerNumber,
+				  LogicalDriveNumber,
+				  (100 * (BlocksCompleted >> 7))
+				  / (LogicalDriveSize >> 7));
+		  Controller->EphemeralProgressMessage = false;
+		  break;
+		case DAC960_V1_BackgroundInitializationSuspended:
+		  DAC960_Progress("Background Initialization Suspended\n",
+				  Controller);
+		  break;
+		case DAC960_V1_BackgroundInitializationCancelled:
+		  DAC960_Progress("Background Initialization Cancelled\n",
+				  Controller);
+		  break;
+		}
+	      memcpy(&Controller->V1.LastBackgroundInitializationStatus,
+		     Controller->V1.BackgroundInitializationStatus,
+		     sizeof(DAC960_V1_BackgroundInitializationStatus_T));
+	      break;
+	    case DAC960_V1_BackgroundInitSuccessful:
+	      if (Controller->V1.BackgroundInitializationStatus->Status ==
+		  DAC960_V1_BackgroundInitializationInProgress)
+		DAC960_Progress("Background Initialization "
+				"Completed Successfully\n", Controller);
+	      Controller->V1.BackgroundInitializationStatus->Status =
+		DAC960_V1_BackgroundInitializationInvalid;
+	      break;
+	    case DAC960_V1_BackgroundInitAborted:
+	      if (Controller->V1.BackgroundInitializationStatus->Status ==
+		  DAC960_V1_BackgroundInitializationInProgress)
+		DAC960_Progress("Background Initialization Aborted\n",
+				Controller);
+	      Controller->V1.BackgroundInitializationStatus->Status =
+		DAC960_V1_BackgroundInitializationInvalid;
+	      break;
+	    case DAC960_V1_NoBackgroundInitInProgress:
+	      break;
+	    }
+	} 
+      else if (CommandOpcode == DAC960_V1_DCDB)
+	{
+	   /*
+	     This is a bit ugly.
+
+	     The InquiryStandardData and the InquiryUnitSerialNumber
+	     information retrieval operations BOTH use DAC960_V1_DCDB
+	     commands, so the test above can't distinguish between
+	     these two cases.
+
+	     Instead, we rely on the order of code later in this
+	     function to ensure that DeviceInquiryInformation commands
+	     are submitted before DeviceSerialNumber commands.
+	   */
+	   if (Controller->V1.NeedDeviceInquiryInformation)
+	     {
+	        DAC960_SCSI_Inquiry_T *InquiryStandardData =
+			&Controller->V1.InquiryStandardData
+				[Controller->V1.DeviceStateChannel]
+				[Controller->V1.DeviceStateTargetID];
+	        if (CommandStatus != DAC960_V1_NormalCompletion)
+		   {
+			memset(InquiryStandardData, 0,
+				sizeof(DAC960_SCSI_Inquiry_T));
+	      		InquiryStandardData->PeripheralDeviceType = 0x1F;
+		    }
+	         else
+			memcpy(InquiryStandardData, 
+				Controller->V1.NewInquiryStandardData,
+				sizeof(DAC960_SCSI_Inquiry_T));
+	         Controller->V1.NeedDeviceInquiryInformation = false;
+              }
+	   else if (Controller->V1.NeedDeviceSerialNumberInformation) 
+              {
+	        DAC960_SCSI_Inquiry_UnitSerialNumber_T *InquiryUnitSerialNumber =
+		  &Controller->V1.InquiryUnitSerialNumber
+				[Controller->V1.DeviceStateChannel]
+				[Controller->V1.DeviceStateTargetID];
+	         if (CommandStatus != DAC960_V1_NormalCompletion)
+		   {
+			memset(InquiryUnitSerialNumber, 0,
+				sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T));
+	      		InquiryUnitSerialNumber->PeripheralDeviceType = 0x1F;
+		    }
+	          else
+			memcpy(InquiryUnitSerialNumber, 
+				Controller->V1.NewInquiryUnitSerialNumber,
+				sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T));
+	      Controller->V1.NeedDeviceSerialNumberInformation = false;
+	     }
+	}
+      /*
+        Begin submitting new monitoring commands.
+       */
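+      /*
+        Each pending flag below queues exactly one follow-up command and
+        returns; completion of that command re-enters this function, so
+        these checks run as a chain until nothing remains pending and the
+        Monitoring Timer is re-armed.
+      */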
+      if (Controller->V1.NewEventLogSequenceNumber
+	  - Controller->V1.OldEventLogSequenceNumber > 0)
+	{
+	  Command->V1.CommandMailbox.Type3E.CommandOpcode =
+	    DAC960_V1_PerformEventLogOperation;
+	  Command->V1.CommandMailbox.Type3E.OperationType =
+	    DAC960_V1_GetEventLogEntry;
+	  Command->V1.CommandMailbox.Type3E.OperationQualifier = 1;
+	  Command->V1.CommandMailbox.Type3E.SequenceNumber =
+	    Controller->V1.OldEventLogSequenceNumber;
+	  Command->V1.CommandMailbox.Type3E.BusAddress =
+	    	Controller->V1.EventLogEntryDMA;
+	  DAC960_QueueCommand(Command);
+	  return;
+	}
+      if (Controller->V1.NeedErrorTableInformation)
+	{
+	  Controller->V1.NeedErrorTableInformation = false;
+	  Command->V1.CommandMailbox.Type3.CommandOpcode =
+	    DAC960_V1_GetErrorTable;
+	  Command->V1.CommandMailbox.Type3.BusAddress =
+	    	Controller->V1.NewErrorTableDMA;
+	  DAC960_QueueCommand(Command);
+	  return;
+	}
+      if (Controller->V1.NeedRebuildProgress &&
+	  Controller->V1.RebuildProgressFirst)
+	{
+	  Controller->V1.NeedRebuildProgress = false;
+	  Command->V1.CommandMailbox.Type3.CommandOpcode =
+	    DAC960_V1_GetRebuildProgress;
+	  Command->V1.CommandMailbox.Type3.BusAddress =
+	    Controller->V1.RebuildProgressDMA;
+	  DAC960_QueueCommand(Command);
+	  return;
+	}
+      if (Controller->V1.NeedDeviceStateInformation)
+	{
+	  if (Controller->V1.NeedDeviceInquiryInformation)
+	    {
+	      DAC960_V1_DCDB_T *DCDB = Controller->V1.MonitoringDCDB;
+	      dma_addr_t DCDB_DMA = Controller->V1.MonitoringDCDB_DMA;
+
+	      dma_addr_t NewInquiryStandardDataDMA =
+		Controller->V1.NewInquiryStandardDataDMA;
+
+	      Command->V1.CommandMailbox.Type3.CommandOpcode = DAC960_V1_DCDB;
+	      Command->V1.CommandMailbox.Type3.BusAddress = DCDB_DMA;
+	      DCDB->Channel = Controller->V1.DeviceStateChannel;
+	      DCDB->TargetID = Controller->V1.DeviceStateTargetID;
+	      DCDB->Direction = DAC960_V1_DCDB_DataTransferDeviceToSystem;
+	      DCDB->EarlyStatus = false;
+	      DCDB->Timeout = DAC960_V1_DCDB_Timeout_10_seconds;
+	      DCDB->NoAutomaticRequestSense = false;
+	      DCDB->DisconnectPermitted = true;
+	      DCDB->TransferLength = sizeof(DAC960_SCSI_Inquiry_T);
+	      DCDB->BusAddress = NewInquiryStandardDataDMA;
+	      DCDB->CDBLength = 6;
+	      DCDB->TransferLengthHigh4 = 0;
+	      DCDB->SenseLength = sizeof(DCDB->SenseData);
+	      DCDB->CDB[0] = 0x12; /* INQUIRY */
+	      DCDB->CDB[1] = 0; /* EVPD = 0 */
+	      DCDB->CDB[2] = 0; /* Page Code */
+	      DCDB->CDB[3] = 0; /* Reserved */
+	      DCDB->CDB[4] = sizeof(DAC960_SCSI_Inquiry_T);
+	      DCDB->CDB[5] = 0; /* Control */
+	      DAC960_QueueCommand(Command);
+	      return;
+	    }
+	  if (Controller->V1.NeedDeviceSerialNumberInformation)
+	    {
+	      DAC960_V1_DCDB_T *DCDB = Controller->V1.MonitoringDCDB;
+	      dma_addr_t DCDB_DMA = Controller->V1.MonitoringDCDB_DMA;
+	      dma_addr_t NewInquiryUnitSerialNumberDMA = 
+			Controller->V1.NewInquiryUnitSerialNumberDMA;
+
+	      Command->V1.CommandMailbox.Type3.CommandOpcode = DAC960_V1_DCDB;
+	      Command->V1.CommandMailbox.Type3.BusAddress = DCDB_DMA;
+	      DCDB->Channel = Controller->V1.DeviceStateChannel;
+	      DCDB->TargetID = Controller->V1.DeviceStateTargetID;
+	      DCDB->Direction = DAC960_V1_DCDB_DataTransferDeviceToSystem;
+	      DCDB->EarlyStatus = false;
+	      DCDB->Timeout = DAC960_V1_DCDB_Timeout_10_seconds;
+	      DCDB->NoAutomaticRequestSense = false;
+	      DCDB->DisconnectPermitted = true;
+	      DCDB->TransferLength =
+		sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T);
+	      DCDB->BusAddress = NewInquiryUnitSerialNumberDMA;
+	      DCDB->CDBLength = 6;
+	      DCDB->TransferLengthHigh4 = 0;
+	      DCDB->SenseLength = sizeof(DCDB->SenseData);
+	      DCDB->CDB[0] = 0x12; /* INQUIRY */
+	      DCDB->CDB[1] = 1; /* EVPD = 1 */
+	      DCDB->CDB[2] = 0x80; /* Page Code */
+	      DCDB->CDB[3] = 0; /* Reserved */
+	      DCDB->CDB[4] = sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T);
+	      DCDB->CDB[5] = 0; /* Control */
+	      DAC960_QueueCommand(Command);
+	      return;
+	    }
+	  if (Controller->V1.StartDeviceStateScan)
+	    {
+	      Controller->V1.DeviceStateChannel = 0;
+	      Controller->V1.DeviceStateTargetID = 0;
+	      Controller->V1.StartDeviceStateScan = false;
+	    }
+	  else if (++Controller->V1.DeviceStateTargetID == Controller->Targets)
+	    {
+	      Controller->V1.DeviceStateChannel++;
+	      Controller->V1.DeviceStateTargetID = 0;
+	    }
+	  if (Controller->V1.DeviceStateChannel < Controller->Channels)
+	    {
+	      Controller->V1.NewDeviceState->DeviceState =
+		DAC960_V1_Device_Dead;
+	      Command->V1.CommandMailbox.Type3D.CommandOpcode =
+		DAC960_V1_GetDeviceState;
+	      Command->V1.CommandMailbox.Type3D.Channel =
+		Controller->V1.DeviceStateChannel;
+	      Command->V1.CommandMailbox.Type3D.TargetID =
+		Controller->V1.DeviceStateTargetID;
+	      Command->V1.CommandMailbox.Type3D.BusAddress =
+		Controller->V1.NewDeviceStateDMA;
+	      DAC960_QueueCommand(Command);
+	      return;
+	    }
+	  Controller->V1.NeedDeviceStateInformation = false;
+	}
+      if (Controller->V1.NeedLogicalDriveInformation)
+	{
+	  Controller->V1.NeedLogicalDriveInformation = false;
+	  Command->V1.CommandMailbox.Type3.CommandOpcode =
+	    DAC960_V1_GetLogicalDriveInformation;
+	  Command->V1.CommandMailbox.Type3.BusAddress =
+	    Controller->V1.NewLogicalDriveInformationDMA;
+	  DAC960_QueueCommand(Command);
+	  return;
+	}
+      if (Controller->V1.NeedRebuildProgress)
+	{
+	  Controller->V1.NeedRebuildProgress = false;
+	  Command->V1.CommandMailbox.Type3.CommandOpcode =
+	    DAC960_V1_GetRebuildProgress;
+	  Command->V1.CommandMailbox.Type3.BusAddress =
+	    	Controller->V1.RebuildProgressDMA;
+	  DAC960_QueueCommand(Command);
+	  return;
+	}
+      if (Controller->V1.NeedConsistencyCheckProgress)
+	{
+	  Controller->V1.NeedConsistencyCheckProgress = false;
+	  Command->V1.CommandMailbox.Type3.CommandOpcode =
+	    DAC960_V1_RebuildStat;
+	  Command->V1.CommandMailbox.Type3.BusAddress =
+	    Controller->V1.RebuildProgressDMA;
+	  DAC960_QueueCommand(Command);
+	  return;
+	}
+      if (Controller->V1.NeedBackgroundInitializationStatus)
+	{
+	  Controller->V1.NeedBackgroundInitializationStatus = false;
+	  Command->V1.CommandMailbox.Type3B.CommandOpcode =
+	    DAC960_V1_BackgroundInitializationControl;
+	  Command->V1.CommandMailbox.Type3B.CommandOpcode2 = 0x20;
+	  Command->V1.CommandMailbox.Type3B.BusAddress =
+	    Controller->V1.BackgroundInitializationStatusDMA;
+	  DAC960_QueueCommand(Command);
+	  return;
+	}
+      Controller->MonitoringTimerCount++;
+      Controller->MonitoringTimer.expires =
+	jiffies + DAC960_MonitoringTimerInterval;
+      add_timer(&Controller->MonitoringTimer);
+    }
+  if (CommandType == DAC960_ImmediateCommand)
+    {
+      complete(Command->Completion);
+      Command->Completion = NULL;
+      return;
+    }
+  if (CommandType == DAC960_QueuedCommand)
+    {
+      DAC960_V1_KernelCommand_T *KernelCommand = Command->V1.KernelCommand;
+      KernelCommand->CommandStatus = Command->V1.CommandStatus;
+      Command->V1.KernelCommand = NULL;
+      if (CommandOpcode == DAC960_V1_DCDB)
+	Controller->V1.DirectCommandActive[KernelCommand->DCDB->Channel]
+					  [KernelCommand->DCDB->TargetID] =
+	  false;
+      DAC960_DeallocateCommand(Command);
+      KernelCommand->CompletionFunction(KernelCommand);
+      return;
+    }
+  /*
+    Queue a Status Monitoring Command to the Controller using the just
+    completed Command if one was deferred previously due to lack of a
+    free Command when the Monitoring Timer Function was called.
+  */
+  if (Controller->MonitoringCommandDeferred)
+    {
+      Controller->MonitoringCommandDeferred = false;
+      DAC960_V1_QueueMonitoringCommand(Command);
+      return;
+    }
+  /*
+    Deallocate the Command.
+  */
+  DAC960_DeallocateCommand(Command);
+  /*
+    Wake up any processes waiting on a free Command.
+  */
+  wake_up(&Controller->CommandWaitQueue);
+}
+
+
+/*
+  DAC960_V2_ReadWriteError prints an appropriate error message for Command
+  when an error occurs on a Read or Write operation.
+*/
+
+static void DAC960_V2_ReadWriteError(DAC960_Command_T *Command)
+{
+  DAC960_Controller_T *Controller = Command->Controller;
+  unsigned char *SenseErrors[] = { "NO SENSE", "RECOVERED ERROR",
+				   "NOT READY", "MEDIUM ERROR",
+				   "HARDWARE ERROR", "ILLEGAL REQUEST",
+				   "UNIT ATTENTION", "DATA PROTECT",
+				   "BLANK CHECK", "VENDOR-SPECIFIC",
+				   "COPY ABORTED", "ABORTED COMMAND",
+				   "EQUAL", "VOLUME OVERFLOW",
+				   "MISCOMPARE", "RESERVED" };
+  unsigned char *CommandName = "UNKNOWN";
+  switch (Command->CommandType)
+    {
+    case DAC960_ReadCommand:
+    case DAC960_ReadRetryCommand:
+      CommandName = "READ";
+      break;
+    case DAC960_WriteCommand:
+    case DAC960_WriteRetryCommand:
+      CommandName = "WRITE";
+      break;
+    case DAC960_MonitoringCommand:
+    case DAC960_ImmediateCommand:
+    case DAC960_QueuedCommand:
+      break;
+    }
+  DAC960_Error("Error Condition %s on %s:\n", Controller,
+	       SenseErrors[Command->V2.RequestSense->SenseKey], CommandName);
+  DAC960_Error("  /dev/rd/c%dd%d:   absolute blocks %u..%u\n",
+	       Controller, Controller->ControllerNumber,
+	       Command->LogicalDriveNumber, Command->BlockNumber,
+	       Command->BlockNumber + Command->BlockCount - 1);
+}
+
+
+/*
+  DAC960_V2_ReportEvent prints an appropriate message when a Controller Event
+  occurs.
+*/
+
+static void DAC960_V2_ReportEvent(DAC960_Controller_T *Controller,
+				  DAC960_V2_Event_T *Event)
+{
+  DAC960_SCSI_RequestSense_T *RequestSense =
+    (DAC960_SCSI_RequestSense_T *) &Event->RequestSenseData;
+  unsigned char MessageBuffer[DAC960_LineBufferSize];
+  static struct { int EventCode; unsigned char *EventMessage; } EventList[] =
+    { /* Physical Device Events (0x0000 - 0x007F) */
+      { 0x0001, "P Online" },
+      { 0x0002, "P Standby" },
+      { 0x0005, "P Automatic Rebuild Started" },
+      { 0x0006, "P Manual Rebuild Started" },
+      { 0x0007, "P Rebuild Completed" },
+      { 0x0008, "P Rebuild Cancelled" },
+      { 0x0009, "P Rebuild Failed for Unknown Reasons" },
+      { 0x000A, "P Rebuild Failed due to New Physical Device" },
+      { 0x000B, "P Rebuild Failed due to Logical Drive Failure" },
+      { 0x000C, "S Offline" },
+      { 0x000D, "P Found" },
+      { 0x000E, "P Removed" },
+      { 0x000F, "P Unconfigured" },
+      { 0x0010, "P Expand Capacity Started" },
+      { 0x0011, "P Expand Capacity Completed" },
+      { 0x0012, "P Expand Capacity Failed" },
+      { 0x0013, "P Command Timed Out" },
+      { 0x0014, "P Command Aborted" },
+      { 0x0015, "P Command Retried" },
+      { 0x0016, "P Parity Error" },
+      { 0x0017, "P Soft Error" },
+      { 0x0018, "P Miscellaneous Error" },
+      { 0x0019, "P Reset" },
+      { 0x001A, "P Active Spare Found" },
+      { 0x001B, "P Warm Spare Found" },
+      { 0x001C, "S Sense Data Received" },
+      { 0x001D, "P Initialization Started" },
+      { 0x001E, "P Initialization Completed" },
+      { 0x001F, "P Initialization Failed" },
+      { 0x0020, "P Initialization Cancelled" },
+      { 0x0021, "P Failed because Write Recovery Failed" },
+      { 0x0022, "P Failed because SCSI Bus Reset Failed" },
+      { 0x0023, "P Failed because of Double Check Condition" },
+      { 0x0024, "P Failed because Device Cannot Be Accessed" },
+      { 0x0025, "P Failed because of Gross Error on SCSI Processor" },
+      { 0x0026, "P Failed because of Bad Tag from Device" },
+      { 0x0027, "P Failed because of Command Timeout" },
+      { 0x0028, "P Failed because of System Reset" },
+      { 0x0029, "P Failed because of Busy Status or Parity Error" },
+      { 0x002A, "P Failed because Host Set Device to Failed State" },
+      { 0x002B, "P Failed because of Selection Timeout" },
+      { 0x002C, "P Failed because of SCSI Bus Phase Error" },
+      { 0x002D, "P Failed because Device Returned Unknown Status" },
+      { 0x002E, "P Failed because Device Not Ready" },
+      { 0x002F, "P Failed because Device Not Found at Startup" },
+      { 0x0030, "P Failed because COD Write Operation Failed" },
+      { 0x0031, "P Failed because BDT Write Operation Failed" },
+      { 0x0039, "P Missing at Startup" },
+      { 0x003A, "P Start Rebuild Failed due to Physical Drive Too Small" },
+      { 0x003C, "P Temporarily Offline Device Automatically Made Online" },
+      { 0x003D, "P Standby Rebuild Started" },
+      /* Logical Device Events (0x0080 - 0x00FF) */
+      { 0x0080, "M Consistency Check Started" },
+      { 0x0081, "M Consistency Check Completed" },
+      { 0x0082, "M Consistency Check Cancelled" },
+      { 0x0083, "M Consistency Check Completed With Errors" },
+      { 0x0084, "M Consistency Check Failed due to Logical Drive Failure" },
+      { 0x0085, "M Consistency Check Failed due to Physical Device Failure" },
+      { 0x0086, "L Offline" },
+      { 0x0087, "L Critical" },
+      { 0x0088, "L Online" },
+      { 0x0089, "M Automatic Rebuild Started" },
+      { 0x008A, "M Manual Rebuild Started" },
+      { 0x008B, "M Rebuild Completed" },
+      { 0x008C, "M Rebuild Cancelled" },
+      { 0x008D, "M Rebuild Failed for Unknown Reasons" },
+      { 0x008E, "M Rebuild Failed due to New Physical Device" },
+      { 0x008F, "M Rebuild Failed due to Logical Drive Failure" },
+      { 0x0090, "M Initialization Started" },
+      { 0x0091, "M Initialization Completed" },
+      { 0x0092, "M Initialization Cancelled" },
+      { 0x0093, "M Initialization Failed" },
+      { 0x0094, "L Found" },
+      { 0x0095, "L Deleted" },
+      { 0x0096, "M Expand Capacity Started" },
+      { 0x0097, "M Expand Capacity Completed" },
+      { 0x0098, "M Expand Capacity Failed" },
+      { 0x0099, "L Bad Block Found" },
+      { 0x009A, "L Size Changed" },
+      { 0x009B, "L Type Changed" },
+      { 0x009C, "L Bad Data Block Found" },
+      { 0x009E, "L Read of Data Block in BDT" },
+      { 0x009F, "L Write Back Data for Disk Block Lost" },
+      { 0x00A0, "L Temporarily Offline RAID-5/3 Drive Made Online" },
+      { 0x00A1, "L Temporarily Offline RAID-6/1/0/7 Drive Made Online" },
+      { 0x00A2, "L Standby Rebuild Started" },
+      /* Fault Management Events (0x0100 - 0x017F) */
+      { 0x0140, "E Fan %d Failed" },
+      { 0x0141, "E Fan %d OK" },
+      { 0x0142, "E Fan %d Not Present" },
+      { 0x0143, "E Power Supply %d Failed" },
+      { 0x0144, "E Power Supply %d OK" },
+      { 0x0145, "E Power Supply %d Not Present" },
+      { 0x0146, "E Temperature Sensor %d Temperature Exceeds Safe Limit" },
+      { 0x0147, "E Temperature Sensor %d Temperature Exceeds Working Limit" },
+      { 0x0148, "E Temperature Sensor %d Temperature Normal" },
+      { 0x0149, "E Temperature Sensor %d Not Present" },
+      { 0x014A, "E Enclosure Management Unit %d Access Critical" },
+      { 0x014B, "E Enclosure Management Unit %d Access OK" },
+      { 0x014C, "E Enclosure Management Unit %d Access Offline" },
+      /* Controller Events (0x0180 - 0x01FF) */
+      { 0x0181, "C Cache Write Back Error" },
+      { 0x0188, "C Battery Backup Unit Found" },
+      { 0x0189, "C Battery Backup Unit Charge Level Low" },
+      { 0x018A, "C Battery Backup Unit Charge Level OK" },
+      { 0x0193, "C Installation Aborted" },
+      { 0x0195, "C Battery Backup Unit Physically Removed" },
+      { 0x0196, "C Memory Error During Warm Boot" },
+      { 0x019E, "C Memory Soft ECC Error Corrected" },
+      { 0x019F, "C Memory Hard ECC Error Corrected" },
+      { 0x01A2, "C Battery Backup Unit Failed" },
+      { 0x01AB, "C Mirror Race Recovery Failed" },
+      { 0x01AC, "C Mirror Race on Critical Drive" },
+      /* Controller Internal Processor Events */
+      { 0x0380, "C Internal Controller Hung" },
+      { 0x0381, "C Internal Controller Firmware Breakpoint" },
+      { 0x0390, "C Internal Controller i960 Processor Specific Error" },
+      { 0x03A0, "C Internal Controller StrongARM Processor Specific Error" },
+      { 0, "" } };
+  int EventListIndex = 0, EventCode;
+  unsigned char EventType, *EventMessage;
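+  /*
+    Vendor Specific sense data accompanying Event Code 0x1C encodes an
+    extended Event Code in its ASC (biased by 0x80) and ASCQ bytes, so
+    reconstruct the full Event Code before searching the Event List.
+  */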
+  if (Event->EventCode == 0x1C &&
+      RequestSense->SenseKey == DAC960_SenseKey_VendorSpecific &&
+      (RequestSense->AdditionalSenseCode == 0x80 ||
+       RequestSense->AdditionalSenseCode == 0x81))
+    Event->EventCode = ((RequestSense->AdditionalSenseCode - 0x80) << 8) |
+		       RequestSense->AdditionalSenseCodeQualifier;
+  while (true)
+    {
+      EventCode = EventList[EventListIndex].EventCode;
+      if (EventCode == Event->EventCode || EventCode == 0) break;
+      EventListIndex++;
+    }
+  if (EventCode == 0)
+    {
+      DAC960_Critical("Unknown Controller Event Code %04X\n",
+		      Controller, Event->EventCode);
+      return;
+    }
+  EventType = EventList[EventListIndex].EventMessage[0];
+  EventMessage = &EventList[EventListIndex].EventMessage[2];
+  switch (EventType)
+    {
+    case 'P':
+      DAC960_Critical("Physical Device %d:%d %s\n", Controller,
+		      Event->Channel, Event->TargetID, EventMessage);
+      break;
+    case 'L':
+      DAC960_Critical("Logical Drive %d (/dev/rd/c%dd%d) %s\n", Controller,
+		      Event->LogicalUnit, Controller->ControllerNumber,
+		      Event->LogicalUnit, EventMessage);
+      break;
+    case 'M':
+      DAC960_Progress("Logical Drive %d (/dev/rd/c%dd%d) %s\n", Controller,
+		      Event->LogicalUnit, Controller->ControllerNumber,
+		      Event->LogicalUnit, EventMessage);
+      break;
+    case 'S':
+      if (RequestSense->SenseKey == DAC960_SenseKey_NoSense ||
+	  (RequestSense->SenseKey == DAC960_SenseKey_NotReady &&
+	   RequestSense->AdditionalSenseCode == 0x04 &&
+	   (RequestSense->AdditionalSenseCodeQualifier == 0x01 ||
+	    RequestSense->AdditionalSenseCodeQualifier == 0x02)))
+	break;
+      DAC960_Critical("Physical Device %d:%d %s\n", Controller,
+		      Event->Channel, Event->TargetID, EventMessage);
+      DAC960_Critical("Physical Device %d:%d Request Sense: "
+		      "Sense Key = %X, ASC = %02X, ASCQ = %02X\n",
+		      Controller,
+		      Event->Channel,
+		      Event->TargetID,
+		      RequestSense->SenseKey,
+		      RequestSense->AdditionalSenseCode,
+		      RequestSense->AdditionalSenseCodeQualifier);
+      DAC960_Critical("Physical Device %d:%d Request Sense: "
+		      "Information = %02X%02X%02X%02X "
+		      "%02X%02X%02X%02X\n",
+		      Controller,
+		      Event->Channel,
+		      Event->TargetID,
+		      RequestSense->Information[0],
+		      RequestSense->Information[1],
+		      RequestSense->Information[2],
+		      RequestSense->Information[3],
+		      RequestSense->CommandSpecificInformation[0],
+		      RequestSense->CommandSpecificInformation[1],
+		      RequestSense->CommandSpecificInformation[2],
+		      RequestSense->CommandSpecificInformation[3]);
+      break;
+    case 'E':
+      if (Controller->SuppressEnclosureMessages) break;
+      sprintf(MessageBuffer, EventMessage, Event->LogicalUnit);
+      DAC960_Critical("Enclosure %d %s\n", Controller,
+		      Event->TargetID, MessageBuffer);
+      break;
+    case 'C':
+      DAC960_Critical("Controller %s\n", Controller, EventMessage);
+      break;
+    default:
+      DAC960_Critical("Unknown Controller Event Code %04X\n",
+		      Controller, Event->EventCode);
+      break;
+    }
+}
+
+
+/*
+  DAC960_V2_ReportProgress prints an appropriate progress message for
+  Logical Device Long Operations.
+*/
+
+static void DAC960_V2_ReportProgress(DAC960_Controller_T *Controller,
+				     unsigned char *MessageString,
+				     unsigned int LogicalDeviceNumber,
+				     unsigned long BlocksCompleted,
+				     unsigned long LogicalDeviceSize)
+{
+  Controller->EphemeralProgressMessage = true;
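+  /*
+    Both operands are scaled down by 128 blocks before computing the
+    percentage so that the multiplication by 100 cannot overflow an
+    unsigned long on 32-bit systems.
+  */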
+  DAC960_Progress("%s in Progress: Logical Drive %d (/dev/rd/c%dd%d) "
+		  "%d%% completed\n", Controller,
+		  MessageString,
+		  LogicalDeviceNumber,
+		  Controller->ControllerNumber,
+		  LogicalDeviceNumber,
+		  (100 * (BlocksCompleted >> 7)) / (LogicalDeviceSize >> 7));
+  Controller->EphemeralProgressMessage = false;
+}
+
+
+/*
+  DAC960_V2_ProcessCompletedCommand performs completion processing for Command
+  for DAC960 V2 Firmware Controllers.
+*/
+
+static void DAC960_V2_ProcessCompletedCommand(DAC960_Command_T *Command)
+{
+  DAC960_Controller_T *Controller = Command->Controller;
+  DAC960_CommandType_T CommandType = Command->CommandType;
+  DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox;
+  DAC960_V2_IOCTL_Opcode_T CommandOpcode = CommandMailbox->Common.IOCTL_Opcode;
+  DAC960_V2_CommandStatus_T CommandStatus = Command->V2.CommandStatus;
+
+  if (CommandType == DAC960_ReadCommand ||
+      CommandType == DAC960_WriteCommand)
+    {
+
+#ifdef FORCE_RETRY_DEBUG
+      CommandStatus = DAC960_V2_AbormalCompletion;
+      Command->V2.RequestSense->SenseKey = DAC960_SenseKey_MediumError;
+#endif
+
+      if (CommandStatus == DAC960_V2_NormalCompletion)
+	{
+	  if (!DAC960_ProcessCompletedRequest(Command, true))
+	    BUG();
+	}
+      else if (Command->V2.RequestSense->SenseKey == DAC960_SenseKey_MediumError)
+	{
+	  /*
+	   * break the command down into pieces and resubmit each
+	   * piece, hoping that some of them will succeed.
+	   */
+	   DAC960_queue_partial_rw(Command);
+	   return;
+	}
+      else
+	{
+	  if (Command->V2.RequestSense->SenseKey != DAC960_SenseKey_NotReady)
+	    DAC960_V2_ReadWriteError(Command);
+	  /*
+	    Perform completion processing for all buffers in this I/O Request.
+	  */
+          (void)DAC960_ProcessCompletedRequest(Command, false);
+	}
+    }
+  else if (CommandType == DAC960_ReadRetryCommand ||
+	   CommandType == DAC960_WriteRetryCommand)
+    {
+      boolean normal_completion;
+
+#ifdef FORCE_RETRY_FAILURE_DEBUG
+      static int retry_count = 1;
+#endif
+      /*
+	Perform completion processing for the portion that was
+	retried, and submit the next portion, if any.
+      */
+      normal_completion = true;
+      if (CommandStatus != DAC960_V2_NormalCompletion) {
+	normal_completion = false;
+	if (Command->V2.RequestSense->SenseKey != DAC960_SenseKey_NotReady)
+	    DAC960_V2_ReadWriteError(Command);
+      }
+
+#ifdef FORCE_RETRY_FAILURE_DEBUG
+      if (!(++retry_count % 10000)) {
+	      printk("V2 error retry failure test\n");
+	      normal_completion = false;
+	      DAC960_V2_ReadWriteError(Command);
+      }
+#endif
+
+      if (!DAC960_ProcessCompletedRequest(Command, normal_completion))
+	{
+	  DAC960_queue_partial_rw(Command);
+	  return;
+	}
+    }
+  else if (CommandType == DAC960_MonitoringCommand)
+    {
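+      /*
+	Monitoring proceeds as a chain of Commands: the Controller
+	Information query leads to Event retrieval and to the Physical
+	and Logical Device Information scans, each step re-queueing
+	this Command until no further work remains.
+      */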
+      if (Controller->ShutdownMonitoringTimer)
+	      return;
+      if (CommandOpcode == DAC960_V2_GetControllerInfo)
+	{
+	  DAC960_V2_ControllerInfo_T *NewControllerInfo =
+	    Controller->V2.NewControllerInformation;
+	  DAC960_V2_ControllerInfo_T *ControllerInfo =
+	    &Controller->V2.ControllerInformation;
+	  Controller->LogicalDriveCount =
+	    NewControllerInfo->LogicalDevicesPresent;
+	  Controller->V2.NeedLogicalDeviceInformation = true;
+	  Controller->V2.NeedPhysicalDeviceInformation = true;
+	  Controller->V2.StartLogicalDeviceInformationScan = true;
+	  Controller->V2.StartPhysicalDeviceInformationScan = true;
+	  Controller->MonitoringAlertMode =
+	    (NewControllerInfo->LogicalDevicesCritical > 0 ||
+	     NewControllerInfo->LogicalDevicesOffline > 0 ||
+	     NewControllerInfo->PhysicalDisksCritical > 0 ||
+	     NewControllerInfo->PhysicalDisksOffline > 0);
+	  memcpy(ControllerInfo, NewControllerInfo,
+		 sizeof(DAC960_V2_ControllerInfo_T));
+	}
+      else if (CommandOpcode == DAC960_V2_GetEvent)
+	{
+	  if (CommandStatus == DAC960_V2_NormalCompletion) {
+	    DAC960_V2_ReportEvent(Controller, Controller->V2.Event);
+	  }
+	  Controller->V2.NextEventSequenceNumber++;
+	}
+      else if (CommandOpcode == DAC960_V2_GetPhysicalDeviceInfoValid &&
+	       CommandStatus == DAC960_V2_NormalCompletion)
+	{
+	  DAC960_V2_PhysicalDeviceInfo_T *NewPhysicalDeviceInfo =
+	    Controller->V2.NewPhysicalDeviceInformation;
+	  unsigned int PhysicalDeviceIndex = Controller->V2.PhysicalDeviceIndex;
+	  DAC960_V2_PhysicalDeviceInfo_T *PhysicalDeviceInfo =
+	    Controller->V2.PhysicalDeviceInformation[PhysicalDeviceIndex];
+	  DAC960_SCSI_Inquiry_UnitSerialNumber_T *InquiryUnitSerialNumber =
+	    Controller->V2.InquiryUnitSerialNumber[PhysicalDeviceIndex];
+	  unsigned int DeviceIndex;
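+	  /*
+	    The Physical Device table is kept sorted by Channel, TargetID,
+	    and Logical Unit; any entry that sorts before the device just
+	    reported must no longer exist, so report and remove it.
+	  */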
+	  while (PhysicalDeviceInfo != NULL &&
+		 (NewPhysicalDeviceInfo->Channel >
+		  PhysicalDeviceInfo->Channel ||
+		  (NewPhysicalDeviceInfo->Channel ==
+		   PhysicalDeviceInfo->Channel &&
+		   (NewPhysicalDeviceInfo->TargetID >
+		    PhysicalDeviceInfo->TargetID ||
+		   (NewPhysicalDeviceInfo->TargetID ==
+		    PhysicalDeviceInfo->TargetID &&
+		    NewPhysicalDeviceInfo->LogicalUnit >
+		    PhysicalDeviceInfo->LogicalUnit)))))
+	    {
+	      DAC960_Critical("Physical Device %d:%d No Longer Exists\n",
+			      Controller,
+			      PhysicalDeviceInfo->Channel,
+			      PhysicalDeviceInfo->TargetID);
+	      Controller->V2.PhysicalDeviceInformation
+			     [PhysicalDeviceIndex] = NULL;
+	      Controller->V2.InquiryUnitSerialNumber
+			     [PhysicalDeviceIndex] = NULL;
+	      kfree(PhysicalDeviceInfo);
+	      kfree(InquiryUnitSerialNumber);
+	      for (DeviceIndex = PhysicalDeviceIndex;
+		   DeviceIndex < DAC960_V2_MaxPhysicalDevices - 1;
+		   DeviceIndex++)
+		{
+		  Controller->V2.PhysicalDeviceInformation[DeviceIndex] =
+		    Controller->V2.PhysicalDeviceInformation[DeviceIndex+1];
+		  Controller->V2.InquiryUnitSerialNumber[DeviceIndex] =
+		    Controller->V2.InquiryUnitSerialNumber[DeviceIndex+1];
+		}
+	      Controller->V2.PhysicalDeviceInformation
+			     [DAC960_V2_MaxPhysicalDevices-1] = NULL;
+	      Controller->V2.InquiryUnitSerialNumber
+			     [DAC960_V2_MaxPhysicalDevices-1] = NULL;
+	      PhysicalDeviceInfo =
+		Controller->V2.PhysicalDeviceInformation[PhysicalDeviceIndex];
+	      InquiryUnitSerialNumber =
+		Controller->V2.InquiryUnitSerialNumber[PhysicalDeviceIndex];
+	    }
+	  if (PhysicalDeviceInfo == NULL ||
+	      (NewPhysicalDeviceInfo->Channel !=
+	       PhysicalDeviceInfo->Channel) ||
+	      (NewPhysicalDeviceInfo->TargetID !=
+	       PhysicalDeviceInfo->TargetID) ||
+	      (NewPhysicalDeviceInfo->LogicalUnit !=
+	       PhysicalDeviceInfo->LogicalUnit))
+	    {
+	      PhysicalDeviceInfo = (DAC960_V2_PhysicalDeviceInfo_T *)
+		kmalloc(sizeof(DAC960_V2_PhysicalDeviceInfo_T), GFP_ATOMIC);
+	      InquiryUnitSerialNumber =
+		(DAC960_SCSI_Inquiry_UnitSerialNumber_T *)
+		  kmalloc(sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T),
+			  GFP_ATOMIC);
+	      if (PhysicalDeviceInfo == NULL ||
+		  InquiryUnitSerialNumber == NULL)
+		{
+		  kfree(InquiryUnitSerialNumber);
+		  InquiryUnitSerialNumber = NULL;
+		  kfree(PhysicalDeviceInfo);
+		  PhysicalDeviceInfo = NULL;
+		}
+	      DAC960_Critical("Physical Device %d:%d Now Exists%s\n",
+			      Controller,
+			      NewPhysicalDeviceInfo->Channel,
+			      NewPhysicalDeviceInfo->TargetID,
+			      (PhysicalDeviceInfo != NULL
+			       ? "" : " - Allocation Failed"));
+	      if (PhysicalDeviceInfo != NULL)
+		{
+		  memset(PhysicalDeviceInfo, 0,
+			 sizeof(DAC960_V2_PhysicalDeviceInfo_T));
+		  PhysicalDeviceInfo->PhysicalDeviceState =
+		    DAC960_V2_Device_InvalidState;
+		  memset(InquiryUnitSerialNumber, 0,
+			 sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T));
+		  InquiryUnitSerialNumber->PeripheralDeviceType = 0x1F;
+		  for (DeviceIndex = DAC960_V2_MaxPhysicalDevices - 1;
+		       DeviceIndex > PhysicalDeviceIndex;
+		       DeviceIndex--)
+		    {
+		      Controller->V2.PhysicalDeviceInformation[DeviceIndex] =
+			Controller->V2.PhysicalDeviceInformation[DeviceIndex-1];
+		      Controller->V2.InquiryUnitSerialNumber[DeviceIndex] =
+			Controller->V2.InquiryUnitSerialNumber[DeviceIndex-1];
+		    }
+		  Controller->V2.PhysicalDeviceInformation
+				 [PhysicalDeviceIndex] =
+		    PhysicalDeviceInfo;
+		  Controller->V2.InquiryUnitSerialNumber
+				 [PhysicalDeviceIndex] =
+		    InquiryUnitSerialNumber;
+		  Controller->V2.NeedDeviceSerialNumberInformation = true;
+		}
+	    }
+	  if (PhysicalDeviceInfo != NULL)
+	    {
+	      if (NewPhysicalDeviceInfo->PhysicalDeviceState !=
+		  PhysicalDeviceInfo->PhysicalDeviceState)
+		DAC960_Critical(
+		  "Physical Device %d:%d is now %s\n", Controller,
+		  NewPhysicalDeviceInfo->Channel,
+		  NewPhysicalDeviceInfo->TargetID,
+		  (NewPhysicalDeviceInfo->PhysicalDeviceState
+		   == DAC960_V2_Device_Online
+		   ? "ONLINE"
+		   : NewPhysicalDeviceInfo->PhysicalDeviceState
+		     == DAC960_V2_Device_Rebuild
+		     ? "REBUILD"
+		     : NewPhysicalDeviceInfo->PhysicalDeviceState
+		       == DAC960_V2_Device_Missing
+		       ? "MISSING"
+		       : NewPhysicalDeviceInfo->PhysicalDeviceState
+			 == DAC960_V2_Device_Critical
+			 ? "CRITICAL"
+			 : NewPhysicalDeviceInfo->PhysicalDeviceState
+			   == DAC960_V2_Device_Dead
+			   ? "DEAD"
+			   : NewPhysicalDeviceInfo->PhysicalDeviceState
+			     == DAC960_V2_Device_SuspectedDead
+			     ? "SUSPECTED-DEAD"
+			     : NewPhysicalDeviceInfo->PhysicalDeviceState
+			       == DAC960_V2_Device_CommandedOffline
+			       ? "COMMANDED-OFFLINE"
+			       : NewPhysicalDeviceInfo->PhysicalDeviceState
+				 == DAC960_V2_Device_Standby
+				 ? "STANDBY" : "UNKNOWN"));
+	      if ((NewPhysicalDeviceInfo->ParityErrors !=
+		   PhysicalDeviceInfo->ParityErrors) ||
+		  (NewPhysicalDeviceInfo->SoftErrors !=
+		   PhysicalDeviceInfo->SoftErrors) ||
+		  (NewPhysicalDeviceInfo->HardErrors !=
+		   PhysicalDeviceInfo->HardErrors) ||
+		  (NewPhysicalDeviceInfo->MiscellaneousErrors !=
+		   PhysicalDeviceInfo->MiscellaneousErrors) ||
+		  (NewPhysicalDeviceInfo->CommandTimeouts !=
+		   PhysicalDeviceInfo->CommandTimeouts) ||
+		  (NewPhysicalDeviceInfo->Retries !=
+		   PhysicalDeviceInfo->Retries) ||
+		  (NewPhysicalDeviceInfo->Aborts !=
+		   PhysicalDeviceInfo->Aborts) ||
+		  (NewPhysicalDeviceInfo->PredictedFailuresDetected !=
+		   PhysicalDeviceInfo->PredictedFailuresDetected))
+		{
+		  DAC960_Critical("Physical Device %d:%d Errors: "
+				  "Parity = %d, Soft = %d, "
+				  "Hard = %d, Misc = %d\n",
+				  Controller,
+				  NewPhysicalDeviceInfo->Channel,
+				  NewPhysicalDeviceInfo->TargetID,
+				  NewPhysicalDeviceInfo->ParityErrors,
+				  NewPhysicalDeviceInfo->SoftErrors,
+				  NewPhysicalDeviceInfo->HardErrors,
+				  NewPhysicalDeviceInfo->MiscellaneousErrors);
+		  DAC960_Critical("Physical Device %d:%d Errors: "
+				  "Timeouts = %d, Retries = %d, "
+				  "Aborts = %d, Predicted = %d\n",
+				  Controller,
+				  NewPhysicalDeviceInfo->Channel,
+				  NewPhysicalDeviceInfo->TargetID,
+				  NewPhysicalDeviceInfo->CommandTimeouts,
+				  NewPhysicalDeviceInfo->Retries,
+				  NewPhysicalDeviceInfo->Aborts,
+				  NewPhysicalDeviceInfo
+				  ->PredictedFailuresDetected);
+		}
+	      if ((PhysicalDeviceInfo->PhysicalDeviceState
+		   == DAC960_V2_Device_Dead ||
+		   PhysicalDeviceInfo->PhysicalDeviceState
+		   == DAC960_V2_Device_InvalidState) &&
+		  NewPhysicalDeviceInfo->PhysicalDeviceState
+		  != DAC960_V2_Device_Dead)
+		Controller->V2.NeedDeviceSerialNumberInformation = true;
+	      memcpy(PhysicalDeviceInfo, NewPhysicalDeviceInfo,
+		     sizeof(DAC960_V2_PhysicalDeviceInfo_T));
+	    }
+	  NewPhysicalDeviceInfo->LogicalUnit++;
+	  Controller->V2.PhysicalDeviceIndex++;
+	}
+      else if (CommandOpcode == DAC960_V2_GetPhysicalDeviceInfoValid)
+	{
+	  unsigned int DeviceIndex;
+	  for (DeviceIndex = Controller->V2.PhysicalDeviceIndex;
+	       DeviceIndex < DAC960_V2_MaxPhysicalDevices;
+	       DeviceIndex++)
+	    {
+	      DAC960_V2_PhysicalDeviceInfo_T *PhysicalDeviceInfo =
+		Controller->V2.PhysicalDeviceInformation[DeviceIndex];
+	      DAC960_SCSI_Inquiry_UnitSerialNumber_T *InquiryUnitSerialNumber =
+		Controller->V2.InquiryUnitSerialNumber[DeviceIndex];
+	      if (PhysicalDeviceInfo == NULL) break;
+	      DAC960_Critical("Physical Device %d:%d No Longer Exists\n",
+			      Controller,
+			      PhysicalDeviceInfo->Channel,
+			      PhysicalDeviceInfo->TargetID);
+	      Controller->V2.PhysicalDeviceInformation[DeviceIndex] = NULL;
+	      Controller->V2.InquiryUnitSerialNumber[DeviceIndex] = NULL;
+	      kfree(PhysicalDeviceInfo);
+	      kfree(InquiryUnitSerialNumber);
+	    }
+	  Controller->V2.NeedPhysicalDeviceInformation = false;
+	}
+      else if (CommandOpcode == DAC960_V2_GetLogicalDeviceInfoValid &&
+	       CommandStatus == DAC960_V2_NormalCompletion)
+	{
+	  DAC960_V2_LogicalDeviceInfo_T *NewLogicalDeviceInfo =
+	    Controller->V2.NewLogicalDeviceInformation;
+	  unsigned short LogicalDeviceNumber =
+	    NewLogicalDeviceInfo->LogicalDeviceNumber;
+	  DAC960_V2_LogicalDeviceInfo_T *LogicalDeviceInfo =
+	    Controller->V2.LogicalDeviceInformation[LogicalDeviceNumber];
+	  if (LogicalDeviceInfo == NULL)
+	    {
+	      DAC960_V2_PhysicalDevice_T PhysicalDevice;
+	      PhysicalDevice.Controller = 0;
+	      PhysicalDevice.Channel = NewLogicalDeviceInfo->Channel;
+	      PhysicalDevice.TargetID = NewLogicalDeviceInfo->TargetID;
+	      PhysicalDevice.LogicalUnit = NewLogicalDeviceInfo->LogicalUnit;
+	      Controller->V2.LogicalDriveToVirtualDevice[LogicalDeviceNumber] =
+		PhysicalDevice;
+	      LogicalDeviceInfo = (DAC960_V2_LogicalDeviceInfo_T *)
+		kmalloc(sizeof(DAC960_V2_LogicalDeviceInfo_T), GFP_ATOMIC);
+	      Controller->V2.LogicalDeviceInformation[LogicalDeviceNumber] =
+		LogicalDeviceInfo;
+	      DAC960_Critical("Logical Drive %d (/dev/rd/c%dd%d) "
+			      "Now Exists%s\n", Controller,
+			      LogicalDeviceNumber,
+			      Controller->ControllerNumber,
+			      LogicalDeviceNumber,
+			      (LogicalDeviceInfo != NULL
+			       ? "" : " - Allocation Failed"));
+	      if (LogicalDeviceInfo != NULL)
+		{
+		  memset(LogicalDeviceInfo, 0,
+			 sizeof(DAC960_V2_LogicalDeviceInfo_T));
+		  DAC960_ComputeGenericDiskInfo(Controller);
+		}
+	    }
+	  if (LogicalDeviceInfo != NULL)
+	    {
+	      unsigned long LogicalDeviceSize =
+		NewLogicalDeviceInfo->ConfigurableDeviceSize;
+	      if (NewLogicalDeviceInfo->LogicalDeviceState !=
+		  LogicalDeviceInfo->LogicalDeviceState)
+		DAC960_Critical("Logical Drive %d (/dev/rd/c%dd%d) "
+				"is now %s\n", Controller,
+				LogicalDeviceNumber,
+				Controller->ControllerNumber,
+				LogicalDeviceNumber,
+				(NewLogicalDeviceInfo->LogicalDeviceState
+				 == DAC960_V2_LogicalDevice_Online
+				 ? "ONLINE"
+				 : NewLogicalDeviceInfo->LogicalDeviceState
+				   == DAC960_V2_LogicalDevice_Critical
+				   ? "CRITICAL" : "OFFLINE"));
+	      if ((NewLogicalDeviceInfo->SoftErrors !=
+		   LogicalDeviceInfo->SoftErrors) ||
+		  (NewLogicalDeviceInfo->CommandsFailed !=
+		   LogicalDeviceInfo->CommandsFailed) ||
+		  (NewLogicalDeviceInfo->DeferredWriteErrors !=
+		   LogicalDeviceInfo->DeferredWriteErrors))
+		DAC960_Critical("Logical Drive %d (/dev/rd/c%dd%d) Errors: "
+				"Soft = %d, Failed = %d, Deferred Write = %d\n",
+				Controller, LogicalDeviceNumber,
+				Controller->ControllerNumber,
+				LogicalDeviceNumber,
+				NewLogicalDeviceInfo->SoftErrors,
+				NewLogicalDeviceInfo->CommandsFailed,
+				NewLogicalDeviceInfo->DeferredWriteErrors);
+	      if (NewLogicalDeviceInfo->ConsistencyCheckInProgress)
+		DAC960_V2_ReportProgress(Controller,
+					 "Consistency Check",
+					 LogicalDeviceNumber,
+					 NewLogicalDeviceInfo
+					 ->ConsistencyCheckBlockNumber,
+					 LogicalDeviceSize);
+	      else if (NewLogicalDeviceInfo->RebuildInProgress)
+		DAC960_V2_ReportProgress(Controller,
+					 "Rebuild",
+					 LogicalDeviceNumber,
+					 NewLogicalDeviceInfo
+					 ->RebuildBlockNumber,
+					 LogicalDeviceSize);
+	      else if (NewLogicalDeviceInfo->BackgroundInitializationInProgress)
+		DAC960_V2_ReportProgress(Controller,
+					 "Background Initialization",
+					 LogicalDeviceNumber,
+					 NewLogicalDeviceInfo
+					 ->BackgroundInitializationBlockNumber,
+					 LogicalDeviceSize);
+	      else if (NewLogicalDeviceInfo->ForegroundInitializationInProgress)
+		DAC960_V2_ReportProgress(Controller,
+					 "Foreground Initialization",
+					 LogicalDeviceNumber,
+					 NewLogicalDeviceInfo
+					 ->ForegroundInitializationBlockNumber,
+					 LogicalDeviceSize);
+	      else if (NewLogicalDeviceInfo->DataMigrationInProgress)
+		DAC960_V2_ReportProgress(Controller,
+					 "Data Migration",
+					 LogicalDeviceNumber,
+					 NewLogicalDeviceInfo
+					 ->DataMigrationBlockNumber,
+					 LogicalDeviceSize);
+	      else if (NewLogicalDeviceInfo->PatrolOperationInProgress)
+		DAC960_V2_ReportProgress(Controller,
+					 "Patrol Operation",
+					 LogicalDeviceNumber,
+					 NewLogicalDeviceInfo
+					 ->PatrolOperationBlockNumber,
+					 LogicalDeviceSize);
+	      if (LogicalDeviceInfo->BackgroundInitializationInProgress &&
+		  !NewLogicalDeviceInfo->BackgroundInitializationInProgress)
+		DAC960_Progress("Logical Drive %d (/dev/rd/c%dd%d) "
+				"Background Initialization %s\n",
+				Controller,
+				LogicalDeviceNumber,
+				Controller->ControllerNumber,
+				LogicalDeviceNumber,
+				(NewLogicalDeviceInfo->LogicalDeviceControl
+						      .LogicalDeviceInitialized
+				 ? "Completed" : "Failed"));
+	      memcpy(LogicalDeviceInfo, NewLogicalDeviceInfo,
+		     sizeof(DAC960_V2_LogicalDeviceInfo_T));
+	    }
+	  Controller->V2.LogicalDriveFoundDuringScan
+			 [LogicalDeviceNumber] = true;
+	  NewLogicalDeviceInfo->LogicalDeviceNumber++;
+	}
+      else if (CommandOpcode == DAC960_V2_GetLogicalDeviceInfoValid)
+	{
+	  int LogicalDriveNumber;
+	  for (LogicalDriveNumber = 0;
+	       LogicalDriveNumber < DAC960_MaxLogicalDrives;
+	       LogicalDriveNumber++)
+	    {
+	      DAC960_V2_LogicalDeviceInfo_T *LogicalDeviceInfo =
+		Controller->V2.LogicalDeviceInformation[LogicalDriveNumber];
+	      if (LogicalDeviceInfo == NULL ||
+		  Controller->V2.LogicalDriveFoundDuringScan
+				 [LogicalDriveNumber])
+		continue;
+	      DAC960_Critical("Logical Drive %d (/dev/rd/c%dd%d) "
+			      "No Longer Exists\n", Controller,
+			      LogicalDriveNumber,
+			      Controller->ControllerNumber,
+			      LogicalDriveNumber);
+	      Controller->V2.LogicalDeviceInformation
+			     [LogicalDriveNumber] = NULL;
+	      kfree(LogicalDeviceInfo);
+	      Controller->LogicalDriveInitiallyAccessible
+			  [LogicalDriveNumber] = false;
+	      DAC960_ComputeGenericDiskInfo(Controller);
+	    }
+	  Controller->V2.NeedLogicalDeviceInformation = false;
+	}
+      else if (CommandOpcode == DAC960_V2_SCSI_10_Passthru)
+	{
+	  DAC960_SCSI_Inquiry_UnitSerialNumber_T *InquiryUnitSerialNumber =
+	    Controller->V2.InquiryUnitSerialNumber
+			   [Controller->V2.PhysicalDeviceIndex - 1];
+	  if (CommandStatus != DAC960_V2_NormalCompletion)
+	    {
+	      memset(InquiryUnitSerialNumber, 0,
+		     sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T));
+	      InquiryUnitSerialNumber->PeripheralDeviceType = 0x1F;
+	    }
+	  else
+	    memcpy(InquiryUnitSerialNumber,
+		   Controller->V2.NewInquiryUnitSerialNumber,
+		   sizeof(DAC960_SCSI_Inquiry_UnitSerialNumber_T));
+	  Controller->V2.NeedDeviceSerialNumberInformation = false;
+	}
+
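+      /*
+	If the Health Status Buffer indicates that Events are pending,
+	fetch the next Event before continuing the device scans.
+      */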
+      if (Controller->V2.HealthStatusBuffer->NextEventSequenceNumber
+	  - Controller->V2.NextEventSequenceNumber > 0)
+	{
+	  CommandMailbox->GetEvent.CommandOpcode = DAC960_V2_IOCTL;
+	  CommandMailbox->GetEvent.DataTransferSize = sizeof(DAC960_V2_Event_T);
+	  CommandMailbox->GetEvent.EventSequenceNumberHigh16 =
+	    Controller->V2.NextEventSequenceNumber >> 16;
+	  CommandMailbox->GetEvent.ControllerNumber = 0;
+	  CommandMailbox->GetEvent.IOCTL_Opcode =
+	    DAC960_V2_GetEvent;
+	  CommandMailbox->GetEvent.EventSequenceNumberLow16 =
+	    Controller->V2.NextEventSequenceNumber & 0xFFFF;
+	  CommandMailbox->GetEvent.DataTransferMemoryAddress
+				  .ScatterGatherSegments[0]
+				  .SegmentDataPointer =
+	    Controller->V2.EventDMA;
+	  CommandMailbox->GetEvent.DataTransferMemoryAddress
+				  .ScatterGatherSegments[0]
+				  .SegmentByteCount =
+	    CommandMailbox->GetEvent.DataTransferSize;
+	  DAC960_QueueCommand(Command);
+	  return;
+	}
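+      /*
+	Continue the Physical Device Information scan, first fetching
+	the Unit Serial Number of any newly discovered device.
+      */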
+      if (Controller->V2.NeedPhysicalDeviceInformation)
+	{
+	  if (Controller->V2.NeedDeviceSerialNumberInformation)
+	    {
+	      DAC960_SCSI_Inquiry_UnitSerialNumber_T *InquiryUnitSerialNumber =
+                Controller->V2.NewInquiryUnitSerialNumber;
+	      InquiryUnitSerialNumber->PeripheralDeviceType = 0x1F;
+
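+	      /*
+		The scan has already advanced LogicalUnit past the device
+		just reported, hence the "- 1" below.
+	      */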
+	      DAC960_V2_ConstructNewUnitSerialNumber(Controller, CommandMailbox,
+		Controller->V2.NewPhysicalDeviceInformation->Channel,
+		Controller->V2.NewPhysicalDeviceInformation->TargetID,
+		Controller->V2.NewPhysicalDeviceInformation->LogicalUnit - 1);
+
+	      DAC960_QueueCommand(Command);
+	      return;
+	    }
+	  if (Controller->V2.StartPhysicalDeviceInformationScan)
+	    {
+	      Controller->V2.PhysicalDeviceIndex = 0;
+	      Controller->V2.NewPhysicalDeviceInformation->Channel = 0;
+	      Controller->V2.NewPhysicalDeviceInformation->TargetID = 0;
+	      Controller->V2.NewPhysicalDeviceInformation->LogicalUnit = 0;
+	      Controller->V2.StartPhysicalDeviceInformationScan = false;
+	    }
+	  CommandMailbox->PhysicalDeviceInfo.CommandOpcode = DAC960_V2_IOCTL;
+	  CommandMailbox->PhysicalDeviceInfo.DataTransferSize =
+	    sizeof(DAC960_V2_PhysicalDeviceInfo_T);
+	  CommandMailbox->PhysicalDeviceInfo.PhysicalDevice.LogicalUnit =
+	    Controller->V2.NewPhysicalDeviceInformation->LogicalUnit;
+	  CommandMailbox->PhysicalDeviceInfo.PhysicalDevice.TargetID =
+	    Controller->V2.NewPhysicalDeviceInformation->TargetID;
+	  CommandMailbox->PhysicalDeviceInfo.PhysicalDevice.Channel =
+	    Controller->V2.NewPhysicalDeviceInformation->Channel;
+	  CommandMailbox->PhysicalDeviceInfo.IOCTL_Opcode =
+	    DAC960_V2_GetPhysicalDeviceInfoValid;
+	  CommandMailbox->PhysicalDeviceInfo.DataTransferMemoryAddress
+					    .ScatterGatherSegments[0]
+					    .SegmentDataPointer =
+	    Controller->V2.NewPhysicalDeviceInformationDMA;
+	  CommandMailbox->PhysicalDeviceInfo.DataTransferMemoryAddress
+					    .ScatterGatherSegments[0]
+					    .SegmentByteCount =
+	    CommandMailbox->PhysicalDeviceInfo.DataTransferSize;
+	  DAC960_QueueCommand(Command);
+	  return;
+	}
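+      /*
+	Once the Physical Device scan is complete, continue with the
+	Logical Device Information scan.
+      */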
+      if (Controller->V2.NeedLogicalDeviceInformation)
+	{
+	  if (Controller->V2.StartLogicalDeviceInformationScan)
+	    {
+	      int LogicalDriveNumber;
+	      for (LogicalDriveNumber = 0;
+		   LogicalDriveNumber < DAC960_MaxLogicalDrives;
+		   LogicalDriveNumber++)
+		Controller->V2.LogicalDriveFoundDuringScan
+			       [LogicalDriveNumber] = false;
+	      Controller->V2.NewLogicalDeviceInformation->LogicalDeviceNumber = 0;
+	      Controller->V2.StartLogicalDeviceInformationScan = false;
+	    }
+	  CommandMailbox->LogicalDeviceInfo.CommandOpcode = DAC960_V2_IOCTL;
+	  CommandMailbox->LogicalDeviceInfo.DataTransferSize =
+	    sizeof(DAC960_V2_LogicalDeviceInfo_T);
+	  CommandMailbox->LogicalDeviceInfo.LogicalDevice.LogicalDeviceNumber =
+	    Controller->V2.NewLogicalDeviceInformation->LogicalDeviceNumber;
+	  CommandMailbox->LogicalDeviceInfo.IOCTL_Opcode =
+	    DAC960_V2_GetLogicalDeviceInfoValid;
+	  CommandMailbox->LogicalDeviceInfo.DataTransferMemoryAddress
+					   .ScatterGatherSegments[0]
+					   .SegmentDataPointer =
+	    Controller->V2.NewLogicalDeviceInformationDMA;
+	  CommandMailbox->LogicalDeviceInfo.DataTransferMemoryAddress
+					   .ScatterGatherSegments[0]
+					   .SegmentByteCount =
+	    CommandMailbox->LogicalDeviceInfo.DataTransferSize;
+	  DAC960_QueueCommand(Command);
+	  return;
+	}
+      Controller->MonitoringTimerCount++;
+      Controller->MonitoringTimer.expires =
+	jiffies + DAC960_HealthStatusMonitoringInterval;
+      add_timer(&Controller->MonitoringTimer);
+    }
+  if (CommandType == DAC960_ImmediateCommand)
+    {
+      complete(Command->Completion);
+      Command->Completion = NULL;
+      return;
+    }
+  if (CommandType == DAC960_QueuedCommand)
+    {
+      DAC960_V2_KernelCommand_T *KernelCommand = Command->V2.KernelCommand;
+      KernelCommand->CommandStatus = CommandStatus;
+      KernelCommand->RequestSenseLength = Command->V2.RequestSenseLength;
+      KernelCommand->DataTransferLength = Command->V2.DataTransferResidue;
+      Command->V2.KernelCommand = NULL;
+      DAC960_DeallocateCommand(Command);
+      KernelCommand->CompletionFunction(KernelCommand);
+      return;
+    }
+  /*
+    Queue a Status Monitoring Command to the Controller using the just
+    completed Command if one was deferred previously due to lack of a
+    free Command when the Monitoring Timer Function was called.
+  */
+  if (Controller->MonitoringCommandDeferred)
+    {
+      Controller->MonitoringCommandDeferred = false;
+      DAC960_V2_QueueMonitoringCommand(Command);
+      return;
+    }
+  /*
+    Deallocate the Command.
+  */
+  DAC960_DeallocateCommand(Command);
+  /*
+    Wake up any processes waiting on a free Command.
+  */
+  wake_up(&Controller->CommandWaitQueue);
+}
+
+
+/*
+  DAC960_BA_InterruptHandler handles hardware interrupts from DAC960 BA Series
+  Controllers.
+*/
+
+static irqreturn_t DAC960_BA_InterruptHandler(int IRQ_Channel,
+				       void *DeviceIdentifier,
+				       struct pt_regs *InterruptRegisters)
+{
+  DAC960_Controller_T *Controller = (DAC960_Controller_T *) DeviceIdentifier;
+  void __iomem *ControllerBaseAddress = Controller->BaseAddress;
+  DAC960_V2_StatusMailbox_T *NextStatusMailbox;
+  unsigned long flags;
+
+  spin_lock_irqsave(&Controller->queue_lock, flags);
+  DAC960_BA_AcknowledgeInterrupt(ControllerBaseAddress);
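+  /*
+    A nonzero Command Identifier marks a Status Mailbox as filled;
+    clearing Words[0] hands the Mailbox back to the Controller.  The
+    Mailboxes form a ring that wraps from Last back to First.
+  */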
+  NextStatusMailbox = Controller->V2.NextStatusMailbox;
+  while (NextStatusMailbox->Fields.CommandIdentifier > 0)
+    {
+      DAC960_V2_CommandIdentifier_T CommandIdentifier =
+	NextStatusMailbox->Fields.CommandIdentifier;
+      DAC960_Command_T *Command = Controller->Commands[CommandIdentifier-1];
+      Command->V2.CommandStatus = NextStatusMailbox->Fields.CommandStatus;
+      Command->V2.RequestSenseLength =
+	NextStatusMailbox->Fields.RequestSenseLength;
+      Command->V2.DataTransferResidue =
+	NextStatusMailbox->Fields.DataTransferResidue;
+      NextStatusMailbox->Words[0] = 0;
+      if (++NextStatusMailbox > Controller->V2.LastStatusMailbox)
+	NextStatusMailbox = Controller->V2.FirstStatusMailbox;
+      DAC960_V2_ProcessCompletedCommand(Command);
+    }
+  Controller->V2.NextStatusMailbox = NextStatusMailbox;
+  /*
+    Attempt to remove additional I/O Requests from the Controller's
+    I/O Request Queue and queue them to the Controller.
+  */
+  DAC960_ProcessRequest(Controller);
+  spin_unlock_irqrestore(&Controller->queue_lock, flags);
+  return IRQ_HANDLED;
+}
+
+
+/*
+  DAC960_LP_InterruptHandler handles hardware interrupts from DAC960 LP Series
+  Controllers.
+*/
+
+static irqreturn_t DAC960_LP_InterruptHandler(int IRQ_Channel,
+				       void *DeviceIdentifier,
+				       struct pt_regs *InterruptRegisters)
+{
+  DAC960_Controller_T *Controller = (DAC960_Controller_T *) DeviceIdentifier;
+  void __iomem *ControllerBaseAddress = Controller->BaseAddress;
+  DAC960_V2_StatusMailbox_T *NextStatusMailbox;
+  unsigned long flags;
+
+  spin_lock_irqsave(&Controller->queue_lock, flags);
+  DAC960_LP_AcknowledgeInterrupt(ControllerBaseAddress);
+  NextStatusMailbox = Controller->V2.NextStatusMailbox;
+  while (NextStatusMailbox->Fields.CommandIdentifier > 0)
+    {
+      DAC960_V2_CommandIdentifier_T CommandIdentifier =
+	NextStatusMailbox->Fields.CommandIdentifier;
+      DAC960_Command_T *Command = Controller->Commands[CommandIdentifier-1];
+      Command->V2.CommandStatus = NextStatusMailbox->Fields.CommandStatus;
+      Command->V2.RequestSenseLength =
+	NextStatusMailbox->Fields.RequestSenseLength;
+      Command->V2.DataTransferResidue =
+	NextStatusMailbox->Fields.DataTransferResidue;
+      NextStatusMailbox->Words[0] = 0;
+      if (++NextStatusMailbox > Controller->V2.LastStatusMailbox)
+	NextStatusMailbox = Controller->V2.FirstStatusMailbox;
+      DAC960_V2_ProcessCompletedCommand(Command);
+    }
+  Controller->V2.NextStatusMailbox = NextStatusMailbox;
+  /*
+    Attempt to remove additional I/O Requests from the Controller's
+    I/O Request Queue and queue them to the Controller.
+  */
+  DAC960_ProcessRequest(Controller);
+  spin_unlock_irqrestore(&Controller->queue_lock, flags);
+  return IRQ_HANDLED;
+}
+
+
+/*
+  DAC960_LA_InterruptHandler handles hardware interrupts from DAC960 LA Series
+  Controllers.
+*/
+
+static irqreturn_t DAC960_LA_InterruptHandler(int IRQ_Channel,
+				       void *DeviceIdentifier,
+				       struct pt_regs *InterruptRegisters)
+{
+  DAC960_Controller_T *Controller = (DAC960_Controller_T *) DeviceIdentifier;
+  void __iomem *ControllerBaseAddress = Controller->BaseAddress;
+  DAC960_V1_StatusMailbox_T *NextStatusMailbox;
+  unsigned long flags;
+
+  spin_lock_irqsave(&Controller->queue_lock, flags);
+  DAC960_LA_AcknowledgeInterrupt(ControllerBaseAddress);
+  NextStatusMailbox = Controller->V1.NextStatusMailbox;
+  while (NextStatusMailbox->Fields.Valid)
+    {
+      DAC960_V1_CommandIdentifier_T CommandIdentifier =
+	NextStatusMailbox->Fields.CommandIdentifier;
+      DAC960_Command_T *Command = Controller->Commands[CommandIdentifier-1];
+      Command->V1.CommandStatus = NextStatusMailbox->Fields.CommandStatus;
+      NextStatusMailbox->Word = 0;
+      if (++NextStatusMailbox > Controller->V1.LastStatusMailbox)
+	NextStatusMailbox = Controller->V1.FirstStatusMailbox;
+      DAC960_V1_ProcessCompletedCommand(Command);
+    }
+  Controller->V1.NextStatusMailbox = NextStatusMailbox;
+  /*
+    Attempt to remove additional I/O Requests from the Controller's
+    I/O Request Queue and queue them to the Controller.
+  */
+  DAC960_ProcessRequest(Controller);
+  spin_unlock_irqrestore(&Controller->queue_lock, flags);
+  return IRQ_HANDLED;
+}
+
+
+/*
+  DAC960_PG_InterruptHandler handles hardware interrupts from DAC960 PG Series
+  Controllers.
+*/
+
+static irqreturn_t DAC960_PG_InterruptHandler(int IRQ_Channel,
+				       void *DeviceIdentifier,
+				       struct pt_regs *InterruptRegisters)
+{
+  DAC960_Controller_T *Controller = (DAC960_Controller_T *) DeviceIdentifier;
+  void __iomem *ControllerBaseAddress = Controller->BaseAddress;
+  DAC960_V1_StatusMailbox_T *NextStatusMailbox;
+  unsigned long flags;
+
+  spin_lock_irqsave(&Controller->queue_lock, flags);
+  DAC960_PG_AcknowledgeInterrupt(ControllerBaseAddress);
+  NextStatusMailbox = Controller->V1.NextStatusMailbox;
+  while (NextStatusMailbox->Fields.Valid)
+    {
+      DAC960_V1_CommandIdentifier_T CommandIdentifier =
+	NextStatusMailbox->Fields.CommandIdentifier;
+      DAC960_Command_T *Command = Controller->Commands[CommandIdentifier-1];
+      Command->V1.CommandStatus = NextStatusMailbox->Fields.CommandStatus;
+      NextStatusMailbox->Word = 0;
+      if (++NextStatusMailbox > Controller->V1.LastStatusMailbox)
+	NextStatusMailbox = Controller->V1.FirstStatusMailbox;
+      DAC960_V1_ProcessCompletedCommand(Command);
+    }
+  Controller->V1.NextStatusMailbox = NextStatusMailbox;
+  /*
+    Attempt to remove additional I/O Requests from the Controller's
+    I/O Request Queue and queue them to the Controller.
+  */
+  DAC960_ProcessRequest(Controller);
+  spin_unlock_irqrestore(&Controller->queue_lock, flags);
+  return IRQ_HANDLED;
+}
+
+
+/*
+  DAC960_PD_InterruptHandler handles hardware interrupts from DAC960 PD Series
+  Controllers.
+*/
+
+static irqreturn_t DAC960_PD_InterruptHandler(int IRQ_Channel,
+				       void *DeviceIdentifier,
+				       struct pt_regs *InterruptRegisters)
+{
+  DAC960_Controller_T *Controller = (DAC960_Controller_T *) DeviceIdentifier;
+  void __iomem *ControllerBaseAddress = Controller->BaseAddress;
+  unsigned long flags;
+
+  spin_lock_irqsave(&Controller->queue_lock, flags);
+  while (DAC960_PD_StatusAvailableP(ControllerBaseAddress))
+    {
+      DAC960_V1_CommandIdentifier_T CommandIdentifier =
+	DAC960_PD_ReadStatusCommandIdentifier(ControllerBaseAddress);
+      DAC960_Command_T *Command = Controller->Commands[CommandIdentifier-1];
+      Command->V1.CommandStatus =
+	DAC960_PD_ReadStatusRegister(ControllerBaseAddress);
+      DAC960_PD_AcknowledgeInterrupt(ControllerBaseAddress);
+      DAC960_PD_AcknowledgeStatus(ControllerBaseAddress);
+      DAC960_V1_ProcessCompletedCommand(Command);
+    }
+  /*
+    Attempt to remove additional I/O Requests from the Controller's
+    I/O Request Queue and queue them to the Controller.
+  */
+  DAC960_ProcessRequest(Controller);
+  spin_unlock_irqrestore(&Controller->queue_lock, flags);
+  return IRQ_HANDLED;
+}
+
+
+/*
+  DAC960_P_InterruptHandler handles hardware interrupts from DAC960 P Series
+  Controllers.
+
+  Translations of DAC960_V1_Enquiry and DAC960_V1_GetDeviceState rely
+  on the data having been placed into DAC960_Controller_T, rather than
+  an arbitrary buffer.
+*/
+
+static irqreturn_t DAC960_P_InterruptHandler(int IRQ_Channel,
+				      void *DeviceIdentifier,
+				      struct pt_regs *InterruptRegisters)
+{
+  DAC960_Controller_T *Controller = (DAC960_Controller_T *) DeviceIdentifier;
+  void __iomem *ControllerBaseAddress = Controller->BaseAddress;
+  unsigned long flags;
+
+  spin_lock_irqsave(&Controller->queue_lock, flags);
+  while (DAC960_PD_StatusAvailableP(ControllerBaseAddress))
+    {
+      DAC960_V1_CommandIdentifier_T CommandIdentifier =
+	DAC960_PD_ReadStatusCommandIdentifier(ControllerBaseAddress);
+      DAC960_Command_T *Command = Controller->Commands[CommandIdentifier-1];
+      DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
+      DAC960_V1_CommandOpcode_T CommandOpcode =
+	CommandMailbox->Common.CommandOpcode;
+      Command->V1.CommandStatus =
+	DAC960_PD_ReadStatusRegister(ControllerBaseAddress);
+      DAC960_PD_AcknowledgeInterrupt(ControllerBaseAddress);
+      DAC960_PD_AcknowledgeStatus(ControllerBaseAddress);
+      switch (CommandOpcode)
+	{
+	case DAC960_V1_Enquiry_Old:
+	  Command->V1.CommandMailbox.Common.CommandOpcode = DAC960_V1_Enquiry;
+	  DAC960_P_To_PD_TranslateEnquiry(Controller->V1.NewEnquiry);
+	  break;
+	case DAC960_V1_GetDeviceState_Old:
+	  Command->V1.CommandMailbox.Common.CommandOpcode =
+	    					DAC960_V1_GetDeviceState;
+	  DAC960_P_To_PD_TranslateDeviceState(Controller->V1.NewDeviceState);
+	  break;
+	case DAC960_V1_Read_Old:
+	  Command->V1.CommandMailbox.Common.CommandOpcode = DAC960_V1_Read;
+	  DAC960_P_To_PD_TranslateReadWriteCommand(CommandMailbox);
+	  break;
+	case DAC960_V1_Write_Old:
+	  Command->V1.CommandMailbox.Common.CommandOpcode = DAC960_V1_Write;
+	  DAC960_P_To_PD_TranslateReadWriteCommand(CommandMailbox);
+	  break;
+	case DAC960_V1_ReadWithScatterGather_Old:
+	  Command->V1.CommandMailbox.Common.CommandOpcode =
+	    DAC960_V1_ReadWithScatterGather;
+	  DAC960_P_To_PD_TranslateReadWriteCommand(CommandMailbox);
+	  break;
+	case DAC960_V1_WriteWithScatterGather_Old:
+	  Command->V1.CommandMailbox.Common.CommandOpcode =
+	    DAC960_V1_WriteWithScatterGather;
+	  DAC960_P_To_PD_TranslateReadWriteCommand(CommandMailbox);
+	  break;
+	default:
+	  break;
+	}
+      DAC960_V1_ProcessCompletedCommand(Command);
+    }
+  /*
+    Attempt to remove additional I/O Requests from the Controller's
+    I/O Request Queue and queue them to the Controller.
+  */
+  DAC960_ProcessRequest(Controller);
+  spin_unlock_irqrestore(&Controller->queue_lock, flags);
+  return IRQ_HANDLED;
+}
+
+
+/*
+  DAC960_V1_QueueMonitoringCommand queues a Monitoring Command to DAC960 V1
+  Firmware Controllers.
+*/
+
+static void DAC960_V1_QueueMonitoringCommand(DAC960_Command_T *Command)
+{
+  DAC960_Controller_T *Controller = Command->Controller;
+  DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
+  DAC960_V1_ClearCommand(Command);
+  Command->CommandType = DAC960_MonitoringCommand;
+  CommandMailbox->Type3.CommandOpcode = DAC960_V1_Enquiry;
+  CommandMailbox->Type3.BusAddress = Controller->V1.NewEnquiryDMA;
+  DAC960_QueueCommand(Command);
+}
+
+
+/*
+  DAC960_V2_QueueMonitoringCommand queues a Monitoring Command to DAC960 V2
+  Firmware Controllers.
+*/
+
+static void DAC960_V2_QueueMonitoringCommand(DAC960_Command_T *Command)
+{
+  DAC960_Controller_T *Controller = Command->Controller;
+  DAC960_V2_CommandMailbox_T *CommandMailbox = &Command->V2.CommandMailbox;
+  DAC960_V2_ClearCommand(Command);
+  Command->CommandType = DAC960_MonitoringCommand;
+  CommandMailbox->ControllerInfo.CommandOpcode = DAC960_V2_IOCTL;
+  CommandMailbox->ControllerInfo.CommandControlBits
+				.DataTransferControllerToHost = true;
+  CommandMailbox->ControllerInfo.CommandControlBits
+				.NoAutoRequestSense = true;
+  CommandMailbox->ControllerInfo.DataTransferSize =
+    sizeof(DAC960_V2_ControllerInfo_T);
+  CommandMailbox->ControllerInfo.ControllerNumber = 0;
+  CommandMailbox->ControllerInfo.IOCTL_Opcode = DAC960_V2_GetControllerInfo;
+  CommandMailbox->ControllerInfo.DataTransferMemoryAddress
+				.ScatterGatherSegments[0]
+				.SegmentDataPointer =
+    Controller->V2.NewControllerInformationDMA;
+  CommandMailbox->ControllerInfo.DataTransferMemoryAddress
+				.ScatterGatherSegments[0]
+				.SegmentByteCount =
+    CommandMailbox->ControllerInfo.DataTransferSize;
+  DAC960_QueueCommand(Command);
+}
+
+
+/*
+  DAC960_MonitoringTimerFunction is the timer function for monitoring
+  the status of DAC960 Controllers.
+*/
+
+static void DAC960_MonitoringTimerFunction(unsigned long TimerData)
+{
+  DAC960_Controller_T *Controller = (DAC960_Controller_T *) TimerData;
+  DAC960_Command_T *Command;
+  unsigned long flags;
+
+  if (Controller->FirmwareType == DAC960_V1_Controller)
+    {
+      spin_lock_irqsave(&Controller->queue_lock, flags);
+      /*
+	Queue a Status Monitoring Command to Controller.
+      */
+      Command = DAC960_AllocateCommand(Controller);
+      if (Command != NULL)
+	DAC960_V1_QueueMonitoringCommand(Command);
+      else Controller->MonitoringCommandDeferred = true;
+      spin_unlock_irqrestore(&Controller->queue_lock, flags);
+    }
+  else
+    {
+      DAC960_V2_ControllerInfo_T *ControllerInfo =
+	&Controller->V2.ControllerInformation;
+      unsigned int StatusChangeCounter =
+	Controller->V2.HealthStatusBuffer->StatusChangeCounter;
+      boolean ForceMonitoringCommand = false;
+      if (jiffies - Controller->SecondaryMonitoringTime
+	  > DAC960_SecondaryMonitoringInterval)
+	{
+	  int LogicalDriveNumber;
+	  for (LogicalDriveNumber = 0;
+	       LogicalDriveNumber < DAC960_MaxLogicalDrives;
+	       LogicalDriveNumber++)
+	    {
+	      DAC960_V2_LogicalDeviceInfo_T *LogicalDeviceInfo =
+		Controller->V2.LogicalDeviceInformation[LogicalDriveNumber];
+	      if (LogicalDeviceInfo == NULL) continue;
+	      if (!LogicalDeviceInfo->LogicalDeviceControl
+				     .LogicalDeviceInitialized)
+		{
+		  ForceMonitoringCommand = true;
+		  break;
+		}
+	    }
+	  Controller->SecondaryMonitoringTime = jiffies;
+	}
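+      /*
+	Re-arm the timer without queueing a Monitoring Command if nothing
+	has changed: no new status or Events, no forced update, and either
+	no long operations are active or the Primary Monitoring Interval
+	has not yet elapsed.
+      */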
+      if (StatusChangeCounter == Controller->V2.StatusChangeCounter &&
+	  Controller->V2.HealthStatusBuffer->NextEventSequenceNumber
+	  == Controller->V2.NextEventSequenceNumber &&
+	  (ControllerInfo->BackgroundInitializationsActive +
+	   ControllerInfo->LogicalDeviceInitializationsActive +
+	   ControllerInfo->PhysicalDeviceInitializationsActive +
+	   ControllerInfo->ConsistencyChecksActive +
+	   ControllerInfo->RebuildsActive +
+	   ControllerInfo->OnlineExpansionsActive == 0 ||
+	   jiffies - Controller->PrimaryMonitoringTime
+	   < DAC960_MonitoringTimerInterval) &&
+	  !ForceMonitoringCommand)
+	{
+	  Controller->MonitoringTimer.expires =
+	    jiffies + DAC960_HealthStatusMonitoringInterval;
+	  add_timer(&Controller->MonitoringTimer);
+	  return;
+	}
+      Controller->V2.StatusChangeCounter = StatusChangeCounter;
+      Controller->PrimaryMonitoringTime = jiffies;
+
+      spin_lock_irqsave(&Controller->queue_lock, flags);
+      /*
+	Queue a Status Monitoring Command to Controller.
+      */
+      Command = DAC960_AllocateCommand(Controller);
+      if (Command != NULL)
+	DAC960_V2_QueueMonitoringCommand(Command);
+      else Controller->MonitoringCommandDeferred = true;
+      spin_unlock_irqrestore(&Controller->queue_lock, flags);
+      /*
+	Wake up any processes waiting on a Health Status Buffer change.
+      */
+      wake_up(&Controller->HealthStatusWaitQueue);
+    }
+}
+
+/*
+  DAC960_CheckStatusBuffer verifies that there is room to hold ByteCount
+  additional bytes in the Combined Status Buffer and grows the buffer if
+  necessary.  It returns true if there is enough room and false otherwise.
+*/
+
+static boolean DAC960_CheckStatusBuffer(DAC960_Controller_T *Controller,
+					unsigned int ByteCount)
+{
+  unsigned char *NewStatusBuffer;
+  if (Controller->InitialStatusLength + 1 +
+      Controller->CurrentStatusLength + ByteCount + 1 <=
+      Controller->CombinedStatusBufferLength)
+    return true;
+  if (Controller->CombinedStatusBufferLength == 0)
+    {
+      unsigned int NewStatusBufferLength = DAC960_InitialStatusBufferSize;
+      while (NewStatusBufferLength < ByteCount)
+	NewStatusBufferLength *= 2;
+      Controller->CombinedStatusBuffer =
+	(unsigned char *) kmalloc(NewStatusBufferLength, GFP_ATOMIC);
+      if (Controller->CombinedStatusBuffer == NULL) return false;
+      Controller->CombinedStatusBufferLength = NewStatusBufferLength;
+      return true;
+    }
+  NewStatusBuffer = (unsigned char *)
+    kmalloc(2 * Controller->CombinedStatusBufferLength, GFP_ATOMIC);
+  if (NewStatusBuffer == NULL)
+    {
+      DAC960_Warning("Unable to expand Combined Status Buffer - Truncating\n",
+		     Controller);
+      return false;
+    }
+  memcpy(NewStatusBuffer, Controller->CombinedStatusBuffer,
+	 Controller->CombinedStatusBufferLength);
+  kfree(Controller->CombinedStatusBuffer);
+  Controller->CombinedStatusBuffer = NewStatusBuffer;
+  Controller->CombinedStatusBufferLength *= 2;
+  Controller->CurrentStatusBuffer =
+    &NewStatusBuffer[Controller->InitialStatusLength + 1];
+  return true;
+}
+
+
+/*
+  DAC960_Message prints Driver Messages.
+*/
+
+static void DAC960_Message(DAC960_MessageLevel_T MessageLevel,
+			   unsigned char *Format,
+			   DAC960_Controller_T *Controller,
+			   ...)
+{
+  static unsigned char Buffer[DAC960_LineBufferSize];
+  static boolean BeginningOfLine = true;
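+  /*
+    Buffer and BeginningOfLine are shared by all Controllers; callers
+    are presumed to be serialized, since message formatting here is
+    not reentrant.
+  */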
+  va_list Arguments;
+  int Length;
+  va_start(Arguments, Controller);
+  Length = vsnprintf(Buffer, sizeof(Buffer), Format, Arguments);
+  if (Length >= (int) sizeof(Buffer))
+    Length = sizeof(Buffer) - 1;
+  va_end(Arguments);
+  if (Controller == NULL)
+    printk("%sDAC960#%d: %s", DAC960_MessageLevelMap[MessageLevel],
+	   DAC960_ControllerCount, Buffer);
+  else if (MessageLevel == DAC960_AnnounceLevel ||
+	   MessageLevel == DAC960_InfoLevel)
+    {
+      if (!Controller->ControllerInitialized)
+	{
+	  if (DAC960_CheckStatusBuffer(Controller, Length))
+	    {
+	      strcpy(&Controller->CombinedStatusBuffer
+				  [Controller->InitialStatusLength],
+		     Buffer);
+	      Controller->InitialStatusLength += Length;
+	      Controller->CurrentStatusBuffer =
+		&Controller->CombinedStatusBuffer
+			     [Controller->InitialStatusLength + 1];
+	    }
+	  if (MessageLevel == DAC960_AnnounceLevel)
+	    {
+	      static int AnnouncementLines = 0;
+	      if (++AnnouncementLines <= 2)
+		printk("%sDAC960: %s", DAC960_MessageLevelMap[MessageLevel],
+		       Buffer);
+	    }
+	  else
+	    {
+	      if (BeginningOfLine)
+		{
+		  if (Buffer[0] != '\n' || Length > 1)
+		    printk("%sDAC960#%d: %s",
+			   DAC960_MessageLevelMap[MessageLevel],
+			   Controller->ControllerNumber, Buffer);
+		}
+	      else printk("%s", Buffer);
+	    }
+	}
+      else if (DAC960_CheckStatusBuffer(Controller, Length))
+	{
+	  strcpy(&Controller->CurrentStatusBuffer[
+		    Controller->CurrentStatusLength], Buffer);
+	  Controller->CurrentStatusLength += Length;
+	}
+    }
+  else if (MessageLevel == DAC960_ProgressLevel)
+    {
+      strcpy(Controller->ProgressBuffer, Buffer);
+      Controller->ProgressBufferLength = Length;
+      if (Controller->EphemeralProgressMessage)
+	{
+	  if (jiffies - Controller->LastProgressReportTime
+	      >= DAC960_ProgressReportingInterval)
+	    {
+	      printk("%sDAC960#%d: %s", DAC960_MessageLevelMap[MessageLevel],
+		     Controller->ControllerNumber, Buffer);
+	      Controller->LastProgressReportTime = jiffies;
+	    }
+	}
+      else printk("%sDAC960#%d: %s", DAC960_MessageLevelMap[MessageLevel],
+		  Controller->ControllerNumber, Buffer);
+    }
+  else if (MessageLevel == DAC960_UserCriticalLevel)
+    {
+      strcpy(&Controller->UserStatusBuffer[Controller->UserStatusLength],
+	     Buffer);
+      Controller->UserStatusLength += Length;
+      if (Buffer[0] != '\n' || Length > 1)
+	printk("%sDAC960#%d: %s", DAC960_MessageLevelMap[MessageLevel],
+	       Controller->ControllerNumber, Buffer);
+    }
+  else
+    {
+      if (BeginningOfLine)
+	printk("%sDAC960#%d: %s", DAC960_MessageLevelMap[MessageLevel],
+	       Controller->ControllerNumber, Buffer);
+      else printk("%s", Buffer);
+    }
+  BeginningOfLine = (Buffer[Length-1] == '\n');
+}
+
+
+/*
+  DAC960_ParsePhysicalDevice parses spaces followed by a Physical Device
+  Channel:TargetID specification from a User Command string.  It updates
+  Channel and TargetID and returns true on success and false on failure.
+*/
+
+static boolean DAC960_ParsePhysicalDevice(DAC960_Controller_T *Controller,
+					  char *UserCommandString,
+					  unsigned char *Channel,
+					  unsigned char *TargetID)
+{
+  char *NewUserCommandString = UserCommandString;
+  unsigned long XChannel, XTargetID;
+  while (*UserCommandString == ' ') UserCommandString++;
+  if (UserCommandString == NewUserCommandString)
+    return false;
+  XChannel = simple_strtoul(UserCommandString, &NewUserCommandString, 10);
+  if (NewUserCommandString == UserCommandString ||
+      *NewUserCommandString != ':' ||
+      XChannel >= Controller->Channels)
+    return false;
+  UserCommandString = ++NewUserCommandString;
+  XTargetID = simple_strtoul(UserCommandString, &NewUserCommandString, 10);
+  if (NewUserCommandString == UserCommandString ||
+      *NewUserCommandString != '\0' ||
+      XTargetID >= Controller->Targets)
+    return false;
+  *Channel = XChannel;
+  *TargetID = XTargetID;
+  return true;
+}
+
+
+/*
+  DAC960_ParseLogicalDrive parses spaces followed by a Logical Drive Number
+  specification from a User Command string.  It updates LogicalDriveNumber and
+  returns true on success and false on failure.
+*/
+
+static boolean DAC960_ParseLogicalDrive(DAC960_Controller_T *Controller,
+					char *UserCommandString,
+					unsigned char *LogicalDriveNumber)
+{
+  char *NewUserCommandString = UserCommandString;
+  unsigned long XLogicalDriveNumber;
+  while (*UserCommandString == ' ') UserCommandString++;
+  if (UserCommandString == NewUserCommandString)
+    return false;
+  XLogicalDriveNumber =
+    simple_strtoul(UserCommandString, &NewUserCommandString, 10);
+  if (NewUserCommandString == UserCommandString ||
+      *NewUserCommandString != '\0' ||
+      XLogicalDriveNumber > DAC960_MaxLogicalDrives - 1)
+    return false;
+  *LogicalDriveNumber = XLogicalDriveNumber;
+  return true;
+}
+
+
+/*
+  DAC960_V1_SetDeviceState sets the Device State for a Physical Device for
+  DAC960 V1 Firmware Controllers.
+*/
+
+static void DAC960_V1_SetDeviceState(DAC960_Controller_T *Controller,
+				     DAC960_Command_T *Command,
+				     unsigned char Channel,
+				     unsigned char TargetID,
+				     DAC960_V1_PhysicalDeviceState_T
+				       DeviceState,
+				     const unsigned char *DeviceStateString)
+{
+  DAC960_V1_CommandMailbox_T *CommandMailbox = &Command->V1.CommandMailbox;
+  CommandMailbox->Type3D.CommandOpcode = DAC960_V1_StartDevice;
+  CommandMailbox->Type3D.Channel = Channel;
+  CommandMailbox->Type3D.TargetID = TargetID;
+  CommandMailbox->Type3D.DeviceState = DeviceState;
+  CommandMailbox->Type3D.Modifier = 0;
+  DAC960_ExecuteCommand(Command);
+  switch (Command->V1.CommandStatus)
+    {
+    case DAC960_V1_NormalCompletion:
+      DAC960_UserCritical("%s of Physical Device %d:%d Succeeded\n", Controller,
+			  DeviceStateString, Channel, TargetID);
+      break;
+    case DAC960_V1_UnableToStartDevice:
+      DAC960_UserCritical("%s of Physical Device %d:%d Failed - "
+			  "Unable to Start Device\n", Controller,
+			  DeviceStateString, Channel, TargetID);
+      break;
+    case DAC960_V1_NoDeviceAtAddress:
+      DAC960_UserCritical("%s of Physical Device %d:%d Failed - "
+			  "No Device at Address\n", Controller,
+			  DeviceStateString, Channel, TargetID);
+      break;
+    case DAC960_V1_InvalidChannelOrTargetOrModifier:
+      DAC960_UserCritical("%s of Physical Device %d:%d Failed - "
+			  "Invalid Channel or Target or Modifier\n",
+			  Controller, DeviceStateString, Channel, TargetID);
+      break;
+    case DAC960_V1_ChannelBusy:
+      DAC960_UserCritical("%s of Physical Device %d:%d Failed - "
+			  "Channel Busy\n", Controller,
+			  DeviceStateString, Channel, TargetID);
+      break;
+    default:
+      DAC960_UserCritical("%s of Physical Device %d:%d Failed - "
+			  "Unexpected Status %04X\n", Controller,
+			  DeviceStateString, Channel, TargetID,
+			  Command->V1.CommandStatus);
+      break;
+    }
+}
+
+
+/*
+  DAC960_V1_ExecuteUserCommand executes a User Command for DAC960 V1 Firmware
+  Controllers.
+*/
+
+static boolean DAC960_V1_ExecuteUserCommand(DAC960_Controller_T *Controller,
+					    unsigned char *UserCommand)
+{
+  DAC960_Command_T *Command;
+  DAC960_V1_CommandMailbox_T *CommandMailbox;
+  unsigned long flags;
+  unsigned char Channel, TargetID, LogicalDriveNumber;
+
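+  /*
+    Block until a free Command becomes available.
+  */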
+  spin_lock_irqsave(&Controller->queue_lock, flags);
+  while ((Command = DAC960_AllocateCommand(Controller)) == NULL)
+    DAC960_WaitForCommand(Controller);
+  spin_unlock_irqrestore(&Controller->queue_lock, flags);
+  Controller->UserStatusLength = 0;
+  DAC960_V1_ClearCommand(Command);
+  Command->CommandType = DAC960_ImmediateCommand;
+  CommandMailbox = &Command->V1.CommandMailbox;
+  if (strcmp(UserCommand, "flush-cache") == 0)
+    {
+      CommandMailbox->Type3.CommandOpcode = DAC960_V1_Flush;
+      DAC960_ExecuteCommand(Command);
+      DAC960_UserCritical("Cache Flush Completed\n", Controller);
+    }
+  else if (strncmp(UserCommand, "kill", 4) == 0 &&
+	   DAC960_ParsePhysicalDevice(Controller, &UserCommand[4],
+				      &Channel, &TargetID))
+    {
+      DAC960_V1_DeviceState_T *DeviceState =
+	&Controller->V1.DeviceState[Channel][TargetID];
+      if (DeviceState->Present &&
+	  DeviceState->DeviceType == DAC960_V1_DiskType &&
+	  DeviceState->DeviceState != DAC960_V1_Device_Dead)
+	DAC960_V1_SetDeviceState(Controller, Command, Channel, TargetID,
+				 DAC960_V1_Device_Dead, "Kill");
+      else DAC960_UserCritical("Kill of Physical Device %d:%d Illegal\n",
+			       Controller, Channel, TargetID);
+    }
+  else if (strncmp(UserCommand, "make-online", 11) == 0 &&
+	   DAC960_ParsePhysicalDevice(Controller, &UserCommand[11],
+				      &Channel, &TargetID))
+    {
+      DAC960_V1_DeviceState_T *DeviceState =
+	&Controller->V1.DeviceState[Channel][TargetID];
+      if (DeviceState->Present &&
+	  DeviceState->DeviceType == DAC960_V1_DiskType &&
+	  DeviceState->DeviceState == DAC960_V1_Device_Dead)
+	DAC960_V1_SetDeviceState(Controller, Command, Channel, TargetID,
+				 DAC960_V1_Device_Online, "Make Online");
+      else DAC960_UserCritical("Make Online of Physical Device %d:%d Illegal\n",
+			       Controller, Channel, TargetID);
+
+    }
+  else if (strncmp(UserCommand, "make-standby", 12) == 0 &&
+	   DAC960_ParsePhysicalDevice(Controller, &UserCommand[12],
+				      &Channel, &TargetID))
+    {
+      DAC960_V1_DeviceState_T *DeviceState =
+	&Controller->V1.DeviceState[Channel][TargetID];
+      if (DeviceState->Present &&
+	  DeviceState->DeviceType == DAC960_V1_DiskType &&
+	  DeviceState->DeviceState == DAC960_V1_Device_Dead)
+	DAC960_V1_SetDeviceState(Controller, Command, Channel, TargetID,
+				 DAC960_V1_Device_Standby, "Make Standby");
+      else DAC960_UserCritical("Make Standby of Physical "
+			       "Device %d:%d Illegal\n",
+			       Controller, Channel, TargetID);
+    }
+  else if (strncmp(UserCommand, "rebuild", 7) == 0 &&
+	   DAC960_ParsePhysicalDevice(Controller, &UserCommand[7],
+				      &Channel, &TargetID))
+    {
+      CommandMailbox->Type3D.CommandOpcode = DAC960_V1_RebuildAsync;
+      CommandMailbox->Type3D.Channel = Channel;
+      CommandMailbox->Type3D.TargetID = TargetID;
+      DAC960_ExecuteCommand(Command);
+      switch (Command->V1.CommandStatus)
+	{
+	case DAC960_V1_NormalCompletion:
+	  DAC960_UserCritical("Rebuild of Physical Device %d:%d Initiated\n",
+			      Controller, Channel, TargetID);
+	  break;
+	case DAC960_V1_AttemptToRebuildOnlineDrive:
+	  DAC960_UserCritical("Rebuild of Physical Device %d:%d Failed - "
+			      "Attempt to Rebuild Online or "
+			      "Unresponsive Drive\n",
+			      Controller, Channel, TargetID);
+	  break;
+	case DAC960_V1_NewDiskFailedDuringRebuild:
+	  DAC960_UserCritical("Rebuild of Physical Device %d:%d Failed - "
+			      "New Disk Failed During Rebuild\n",
+			      Controller, Channel, TargetID);
+	  break;
+	case DAC960_V1_InvalidDeviceAddress:
+	  DAC960_UserCritical("Rebuild of Physical Device %d:%d Failed - "
+			      "Invalid Device Address\n",
+			      Controller, Channel, TargetID);
+	  break;
+	case DAC960_V1_RebuildOrCheckAlreadyInProgress:
+	  DAC960_UserCritical("Rebuild of Physical Device %d:%d Failed - "
+			      "Rebuild or Consistency Check Already "
+			      "in Progress\n", Controller, Channel, TargetID);
+	  break;
+	default:
+	  DAC960_UserCritical("Rebuild of Physical Device %d:%d Failed - "
+			      "Unexpected Status %04X\n", Controller,
+			      Channel, TargetID, Command->V1.CommandStatus);
+	  break;
+	}
+    }
+  else if (strncmp(UserCommand, "check-consistency", 17) == 0 &&
+	   DAC960_ParseLogicalDrive(Controller, &UserCommand[17],
+				    &LogicalDriveNumber))
+    {
+      CommandMailbox->Type3C.CommandOpcode = DAC960_V1_CheckConsistencyAsync;
+      CommandMailbox->Type3C.LogicalDriveNumber = LogicalDriveNumber;
+      CommandMailbox->Type3C.AutoRestore = true;
+      DAC960_ExecuteCommand(Command);
+      switch (Command->V1.CommandStatus)
+	{
+	case DAC960_V1_NormalCompletion:
+	  DAC960_UserCritical("Consistency Check of Logical Drive %d "
+			      "(/dev/rd/c%dd%d) Initiated\n",
+			      Controller, LogicalDriveNumber,
+			      Controller->ControllerNumber,
+			      LogicalDriveNumber);
+	  break;
+	case DAC960_V1_DependentDiskIsDead:
+	  DAC960_UserCritical("Consistency Check of Logical Drive %d "
+			      "(/dev/rd/c%dd%d) Failed - "
+			      "Dependent Physical Device is DEAD\n",
+			      Controller, LogicalDriveNumber,
+			      Controller->ControllerNumber,
+			      LogicalDriveNumber);
+	  break;
+	case DAC960_V1_InvalidOrNonredundantLogicalDrive:
+	  DAC960_UserCritical("Consistency Check of Logical Drive %d "
+			      "(/dev/rd/c%dd%d) Failed - "
+			      "Invalid or Nonredundant Logical Drive\n",
+			      Controller, LogicalDriveNumber,
+			      Controller->ControllerNumber,
+			      LogicalDriveNumber);
+	  break;
+	case DAC960_V1_RebuildOrCheckAlreadyInProgress:
+	  DAC960_UserCritical("Consistency Check of Logical Drive %d "
+			      "(/dev/rd/c%dd%d) Failed - Rebuild or "
+			      "Consistency Check Already in Progress\n",
+			      Controller, LogicalDriveNumber,
+			      Controller->ControllerNumber,
+			      LogicalDriveNumber);
+	  break;
+	default:
+	  DAC960_UserCritical("Consistency Check of Logical Drive %d "
+			      "(/dev/rd/c%dd%d) Failed - "
+			      "Unexpected Status %04X\n",
+			      Controller, LogicalDriveNumber,
+			      Controller->ControllerNumber,
+			      LogicalDriveNumber, Command->V1.CommandStatus);
+	  break;
+	}
+    }
+  else if (strcmp(UserCommand, "cancel-rebuild") == 0 ||
+	   strcmp(UserCommand, "cancel-consistency-check") == 0)
+    {
+      /*
+        The OldRebuildRateConstant is never actually used
+        once its value has been retrieved from the controller.
+      */
+      unsigned char *OldRebuildRateConstant;
+      dma_addr_t OldRebuildRateConstantDMA;
+
+      OldRebuildRateConstant = pci_alloc_consistent(Controller->PCIDevice,
+		sizeof(char), &OldRebuildRateConstantDMA);
+      if (OldRebuildRateConstant == NULL) {
+	 DAC960_UserCritical("Cancellation of Rebuild or "
+			     "Consistency Check Failed - "
+			     "Out of Memory\n",
+			     Controller);
+	 goto failure;
+      }
+      CommandMailbox->Type3R.CommandOpcode = DAC960_V1_RebuildControl;
+      CommandMailbox->Type3R.RebuildRateConstant = 0xFF;
+      CommandMailbox->Type3R.BusAddress = OldRebuildRateConstantDMA;
+      DAC960_ExecuteCommand(Command);
+      switch (Command->V1.CommandStatus)
+	{
+	case DAC960_V1_NormalCompletion:
+	  DAC960_UserCritical("Rebuild or Consistency Check Cancelled\n",
+			      Controller);
+	  break;
+	default:
+	  DAC960_UserCritical("Cancellation of Rebuild or "
+			      "Consistency Check Failed - "
+			      "Unexpected Status %04X\n",
+			      Controller, Command->V1.CommandStatus);
+	  break;
+	}
+failure:
+	/* OldRebuildRateConstant is NULL if the allocation above failed. */
+	if (OldRebuildRateConstant != NULL)
+	  pci_free_consistent(Controller->PCIDevice, sizeof(char),
+		OldRebuildRateConstant, OldRebuildRateConstantDMA);
+    }
+  else DAC960_UserCritical("Illegal User Command: '%s'\n",
+			   Controller, UserCommand);
+
+  spin_lock_irqsave(&Controller->queue_lock, flags);
+  DAC960_DeallocateCommand(Command);
+  spin_unlock_irqrestore(&Controller->queue_lock, flags);
+  return true;
+}
+
+
+/*
+  DAC960_V2_TranslatePhysicalDevice translates a Physical Device Channel and
+  TargetID into a Logical Device.  It returns true on success and false
+  on failure.
+*/
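+
+/*
+  Note that the caller's Command Mailbox contents are saved on entry and
+  restored before returning, so a command the caller has already begun
+  constructing survives the translation.
+*/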
+
+static boolean DAC960_V2_TranslatePhysicalDevice(DAC960_Command_T *Command,
+						 unsigned char Channel,
+						 unsigned char TargetID,
+						 unsigned short
+						   *LogicalDeviceNumber)
+{
+  DAC960_V2_CommandMailbox_T SavedCommandMailbox, *CommandMailbox;
+  DAC960_Controller_T *Controller =  Command->Controller;
+
+  CommandMailbox = &Command->V2.CommandMailbox;
+  memcpy(&SavedCommandMailbox, CommandMailbox,
+	 sizeof(DAC960_V2_CommandMailbox_T));
+
+  CommandMailbox->PhysicalDeviceInfo.CommandOpcode = DAC960_V2_IOCTL;
+  CommandMailbox->PhysicalDeviceInfo.CommandControlBits
+				    .DataTransferControllerToHost = true;
+  CommandMailbox->PhysicalDeviceInfo.CommandControlBits
+				    .NoAutoRequestSense = true;
+  CommandMailbox->PhysicalDeviceInfo.DataTransferSize =
+    sizeof(DAC960_V2_PhysicalToLogicalDevice_T);
+  CommandMailbox->PhysicalDeviceInfo.PhysicalDevice.TargetID = TargetID;
+  CommandMailbox->PhysicalDeviceInfo.PhysicalDevice.Channel = Channel;
+  CommandMailbox->PhysicalDeviceInfo.IOCTL_Opcode =
+    DAC960_V2_TranslatePhysicalToLogicalDevice;
+  CommandMailbox->Common.DataTransferMemoryAddress
+			.ScatterGatherSegments[0]
+			.SegmentDataPointer =
+    Controller->V2.PhysicalToLogicalDeviceDMA;
+  CommandMailbox->Common.DataTransferMemoryAddress
+			.ScatterGatherSegments[0]
+			.SegmentByteCount =
+    CommandMailbox->Common.DataTransferSize;
+
+  DAC960_ExecuteCommand(Command);
+  *LogicalDeviceNumber =
+    Controller->V2.PhysicalToLogicalDevice->LogicalDeviceNumber;
+
+  memcpy(CommandMailbox, &SavedCommandMailbox,
+	 sizeof(DAC960_V2_CommandMailbox_T));
+  return (Command->V2.CommandStatus == DAC960_V2_NormalCompletion);
+}
+
+
+/*
+  DAC960_V2_ExecuteUserCommand executes a User Command for DAC960 V2 Firmware
+  Controllers.
+*/
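+
+/*
+  The V2 parser below accepts the same command strings as the V1 parser,
+  plus "perform-discovery" and "suppress-enclosure-messages"; here
+  "cancel-rebuild" takes a physical device specification and
+  "cancel-consistency-check" takes a logical drive number.
+*/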
+
+static boolean DAC960_V2_ExecuteUserCommand(DAC960_Controller_T *Controller,
+					    unsigned char *UserCommand)
+{
+  DAC960_Command_T *Command;
+  DAC960_V2_CommandMailbox_T *CommandMailbox;
+  unsigned long flags;
+  unsigned char Channel, TargetID, LogicalDriveNumber;
+  unsigned short LogicalDeviceNumber;
+
+  spin_lock_irqsave(&Controller->queue_lock, flags);
+  while ((Command = DAC960_AllocateCommand(Controller)) == NULL)
+    DAC960_WaitForCommand(Controller);
+  spin_unlock_irqrestore(&Controller->queue_lock, flags);
+  Controller->UserStatusLength = 0;
+  DAC960_V2_ClearCommand(Command);
+  Command->CommandType = DAC960_ImmediateCommand;
+  CommandMailbox = &Command->V2.CommandMailbox;
+  CommandMailbox->Common.CommandOpcode = DAC960_V2_IOCTL;
+  CommandMailbox->Common.CommandControlBits.DataTransferControllerToHost = true;
+  CommandMailbox->Common.CommandControlBits.NoAutoRequestSense = true;
+  if (strcmp(UserCommand, "flush-cache") == 0)
+    {
+      CommandMailbox->DeviceOperation.IOCTL_Opcode = DAC960_V2_PauseDevice;
+      CommandMailbox->DeviceOperation.OperationDevice =
+	DAC960_V2_RAID_Controller;
+      DAC960_ExecuteCommand(Command);
+      DAC960_UserCritical("Cache Flush Completed\n", Controller);
+    }
+  else if (strncmp(UserCommand, "kill", 4) == 0 &&
+	   DAC960_ParsePhysicalDevice(Controller, &UserCommand[4],
+				      &Channel, &TargetID) &&
+	   DAC960_V2_TranslatePhysicalDevice(Command, Channel, TargetID,
+					     &LogicalDeviceNumber))
+    {
+      CommandMailbox->SetDeviceState.LogicalDevice.LogicalDeviceNumber =
+	LogicalDeviceNumber;
+      CommandMailbox->SetDeviceState.IOCTL_Opcode =
+	DAC960_V2_SetDeviceState;
+      CommandMailbox->SetDeviceState.DeviceState.PhysicalDeviceState =
+	DAC960_V2_Device_Dead;
+      DAC960_ExecuteCommand(Command);
+      DAC960_UserCritical("Kill of Physical Device %d:%d %s\n",
+			  Controller, Channel, TargetID,
+			  (Command->V2.CommandStatus
+			   == DAC960_V2_NormalCompletion
+			   ? "Succeeded" : "Failed"));
+    }
+  else if (strncmp(UserCommand, "make-online", 11) == 0 &&
+	   DAC960_ParsePhysicalDevice(Controller, &UserCommand[11],
+				      &Channel, &TargetID) &&
+	   DAC960_V2_TranslatePhysicalDevice(Command, Channel, TargetID,
+					     &LogicalDeviceNumber))
+    {
+      CommandMailbox->SetDeviceState.LogicalDevice.LogicalDeviceNumber =
+	LogicalDeviceNumber;
+      CommandMailbox->SetDeviceState.IOCTL_Opcode =
+	DAC960_V2_SetDeviceState;
+      CommandMailbox->SetDeviceState.DeviceState.PhysicalDeviceState =
+	DAC960_V2_Device_Online;
+      DAC960_ExecuteCommand(Command);
+      DAC960_UserCritical("Make Online of Physical Device %d:%d %s\n",
+			  Controller, Channel, TargetID,
+			  (Command->V2.CommandStatus
+			   == DAC960_V2_NormalCompletion
+			   ? "Succeeded" : "Failed"));
+    }
+  else if (strncmp(UserCommand, "make-standby", 12) == 0 &&
+	   DAC960_ParsePhysicalDevice(Controller, &UserCommand[12],
+				      &Channel, &TargetID) &&
+	   DAC960_V2_TranslatePhysicalDevice(Command, Channel, TargetID,
+					     &LogicalDeviceNumber))
+    {
+      CommandMailbox->SetDeviceState.LogicalDevice.LogicalDeviceNumber =
+	LogicalDeviceNumber;
+      CommandMailbox->SetDeviceState.IOCTL_Opcode =
+	DAC960_V2_SetDeviceState;
+      CommandMailbox->SetDeviceState.DeviceState.PhysicalDeviceState =
+	DAC960_V2_Device_Standby;
+      DAC960_ExecuteCommand(Command);
+      DAC960_UserCritical("Make Standby of Physical Device %d:%d %s\n",
+			  Controller, Channel, TargetID,
+			  (Command->V2.CommandStatus
+			   == DAC960_V2_NormalCompletion
+			   ? "Succeeded" : "Failed"));
+    }
+  else if (strncmp(UserCommand, "rebuild", 7) == 0 &&
+	   DAC960_ParsePhysicalDevice(Controller, &UserCommand[7],
+				      &Channel, &TargetID) &&
+	   DAC960_V2_TranslatePhysicalDevice(Command, Channel, TargetID,
+					     &LogicalDeviceNumber))
+    {
+      CommandMailbox->LogicalDeviceInfo.LogicalDevice.LogicalDeviceNumber =
+	LogicalDeviceNumber;
+      CommandMailbox->LogicalDeviceInfo.IOCTL_Opcode =
+	DAC960_V2_RebuildDeviceStart;
+      DAC960_ExecuteCommand(Command);
+      DAC960_UserCritical("Rebuild of Physical Device %d:%d %s\n",
+			  Controller, Channel, TargetID,
+			  (Command->V2.CommandStatus
+			   == DAC960_V2_NormalCompletion
+			   ? "Initiated" : "Not Initiated"));
+    }
+  else if (strncmp(UserCommand, "cancel-rebuild", 14) == 0 &&
+	   DAC960_ParsePhysicalDevice(Controller, &UserCommand[14],
+				      &Channel, &TargetID) &&
+	   DAC960_V2_TranslatePhysicalDevice(Command, Channel, TargetID,
+					     &LogicalDeviceNumber))
+    {
+      CommandMailbox->LogicalDeviceInfo.LogicalDevice.LogicalDeviceNumber =
+	LogicalDeviceNumber;
+      CommandMailbox->LogicalDeviceInfo.IOCTL_Opcode =
+	DAC960_V2_RebuildDeviceStop;
+      DAC960_ExecuteCommand(Command);
+      DAC960_UserCritical("Rebuild of Physical Device %d:%d %s\n",
+			  Controller, Channel, TargetID,
+			  (Command->V2.CommandStatus
+			   == DAC960_V2_NormalCompletion
+			   ? "Cancelled" : "Not Cancelled"));
+    }
+  else if (strncmp(UserCommand, "check-consistency", 17) == 0 &&
+	   DAC960_ParseLogicalDrive(Controller, &UserCommand[17],
+				    &LogicalDriveNumber))
+    {
+      CommandMailbox->ConsistencyCheck.LogicalDevice.LogicalDeviceNumber =
+	LogicalDriveNumber;
+      CommandMailbox->ConsistencyCheck.IOCTL_Opcode =
+	DAC960_V2_ConsistencyCheckStart;
+      CommandMailbox->ConsistencyCheck.RestoreConsistency = true;
+      CommandMailbox->ConsistencyCheck.InitializedAreaOnly = false;
+      DAC960_ExecuteCommand(Command);
+      DAC960_UserCritical("Consistency Check of Logical Drive %d "
+			  "(/dev/rd/c%dd%d) %s\n",
+			  Controller, LogicalDriveNumber,
+			  Controller->ControllerNumber,
+			  LogicalDriveNumber,
+			  (Command->V2.CommandStatus
+			   == DAC960_V2_NormalCompletion
+			   ? "Initiated" : "Not Initiated"));
+    }
+  else if (strncmp(UserCommand, "cancel-consistency-check", 24) == 0 &&
+	   DAC960_ParseLogicalDrive(Controller, &UserCommand[24],
+				    &LogicalDriveNumber))
+    {
+      CommandMailbox->ConsistencyCheck.LogicalDevice.LogicalDeviceNumber =
+	LogicalDriveNumber;
+      CommandMailbox->ConsistencyCheck.IOCTL_Opcode =
+	DAC960_V2_ConsistencyCheckStop;
+      DAC960_ExecuteCommand(Command);
+      DAC960_UserCritical("Consistency Check of Logical Drive %d "
+			  "(/dev/rd/c%dd%d) %s\n",
+			  Controller, LogicalDriveNumber,
+			  Controller->ControllerNumber,
+			  LogicalDriveNumber,
+			  (Command->V2.CommandStatus
+			   == DAC960_V2_NormalCompletion
+			   ? "Cancelled" : "Not Cancelled"));
+    }
+  else if (strcmp(UserCommand, "perform-discovery") == 0)
+    {
+      CommandMailbox->Common.IOCTL_Opcode = DAC960_V2_StartDiscovery;
+      DAC960_ExecuteCommand(Command);
+      DAC960_UserCritical("Discovery %s\n", Controller,
+			  (Command->V2.CommandStatus
+			   == DAC960_V2_NormalCompletion
+			   ? "Initiated" : "Not Initiated"));
+      if (Command->V2.CommandStatus == DAC960_V2_NormalCompletion)
+	{
+	  CommandMailbox->ControllerInfo.CommandOpcode = DAC960_V2_IOCTL;
+	  CommandMailbox->ControllerInfo.CommandControlBits
+					.DataTransferControllerToHost = true;
+	  CommandMailbox->ControllerInfo.CommandControlBits
+					.NoAutoRequestSense = true;
+	  CommandMailbox->ControllerInfo.DataTransferSize =
+	    sizeof(DAC960_V2_ControllerInfo_T);
+	  CommandMailbox->ControllerInfo.ControllerNumber = 0;
+	  CommandMailbox->ControllerInfo.IOCTL_Opcode =
+	    DAC960_V2_GetControllerInfo;
+	  /*
+	   * How does this NOT race with the queued Monitoring
+	   * usage of this structure?
+	   */
+	  CommandMailbox->ControllerInfo.DataTransferMemoryAddress
+					.ScatterGatherSegments[0]
+					.SegmentDataPointer =
+	    Controller->V2.NewControllerInformationDMA;
+	  CommandMailbox->ControllerInfo.DataTransferMemoryAddress
+					.ScatterGatherSegments[0]
+					.SegmentByteCount =
+	    CommandMailbox->ControllerInfo.DataTransferSize;
+	  DAC960_ExecuteCommand(Command);
+	  while (Controller->V2.NewControllerInformation->PhysicalScanActive)
+	    {
+	      DAC960_ExecuteCommand(Command);
+	      sleep_on_timeout(&Controller->CommandWaitQueue, HZ);
+	    }
+	  DAC960_UserCritical("Discovery Completed\n", Controller);
+ 	}
+    }
+  else if (strcmp(UserCommand, "suppress-enclosure-messages") == 0)
+    Controller->SuppressEnclosureMessages = true;
+  else DAC960_UserCritical("Illegal User Command: '%s'\n",
+			   Controller, UserCommand);
+
+  spin_lock_irqsave(&Controller->queue_lock, flags);
+  DAC960_DeallocateCommand(Command);
+  spin_unlock_irqrestore(&Controller->queue_lock, flags);
+  return true;
+}
+
+
+/*
+  DAC960_ProcReadStatus implements reading /proc/rd/status.
+*/
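+
+/*
+  The read returns "OK" while every controller is healthy, or "ALERT"
+  once any controller has entered Monitoring Alert Mode.
+*/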
+
+static int DAC960_ProcReadStatus(char *Page, char **Start, off_t Offset,
+				 int Count, int *EOF, void *Data)
+{
+  unsigned char *StatusMessage = "OK\n";
+  int ControllerNumber, BytesAvailable;
+  for (ControllerNumber = 0;
+       ControllerNumber < DAC960_ControllerCount;
+       ControllerNumber++)
+    {
+      DAC960_Controller_T *Controller = DAC960_Controllers[ControllerNumber];
+      if (Controller == NULL) continue;
+      if (Controller->MonitoringAlertMode)
+	{
+	  StatusMessage = "ALERT\n";
+	  break;
+	}
+    }
+  BytesAvailable = strlen(StatusMessage) - Offset;
+  if (Count >= BytesAvailable)
+    {
+      Count = BytesAvailable;
+      *EOF = true;
+    }
+  if (Count <= 0) return 0;
+  *Start = Page;
+  memcpy(Page, &StatusMessage[Offset], Count);
+  return Count;
+}
+
+
+/*
+  DAC960_ProcReadInitialStatus implements reading /proc/rd/cN/initial_status.
+*/
+
+static int DAC960_ProcReadInitialStatus(char *Page, char **Start, off_t Offset,
+					int Count, int *EOF, void *Data)
+{
+  DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data;
+  int BytesAvailable = Controller->InitialStatusLength - Offset;
+  if (Count >= BytesAvailable)
+    {
+      Count = BytesAvailable;
+      *EOF = true;
+    }
+  if (Count <= 0) return 0;
+  *Start = Page;
+  memcpy(Page, &Controller->CombinedStatusBuffer[Offset], Count);
+  return Count;
+}
+
+
+/*
+  DAC960_ProcReadCurrentStatus implements reading /proc/rd/cN/current_status.
+*/
+
+static int DAC960_ProcReadCurrentStatus(char *Page, char **Start, off_t Offset,
+					int Count, int *EOF, void *Data)
+{
+  DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data;
+  unsigned char *StatusMessage =
+    "No Rebuild or Consistency Check in Progress\n";
+  int ProgressMessageLength = strlen(StatusMessage);
+  int BytesAvailable;
+  if (jiffies != Controller->LastCurrentStatusTime)
+    {
+      Controller->CurrentStatusLength = 0;
+      DAC960_AnnounceDriver(Controller);
+      DAC960_ReportControllerConfiguration(Controller);
+      DAC960_ReportDeviceConfiguration(Controller);
+      if (Controller->ProgressBufferLength > 0)
+	ProgressMessageLength = Controller->ProgressBufferLength;
+      if (DAC960_CheckStatusBuffer(Controller, 2 + ProgressMessageLength))
+	{
+	  unsigned char *CurrentStatusBuffer = Controller->CurrentStatusBuffer;
+	  CurrentStatusBuffer[Controller->CurrentStatusLength++] = ' ';
+	  CurrentStatusBuffer[Controller->CurrentStatusLength++] = ' ';
+	  if (Controller->ProgressBufferLength > 0)
+	    strcpy(&CurrentStatusBuffer[Controller->CurrentStatusLength],
+		   Controller->ProgressBuffer);
+	  else
+	    strcpy(&CurrentStatusBuffer[Controller->CurrentStatusLength],
+		   StatusMessage);
+	  Controller->CurrentStatusLength += ProgressMessageLength;
+	}
+      Controller->LastCurrentStatusTime = jiffies;
+    }
+  BytesAvailable = Controller->CurrentStatusLength - Offset;
+  if (Count >= BytesAvailable)
+    {
+      Count = BytesAvailable;
+      *EOF = true;
+    }
+  if (Count <= 0) return 0;
+  *Start = Page;
+  memcpy(Page, &Controller->CurrentStatusBuffer[Offset], Count);
+  return Count;
+}
+
+
+/*
+  DAC960_ProcReadUserCommand implements reading /proc/rd/cN/user_command.
+*/
+
+static int DAC960_ProcReadUserCommand(char *Page, char **Start, off_t Offset,
+				      int Count, int *EOF, void *Data)
+{
+  DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data;
+  int BytesAvailable = Controller->UserStatusLength - Offset;
+  if (Count >= BytesAvailable)
+    {
+      Count = BytesAvailable;
+      *EOF = true;
+    }
+  if (Count <= 0) return 0;
+  *Start = Page;
+  memcpy(Page, &Controller->UserStatusBuffer[Offset], Count);
+  return Count;
+}
+
+
+/*
+  DAC960_ProcWriteUserCommand implements writing /proc/rd/cN/user_command.
+*/
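+
+/*
+  For example, assuming controller 0 is present and that
+  DAC960_ParsePhysicalDevice accepts the channel:target notation used by
+  the status messages above, a rebuild could be initiated with:
+
+      echo "rebuild 1:3" > /proc/rd/c0/user_command
+
+  The resulting status message can then be read back from the same file.
+*/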
+
+static int DAC960_ProcWriteUserCommand(struct file *file,
+				       const char __user *Buffer,
+				       unsigned long Count, void *Data)
+{
+  DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data;
+  unsigned char CommandBuffer[80];
+  int Length;
+  if (Count > sizeof(CommandBuffer)-1) return -EINVAL;
+  if (copy_from_user(CommandBuffer, Buffer, Count)) return -EFAULT;
+  CommandBuffer[Count] = '\0';
+  Length = strlen(CommandBuffer);
+  if (Length > 0 && CommandBuffer[Length-1] == '\n')
+    CommandBuffer[--Length] = '\0';
+  if (Controller->FirmwareType == DAC960_V1_Controller)
+    return (DAC960_V1_ExecuteUserCommand(Controller, CommandBuffer)
+	    ? Count : -EBUSY);
+  else
+    return (DAC960_V2_ExecuteUserCommand(Controller, CommandBuffer)
+	    ? Count : -EBUSY);
+}
+
+
+/*
+  DAC960_CreateProcEntries creates the /proc/rd/... entries for the
+  DAC960 Driver.
+*/
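+
+/*
+  The resulting layout is /proc/rd/status plus, for each controller N,
+  /proc/rd/cN/initial_status, /proc/rd/cN/current_status, and
+  /proc/rd/cN/user_command.
+*/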
+
+static void DAC960_CreateProcEntries(DAC960_Controller_T *Controller)
+{
+	struct proc_dir_entry *StatusProcEntry;
+	struct proc_dir_entry *ControllerProcEntry;
+	struct proc_dir_entry *UserCommandProcEntry;
+
+	if (DAC960_ProcDirectoryEntry == NULL) {
+  		DAC960_ProcDirectoryEntry = proc_mkdir("rd", NULL);
+  		StatusProcEntry = create_proc_read_entry("status", 0,
+					   DAC960_ProcDirectoryEntry,
+					   DAC960_ProcReadStatus, NULL);
+	}
+
+	sprintf(Controller->ControllerName, "c%d", Controller->ControllerNumber);
+	ControllerProcEntry = proc_mkdir(Controller->ControllerName,
+					 DAC960_ProcDirectoryEntry);
+	create_proc_read_entry("initial_status", 0, ControllerProcEntry,
+			       DAC960_ProcReadInitialStatus, Controller);
+	create_proc_read_entry("current_status", 0, ControllerProcEntry,
+			       DAC960_ProcReadCurrentStatus, Controller);
+	UserCommandProcEntry =
+		create_proc_read_entry("user_command", S_IWUSR | S_IRUSR,
+				       ControllerProcEntry,
+				       DAC960_ProcReadUserCommand, Controller);
+	UserCommandProcEntry->write_proc = DAC960_ProcWriteUserCommand;
+	Controller->ControllerProcEntry = ControllerProcEntry;
+}
+
+
+/*
+  DAC960_DestroyProcEntries destroys the /proc/rd/... entries for the
+  DAC960 Driver.
+*/
+
+static void DAC960_DestroyProcEntries(DAC960_Controller_T *Controller)
+{
+	if (Controller->ControllerProcEntry == NULL)
+		return;
+	remove_proc_entry("initial_status", Controller->ControllerProcEntry);
+	remove_proc_entry("current_status", Controller->ControllerProcEntry);
+	remove_proc_entry("user_command", Controller->ControllerProcEntry);
+	remove_proc_entry(Controller->ControllerName, DAC960_ProcDirectoryEntry);
+	Controller->ControllerProcEntry = NULL;
+}
+
+#ifdef DAC960_GAM_MINOR
+
+/*
+  DAC960_gam_ioctl is the ioctl function for performing RAID operations.
+*/
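+
+/*
+  A management utility issues these requests against the misc device
+  registered below as "dac960_gam" on minor 252.  A minimal sketch,
+  assuming the node appears as /dev/dac960_gam:
+
+      int fd = open("/dev/dac960_gam", O_RDONLY);
+      int count = ioctl(fd, DAC960_IOCTL_GET_CONTROLLER_COUNT, 0);
+
+  This is illustrative only and not part of the driver itself.
+*/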
+
+static int DAC960_gam_ioctl(struct inode *inode, struct file *file,
+			    unsigned int Request, unsigned long Argument)
+{
+  int ErrorCode = 0;
+  if (!capable(CAP_SYS_ADMIN)) return -EACCES;
+  switch (Request)
+    {
+    case DAC960_IOCTL_GET_CONTROLLER_COUNT:
+      return DAC960_ControllerCount;
+    case DAC960_IOCTL_GET_CONTROLLER_INFO:
+      {
+	DAC960_ControllerInfo_T __user *UserSpaceControllerInfo =
+	  (DAC960_ControllerInfo_T __user *) Argument;
+	DAC960_ControllerInfo_T ControllerInfo;
+	DAC960_Controller_T *Controller;
+	int ControllerNumber;
+	if (UserSpaceControllerInfo == NULL) return -EINVAL;
+	ErrorCode = get_user(ControllerNumber,
+			     &UserSpaceControllerInfo->ControllerNumber);
+	if (ErrorCode != 0) return ErrorCode;
+	if (ControllerNumber < 0 ||
+	    ControllerNumber > DAC960_ControllerCount - 1)
+	  return -ENXIO;
+	Controller = DAC960_Controllers[ControllerNumber];
+	if (Controller == NULL) return -ENXIO;
+	memset(&ControllerInfo, 0, sizeof(DAC960_ControllerInfo_T));
+	ControllerInfo.ControllerNumber = ControllerNumber;
+	ControllerInfo.FirmwareType = Controller->FirmwareType;
+	ControllerInfo.Channels = Controller->Channels;
+	ControllerInfo.Targets = Controller->Targets;
+	ControllerInfo.PCI_Bus = Controller->Bus;
+	ControllerInfo.PCI_Device = Controller->Device;
+	ControllerInfo.PCI_Function = Controller->Function;
+	ControllerInfo.IRQ_Channel = Controller->IRQ_Channel;
+	ControllerInfo.PCI_Address = Controller->PCI_Address;
+	strcpy(ControllerInfo.ModelName, Controller->ModelName);
+	strcpy(ControllerInfo.FirmwareVersion, Controller->FirmwareVersion);
+	return (copy_to_user(UserSpaceControllerInfo, &ControllerInfo,
+			     sizeof(DAC960_ControllerInfo_T)) ? -EFAULT : 0);
+      }
+    case DAC960_IOCTL_V1_EXECUTE_COMMAND:
+      {
+	DAC960_V1_UserCommand_T __user *UserSpaceUserCommand =
+	  (DAC960_V1_UserCommand_T __user *) Argument;
+	DAC960_V1_UserCommand_T UserCommand;
+	DAC960_Controller_T *Controller;
+	DAC960_Command_T *Command = NULL;
+	DAC960_V1_CommandOpcode_T CommandOpcode;
+	DAC960_V1_CommandStatus_T CommandStatus;
+	DAC960_V1_DCDB_T DCDB;
+	DAC960_V1_DCDB_T *DCDB_IOBUF = NULL;
+	dma_addr_t	DCDB_IOBUFDMA;
+	unsigned long flags;
+	int ControllerNumber, DataTransferLength;
+	unsigned char *DataTransferBuffer = NULL;
+	dma_addr_t DataTransferBufferDMA;
+	if (UserSpaceUserCommand == NULL) return -EINVAL;
+	if (copy_from_user(&UserCommand, UserSpaceUserCommand,
+				   sizeof(DAC960_V1_UserCommand_T))) {
+		ErrorCode = -EFAULT;
+		goto Failure1a;
+	}
+	ControllerNumber = UserCommand.ControllerNumber;
+	if (ControllerNumber < 0 ||
+	    ControllerNumber > DAC960_ControllerCount - 1)
+	  return -ENXIO;
+	Controller = DAC960_Controllers[ControllerNumber];
+	if (Controller == NULL) return -ENXIO;
+	if (Controller->FirmwareType != DAC960_V1_Controller) return -EINVAL;
+	CommandOpcode = UserCommand.CommandMailbox.Common.CommandOpcode;
+	DataTransferLength = UserCommand.DataTransferLength;
+	if (CommandOpcode & 0x80) return -EINVAL;
+	if (CommandOpcode == DAC960_V1_DCDB)
+	  {
+	    if (copy_from_user(&DCDB, UserCommand.DCDB,
+			       sizeof(DAC960_V1_DCDB_T))) {
+		ErrorCode = -EFAULT;
+		goto Failure1a;
+	    }
+	    if (DCDB.Channel >= DAC960_V1_MaxChannels) return -EINVAL;
+	    if (!((DataTransferLength == 0 &&
+		   DCDB.Direction
+		   == DAC960_V1_DCDB_NoDataTransfer) ||
+		  (DataTransferLength > 0 &&
+		   DCDB.Direction
+		   == DAC960_V1_DCDB_DataTransferDeviceToSystem) ||
+		  (DataTransferLength < 0 &&
+		   DCDB.Direction
+		   == DAC960_V1_DCDB_DataTransferSystemToDevice)))
+	      return -EINVAL;
+	    if (((DCDB.TransferLengthHigh4 << 16) | DCDB.TransferLength)
+		!= abs(DataTransferLength))
+	      return -EINVAL;
+	    DCDB_IOBUF = pci_alloc_consistent(Controller->PCIDevice,
+			sizeof(DAC960_V1_DCDB_T), &DCDB_IOBUFDMA);
+	    if (DCDB_IOBUF == NULL)
+			return -ENOMEM;
+	  }
+	if (DataTransferLength > 0)
+	  {
+	    DataTransferBuffer = pci_alloc_consistent(Controller->PCIDevice,
+				DataTransferLength, &DataTransferBufferDMA);
+	    if (DataTransferBuffer == NULL) {
+		ErrorCode = -ENOMEM;
+		goto Failure1;
+	    }
+	    memset(DataTransferBuffer, 0, DataTransferLength);
+	  }
+	else if (DataTransferLength < 0)
+	  {
+	    DataTransferBuffer = pci_alloc_consistent(Controller->PCIDevice,
+				-DataTransferLength, &DataTransferBufferDMA);
+	    if (DataTransferBuffer == NULL) {
+		ErrorCode = -ENOMEM;
+		goto Failure1;
+	    }
+	    if (copy_from_user(DataTransferBuffer,
+			       UserCommand.DataTransferBuffer,
+			       -DataTransferLength)) {
+		ErrorCode = -EFAULT;
+		goto Failure1;
+	    }
+	  }
+	if (CommandOpcode == DAC960_V1_DCDB)
+	  {
+	    spin_lock_irqsave(&Controller->queue_lock, flags);
+	    while ((Command = DAC960_AllocateCommand(Controller)) == NULL)
+	      DAC960_WaitForCommand(Controller);
+	    while (Controller->V1.DirectCommandActive[DCDB.Channel]
+						     [DCDB.TargetID])
+	      {
+		spin_unlock_irq(&Controller->queue_lock);
+		__wait_event(Controller->CommandWaitQueue,
+			     !Controller->V1.DirectCommandActive
+					     [DCDB.Channel][DCDB.TargetID]);
+		spin_lock_irq(&Controller->queue_lock);
+	      }
+	    Controller->V1.DirectCommandActive[DCDB.Channel]
+					      [DCDB.TargetID] = true;
+	    spin_unlock_irqrestore(&Controller->queue_lock, flags);
+	    DAC960_V1_ClearCommand(Command);
+	    Command->CommandType = DAC960_ImmediateCommand;
+	    memcpy(&Command->V1.CommandMailbox, &UserCommand.CommandMailbox,
+		   sizeof(DAC960_V1_CommandMailbox_T));
+	    Command->V1.CommandMailbox.Type3.BusAddress = DCDB_IOBUFDMA;
+	    DCDB.BusAddress = DataTransferBufferDMA;
+	    memcpy(DCDB_IOBUF, &DCDB, sizeof(DAC960_V1_DCDB_T));
+	  }
+	else
+	  {
+	    spin_lock_irqsave(&Controller->queue_lock, flags);
+	    while ((Command = DAC960_AllocateCommand(Controller)) == NULL)
+	      DAC960_WaitForCommand(Controller);
+	    spin_unlock_irqrestore(&Controller->queue_lock, flags);
+	    DAC960_V1_ClearCommand(Command);
+	    Command->CommandType = DAC960_ImmediateCommand;
+	    memcpy(&Command->V1.CommandMailbox, &UserCommand.CommandMailbox,
+		   sizeof(DAC960_V1_CommandMailbox_T));
+	    if (DataTransferBuffer != NULL)
+	      Command->V1.CommandMailbox.Type3.BusAddress =
+		DataTransferBufferDMA;
+	  }
+	DAC960_ExecuteCommand(Command);
+	CommandStatus = Command->V1.CommandStatus;
+	spin_lock_irqsave(&Controller->queue_lock, flags);
+	DAC960_DeallocateCommand(Command);
+	spin_unlock_irqrestore(&Controller->queue_lock, flags);
+	if (DataTransferLength > 0)
+	  {
+	    if (copy_to_user(UserCommand.DataTransferBuffer,
+			     DataTransferBuffer, DataTransferLength)) {
+		ErrorCode = -EFAULT;
+		goto Failure1;
+            }
+	  }
+	if (CommandOpcode == DAC960_V1_DCDB)
+	  {
+	    /*
+	      I don't believe Target or Channel in the DCDB_IOBUF
+	      should be any different from the contents of DCDB.
+	     */
+	    Controller->V1.DirectCommandActive[DCDB.Channel]
+					      [DCDB.TargetID] = false;
+	    if (copy_to_user(UserCommand.DCDB, DCDB_IOBUF,
+			     sizeof(DAC960_V1_DCDB_T))) {
+		ErrorCode = -EFAULT;
+		goto Failure1;
+	    }
+	  }
+	ErrorCode = CommandStatus;
+      Failure1:
+	if (DataTransferBuffer != NULL)
+	  pci_free_consistent(Controller->PCIDevice, abs(DataTransferLength),
+			DataTransferBuffer, DataTransferBufferDMA);
+	if (DCDB_IOBUF != NULL)
+	  pci_free_consistent(Controller->PCIDevice, sizeof(DAC960_V1_DCDB_T),
+			DCDB_IOBUF, DCDB_IOBUFDMA);
+      Failure1a:
+	return ErrorCode;
+      }
+    case DAC960_IOCTL_V2_EXECUTE_COMMAND:
+      {
+	DAC960_V2_UserCommand_T __user *UserSpaceUserCommand =
+	  (DAC960_V2_UserCommand_T __user *) Argument;
+	DAC960_V2_UserCommand_T UserCommand;
+	DAC960_Controller_T *Controller;
+	DAC960_Command_T *Command = NULL;
+	DAC960_V2_CommandMailbox_T *CommandMailbox;
+	DAC960_V2_CommandStatus_T CommandStatus;
+	unsigned long flags;
+	int ControllerNumber, DataTransferLength;
+	int DataTransferResidue, RequestSenseLength;
+	unsigned char *DataTransferBuffer = NULL;
+	dma_addr_t DataTransferBufferDMA;
+	unsigned char *RequestSenseBuffer = NULL;
+	dma_addr_t RequestSenseBufferDMA;
+	if (UserSpaceUserCommand == NULL) return -EINVAL;
+	if (copy_from_user(&UserCommand, UserSpaceUserCommand,
+			   sizeof(DAC960_V2_UserCommand_T))) {
+		ErrorCode = -EFAULT;
+		goto Failure2a;
+	}
+	ControllerNumber = UserCommand.ControllerNumber;
+	if (ControllerNumber < 0 ||
+	    ControllerNumber > DAC960_ControllerCount - 1)
+	  return -ENXIO;
+	Controller = DAC960_Controllers[ControllerNumber];
+	if (Controller == NULL) return -ENXIO;
+	if (Controller->FirmwareType != DAC960_V2_Controller) return -EINVAL;
+	DataTransferLength = UserCommand.DataTransferLength;
+	if (DataTransferLength > 0)
+	  {
+	    DataTransferBuffer = pci_alloc_consistent(Controller->PCIDevice,
+				DataTransferLength, &DataTransferBufferDMA);
+	    if (DataTransferBuffer == NULL) return -ENOMEM;
+	    memset(DataTransferBuffer, 0, DataTransferLength);
+	  }
+	else if (DataTransferLength < 0)
+	  {
+	    DataTransferBuffer = pci_alloc_consistent(Controller->PCIDevice,
+				-DataTransferLength, &DataTransferBufferDMA);
+	    if (DataTransferBuffer == NULL) return -ENOMEM;
+	    if (copy_from_user(DataTransferBuffer,
+			       UserCommand.DataTransferBuffer,
+			       -DataTransferLength)) {
+		ErrorCode = -EFAULT;
+		goto Failure2;
+	    }
+	  }
+	RequestSenseLength = UserCommand.RequestSenseLength;
+	if (RequestSenseLength > 0)
+	  {
+	    RequestSenseBuffer = pci_alloc_consistent(Controller->PCIDevice,
+			RequestSenseLength, &RequestSenseBufferDMA);
+	    if (RequestSenseBuffer == NULL)
+	      {
+		ErrorCode = -ENOMEM;
+		goto Failure2;
+	      }
+	    memset(RequestSenseBuffer, 0, RequestSenseLength);
+	  }
+	spin_lock_irqsave(&Controller->queue_lock, flags);
+	while ((Command = DAC960_AllocateCommand(Controller)) == NULL)
+	  DAC960_WaitForCommand(Controller);
+	spin_unlock_irqrestore(&Controller->queue_lock, flags);
+	DAC960_V2_ClearCommand(Command);
+	Command->CommandType = DAC960_ImmediateCommand;
+	CommandMailbox = &Command->V2.CommandMailbox;
+	memcpy(CommandMailbox, &UserCommand.CommandMailbox,
+	       sizeof(DAC960_V2_CommandMailbox_T));
+	CommandMailbox->Common.CommandControlBits
+			      .AdditionalScatterGatherListMemory = false;
+	CommandMailbox->Common.CommandControlBits
+			      .NoAutoRequestSense = true;
+	CommandMailbox->Common.DataTransferSize = 0;
+	CommandMailbox->Common.DataTransferPageNumber = 0;
+	memset(&CommandMailbox->Common.DataTransferMemoryAddress, 0,
+	       sizeof(DAC960_V2_DataTransferMemoryAddress_T));
+	if (DataTransferLength != 0)
+	  {
+	    if (DataTransferLength > 0)
+	      {
+		CommandMailbox->Common.CommandControlBits
+				      .DataTransferControllerToHost = true;
+		CommandMailbox->Common.DataTransferSize = DataTransferLength;
+	      }
+	    else
+	      {
+		CommandMailbox->Common.CommandControlBits
+				      .DataTransferControllerToHost = false;
+		CommandMailbox->Common.DataTransferSize = -DataTransferLength;
+	      }
+	    CommandMailbox->Common.DataTransferMemoryAddress
+				  .ScatterGatherSegments[0]
+				  .SegmentDataPointer = DataTransferBufferDMA;
+	    CommandMailbox->Common.DataTransferMemoryAddress
+				  .ScatterGatherSegments[0]
+				  .SegmentByteCount =
+	      CommandMailbox->Common.DataTransferSize;
+	  }
+	if (RequestSenseLength > 0)
+	  {
+	    CommandMailbox->Common.CommandControlBits
+				  .NoAutoRequestSense = false;
+	    CommandMailbox->Common.RequestSenseSize = RequestSenseLength;
+	    CommandMailbox->Common.RequestSenseBusAddress =
+	      RequestSenseBufferDMA;
+	  }
+	DAC960_ExecuteCommand(Command);
+	CommandStatus = Command->V2.CommandStatus;
+	RequestSenseLength = Command->V2.RequestSenseLength;
+	DataTransferResidue = Command->V2.DataTransferResidue;
+	spin_lock_irqsave(&Controller->queue_lock, flags);
+	DAC960_DeallocateCommand(Command);
+	spin_unlock_irqrestore(&Controller->queue_lock, flags);
+	if (RequestSenseLength > UserCommand.RequestSenseLength)
+	  RequestSenseLength = UserCommand.RequestSenseLength;
+	if (copy_to_user(&UserSpaceUserCommand->DataTransferLength,
+				 &DataTransferResidue,
+				 sizeof(DataTransferResidue))) {
+		ErrorCode = -EFAULT;
+		goto Failure2;
+	}
+	if (copy_to_user(&UserSpaceUserCommand->RequestSenseLength,
+			 &RequestSenseLength, sizeof(RequestSenseLength))) {
+		ErrorCode = -EFAULT;
+		goto Failure2;
+	}
+	if (DataTransferLength > 0)
+	  {
+	    if (copy_to_user(UserCommand.DataTransferBuffer,
+			     DataTransferBuffer, DataTransferLength)) {
+		ErrorCode = -EFAULT;
+		goto Failure2;
+	    }
+	  }
+	if (RequestSenseLength > 0)
+	  {
+	    if (copy_to_user(UserCommand.RequestSenseBuffer,
+			     RequestSenseBuffer, RequestSenseLength)) {
+		ErrorCode = -EFAULT;
+		goto Failure2;
+	    }
+	  }
+	ErrorCode = CommandStatus;
+      Failure2:
+	if (DataTransferBuffer != NULL)
+	  pci_free_consistent(Controller->PCIDevice, abs(DataTransferLength),
+		DataTransferBuffer, DataTransferBufferDMA);
+	if (RequestSenseBuffer != NULL)
+	  pci_free_consistent(Controller->PCIDevice, RequestSenseLength,
+		RequestSenseBuffer, RequestSenseBufferDMA);
+      Failure2a:
+	return ErrorCode;
+      }
+    case DAC960_IOCTL_V2_GET_HEALTH_STATUS:
+      {
+	DAC960_V2_GetHealthStatus_T __user *UserSpaceGetHealthStatus =
+	  (DAC960_V2_GetHealthStatus_T __user *) Argument;
+	DAC960_V2_GetHealthStatus_T GetHealthStatus;
+	DAC960_V2_HealthStatusBuffer_T HealthStatusBuffer;
+	DAC960_Controller_T *Controller;
+	int ControllerNumber;
+	if (UserSpaceGetHealthStatus == NULL) return -EINVAL;
+	if (copy_from_user(&GetHealthStatus, UserSpaceGetHealthStatus,
+			   sizeof(DAC960_V2_GetHealthStatus_T)))
+		return -EFAULT;
+	ControllerNumber = GetHealthStatus.ControllerNumber;
+	if (ControllerNumber < 0 ||
+	    ControllerNumber > DAC960_ControllerCount - 1)
+	  return -ENXIO;
+	Controller = DAC960_Controllers[ControllerNumber];
+	if (Controller == NULL) return -ENXIO;
+	if (Controller->FirmwareType != DAC960_V2_Controller) return -EINVAL;
+	if (copy_from_user(&HealthStatusBuffer,
+			   GetHealthStatus.HealthStatusBuffer,
+			   sizeof(DAC960_V2_HealthStatusBuffer_T)))
+		return -EFAULT;
+	while (Controller->V2.HealthStatusBuffer->StatusChangeCounter
+	       == HealthStatusBuffer.StatusChangeCounter &&
+	       Controller->V2.HealthStatusBuffer->NextEventSequenceNumber
+	       == HealthStatusBuffer.NextEventSequenceNumber)
+	  {
+	    interruptible_sleep_on_timeout(&Controller->HealthStatusWaitQueue,
+					   DAC960_MonitoringTimerInterval);
+	    if (signal_pending(current)) return -EINTR;
+	  }
+	if (copy_to_user(GetHealthStatus.HealthStatusBuffer,
+			 Controller->V2.HealthStatusBuffer,
+			 sizeof(DAC960_V2_HealthStatusBuffer_T)))
+		return -EFAULT;
+	return 0;
+      }
+    }
+  return -EINVAL;
+}
+
+static struct file_operations DAC960_gam_fops = {
+	.owner		= THIS_MODULE,
+	.ioctl		= DAC960_gam_ioctl
+};
+
+static struct miscdevice DAC960_gam_dev = {
+	.minor	= DAC960_GAM_MINOR,
+	.name	= "dac960_gam",
+	.fops	= &DAC960_gam_fops
+};
+
+static int DAC960_gam_init(void)
+{
+	int ret;
+
+	ret = misc_register(&DAC960_gam_dev);
+	if (ret)
+		printk(KERN_ERR "DAC960_gam: can't misc_register on minor %d\n",
+		       DAC960_GAM_MINOR);
+	return ret;
+}
+
+static void DAC960_gam_cleanup(void)
+{
+	misc_deregister(&DAC960_gam_dev);
+}
+
+#endif /* DAC960_GAM_MINOR */
+
+static struct DAC960_privdata DAC960_BA_privdata = {
+	.HardwareType =		DAC960_BA_Controller,
+	.FirmwareType 	=	DAC960_V2_Controller,
+	.InterruptHandler =	DAC960_BA_InterruptHandler,
+	.MemoryWindowSize =	DAC960_BA_RegisterWindowSize,
+};
+
+static struct DAC960_privdata DAC960_LP_privdata = {
+	.HardwareType =		DAC960_LP_Controller,
+	.FirmwareType 	=	DAC960_V2_Controller,
+	.InterruptHandler =	DAC960_LP_InterruptHandler,
+	.MemoryWindowSize =	DAC960_LP_RegisterWindowSize,
+};
+
+static struct DAC960_privdata DAC960_LA_privdata = {
+	.HardwareType =		DAC960_LA_Controller,
+	.FirmwareType 	=	DAC960_V1_Controller,
+	.InterruptHandler =	DAC960_LA_InterruptHandler,
+	.MemoryWindowSize =	DAC960_LA_RegisterWindowSize,
+};
+
+static struct DAC960_privdata DAC960_PG_privdata = {
+	.HardwareType =		DAC960_PG_Controller,
+	.FirmwareType 	=	DAC960_V1_Controller,
+	.InterruptHandler =	DAC960_PG_InterruptHandler,
+	.MemoryWindowSize =	DAC960_PG_RegisterWindowSize,
+};
+
+static struct DAC960_privdata DAC960_PD_privdata = {
+	.HardwareType =		DAC960_PD_Controller,
+	.FirmwareType 	=	DAC960_V1_Controller,
+	.InterruptHandler =	DAC960_PD_InterruptHandler,
+	.MemoryWindowSize =	DAC960_PD_RegisterWindowSize,
+};
+
+static struct DAC960_privdata DAC960_P_privdata = {
+	.HardwareType =		DAC960_P_Controller,
+	.FirmwareType 	=	DAC960_V1_Controller,
+	.InterruptHandler =	DAC960_P_InterruptHandler,
+	.MemoryWindowSize =	DAC960_PD_RegisterWindowSize,
+};
+
+static struct pci_device_id DAC960_id_table[] = {
+	{
+		.vendor 	= PCI_VENDOR_ID_MYLEX,
+		.device		= PCI_DEVICE_ID_MYLEX_DAC960_BA,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.driver_data	= (unsigned long) &DAC960_BA_privdata,
+	},
+	{
+		.vendor 	= PCI_VENDOR_ID_MYLEX,
+		.device		= PCI_DEVICE_ID_MYLEX_DAC960_LP,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.driver_data	= (unsigned long) &DAC960_LP_privdata,
+	},
+	{
+		.vendor 	= PCI_VENDOR_ID_DEC,
+		.device		= PCI_DEVICE_ID_DEC_21285,
+		.subvendor	= PCI_VENDOR_ID_MYLEX,
+		.subdevice	= PCI_DEVICE_ID_MYLEX_DAC960_LA,
+		.driver_data	= (unsigned long) &DAC960_LA_privdata,
+	},
+	{
+		.vendor 	= PCI_VENDOR_ID_MYLEX,
+		.device		= PCI_DEVICE_ID_MYLEX_DAC960_PG,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.driver_data	= (unsigned long) &DAC960_PG_privdata,
+	},
+	{
+		.vendor 	= PCI_VENDOR_ID_MYLEX,
+		.device		= PCI_DEVICE_ID_MYLEX_DAC960_PD,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.driver_data	= (unsigned long) &DAC960_PD_privdata,
+	},
+	{
+		.vendor 	= PCI_VENDOR_ID_MYLEX,
+		.device		= PCI_DEVICE_ID_MYLEX_DAC960_P,
+		.subvendor	= PCI_ANY_ID,
+		.subdevice	= PCI_ANY_ID,
+		.driver_data	= (unsigned long) &DAC960_P_privdata,
+	},
+	{0, },
+};
+
+MODULE_DEVICE_TABLE(pci, DAC960_id_table);
+
+static struct pci_driver DAC960_pci_driver = {
+	.name		= "DAC960",
+	.id_table	= DAC960_id_table,
+	.probe		= DAC960_Probe,
+	.remove		= DAC960_Remove,
+};
+
+static int DAC960_init_module(void)
+{
+	int ret;
+
+	ret = pci_module_init(&DAC960_pci_driver);
+#ifdef DAC960_GAM_MINOR
+	if (!ret)
+		DAC960_gam_init();
+#endif
+	return ret;
+}
+
+static void DAC960_cleanup_module(void)
+{
+	int i;
+
+#ifdef DAC960_GAM_MINOR
+	DAC960_gam_cleanup();
+#endif
+
+	for (i = 0; i < DAC960_ControllerCount; i++) {
+		DAC960_Controller_T *Controller = DAC960_Controllers[i];
+		if (Controller == NULL)
+			continue;
+		DAC960_FinalizeController(Controller);
+	}
+	if (DAC960_ProcDirectoryEntry != NULL) {
+  		remove_proc_entry("rd/status", NULL);
+  		remove_proc_entry("rd", NULL);
+	}
+	DAC960_ControllerCount = 0;
+	pci_unregister_driver(&DAC960_pci_driver);
+}
+
+module_init(DAC960_init_module);
+module_exit(DAC960_cleanup_module);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/block/DAC960.h b/drivers/block/DAC960.h
new file mode 100644
index 0000000..d5e8e71
--- /dev/null
+++ b/drivers/block/DAC960.h
@@ -0,0 +1,4114 @@
+/*
+
+  Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
+
+  Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
+
+  This program is free software; you may redistribute and/or modify it under
+  the terms of the GNU General Public License Version 2 as published by the
+  Free Software Foundation.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY, without even the implied warranty of MERCHANTABILITY
+  or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+  for complete details.
+
+  The author respectfully requests that any modifications to this software be
+  sent directly to him for evaluation and testing.
+
+*/
+
+
+/*
+  Define the maximum number of DAC960 Controllers supported by this driver.
+*/
+
+#define DAC960_MaxControllers			8
+
+
+/*
+  Define the maximum number of Controller Channels supported by DAC960
+  V1 and V2 Firmware Controllers.
+*/
+
+#define DAC960_V1_MaxChannels			3
+#define DAC960_V2_MaxChannels			4
+
+
+/*
+  Define the maximum number of Targets per Channel supported by DAC960
+  V1 and V2 Firmware Controllers.
+*/
+
+#define DAC960_V1_MaxTargets			16
+#define DAC960_V2_MaxTargets			128
+
+
+/*
+  Define the maximum number of Logical Drives supported by DAC960
+  V1 and V2 Firmware Controllers.
+*/
+
+#define DAC960_MaxLogicalDrives			32
+
+
+/*
+  Define the maximum number of Physical Devices supported by DAC960
+  V1 and V2 Firmware Controllers.
+*/
+
+#define DAC960_V1_MaxPhysicalDevices		45
+#define DAC960_V2_MaxPhysicalDevices		272
+
+/*
+  Define the PCI DMA mask supported by DAC960 V1 and V2 Firmware Controllers.
+*/
+
+#define DAC690_V1_PciDmaMask	0xffffffff
+#define DAC690_V2_PciDmaMask	0xffffffffffffffffULL
+
+/*
+  Define a Boolean data type.
+*/
+
+typedef enum { false, true } __attribute__ ((packed)) boolean;
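+
+/*
+  The packed attribute limits the enumeration to a single byte, which
+  allows boolean to be used for the one-bit and one-byte fields of the
+  hardware structures defined below.
+*/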
+
+
+/*
+  Define a 32/64 bit I/O Address data type.
+*/
+
+typedef unsigned long DAC960_IO_Address_T;
+
+
+/*
+  Define a 32/64 bit PCI Bus Address data type.
+*/
+
+typedef unsigned long DAC960_PCI_Address_T;
+
+
+/*
+  Define a 32 bit Bus Address data type.
+*/
+
+typedef unsigned int DAC960_BusAddress32_T;
+
+
+/*
+  Define a 64 bit Bus Address data type.
+*/
+
+typedef unsigned long long DAC960_BusAddress64_T;
+
+
+/*
+  Define a 32 bit Byte Count data type.
+*/
+
+typedef unsigned int DAC960_ByteCount32_T;
+
+
+/*
+  Define a 64 bit Byte Count data type.
+*/
+
+typedef unsigned long long DAC960_ByteCount64_T;
+
+
+/*
+  dma_loaf is used by helper routines to divide a region of
+  dma mapped memory into smaller pieces, where those pieces
+  are not of uniform size.
+ */
+
+struct dma_loaf {
+	void	*cpu_base;
+	dma_addr_t dma_base;
+	size_t  length;
+	void	*cpu_free;
+	dma_addr_t dma_free;
+};
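+
+/*
+  The helper routines in DAC960.c presumably carve slices off the loaf by
+  advancing cpu_free and dma_free in step, so that each slice keeps
+  matching CPU and DMA addresses within the single coherent allocation.
+*/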
+
+/*
+  Define the SCSI INQUIRY Standard Data structure.
+*/
+
+typedef struct DAC960_SCSI_Inquiry
+{
+  unsigned char PeripheralDeviceType:5;			/* Byte 0 Bits 0-4 */
+  unsigned char PeripheralQualifier:3;			/* Byte 0 Bits 5-7 */
+  unsigned char DeviceTypeModifier:7;			/* Byte 1 Bits 0-6 */
+  boolean RMB:1;					/* Byte 1 Bit 7 */
+  unsigned char ANSI_ApprovedVersion:3;			/* Byte 2 Bits 0-2 */
+  unsigned char ECMA_Version:3;				/* Byte 2 Bits 3-5 */
+  unsigned char ISO_Version:2;				/* Byte 2 Bits 6-7 */
+  unsigned char ResponseDataFormat:4;			/* Byte 3 Bits 0-3 */
+  unsigned char :2;					/* Byte 3 Bits 4-5 */
+  boolean TrmIOP:1;					/* Byte 3 Bit 6 */
+  boolean AENC:1;					/* Byte 3 Bit 7 */
+  unsigned char AdditionalLength;			/* Byte 4 */
+  unsigned char :8;					/* Byte 5 */
+  unsigned char :8;					/* Byte 6 */
+  boolean SftRe:1;					/* Byte 7 Bit 0 */
+  boolean CmdQue:1;					/* Byte 7 Bit 1 */
+  boolean :1;						/* Byte 7 Bit 2 */
+  boolean Linked:1;					/* Byte 7 Bit 3 */
+  boolean Sync:1;					/* Byte 7 Bit 4 */
+  boolean WBus16:1;					/* Byte 7 Bit 5 */
+  boolean WBus32:1;					/* Byte 7 Bit 6 */
+  boolean RelAdr:1;					/* Byte 7 Bit 7 */
+  unsigned char VendorIdentification[8];		/* Bytes 8-15 */
+  unsigned char ProductIdentification[16];		/* Bytes 16-31 */
+  unsigned char ProductRevisionLevel[4];		/* Bytes 32-35 */
+}
+DAC960_SCSI_Inquiry_T;
+
+
+/*
+  Define the SCSI INQUIRY Unit Serial Number structure.
+*/
+
+typedef struct DAC960_SCSI_Inquiry_UnitSerialNumber
+{
+  unsigned char PeripheralDeviceType:5;			/* Byte 0 Bits 0-4 */
+  unsigned char PeripheralQualifier:3;			/* Byte 0 Bits 5-7 */
+  unsigned char PageCode;				/* Byte 1 */
+  unsigned char :8;					/* Byte 2 */
+  unsigned char PageLength;				/* Byte 3 */
+  unsigned char ProductSerialNumber[28];		/* Bytes 4-31 */
+}
+DAC960_SCSI_Inquiry_UnitSerialNumber_T;
+
+
+/*
+  Define the SCSI REQUEST SENSE Sense Key type.
+*/
+
+typedef enum
+{
+  DAC960_SenseKey_NoSense =			0x0,
+  DAC960_SenseKey_RecoveredError =		0x1,
+  DAC960_SenseKey_NotReady =			0x2,
+  DAC960_SenseKey_MediumError =			0x3,
+  DAC960_SenseKey_HardwareError =		0x4,
+  DAC960_SenseKey_IllegalRequest =		0x5,
+  DAC960_SenseKey_UnitAttention =		0x6,
+  DAC960_SenseKey_DataProtect =			0x7,
+  DAC960_SenseKey_BlankCheck =			0x8,
+  DAC960_SenseKey_VendorSpecific =		0x9,
+  DAC960_SenseKey_CopyAborted =			0xA,
+  DAC960_SenseKey_AbortedCommand =		0xB,
+  DAC960_SenseKey_Equal =			0xC,
+  DAC960_SenseKey_VolumeOverflow =		0xD,
+  DAC960_SenseKey_Miscompare =			0xE,
+  DAC960_SenseKey_Reserved =			0xF
+}
+__attribute__ ((packed))
+DAC960_SCSI_RequestSenseKey_T;
+
+
+/*
+  Define the SCSI REQUEST SENSE structure.
+*/
+
+typedef struct DAC960_SCSI_RequestSense
+{
+  unsigned char ErrorCode:7;				/* Byte 0 Bits 0-6 */
+  boolean Valid:1;					/* Byte 0 Bit 7 */
+  unsigned char SegmentNumber;				/* Byte 1 */
+  DAC960_SCSI_RequestSenseKey_T SenseKey:4;		/* Byte 2 Bits 0-3 */
+  unsigned char :1;					/* Byte 2 Bit 4 */
+  boolean ILI:1;					/* Byte 2 Bit 5 */
+  boolean EOM:1;					/* Byte 2 Bit 6 */
+  boolean Filemark:1;					/* Byte 2 Bit 7 */
+  unsigned char Information[4];				/* Bytes 3-6 */
+  unsigned char AdditionalSenseLength;			/* Byte 7 */
+  unsigned char CommandSpecificInformation[4];		/* Bytes 8-11 */
+  unsigned char AdditionalSenseCode;			/* Byte 12 */
+  unsigned char AdditionalSenseCodeQualifier;		/* Byte 13 */
+}
+DAC960_SCSI_RequestSense_T;
+
+
+/*
+  Define the DAC960 V1 Firmware Command Opcodes.
+*/
+
+typedef enum
+{
+  /* I/O Commands */
+  DAC960_V1_ReadExtended =			0x33,
+  DAC960_V1_WriteExtended =			0x34,
+  DAC960_V1_ReadAheadExtended =			0x35,
+  DAC960_V1_ReadExtendedWithScatterGather =	0xB3,
+  DAC960_V1_WriteExtendedWithScatterGather =	0xB4,
+  DAC960_V1_Read =				0x36,
+  DAC960_V1_ReadWithScatterGather =		0xB6,
+  DAC960_V1_Write =				0x37,
+  DAC960_V1_WriteWithScatterGather =		0xB7,
+  DAC960_V1_DCDB =				0x04,
+  DAC960_V1_DCDBWithScatterGather =		0x84,
+  DAC960_V1_Flush =				0x0A,
+  /* Controller Status Related Commands */
+  DAC960_V1_Enquiry =				0x53,
+  DAC960_V1_Enquiry2 =				0x1C,
+  DAC960_V1_GetLogicalDriveElement =		0x55,
+  DAC960_V1_GetLogicalDriveInformation =	0x19,
+  DAC960_V1_IOPortRead =			0x39,
+  DAC960_V1_IOPortWrite =			0x3A,
+  DAC960_V1_GetSDStats =			0x3E,
+  DAC960_V1_GetPDStats =			0x3F,
+  DAC960_V1_PerformEventLogOperation =		0x72,
+  /* Device Related Commands */
+  DAC960_V1_StartDevice =			0x10,
+  DAC960_V1_GetDeviceState =			0x50,
+  DAC960_V1_StopChannel =			0x13,
+  DAC960_V1_StartChannel =			0x12,
+  DAC960_V1_ResetChannel =			0x1A,
+  /* Commands Associated with Data Consistency and Errors */
+  DAC960_V1_Rebuild =				0x09,
+  DAC960_V1_RebuildAsync =			0x16,
+  DAC960_V1_CheckConsistency =			0x0F,
+  DAC960_V1_CheckConsistencyAsync =		0x1E,
+  DAC960_V1_RebuildStat =			0x0C,
+  DAC960_V1_GetRebuildProgress =		0x27,
+  DAC960_V1_RebuildControl =			0x1F,
+  DAC960_V1_ReadBadBlockTable =			0x0B,
+  DAC960_V1_ReadBadDataTable =			0x25,
+  DAC960_V1_ClearBadDataTable =			0x26,
+  DAC960_V1_GetErrorTable =			0x17,
+  DAC960_V1_AddCapacityAsync =			0x2A,
+  DAC960_V1_BackgroundInitializationControl =	0x2B,
+  /* Configuration Related Commands */
+  DAC960_V1_ReadConfig2 =			0x3D,
+  DAC960_V1_WriteConfig2 =			0x3C,
+  DAC960_V1_ReadConfigurationOnDisk =		0x4A,
+  DAC960_V1_WriteConfigurationOnDisk =		0x4B,
+  DAC960_V1_ReadConfiguration =			0x4E,
+  DAC960_V1_ReadBackupConfiguration =		0x4D,
+  DAC960_V1_WriteConfiguration =		0x4F,
+  DAC960_V1_AddConfiguration =			0x4C,
+  DAC960_V1_ReadConfigurationLabel =		0x48,
+  DAC960_V1_WriteConfigurationLabel =		0x49,
+  /* Firmware Upgrade Related Commands */
+  DAC960_V1_LoadImage =				0x20,
+  DAC960_V1_StoreImage =			0x21,
+  DAC960_V1_ProgramImage =			0x22,
+  /* Diagnostic Commands */
+  DAC960_V1_SetDiagnosticMode =			0x31,
+  DAC960_V1_RunDiagnostic =			0x32,
+  /* Subsystem Service Commands */
+  DAC960_V1_GetSubsystemData =			0x70,
+  DAC960_V1_SetSubsystemParameters =		0x71,
+  /* Version 2.xx Firmware Commands */
+  DAC960_V1_Enquiry_Old =			0x05,
+  DAC960_V1_GetDeviceState_Old =		0x14,
+  DAC960_V1_Read_Old =				0x02,
+  DAC960_V1_Write_Old =				0x03,
+  DAC960_V1_ReadWithScatterGather_Old =		0x82,
+  DAC960_V1_WriteWithScatterGather_Old =	0x83
+}
+__attribute__ ((packed))
+DAC960_V1_CommandOpcode_T;
+
+
+/*
+  Define the DAC960 V1 Firmware Command Identifier type.
+*/
+
+typedef unsigned char DAC960_V1_CommandIdentifier_T;
+
+
+/*
+  Define the DAC960 V1 Firmware Command Status Codes.
+*/
+
+#define DAC960_V1_NormalCompletion		0x0000	/* Common */
+#define DAC960_V1_CheckConditionReceived	0x0002	/* Common */
+#define DAC960_V1_NoDeviceAtAddress		0x0102	/* Common */
+#define DAC960_V1_InvalidDeviceAddress		0x0105	/* Common */
+#define DAC960_V1_InvalidParameter		0x0105	/* Common */
+#define DAC960_V1_IrrecoverableDataError	0x0001	/* I/O */
+#define DAC960_V1_LogicalDriveNonexistentOrOffline 0x0002 /* I/O */
+#define DAC960_V1_AccessBeyondEndOfLogicalDrive	0x0105	/* I/O */
+#define DAC960_V1_BadDataEncountered		0x010C	/* I/O */
+#define DAC960_V1_DeviceBusy			0x0008	/* DCDB */
+#define DAC960_V1_DeviceNonresponsive		0x000E	/* DCDB */
+#define DAC960_V1_CommandTerminatedAbnormally	0x000F	/* DCDB */
+#define DAC960_V1_UnableToStartDevice		0x0002	/* Device */
+#define DAC960_V1_InvalidChannelOrTargetOrModifier 0x0105 /* Device */
+#define DAC960_V1_ChannelBusy			0x0106	/* Device */
+#define DAC960_V1_ChannelNotStopped		0x0002	/* Device */
+#define DAC960_V1_AttemptToRebuildOnlineDrive	0x0002	/* Consistency */
+#define DAC960_V1_RebuildBadBlocksEncountered	0x0003	/* Consistency */
+#define DAC960_V1_NewDiskFailedDuringRebuild	0x0004	/* Consistency */
+#define DAC960_V1_RebuildOrCheckAlreadyInProgress 0x0106 /* Consistency */
+#define DAC960_V1_DependentDiskIsDead		0x0002	/* Consistency */
+#define DAC960_V1_InconsistentBlocksFound	0x0003	/* Consistency */
+#define DAC960_V1_InvalidOrNonredundantLogicalDrive 0x0105 /* Consistency */
+#define DAC960_V1_NoRebuildOrCheckInProgress	0x0105	/* Consistency */
+#define DAC960_V1_RebuildInProgress_DataValid	0x0000	/* Consistency */
+#define DAC960_V1_RebuildFailed_LogicalDriveFailure 0x0002 /* Consistency */
+#define DAC960_V1_RebuildFailed_BadBlocksOnOther 0x0003	/* Consistency */
+#define DAC960_V1_RebuildFailed_NewDriveFailed	0x0004	/* Consistency */
+#define DAC960_V1_RebuildSuccessful		0x0100	/* Consistency */
+#define DAC960_V1_RebuildSuccessfullyTerminated	0x0107	/* Consistency */
+#define DAC960_V1_BackgroundInitSuccessful	0x0100	/* Consistency */
+#define DAC960_V1_BackgroundInitAborted		0x0005	/* Consistency */
+#define DAC960_V1_NoBackgroundInitInProgress	0x0105	/* Consistency */
+#define DAC960_V1_AddCapacityInProgress		0x0004	/* Consistency */
+#define DAC960_V1_AddCapacityFailedOrSuspended	0x00F4	/* Consistency */
+#define DAC960_V1_Config2ChecksumError		0x0002	/* Configuration */
+#define DAC960_V1_ConfigurationSuspended	0x0106	/* Configuration */
+#define DAC960_V1_FailedToConfigureNVRAM	0x0105	/* Configuration */
+#define DAC960_V1_ConfigurationNotSavedStateChange 0x0106 /* Configuration */
+#define DAC960_V1_SubsystemNotInstalled		0x0001	/* Subsystem */
+#define DAC960_V1_SubsystemFailed		0x0002	/* Subsystem */
+#define DAC960_V1_SubsystemBusy			0x0106	/* Subsystem */
+
+typedef unsigned short DAC960_V1_CommandStatus_T;
+
+
+/*
+  Define the DAC960 V1 Firmware Enquiry Command reply structure.
+*/
+
+typedef struct DAC960_V1_Enquiry
+{
+  unsigned char NumberOfLogicalDrives;			/* Byte 0 */
+  unsigned int :24;					/* Bytes 1-3 */
+  unsigned int LogicalDriveSizes[32];			/* Bytes 4-131 */
+  unsigned short FlashAge;				/* Bytes 132-133 */
+  struct {
+    boolean DeferredWriteError:1;			/* Byte 134 Bit 0 */
+    boolean BatteryLow:1;				/* Byte 134 Bit 1 */
+    unsigned char :6;					/* Byte 134 Bits 2-7 */
+  } StatusFlags;
+  unsigned char :8;					/* Byte 135 */
+  unsigned char MinorFirmwareVersion;			/* Byte 136 */
+  unsigned char MajorFirmwareVersion;			/* Byte 137 */
+  enum {
+    DAC960_V1_NoStandbyRebuildOrCheckInProgress =		    0x00,
+    DAC960_V1_StandbyRebuildInProgress =			    0x01,
+    DAC960_V1_BackgroundRebuildInProgress =			    0x02,
+    DAC960_V1_BackgroundCheckInProgress =			    0x03,
+    DAC960_V1_StandbyRebuildCompletedWithError =		    0xFF,
+    DAC960_V1_BackgroundRebuildOrCheckFailed_DriveFailed =	    0xF0,
+    DAC960_V1_BackgroundRebuildOrCheckFailed_LogicalDriveFailed =   0xF1,
+    DAC960_V1_BackgroundRebuildOrCheckFailed_OtherCauses =	    0xF2,
+    DAC960_V1_BackgroundRebuildOrCheckSuccessfullyTerminated =	    0xF3
+  } __attribute__ ((packed)) RebuildFlag;		/* Byte 138 */
+  unsigned char MaxCommands;				/* Byte 139 */
+  unsigned char OfflineLogicalDriveCount;		/* Byte 140 */
+  unsigned char :8;					/* Byte 141 */
+  unsigned short EventLogSequenceNumber;		/* Bytes 142-143 */
+  unsigned char CriticalLogicalDriveCount;		/* Byte 144 */
+  unsigned int :24;					/* Bytes 145-147 */
+  unsigned char DeadDriveCount;				/* Byte 148 */
+  unsigned char :8;					/* Byte 149 */
+  unsigned char RebuildCount;				/* Byte 150 */
+  struct {
+    unsigned char :3;					/* Byte 151 Bits 0-2 */
+    boolean BatteryBackupUnitPresent:1;			/* Byte 151 Bit 3 */
+    unsigned char :3;					/* Byte 151 Bits 4-6 */
+    unsigned char :1;					/* Byte 151 Bit 7 */
+  } MiscFlags;
+  struct {
+    unsigned char TargetID;
+    unsigned char Channel;
+  } DeadDrives[21];					/* Bytes 152-194 */
+  unsigned char Reserved[62];				/* Bytes 195-255 */
+}
+__attribute__ ((packed))
+DAC960_V1_Enquiry_T;
+
+
+/*
+  Define the DAC960 V1 Firmware Enquiry2 Command reply structure.
+*/
+
+typedef struct DAC960_V1_Enquiry2
+{
+  struct {
+    enum {
+      DAC960_V1_P_PD_PU =			0x01,
+      DAC960_V1_PL =				0x02,
+      DAC960_V1_PG =				0x10,
+      DAC960_V1_PJ =				0x11,
+      DAC960_V1_PR =				0x12,
+      DAC960_V1_PT =				0x13,
+      DAC960_V1_PTL0 =				0x14,
+      DAC960_V1_PRL =				0x15,
+      DAC960_V1_PTL1 =				0x16,
+      DAC960_V1_1164P =				0x20
+    } __attribute__ ((packed)) SubModel;		/* Byte 0 */
+    unsigned char ActualChannels;			/* Byte 1 */
+    enum {
+      DAC960_V1_FiveChannelBoard =		0x01,
+      DAC960_V1_ThreeChannelBoard =		0x02,
+      DAC960_V1_TwoChannelBoard =		0x03,
+      DAC960_V1_ThreeChannelASIC_DAC =		0x04
+    } __attribute__ ((packed)) Model;			/* Byte 2 */
+    enum {
+      DAC960_V1_EISA_Controller =		0x01,
+      DAC960_V1_MicroChannel_Controller =	0x02,
+      DAC960_V1_PCI_Controller =		0x03,
+      DAC960_V1_SCSItoSCSI_Controller =		0x08
+    } __attribute__ ((packed)) ProductFamily;		/* Byte 3 */
+  } HardwareID;						/* Bytes 0-3 */
+  /* MajorVersion.MinorVersion-FirmwareType-TurnID */
+  struct {
+    unsigned char MajorVersion;				/* Byte 4 */
+    unsigned char MinorVersion;				/* Byte 5 */
+    unsigned char TurnID;				/* Byte 6 */
+    char FirmwareType;					/* Byte 7 */
+  } FirmwareID;						/* Bytes 4-7 */
+  unsigned char :8;					/* Byte 8 */
+  unsigned int :24;					/* Bytes 9-11 */
+  unsigned char ConfiguredChannels;			/* Byte 12 */
+  unsigned char ActualChannels;				/* Byte 13 */
+  unsigned char MaxTargets;				/* Byte 14 */
+  unsigned char MaxTags;				/* Byte 15 */
+  unsigned char MaxLogicalDrives;			/* Byte 16 */
+  unsigned char MaxArms;				/* Byte 17 */
+  unsigned char MaxSpans;				/* Byte 18 */
+  unsigned char :8;					/* Byte 19 */
+  unsigned int :32;					/* Bytes 20-23 */
+  unsigned int MemorySize;				/* Bytes 24-27 */
+  unsigned int CacheSize;				/* Bytes 28-31 */
+  unsigned int FlashMemorySize;				/* Bytes 32-35 */
+  unsigned int NonVolatileMemorySize;			/* Bytes 36-39 */
+  struct {
+    enum {
+      DAC960_V1_RamType_DRAM =			0x0,
+      DAC960_V1_RamType_EDO =			0x1,
+      DAC960_V1_RamType_SDRAM =			0x2,
+      DAC960_V1_RamType_Last =			0x7
+    } __attribute__ ((packed)) RamType:3;		/* Byte 40 Bits 0-2 */
+    enum {
+      DAC960_V1_ErrorCorrection_None =		0x0,
+      DAC960_V1_ErrorCorrection_Parity =	0x1,
+      DAC960_V1_ErrorCorrection_ECC =		0x2,
+      DAC960_V1_ErrorCorrection_Last =		0x7
+    } __attribute__ ((packed)) ErrorCorrection:3;	/* Byte 40 Bits 3-5 */
+    boolean FastPageMode:1;				/* Byte 40 Bit 6 */
+    boolean LowPowerMemory:1;				/* Byte 40 Bit 7 */
+    unsigned char :8;					/* Byte 41 */
+  } MemoryType;
+  unsigned short ClockSpeed;				/* Bytes 42-43 */
+  unsigned short MemorySpeed;				/* Bytes 44-45 */
+  unsigned short HardwareSpeed;				/* Bytes 46-47 */
+  unsigned int :32;					/* Bytes 48-51 */
+  unsigned int :32;					/* Bytes 52-55 */
+  unsigned char :8;					/* Byte 56 */
+  unsigned char :8;					/* Byte 57 */
+  unsigned short :16;					/* Bytes 58-59 */
+  unsigned short MaxCommands;				/* Bytes 60-61 */
+  unsigned short MaxScatterGatherEntries;		/* Bytes 62-63 */
+  unsigned short MaxDriveCommands;			/* Bytes 64-65 */
+  unsigned short MaxIODescriptors;			/* Bytes 66-67 */
+  unsigned short MaxCombinedSectors;			/* Bytes 68-69 */
+  unsigned char Latency;				/* Byte 70 */
+  unsigned char :8;					/* Byte 71 */
+  unsigned char SCSITimeout;				/* Byte 72 */
+  unsigned char :8;					/* Byte 73 */
+  unsigned short MinFreeLines;				/* Bytes 74-75 */
+  unsigned int :32;					/* Bytes 76-79 */
+  unsigned int :32;					/* Bytes 80-83 */
+  unsigned char RebuildRateConstant;			/* Byte 84 */
+  unsigned char :8;					/* Byte 85 */
+  unsigned char :8;					/* Byte 86 */
+  unsigned char :8;					/* Byte 87 */
+  unsigned int :32;					/* Bytes 88-91 */
+  unsigned int :32;					/* Bytes 92-95 */
+  unsigned short PhysicalDriveBlockSize;		/* Bytes 96-97 */
+  unsigned short LogicalDriveBlockSize;			/* Bytes 98-99 */
+  unsigned short MaxBlocksPerCommand;			/* Bytes 100-101 */
+  unsigned short BlockFactor;				/* Bytes 102-103 */
+  unsigned short CacheLineSize;				/* Bytes 104-105 */
+  struct {
+    enum {
+      DAC960_V1_Narrow_8bit =			0x0,
+      DAC960_V1_Wide_16bit =			0x1,
+      DAC960_V1_Wide_32bit =			0x2
+    } __attribute__ ((packed)) BusWidth:2;		/* Byte 106 Bits 0-1 */
+    enum {
+      DAC960_V1_Fast =				0x0,
+      DAC960_V1_Ultra =				0x1,
+      DAC960_V1_Ultra2 =			0x2
+    } __attribute__ ((packed)) BusSpeed:2;		/* Byte 106 Bits 2-3 */
+    boolean Differential:1;				/* Byte 106 Bit 4 */
+    unsigned char :3;					/* Byte 106 Bits 5-7 */
+  } SCSICapability;
+  unsigned char :8;					/* Byte 107 */
+  unsigned int :32;					/* Bytes 108-111 */
+  unsigned short FirmwareBuildNumber;			/* Bytes 112-113 */
+  enum {
+    DAC960_V1_AEMI =				0x01,
+    DAC960_V1_OEM1 =				0x02,
+    DAC960_V1_OEM2 =				0x04,
+    DAC960_V1_OEM3 =				0x08,
+    DAC960_V1_Conner =				0x10,
+    DAC960_V1_SAFTE =				0x20
+  } __attribute__ ((packed)) FaultManagementType;	/* Byte 114 */
+  unsigned char :8;					/* Byte 115 */
+  struct {
+    boolean Clustering:1;				/* Byte 116 Bit 0 */
+    boolean MylexOnlineRAIDExpansion:1;			/* Byte 116 Bit 1 */
+    boolean ReadAhead:1;				/* Byte 116 Bit 2 */
+    boolean BackgroundInitialization:1;			/* Byte 116 Bit 3 */
+    unsigned int :28;					/* Bytes 116-119 */
+  } FirmwareFeatures;
+  unsigned int :32;					/* Bytes 120-123 */
+  unsigned int :32;					/* Bytes 124-127 */
+}
+DAC960_V1_Enquiry2_T;
+
+
+/*
+  Define the DAC960 V1 Firmware Logical Drive State type.
+*/
+
+typedef enum
+{
+  DAC960_V1_LogicalDrive_Online =		0x03,
+  DAC960_V1_LogicalDrive_Critical =		0x04,
+  DAC960_V1_LogicalDrive_Offline =		0xFF
+}
+__attribute__ ((packed))
+DAC960_V1_LogicalDriveState_T;
+
+
+/*
+  Define the DAC960 V1 Firmware Logical Drive Information structure.
+*/
+
+typedef struct DAC960_V1_LogicalDriveInformation
+{
+  unsigned int LogicalDriveSize;			/* Bytes 0-3 */
+  DAC960_V1_LogicalDriveState_T LogicalDriveState;	/* Byte 4 */
+  unsigned char RAIDLevel:7;				/* Byte 5 Bits 0-6 */
+  boolean WriteBack:1;					/* Byte 5 Bit 7 */
+  unsigned short :16;					/* Bytes 6-7 */
+}
+DAC960_V1_LogicalDriveInformation_T;
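+
+
+/*
+  Illustrative sketch (not compiled): an offline logical drive cannot be
+  opened; a critical drive remains usable but runs degraded.
+*/
+
+#if 0
+static boolean DAC960_ExampleLogicalDriveUsable(
+  DAC960_V1_LogicalDriveInformation_T *LogicalDriveInformation)
+{
+  return LogicalDriveInformation->LogicalDriveState
+	 != DAC960_V1_LogicalDrive_Offline;
+}
+#endif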
+
+
+/*
+  Define the DAC960 V1 Firmware Get Logical Drive Information Command
+  reply structure.
+*/
+
+typedef DAC960_V1_LogicalDriveInformation_T
+	DAC960_V1_LogicalDriveInformationArray_T[DAC960_MaxLogicalDrives];
+
+
+/*
+  Define the DAC960 V1 Firmware Perform Event Log Operation Types.
+*/
+
+typedef enum
+{
+  DAC960_V1_GetEventLogEntry =			0x00
+}
+__attribute__ ((packed))
+DAC960_V1_PerformEventLogOpType_T;
+
+
+/*
+  Define the DAC960 V1 Firmware Get Event Log Entry Command reply structure.
+*/
+
+typedef struct DAC960_V1_EventLogEntry
+{
+  unsigned char MessageType;				/* Byte 0 */
+  unsigned char MessageLength;				/* Byte 1 */
+  unsigned char TargetID:5;				/* Byte 2 Bits 0-4 */
+  unsigned char Channel:3;				/* Byte 2 Bits 5-7 */
+  unsigned char LogicalUnit:6;				/* Byte 3 Bits 0-5 */
+  unsigned char :2;					/* Byte 3 Bits 6-7 */
+  unsigned short SequenceNumber;			/* Bytes 4-5 */
+  unsigned char ErrorCode:7;				/* Byte 6 Bits 0-6 */
+  boolean Valid:1;					/* Byte 6 Bit 7 */
+  unsigned char SegmentNumber;				/* Byte 7 */
+  DAC960_SCSI_RequestSenseKey_T SenseKey:4;		/* Byte 8 Bits 0-3 */
+  unsigned char :1;					/* Byte 8 Bit 4 */
+  boolean ILI:1;					/* Byte 8 Bit 5 */
+  boolean EOM:1;					/* Byte 8 Bit 6 */
+  boolean Filemark:1;					/* Byte 8 Bit 7 */
+  unsigned char Information[4];				/* Bytes 9-12 */
+  unsigned char AdditionalSenseLength;			/* Byte 13 */
+  unsigned char CommandSpecificInformation[4];		/* Bytes 14-17 */
+  unsigned char AdditionalSenseCode;			/* Byte 18 */
+  unsigned char AdditionalSenseCodeQualifier;		/* Byte 19 */
+  unsigned char Dummy[12];				/* Bytes 20-31 */
+}
+DAC960_V1_EventLogEntry_T;
+
+
+/*
+  Define the DAC960 V1 Firmware Physical Device State type.
+*/
+
+typedef enum
+{
+    DAC960_V1_Device_Dead =			0x00,
+    DAC960_V1_Device_WriteOnly =		0x02,
+    DAC960_V1_Device_Online =			0x03,
+    DAC960_V1_Device_Standby =			0x10
+}
+__attribute__ ((packed))
+DAC960_V1_PhysicalDeviceState_T;
+
+
+/*
+  Define the DAC960 V1 Firmware Get Device State Command reply structure.
+  The structure is padded by 2 bytes for compatibility with Version 2.xx
+  Firmware.
+*/
+
+typedef struct DAC960_V1_DeviceState
+{
+  boolean Present:1;					/* Byte 0 Bit 0 */
+  unsigned char :7;					/* Byte 0 Bits 1-7 */
+  enum {
+    DAC960_V1_OtherType =			0x0,
+    DAC960_V1_DiskType =			0x1,
+    DAC960_V1_SequentialType =			0x2,
+    DAC960_V1_CDROM_or_WORM_Type =		0x3
+    } __attribute__ ((packed)) DeviceType:2;		/* Byte 1 Bits 0-1 */
+  boolean :1;						/* Byte 1 Bit 2 */
+  boolean Fast20:1;					/* Byte 1 Bit 3 */
+  boolean Sync:1;					/* Byte 1 Bit 4 */
+  boolean Fast:1;					/* Byte 1 Bit 5 */
+  boolean Wide:1;					/* Byte 1 Bit 6 */
+  boolean TaggedQueuingSupported:1;			/* Byte 1 Bit 7 */
+  DAC960_V1_PhysicalDeviceState_T DeviceState;		/* Byte 2 */
+  unsigned char :8;					/* Byte 3 */
+  unsigned char SynchronousMultiplier;			/* Byte 4 */
+  unsigned char SynchronousOffset:5;			/* Byte 5 Bits 0-4 */
+  unsigned char :3;					/* Byte 5 Bits 5-7 */
+  unsigned int DiskSize __attribute__ ((packed));	/* Bytes 6-9 */
+  unsigned short :16;					/* Bytes 10-11 */
+}
+DAC960_V1_DeviceState_T;
+
+
+/*
+  Define the DAC960 V1 Firmware Get Rebuild Progress Command reply structure.
+*/
+
+typedef struct DAC960_V1_RebuildProgress
+{
+  unsigned int LogicalDriveNumber;			/* Bytes 0-3 */
+  unsigned int LogicalDriveSize;			/* Bytes 4-7 */
+  unsigned int RemainingBlocks;				/* Bytes 8-11 */
+}
+DAC960_V1_RebuildProgress_T;
+
+
+/*
+  Define the DAC960 V1 Firmware Background Initialization Status Command
+  reply structure.
+*/
+
+typedef struct DAC960_V1_BackgroundInitializationStatus
+{
+  unsigned int LogicalDriveSize;			/* Bytes 0-3 */
+  unsigned int BlocksCompleted;				/* Bytes 4-7 */
+  unsigned char Reserved1[12];				/* Bytes 8-19 */
+  unsigned int LogicalDriveNumber;			/* Bytes 20-23 */
+  unsigned char RAIDLevel;				/* Byte 24 */
+  enum {
+    DAC960_V1_BackgroundInitializationInvalid =	    0x00,
+    DAC960_V1_BackgroundInitializationStarted =	    0x02,
+    DAC960_V1_BackgroundInitializationInProgress =  0x04,
+    DAC960_V1_BackgroundInitializationSuspended =   0x05,
+    DAC960_V1_BackgroundInitializationCancelled =   0x06
+  } __attribute__ ((packed)) Status;			/* Byte 25 */
+  unsigned char Reserved2[6];				/* Bytes 26-31 */
+}
+DAC960_V1_BackgroundInitializationStatus_T;
+
+
+/*
+  Define the DAC960 V1 Firmware Error Table Entry structure.
+*/
+
+typedef struct DAC960_V1_ErrorTableEntry
+{
+  unsigned char ParityErrorCount;			/* Byte 0 */
+  unsigned char SoftErrorCount;				/* Byte 1 */
+  unsigned char HardErrorCount;				/* Byte 2 */
+  unsigned char MiscErrorCount;				/* Byte 3 */
+}
+DAC960_V1_ErrorTableEntry_T;
+
+
+/*
+  Define the DAC960 V1 Firmware Get Error Table Command reply structure.
+*/
+
+typedef struct DAC960_V1_ErrorTable
+{
+  DAC960_V1_ErrorTableEntry_T
+    ErrorTableEntries[DAC960_V1_MaxChannels][DAC960_V1_MaxTargets];
+}
+DAC960_V1_ErrorTable_T;
+
+
+/*
+  Define the DAC960 V1 Firmware Read Config2 Command reply structure.
+*/
+
+typedef struct DAC960_V1_Config2
+{
+  unsigned char :1;					/* Byte 0 Bit 0 */
+  boolean ActiveNegationEnabled:1;			/* Byte 0 Bit 1 */
+  unsigned char :5;					/* Byte 0 Bits 2-6 */
+  boolean NoRescanIfResetReceivedDuringScan:1;		/* Byte 0 Bit 7 */
+  boolean StorageWorksSupportEnabled:1;			/* Byte 1 Bit 0 */
+  boolean HewlettPackardSupportEnabled:1;		/* Byte 1 Bit 1 */
+  boolean NoDisconnectOnFirstCommand:1;			/* Byte 1 Bit 2 */
+  unsigned char :2;					/* Byte 1 Bits 3-4 */
+  boolean AEMI_ARM:1;					/* Byte 1 Bit 5 */
+  boolean AEMI_OFM:1;					/* Byte 1 Bit 6 */
+  unsigned char :1;					/* Byte 1 Bit 7 */
+  enum {
+    DAC960_V1_OEMID_Mylex =			0x00,
+    DAC960_V1_OEMID_IBM =			0x08,
+    DAC960_V1_OEMID_HP =			0x0A,
+    DAC960_V1_OEMID_DEC =			0x0C,
+    DAC960_V1_OEMID_Siemens =			0x10,
+    DAC960_V1_OEMID_Intel =			0x12
+  } __attribute__ ((packed)) OEMID;			/* Byte 2 */
+  unsigned char OEMModelNumber;				/* Byte 3 */
+  unsigned char PhysicalSector;				/* Byte 4 */
+  unsigned char LogicalSector;				/* Byte 5 */
+  unsigned char BlockFactor;				/* Byte 6 */
+  boolean ReadAheadEnabled:1;				/* Byte 7 Bit 0 */
+  boolean LowBIOSDelay:1;				/* Byte 7 Bit 1 */
+  unsigned char :2;					/* Byte 7 Bits 2-3 */
+  boolean ReassignRestrictedToOneSector:1;		/* Byte 7 Bit 4 */
+  unsigned char :1;					/* Byte 7 Bit 5 */
+  boolean ForceUnitAccessDuringWriteRecovery:1;		/* Byte 7 Bit 6 */
+  boolean EnableLeftSymmetricRAID5Algorithm:1;		/* Byte 7 Bit 7 */
+  unsigned char DefaultRebuildRate;			/* Byte 8 */
+  unsigned char :8;					/* Byte 9 */
+  unsigned char BlocksPerCacheLine;			/* Byte 10 */
+  unsigned char BlocksPerStripe;			/* Byte 11 */
+  struct {
+    enum {
+      DAC960_V1_Async =				0x0,
+      DAC960_V1_Sync_8MHz =			0x1,
+      DAC960_V1_Sync_5MHz =			0x2,
+      DAC960_V1_Sync_10or20MHz =		0x3
+    } __attribute__ ((packed)) Speed:2;			/* Each Byte Bits 0-1 */
+    boolean Force8Bit:1;				/* Each Byte Bit 2 */
+    boolean DisableFast20:1;				/* Each Byte Bit 3 */
+    unsigned char :3;					/* Each Byte Bits 4-6 */
+    boolean EnableTaggedQueuing:1;			/* Each Byte Bit 7 */
+  } __attribute__ ((packed)) ChannelParameters[6];	/* Bytes 12-17 */
+  unsigned char SCSIInitiatorID;			/* Byte 18 */
+  unsigned char :8;					/* Byte 19 */
+  enum {
+    DAC960_V1_StartupMode_ControllerSpinUp =	0x00,
+    DAC960_V1_StartupMode_PowerOnSpinUp =	0x01
+  } __attribute__ ((packed)) StartupMode;		/* Byte 20 */
+  unsigned char SimultaneousDeviceSpinUpCount;		/* Byte 21 */
+  unsigned char SecondsDelayBetweenSpinUps;		/* Byte 22 */
+  unsigned char Reserved1[29];				/* Bytes 23-51 */
+  boolean BIOSDisabled:1;				/* Byte 52 Bit 0 */
+  boolean CDROMBootEnabled:1;				/* Byte 52 Bit 1 */
+  unsigned char :3;					/* Byte 52 Bits 2-4 */
+  enum {
+    DAC960_V1_Geometry_128_32 =			0x0,
+    DAC960_V1_Geometry_255_63 =			0x1,
+    DAC960_V1_Geometry_Reserved1 =		0x2,
+    DAC960_V1_Geometry_Reserved2 =		0x3
+  } __attribute__ ((packed)) DriveGeometry:2;		/* Byte 52 Bits 5-6 */
+  unsigned char :1;					/* Byte 52 Bit 7 */
+  unsigned char Reserved2[9];				/* Bytes 53-61 */
+  unsigned short Checksum;				/* Bytes 62-63 */
+}
+DAC960_V1_Config2_T;
+
+
+/*
+  Define the DAC960 V1 Firmware DCDB request structure.
+*/
+
+typedef struct DAC960_V1_DCDB
+{
+  unsigned char TargetID:4;				 /* Byte 0 Bits 0-3 */
+  unsigned char Channel:4;				 /* Byte 0 Bits 4-7 */
+  enum {
+    DAC960_V1_DCDB_NoDataTransfer =		0,
+    DAC960_V1_DCDB_DataTransferDeviceToSystem = 1,
+    DAC960_V1_DCDB_DataTransferSystemToDevice = 2,
+    DAC960_V1_DCDB_IllegalDataTransfer =	3
+  } __attribute__ ((packed)) Direction:2;		 /* Byte 1 Bits 0-1 */
+  boolean EarlyStatus:1;				 /* Byte 1 Bit 2 */
+  unsigned char :1;					 /* Byte 1 Bit 3 */
+  enum {
+    DAC960_V1_DCDB_Timeout_24_hours =		0,
+    DAC960_V1_DCDB_Timeout_10_seconds =		1,
+    DAC960_V1_DCDB_Timeout_60_seconds =		2,
+    DAC960_V1_DCDB_Timeout_10_minutes =		3
+  } __attribute__ ((packed)) Timeout:2;			 /* Byte 1 Bits 4-5 */
+  boolean NoAutomaticRequestSense:1;			 /* Byte 1 Bit 6 */
+  boolean DisconnectPermitted:1;			 /* Byte 1 Bit 7 */
+  unsigned short TransferLength;			 /* Bytes 2-3 */
+  DAC960_BusAddress32_T BusAddress;			 /* Bytes 4-7 */
+  unsigned char CDBLength:4;				 /* Byte 8 Bits 0-3 */
+  unsigned char TransferLengthHigh4:4;			 /* Byte 8 Bits 4-7 */
+  unsigned char SenseLength;				 /* Byte 9 */
+  unsigned char CDB[12];				 /* Bytes 10-21 */
+  unsigned char SenseData[64];				 /* Bytes 22-85 */
+  unsigned char Status;					 /* Byte 86 */
+  unsigned char :8;					 /* Byte 87 */
+}
+DAC960_V1_DCDB_T;
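+
+
+/*
+  Illustrative sketch (not compiled): preparing a DCDB for a 36 byte SCSI
+  INQUIRY to a given Channel and TargetID.  DataDMA must already be the
+  mapped DMA address of the reply buffer; mapping is outside this sketch.
+*/
+
+#if 0
+static void DAC960_ExamplePrepareInquiryDCDB(DAC960_V1_DCDB_T *DCDB,
+					     unsigned char Channel,
+					     unsigned char TargetID,
+					     DAC960_BusAddress32_T DataDMA)
+{
+  memset(DCDB, 0, sizeof(DAC960_V1_DCDB_T));
+  DCDB->Channel = Channel;
+  DCDB->TargetID = TargetID;
+  DCDB->Direction = DAC960_V1_DCDB_DataTransferDeviceToSystem;
+  DCDB->Timeout = DAC960_V1_DCDB_Timeout_10_seconds;
+  DCDB->DisconnectPermitted = true;
+  DCDB->TransferLength = 36;
+  DCDB->BusAddress = DataDMA;
+  DCDB->CDBLength = 6;
+  DCDB->SenseLength = sizeof(DCDB->SenseData);
+  DCDB->CDB[0] = 0x12;	/* INQUIRY */
+  DCDB->CDB[4] = 36;	/* Allocation Length */
+}
+#endif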
+
+
+/*
+  Define the DAC960 V1 Firmware Scatter/Gather List Type 1 32 Bit Address
+  32 Bit Byte Count structure.
+*/
+
+typedef struct DAC960_V1_ScatterGatherSegment
+{
+  DAC960_BusAddress32_T SegmentDataPointer;		/* Bytes 0-3 */
+  DAC960_ByteCount32_T SegmentByteCount;		/* Bytes 4-7 */
+}
+DAC960_V1_ScatterGatherSegment_T;
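+
+
+/*
+  Illustrative sketch (not compiled): a contiguous data buffer is described
+  by a single segment holding its DMA address and byte count.
+*/
+
+#if 0
+static void DAC960_ExampleFillSegment(
+  DAC960_V1_ScatterGatherSegment_T *ScatterGatherSegment,
+  DAC960_BusAddress32_T DataDMA, DAC960_ByteCount32_T ByteCount)
+{
+  ScatterGatherSegment->SegmentDataPointer = DataDMA;
+  ScatterGatherSegment->SegmentByteCount = ByteCount;
+}
+#endif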
+
+
+/*
+  Define the 13 Byte DAC960 V1 Firmware Command Mailbox structure.  Bytes 13-15
+  are not used.  The Command Mailbox structure is padded to 16 bytes for
+  efficient access.
+*/
+
+typedef union DAC960_V1_CommandMailbox
+{
+  unsigned int Words[4];				/* Words 0-3 */
+  unsigned char Bytes[16];				/* Bytes 0-15 */
+  struct {
+    DAC960_V1_CommandOpcode_T CommandOpcode;		/* Byte 0 */
+    DAC960_V1_CommandIdentifier_T CommandIdentifier;	/* Byte 1 */
+    unsigned char Dummy[14];				/* Bytes 2-15 */
+  } __attribute__ ((packed)) Common;
+  struct {
+    DAC960_V1_CommandOpcode_T CommandOpcode;		/* Byte 0 */
+    DAC960_V1_CommandIdentifier_T CommandIdentifier;	/* Byte 1 */
+    unsigned char Dummy1[6];				/* Bytes 2-7 */
+    DAC960_BusAddress32_T BusAddress;			/* Bytes 8-11 */
+    unsigned char Dummy2[4];				/* Bytes 12-15 */
+  } __attribute__ ((packed)) Type3;
+  struct {
+    DAC960_V1_CommandOpcode_T CommandOpcode;		/* Byte 0 */
+    DAC960_V1_CommandIdentifier_T CommandIdentifier;	/* Byte 1 */
+    unsigned char CommandOpcode2;			/* Byte 2 */
+    unsigned char Dummy1[5];				/* Bytes 3-7 */
+    DAC960_BusAddress32_T BusAddress;			/* Bytes 8-11 */
+    unsigned char Dummy2[4];				/* Bytes 12-15 */
+  } __attribute__ ((packed)) Type3B;
+  struct {
+    DAC960_V1_CommandOpcode_T CommandOpcode;		/* Byte 0 */
+    DAC960_V1_CommandIdentifier_T CommandIdentifier;	/* Byte 1 */
+    unsigned char Dummy1[5];				/* Bytes 2-6 */
+    unsigned char LogicalDriveNumber:6;			/* Byte 7 Bits 0-6 */
+    boolean AutoRestore:1;				/* Byte 7 Bit 7 */
+    unsigned char Dummy2[8];				/* Bytes 8-15 */
+  } __attribute__ ((packed)) Type3C;
+  struct {
+    DAC960_V1_CommandOpcode_T CommandOpcode;		/* Byte 0 */
+    DAC960_V1_CommandIdentifier_T CommandIdentifier;	/* Byte 1 */
+    unsigned char Channel;				/* Byte 2 */
+    unsigned char TargetID;				/* Byte 3 */
+    DAC960_V1_PhysicalDeviceState_T DeviceState:5;	/* Byte 4 Bits 0-4 */
+    unsigned char Modifier:3;				/* Byte 4 Bits 5-7 */
+    unsigned char Dummy1[3];				/* Bytes 5-7 */
+    DAC960_BusAddress32_T BusAddress;			/* Bytes 8-11 */
+    unsigned char Dummy2[4];				/* Bytes 12-15 */
+  } __attribute__ ((packed)) Type3D;
+  struct {
+    DAC960_V1_CommandOpcode_T CommandOpcode;		/* Byte 0 */
+    DAC960_V1_CommandIdentifier_T CommandIdentifier;	/* Byte 1 */
+    DAC960_V1_PerformEventLogOpType_T OperationType;	/* Byte 2 */
+    unsigned char OperationQualifier;			/* Byte 3 */
+    unsigned short SequenceNumber;			/* Bytes 4-5 */
+    unsigned char Dummy1[2];				/* Bytes 6-7 */
+    DAC960_BusAddress32_T BusAddress;			/* Bytes 8-11 */
+    unsigned char Dummy2[4];				/* Bytes 12-15 */
+  } __attribute__ ((packed)) Type3E;
+  struct {
+    DAC960_V1_CommandOpcode_T CommandOpcode;		/* Byte 0 */
+    DAC960_V1_CommandIdentifier_T CommandIdentifier;	/* Byte 1 */
+    unsigned char Dummy1[2];				/* Bytes 2-3 */
+    unsigned char RebuildRateConstant;			/* Byte 4 */
+    unsigned char Dummy2[3];				/* Bytes 5-7 */
+    DAC960_BusAddress32_T BusAddress;			/* Bytes 8-11 */
+    unsigned char Dummy3[4];				/* Bytes 12-15 */
+  } __attribute__ ((packed)) Type3R;
+  struct {
+    DAC960_V1_CommandOpcode_T CommandOpcode;		/* Byte 0 */
+    DAC960_V1_CommandIdentifier_T CommandIdentifier;	/* Byte 1 */
+    unsigned short TransferLength;			/* Bytes 2-3 */
+    unsigned int LogicalBlockAddress;			/* Bytes 4-7 */
+    DAC960_BusAddress32_T BusAddress;			/* Bytes 8-11 */
+    unsigned char LogicalDriveNumber;			/* Byte 12 */
+    unsigned char Dummy[3];				/* Bytes 13-15 */
+  } __attribute__ ((packed)) Type4;
+  struct {
+    DAC960_V1_CommandOpcode_T CommandOpcode;		/* Byte 0 */
+    DAC960_V1_CommandIdentifier_T CommandIdentifier;	/* Byte 1 */
+    struct {
+      unsigned short TransferLength:11;			/* Bytes 2-3 */
+      unsigned char LogicalDriveNumber:5;		/* Byte 3 Bits 3-7 */
+    } __attribute__ ((packed)) LD;
+    unsigned int LogicalBlockAddress;			/* Bytes 4-7 */
+    DAC960_BusAddress32_T BusAddress;			/* Bytes 8-11 */
+    unsigned char ScatterGatherCount:6;			/* Byte 12 Bits 0-5 */
+    enum {
+      DAC960_V1_ScatterGather_32BitAddress_32BitByteCount = 0x0,
+      DAC960_V1_ScatterGather_32BitAddress_16BitByteCount = 0x1,
+      DAC960_V1_ScatterGather_32BitByteCount_32BitAddress = 0x2,
+      DAC960_V1_ScatterGather_16BitByteCount_32BitAddress = 0x3
+    } __attribute__ ((packed)) ScatterGatherType:2;	/* Byte 12 Bits 6-7 */
+    unsigned char Dummy[3];				/* Bytes 13-15 */
+  } __attribute__ ((packed)) Type5;
+  struct {
+    DAC960_V1_CommandOpcode_T CommandOpcode;		/* Byte 0 */
+    DAC960_V1_CommandIdentifier_T CommandIdentifier;	/* Byte 1 */
+    unsigned char CommandOpcode2;			/* Byte 2 */
+    unsigned char :8;					/* Byte 3 */
+    DAC960_BusAddress32_T CommandMailboxesBusAddress;	/* Bytes 4-7 */
+    DAC960_BusAddress32_T StatusMailboxesBusAddress;	/* Bytes 8-11 */
+    unsigned char Dummy[4];				/* Bytes 12-15 */
+  } __attribute__ ((packed)) TypeX;
+}
+DAC960_V1_CommandMailbox_T;
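+
+
+/*
+  Illustrative sketch (not compiled): initializing a Type3 Command Mailbox
+  for an Enquiry command.  DAC960_V1_Enquiry is the opcode defined earlier
+  in this header; EnquiryDMA is the DMA address of a DAC960_V1_Enquiry_T
+  reply buffer.
+*/
+
+#if 0
+static void DAC960_ExamplePrepareEnquiry(
+  DAC960_V1_CommandMailbox_T *CommandMailbox,
+  DAC960_V1_CommandIdentifier_T CommandIdentifier,
+  DAC960_BusAddress32_T EnquiryDMA)
+{
+  memset(CommandMailbox, 0, sizeof(DAC960_V1_CommandMailbox_T));
+  CommandMailbox->Type3.CommandOpcode = DAC960_V1_Enquiry;
+  CommandMailbox->Type3.CommandIdentifier = CommandIdentifier;
+  CommandMailbox->Type3.BusAddress = EnquiryDMA;
+}
+#endif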
+
+
+/*
+  Define the DAC960 V2 Firmware Command Opcodes.
+*/
+
+typedef enum
+{
+  DAC960_V2_MemCopy =				0x01,
+  DAC960_V2_SCSI_10_Passthru =			0x02,
+  DAC960_V2_SCSI_255_Passthru =			0x03,
+  DAC960_V2_SCSI_10 =				0x04,
+  DAC960_V2_SCSI_256 =				0x05,
+  DAC960_V2_IOCTL =				0x20
+}
+__attribute__ ((packed))
+DAC960_V2_CommandOpcode_T;
+
+
+/*
+  Define the DAC960 V2 Firmware IOCTL Opcodes.
+*/
+
+typedef enum
+{
+  DAC960_V2_GetControllerInfo =			0x01,
+  DAC960_V2_GetLogicalDeviceInfoValid =		0x03,
+  DAC960_V2_GetPhysicalDeviceInfoValid =	0x05,
+  DAC960_V2_GetHealthStatus =			0x11,
+  DAC960_V2_GetEvent =				0x15,
+  DAC960_V2_StartDiscovery =			0x81,
+  DAC960_V2_SetDeviceState =			0x82,
+  DAC960_V2_RebuildDeviceStart =		0x88,
+  DAC960_V2_RebuildDeviceStop =			0x89,
+  DAC960_V2_ConsistencyCheckStart =		0x8C,
+  DAC960_V2_ConsistencyCheckStop =		0x8D,
+  DAC960_V2_SetMemoryMailbox =			0x8E,
+  DAC960_V2_PauseDevice =			0x92,
+  DAC960_V2_TranslatePhysicalToLogicalDevice =	0xC5
+}
+__attribute__ ((packed))
+DAC960_V2_IOCTL_Opcode_T;
+
+
+/*
+  Define the DAC960 V2 Firmware Command Identifier type.
+*/
+
+typedef unsigned short DAC960_V2_CommandIdentifier_T;
+
+
+/*
+  Define the DAC960 V2 Firmware Command Status Codes.
+*/
+
+#define DAC960_V2_NormalCompletion		0x00
+#define DAC960_V2_AbormalCompletion		0x02
+#define DAC960_V2_DeviceBusy			0x08
+#define DAC960_V2_DeviceNonresponsive		0x0E
+#define DAC960_V2_DeviceNonresponsive2		0x0F
+#define DAC960_V2_DeviceRevervationConflict	0x18
+
+typedef unsigned char DAC960_V2_CommandStatus_T;
+
+
+/*
+  Define the DAC960 V2 Firmware Memory Type structure.
+*/
+
+typedef struct DAC960_V2_MemoryType
+{
+  enum {
+    DAC960_V2_MemoryType_Reserved =		0x00,
+    DAC960_V2_MemoryType_DRAM =			0x01,
+    DAC960_V2_MemoryType_EDRAM =		0x02,
+    DAC960_V2_MemoryType_EDO =			0x03,
+    DAC960_V2_MemoryType_SDRAM =		0x04,
+    DAC960_V2_MemoryType_Last =			0x1F
+  } __attribute__ ((packed)) MemoryType:5;		/* Byte 0 Bits 0-4 */
+  boolean :1;						/* Byte 0 Bit 5 */
+  boolean MemoryParity:1;				/* Byte 0 Bit 6 */
+  boolean MemoryECC:1;					/* Byte 0 Bit 7 */
+}
+DAC960_V2_MemoryType_T;
+
+
+/*
+  Define the DAC960 V2 Firmware Processor Type structure.
+*/
+
+typedef enum
+{
+  DAC960_V2_ProcessorType_i960CA =		0x01,
+  DAC960_V2_ProcessorType_i960RD =		0x02,
+  DAC960_V2_ProcessorType_i960RN =		0x03,
+  DAC960_V2_ProcessorType_i960RP =		0x04,
+  DAC960_V2_ProcessorType_NorthBay =		0x05,
+  DAC960_V2_ProcessorType_StrongArm =		0x06,
+  DAC960_V2_ProcessorType_i960RM =		0x07
+}
+__attribute__ ((packed))
+DAC960_V2_ProcessorType_T;
+
+
+/*
+  Define the DAC960 V2 Firmware Get Controller Info reply structure.
+*/
+
+typedef struct DAC960_V2_ControllerInfo
+{
+  unsigned char :8;					/* Byte 0 */
+  enum {
+    DAC960_V2_SCSI_Bus =			0x00,
+    DAC960_V2_Fibre_Bus =			0x01,
+    DAC960_V2_PCI_Bus =				0x03
+  } __attribute__ ((packed)) BusInterfaceType;		/* Byte 1 */
+  enum {
+    DAC960_V2_DAC960E =				0x01,
+    DAC960_V2_DAC960M =				0x08,
+    DAC960_V2_DAC960PD =			0x10,
+    DAC960_V2_DAC960PL =			0x11,
+    DAC960_V2_DAC960PU =			0x12,
+    DAC960_V2_DAC960PE =			0x13,
+    DAC960_V2_DAC960PG =			0x14,
+    DAC960_V2_DAC960PJ =			0x15,
+    DAC960_V2_DAC960PTL0 =			0x16,
+    DAC960_V2_DAC960PR =			0x17,
+    DAC960_V2_DAC960PRL =			0x18,
+    DAC960_V2_DAC960PT =			0x19,
+    DAC960_V2_DAC1164P =			0x1A,
+    DAC960_V2_DAC960PTL1 =			0x1B,
+    DAC960_V2_EXR2000P =			0x1C,
+    DAC960_V2_EXR3000P =			0x1D,
+    DAC960_V2_AcceleRAID352 =			0x1E,
+    DAC960_V2_AcceleRAID170 =			0x1F,
+    DAC960_V2_AcceleRAID160 =			0x20,
+    DAC960_V2_DAC960S =				0x60,
+    DAC960_V2_DAC960SU =			0x61,
+    DAC960_V2_DAC960SX =			0x62,
+    DAC960_V2_DAC960SF =			0x63,
+    DAC960_V2_DAC960SS =			0x64,
+    DAC960_V2_DAC960FL =			0x65,
+    DAC960_V2_DAC960LL =			0x66,
+    DAC960_V2_DAC960FF =			0x67,
+    DAC960_V2_DAC960HP =			0x68,
+    DAC960_V2_RAIDBRICK =			0x69,
+    DAC960_V2_METEOR_FL =			0x6A,
+    DAC960_V2_METEOR_FF =			0x6B
+  } __attribute__ ((packed)) ControllerType;		/* Byte 2 */
+  unsigned char :8;					/* Byte 3 */
+  unsigned short BusInterfaceSpeedMHz;			/* Bytes 4-5 */
+  unsigned char BusWidthBits;				/* Byte 6 */
+  unsigned char FlashCodeTypeOrProductID;		/* Byte 7 */
+  unsigned char NumberOfHostPortsPresent;		/* Byte 8 */
+  unsigned char Reserved1[7];				/* Bytes 9-15 */
+  unsigned char BusInterfaceName[16];			/* Bytes 16-31 */
+  unsigned char ControllerName[16];			/* Bytes 32-47 */
+  unsigned char Reserved2[16];				/* Bytes 48-63 */
+  /* Firmware Release Information */
+  unsigned char FirmwareMajorVersion;			/* Byte 64 */
+  unsigned char FirmwareMinorVersion;			/* Byte 65 */
+  unsigned char FirmwareTurnNumber;			/* Byte 66 */
+  unsigned char FirmwareBuildNumber;			/* Byte 67 */
+  unsigned char FirmwareReleaseDay;			/* Byte 68 */
+  unsigned char FirmwareReleaseMonth;			/* Byte 69 */
+  unsigned char FirmwareReleaseYearHigh2Digits;		/* Byte 70 */
+  unsigned char FirmwareReleaseYearLow2Digits;		/* Byte 71 */
+  /* Hardware Release Information */
+  unsigned char HardwareRevision;			/* Byte 72 */
+  unsigned int :24;					/* Bytes 73-75 */
+  unsigned char HardwareReleaseDay;			/* Byte 76 */
+  unsigned char HardwareReleaseMonth;			/* Byte 77 */
+  unsigned char HardwareReleaseYearHigh2Digits;		/* Byte 78 */
+  unsigned char HardwareReleaseYearLow2Digits;		/* Byte 79 */
+  /* Hardware Manufacturing Information */
+  unsigned char ManufacturingBatchNumber;		/* Byte 80 */
+  unsigned char :8;					/* Byte 81 */
+  unsigned char ManufacturingPlantNumber;		/* Byte 82 */
+  unsigned char :8;					/* Byte 83 */
+  unsigned char HardwareManufacturingDay;		/* Byte 84 */
+  unsigned char HardwareManufacturingMonth;		/* Byte 85 */
+  unsigned char HardwareManufacturingYearHigh2Digits;	/* Byte 86 */
+  unsigned char HardwareManufacturingYearLow2Digits;	/* Byte 87 */
+  unsigned char MaximumNumberOfPDDperXLD;		/* Byte 88 */
+  unsigned char MaximumNumberOfILDperXLD;		/* Byte 89 */
+  unsigned short NonvolatileMemorySizeKB;		/* Bytes 90-91 */
+  unsigned char MaximumNumberOfXLD;			/* Byte 92 */
+  unsigned int :24;					/* Bytes 93-95 */
+  /* Unique Information per Controller */
+  unsigned char ControllerSerialNumber[16];		/* Bytes 96-111 */
+  unsigned char Reserved3[16];				/* Bytes 112-127 */
+  /* Vendor Information */
+  unsigned int :24;					/* Bytes 128-130 */
+  unsigned char OEM_Code;				/* Byte 131 */
+  unsigned char VendorName[16];				/* Bytes 132-147 */
+  /* Other Physical/Controller/Operation Information */
+  boolean BBU_Present:1;				/* Byte 148 Bit 0 */
+  boolean ActiveActiveClusteringMode:1;			/* Byte 148 Bit 1 */
+  unsigned char :6;					/* Byte 148 Bits 2-7 */
+  unsigned char :8;					/* Byte 149 */
+  unsigned short :16;					/* Bytes 150-151 */
+  /* Physical Device Scan Information */
+  boolean PhysicalScanActive:1;				/* Byte 152 Bit 0 */
+  unsigned char :7;					/* Byte 152 Bits 1-7 */
+  unsigned char PhysicalDeviceChannelNumber;		/* Byte 153 */
+  unsigned char PhysicalDeviceTargetID;			/* Byte 154 */
+  unsigned char PhysicalDeviceLogicalUnit;		/* Byte 155 */
+  /* Maximum Command Data Transfer Sizes */
+  unsigned short MaximumDataTransferSizeInBlocks;	/* Bytes 156-157 */
+  unsigned short MaximumScatterGatherEntries;		/* Bytes 158-159 */
+  /* Logical/Physical Device Counts */
+  unsigned short LogicalDevicesPresent;			/* Bytes 160-161 */
+  unsigned short LogicalDevicesCritical;		/* Bytes 162-163 */
+  unsigned short LogicalDevicesOffline;			/* Bytes 164-165 */
+  unsigned short PhysicalDevicesPresent;		/* Bytes 166-167 */
+  unsigned short PhysicalDisksPresent;			/* Bytes 168-169 */
+  unsigned short PhysicalDisksCritical;			/* Bytes 170-171 */
+  unsigned short PhysicalDisksOffline;			/* Bytes 172-173 */
+  unsigned short MaximumParallelCommands;		/* Bytes 174-175 */
+  /* Channel and Target ID Information */
+  unsigned char NumberOfPhysicalChannelsPresent;	/* Byte 176 */
+  unsigned char NumberOfVirtualChannelsPresent;		/* Byte 177 */
+  unsigned char NumberOfPhysicalChannelsPossible;	/* Byte 178 */
+  unsigned char NumberOfVirtualChannelsPossible;	/* Byte 179 */
+  unsigned char MaximumTargetsPerChannel[16];		/* Bytes 180-195 */
+  unsigned char Reserved4[12];				/* Bytes 196-207 */
+  /* Memory/Cache Information */
+  unsigned short MemorySizeMB;				/* Bytes 208-209 */
+  unsigned short CacheSizeMB;				/* Bytes 210-211 */
+  unsigned int ValidCacheSizeInBytes;			/* Bytes 212-215 */
+  unsigned int DirtyCacheSizeInBytes;			/* Bytes 216-219 */
+  unsigned short MemorySpeedMHz;			/* Bytes 220-221 */
+  unsigned char MemoryDataWidthBits;			/* Byte 222 */
+  DAC960_V2_MemoryType_T MemoryType;			/* Byte 223 */
+  unsigned char CacheMemoryTypeName[16];		/* Bytes 224-239 */
+  /* Execution Memory Information */
+  unsigned short ExecutionMemorySizeMB;			/* Bytes 240-241 */
+  unsigned short ExecutionL2CacheSizeMB;		/* Bytes 242-243 */
+  unsigned char Reserved5[8];				/* Bytes 244-251 */
+  unsigned short ExecutionMemorySpeedMHz;		/* Bytes 252-253 */
+  unsigned char ExecutionMemoryDataWidthBits;		/* Byte 254 */
+  DAC960_V2_MemoryType_T ExecutionMemoryType;		/* Byte 255 */
+  unsigned char ExecutionMemoryTypeName[16];		/* Bytes 256-271 */
+  /* First CPU Type Information */
+  unsigned short FirstProcessorSpeedMHz;		/* Bytes 272-273 */
+  DAC960_V2_ProcessorType_T FirstProcessorType;		/* Byte 274 */
+  unsigned char FirstProcessorCount;			/* Byte 275 */
+  unsigned char Reserved6[12];				/* Bytes 276-287 */
+  unsigned char FirstProcessorName[16];			/* Bytes 288-303 */
+  /* Second CPU Type Information */
+  unsigned short SecondProcessorSpeedMHz;		/* Bytes 304-305 */
+  DAC960_V2_ProcessorType_T SecondProcessorType;	/* Byte 306 */
+  unsigned char SecondProcessorCount;			/* Byte 307 */
+  unsigned char Reserved7[12];				/* Bytes 308-319 */
+  unsigned char SecondProcessorName[16];		/* Bytes 320-335 */
+  /* Debugging/Profiling/Command Time Tracing Information */
+  unsigned short CurrentProfilingDataPageNumber;	/* Bytes 336-337 */
+  unsigned short ProgramsAwaitingProfilingData;		/* Bytes 338-339 */
+  unsigned short CurrentCommandTimeTraceDataPageNumber;	/* Bytes 340-341 */
+  unsigned short ProgramsAwaitingCommandTimeTraceData;	/* Bytes 342-343 */
+  unsigned char Reserved8[8];				/* Bytes 344-351 */
+  /* Error Counters on Physical Devices */
+  unsigned short PhysicalDeviceBusResets;		/* Bytes 352-353 */
+  unsigned short PhysicalDeviceParityErrors;		/* Bytes 354-355 */
+  unsigned short PhysicalDeviceSoftErrors;		/* Bytes 356-357 */
+  unsigned short PhysicalDeviceCommandsFailed;		/* Bytes 358-359 */
+  unsigned short PhysicalDeviceMiscellaneousErrors;	/* Bytes 360-361 */
+  unsigned short PhysicalDeviceCommandTimeouts;		/* Bytes 362-363 */
+  unsigned short PhysicalDeviceSelectionTimeouts;	/* Bytes 364-365 */
+  unsigned short PhysicalDeviceRetriesDone;		/* Bytes 366-367 */
+  unsigned short PhysicalDeviceAbortsDone;		/* Bytes 368-369 */
+  unsigned short PhysicalDeviceHostCommandAbortsDone;	/* Bytes 370-371 */
+  unsigned short PhysicalDevicePredictedFailuresDetected; /* Bytes 372-373 */
+  unsigned short PhysicalDeviceHostCommandsFailed;	/* Bytes 374-375 */
+  unsigned short PhysicalDeviceHardErrors;		/* Bytes 376-377 */
+  unsigned char Reserved9[6];				/* Bytes 378-383 */
+  /* Error Counters on Logical Devices */
+  unsigned short LogicalDeviceSoftErrors;		/* Bytes 384-385 */
+  unsigned short LogicalDeviceCommandsFailed;		/* Bytes 386-387 */
+  unsigned short LogicalDeviceHostCommandAbortsDone;	/* Bytes 388-389 */
+  unsigned short :16;					/* Bytes 390-391 */
+  /* Error Counters on Controller */
+  unsigned short ControllerMemoryErrors;		/* Bytes 392-393 */
+  unsigned short ControllerHostCommandAbortsDone;	/* Bytes 394-395 */
+  unsigned int :32;					/* Bytes 396-399 */
+  /* Long Duration Activity Information */
+  unsigned short BackgroundInitializationsActive;	/* Bytes 400-401 */
+  unsigned short LogicalDeviceInitializationsActive;	/* Bytes 402-403 */
+  unsigned short PhysicalDeviceInitializationsActive;	/* Bytes 404-405 */
+  unsigned short ConsistencyChecksActive;		/* Bytes 406-407 */
+  unsigned short RebuildsActive;			/* Bytes 408-409 */
+  unsigned short OnlineExpansionsActive;		/* Bytes 410-411 */
+  unsigned short PatrolActivitiesActive;		/* Bytes 412-413 */
+  unsigned short :16;					/* Bytes 414-415 */
+  /* Flash ROM Information */
+  unsigned char FlashType;				/* Byte 416 */
+  unsigned char :8;					/* Byte 417 */
+  unsigned short FlashSizeMB;				/* Bytes 418-419 */
+  unsigned int FlashLimit;				/* Bytes 420-423 */
+  unsigned int FlashCount;				/* Bytes 424-427 */
+  unsigned int :32;					/* Bytes 428-431 */
+  unsigned char FlashTypeName[16];			/* Bytes 432-447 */
+  /* Firmware Run Time Information */
+  unsigned char RebuildRate;				/* Byte 448 */
+  unsigned char BackgroundInitializationRate;		/* Byte 449 */
+  unsigned char ForegroundInitializationRate;		/* Byte 450 */
+  unsigned char ConsistencyCheckRate;			/* Byte 451 */
+  unsigned int :32;					/* Bytes 452-455 */
+  unsigned int MaximumDP;				/* Bytes 456-459 */
+  unsigned int FreeDP;					/* Bytes 460-463 */
+  unsigned int MaximumIOP;				/* Bytes 464-467 */
+  unsigned int FreeIOP;					/* Bytes 468-471 */
+  unsigned short MaximumCombLengthInBlocks;		/* Bytes 472-473 */
+  unsigned short NumberOfConfigurationGroups;		/* Bytes 474-475 */
+  boolean InstallationAbortStatus:1;			/* Byte 476 Bit 0 */
+  boolean MaintenanceModeStatus:1;			/* Byte 476 Bit 1 */
+  unsigned int :24;					/* Bytes 476-479 */
+  unsigned char Reserved10[32];				/* Bytes 480-511 */
+  unsigned char Reserved11[512];			/* Bytes 512-1023 */
+}
+DAC960_V2_ControllerInfo_T;
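+
+
+/*
+  Illustrative sketch (not compiled): composing a printable firmware version
+  string, MajorVersion.MinorVersion-TurnNumber, from the reply above.
+*/
+
+#if 0
+static void DAC960_ExampleFormatFirmwareVersion(
+  DAC960_V2_ControllerInfo_T *ControllerInfo, char *Buffer)
+{
+  sprintf(Buffer, "%d.%02d-%02d",
+	  ControllerInfo->FirmwareMajorVersion,
+	  ControllerInfo->FirmwareMinorVersion,
+	  ControllerInfo->FirmwareTurnNumber);
+}
+#endif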
+
+
+/*
+  Define the DAC960 V2 Firmware Logical Device State type.
+*/
+
+typedef enum
+{
+  DAC960_V2_LogicalDevice_Online =		0x01,
+  DAC960_V2_LogicalDevice_Offline =		0x08,
+  DAC960_V2_LogicalDevice_Critical =		0x09
+}
+__attribute__ ((packed))
+DAC960_V2_LogicalDeviceState_T;
+
+
+/*
+  Define the DAC960 V2 Firmware Get Logical Device Info reply structure.
+*/
+
+typedef struct DAC960_V2_LogicalDeviceInfo
+{
+  unsigned char :8;					/* Byte 0 */
+  unsigned char Channel;				/* Byte 1 */
+  unsigned char TargetID;				/* Byte 2 */
+  unsigned char LogicalUnit;				/* Byte 3 */
+  DAC960_V2_LogicalDeviceState_T LogicalDeviceState;	/* Byte 4 */
+  unsigned char RAIDLevel;				/* Byte 5 */
+  unsigned char StripeSize;				/* Byte 6 */
+  unsigned char CacheLineSize;				/* Byte 7 */
+  struct {
+    enum {
+      DAC960_V2_ReadCacheDisabled =		0x0,
+      DAC960_V2_ReadCacheEnabled =		0x1,
+      DAC960_V2_ReadAheadEnabled =		0x2,
+      DAC960_V2_IntelligentReadAheadEnabled =	0x3,
+      DAC960_V2_ReadCache_Last =		0x7
+    } __attribute__ ((packed)) ReadCache:3;		/* Byte 8 Bits 0-2 */
+    enum {
+      DAC960_V2_WriteCacheDisabled =		0x0,
+      DAC960_V2_LogicalDeviceReadOnly =		0x1,
+      DAC960_V2_WriteCacheEnabled =		0x2,
+      DAC960_V2_IntelligentWriteCacheEnabled =	0x3,
+      DAC960_V2_WriteCache_Last =		0x7
+    } __attribute__ ((packed)) WriteCache:3;		/* Byte 8 Bits 3-5 */
+    boolean :1;						/* Byte 8 Bit 6 */
+    boolean LogicalDeviceInitialized:1;			/* Byte 8 Bit 7 */
+  } LogicalDeviceControl;				/* Byte 8 */
+  /* Logical Device Operations Status */
+  boolean ConsistencyCheckInProgress:1;			/* Byte 9 Bit 0 */
+  boolean RebuildInProgress:1;				/* Byte 9 Bit 1 */
+  boolean BackgroundInitializationInProgress:1;		/* Byte 9 Bit 2 */
+  boolean ForegroundInitializationInProgress:1;		/* Byte 9 Bit 3 */
+  boolean DataMigrationInProgress:1;			/* Byte 9 Bit 4 */
+  boolean PatrolOperationInProgress:1;			/* Byte 9 Bit 5 */
+  unsigned char :2;					/* Byte 9 Bits 6-7 */
+  unsigned char RAID5WriteUpdate;			/* Byte 10 */
+  unsigned char RAID5Algorithm;				/* Byte 11 */
+  unsigned short LogicalDeviceNumber;			/* Bytes 12-13 */
+  /* BIOS Info */
+  boolean BIOSDisabled:1;				/* Byte 14 Bit 0 */
+  boolean CDROMBootEnabled:1;				/* Byte 14 Bit 1 */
+  boolean DriveCoercionEnabled:1;			/* Byte 14 Bit 2 */
+  boolean WriteSameDisabled:1;				/* Byte 14 Bit 3 */
+  boolean HBA_ModeEnabled:1;				/* Byte 14 Bit 4 */
+  enum {
+    DAC960_V2_Geometry_128_32 =			0x0,
+    DAC960_V2_Geometry_255_63 =			0x1,
+    DAC960_V2_Geometry_Reserved1 =		0x2,
+    DAC960_V2_Geometry_Reserved2 =		0x3
+  } __attribute__ ((packed)) DriveGeometry:2;		/* Byte 14 Bits 5-6 */
+  boolean SuperReadAheadEnabled:1;			/* Byte 14 Bit 7 */
+  unsigned char :8;					/* Byte 15 */
+  /* Error Counters */
+  unsigned short SoftErrors;				/* Bytes 16-17 */
+  unsigned short CommandsFailed;			/* Bytes 18-19 */
+  unsigned short HostCommandAbortsDone;			/* Bytes 20-21 */
+  unsigned short DeferredWriteErrors;			/* Bytes 22-23 */
+  unsigned int :32;					/* Bytes 24-27 */
+  unsigned int :32;					/* Bytes 28-31 */
+  /* Device Size Information */
+  unsigned short :16;					/* Bytes 32-33 */
+  unsigned short DeviceBlockSizeInBytes;		/* Bytes 34-35 */
+  unsigned int OriginalDeviceSize;			/* Bytes 36-39 */
+  unsigned int ConfigurableDeviceSize;			/* Bytes 40-43 */
+  unsigned int :32;					/* Bytes 44-47 */
+  unsigned char LogicalDeviceName[32];			/* Bytes 48-79 */
+  unsigned char SCSI_InquiryData[36];			/* Bytes 80-115 */
+  unsigned char Reserved1[12];				/* Bytes 116-127 */
+  DAC960_ByteCount64_T LastReadBlockNumber;		/* Bytes 128-135 */
+  DAC960_ByteCount64_T LastWrittenBlockNumber;		/* Bytes 136-143 */
+  DAC960_ByteCount64_T ConsistencyCheckBlockNumber;	/* Bytes 144-151 */
+  DAC960_ByteCount64_T RebuildBlockNumber;		/* Bytes 152-159 */
+  DAC960_ByteCount64_T BackgroundInitializationBlockNumber; /* Bytes 160-167 */
+  DAC960_ByteCount64_T ForegroundInitializationBlockNumber; /* Bytes 168-175 */
+  DAC960_ByteCount64_T DataMigrationBlockNumber;	/* Bytes 176-183 */
+  DAC960_ByteCount64_T PatrolOperationBlockNumber;	/* Bytes 184-191 */
+  unsigned char Reserved2[64];				/* Bytes 192-255 */
+}
+DAC960_V2_LogicalDeviceInfo_T;
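+
+
+/*
+  Illustrative sketch (not compiled): a logical device's usable capacity in
+  bytes is its configurable size in blocks scaled by its block size.
+*/
+
+#if 0
+static unsigned long long DAC960_ExampleDeviceCapacityBytes(
+  DAC960_V2_LogicalDeviceInfo_T *LogicalDeviceInfo)
+{
+  return (unsigned long long) LogicalDeviceInfo->ConfigurableDeviceSize *
+	 LogicalDeviceInfo->DeviceBlockSizeInBytes;
+}
+#endif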
+
+
+/*
+  Define the DAC960 V2 Firmware Physical Device State type.
+*/
+
+typedef enum
+{
+    DAC960_V2_Device_Unconfigured =		0x00,
+    DAC960_V2_Device_Online =			0x01,
+    DAC960_V2_Device_Rebuild =			0x03,
+    DAC960_V2_Device_Missing =			0x04,
+    DAC960_V2_Device_Critical =			0x05,
+    DAC960_V2_Device_Dead =			0x08,
+    DAC960_V2_Device_SuspectedDead =		0x0C,
+    DAC960_V2_Device_CommandedOffline =		0x10,
+    DAC960_V2_Device_Standby =			0x21,
+    DAC960_V2_Device_InvalidState =		0xFF
+}
+__attribute__ ((packed))
+DAC960_V2_PhysicalDeviceState_T;
+
+
+/*
+  Define the DAC960 V2 Firmware Get Physical Device Info reply structure.
+*/
+
+typedef struct DAC960_V2_PhysicalDeviceInfo
+{
+  unsigned char :8;					/* Byte 0 */
+  unsigned char Channel;				/* Byte 1 */
+  unsigned char TargetID;				/* Byte 2 */
+  unsigned char LogicalUnit;				/* Byte 3 */
+  /* Configuration Status Bits */
+  boolean PhysicalDeviceFaultTolerant:1;		/* Byte 4 Bit 0 */
+  boolean PhysicalDeviceConnected:1;			/* Byte 4 Bit 1 */
+  boolean PhysicalDeviceLocalToController:1;		/* Byte 4 Bit 2 */
+  unsigned char :5;					/* Byte 4 Bits 3-7 */
+  /* Multiple Host/Controller Status Bits */
+  boolean RemoteHostSystemDead:1;			/* Byte 5 Bit 0 */
+  boolean RemoteControllerDead:1;			/* Byte 5 Bit 1 */
+  unsigned char :6;					/* Byte 5 Bits 2-7 */
+  DAC960_V2_PhysicalDeviceState_T PhysicalDeviceState;	/* Byte 6 */
+  unsigned char NegotiatedDataWidthBits;		/* Byte 7 */
+  unsigned short NegotiatedSynchronousMegaTransfers;	/* Bytes 8-9 */
+  /* Multiported Physical Device Information */
+  unsigned char NumberOfPortConnections;		/* Byte 10 */
+  unsigned char DriveAccessibilityBitmap;		/* Byte 11 */
+  unsigned int :32;					/* Bytes 12-15 */
+  unsigned char NetworkAddress[16];			/* Bytes 16-31 */
+  unsigned short MaximumTags;				/* Bytes 32-33 */
+  /* Physical Device Operations Status */
+  boolean ConsistencyCheckInProgress:1;			/* Byte 34 Bit 0 */
+  boolean RebuildInProgress:1;				/* Byte 34 Bit 1 */
+  boolean MakingDataConsistentInProgress:1;		/* Byte 34 Bit 2 */
+  boolean PhysicalDeviceInitializationInProgress:1;	/* Byte 34 Bit 3 */
+  boolean DataMigrationInProgress:1;			/* Byte 34 Bit 4 */
+  boolean PatrolOperationInProgress:1;			/* Byte 34 Bit 5 */
+  unsigned char :2;					/* Byte 34 Bits 6-7 */
+  unsigned char LongOperationStatus;			/* Byte 35 */
+  unsigned char ParityErrors;				/* Byte 36 */
+  unsigned char SoftErrors;				/* Byte 37 */
+  unsigned char HardErrors;				/* Byte 38 */
+  unsigned char MiscellaneousErrors;			/* Byte 39 */
+  unsigned char CommandTimeouts;			/* Byte 40 */
+  unsigned char Retries;				/* Byte 41 */
+  unsigned char Aborts;					/* Byte 42 */
+  unsigned char PredictedFailuresDetected;		/* Byte 43 */
+  unsigned int :32;					/* Bytes 44-47 */
+  unsigned short :16;					/* Bytes 48-49 */
+  unsigned short DeviceBlockSizeInBytes;		/* Bytes 50-51 */
+  unsigned int OriginalDeviceSize;			/* Bytes 52-55 */
+  unsigned int ConfigurableDeviceSize;			/* Bytes 56-59 */
+  unsigned int :32;					/* Bytes 60-63 */
+  unsigned char PhysicalDeviceName[16];			/* Bytes 64-79 */
+  unsigned char Reserved1[16];				/* Bytes 80-95 */
+  unsigned char Reserved2[32];				/* Bytes 96-127 */
+  unsigned char SCSI_InquiryData[36];			/* Bytes 128-163 */
+  unsigned char Reserved3[20];				/* Bytes 164-183 */
+  unsigned char Reserved4[8];				/* Bytes 184-191 */
+  DAC960_ByteCount64_T LastReadBlockNumber;		/* Bytes 192-199 */
+  DAC960_ByteCount64_T LastWrittenBlockNumber;		/* Bytes 200-207 */
+  DAC960_ByteCount64_T ConsistencyCheckBlockNumber;	/* Bytes 208-215 */
+  DAC960_ByteCount64_T RebuildBlockNumber;		/* Bytes 216-223 */
+  DAC960_ByteCount64_T MakingDataConsistentBlockNumber;	/* Bytes 224-231 */
+  DAC960_ByteCount64_T DeviceInitializationBlockNumber; /* Bytes 232-239 */
+  DAC960_ByteCount64_T DataMigrationBlockNumber;	/* Bytes 240-247 */
+  DAC960_ByteCount64_T PatrolOperationBlockNumber;	/* Bytes 248-255 */
+  unsigned char Reserved5[256];				/* Bytes 256-511 */
+}
+DAC960_V2_PhysicalDeviceInfo_T;
+
+
+/*
+  Define the DAC960 V2 Firmware Health Status Buffer structure.
+*/
+
+typedef struct DAC960_V2_HealthStatusBuffer
+{
+  unsigned int MicrosecondsFromControllerStartTime;	/* Bytes 0-3 */
+  unsigned int MillisecondsFromControllerStartTime;	/* Bytes 4-7 */
+  unsigned int SecondsFrom1January1970;			/* Bytes 8-11 */
+  unsigned int :32;					/* Bytes 12-15 */
+  unsigned int StatusChangeCounter;			/* Bytes 16-19 */
+  unsigned int :32;					/* Bytes 20-23 */
+  unsigned int DebugOutputMessageBufferIndex;		/* Bytes 24-27 */
+  unsigned int CodedMessageBufferIndex;			/* Bytes 28-31 */
+  unsigned int CurrentTimeTracePageNumber;		/* Bytes 32-35 */
+  unsigned int CurrentProfilerPageNumber;		/* Bytes 36-39 */
+  unsigned int NextEventSequenceNumber;			/* Bytes 40-43 */
+  unsigned int :32;					/* Bytes 44-47 */
+  unsigned char Reserved1[16];				/* Bytes 48-63 */
+  unsigned char Reserved2[64];				/* Bytes 64-127 */
+}
+DAC960_V2_HealthStatusBuffer_T;
+
+
+/*
+  Define the DAC960 V2 Firmware Get Event reply structure.
+*/
+
+typedef struct DAC960_V2_Event
+{
+  unsigned int EventSequenceNumber;			/* Bytes 0-3 */
+  unsigned int EventTime;				/* Bytes 4-7 */
+  unsigned int EventCode;				/* Bytes 8-11 */
+  unsigned char :8;					/* Byte 12 */
+  unsigned char Channel;				/* Byte 13 */
+  unsigned char TargetID;				/* Byte 14 */
+  unsigned char LogicalUnit;				/* Byte 15 */
+  unsigned int :32;					/* Bytes 16-19 */
+  unsigned int EventSpecificParameter;			/* Bytes 20-23 */
+  unsigned char RequestSenseData[40];			/* Bytes 24-63 */
+}
+DAC960_V2_Event_T;
+
+
+/*
+  Define the DAC960 V2 Firmware Command Control Bits structure.
+*/
+
+typedef struct DAC960_V2_CommandControlBits
+{
+  boolean ForceUnitAccess:1;				/* Byte 0 Bit 0 */
+  boolean DisablePageOut:1;				/* Byte 0 Bit 1 */
+  boolean :1;						/* Byte 0 Bit 2 */
+  boolean AdditionalScatterGatherListMemory:1;		/* Byte 0 Bit 3 */
+  boolean DataTransferControllerToHost:1;		/* Byte 0 Bit 4 */
+  boolean :1;						/* Byte 0 Bit 5 */
+  boolean NoAutoRequestSense:1;				/* Byte 0 Bit 6 */
+  boolean DisconnectProhibited:1;			/* Byte 0 Bit 7 */
+}
+DAC960_V2_CommandControlBits_T;
+
+
+/*
+  Define the DAC960 V2 Firmware Command Timeout structure.
+*/
+
+typedef struct DAC960_V2_CommandTimeout
+{
+  unsigned char TimeoutValue:6;				/* Byte 0 Bits 0-5 */
+  enum {
+    DAC960_V2_TimeoutScale_Seconds =		0,
+    DAC960_V2_TimeoutScale_Minutes =		1,
+    DAC960_V2_TimeoutScale_Hours =		2,
+    DAC960_V2_TimeoutScale_Reserved =		3
+  } __attribute__ ((packed)) TimeoutScale:2;		/* Byte 0 Bits 6-7 */
+}
+DAC960_V2_CommandTimeout_T;
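+
+
+/*
+  Illustrative sketch (not compiled): encoding a 60 second timeout; the
+  6 bit TimeoutValue only reaches 63, so longer intervals must switch to
+  the minutes or hours scale.
+*/
+
+#if 0
+static void DAC960_ExampleSetTimeout(DAC960_V2_CommandTimeout_T *CommandTimeout)
+{
+  CommandTimeout->TimeoutValue = 60;
+  CommandTimeout->TimeoutScale = DAC960_V2_TimeoutScale_Seconds;
+}
+#endif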
+
+
+/*
+  Define the DAC960 V2 Firmware Physical Device structure.
+*/
+
+typedef struct DAC960_V2_PhysicalDevice
+{
+  unsigned char LogicalUnit;				/* Byte 0 */
+  unsigned char TargetID;				/* Byte 1 */
+  unsigned char Channel:3;				/* Byte 2 Bits 0-2 */
+  unsigned char Controller:5;				/* Byte 2 Bits 3-7 */
+}
+__attribute__ ((packed))
+DAC960_V2_PhysicalDevice_T;
+
+
+/*
+  Define the DAC960 V2 Firmware Logical Device structure.
+*/
+
+typedef struct DAC960_V2_LogicalDevice
+{
+  unsigned short LogicalDeviceNumber;			/* Bytes 0-1 */
+  unsigned char :3;					/* Byte 2 Bits 0-2 */
+  unsigned char Controller:5;				/* Byte 2 Bits 3-7 */
+}
+__attribute__ ((packed))
+DAC960_V2_LogicalDevice_T;
+
+
+/*
+  Define the DAC960 V2 Firmware Operation Device type.
+*/
+
+typedef enum
+{
+  DAC960_V2_Physical_Device =			0x00,
+  DAC960_V2_RAID_Device =			0x01,
+  DAC960_V2_Physical_Channel =			0x02,
+  DAC960_V2_RAID_Channel =			0x03,
+  DAC960_V2_Physical_Controller =		0x04,
+  DAC960_V2_RAID_Controller =			0x05,
+  DAC960_V2_Configuration_Group =		0x10,
+  DAC960_V2_Enclosure =				0x11
+}
+__attribute__ ((packed))
+DAC960_V2_OperationDevice_T;
+
+
+/*
+  Define the DAC960 V2 Firmware Translate Physical To Logical Device structure.
+*/
+
+typedef struct DAC960_V2_PhysicalToLogicalDevice
+{
+  unsigned short LogicalDeviceNumber;			/* Bytes 0-1 */
+  unsigned short :16;					/* Bytes 2-3 */
+  unsigned char PreviousBootController;			/* Byte 4 */
+  unsigned char PreviousBootChannel;			/* Byte 5 */
+  unsigned char PreviousBootTargetID;			/* Byte 6 */
+  unsigned char PreviousBootLogicalUnit;		/* Byte 7 */
+}
+DAC960_V2_PhysicalToLogicalDevice_T;
+
+
+
+/*
+  Define the DAC960 V2 Firmware Scatter/Gather List Entry structure.
+*/
+
+typedef struct DAC960_V2_ScatterGatherSegment
+{
+  DAC960_BusAddress64_T SegmentDataPointer;		/* Bytes 0-7 */
+  DAC960_ByteCount64_T SegmentByteCount;		/* Bytes 8-15 */
+}
+DAC960_V2_ScatterGatherSegment_T;
+
+
+/*
+  Define the DAC960 V2 Firmware Data Transfer Memory Address structure.
+*/
+
+typedef union DAC960_V2_DataTransferMemoryAddress
+{
+  DAC960_V2_ScatterGatherSegment_T ScatterGatherSegments[2]; /* Bytes 0-31 */
+  struct {
+    unsigned short ScatterGatherList0Length;		/* Bytes 0-1 */
+    unsigned short ScatterGatherList1Length;		/* Bytes 2-3 */
+    unsigned short ScatterGatherList2Length;		/* Bytes 4-5 */
+    unsigned short :16;					/* Bytes 6-7 */
+    DAC960_BusAddress64_T ScatterGatherList0Address;	/* Bytes 8-15 */
+    DAC960_BusAddress64_T ScatterGatherList1Address;	/* Bytes 16-23 */
+    DAC960_BusAddress64_T ScatterGatherList2Address;	/* Bytes 24-31 */
+  } ExtendedScatterGather;
+}
+DAC960_V2_DataTransferMemoryAddress_T;
+
+
+/*
+  Define the 64 Byte DAC960 V2 Firmware Command Mailbox structure.
+*/
+
+typedef union DAC960_V2_CommandMailbox
+{
+  unsigned int Words[16];				/* Words 0-15 */
+  struct {
+    DAC960_V2_CommandIdentifier_T CommandIdentifier;	/* Bytes 0-1 */
+    DAC960_V2_CommandOpcode_T CommandOpcode;		/* Byte 2 */
+    DAC960_V2_CommandControlBits_T CommandControlBits;	/* Byte 3 */
+    DAC960_ByteCount32_T DataTransferSize:24;		/* Bytes 4-6 */
+    unsigned char DataTransferPageNumber;		/* Byte 7 */
+    DAC960_BusAddress64_T RequestSenseBusAddress;	/* Bytes 8-15 */
+    unsigned int :24;					/* Bytes 16-18 */
+    DAC960_V2_CommandTimeout_T CommandTimeout;		/* Byte 19 */
+    unsigned char RequestSenseSize;			/* Byte 20 */
+    unsigned char IOCTL_Opcode;				/* Byte 21 */
+    unsigned char Reserved[10];				/* Bytes 22-31 */
+    DAC960_V2_DataTransferMemoryAddress_T
+      DataTransferMemoryAddress;			/* Bytes 32-63 */
+  } Common;
+  struct {
+    DAC960_V2_CommandIdentifier_T CommandIdentifier;	/* Bytes 0-1 */
+    DAC960_V2_CommandOpcode_T CommandOpcode;		/* Byte 2 */
+    DAC960_V2_CommandControlBits_T CommandControlBits;	/* Byte 3 */
+    DAC960_ByteCount32_T DataTransferSize;		/* Bytes 4-7 */
+    DAC960_BusAddress64_T RequestSenseBusAddress;	/* Bytes 8-15 */
+    DAC960_V2_PhysicalDevice_T PhysicalDevice;		/* Bytes 16-18 */
+    DAC960_V2_CommandTimeout_T CommandTimeout;		/* Byte 19 */
+    unsigned char RequestSenseSize;			/* Byte 20 */
+    unsigned char CDBLength;				/* Byte 21 */
+    unsigned char SCSI_CDB[10];				/* Bytes 22-31 */
+    DAC960_V2_DataTransferMemoryAddress_T
+      DataTransferMemoryAddress;			/* Bytes 32-63 */
+  } SCSI_10;
+  struct {
+    DAC960_V2_CommandIdentifier_T CommandIdentifier;	/* Bytes 0-1 */
+    DAC960_V2_CommandOpcode_T CommandOpcode;		/* Byte 2 */
+    DAC960_V2_CommandControlBits_T CommandControlBits;	/* Byte 3 */
+    DAC960_ByteCount32_T DataTransferSize;		/* Bytes 4-7 */
+    DAC960_BusAddress64_T RequestSenseBusAddress;	/* Bytes 8-15 */
+    DAC960_V2_PhysicalDevice_T PhysicalDevice;		/* Bytes 16-18 */
+    DAC960_V2_CommandTimeout_T CommandTimeout;		/* Byte 19 */
+    unsigned char RequestSenseSize;			/* Byte 20 */
+    unsigned char CDBLength;				/* Byte 21 */
+    unsigned short :16;					/* Bytes 22-23 */
+    DAC960_BusAddress64_T SCSI_CDB_BusAddress;		/* Bytes 24-31 */
+    DAC960_V2_DataTransferMemoryAddress_T
+      DataTransferMemoryAddress;			/* Bytes 32-63 */
+  } SCSI_255;
+  struct {
+    DAC960_V2_CommandIdentifier_T CommandIdentifier;	/* Bytes 0-1 */
+    DAC960_V2_CommandOpcode_T CommandOpcode;		/* Byte 2 */
+    DAC960_V2_CommandControlBits_T CommandControlBits;	/* Byte 3 */
+    DAC960_ByteCount32_T DataTransferSize:24;		/* Bytes 4-6 */
+    unsigned char DataTransferPageNumber;		/* Byte 7 */
+    DAC960_BusAddress64_T RequestSenseBusAddress;	/* Bytes 8-15 */
+    unsigned short :16;					/* Bytes 16-17 */
+    unsigned char ControllerNumber;			/* Byte 18 */
+    DAC960_V2_CommandTimeout_T CommandTimeout;		/* Byte 19 */
+    unsigned char RequestSenseSize;			/* Byte 20 */
+    unsigned char IOCTL_Opcode;				/* Byte 21 */
+    unsigned char Reserved[10];				/* Bytes 22-31 */
+    DAC960_V2_DataTransferMemoryAddress_T
+      DataTransferMemoryAddress;			/* Bytes 32-63 */
+  } ControllerInfo;
+  struct {
+    DAC960_V2_CommandIdentifier_T CommandIdentifier;	/* Bytes 0-1 */
+    DAC960_V2_CommandOpcode_T CommandOpcode;		/* Byte 2 */
+    DAC960_V2_CommandControlBits_T CommandControlBits;	/* Byte 3 */
+    DAC960_ByteCount32_T DataTransferSize:24;		/* Bytes 4-6 */
+    unsigned char DataTransferPageNumber;		/* Byte 7 */
+    DAC960_BusAddress64_T RequestSenseBusAddress;	/* Bytes 8-15 */
+    DAC960_V2_LogicalDevice_T LogicalDevice;		/* Bytes 16-18 */
+    DAC960_V2_CommandTimeout_T CommandTimeout;		/* Byte 19 */
+    unsigned char RequestSenseSize;			/* Byte 20 */
+    unsigned char IOCTL_Opcode;				/* Byte 21 */
+    unsigned char Reserved[10];				/* Bytes 22-31 */
+    DAC960_V2_DataTransferMemoryAddress_T
+      DataTransferMemoryAddress;			/* Bytes 32-63 */
+  } LogicalDeviceInfo;
+  struct {
+    DAC960_V2_CommandIdentifier_T CommandIdentifier;	/* Bytes 0-1 */
+    DAC960_V2_CommandOpcode_T CommandOpcode;		/* Byte 2 */
+    DAC960_V2_CommandControlBits_T CommandControlBits;	/* Byte 3 */
+    DAC960_ByteCount32_T DataTransferSize:24;		/* Bytes 4-6 */
+    unsigned char DataTransferPageNumber;		/* Byte 7 */
+    DAC960_BusAddress64_T RequestSenseBusAddress;	/* Bytes 8-15 */
+    DAC960_V2_PhysicalDevice_T PhysicalDevice;		/* Bytes 16-18 */
+    DAC960_V2_CommandTimeout_T CommandTimeout;		/* Byte 19 */
+    unsigned char RequestSenseSize;			/* Byte 20 */
+    unsigned char IOCTL_Opcode;				/* Byte 21 */
+    unsigned char Reserved[10];				/* Bytes 22-31 */
+    DAC960_V2_DataTransferMemoryAddress_T
+      DataTransferMemoryAddress;			/* Bytes 32-63 */
+  } PhysicalDeviceInfo;
+  struct {
+    DAC960_V2_CommandIdentifier_T CommandIdentifier;	/* Bytes 0-1 */
+    DAC960_V2_CommandOpcode_T CommandOpcode;		/* Byte 2 */
+    DAC960_V2_CommandControlBits_T CommandControlBits;	/* Byte 3 */
+    DAC960_ByteCount32_T DataTransferSize:24;		/* Bytes 4-6 */
+    unsigned char DataTransferPageNumber;		/* Byte 7 */
+    DAC960_BusAddress64_T RequestSenseBusAddress;	/* Bytes 8-15 */
+    unsigned short EventSequenceNumberHigh16;		/* Bytes 16-17 */
+    unsigned char ControllerNumber;			/* Byte 18 */
+    DAC960_V2_CommandTimeout_T CommandTimeout;		/* Byte 19 */
+    unsigned char RequestSenseSize;			/* Byte 20 */
+    unsigned char IOCTL_Opcode;				/* Byte 21 */
+    unsigned short EventSequenceNumberLow16;		/* Bytes 22-23 */
+    unsigned char Reserved[8];				/* Bytes 24-31 */
+    DAC960_V2_DataTransferMemoryAddress_T
+      DataTransferMemoryAddress;			/* Bytes 32-63 */
+  } GetEvent;
+  struct {
+    DAC960_V2_CommandIdentifier_T CommandIdentifier;	/* Bytes 0-1 */
+    DAC960_V2_CommandOpcode_T CommandOpcode;		/* Byte 2 */
+    DAC960_V2_CommandControlBits_T CommandControlBits;	/* Byte 3 */
+    DAC960_ByteCount32_T DataTransferSize:24;		/* Bytes 4-6 */
+    unsigned char DataTransferPageNumber;		/* Byte 7 */
+    DAC960_BusAddress64_T RequestSenseBusAddress;	/* Bytes 8-15 */
+    DAC960_V2_LogicalDevice_T LogicalDevice;		/* Bytes 16-18 */
+    DAC960_V2_CommandTimeout_T CommandTimeout;		/* Byte 19 */
+    unsigned char RequestSenseSize;			/* Byte 20 */
+    unsigned char IOCTL_Opcode;				/* Byte 21 */
+    union {
+      DAC960_V2_LogicalDeviceState_T LogicalDeviceState;
+      DAC960_V2_PhysicalDeviceState_T PhysicalDeviceState;
+    } DeviceState;					/* Byte 22 */
+    unsigned char Reserved[9];				/* Bytes 23-31 */
+    DAC960_V2_DataTransferMemoryAddress_T
+      DataTransferMemoryAddress;			/* Bytes 32-63 */
+  } SetDeviceState;
+  struct {
+    DAC960_V2_CommandIdentifier_T CommandIdentifier;	/* Bytes 0-1 */
+    DAC960_V2_CommandOpcode_T CommandOpcode;		/* Byte 2 */
+    DAC960_V2_CommandControlBits_T CommandControlBits;	/* Byte 3 */
+    DAC960_ByteCount32_T DataTransferSize:24;		/* Bytes 4-6 */
+    unsigned char DataTransferPageNumber;		/* Byte 7 */
+    DAC960_BusAddress64_T RequestSenseBusAddress;	/* Bytes 8-15 */
+    DAC960_V2_LogicalDevice_T LogicalDevice;		/* Bytes 16-18 */
+    DAC960_V2_CommandTimeout_T CommandTimeout;		/* Byte 19 */
+    unsigned char RequestSenseSize;			/* Byte 20 */
+    unsigned char IOCTL_Opcode;				/* Byte 21 */
+    boolean RestoreConsistency:1;			/* Byte 22 Bit 0 */
+    boolean InitializedAreaOnly:1;			/* Byte 22 Bit 1 */
+    unsigned char :6;					/* Byte 22 Bits 2-7 */
+    unsigned char Reserved[9];				/* Bytes 23-31 */
+    DAC960_V2_DataTransferMemoryAddress_T
+      DataTransferMemoryAddress;			/* Bytes 32-63 */
+  } ConsistencyCheck;
+  struct {
+    DAC960_V2_CommandIdentifier_T CommandIdentifier;	/* Bytes 0-1 */
+    DAC960_V2_CommandOpcode_T CommandOpcode;		/* Byte 2 */
+    DAC960_V2_CommandControlBits_T CommandControlBits;	/* Byte 3 */
+    unsigned char FirstCommandMailboxSizeKB;		/* Byte 4 */
+    unsigned char FirstStatusMailboxSizeKB;		/* Byte 5 */
+    unsigned char SecondCommandMailboxSizeKB;		/* Byte 6 */
+    unsigned char SecondStatusMailboxSizeKB;		/* Byte 7 */
+    DAC960_BusAddress64_T RequestSenseBusAddress;	/* Bytes 8-15 */
+    unsigned int :24;					/* Bytes 16-18 */
+    DAC960_V2_CommandTimeout_T CommandTimeout;		/* Byte 19 */
+    unsigned char RequestSenseSize;			/* Byte 20 */
+    unsigned char IOCTL_Opcode;				/* Byte 21 */
+    unsigned char HealthStatusBufferSizeKB;		/* Byte 22 */
+    unsigned char :8;					/* Byte 23 */
+    DAC960_BusAddress64_T HealthStatusBufferBusAddress; /* Bytes 24-31 */
+    DAC960_BusAddress64_T FirstCommandMailboxBusAddress; /* Bytes 32-39 */
+    DAC960_BusAddress64_T FirstStatusMailboxBusAddress; /* Bytes 40-47 */
+    DAC960_BusAddress64_T SecondCommandMailboxBusAddress; /* Bytes 48-55 */
+    DAC960_BusAddress64_T SecondStatusMailboxBusAddress; /* Bytes 56-63 */
+  } SetMemoryMailbox;
+  struct {
+    DAC960_V2_CommandIdentifier_T CommandIdentifier;	/* Bytes 0-1 */
+    DAC960_V2_CommandOpcode_T CommandOpcode;		/* Byte 2 */
+    DAC960_V2_CommandControlBits_T CommandControlBits;	/* Byte 3 */
+    DAC960_ByteCount32_T DataTransferSize:24;		/* Bytes 4-6 */
+    unsigned char DataTransferPageNumber;		/* Byte 7 */
+    DAC960_BusAddress64_T RequestSenseBusAddress;	/* Bytes 8-15 */
+    DAC960_V2_PhysicalDevice_T PhysicalDevice;		/* Bytes 16-18 */
+    DAC960_V2_CommandTimeout_T CommandTimeout;		/* Byte 19 */
+    unsigned char RequestSenseSize;			/* Byte 20 */
+    unsigned char IOCTL_Opcode;				/* Byte 21 */
+    DAC960_V2_OperationDevice_T OperationDevice;	/* Byte 22 */
+    unsigned char Reserved[9];				/* Bytes 23-31 */
+    DAC960_V2_DataTransferMemoryAddress_T
+      DataTransferMemoryAddress;			/* Bytes 32-63 */
+  } DeviceOperation;
+}
+DAC960_V2_CommandMailbox_T;
+
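+/*
+  Every member of this union shares the same 64-byte layout: bytes 0-3
+  always carry the Command Identifier, Opcode, and Control Bits, and in
+  most members bytes 32-63 carry the data transfer memory address.  A
+  command is built by filling in the member that matches its opcode; a
+  sketch, using types and opcodes defined elsewhere in this header:
+
+    CommandMailbox->GetEvent.CommandOpcode = DAC960_V2_IOCTL;
+    CommandMailbox->GetEvent.DataTransferSize = sizeof(DAC960_V2_Event_T);
+    CommandMailbox->GetEvent.IOCTL_Opcode = DAC960_V2_GetEvent;
+*/
+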
+
+/*
+  Define the DAC960 Driver IOCTL requests.
+*/
+
+#define DAC960_IOCTL_GET_CONTROLLER_COUNT	0xDAC001
+#define DAC960_IOCTL_GET_CONTROLLER_INFO	0xDAC002
+#define DAC960_IOCTL_V1_EXECUTE_COMMAND		0xDAC003
+#define DAC960_IOCTL_V2_EXECUTE_COMMAND		0xDAC004
+#define DAC960_IOCTL_V2_GET_HEALTH_STATUS	0xDAC005
+
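+/*
+  Illustrative user-space sketch (not part of the driver build): these
+  request codes are issued against the DAC960 Gateway Access Module
+  character device registered under misc minor DAC960_GAM_MINOR.  The
+  "/dev/dac960_gam" node name is an assumption of this sketch; only the
+  request codes and reply structures are defined by this header.
+*/
+
+#if 0
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/ioctl.h>
+
+static int DAC960_CountControllers(void)
+{
+  /* GET_CONTROLLER_COUNT returns the count as the ioctl result. */
+  int fd = open("/dev/dac960_gam", O_RDONLY), Count;
+  if (fd < 0) return -1;
+  Count = ioctl(fd, DAC960_IOCTL_GET_CONTROLLER_COUNT, 0);
+  close(fd);
+  return Count;
+}
+#endif
+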
+
+/*
+  Define the DAC960_IOCTL_GET_CONTROLLER_INFO reply structure.
+*/
+
+typedef struct DAC960_ControllerInfo
+{
+  unsigned char ControllerNumber;
+  unsigned char FirmwareType;
+  unsigned char Channels;
+  unsigned char Targets;
+  unsigned char PCI_Bus;
+  unsigned char PCI_Device;
+  unsigned char PCI_Function;
+  unsigned char IRQ_Channel;
+  DAC960_PCI_Address_T PCI_Address;
+  unsigned char ModelName[20];
+  unsigned char FirmwareVersion[12];
+}
+DAC960_ControllerInfo_T;
+
+
+/*
+  Define the User Mode DAC960_IOCTL_V1_EXECUTE_COMMAND request structure.
+*/
+
+typedef struct DAC960_V1_UserCommand
+{
+  unsigned char ControllerNumber;
+  DAC960_V1_CommandMailbox_T CommandMailbox;
+  int DataTransferLength;
+  void __user *DataTransferBuffer;
+  DAC960_V1_DCDB_T __user *DCDB;
+}
+DAC960_V1_UserCommand_T;
+
+
+/*
+  Define the Kernel Mode DAC960_IOCTL_V1_EXECUTE_COMMAND request structure.
+*/
+
+typedef struct DAC960_V1_KernelCommand
+{
+  unsigned char ControllerNumber;
+  DAC960_V1_CommandMailbox_T CommandMailbox;
+  int DataTransferLength;
+  void *DataTransferBuffer;
+  DAC960_V1_DCDB_T *DCDB;
+  DAC960_V1_CommandStatus_T CommandStatus;
+  void (*CompletionFunction)(struct DAC960_V1_KernelCommand *);
+  void *CompletionData;
+}
+DAC960_V1_KernelCommand_T;
+
+
+/*
+  Define the User Mode DAC960_IOCTL_V2_EXECUTE_COMMAND request structure.
+*/
+
+typedef struct DAC960_V2_UserCommand
+{
+  unsigned char ControllerNumber;
+  DAC960_V2_CommandMailbox_T CommandMailbox;
+  int DataTransferLength;
+  int RequestSenseLength;
+  void __user *DataTransferBuffer;
+  void __user *RequestSenseBuffer;
+}
+DAC960_V2_UserCommand_T;
+
+
+/*
+  Define the Kernel Mode DAC960_IOCTL_V2_EXECUTE_COMMAND request structure.
+*/
+
+typedef struct DAC960_V2_KernelCommand
+{
+  unsigned char ControllerNumber;
+  DAC960_V2_CommandMailbox_T CommandMailbox;
+  int DataTransferLength;
+  int RequestSenseLength;
+  void *DataTransferBuffer;
+  void *RequestSenseBuffer;
+  DAC960_V2_CommandStatus_T CommandStatus;
+  void (*CompletionFunction)(struct DAC960_V2_KernelCommand *);
+  void *CompletionData;
+}
+DAC960_V2_KernelCommand_T;
+
+
+/*
+  Define the User Mode DAC960_IOCTL_V2_GET_HEALTH_STATUS request structure.
+*/
+
+typedef struct DAC960_V2_GetHealthStatus
+{
+  unsigned char ControllerNumber;
+  DAC960_V2_HealthStatusBuffer_T __user *HealthStatusBuffer;
+}
+DAC960_V2_GetHealthStatus_T;
+
+
+/*
+  Import the Kernel Mode IOCTL interface.
+*/
+
+extern int DAC960_KernelIOCTL(unsigned int Request, void *Argument);
+
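+/*
+  Minimal in-kernel usage sketch.  It assumes the driver implements the
+  DAC960_KernelIOCTL entry point declared above and that
+  DAC960_V2_NormalCompletion is the V2 success status; the completion
+  callback and command contents below are illustrative only:
+*/
+
+#if 0
+static void MyCompletionFunction(DAC960_V2_KernelCommand_T *KernelCommand)
+{
+  /* Called when the controller completes the command. */
+  if (KernelCommand->CommandStatus != DAC960_V2_NormalCompletion)
+    printk(KERN_ERR "DAC960: kernel command failed\n");
+}
+
+static int MySubmitCommand(DAC960_V2_KernelCommand_T *KernelCommand)
+{
+  KernelCommand->ControllerNumber = 0;
+  KernelCommand->CompletionFunction = MyCompletionFunction;
+  return DAC960_KernelIOCTL(DAC960_IOCTL_V2_EXECUTE_COMMAND, KernelCommand);
+}
+#endif
+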
+
+/*
+  DAC960_DriverVersion protects the private portion of this file: it is
+  defined only by the driver itself, so the declarations below are
+  invisible to other users of this header.
+*/
+
+#ifdef DAC960_DriverVersion
+
+
+/*
+  Define the maximum Driver Queue Depth and Controller Queue Depth supported
+  by DAC960 V1 and V2 Firmware Controllers.
+*/
+
+#define DAC960_MaxDriverQueueDepth		511
+#define DAC960_MaxControllerQueueDepth		512
+
+
+/*
+  Define the maximum number of Scatter/Gather Segments supported by DAC960
+  V1 and V2 Firmware Controllers.
+*/
+
+#define DAC960_V1_ScatterGatherLimit		33
+#define DAC960_V2_ScatterGatherLimit		128
+
+
+/*
+  Define the number of Command Mailboxes and Status Mailboxes used by the
+  DAC960 V1 and V2 Firmware Memory Mailbox Interface.
+*/
+
+#define DAC960_V1_CommandMailboxCount		256
+#define DAC960_V1_StatusMailboxCount		1024
+#define DAC960_V2_CommandMailboxCount		512
+#define DAC960_V2_StatusMailboxCount		512
+
+
+/*
+  Define the DAC960 Controller Monitoring Timer Interval.
+*/
+
+#define DAC960_MonitoringTimerInterval		(10 * HZ)
+
+
+/*
+  Define the DAC960 Controller Secondary Monitoring Interval.
+*/
+
+#define DAC960_SecondaryMonitoringInterval	(60 * HZ)
+
+
+/*
+  Define the DAC960 Controller Health Status Monitoring Interval.
+*/
+
+#define DAC960_HealthStatusMonitoringInterval	(1 * HZ)
+
+
+/*
+  Define the DAC960 Controller Progress Reporting Interval.
+*/
+
+#define DAC960_ProgressReportingInterval	(60 * HZ)
+
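+/*
+  These intervals are expressed in jiffies.  A sketch of how one is
+  consumed; the driver arms its monitoring timer in this style later in
+  this file:
+
+    Controller->MonitoringTimer.expires =
+      jiffies + DAC960_MonitoringTimerInterval;
+    add_timer(&Controller->MonitoringTimer);
+*/
+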
+
+/*
+  Define the maximum number of Partitions allowed for each Logical Drive.
+*/
+
+#define DAC960_MaxPartitions			8
+#define DAC960_MaxPartitionsBits		3
+
+/*
+  Define the DAC960 Controller fixed Block Size and Block Size Bits.
+*/
+
+#define DAC960_BlockSize			512
+#define DAC960_BlockSizeBits			9
+
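+/*
+  A sketch of how these constants combine (the minor-number split and the
+  byte/block conversions follow directly from the values above):
+
+    alloc_disk(1 << DAC960_MaxPartitionsBits);    8 minors per logical drive
+    ByteCount  = BlockCount << DAC960_BlockSizeBits;
+    BlockCount = ByteCount  >> DAC960_BlockSizeBits;
+*/
+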
+
+/*
+  Define the number of Command structures that should be allocated as a
+  group to optimize kernel memory allocation.
+*/
+
+#define DAC960_V1_CommandAllocationGroupSize	11
+#define DAC960_V2_CommandAllocationGroupSize	29
+
+
+/*
+  Define the Controller Line Buffer, Progress Buffer, User Message, and
+  Initial Status Buffer sizes.
+*/
+
+#define DAC960_LineBufferSize			100
+#define DAC960_ProgressBufferSize		200
+#define DAC960_UserMessageSize			200
+#define DAC960_InitialStatusBufferSize		(8192-32)
+
+
+/*
+  Define the DAC960 Controller Firmware Types.
+*/
+
+typedef enum
+{
+  DAC960_V1_Controller =			1,
+  DAC960_V2_Controller =			2
+}
+DAC960_FirmwareType_T;
+
+
+/*
+  Define the DAC960 Controller Hardware Types.
+*/
+
+typedef enum
+{
+  DAC960_BA_Controller =			1,	/* eXtremeRAID 2000 */
+  DAC960_LP_Controller =			2,	/* AcceleRAID 352 */
+  DAC960_LA_Controller =			3,	/* DAC1164P */
+  DAC960_PG_Controller =			4,	/* DAC960PTL/PJ/PG */
+  DAC960_PD_Controller =			5,	/* DAC960PU/PD/PL/P */
+  DAC960_P_Controller =				6	/* DAC960PU/PD/PL/P */
+}
+DAC960_HardwareType_T;
+
+
+/*
+  Define the Driver Message Levels.
+*/
+
+typedef enum DAC960_MessageLevel
+{
+  DAC960_AnnounceLevel =			0,
+  DAC960_InfoLevel =				1,
+  DAC960_NoticeLevel =				2,
+  DAC960_WarningLevel =				3,
+  DAC960_ErrorLevel =				4,
+  DAC960_ProgressLevel =			5,
+  DAC960_CriticalLevel =			6,
+  DAC960_UserCriticalLevel =			7
+}
+DAC960_MessageLevel_T;
+
+static char
+  *DAC960_MessageLevelMap[] =
+    { KERN_NOTICE, KERN_NOTICE, KERN_NOTICE, KERN_WARNING,
+      KERN_ERR, KERN_CRIT, KERN_CRIT, KERN_CRIT };
+
+
+/*
+  Define Driver Message macros.
+*/
+
+#define DAC960_Announce(Format, Arguments...) \
+  DAC960_Message(DAC960_AnnounceLevel, Format, ##Arguments)
+
+#define DAC960_Info(Format, Arguments...) \
+  DAC960_Message(DAC960_InfoLevel, Format, ##Arguments)
+
+#define DAC960_Notice(Format, Arguments...) \
+  DAC960_Message(DAC960_NoticeLevel, Format, ##Arguments)
+
+#define DAC960_Warning(Format, Arguments...) \
+  DAC960_Message(DAC960_WarningLevel, Format, ##Arguments)
+
+#define DAC960_Error(Format, Arguments...) \
+  DAC960_Message(DAC960_ErrorLevel, Format, ##Arguments)
+
+#define DAC960_Progress(Format, Arguments...) \
+  DAC960_Message(DAC960_ProgressLevel, Format, ##Arguments)
+
+#define DAC960_Critical(Format, Arguments...) \
+  DAC960_Message(DAC960_CriticalLevel, Format, ##Arguments)
+
+#define DAC960_UserCritical(Format, Arguments...) \
+  DAC960_Message(DAC960_UserCriticalLevel, Format, ##Arguments)
+
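+/*
+  Usage sketch: each macro expands to a call to DAC960_Message(), which is
+  defined with the driver code and maps the level through
+  DAC960_MessageLevelMap before calling printk.  By convention the
+  Controller pointer follows the format string:
+
+    DAC960_Error("Unable to allocate DMA mapped memory\n", Controller);
+*/
+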
+
+struct DAC960_privdata {
+	DAC960_HardwareType_T	HardwareType;
+	DAC960_FirmwareType_T	FirmwareType;
+	irqreturn_t (*InterruptHandler)(int, void *, struct pt_regs *);
+	unsigned int		MemoryWindowSize;
+};
+
+
+/*
+  Define the DAC960 V1 Firmware Controller Status Mailbox structure.
+*/
+
+typedef union DAC960_V1_StatusMailbox
+{
+  unsigned int Word;					/* Word 0 */
+  struct {
+    DAC960_V1_CommandIdentifier_T CommandIdentifier;	/* Byte 0 */
+    unsigned char :7;					/* Byte 1 Bits 0-6 */
+    boolean Valid:1;					/* Byte 1 Bit 7 */
+    DAC960_V1_CommandStatus_T CommandStatus;		/* Bytes 2-3 */
+  } Fields;
+}
+DAC960_V1_StatusMailbox_T;
+
+
+/*
+  Define the DAC960 V2 Firmware Controller Status Mailbox structure.
+*/
+
+typedef union DAC960_V2_StatusMailbox
+{
+  unsigned int Words[2];				/* Words 0-1 */
+  struct {
+    DAC960_V2_CommandIdentifier_T CommandIdentifier;	/* Bytes 0-1 */
+    DAC960_V2_CommandStatus_T CommandStatus;		/* Byte 2 */
+    unsigned char RequestSenseLength;			/* Byte 3 */
+    int DataTransferResidue;				/* Bytes 4-7 */
+  } Fields;
+}
+DAC960_V2_StatusMailbox_T;
+
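+/*
+  Consumption sketch, mirroring the interrupt handlers later in this file:
+  the controller posts completions into a ring of these mailboxes, and the
+  driver spins on the Valid bit (V1) or a non-zero CommandIdentifier (V2),
+  clears the slot, and advances.  ProcessCompletion() below stands in for
+  the driver's per-command completion handling:
+
+    while (NextStatusMailbox->Fields.Valid) {
+      ProcessCompletion(NextStatusMailbox->Fields.CommandIdentifier,
+                        NextStatusMailbox->Fields.CommandStatus);
+      NextStatusMailbox->Word = 0;
+      if (++NextStatusMailbox > Controller->V1.LastStatusMailbox)
+        NextStatusMailbox = Controller->V1.FirstStatusMailbox;
+    }
+*/
+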
+
+/*
+  Define the DAC960 Driver Command Types.
+*/
+
+typedef enum
+{
+  DAC960_ReadCommand =				1,
+  DAC960_WriteCommand =				2,
+  DAC960_ReadRetryCommand =			3,
+  DAC960_WriteRetryCommand =			4,
+  DAC960_MonitoringCommand =			5,
+  DAC960_ImmediateCommand =			6,
+  DAC960_QueuedCommand =			7
+}
+DAC960_CommandType_T;
+
+
+/*
+  Define the DAC960 Driver Command structure.
+*/
+
+typedef struct DAC960_Command
+{
+  int CommandIdentifier;
+  DAC960_CommandType_T CommandType;
+  struct DAC960_Controller *Controller;
+  struct DAC960_Command *Next;
+  struct completion *Completion;
+  unsigned int LogicalDriveNumber;
+  unsigned int BlockNumber;
+  unsigned int BlockCount;
+  unsigned int SegmentCount;
+  int	DmaDirection;
+  struct scatterlist *cmd_sglist;
+  struct request *Request;
+  union {
+    struct {
+      DAC960_V1_CommandMailbox_T CommandMailbox;
+      DAC960_V1_KernelCommand_T *KernelCommand;
+      DAC960_V1_CommandStatus_T CommandStatus;
+      DAC960_V1_ScatterGatherSegment_T *ScatterGatherList;
+      dma_addr_t ScatterGatherListDMA;
+      struct scatterlist ScatterList[DAC960_V1_ScatterGatherLimit];
+      unsigned int EndMarker[0];
+    } V1;
+    struct {
+      DAC960_V2_CommandMailbox_T CommandMailbox;
+      DAC960_V2_KernelCommand_T *KernelCommand;
+      DAC960_V2_CommandStatus_T CommandStatus;
+      unsigned char RequestSenseLength;
+      int DataTransferResidue;
+      DAC960_V2_ScatterGatherSegment_T *ScatterGatherList;
+      dma_addr_t ScatterGatherListDMA;
+      DAC960_SCSI_RequestSense_T *RequestSense;
+      dma_addr_t RequestSenseDMA;
+      struct scatterlist ScatterList[DAC960_V2_ScatterGatherLimit];
+      unsigned int EndMarker[0];
+    } V2;
+  } FW;
+}
+DAC960_Command_T;
+
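+/*
+  Allocation sketch: free commands are kept on a singly linked list headed
+  by the controller's FreeCommands field and chained through Next; this is
+  the scheme the command allocation code later in this file implements:
+
+    Command = Controller->FreeCommands;
+    if (Command != NULL)
+      Controller->FreeCommands = Command->Next;
+*/
+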
+
+/*
+  Define the DAC960 Driver Controller structure.
+*/
+
+typedef struct DAC960_Controller
+{
+  void __iomem *BaseAddress;
+  void __iomem *MemoryMappedAddress;
+  DAC960_FirmwareType_T FirmwareType;
+  DAC960_HardwareType_T HardwareType;
+  DAC960_IO_Address_T IO_Address;
+  DAC960_PCI_Address_T PCI_Address;
+  struct pci_dev *PCIDevice;
+  unsigned char ControllerNumber;
+  unsigned char ControllerName[4];
+  unsigned char ModelName[20];
+  unsigned char FullModelName[28];
+  unsigned char FirmwareVersion[12];
+  unsigned char Bus;
+  unsigned char Device;
+  unsigned char Function;
+  unsigned char IRQ_Channel;
+  unsigned char Channels;
+  unsigned char Targets;
+  unsigned char MemorySize;
+  unsigned char LogicalDriveCount;
+  unsigned short CommandAllocationGroupSize;
+  unsigned short ControllerQueueDepth;
+  unsigned short DriverQueueDepth;
+  unsigned short MaxBlocksPerCommand;
+  unsigned short ControllerScatterGatherLimit;
+  unsigned short DriverScatterGatherLimit;
+  u64		BounceBufferLimit;
+  unsigned int CombinedStatusBufferLength;
+  unsigned int InitialStatusLength;
+  unsigned int CurrentStatusLength;
+  unsigned int ProgressBufferLength;
+  unsigned int UserStatusLength;
+  struct dma_loaf DmaPages;
+  unsigned long MonitoringTimerCount;
+  unsigned long PrimaryMonitoringTime;
+  unsigned long SecondaryMonitoringTime;
+  unsigned long ShutdownMonitoringTimer;
+  unsigned long LastProgressReportTime;
+  unsigned long LastCurrentStatusTime;
+  boolean ControllerInitialized;
+  boolean MonitoringCommandDeferred;
+  boolean EphemeralProgressMessage;
+  boolean DriveSpinUpMessageDisplayed;
+  boolean MonitoringAlertMode;
+  boolean SuppressEnclosureMessages;
+  struct timer_list MonitoringTimer;
+  struct gendisk *disks[DAC960_MaxLogicalDrives];
+  struct pci_pool *ScatterGatherPool;
+  DAC960_Command_T *FreeCommands;
+  unsigned char *CombinedStatusBuffer;
+  unsigned char *CurrentStatusBuffer;
+  struct request_queue *RequestQueue[DAC960_MaxLogicalDrives];
+  int req_q_index;
+  spinlock_t queue_lock;
+  wait_queue_head_t CommandWaitQueue;
+  wait_queue_head_t HealthStatusWaitQueue;
+  DAC960_Command_T InitialCommand;
+  DAC960_Command_T *Commands[DAC960_MaxDriverQueueDepth];
+  struct proc_dir_entry *ControllerProcEntry;
+  boolean LogicalDriveInitiallyAccessible[DAC960_MaxLogicalDrives];
+  void (*QueueCommand)(DAC960_Command_T *Command);
+  boolean (*ReadControllerConfiguration)(struct DAC960_Controller *);
+  boolean (*ReadDeviceConfiguration)(struct DAC960_Controller *);
+  boolean (*ReportDeviceConfiguration)(struct DAC960_Controller *);
+  void (*QueueReadWriteCommand)(DAC960_Command_T *Command);
+  union {
+    struct {
+      unsigned char GeometryTranslationHeads;
+      unsigned char GeometryTranslationSectors;
+      unsigned char PendingRebuildFlag;
+      unsigned short StripeSize;
+      unsigned short SegmentSize;
+      unsigned short NewEventLogSequenceNumber;
+      unsigned short OldEventLogSequenceNumber;
+      unsigned short DeviceStateChannel;
+      unsigned short DeviceStateTargetID;
+      boolean DualModeMemoryMailboxInterface;
+      boolean BackgroundInitializationStatusSupported;
+      boolean SAFTE_EnclosureManagementEnabled;
+      boolean NeedLogicalDriveInformation;
+      boolean NeedErrorTableInformation;
+      boolean NeedDeviceStateInformation;
+      boolean NeedDeviceInquiryInformation;
+      boolean NeedDeviceSerialNumberInformation;
+      boolean NeedRebuildProgress;
+      boolean NeedConsistencyCheckProgress;
+      boolean NeedBackgroundInitializationStatus;
+      boolean StartDeviceStateScan;
+      boolean RebuildProgressFirst;
+      boolean RebuildFlagPending;
+      boolean RebuildStatusPending;
+
+      dma_addr_t	FirstCommandMailboxDMA;
+      DAC960_V1_CommandMailbox_T *FirstCommandMailbox;
+      DAC960_V1_CommandMailbox_T *LastCommandMailbox;
+      DAC960_V1_CommandMailbox_T *NextCommandMailbox;
+      DAC960_V1_CommandMailbox_T *PreviousCommandMailbox1;
+      DAC960_V1_CommandMailbox_T *PreviousCommandMailbox2;
+
+      dma_addr_t	FirstStatusMailboxDMA;
+      DAC960_V1_StatusMailbox_T *FirstStatusMailbox;
+      DAC960_V1_StatusMailbox_T *LastStatusMailbox;
+      DAC960_V1_StatusMailbox_T *NextStatusMailbox;
+
+      DAC960_V1_DCDB_T *MonitoringDCDB;
+      dma_addr_t MonitoringDCDB_DMA;
+
+      DAC960_V1_Enquiry_T Enquiry;
+      DAC960_V1_Enquiry_T *NewEnquiry;
+      dma_addr_t NewEnquiryDMA;
+
+      DAC960_V1_ErrorTable_T ErrorTable;
+      DAC960_V1_ErrorTable_T *NewErrorTable;
+      dma_addr_t NewErrorTableDMA;
+
+      DAC960_V1_EventLogEntry_T *EventLogEntry;
+      dma_addr_t EventLogEntryDMA;
+
+      DAC960_V1_RebuildProgress_T *RebuildProgress;
+      dma_addr_t RebuildProgressDMA;
+      DAC960_V1_CommandStatus_T LastRebuildStatus;
+      DAC960_V1_CommandStatus_T PendingRebuildStatus;
+
+      DAC960_V1_LogicalDriveInformationArray_T LogicalDriveInformation;
+      DAC960_V1_LogicalDriveInformationArray_T *NewLogicalDriveInformation;
+      dma_addr_t NewLogicalDriveInformationDMA;
+
+      DAC960_V1_BackgroundInitializationStatus_T
+        	*BackgroundInitializationStatus;
+      dma_addr_t BackgroundInitializationStatusDMA;
+      DAC960_V1_BackgroundInitializationStatus_T
+        	LastBackgroundInitializationStatus;
+
+      DAC960_V1_DeviceState_T
+	DeviceState[DAC960_V1_MaxChannels][DAC960_V1_MaxTargets];
+      DAC960_V1_DeviceState_T *NewDeviceState;
+      dma_addr_t	NewDeviceStateDMA;
+
+      DAC960_SCSI_Inquiry_T
+	InquiryStandardData[DAC960_V1_MaxChannels][DAC960_V1_MaxTargets];
+      DAC960_SCSI_Inquiry_T *NewInquiryStandardData;
+      dma_addr_t NewInquiryStandardDataDMA;
+
+      DAC960_SCSI_Inquiry_UnitSerialNumber_T
+	InquiryUnitSerialNumber[DAC960_V1_MaxChannels][DAC960_V1_MaxTargets];
+      DAC960_SCSI_Inquiry_UnitSerialNumber_T *NewInquiryUnitSerialNumber;
+      dma_addr_t NewInquiryUnitSerialNumberDMA;
+
+      int DeviceResetCount[DAC960_V1_MaxChannels][DAC960_V1_MaxTargets];
+      boolean DirectCommandActive[DAC960_V1_MaxChannels][DAC960_V1_MaxTargets];
+    } V1;
+    struct {
+      unsigned int StatusChangeCounter;
+      unsigned int NextEventSequenceNumber;
+      unsigned int PhysicalDeviceIndex;
+      boolean NeedLogicalDeviceInformation;
+      boolean NeedPhysicalDeviceInformation;
+      boolean NeedDeviceSerialNumberInformation;
+      boolean StartLogicalDeviceInformationScan;
+      boolean StartPhysicalDeviceInformationScan;
+      struct pci_pool *RequestSensePool;
+
+      dma_addr_t	FirstCommandMailboxDMA;
+      DAC960_V2_CommandMailbox_T *FirstCommandMailbox;
+      DAC960_V2_CommandMailbox_T *LastCommandMailbox;
+      DAC960_V2_CommandMailbox_T *NextCommandMailbox;
+      DAC960_V2_CommandMailbox_T *PreviousCommandMailbox1;
+      DAC960_V2_CommandMailbox_T *PreviousCommandMailbox2;
+
+      dma_addr_t	FirstStatusMailboxDMA;
+      DAC960_V2_StatusMailbox_T *FirstStatusMailbox;
+      DAC960_V2_StatusMailbox_T *LastStatusMailbox;
+      DAC960_V2_StatusMailbox_T *NextStatusMailbox;
+
+      dma_addr_t	HealthStatusBufferDMA;
+      DAC960_V2_HealthStatusBuffer_T *HealthStatusBuffer;
+
+      DAC960_V2_ControllerInfo_T ControllerInformation;
+      DAC960_V2_ControllerInfo_T *NewControllerInformation;
+      dma_addr_t	NewControllerInformationDMA;
+
+      DAC960_V2_LogicalDeviceInfo_T
+	*LogicalDeviceInformation[DAC960_MaxLogicalDrives];
+      DAC960_V2_LogicalDeviceInfo_T *NewLogicalDeviceInformation;
+      dma_addr_t	 NewLogicalDeviceInformationDMA;
+
+      DAC960_V2_PhysicalDeviceInfo_T
+	*PhysicalDeviceInformation[DAC960_V2_MaxPhysicalDevices];
+      DAC960_V2_PhysicalDeviceInfo_T *NewPhysicalDeviceInformation;
+      dma_addr_t	NewPhysicalDeviceInformationDMA;
+
+      DAC960_SCSI_Inquiry_UnitSerialNumber_T *NewInquiryUnitSerialNumber;
+      dma_addr_t	NewInquiryUnitSerialNumberDMA;
+      DAC960_SCSI_Inquiry_UnitSerialNumber_T
+	*InquiryUnitSerialNumber[DAC960_V2_MaxPhysicalDevices];
+
+      DAC960_V2_Event_T *Event;
+      dma_addr_t EventDMA;
+
+      DAC960_V2_PhysicalToLogicalDevice_T *PhysicalToLogicalDevice;
+      dma_addr_t PhysicalToLogicalDeviceDMA;
+
+      DAC960_V2_PhysicalDevice_T
+	LogicalDriveToVirtualDevice[DAC960_MaxLogicalDrives];
+      boolean LogicalDriveFoundDuringScan[DAC960_MaxLogicalDrives];
+    } V2;
+  } FW;
+  unsigned char ProgressBuffer[DAC960_ProgressBufferSize];
+  unsigned char UserStatusBuffer[DAC960_UserMessageSize];
+}
+DAC960_Controller_T;
+
+
+/*
+  Simplify access to Firmware Version Dependent Data Structure Components
+  and Functions.
+*/
+
+#define V1				FW.V1
+#define V2				FW.V2
+#define DAC960_QueueCommand(Command) \
+  (Controller->QueueCommand)(Command)
+#define DAC960_ReadControllerConfiguration(Controller) \
+  (Controller->ReadControllerConfiguration)(Controller)
+#define DAC960_ReadDeviceConfiguration(Controller) \
+  (Controller->ReadDeviceConfiguration)(Controller)
+#define DAC960_ReportDeviceConfiguration(Controller) \
+  (Controller->ReportDeviceConfiguration)(Controller)
+#define DAC960_QueueReadWriteCommand(Command) \
+  (Controller->QueueReadWriteCommand)(Command)
+
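+/*
+  With these shorthands, firmware-specific fields and methods read
+  naturally, e.g. Controller->V1.Enquiry or DAC960_QueueCommand(Command).
+  Note that the function-call macros implicitly require a variable named
+  Controller in the enclosing scope.
+*/
+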
+/*
+ * dma_addr_writeql is provided to write dma_addr_t types
+ * to a 64-bit PCI address space register.  The controller
+ * will accept having the register written as two 32-bit
+ * values.
+ *
+ * In HIGHMEM kernels, dma_addr_t is a 64-bit value;
+ * without HIGHMEM, dma_addr_t is a 32-bit value.
+ *
+ * The compiler should always fix up the assignment
+ * to u.wq appropriately, depending upon the size of
+ * dma_addr_t.
+ */
+static inline
+void dma_addr_writeql(dma_addr_t addr, void __iomem *write_address)
+{
+	union {
+		u64 wq;
+		uint wl[2];
+	} u;
+
+	u.wq = addr;
+
+	writel(u.wl[0], write_address);
+	writel(u.wl[1], write_address + 4);
+}
+
+/*
+  Define the DAC960 BA Series Controller Interface Register Offsets.
+*/
+
+#define DAC960_BA_RegisterWindowSize		0x80
+
+typedef enum
+{
+  DAC960_BA_InboundDoorBellRegisterOffset =	0x60,
+  DAC960_BA_OutboundDoorBellRegisterOffset =	0x61,
+  DAC960_BA_InterruptStatusRegisterOffset =	0x30,
+  DAC960_BA_InterruptMaskRegisterOffset =	0x34,
+  DAC960_BA_CommandMailboxBusAddressOffset =	0x50,
+  DAC960_BA_CommandStatusOffset =		0x58,
+  DAC960_BA_ErrorStatusRegisterOffset =		0x63
+}
+DAC960_BA_RegisterOffsets_T;
+
+
+/*
+  Define the structure of the DAC960 BA Series Inbound Door Bell Register.
+*/
+
+typedef union DAC960_BA_InboundDoorBellRegister
+{
+  unsigned char All;
+  struct {
+    boolean HardwareMailboxNewCommand:1;		/* Bit 0 */
+    boolean AcknowledgeHardwareMailboxStatus:1;		/* Bit 1 */
+    boolean GenerateInterrupt:1;			/* Bit 2 */
+    boolean ControllerReset:1;				/* Bit 3 */
+    boolean MemoryMailboxNewCommand:1;			/* Bit 4 */
+    unsigned char :3;					/* Bits 5-7 */
+  } Write;
+  struct {
+    boolean HardwareMailboxEmpty:1;			/* Bit 0 */
+    boolean InitializationNotInProgress:1;		/* Bit 1 */
+    unsigned char :6;					/* Bits 2-7 */
+  } Read;
+}
+DAC960_BA_InboundDoorBellRegister_T;
+
+
+/*
+  Define the structure of the DAC960 BA Series Outbound Door Bell Register.
+*/
+
+typedef union DAC960_BA_OutboundDoorBellRegister
+{
+  unsigned char All;
+  struct {
+    boolean AcknowledgeHardwareMailboxInterrupt:1;	/* Bit 0 */
+    boolean AcknowledgeMemoryMailboxInterrupt:1;	/* Bit 1 */
+    unsigned char :6;					/* Bits 2-7 */
+  } Write;
+  struct {
+    boolean HardwareMailboxStatusAvailable:1;		/* Bit 0 */
+    boolean MemoryMailboxStatusAvailable:1;		/* Bit 1 */
+    unsigned char :6;					/* Bits 2-7 */
+  } Read;
+}
+DAC960_BA_OutboundDoorBellRegister_T;
+
+
+/*
+  Define the structure of the DAC960 BA Series Interrupt Mask Register.
+*/
+
+typedef union DAC960_BA_InterruptMaskRegister
+{
+  unsigned char All;
+  struct {
+    unsigned int :2;					/* Bits 0-1 */
+    boolean DisableInterrupts:1;			/* Bit 2 */
+    boolean DisableInterruptsI2O:1;			/* Bit 3 */
+    unsigned int :4;					/* Bits 4-7 */
+  } Bits;
+}
+DAC960_BA_InterruptMaskRegister_T;
+
+
+/*
+  Define the structure of the DAC960 BA Series Error Status Register.
+*/
+
+typedef union DAC960_BA_ErrorStatusRegister
+{
+  unsigned char All;
+  struct {
+    unsigned int :2;					/* Bits 0-1 */
+    boolean ErrorStatusPending:1;			/* Bit 2 */
+    unsigned int :5;					/* Bits 3-7 */
+  } Bits;
+}
+DAC960_BA_ErrorStatusRegister_T;
+
+
+/*
+  Define inline functions to provide an abstraction for reading and writing the
+  DAC960 BA Series Controller Interface Registers.
+*/
+
+static inline
+void DAC960_BA_HardwareMailboxNewCommand(void __iomem *ControllerBaseAddress)
+{
+  DAC960_BA_InboundDoorBellRegister_T InboundDoorBellRegister;
+  InboundDoorBellRegister.All = 0;
+  InboundDoorBellRegister.Write.HardwareMailboxNewCommand = true;
+  writeb(InboundDoorBellRegister.All,
+	 ControllerBaseAddress + DAC960_BA_InboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_BA_AcknowledgeHardwareMailboxStatus(void __iomem *ControllerBaseAddress)
+{
+  DAC960_BA_InboundDoorBellRegister_T InboundDoorBellRegister;
+  InboundDoorBellRegister.All = 0;
+  InboundDoorBellRegister.Write.AcknowledgeHardwareMailboxStatus = true;
+  writeb(InboundDoorBellRegister.All,
+	 ControllerBaseAddress + DAC960_BA_InboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_BA_GenerateInterrupt(void __iomem *ControllerBaseAddress)
+{
+  DAC960_BA_InboundDoorBellRegister_T InboundDoorBellRegister;
+  InboundDoorBellRegister.All = 0;
+  InboundDoorBellRegister.Write.GenerateInterrupt = true;
+  writeb(InboundDoorBellRegister.All,
+	 ControllerBaseAddress + DAC960_BA_InboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_BA_ControllerReset(void __iomem *ControllerBaseAddress)
+{
+  DAC960_BA_InboundDoorBellRegister_T InboundDoorBellRegister;
+  InboundDoorBellRegister.All = 0;
+  InboundDoorBellRegister.Write.ControllerReset = true;
+  writeb(InboundDoorBellRegister.All,
+	 ControllerBaseAddress + DAC960_BA_InboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_BA_MemoryMailboxNewCommand(void __iomem *ControllerBaseAddress)
+{
+  DAC960_BA_InboundDoorBellRegister_T InboundDoorBellRegister;
+  InboundDoorBellRegister.All = 0;
+  InboundDoorBellRegister.Write.MemoryMailboxNewCommand = true;
+  writeb(InboundDoorBellRegister.All,
+	 ControllerBaseAddress + DAC960_BA_InboundDoorBellRegisterOffset);
+}
+
+static inline
+boolean DAC960_BA_HardwareMailboxFullP(void __iomem *ControllerBaseAddress)
+{
+  DAC960_BA_InboundDoorBellRegister_T InboundDoorBellRegister;
+  InboundDoorBellRegister.All =
+    readb(ControllerBaseAddress + DAC960_BA_InboundDoorBellRegisterOffset);
+  return !InboundDoorBellRegister.Read.HardwareMailboxEmpty;
+}
+
+static inline
+boolean DAC960_BA_InitializationInProgressP(void __iomem *ControllerBaseAddress)
+{
+  DAC960_BA_InboundDoorBellRegister_T InboundDoorBellRegister;
+  InboundDoorBellRegister.All =
+    readb(ControllerBaseAddress + DAC960_BA_InboundDoorBellRegisterOffset);
+  return !InboundDoorBellRegister.Read.InitializationNotInProgress;
+}
+
+static inline
+void DAC960_BA_AcknowledgeHardwareMailboxInterrupt(void __iomem *ControllerBaseAddress)
+{
+  DAC960_BA_OutboundDoorBellRegister_T OutboundDoorBellRegister;
+  OutboundDoorBellRegister.All = 0;
+  OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true;
+  writeb(OutboundDoorBellRegister.All,
+	 ControllerBaseAddress + DAC960_BA_OutboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_BA_AcknowledgeMemoryMailboxInterrupt(void __iomem *ControllerBaseAddress)
+{
+  DAC960_BA_OutboundDoorBellRegister_T OutboundDoorBellRegister;
+  OutboundDoorBellRegister.All = 0;
+  OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true;
+  writeb(OutboundDoorBellRegister.All,
+	 ControllerBaseAddress + DAC960_BA_OutboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_BA_AcknowledgeInterrupt(void __iomem *ControllerBaseAddress)
+{
+  DAC960_BA_OutboundDoorBellRegister_T OutboundDoorBellRegister;
+  OutboundDoorBellRegister.All = 0;
+  OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true;
+  OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true;
+  writeb(OutboundDoorBellRegister.All,
+	 ControllerBaseAddress + DAC960_BA_OutboundDoorBellRegisterOffset);
+}
+
+static inline
+boolean DAC960_BA_HardwareMailboxStatusAvailableP(void __iomem *ControllerBaseAddress)
+{
+  DAC960_BA_OutboundDoorBellRegister_T OutboundDoorBellRegister;
+  OutboundDoorBellRegister.All =
+    readb(ControllerBaseAddress + DAC960_BA_OutboundDoorBellRegisterOffset);
+  return OutboundDoorBellRegister.Read.HardwareMailboxStatusAvailable;
+}
+
+static inline
+boolean DAC960_BA_MemoryMailboxStatusAvailableP(void __iomem *ControllerBaseAddress)
+{
+  DAC960_BA_OutboundDoorBellRegister_T OutboundDoorBellRegister;
+  OutboundDoorBellRegister.All =
+    readb(ControllerBaseAddress + DAC960_BA_OutboundDoorBellRegisterOffset);
+  return OutboundDoorBellRegister.Read.MemoryMailboxStatusAvailable;
+}
+
+static inline
+void DAC960_BA_EnableInterrupts(void __iomem *ControllerBaseAddress)
+{
+  DAC960_BA_InterruptMaskRegister_T InterruptMaskRegister;
+  InterruptMaskRegister.All = 0xFF;
+  InterruptMaskRegister.Bits.DisableInterrupts = false;
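+  /* The I2O interrupt path is deliberately left masked; this driver uses
+     only the conventional doorbell interrupts. */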
+  InterruptMaskRegister.Bits.DisableInterruptsI2O = true;
+  writeb(InterruptMaskRegister.All,
+	 ControllerBaseAddress + DAC960_BA_InterruptMaskRegisterOffset);
+}
+
+static inline
+void DAC960_BA_DisableInterrupts(void __iomem *ControllerBaseAddress)
+{
+  DAC960_BA_InterruptMaskRegister_T InterruptMaskRegister;
+  InterruptMaskRegister.All = 0xFF;
+  InterruptMaskRegister.Bits.DisableInterrupts = true;
+  InterruptMaskRegister.Bits.DisableInterruptsI2O = true;
+  writeb(InterruptMaskRegister.All,
+	 ControllerBaseAddress + DAC960_BA_InterruptMaskRegisterOffset);
+}
+
+static inline
+boolean DAC960_BA_InterruptsEnabledP(void __iomem *ControllerBaseAddress)
+{
+  DAC960_BA_InterruptMaskRegister_T InterruptMaskRegister;
+  InterruptMaskRegister.All =
+    readb(ControllerBaseAddress + DAC960_BA_InterruptMaskRegisterOffset);
+  return !InterruptMaskRegister.Bits.DisableInterrupts;
+}
+
+static inline
+void DAC960_BA_WriteCommandMailbox(DAC960_V2_CommandMailbox_T
+				     *MemoryCommandMailbox,
+				   DAC960_V2_CommandMailbox_T
+				     *CommandMailbox)
+{
+  memcpy(&MemoryCommandMailbox->Words[1], &CommandMailbox->Words[1],
+	 sizeof(DAC960_V2_CommandMailbox_T) - sizeof(unsigned int));
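+  /* Make Words 1-15 of the mailbox visible before Word 0: a non-zero
+     first word marks the slot as holding a valid command, so it must be
+     written, and seen, last. */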
+  wmb();
+  MemoryCommandMailbox->Words[0] = CommandMailbox->Words[0];
+  mb();
+}
+
+
+static inline
+void DAC960_BA_WriteHardwareMailbox(void __iomem *ControllerBaseAddress,
+				    dma_addr_t CommandMailboxDMA)
+{
+	dma_addr_writeql(CommandMailboxDMA,
+		ControllerBaseAddress +
+		DAC960_BA_CommandMailboxBusAddressOffset);
+}
+
+static inline DAC960_V2_CommandIdentifier_T
+DAC960_BA_ReadCommandIdentifier(void __iomem *ControllerBaseAddress)
+{
+  return readw(ControllerBaseAddress + DAC960_BA_CommandStatusOffset);
+}
+
+static inline DAC960_V2_CommandStatus_T
+DAC960_BA_ReadCommandStatus(void __iomem *ControllerBaseAddress)
+{
+  return readw(ControllerBaseAddress + DAC960_BA_CommandStatusOffset + 2);
+}
+
+static inline boolean
+DAC960_BA_ReadErrorStatus(void __iomem *ControllerBaseAddress,
+			  unsigned char *ErrorStatus,
+			  unsigned char *Parameter0,
+			  unsigned char *Parameter1)
+{
+  DAC960_BA_ErrorStatusRegister_T ErrorStatusRegister;
+  ErrorStatusRegister.All =
+    readb(ControllerBaseAddress + DAC960_BA_ErrorStatusRegisterOffset);
+  if (!ErrorStatusRegister.Bits.ErrorStatusPending) return false;
+  ErrorStatusRegister.Bits.ErrorStatusPending = false;
+  *ErrorStatus = ErrorStatusRegister.All;
+  *Parameter0 =
+    readb(ControllerBaseAddress + DAC960_BA_CommandMailboxBusAddressOffset + 0);
+  *Parameter1 =
+    readb(ControllerBaseAddress + DAC960_BA_CommandMailboxBusAddressOffset + 1);
+  writeb(0xFF, ControllerBaseAddress + DAC960_BA_ErrorStatusRegisterOffset);
+  return true;
+}
+
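+/*
+  Hardware mailbox handshake sketch: the sequence below is how the mailbox
+  initialization code later in this file drives the helpers above (Base
+  stands for the controller base address):
+
+    while (DAC960_BA_HardwareMailboxFullP(Base))
+      udelay(1);
+    DAC960_BA_WriteHardwareMailbox(Base, CommandMailboxDMA);
+    DAC960_BA_HardwareMailboxNewCommand(Base);
+    while (!DAC960_BA_HardwareMailboxStatusAvailableP(Base))
+      udelay(1);
+    CommandStatus = DAC960_BA_ReadCommandStatus(Base);
+    DAC960_BA_AcknowledgeHardwareMailboxInterrupt(Base);
+    DAC960_BA_AcknowledgeHardwareMailboxStatus(Base);
+*/
+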
+
+/*
+  Define the DAC960 LP Series Controller Interface Register Offsets.
+*/
+
+#define DAC960_LP_RegisterWindowSize		0x80
+
+typedef enum
+{
+  DAC960_LP_InboundDoorBellRegisterOffset =	0x20,
+  DAC960_LP_OutboundDoorBellRegisterOffset =	0x2C,
+  DAC960_LP_InterruptStatusRegisterOffset =	0x30,
+  DAC960_LP_InterruptMaskRegisterOffset =	0x34,
+  DAC960_LP_CommandMailboxBusAddressOffset =	0x10,
+  DAC960_LP_CommandStatusOffset =		0x18,
+  DAC960_LP_ErrorStatusRegisterOffset =		0x2E
+}
+DAC960_LP_RegisterOffsets_T;
+
+
+/*
+  Define the structure of the DAC960 LP Series Inbound Door Bell Register.
+*/
+
+typedef union DAC960_LP_InboundDoorBellRegister
+{
+  unsigned char All;
+  struct {
+    boolean HardwareMailboxNewCommand:1;		/* Bit 0 */
+    boolean AcknowledgeHardwareMailboxStatus:1;		/* Bit 1 */
+    boolean GenerateInterrupt:1;			/* Bit 2 */
+    boolean ControllerReset:1;				/* Bit 3 */
+    boolean MemoryMailboxNewCommand:1;			/* Bit 4 */
+    unsigned char :3;					/* Bits 5-7 */
+  } Write;
+  struct {
+    boolean HardwareMailboxFull:1;			/* Bit 0 */
+    boolean InitializationInProgress:1;			/* Bit 1 */
+    unsigned char :6;					/* Bits 2-7 */
+  } Read;
+}
+DAC960_LP_InboundDoorBellRegister_T;
+
+
+/*
+  Define the structure of the DAC960 LP Series Outbound Door Bell Register.
+*/
+
+typedef union DAC960_LP_OutboundDoorBellRegister
+{
+  unsigned char All;
+  struct {
+    boolean AcknowledgeHardwareMailboxInterrupt:1;	/* Bit 0 */
+    boolean AcknowledgeMemoryMailboxInterrupt:1;	/* Bit 1 */
+    unsigned char :6;					/* Bits 2-7 */
+  } Write;
+  struct {
+    boolean HardwareMailboxStatusAvailable:1;		/* Bit 0 */
+    boolean MemoryMailboxStatusAvailable:1;		/* Bit 1 */
+    unsigned char :6;					/* Bits 2-7 */
+  } Read;
+}
+DAC960_LP_OutboundDoorBellRegister_T;
+
+
+/*
+  Define the structure of the DAC960 LP Series Interrupt Mask Register.
+*/
+
+typedef union DAC960_LP_InterruptMaskRegister
+{
+  unsigned char All;
+  struct {
+    unsigned int :2;					/* Bits 0-1 */
+    boolean DisableInterrupts:1;			/* Bit 2 */
+    unsigned int :5;					/* Bits 3-7 */
+  } Bits;
+}
+DAC960_LP_InterruptMaskRegister_T;
+
+
+/*
+  Define the structure of the DAC960 LP Series Error Status Register.
+*/
+
+typedef union DAC960_LP_ErrorStatusRegister
+{
+  unsigned char All;
+  struct {
+    unsigned int :2;					/* Bits 0-1 */
+    boolean ErrorStatusPending:1;			/* Bit 2 */
+    unsigned int :5;					/* Bits 3-7 */
+  } Bits;
+}
+DAC960_LP_ErrorStatusRegister_T;
+
+
+/*
+  Define inline functions to provide an abstraction for reading and writing the
+  DAC960 LP Series Controller Interface Registers.
+*/
+
+static inline
+void DAC960_LP_HardwareMailboxNewCommand(void __iomem *ControllerBaseAddress)
+{
+  DAC960_LP_InboundDoorBellRegister_T InboundDoorBellRegister;
+  InboundDoorBellRegister.All = 0;
+  InboundDoorBellRegister.Write.HardwareMailboxNewCommand = true;
+  writeb(InboundDoorBellRegister.All,
+	 ControllerBaseAddress + DAC960_LP_InboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_LP_AcknowledgeHardwareMailboxStatus(void __iomem *ControllerBaseAddress)
+{
+  DAC960_LP_InboundDoorBellRegister_T InboundDoorBellRegister;
+  InboundDoorBellRegister.All = 0;
+  InboundDoorBellRegister.Write.AcknowledgeHardwareMailboxStatus = true;
+  writeb(InboundDoorBellRegister.All,
+	 ControllerBaseAddress + DAC960_LP_InboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_LP_GenerateInterrupt(void __iomem *ControllerBaseAddress)
+{
+  DAC960_LP_InboundDoorBellRegister_T InboundDoorBellRegister;
+  InboundDoorBellRegister.All = 0;
+  InboundDoorBellRegister.Write.GenerateInterrupt = true;
+  writeb(InboundDoorBellRegister.All,
+	 ControllerBaseAddress + DAC960_LP_InboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_LP_ControllerReset(void __iomem *ControllerBaseAddress)
+{
+  DAC960_LP_InboundDoorBellRegister_T InboundDoorBellRegister;
+  InboundDoorBellRegister.All = 0;
+  InboundDoorBellRegister.Write.ControllerReset = true;
+  writeb(InboundDoorBellRegister.All,
+	 ControllerBaseAddress + DAC960_LP_InboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_LP_MemoryMailboxNewCommand(void __iomem *ControllerBaseAddress)
+{
+  DAC960_LP_InboundDoorBellRegister_T InboundDoorBellRegister;
+  InboundDoorBellRegister.All = 0;
+  InboundDoorBellRegister.Write.MemoryMailboxNewCommand = true;
+  writeb(InboundDoorBellRegister.All,
+	 ControllerBaseAddress + DAC960_LP_InboundDoorBellRegisterOffset);
+}
+
+static inline
+boolean DAC960_LP_HardwareMailboxFullP(void __iomem *ControllerBaseAddress)
+{
+  DAC960_LP_InboundDoorBellRegister_T InboundDoorBellRegister;
+  InboundDoorBellRegister.All =
+    readb(ControllerBaseAddress + DAC960_LP_InboundDoorBellRegisterOffset);
+  return InboundDoorBellRegister.Read.HardwareMailboxFull;
+}
+
+static inline
+boolean DAC960_LP_InitializationInProgressP(void __iomem *ControllerBaseAddress)
+{
+  DAC960_LP_InboundDoorBellRegister_T InboundDoorBellRegister;
+  InboundDoorBellRegister.All =
+    readb(ControllerBaseAddress + DAC960_LP_InboundDoorBellRegisterOffset);
+  return InboundDoorBellRegister.Read.InitializationInProgress;
+}
+
+static inline
+void DAC960_LP_AcknowledgeHardwareMailboxInterrupt(void __iomem *ControllerBaseAddress)
+{
+  DAC960_LP_OutboundDoorBellRegister_T OutboundDoorBellRegister;
+  OutboundDoorBellRegister.All = 0;
+  OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true;
+  writeb(OutboundDoorBellRegister.All,
+	 ControllerBaseAddress + DAC960_LP_OutboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_LP_AcknowledgeMemoryMailboxInterrupt(void __iomem *ControllerBaseAddress)
+{
+  DAC960_LP_OutboundDoorBellRegister_T OutboundDoorBellRegister;
+  OutboundDoorBellRegister.All = 0;
+  OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true;
+  writeb(OutboundDoorBellRegister.All,
+	 ControllerBaseAddress + DAC960_LP_OutboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_LP_AcknowledgeInterrupt(void __iomem *ControllerBaseAddress)
+{
+  DAC960_LP_OutboundDoorBellRegister_T OutboundDoorBellRegister;
+  OutboundDoorBellRegister.All = 0;
+  OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true;
+  OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true;
+  writeb(OutboundDoorBellRegister.All,
+	 ControllerBaseAddress + DAC960_LP_OutboundDoorBellRegisterOffset);
+}
+
+static inline
+boolean DAC960_LP_HardwareMailboxStatusAvailableP(void __iomem *ControllerBaseAddress)
+{
+  DAC960_LP_OutboundDoorBellRegister_T OutboundDoorBellRegister;
+  OutboundDoorBellRegister.All =
+    readb(ControllerBaseAddress + DAC960_LP_OutboundDoorBellRegisterOffset);
+  return OutboundDoorBellRegister.Read.HardwareMailboxStatusAvailable;
+}
+
+static inline
+boolean DAC960_LP_MemoryMailboxStatusAvailableP(void __iomem *ControllerBaseAddress)
+{
+  DAC960_LP_OutboundDoorBellRegister_T OutboundDoorBellRegister;
+  OutboundDoorBellRegister.All =
+    readb(ControllerBaseAddress + DAC960_LP_OutboundDoorBellRegisterOffset);
+  return OutboundDoorBellRegister.Read.MemoryMailboxStatusAvailable;
+}
+
+static inline
+void DAC960_LP_EnableInterrupts(void __iomem *ControllerBaseAddress)
+{
+  DAC960_LP_InterruptMaskRegister_T InterruptMaskRegister;
+  InterruptMaskRegister.All = 0xFF;
+  InterruptMaskRegister.Bits.DisableInterrupts = false;
+  writeb(InterruptMaskRegister.All,
+	 ControllerBaseAddress + DAC960_LP_InterruptMaskRegisterOffset);
+}
+
+static inline
+void DAC960_LP_DisableInterrupts(void __iomem *ControllerBaseAddress)
+{
+  DAC960_LP_InterruptMaskRegister_T InterruptMaskRegister;
+  InterruptMaskRegister.All = 0xFF;
+  InterruptMaskRegister.Bits.DisableInterrupts = true;
+  writeb(InterruptMaskRegister.All,
+	 ControllerBaseAddress + DAC960_LP_InterruptMaskRegisterOffset);
+}
+
+static inline
+boolean DAC960_LP_InterruptsEnabledP(void __iomem *ControllerBaseAddress)
+{
+  DAC960_LP_InterruptMaskRegister_T InterruptMaskRegister;
+  InterruptMaskRegister.All =
+    readb(ControllerBaseAddress + DAC960_LP_InterruptMaskRegisterOffset);
+  return !InterruptMaskRegister.Bits.DisableInterrupts;
+}
+
+static inline
+void DAC960_LP_WriteCommandMailbox(DAC960_V2_CommandMailbox_T
+				     *MemoryCommandMailbox,
+				   DAC960_V2_CommandMailbox_T
+				     *CommandMailbox)
+{
+  memcpy(&MemoryCommandMailbox->Words[1], &CommandMailbox->Words[1],
+	 sizeof(DAC960_V2_CommandMailbox_T) - sizeof(unsigned int));
+  wmb();
+  MemoryCommandMailbox->Words[0] = CommandMailbox->Words[0];
+  mb();
+}
+
+static inline
+void DAC960_LP_WriteHardwareMailbox(void __iomem *ControllerBaseAddress,
+				    dma_addr_t CommandMailboxDMA)
+{
+	dma_addr_writeql(CommandMailboxDMA,
+		ControllerBaseAddress +
+		DAC960_LP_CommandMailboxBusAddressOffset);
+}
+
+static inline DAC960_V2_CommandIdentifier_T
+DAC960_LP_ReadCommandIdentifier(void __iomem *ControllerBaseAddress)
+{
+  return readw(ControllerBaseAddress + DAC960_LP_CommandStatusOffset);
+}
+
+static inline DAC960_V2_CommandStatus_T
+DAC960_LP_ReadCommandStatus(void __iomem *ControllerBaseAddress)
+{
+  return readw(ControllerBaseAddress + DAC960_LP_CommandStatusOffset + 2);
+}
+
+static inline boolean
+DAC960_LP_ReadErrorStatus(void __iomem *ControllerBaseAddress,
+			  unsigned char *ErrorStatus,
+			  unsigned char *Parameter0,
+			  unsigned char *Parameter1)
+{
+  DAC960_LP_ErrorStatusRegister_T ErrorStatusRegister;
+  ErrorStatusRegister.All =
+    readb(ControllerBaseAddress + DAC960_LP_ErrorStatusRegisterOffset);
+  if (!ErrorStatusRegister.Bits.ErrorStatusPending) return false;
+  ErrorStatusRegister.Bits.ErrorStatusPending = false;
+  *ErrorStatus = ErrorStatusRegister.All;
+  *Parameter0 =
+    readb(ControllerBaseAddress + DAC960_LP_CommandMailboxBusAddressOffset + 0);
+  *Parameter1 =
+    readb(ControllerBaseAddress + DAC960_LP_CommandMailboxBusAddressOffset + 1);
+  writeb(0xFF, ControllerBaseAddress + DAC960_LP_ErrorStatusRegisterOffset);
+  return true;
+}
+
+
+/*
+  Define the DAC960 LA Series Controller Interface Register Offsets.
+*/
+
+#define DAC960_LA_RegisterWindowSize		0x80
+
+typedef enum
+{
+  DAC960_LA_InboundDoorBellRegisterOffset =	0x60,
+  DAC960_LA_OutboundDoorBellRegisterOffset =	0x61,
+  DAC960_LA_InterruptMaskRegisterOffset =	0x34,
+  DAC960_LA_CommandOpcodeRegisterOffset =	0x50,
+  DAC960_LA_CommandIdentifierRegisterOffset =	0x51,
+  DAC960_LA_MailboxRegister2Offset =		0x52,
+  DAC960_LA_MailboxRegister3Offset =		0x53,
+  DAC960_LA_MailboxRegister4Offset =		0x54,
+  DAC960_LA_MailboxRegister5Offset =		0x55,
+  DAC960_LA_MailboxRegister6Offset =		0x56,
+  DAC960_LA_MailboxRegister7Offset =		0x57,
+  DAC960_LA_MailboxRegister8Offset =		0x58,
+  DAC960_LA_MailboxRegister9Offset =		0x59,
+  DAC960_LA_MailboxRegister10Offset =		0x5A,
+  DAC960_LA_MailboxRegister11Offset =		0x5B,
+  DAC960_LA_MailboxRegister12Offset =		0x5C,
+  DAC960_LA_StatusCommandIdentifierRegOffset =	0x5D,
+  DAC960_LA_StatusRegisterOffset =		0x5E,
+  DAC960_LA_ErrorStatusRegisterOffset =		0x63
+}
+DAC960_LA_RegisterOffsets_T;
+
+
+/*
+  Define the structure of the DAC960 LA Series Inbound Door Bell Register.
+*/
+
+typedef union DAC960_LA_InboundDoorBellRegister
+{
+  unsigned char All;
+  struct {
+    boolean HardwareMailboxNewCommand:1;		/* Bit 0 */
+    boolean AcknowledgeHardwareMailboxStatus:1;		/* Bit 1 */
+    boolean GenerateInterrupt:1;			/* Bit 2 */
+    boolean ControllerReset:1;				/* Bit 3 */
+    boolean MemoryMailboxNewCommand:1;			/* Bit 4 */
+    unsigned char :3;					/* Bits 5-7 */
+  } Write;
+  struct {
+    boolean HardwareMailboxEmpty:1;			/* Bit 0 */
+    boolean InitializationNotInProgress:1;		/* Bit 1 */
+    unsigned char :6;					/* Bits 2-7 */
+  } Read;
+}
+DAC960_LA_InboundDoorBellRegister_T;
+
+
+/*
+  Define the structure of the DAC960 LA Series Outbound Door Bell Register.
+*/
+
+typedef union DAC960_LA_OutboundDoorBellRegister
+{
+  unsigned char All;
+  struct {
+    boolean AcknowledgeHardwareMailboxInterrupt:1;	/* Bit 0 */
+    boolean AcknowledgeMemoryMailboxInterrupt:1;	/* Bit 1 */
+    unsigned char :6;					/* Bits 2-7 */
+  } Write;
+  struct {
+    boolean HardwareMailboxStatusAvailable:1;		/* Bit 0 */
+    boolean MemoryMailboxStatusAvailable:1;		/* Bit 1 */
+    unsigned char :6;					/* Bits 2-7 */
+  } Read;
+}
+DAC960_LA_OutboundDoorBellRegister_T;
+
+
+/*
+  Define the structure of the DAC960 LA Series Interrupt Mask Register.
+*/
+
+typedef union DAC960_LA_InterruptMaskRegister
+{
+  unsigned char All;
+  struct {
+    unsigned char :2;					/* Bits 0-1 */
+    boolean DisableInterrupts:1;			/* Bit 2 */
+    unsigned char :5;					/* Bits 3-7 */
+  } Bits;
+}
+DAC960_LA_InterruptMaskRegister_T;
+
+
+/*
+  Define the structure of the DAC960 LA Series Error Status Register.
+*/
+
+typedef union DAC960_LA_ErrorStatusRegister
+{
+  unsigned char All;
+  struct {
+    unsigned int :2;					/* Bits 0-1 */
+    boolean ErrorStatusPending:1;			/* Bit 2 */
+    unsigned int :5;					/* Bits 3-7 */
+  } Bits;
+}
+DAC960_LA_ErrorStatusRegister_T;
+
+
+/*
+  Define inline functions to provide an abstraction for reading and writing the
+  DAC960 LA Series Controller Interface Registers.
+*/
+
+static inline
+void DAC960_LA_HardwareMailboxNewCommand(void __iomem *ControllerBaseAddress)
+{
+  DAC960_LA_InboundDoorBellRegister_T InboundDoorBellRegister;
+  InboundDoorBellRegister.All = 0;
+  InboundDoorBellRegister.Write.HardwareMailboxNewCommand = true;
+  writeb(InboundDoorBellRegister.All,
+	 ControllerBaseAddress + DAC960_LA_InboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_LA_AcknowledgeHardwareMailboxStatus(void __iomem *ControllerBaseAddress)
+{
+  DAC960_LA_InboundDoorBellRegister_T InboundDoorBellRegister;
+  InboundDoorBellRegister.All = 0;
+  InboundDoorBellRegister.Write.AcknowledgeHardwareMailboxStatus = true;
+  writeb(InboundDoorBellRegister.All,
+	 ControllerBaseAddress + DAC960_LA_InboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_LA_GenerateInterrupt(void __iomem *ControllerBaseAddress)
+{
+  DAC960_LA_InboundDoorBellRegister_T InboundDoorBellRegister;
+  InboundDoorBellRegister.All = 0;
+  InboundDoorBellRegister.Write.GenerateInterrupt = true;
+  writeb(InboundDoorBellRegister.All,
+	 ControllerBaseAddress + DAC960_LA_InboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_LA_ControllerReset(void __iomem *ControllerBaseAddress)
+{
+  DAC960_LA_InboundDoorBellRegister_T InboundDoorBellRegister;
+  InboundDoorBellRegister.All = 0;
+  InboundDoorBellRegister.Write.ControllerReset = true;
+  writeb(InboundDoorBellRegister.All,
+	 ControllerBaseAddress + DAC960_LA_InboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_LA_MemoryMailboxNewCommand(void __iomem *ControllerBaseAddress)
+{
+  DAC960_LA_InboundDoorBellRegister_T InboundDoorBellRegister;
+  InboundDoorBellRegister.All = 0;
+  InboundDoorBellRegister.Write.MemoryMailboxNewCommand = true;
+  writeb(InboundDoorBellRegister.All,
+	 ControllerBaseAddress + DAC960_LA_InboundDoorBellRegisterOffset);
+}
+
+static inline
+boolean DAC960_LA_HardwareMailboxFullP(void __iomem *ControllerBaseAddress)
+{
+  DAC960_LA_InboundDoorBellRegister_T InboundDoorBellRegister;
+  InboundDoorBellRegister.All =
+    readb(ControllerBaseAddress + DAC960_LA_InboundDoorBellRegisterOffset);
+  return !InboundDoorBellRegister.Read.HardwareMailboxEmpty;
+}
+
+static inline
+boolean DAC960_LA_InitializationInProgressP(void __iomem *ControllerBaseAddress)
+{
+  DAC960_LA_InboundDoorBellRegister_T InboundDoorBellRegister;
+  InboundDoorBellRegister.All =
+    readb(ControllerBaseAddress + DAC960_LA_InboundDoorBellRegisterOffset);
+  return !InboundDoorBellRegister.Read.InitializationNotInProgress;
+}
+
+static inline
+void DAC960_LA_AcknowledgeHardwareMailboxInterrupt(void __iomem *ControllerBaseAddress)
+{
+  DAC960_LA_OutboundDoorBellRegister_T OutboundDoorBellRegister;
+  OutboundDoorBellRegister.All = 0;
+  OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true;
+  writeb(OutboundDoorBellRegister.All,
+	 ControllerBaseAddress + DAC960_LA_OutboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_LA_AcknowledgeMemoryMailboxInterrupt(void __iomem *ControllerBaseAddress)
+{
+  DAC960_LA_OutboundDoorBellRegister_T OutboundDoorBellRegister;
+  OutboundDoorBellRegister.All = 0;
+  OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true;
+  writeb(OutboundDoorBellRegister.All,
+	 ControllerBaseAddress + DAC960_LA_OutboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_LA_AcknowledgeInterrupt(void __iomem *ControllerBaseAddress)
+{
+  DAC960_LA_OutboundDoorBellRegister_T OutboundDoorBellRegister;
+  OutboundDoorBellRegister.All = 0;
+  OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true;
+  OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true;
+  writeb(OutboundDoorBellRegister.All,
+	 ControllerBaseAddress + DAC960_LA_OutboundDoorBellRegisterOffset);
+}
+
+static inline
+boolean DAC960_LA_HardwareMailboxStatusAvailableP(void __iomem *ControllerBaseAddress)
+{
+  DAC960_LA_OutboundDoorBellRegister_T OutboundDoorBellRegister;
+  OutboundDoorBellRegister.All =
+    readb(ControllerBaseAddress + DAC960_LA_OutboundDoorBellRegisterOffset);
+  return OutboundDoorBellRegister.Read.HardwareMailboxStatusAvailable;
+}
+
+static inline
+boolean DAC960_LA_MemoryMailboxStatusAvailableP(void __iomem *ControllerBaseAddress)
+{
+  DAC960_LA_OutboundDoorBellRegister_T OutboundDoorBellRegister;
+  OutboundDoorBellRegister.All =
+    readb(ControllerBaseAddress + DAC960_LA_OutboundDoorBellRegisterOffset);
+  return OutboundDoorBellRegister.Read.MemoryMailboxStatusAvailable;
+}
+
+static inline
+void DAC960_LA_EnableInterrupts(void __iomem *ControllerBaseAddress)
+{
+  DAC960_LA_InterruptMaskRegister_T InterruptMaskRegister;
+  InterruptMaskRegister.All = 0xFF;
+  InterruptMaskRegister.Bits.DisableInterrupts = false;
+  writeb(InterruptMaskRegister.All,
+	 ControllerBaseAddress + DAC960_LA_InterruptMaskRegisterOffset);
+}
+
+static inline
+void DAC960_LA_DisableInterrupts(void __iomem *ControllerBaseAddress)
+{
+  DAC960_LA_InterruptMaskRegister_T InterruptMaskRegister;
+  InterruptMaskRegister.All = 0xFF;
+  InterruptMaskRegister.Bits.DisableInterrupts = true;
+  writeb(InterruptMaskRegister.All,
+	 ControllerBaseAddress + DAC960_LA_InterruptMaskRegisterOffset);
+}
+
+static inline
+boolean DAC960_LA_InterruptsEnabledP(void __iomem *ControllerBaseAddress)
+{
+  DAC960_LA_InterruptMaskRegister_T InterruptMaskRegister;
+  InterruptMaskRegister.All =
+    readb(ControllerBaseAddress + DAC960_LA_InterruptMaskRegisterOffset);
+  return !InterruptMaskRegister.Bits.DisableInterrupts;
+}
+
+static inline
+void DAC960_LA_WriteCommandMailbox(DAC960_V1_CommandMailbox_T
+				     *MemoryCommandMailbox,
+				   DAC960_V1_CommandMailbox_T
+				     *CommandMailbox)
+{
+  MemoryCommandMailbox->Words[1] = CommandMailbox->Words[1];
+  MemoryCommandMailbox->Words[2] = CommandMailbox->Words[2];
+  MemoryCommandMailbox->Words[3] = CommandMailbox->Words[3];
+  wmb();
+  MemoryCommandMailbox->Words[0] = CommandMailbox->Words[0];
+  mb();
+}
+
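+/*
+  Unlike the BA and LP series, the LA series has no 64-bit mailbox bus
+  address register: the first thirteen bytes of the V1 command mailbox are
+  written directly into the mailbox registers at 0x50-0x5C, three 32-bit
+  words followed by a single byte.
+*/
+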
+static inline
+void DAC960_LA_WriteHardwareMailbox(void __iomem *ControllerBaseAddress,
+				    DAC960_V1_CommandMailbox_T *CommandMailbox)
+{
+  writel(CommandMailbox->Words[0],
+	 ControllerBaseAddress + DAC960_LA_CommandOpcodeRegisterOffset);
+  writel(CommandMailbox->Words[1],
+	 ControllerBaseAddress + DAC960_LA_MailboxRegister4Offset);
+  writel(CommandMailbox->Words[2],
+	 ControllerBaseAddress + DAC960_LA_MailboxRegister8Offset);
+  writeb(CommandMailbox->Bytes[12],
+	 ControllerBaseAddress + DAC960_LA_MailboxRegister12Offset);
+}
+
+static inline DAC960_V1_CommandIdentifier_T
+DAC960_LA_ReadStatusCommandIdentifier(void __iomem *ControllerBaseAddress)
+{
+  return readb(ControllerBaseAddress
+	       + DAC960_LA_StatusCommandIdentifierRegOffset);
+}
+
+static inline DAC960_V1_CommandStatus_T
+DAC960_LA_ReadStatusRegister(void __iomem *ControllerBaseAddress)
+{
+  return readw(ControllerBaseAddress + DAC960_LA_StatusRegisterOffset);
+}
+
+static inline boolean
+DAC960_LA_ReadErrorStatus(void __iomem *ControllerBaseAddress,
+			  unsigned char *ErrorStatus,
+			  unsigned char *Parameter0,
+			  unsigned char *Parameter1)
+{
+  DAC960_LA_ErrorStatusRegister_T ErrorStatusRegister;
+  ErrorStatusRegister.All =
+    readb(ControllerBaseAddress + DAC960_LA_ErrorStatusRegisterOffset);
+  if (!ErrorStatusRegister.Bits.ErrorStatusPending) return false;
+  ErrorStatusRegister.Bits.ErrorStatusPending = false;
+  *ErrorStatus = ErrorStatusRegister.All;
+  *Parameter0 =
+    readb(ControllerBaseAddress + DAC960_LA_CommandOpcodeRegisterOffset);
+  *Parameter1 =
+    readb(ControllerBaseAddress + DAC960_LA_CommandIdentifierRegisterOffset);
+  writeb(0xFF, ControllerBaseAddress + DAC960_LA_ErrorStatusRegisterOffset);
+  return true;
+}
+
+/*
+  Define the DAC960 PG Series Controller Interface Register Offsets.
+*/
+
+#define DAC960_PG_RegisterWindowSize		0x2000
+
+typedef enum
+{
+  DAC960_PG_InboundDoorBellRegisterOffset =	0x0020,
+  DAC960_PG_OutboundDoorBellRegisterOffset =	0x002C,
+  DAC960_PG_InterruptMaskRegisterOffset =	0x0034,
+  DAC960_PG_CommandOpcodeRegisterOffset =	0x1000,
+  DAC960_PG_CommandIdentifierRegisterOffset =	0x1001,
+  DAC960_PG_MailboxRegister2Offset =		0x1002,
+  DAC960_PG_MailboxRegister3Offset =		0x1003,
+  DAC960_PG_MailboxRegister4Offset =		0x1004,
+  DAC960_PG_MailboxRegister5Offset =		0x1005,
+  DAC960_PG_MailboxRegister6Offset =		0x1006,
+  DAC960_PG_MailboxRegister7Offset =		0x1007,
+  DAC960_PG_MailboxRegister8Offset =		0x1008,
+  DAC960_PG_MailboxRegister9Offset =		0x1009,
+  DAC960_PG_MailboxRegister10Offset =		0x100A,
+  DAC960_PG_MailboxRegister11Offset =		0x100B,
+  DAC960_PG_MailboxRegister12Offset =		0x100C,
+  DAC960_PG_StatusCommandIdentifierRegOffset =	0x1018,
+  DAC960_PG_StatusRegisterOffset =		0x101A,
+  DAC960_PG_ErrorStatusRegisterOffset =		0x103F
+}
+DAC960_PG_RegisterOffsets_T;
+
+
+/*
+  Define the structure of the DAC960 PG Series Inbound Door Bell Register.
+*/
+
+typedef union DAC960_PG_InboundDoorBellRegister
+{
+  unsigned int All;
+  struct {
+    boolean HardwareMailboxNewCommand:1;		/* Bit 0 */
+    boolean AcknowledgeHardwareMailboxStatus:1;		/* Bit 1 */
+    boolean GenerateInterrupt:1;			/* Bit 2 */
+    boolean ControllerReset:1;				/* Bit 3 */
+    boolean MemoryMailboxNewCommand:1;			/* Bit 4 */
+    unsigned int :27;					/* Bits 5-31 */
+  } Write;
+  struct {
+    boolean HardwareMailboxFull:1;			/* Bit 0 */
+    boolean InitializationInProgress:1;			/* Bit 1 */
+    unsigned int :30;					/* Bits 2-31 */
+  } Read;
+}
+DAC960_PG_InboundDoorBellRegister_T;
+
+
+/*
+  Define the structure of the DAC960 PG Series Outbound Door Bell Register.
+*/
+
+typedef union DAC960_PG_OutboundDoorBellRegister
+{
+  unsigned int All;
+  struct {
+    boolean AcknowledgeHardwareMailboxInterrupt:1;	/* Bit 0 */
+    boolean AcknowledgeMemoryMailboxInterrupt:1;	/* Bit 1 */
+    unsigned int :30;					/* Bits 2-31 */
+  } Write;
+  struct {
+    boolean HardwareMailboxStatusAvailable:1;		/* Bit 0 */
+    boolean MemoryMailboxStatusAvailable:1;		/* Bit 1 */
+    unsigned int :30;					/* Bits 2-31 */
+  } Read;
+}
+DAC960_PG_OutboundDoorBellRegister_T;
+
+
+/*
+  Define the structure of the DAC960 PG Series Interrupt Mask Register.
+*/
+
+typedef union DAC960_PG_InterruptMaskRegister
+{
+  unsigned int All;
+  struct {
+    unsigned int MessageUnitInterruptMask1:2;		/* Bits 0-1 */
+    boolean DisableInterrupts:1;			/* Bit 2 */
+    unsigned int MessageUnitInterruptMask2:5;		/* Bits 3-7 */
+    unsigned int Reserved0:24;				/* Bits 8-31 */
+  } Bits;
+}
+DAC960_PG_InterruptMaskRegister_T;
+
+
+/*
+  Define the structure of the DAC960 PG Series Error Status Register.
+*/
+
+typedef union DAC960_PG_ErrorStatusRegister
+{
+  unsigned char All;
+  struct {
+    unsigned int :2;					/* Bits 0-1 */
+    boolean ErrorStatusPending:1;			/* Bit 2 */
+    unsigned int :5;					/* Bits 3-7 */
+  } Bits;
+}
+DAC960_PG_ErrorStatusRegister_T;
+
+
+/*
+  Define inline functions to provide an abstraction for reading and writing the
+  DAC960 PG Series Controller Interface Registers.
+*/
+
+static inline
+void DAC960_PG_HardwareMailboxNewCommand(void __iomem *ControllerBaseAddress)
+{
+  DAC960_PG_InboundDoorBellRegister_T InboundDoorBellRegister;
+  InboundDoorBellRegister.All = 0;
+  InboundDoorBellRegister.Write.HardwareMailboxNewCommand = true;
+  writel(InboundDoorBellRegister.All,
+	 ControllerBaseAddress + DAC960_PG_InboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_PG_AcknowledgeHardwareMailboxStatus(void __iomem *ControllerBaseAddress)
+{
+  DAC960_PG_InboundDoorBellRegister_T InboundDoorBellRegister;
+  InboundDoorBellRegister.All = 0;
+  InboundDoorBellRegister.Write.AcknowledgeHardwareMailboxStatus = true;
+  writel(InboundDoorBellRegister.All,
+	 ControllerBaseAddress + DAC960_PG_InboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_PG_GenerateInterrupt(void __iomem *ControllerBaseAddress)
+{
+  DAC960_PG_InboundDoorBellRegister_T InboundDoorBellRegister;
+  InboundDoorBellRegister.All = 0;
+  InboundDoorBellRegister.Write.GenerateInterrupt = true;
+  writel(InboundDoorBellRegister.All,
+	 ControllerBaseAddress + DAC960_PG_InboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_PG_ControllerReset(void __iomem *ControllerBaseAddress)
+{
+  DAC960_PG_InboundDoorBellRegister_T InboundDoorBellRegister;
+  InboundDoorBellRegister.All = 0;
+  InboundDoorBellRegister.Write.ControllerReset = true;
+  writel(InboundDoorBellRegister.All,
+	 ControllerBaseAddress + DAC960_PG_InboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_PG_MemoryMailboxNewCommand(void __iomem *ControllerBaseAddress)
+{
+  DAC960_PG_InboundDoorBellRegister_T InboundDoorBellRegister;
+  InboundDoorBellRegister.All = 0;
+  InboundDoorBellRegister.Write.MemoryMailboxNewCommand = true;
+  writel(InboundDoorBellRegister.All,
+	 ControllerBaseAddress + DAC960_PG_InboundDoorBellRegisterOffset);
+}
+
+static inline
+boolean DAC960_PG_HardwareMailboxFullP(void __iomem *ControllerBaseAddress)
+{
+  DAC960_PG_InboundDoorBellRegister_T InboundDoorBellRegister;
+  InboundDoorBellRegister.All =
+    readl(ControllerBaseAddress + DAC960_PG_InboundDoorBellRegisterOffset);
+  return InboundDoorBellRegister.Read.HardwareMailboxFull;
+}
+
+static inline
+boolean DAC960_PG_InitializationInProgressP(void __iomem *ControllerBaseAddress)
+{
+  DAC960_PG_InboundDoorBellRegister_T InboundDoorBellRegister;
+  InboundDoorBellRegister.All =
+    readl(ControllerBaseAddress + DAC960_PG_InboundDoorBellRegisterOffset);
+  return InboundDoorBellRegister.Read.InitializationInProgress;
+}
+
+static inline
+void DAC960_PG_AcknowledgeHardwareMailboxInterrupt(void __iomem *ControllerBaseAddress)
+{
+  DAC960_PG_OutboundDoorBellRegister_T OutboundDoorBellRegister;
+  OutboundDoorBellRegister.All = 0;
+  OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true;
+  writel(OutboundDoorBellRegister.All,
+	 ControllerBaseAddress + DAC960_PG_OutboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_PG_AcknowledgeMemoryMailboxInterrupt(void __iomem *ControllerBaseAddress)
+{
+  DAC960_PG_OutboundDoorBellRegister_T OutboundDoorBellRegister;
+  OutboundDoorBellRegister.All = 0;
+  OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true;
+  writel(OutboundDoorBellRegister.All,
+	 ControllerBaseAddress + DAC960_PG_OutboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_PG_AcknowledgeInterrupt(void __iomem *ControllerBaseAddress)
+{
+  DAC960_PG_OutboundDoorBellRegister_T OutboundDoorBellRegister;
+  OutboundDoorBellRegister.All = 0;
+  OutboundDoorBellRegister.Write.AcknowledgeHardwareMailboxInterrupt = true;
+  OutboundDoorBellRegister.Write.AcknowledgeMemoryMailboxInterrupt = true;
+  writel(OutboundDoorBellRegister.All,
+	 ControllerBaseAddress + DAC960_PG_OutboundDoorBellRegisterOffset);
+}
+
+static inline
+boolean DAC960_PG_HardwareMailboxStatusAvailableP(void __iomem *ControllerBaseAddress)
+{
+  DAC960_PG_OutboundDoorBellRegister_T OutboundDoorBellRegister;
+  OutboundDoorBellRegister.All =
+    readl(ControllerBaseAddress + DAC960_PG_OutboundDoorBellRegisterOffset);
+  return OutboundDoorBellRegister.Read.HardwareMailboxStatusAvailable;
+}
+
+static inline
+boolean DAC960_PG_MemoryMailboxStatusAvailableP(void __iomem *ControllerBaseAddress)
+{
+  DAC960_PG_OutboundDoorBellRegister_T OutboundDoorBellRegister;
+  OutboundDoorBellRegister.All =
+    readl(ControllerBaseAddress + DAC960_PG_OutboundDoorBellRegisterOffset);
+  return OutboundDoorBellRegister.Read.MemoryMailboxStatusAvailable;
+}
+
+static inline
+void DAC960_PG_EnableInterrupts(void __iomem *ControllerBaseAddress)
+{
+  DAC960_PG_InterruptMaskRegister_T InterruptMaskRegister;
+  InterruptMaskRegister.All = 0;
+  InterruptMaskRegister.Bits.MessageUnitInterruptMask1 = 0x3;
+  InterruptMaskRegister.Bits.DisableInterrupts = false;
+  InterruptMaskRegister.Bits.MessageUnitInterruptMask2 = 0x1F;
+  writel(InterruptMaskRegister.All,
+	 ControllerBaseAddress + DAC960_PG_InterruptMaskRegisterOffset);
+}
+
+static inline
+void DAC960_PG_DisableInterrupts(void __iomem *ControllerBaseAddress)
+{
+  DAC960_PG_InterruptMaskRegister_T InterruptMaskRegister;
+  InterruptMaskRegister.All = 0;
+  InterruptMaskRegister.Bits.MessageUnitInterruptMask1 = 0x3;
+  InterruptMaskRegister.Bits.DisableInterrupts = true;
+  InterruptMaskRegister.Bits.MessageUnitInterruptMask2 = 0x1F;
+  writel(InterruptMaskRegister.All,
+	 ControllerBaseAddress + DAC960_PG_InterruptMaskRegisterOffset);
+}
+
+static inline
+boolean DAC960_PG_InterruptsEnabledP(void __iomem *ControllerBaseAddress)
+{
+  DAC960_PG_InterruptMaskRegister_T InterruptMaskRegister;
+  InterruptMaskRegister.All =
+    readl(ControllerBaseAddress + DAC960_PG_InterruptMaskRegisterOffset);
+  return !InterruptMaskRegister.Bits.DisableInterrupts;
+}
+
+static inline
+void DAC960_PG_WriteCommandMailbox(DAC960_V1_CommandMailbox_T
+				     *MemoryCommandMailbox,
+				   DAC960_V1_CommandMailbox_T
+				     *CommandMailbox)
+{
+  MemoryCommandMailbox->Words[1] = CommandMailbox->Words[1];
+  MemoryCommandMailbox->Words[2] = CommandMailbox->Words[2];
+  MemoryCommandMailbox->Words[3] = CommandMailbox->Words[3];
+  /* The controller treats the memory mailbox as occupied once Words[0]
+     (which holds the opcode and command identifier) becomes non-zero, so
+     the barrier ensures the rest of the command is visible first. */
+  wmb();
+  MemoryCommandMailbox->Words[0] = CommandMailbox->Words[0];
+  mb();
+}
+
+static inline
+void DAC960_PG_WriteHardwareMailbox(void __iomem *ControllerBaseAddress,
+				    DAC960_V1_CommandMailbox_T *CommandMailbox)
+{
+  writel(CommandMailbox->Words[0],
+	 ControllerBaseAddress + DAC960_PG_CommandOpcodeRegisterOffset);
+  writel(CommandMailbox->Words[1],
+	 ControllerBaseAddress + DAC960_PG_MailboxRegister4Offset);
+  writel(CommandMailbox->Words[2],
+	 ControllerBaseAddress + DAC960_PG_MailboxRegister8Offset);
+  writeb(CommandMailbox->Bytes[12],
+	 ControllerBaseAddress + DAC960_PG_MailboxRegister12Offset);
+}
+
+static inline DAC960_V1_CommandIdentifier_T
+DAC960_PG_ReadStatusCommandIdentifier(void __iomem *ControllerBaseAddress)
+{
+  return readb(ControllerBaseAddress
+	       + DAC960_PG_StatusCommandIdentifierRegOffset);
+}
+
+static inline DAC960_V1_CommandStatus_T
+DAC960_PG_ReadStatusRegister(void __iomem *ControllerBaseAddress)
+{
+  return readw(ControllerBaseAddress + DAC960_PG_StatusRegisterOffset);
+}
+
+static inline boolean
+DAC960_PG_ReadErrorStatus(void __iomem *ControllerBaseAddress,
+			  unsigned char *ErrorStatus,
+			  unsigned char *Parameter0,
+			  unsigned char *Parameter1)
+{
+  DAC960_PG_ErrorStatusRegister_T ErrorStatusRegister;
+  ErrorStatusRegister.All =
+    readb(ControllerBaseAddress + DAC960_PG_ErrorStatusRegisterOffset);
+  if (!ErrorStatusRegister.Bits.ErrorStatusPending) return false;
+  ErrorStatusRegister.Bits.ErrorStatusPending = false;
+  *ErrorStatus = ErrorStatusRegister.All;
+  *Parameter0 =
+    readb(ControllerBaseAddress + DAC960_PG_CommandOpcodeRegisterOffset);
+  *Parameter1 =
+    readb(ControllerBaseAddress + DAC960_PG_CommandIdentifierRegisterOffset);
+  writeb(0, ControllerBaseAddress + DAC960_PG_ErrorStatusRegisterOffset);
+  return true;
+}
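+
+/*
+  Illustrative sketch (not part of the driver): the primitives above are
+  typically combined into a polling sequence like the following, which
+  issues a hardware mailbox command and waits for its completion.  The
+  Base, CommandMailbox and CommandStatus names are hypothetical stand-ins
+  for the controller's mapped PCI base address and a caller's locals.
+
+	DAC960_PG_WriteHardwareMailbox(Base, &CommandMailbox);
+	DAC960_PG_HardwareMailboxNewCommand(Base);
+	while (!DAC960_PG_HardwareMailboxStatusAvailableP(Base))
+	  udelay(1);
+	CommandStatus = DAC960_PG_ReadStatusRegister(Base);
+	DAC960_PG_AcknowledgeHardwareMailboxInterrupt(Base);
+	DAC960_PG_AcknowledgeHardwareMailboxStatus(Base);
+*/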
+
+/*
+  Define the DAC960 PD Series Controller Interface Register Offsets.
+*/
+
+#define DAC960_PD_RegisterWindowSize		0x80
+
+typedef enum
+{
+  DAC960_PD_CommandOpcodeRegisterOffset =	0x00,
+  DAC960_PD_CommandIdentifierRegisterOffset =	0x01,
+  DAC960_PD_MailboxRegister2Offset =		0x02,
+  DAC960_PD_MailboxRegister3Offset =		0x03,
+  DAC960_PD_MailboxRegister4Offset =		0x04,
+  DAC960_PD_MailboxRegister5Offset =		0x05,
+  DAC960_PD_MailboxRegister6Offset =		0x06,
+  DAC960_PD_MailboxRegister7Offset =		0x07,
+  DAC960_PD_MailboxRegister8Offset =		0x08,
+  DAC960_PD_MailboxRegister9Offset =		0x09,
+  DAC960_PD_MailboxRegister10Offset =		0x0A,
+  DAC960_PD_MailboxRegister11Offset =		0x0B,
+  DAC960_PD_MailboxRegister12Offset =		0x0C,
+  DAC960_PD_StatusCommandIdentifierRegOffset =	0x0D,
+  DAC960_PD_StatusRegisterOffset =		0x0E,
+  DAC960_PD_ErrorStatusRegisterOffset =		0x3F,
+  DAC960_PD_InboundDoorBellRegisterOffset =	0x40,
+  DAC960_PD_OutboundDoorBellRegisterOffset =	0x41,
+  DAC960_PD_InterruptEnableRegisterOffset =	0x43
+}
+DAC960_PD_RegisterOffsets_T;
+
+
+/*
+  Define the structure of the DAC960 PD Series Inbound Door Bell Register.
+*/
+
+typedef union DAC960_PD_InboundDoorBellRegister
+{
+  unsigned char All;
+  struct {
+    boolean NewCommand:1;				/* Bit 0 */
+    boolean AcknowledgeStatus:1;			/* Bit 1 */
+    boolean GenerateInterrupt:1;			/* Bit 2 */
+    boolean ControllerReset:1;				/* Bit 3 */
+    unsigned char :4;					/* Bits 4-7 */
+  } Write;
+  struct {
+    boolean MailboxFull:1;				/* Bit 0 */
+    boolean InitializationInProgress:1;			/* Bit 1 */
+    unsigned char :6;					/* Bits 2-7 */
+  } Read;
+}
+DAC960_PD_InboundDoorBellRegister_T;
+
+
+/*
+  Define the structure of the DAC960 PD Series Outbound Door Bell Register.
+*/
+
+typedef union DAC960_PD_OutboundDoorBellRegister
+{
+  unsigned char All;
+  struct {
+    boolean AcknowledgeInterrupt:1;			/* Bit 0 */
+    unsigned char :7;					/* Bits 1-7 */
+  } Write;
+  struct {
+    boolean StatusAvailable:1;				/* Bit 0 */
+    unsigned char :7;					/* Bits 1-7 */
+  } Read;
+}
+DAC960_PD_OutboundDoorBellRegister_T;
+
+
+/*
+  Define the structure of the DAC960 PD Series Interrupt Enable Register.
+*/
+
+typedef union DAC960_PD_InterruptEnableRegister
+{
+  unsigned char All;
+  struct {
+    boolean EnableInterrupts:1;				/* Bit 0 */
+    unsigned char :7;					/* Bits 1-7 */
+  } Bits;
+}
+DAC960_PD_InterruptEnableRegister_T;
+
+
+/*
+  Define the structure of the DAC960 PD Series Error Status Register.
+*/
+
+typedef union DAC960_PD_ErrorStatusRegister
+{
+  unsigned char All;
+  struct {
+    unsigned int :2;					/* Bits 0-1 */
+    boolean ErrorStatusPending:1;			/* Bit 2 */
+    unsigned int :5;					/* Bits 3-7 */
+  } Bits;
+}
+DAC960_PD_ErrorStatusRegister_T;
+
+
+/*
+  Define inline functions to provide an abstraction for reading and writing the
+  DAC960 PD Series Controller Interface Registers.
+*/
+
+static inline
+void DAC960_PD_NewCommand(void __iomem *ControllerBaseAddress)
+{
+  DAC960_PD_InboundDoorBellRegister_T InboundDoorBellRegister;
+  InboundDoorBellRegister.All = 0;
+  InboundDoorBellRegister.Write.NewCommand = true;
+  writeb(InboundDoorBellRegister.All,
+	 ControllerBaseAddress + DAC960_PD_InboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_PD_AcknowledgeStatus(void __iomem *ControllerBaseAddress)
+{
+  DAC960_PD_InboundDoorBellRegister_T InboundDoorBellRegister;
+  InboundDoorBellRegister.All = 0;
+  InboundDoorBellRegister.Write.AcknowledgeStatus = true;
+  writeb(InboundDoorBellRegister.All,
+	 ControllerBaseAddress + DAC960_PD_InboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_PD_GenerateInterrupt(void __iomem *ControllerBaseAddress)
+{
+  DAC960_PD_InboundDoorBellRegister_T InboundDoorBellRegister;
+  InboundDoorBellRegister.All = 0;
+  InboundDoorBellRegister.Write.GenerateInterrupt = true;
+  writeb(InboundDoorBellRegister.All,
+	 ControllerBaseAddress + DAC960_PD_InboundDoorBellRegisterOffset);
+}
+
+static inline
+void DAC960_PD_ControllerReset(void __iomem *ControllerBaseAddress)
+{
+  DAC960_PD_InboundDoorBellRegister_T InboundDoorBellRegister;
+  InboundDoorBellRegister.All = 0;
+  InboundDoorBellRegister.Write.ControllerReset = true;
+  writeb(InboundDoorBellRegister.All,
+	 ControllerBaseAddress + DAC960_PD_InboundDoorBellRegisterOffset);
+}
+
+static inline
+boolean DAC960_PD_MailboxFullP(void __iomem *ControllerBaseAddress)
+{
+  DAC960_PD_InboundDoorBellRegister_T InboundDoorBellRegister;
+  InboundDoorBellRegister.All =
+    readb(ControllerBaseAddress + DAC960_PD_InboundDoorBellRegisterOffset);
+  return InboundDoorBellRegister.Read.MailboxFull;
+}
+
+static inline
+boolean DAC960_PD_InitializationInProgressP(void __iomem *ControllerBaseAddress)
+{
+  DAC960_PD_InboundDoorBellRegister_T InboundDoorBellRegister;
+  InboundDoorBellRegister.All =
+    readb(ControllerBaseAddress + DAC960_PD_InboundDoorBellRegisterOffset);
+  return InboundDoorBellRegister.Read.InitializationInProgress;
+}
+
+static inline
+void DAC960_PD_AcknowledgeInterrupt(void __iomem *ControllerBaseAddress)
+{
+  DAC960_PD_OutboundDoorBellRegister_T OutboundDoorBellRegister;
+  OutboundDoorBellRegister.All = 0;
+  OutboundDoorBellRegister.Write.AcknowledgeInterrupt = true;
+  writeb(OutboundDoorBellRegister.All,
+	 ControllerBaseAddress + DAC960_PD_OutboundDoorBellRegisterOffset);
+}
+
+static inline
+boolean DAC960_PD_StatusAvailableP(void __iomem *ControllerBaseAddress)
+{
+  DAC960_PD_OutboundDoorBellRegister_T OutboundDoorBellRegister;
+  OutboundDoorBellRegister.All =
+    readb(ControllerBaseAddress + DAC960_PD_OutboundDoorBellRegisterOffset);
+  return OutboundDoorBellRegister.Read.StatusAvailable;
+}
+
+static inline
+void DAC960_PD_EnableInterrupts(void __iomem *ControllerBaseAddress)
+{
+  DAC960_PD_InterruptEnableRegister_T InterruptEnableRegister;
+  InterruptEnableRegister.All = 0;
+  InterruptEnableRegister.Bits.EnableInterrupts = true;
+  writeb(InterruptEnableRegister.All,
+	 ControllerBaseAddress + DAC960_PD_InterruptEnableRegisterOffset);
+}
+
+static inline
+void DAC960_PD_DisableInterrupts(void __iomem *ControllerBaseAddress)
+{
+  DAC960_PD_InterruptEnableRegister_T InterruptEnableRegister;
+  InterruptEnableRegister.All = 0;
+  InterruptEnableRegister.Bits.EnableInterrupts = false;
+  writeb(InterruptEnableRegister.All,
+	 ControllerBaseAddress + DAC960_PD_InterruptEnableRegisterOffset);
+}
+
+static inline
+boolean DAC960_PD_InterruptsEnabledP(void __iomem *ControllerBaseAddress)
+{
+  DAC960_PD_InterruptEnableRegister_T InterruptEnableRegister;
+  InterruptEnableRegister.All =
+    readb(ControllerBaseAddress + DAC960_PD_InterruptEnableRegisterOffset);
+  return InterruptEnableRegister.Bits.EnableInterrupts;
+}
+
+static inline
+void DAC960_PD_WriteCommandMailbox(void __iomem *ControllerBaseAddress,
+				   DAC960_V1_CommandMailbox_T *CommandMailbox)
+{
+  writel(CommandMailbox->Words[0],
+	 ControllerBaseAddress + DAC960_PD_CommandOpcodeRegisterOffset);
+  writel(CommandMailbox->Words[1],
+	 ControllerBaseAddress + DAC960_PD_MailboxRegister4Offset);
+  writel(CommandMailbox->Words[2],
+	 ControllerBaseAddress + DAC960_PD_MailboxRegister8Offset);
+  writeb(CommandMailbox->Bytes[12],
+	 ControllerBaseAddress + DAC960_PD_MailboxRegister12Offset);
+}
+
+static inline DAC960_V1_CommandIdentifier_T
+DAC960_PD_ReadStatusCommandIdentifier(void __iomem *ControllerBaseAddress)
+{
+  return readb(ControllerBaseAddress
+	       + DAC960_PD_StatusCommandIdentifierRegOffset);
+}
+
+static inline DAC960_V1_CommandStatus_T
+DAC960_PD_ReadStatusRegister(void __iomem *ControllerBaseAddress)
+{
+  return readw(ControllerBaseAddress + DAC960_PD_StatusRegisterOffset);
+}
+
+static inline boolean
+DAC960_PD_ReadErrorStatus(void __iomem *ControllerBaseAddress,
+			  unsigned char *ErrorStatus,
+			  unsigned char *Parameter0,
+			  unsigned char *Parameter1)
+{
+  DAC960_PD_ErrorStatusRegister_T ErrorStatusRegister;
+  ErrorStatusRegister.All =
+    readb(ControllerBaseAddress + DAC960_PD_ErrorStatusRegisterOffset);
+  if (!ErrorStatusRegister.Bits.ErrorStatusPending) return false;
+  ErrorStatusRegister.Bits.ErrorStatusPending = false;
+  *ErrorStatus = ErrorStatusRegister.All;
+  *Parameter0 =
+    readb(ControllerBaseAddress + DAC960_PD_CommandOpcodeRegisterOffset);
+  *Parameter1 =
+    readb(ControllerBaseAddress + DAC960_PD_CommandIdentifierRegisterOffset);
+  writeb(0, ControllerBaseAddress + DAC960_PD_ErrorStatusRegisterOffset);
+  return true;
+}
+
+static inline void DAC960_P_To_PD_TranslateEnquiry(void *Enquiry)
+{
+  memcpy(Enquiry + 132, Enquiry + 36, 64);
+  memset(Enquiry + 36, 0, 96);
+}
+
+static inline void DAC960_P_To_PD_TranslateDeviceState(void *DeviceState)
+{
+  memcpy(DeviceState + 2, DeviceState + 3, 1);
+  memcpy(DeviceState + 4, DeviceState + 5, 2);
+  memcpy(DeviceState + 6, DeviceState + 8, 4);
+}
+
+static inline
+void DAC960_PD_To_P_TranslateReadWriteCommand(DAC960_V1_CommandMailbox_T
+					      *CommandMailbox)
+{
+  int LogicalDriveNumber = CommandMailbox->Type5.LD.LogicalDriveNumber;
+  CommandMailbox->Bytes[3] &= 0x7;
+  CommandMailbox->Bytes[3] |= CommandMailbox->Bytes[7] << 6;
+  CommandMailbox->Bytes[7] = LogicalDriveNumber;
+}
+
+static inline
+void DAC960_P_To_PD_TranslateReadWriteCommand(DAC960_V1_CommandMailbox_T
+					      *CommandMailbox)
+{
+  int LogicalDriveNumber = CommandMailbox->Bytes[7];
+  CommandMailbox->Bytes[7] = CommandMailbox->Bytes[3] >> 6;
+  CommandMailbox->Bytes[3] &= 0x7;
+  CommandMailbox->Bytes[3] |= LogicalDriveNumber << 3;
+}
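+
+/*
+  Note (illustrative, inferred from the two helpers above): the PD-series
+  mailbox keeps the logical drive number in bits 3-7 of byte 3 and the most
+  significant block address byte in byte 7, while the older P-series
+  encoding keeps the logical drive number in byte 7 and only two extra
+  block address bits, in bits 6-7 of byte 3.  For commands within the
+  P-series addressing range the two translations are exact inverses of
+  each other.
+*/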
+
+
+/*
+  Define prototypes for the forward referenced DAC960 Driver Internal Functions.
+*/
+
+static void DAC960_FinalizeController(DAC960_Controller_T *);
+static void DAC960_V1_QueueReadWriteCommand(DAC960_Command_T *);
+static void DAC960_V2_QueueReadWriteCommand(DAC960_Command_T *); 
+static void DAC960_RequestFunction(struct request_queue *);
+static irqreturn_t DAC960_BA_InterruptHandler(int, void *, struct pt_regs *);
+static irqreturn_t DAC960_LP_InterruptHandler(int, void *, struct pt_regs *);
+static irqreturn_t DAC960_LA_InterruptHandler(int, void *, struct pt_regs *);
+static irqreturn_t DAC960_PG_InterruptHandler(int, void *, struct pt_regs *);
+static irqreturn_t DAC960_PD_InterruptHandler(int, void *, struct pt_regs *);
+static irqreturn_t DAC960_P_InterruptHandler(int, void *, struct pt_regs *);
+static void DAC960_V1_QueueMonitoringCommand(DAC960_Command_T *);
+static void DAC960_V2_QueueMonitoringCommand(DAC960_Command_T *);
+static void DAC960_MonitoringTimerFunction(unsigned long);
+static void DAC960_Message(DAC960_MessageLevel_T, unsigned char *,
+			   DAC960_Controller_T *, ...);
+static void DAC960_CreateProcEntries(DAC960_Controller_T *);
+static void DAC960_DestroyProcEntries(DAC960_Controller_T *);
+
+#endif /* DAC960_DriverVersion */
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
new file mode 100644
index 0000000..e83a1e2
--- /dev/null
+++ b/drivers/block/Kconfig
@@ -0,0 +1,509 @@
+#
+# Block device driver configuration
+#
+
+menu "Block devices"
+
+config BLK_DEV_FD
+	tristate "Normal floppy disk support"
+	depends on (!ARCH_S390 && !M68K && !IA64 && !UML) || Q40 || (SUN3X && BROKEN)
+	---help---
+	  If you want to use the floppy disk drive(s) of your PC under Linux,
+	  say Y. Information about this driver, especially important for IBM
+	  Thinkpad users, is contained in <file:Documentation/floppy.txt>.
+	  That file also contains the location of the Floppy driver FAQ as
+	  well as location of the fdutils package used to configure additional
+	  parameters of the driver at run time.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called floppy.
+
+config AMIGA_FLOPPY
+	tristate "Amiga floppy support"
+	depends on AMIGA
+
+config ATARI_FLOPPY
+	tristate "Atari floppy support"
+	depends on ATARI
+
+config BLK_DEV_SWIM_IOP
+	bool "Macintosh IIfx/Quadra 900/Quadra 950 floppy support (EXPERIMENTAL)"
+	depends on MAC && EXPERIMENTAL && BROKEN
+	help
+	  Say Y here to support the SWIM (Super Woz Integrated Machine) IOP
+	  floppy controller on the Macintosh IIfx and Quadra 900/950.
+
+config MAC_FLOPPY
+	tristate "Support for PowerMac floppy"
+	depends on PPC_PMAC && !PPC_PMAC64
+	help
+	  If you have a SWIM-3 (Super Woz Integrated Machine 3; from Apple)
+	  floppy controller, say Y here. Most commonly found in PowerMacs.
+
+config BLK_DEV_PS2
+	tristate "PS/2 ESDI hard disk support"
+	depends on MCA && MCA_LEGACY && BROKEN
+	help
+	  Say Y here if you have a PS/2 machine with a MCA bus and an ESDI
+	  hard disk.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called ps2esdi.
+
+config AMIGA_Z2RAM
+	tristate "Amiga Zorro II ramdisk support"
+	depends on ZORRO
+	help
+	  This enables support for using Chip RAM and Zorro II RAM as a
+	  ramdisk or as a swap partition. Say Y if you want to include this
+	  driver in the kernel.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called z2ram.
+
+config ATARI_ACSI
+	tristate "Atari ACSI support"
+	depends on ATARI && BROKEN
+	---help---
+	  This enables support for the Atari ACSI interface. The driver
+	  supports hard disks and CD-ROMs, which have 512-byte sectors, or can
+	  be switched to that mode. Due to the ACSI command format, only disks
+	  up to 1 GB are supported. Special support for certain ACSI to SCSI
+	  adapters, which could relax that, isn't included yet. The ACSI
+	  driver is also the basis for certain other drivers for devices
+	  attached to the ACSI bus: Atari SLM laser printer, BioNet-100
+	  Ethernet, and PAMsNet Ethernet. If you want to use one of these
+	  devices, you need ACSI support, too.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called acsi.
+
+comment "Some devices (e.g. CD jukebox) support multiple LUNs"
+	depends on ATARI && ATARI_ACSI
+
+config ACSI_MULTI_LUN
+	bool "Probe all LUNs on each ACSI device"
+	depends on ATARI_ACSI
+	help
+	  If you have an ACSI device that supports more than one LUN (Logical
+	  Unit Number), e.g. a CD jukebox, you should say Y here so that all
+	  of them will be found by the ACSI driver. An ACSI device with multiple LUNs
+	  acts logically like multiple ACSI devices. The vast majority of ACSI
+	  devices have only one LUN, and so most people can say N here and
+	  should in fact do so, because it is safer.
+
+config ATARI_SLM
+	tristate "Atari SLM laser printer support"
+	depends on ATARI && ATARI_ACSI!=n
+	help
+	  If you have an Atari SLM laser printer, say Y to include support for
+	  it in the kernel. Otherwise, say N. This driver is also available as
+	  a module ( = code which can be inserted in and removed from the
+	  running kernel whenever you want). The module will be called
+	  acsi_slm. Be warned: the driver needs a lot of ST-RAM and can cause
+	  problems because of that!
+
+config BLK_DEV_XD
+	tristate "XT hard disk support"
+	depends on ISA
+	help
+	  Very old 8 bit hard disk controllers used in the IBM XT computer
+	  will be supported if you say Y here.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called xd.
+
+	  It's pretty unlikely that you have one of these: say N.
+
+config PARIDE
+	tristate "Parallel port IDE device support"
+	depends on PARPORT
+	---help---
+	  There are many external CD-ROM and disk devices that connect through
+	  your computer's parallel port. Most of them are actually IDE devices
+	  using a parallel port IDE adapter. This option enables the PARIDE
+	  subsystem which contains drivers for many of these external drives.
+	  Read <file:Documentation/paride.txt> for more information.
+
+	  If you have said Y to the "Parallel-port support" configuration
+	  option, you may share a single port between your printer and other
+	  parallel port devices. Answer Y to build PARIDE support into your
+	  kernel, or M if you would like to build it as a loadable module. If
+	  your parallel port support is in a loadable module, you must build
+	  PARIDE as a module. If you built PARIDE support into your kernel,
+	  you may still build the individual protocol modules and high-level
+	  drivers as loadable modules. If you build this support as a module,
+	  it will be called paride.
+
+	  To use the PARIDE support, you must say Y or M here and also to at
+	  least one high-level driver (e.g. "Parallel port IDE disks",
+	  "Parallel port ATAPI CD-ROMs", "Parallel port ATAPI disks" etc.) and
+	  to at least one protocol driver (e.g. "ATEN EH-100 protocol",
+	  "MicroSolutions backpack protocol", "DataStor Commuter protocol"
+	  etc.).
+
+source "drivers/block/paride/Kconfig"
+
+config BLK_CPQ_DA
+	tristate "Compaq SMART2 support"
+	depends on PCI
+	help
+	  This is the driver for Compaq Smart Array controllers.  Everyone
+	  using these boards should say Y here.  See the file
+	  <file:Documentation/cpqarray.txt> for the current list of boards
+	  supported by this driver, and for further information on the use of
+	  this driver.
+
+config BLK_CPQ_CISS_DA
+	tristate "Compaq Smart Array 5xxx support"
+	depends on PCI
+	help
+	  This is the driver for Compaq Smart Array 5xxx controllers.
+	  Everyone using these boards should say Y here.
+	  See <file:Documentation/cciss.txt> for the current list of
+	  boards supported by this driver, and for further information
+	  on the use of this driver.
+
+config CISS_SCSI_TAPE
+	bool "SCSI tape drive support for Smart Array 5xxx"
+	depends on BLK_CPQ_CISS_DA && SCSI && PROC_FS
+	help
+	  When enabled (Y), this option allows SCSI tape drives and SCSI medium
+	  changers (tape robots) to be accessed via a Compaq 5xxx array 
+	  controller.  (See <file:Documentation/cciss.txt> for more details.)
+
+	  "SCSI support" and "SCSI tape support" must also be enabled for this 
+	  option to work.
+
+	  When this option is disabled (N), the SCSI portion of the driver 
+	  is not compiled.
+
+config BLK_DEV_DAC960
+	tristate "Mylex DAC960/DAC1100 PCI RAID Controller support"
+	depends on PCI
+	help
+	  This driver adds support for the Mylex DAC960, AcceleRAID, and
+	  eXtremeRAID PCI RAID controllers.  See the file
+	  <file:Documentation/README.DAC960> for further information about
+	  this driver.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called DAC960.
+
+config BLK_DEV_UMEM
+	tristate "Micro Memory MM5415 Battery Backed RAM support (EXPERIMENTAL)"
+	depends on PCI && EXPERIMENTAL
+	---help---
+	  Saying Y here will include support for the MM5415 family of
+	  battery backed (Non-volatile) RAM cards.
+	  <http://www.umem.com/>
+
+	  The cards appear as block devices that can be partitioned into
+	  as many as 15 partitions.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called umem.
+
+	  The umem driver has not yet been allocated a MAJOR number, so
+	  one is chosen dynamically.  Use "devfs" or look in /proc/devices
+	  for the device number.
+
+config BLK_DEV_UBD
+	bool "Virtual block device"
+	depends on UML
+	---help---
+          The User-Mode Linux port includes a driver called UBD which will let
+          you access arbitrary files on the host computer as block devices.
+          Unless you know that you do not need such virtual block devices say
+          Y here.
+
+config BLK_DEV_UBD_SYNC
+	bool "Always do synchronous disk IO for UBD"
+	depends on BLK_DEV_UBD
+	---help---
+	  Writes to the virtual block device are not immediately written to the
+	  host's disk; this may cause problems if, for example, the User-Mode
+	  Linux 'Virtual Machine' uses a journalling filesystem and the host
+	  computer crashes.
+
+	  Synchronous operation (i.e. always writing data to the host's disk
+	  immediately) is configurable on a per-UBD basis by using a special
+	  kernel command line option.  Alternatively, you can say Y here to
+	  turn on synchronous operation by default for all block devices.
+
+	  If you're running a journalling file system (like reiserfs, for
+	  example) in your virtual machine, you will want to say Y here.  If
+	  you care for the safety of the data in your virtual machine, Y is a
+	  wise choice too.  In all other cases (for example, if you're just
+	  playing around with User-Mode Linux) you can choose N.
+
+config BLK_DEV_COW_COMMON
+	bool
+	default BLK_DEV_UBD
+
+config MMAPPER
+	tristate "Example IO memory driver (BROKEN)"
+	depends on UML && BROKEN
+	---help---
+          The User-Mode Linux port can provide support for IO Memory
+          emulation with this option.  This allows a host file to be
+          specified as an I/O region on the kernel command line. That file
+          will be mapped into UML's kernel address space where a driver can
+          locate it and do whatever it wants with the memory, including
+          providing an interface to it for UML processes to use.
+
+          For more information, see
+          <http://user-mode-linux.sourceforge.net/iomem.html>.
+
+          If you'd like to be able to provide a simulated IO port space for
+          User-Mode Linux processes, say Y.  If unsure, say N.
+
+config BLK_DEV_LOOP
+	tristate "Loopback device support"
+	---help---
+	  Saying Y here will allow you to use a regular file as a block
+	  device; you can then create a file system on that block device and
+	  mount it just as you would mount other block devices such as hard
+	  drive partitions, CD-ROM drives or floppy drives. The loop devices
+	  are block special device files with major number 7 and typically
+	  called /dev/loop0, /dev/loop1 etc.
+
+	  This is useful if you want to check an ISO 9660 file system before
+	  burning the CD, or if you want to use floppy images without first
+	  writing them to floppy. Furthermore, some Linux distributions avoid
+	  the need for a dedicated Linux partition by keeping their complete
+	  root file system inside a DOS FAT file using this loop device
+	  driver.
+
+	  To use the loop device, you need the losetup utility, found in the
+	  util-linux package, see
+	  <ftp://ftp.kernel.org/pub/linux/utils/util-linux/>.
+
+	  The loop device driver can also be used to "hide" a file system in
+	  a disk partition, floppy, or regular file, either using encryption
+	  (scrambling the data) or steganography (hiding the data in the low
+	  bits of, say, a sound file). This is also safe if the file resides
+	  on a remote file server.
+
+	  There are several ways of encrypting disks. Some of these require
+	  kernel patches. The vanilla kernel offers the cryptoloop option
+	  and a Device Mapper target (which is superior, as it supports all
+	  file systems). If you want to use the cryptoloop, say Y to both
+	  LOOP and CRYPTOLOOP, and make sure you have a recent (version 2.12
+	  or later) version of util-linux. Additionally, be aware that
+	  the cryptoloop is not safe for storing journaled filesystems.
+
+	  Note that this loop device has nothing to do with the loopback
+	  device used for network connections from the machine to itself.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called loop.
+
+	  Most users will answer N here.
+
+config BLK_DEV_CRYPTOLOOP
+	tristate "Cryptoloop Support"
+	select CRYPTO
+	depends on BLK_DEV_LOOP
+	---help---
+	  Say Y here if you want to be able to use the ciphers that are 
+	  provided by the CryptoAPI as loop transformation. This might be
+	  used as hard disk encryption.
+
+	  WARNING: This device is not safe for journaled file systems like
+	  ext3 or Reiserfs. Please use the Device Mapper crypto module
+	  instead, which can be configured to be on-disk compatible with the
+	  cryptoloop device.
+
+config BLK_DEV_NBD
+	tristate "Network block device support"
+	depends on NET
+	---help---
+	  Saying Y here will allow your computer to be a client for network
+	  block devices, i.e. it will be able to use block devices exported by
+	  servers (mount file systems on them etc.). Communication between
+	  client and server works over TCP/IP networking, but to the client
+	  program this is hidden: it looks like a regular local file access to
+	  a block device special file such as /dev/nd0.
+
+	  Network block devices also allow you to run a block device in
+	  userland (making server and client physically the same computer,
+	  communicating using the loopback network device).
+
+	  Read <file:Documentation/nbd.txt> for more information, especially
+	  about where to find the server code, which runs in user space and
+	  does not need special kernel support.
+
+	  Note that this has nothing to do with the network file systems NFS
+	  or Coda; you can say N here even if you intend to use NFS or Coda.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called nbd.
+
+	  If unsure, say N.
+
+config BLK_DEV_SX8
+	tristate "Promise SATA SX8 support"
+	depends on PCI
+	---help---
+	  Saying Y or M here will enable support for the 
+	  Promise SATA SX8 controllers.
+
+	  Use devices /dev/sx8/$N and /dev/sx8/$Np$M.
+
+config BLK_DEV_UB
+	tristate "Low Performance USB Block driver"
+	depends on USB
+	help
+	  This driver supports certain USB attached storage devices
+	  such as flash keys.
+
+	  Warning: Enabling this cripples the usb-storage driver.
+
+	  If unsure, say N.
+
+config BLK_DEV_RAM
+	tristate "RAM disk support"
+	---help---
+	  Saying Y here will allow you to use a portion of your RAM memory as
+	  a block device, so that you can make file systems on it, read and
+	  write to it and do all the other things that you can do with normal
+	  block devices (such as hard drives). It is usually used to load and
+	  store a copy of a minimal root file system off of a floppy into RAM
+	  during the initial install of Linux.
+
+	  Note that the kernel command line option "ramdisk=XX" is now
+	  obsolete. For details, read <file:Documentation/ramdisk.txt>.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called rd.
+
+	  Most normal users won't need the RAM disk functionality, and can
+	  thus say N here.
+
+config BLK_DEV_RAM_COUNT
+	int "Default number of RAM disks" if BLK_DEV_RAM
+	default "16"
+	help
+	  The default value is 16 RAM disks. Change this if you know what
+	  you are doing. If you boot from a filesystem that needs to be extracted
+	  in memory, you will need at least one RAM disk (e.g. root on cramfs).
+
+config BLK_DEV_RAM_SIZE
+	int "Default RAM disk size (kbytes)"
+	depends on BLK_DEV_RAM
+	default "4096"
+	help
+	  The default value is 4096 kilobytes. Only change this if you know
+	  what you are doing. If you are using IBM S/390, then set this to
+	  8192.
+
+config BLK_DEV_INITRD
+	bool "Initial RAM disk (initrd) support"
+	depends on BLK_DEV_RAM=y
+	help
+	  The initial RAM disk is a RAM disk that is loaded by the boot loader
+	  (loadlin or lilo) and that is mounted as root before the normal boot
+	  procedure. It is typically used to load modules needed to mount the
+	  "real" root file system, etc. See <file:Documentation/initrd.txt>
+	  for details.
+
+config INITRAMFS_SOURCE
+	string "Initramfs source file(s)"
+	default ""
+	help
+	  This can be either a single cpio archive with a .cpio suffix or a
+	  space-separated list of directories and files for building the
+	  initramfs image.  A cpio archive should contain a filesystem archive
+	  to be used as an initramfs image.  Directories should contain a
+	  filesystem layout to be included in the initramfs image.  Files
+	  should contain entries according to the format described by the
+	  "usr/gen_init_cpio" program in the kernel tree.
+
+	  When multiple directories and files are specified then the
+	  initramfs image will be the aggregate of all of them.
+
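+	  For example (illustrative only; the paths are hypothetical), a
+	  description file passed here might contain:
+
+	    dir /dev 0755 0 0
+	    nod /dev/console 0600 0 0 c 5 1
+	    file /init my_initramfs/init 0755 0 0
+
+	  creating a directory, a console device node, and a regular file
+	  copied from "my_initramfs/init" on the build host.
+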
+	  See <file:Documentation/early-userspace/README> for more details.
+
+	  If you are not sure, leave it blank.
+
+config INITRAMFS_ROOT_UID
+	int "User ID to map to 0 (user root)"
+	depends on INITRAMFS_SOURCE!=""
+	default "0"
+	help
+	  This setting is only meaningful if INITRAMFS_SOURCE
+	  contains a directory.  Setting this user ID (UID) to something
+	  other than "0" will cause all files owned by that UID to be
+	  owned by user root in the initial ramdisk image.
+
+	  If you are not sure, leave it set to "0".
+
+config INITRAMFS_ROOT_GID
+	int "Group ID to map to 0 (group root)"
+	depends on INITRAMFS_SOURCE!=""
+	default "0"
+	help
+	  This setting is only meaningful if INITRAMFS_SOURCE
+	  contains a directory.  Setting this group ID (GID) to something
+	  other than "0" will cause all files owned by that GID to be
+	  owned by group root in the initial ramdisk image.
+
+	  If you are not sure, leave it set to "0".
+
+#XXX - it makes sense to enable this only for 32-bit subarch's, not for x86_64
+#for instance.
+config LBD
+	bool "Support for Large Block Devices"
+	depends on X86 || MIPS32 || PPC32 || ARCH_S390_31 || SUPERH || UML
+	help
+	  Say Y here if you want to attach large (bigger than 2TB) discs to
+	  your machine, or if you want to have a raid or loopback device
+	  bigger than 2TB.  Otherwise say N.
+
+config CDROM_PKTCDVD
+	tristate "Packet writing on CD/DVD media"
+	depends on !UML
+	help
+	  If you have a CDROM drive that supports packet writing, say Y to
+	  include preliminary support. It should work with any MMC/Mt Fuji
+	  compliant ATAPI or SCSI drive, which is just about any newer CD
+	  writer.
+
+	  Currently only writing to CD-RW, DVD-RW and DVD+RW discs is possible.
+	  DVD-RW disks must be in restricted overwrite mode.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called pktcdvd.
+
+config CDROM_PKTCDVD_BUFFERS
+	int "Free buffers for data gathering"
+	depends on CDROM_PKTCDVD
+	default "8"
+	help
+	  This controls the maximum number of active concurrent packets. More
+	  concurrent packets can increase write performance, but also require
+	  more memory. Each concurrent packet will require approximately 64Kb
+	  of non-swappable kernel memory, which will be allocated at
+	  pktsetup time.
+
+config CDROM_PKTCDVD_WCACHE
+	bool "Enable write caching"
+	depends on CDROM_PKTCDVD
+	help
+	  If enabled, write caching will be set for the CD-R/W device. For now
+	  this option is dangerous unless the CD-RW media is known good, as we
+	  don't do deferred write error handling yet.
+
+source "drivers/s390/block/Kconfig"
+
+source "drivers/block/Kconfig.iosched"
+
+config ATA_OVER_ETH
+	tristate "ATA over Ethernet support"
+	depends on NET
+	help
+	  This driver provides support for ATA over Ethernet block
+	  devices like the Coraid EtherDrive (R) Storage Blade.
+
+endmenu
diff --git a/drivers/block/Kconfig.iosched b/drivers/block/Kconfig.iosched
new file mode 100644
index 0000000..6070a48
--- /dev/null
+++ b/drivers/block/Kconfig.iosched
@@ -0,0 +1,41 @@
+
+menu "IO Schedulers"
+
+config IOSCHED_NOOP
+	bool
+	default y
+	---help---
+	  The no-op I/O scheduler is a minimal scheduler that does basic merging
+	  and sorting. Its main uses include non-disk based block devices like
+	  memory devices, and specialised software or hardware environments
+	  that do their own scheduling and require only minimal assistance from
+	  the kernel.
+
+config IOSCHED_AS
+	tristate "Anticipatory I/O scheduler"
+	default y
+	---help---
+	  The anticipatory I/O scheduler is the default disk scheduler. It is
+	  generally a good choice for most environments, but is quite large and
+	  complex when compared to the deadline I/O scheduler. It can also be
+	  slower in some cases, especially under some database loads.
+
+config IOSCHED_DEADLINE
+	tristate "Deadline I/O scheduler"
+	default y
+	---help---
+	  The deadline I/O scheduler is simple and compact, and is often as
+	  good as the anticipatory I/O scheduler, and in some database
+	  workloads, better. In the case of a single process performing I/O to
+	  a disk at any one time, its behaviour is almost identical to the
+	  anticipatory I/O scheduler and so is a good choice.
+
+config IOSCHED_CFQ
+	tristate "CFQ I/O scheduler"
+	default y
+	---help---
+	  The CFQ I/O scheduler tries to distribute bandwidth equally
+	  among all processes in the system. It should provide a fair
+	  working environment, suitable for desktop systems.
+
+endmenu
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
new file mode 100644
index 0000000..1cf09a1
--- /dev/null
+++ b/drivers/block/Makefile
@@ -0,0 +1,47 @@
+#
+# Makefile for the kernel block device drivers.
+#
+# 12 June 2000, Christoph Hellwig <hch@infradead.org>
+# Rewritten to use lists instead of if-statements.
+# 
+# Note : at this point, these files are compiled on all systems.
+# In the future, some of these should be built conditionally.
+#
+
+#
+# NOTE that ll_rw_blk.c must come early in linkage order - it starts the
+# kblockd threads
+#
+
+obj-y	:= elevator.o ll_rw_blk.o ioctl.o genhd.o scsi_ioctl.o
+
+obj-$(CONFIG_IOSCHED_NOOP)	+= noop-iosched.o
+obj-$(CONFIG_IOSCHED_AS)	+= as-iosched.o
+obj-$(CONFIG_IOSCHED_DEADLINE)	+= deadline-iosched.o
+obj-$(CONFIG_IOSCHED_CFQ)	+= cfq-iosched.o
+obj-$(CONFIG_MAC_FLOPPY)	+= swim3.o
+obj-$(CONFIG_BLK_DEV_FD)	+= floppy.o
+obj-$(CONFIG_BLK_DEV_FD98)	+= floppy98.o
+obj-$(CONFIG_AMIGA_FLOPPY)	+= amiflop.o
+obj-$(CONFIG_ATARI_FLOPPY)	+= ataflop.o
+obj-$(CONFIG_BLK_DEV_SWIM_IOP)	+= swim_iop.o
+obj-$(CONFIG_ATARI_ACSI)	+= acsi.o
+obj-$(CONFIG_ATARI_SLM)		+= acsi_slm.o
+obj-$(CONFIG_AMIGA_Z2RAM)	+= z2ram.o
+obj-$(CONFIG_BLK_DEV_RAM)	+= rd.o
+obj-$(CONFIG_BLK_DEV_LOOP)	+= loop.o
+obj-$(CONFIG_BLK_DEV_PS2)	+= ps2esdi.o
+obj-$(CONFIG_BLK_DEV_XD)	+= xd.o
+obj-$(CONFIG_BLK_CPQ_DA)	+= cpqarray.o
+obj-$(CONFIG_BLK_CPQ_CISS_DA)  += cciss.o
+obj-$(CONFIG_BLK_DEV_DAC960)	+= DAC960.o
+obj-$(CONFIG_CDROM_PKTCDVD)	+= pktcdvd.o
+
+obj-$(CONFIG_BLK_DEV_UMEM)	+= umem.o
+obj-$(CONFIG_BLK_DEV_NBD)	+= nbd.o
+obj-$(CONFIG_BLK_DEV_CRYPTOLOOP) += cryptoloop.o
+
+obj-$(CONFIG_VIODASD)		+= viodasd.o
+obj-$(CONFIG_BLK_DEV_SX8)	+= sx8.o
+obj-$(CONFIG_BLK_DEV_UB)	+= ub.o
+
diff --git a/drivers/block/acsi.c b/drivers/block/acsi.c
new file mode 100644
index 0000000..ce933de
--- /dev/null
+++ b/drivers/block/acsi.c
@@ -0,0 +1,1829 @@
+/*
+ * acsi.c -- Device driver for Atari ACSI hard disks
+ *
+ * Copyright 1994 Roman Hodek <Roman.Hodek@informatik.uni-erlangen.de>
+ *
+ * Some parts are based on hd.c by Linus Torvalds
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive for
+ * more details.
+ *
+ */
+
+/*
+ * Still to do in this file:
+ *  - If a command ends with an error status (!= 0), the following
+ *    REQUEST SENSE commands (4 to fill the ST-DMA FIFO) are done by
+ *    polling the _IRQ signal (not interrupt-driven). This should be
+ *    avoided in the future because it takes up a non-negligible amount of
+ *    time in the interrupt service routine while interrupts are disabled.
+ *    Maybe a timer interrupt will get lost :-(
+ */
+
+/*
+ * General notes:
+ *
+ *  - All ACSI devices (disks, CD-ROMs, ...) use major number 28.
+ *    Minors are organized as with SCSI: the upper 4 bits
+ *    identify the device, the lower 4 bits the partition.
+ *    The device numbers (the upper 4 bits) are given in the same
+ *    order as the devices are found on the bus.
+ *  - Up to 8 LUNs are supported for each target (if CONFIG_ACSI_MULTI_LUN
+ *    is defined), but only a total of 16 devices (due to minor
+ *    numbers...). Note that Atari allows only a maximum of 4 targets
+ *    (i.e. controllers, not devices) on the ACSI bus!
+ *  - An optimizing scheme similar to SCSI scatter-gather is implemented.
+ *  - Removable media are supported. After a medium change the device
+ *    is reinitialized (partition check etc.). Also, if the device
+ *    knows the PREVENT/ALLOW MEDIUM REMOVAL command, the door should
+ *    be locked and unlocked when mounting the first or unmounting the
+ *    last filesystem on the device. The code is untested, because I
+ *    don't have a removable hard disk.
+ *
+ */
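+
+/*
+ * Example (illustrative): with the minor numbering described above, minor
+ * 0x23 addresses ACSI device 2 (0x23 >> 4), partition 3 (0x23 & 0x0f).
+ */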
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/genhd.h>
+#include <linux/delay.h>
+#include <linux/mm.h>
+#include <linux/major.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <scsi/scsi.h> /* for SCSI_IOCTL_GET_IDLUN */
+typedef void Scsi_Device; /* hack to avoid including scsi.h */
+#include <scsi/scsi_ioctl.h>
+#include <linux/hdreg.h> /* for HDIO_GETGEO */
+#include <linux/blkpg.h>
+#include <linux/buffer_head.h>
+#include <linux/blkdev.h>
+
+#include <asm/setup.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/atarihw.h>
+#include <asm/atariints.h>
+#include <asm/atari_acsi.h>
+#include <asm/atari_stdma.h>
+#include <asm/atari_stram.h>
+
+static void (*do_acsi)(void) = NULL;
+static struct request_queue *acsi_queue;
+#define QUEUE (acsi_queue)
+#define CURRENT elv_next_request(acsi_queue)
+
+#define DEBUG
+#undef DEBUG_DETECT
+#undef NO_WRITE
+
+#define MAX_ERRORS     		8	/* Max read/write errors/sector */
+#define MAX_LUN				8	/* Max LUNs per target */
+#define MAX_DEV		   		16
+
+#define ACSI_BUFFER_SIZE			(16*1024) /* "normal" ACSI buffer size */
+#define ACSI_BUFFER_MINSIZE			(2048) 	  /* min. buf size if ext. DMA */
+#define ACSI_BUFFER_SIZE_ORDER	 	2		  /* order size for above */
+#define ACSI_BUFFER_MINSIZE_ORDER	0 	  	  /* order size for above */
+#define ACSI_BUFFER_SECTORS	(ACSI_BUFFER_SIZE/512)
+
+#define ACSI_BUFFER_ORDER \
+	(ATARIHW_PRESENT(EXTD_DMA) ? \
+	 ACSI_BUFFER_MINSIZE_ORDER : \
+	 ACSI_BUFFER_SIZE_ORDER)
+
+#define ACSI_TIMEOUT		(4*HZ)
+
+/* minimum delay between two commands */
+
+#define COMMAND_DELAY 500
+
+typedef enum {
+	NONE, HARDDISK, CDROM
+} ACSI_TYPE;
+
+struct acsi_info_struct {
+	ACSI_TYPE		type;			/* type of device */
+	unsigned		target;			/* target number */
+	unsigned		lun;			/* LUN in target controller */
+	unsigned		removable : 1;	/* Flag for removable media */
+	unsigned		read_only : 1;	/* Flag for read only devices */
+	unsigned		old_atari_disk : 1; /* Is an old Atari disk       */
+	unsigned		changed : 1;	/* Medium has been changed */
+	unsigned long 	size;			/* #blocks */
+	int access_count;
+} acsi_info[MAX_DEV];
+
+/*
+ *	SENSE KEYS
+ */
+
+#define NO_SENSE		0x00
+#define RECOVERED_ERROR 	0x01
+#define NOT_READY		0x02
+#define MEDIUM_ERROR		0x03
+#define HARDWARE_ERROR		0x04
+#define ILLEGAL_REQUEST 	0x05
+#define UNIT_ATTENTION		0x06
+#define DATA_PROTECT		0x07
+#define BLANK_CHECK		0x08
+#define COPY_ABORTED		0x0a
+#define ABORTED_COMMAND 	0x0b
+#define VOLUME_OVERFLOW 	0x0d
+#define MISCOMPARE		0x0e
+
+
+/*
+ *	DEVICE TYPES
+ */
+
+#define TYPE_DISK	0x00
+#define TYPE_TAPE	0x01
+#define TYPE_WORM	0x04
+#define TYPE_ROM	0x05
+#define TYPE_MOD	0x07
+#define TYPE_NO_LUN	0x7f
+
+/* The data returned by MODE SENSE differ between the old Atari
+ * hard disks and SCSI disks connected to ACSI. In the following, both
+ * formats are defined, along with some macros to operate on them portably.
+ */
+
+typedef struct {
+	unsigned long	dummy[2];
+	unsigned long	sector_size;
+	unsigned char	format_code;
+#define ATARI_SENSE_FORMAT_FIX	1	
+#define ATARI_SENSE_FORMAT_CHNG	2
+	unsigned char	cylinders_h;
+	unsigned char	cylinders_l;
+	unsigned char	heads;
+	unsigned char	reduced_h;
+	unsigned char	reduced_l;
+	unsigned char	precomp_h;
+	unsigned char	precomp_l;
+	unsigned char	landing_zone;
+	unsigned char	steprate;
+	unsigned char	type;
+#define ATARI_SENSE_TYPE_FIXCHNG_MASK		4
+#define ATARI_SENSE_TYPE_SOFTHARD_MASK		8
+#define ATARI_SENSE_TYPE_FIX				4
+#define ATARI_SENSE_TYPE_CHNG				0
+#define ATARI_SENSE_TYPE_SOFT				0
+#define ATARI_SENSE_TYPE_HARD				8
+	unsigned char	sectors;
+} ATARI_SENSE_DATA;
+
+#define ATARI_CAPACITY(sd) \
+	(((int)((sd).cylinders_h<<8)|(sd).cylinders_l) * \
+	 (sd).heads * (sd).sectors)
+
+
+typedef struct {
+	unsigned char   dummy1;
+	unsigned char   medium_type;
+	unsigned char   dummy2;
+	unsigned char   descriptor_size;
+	unsigned long   block_count;
+	unsigned long   sector_size;
+	/* Page 0 data */
+	unsigned char	page_code;
+	unsigned char	page_size;
+	unsigned char	page_flags;
+	unsigned char	qualifier;
+} SCSI_SENSE_DATA;
+
+#define SCSI_CAPACITY(sd) 	((sd).block_count & 0xffffff)
+
+
+typedef union {
+	ATARI_SENSE_DATA	atari;
+	SCSI_SENSE_DATA		scsi;
+} SENSE_DATA;
+
+#define SENSE_TYPE_UNKNOWN	0
+#define SENSE_TYPE_ATARI	1
+#define SENSE_TYPE_SCSI		2
+
+#define SENSE_TYPE(sd)										\
+	(((sd).atari.dummy[0] == 8 &&							\
+	  ((sd).atari.format_code == 1 ||						\
+	   (sd).atari.format_code == 2)) ? SENSE_TYPE_ATARI :	\
+	 ((sd).scsi.dummy1 >= 11) ? SENSE_TYPE_SCSI :			\
+	 SENSE_TYPE_UNKNOWN)
+	 
+#define CAPACITY(sd)							\
+	(SENSE_TYPE(sd) == SENSE_TYPE_ATARI ?		\
+	 ATARI_CAPACITY((sd).atari) :				\
+	 SCSI_CAPACITY((sd).scsi))
+
+#define SECTOR_SIZE(sd)							\
+	(SENSE_TYPE(sd) == SENSE_TYPE_ATARI ?		\
+	 (sd).atari.sector_size :					\
+	 (sd).scsi.sector_size & 0xffffff)
+
+/* Default size if capacity cannot be determined (1 GByte) */
+#define	DEFAULT_SIZE	0x1fffff
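+
+/* Worked example (illustrative): a MODE SENSE reply whose first long word
+ * is 8 and whose format_code is 1 is classified by SENSE_TYPE() as
+ * SENSE_TYPE_ATARI; with cylinders = 612, heads = 4 and sectors = 17 the
+ * macros then give CAPACITY(sd) = 612 * 4 * 17 = 41616 blocks.  A reply
+ * with dummy1 >= 11 is classified as SENSE_TYPE_SCSI instead, and
+ * CAPACITY(sd) is simply the low 24 bits of block_count.
+ */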
+
+#define CARTRCH_STAT(aip,buf)						\
+	(aip->old_atari_disk ?						\
+	 (((buf)[0] & 0x7f) == 0x28) :					\
+	 ((((buf)[0] & 0x70) == 0x70) ?					\
+	  (((buf)[2] & 0x0f) == 0x06) :					\
+	  (((buf)[0] & 0x0f) == 0x06)))					\
+
+/* These two are also exported to other drivers that work on the ACSI bus and
+ * need an ST-RAM buffer. */
+char 			*acsi_buffer;
+unsigned long 	phys_acsi_buffer;
+
+static int NDevices;
+
+static int				CurrentNReq;
+static int				CurrentNSect;
+static char				*CurrentBuffer;
+
+static DEFINE_SPINLOCK(acsi_lock);
+
+
+#define SET_TIMER()	mod_timer(&acsi_timer, jiffies + ACSI_TIMEOUT)
+#define CLEAR_TIMER()	del_timer(&acsi_timer)
+
+static unsigned long	STramMask;
+#define STRAM_ADDR(a)	(((a) & STramMask) == 0)
+
+
+
+/* ACSI commands */
+
+static char tur_cmd[6]        = { 0x00, 0, 0, 0, 0, 0 };
+static char modesense_cmd[6]  = { 0x1a, 0, 0, 0, 24, 0 };
+static char modeselect_cmd[6] = { 0x15, 0, 0, 0, 12, 0 };
+static char inquiry_cmd[6]    = { 0x12, 0, 0, 0,255, 0 };
+static char reqsense_cmd[6]   = { 0x03, 0, 0, 0, 4, 0 };
+static char read_cmd[6]       = { 0x08, 0, 0, 0, 0, 0 };
+static char write_cmd[6]      = { 0x0a, 0, 0, 0, 0, 0 };
+static char pa_med_rem_cmd[6] = { 0x1e, 0, 0, 0, 0, 0 };
+
+#define CMDSET_TARG_LUN(cmd,targ,lun)			\
+    do {						\
+		cmd[0] = (cmd[0] & ~0xe0) | (targ)<<5;	\
+		cmd[1] = (cmd[1] & ~0xe0) | (lun)<<5;	\
+	} while(0)
+
+#define CMDSET_BLOCK(cmd,blk)						\
+    do {											\
+		unsigned long __blk = (blk);				\
+		cmd[3] = __blk; __blk >>= 8;				\
+		cmd[2] = __blk; __blk >>= 8;				\
+		cmd[1] = (cmd[1] & 0xe0) | (__blk & 0x1f);	\
+	} while(0)
+
+#define CMDSET_LEN(cmd,len)						\
+	do {										\
+		cmd[4] = (len);							\
+	} while(0)
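+
+/* Illustrative example (not driver code): building a READ command for
+ * target 1, LUN 0, block 0x1234, 2 sectors with the macros above:
+ *
+ *	CMDSET_TARG_LUN( read_cmd, 1, 0 );
+ *	CMDSET_BLOCK( read_cmd, 0x1234 );
+ *	CMDSET_LEN( read_cmd, 2 );
+ *
+ * leaves read_cmd[] = { 0x28, 0x00, 0x12, 0x34, 0x02, 0x00 }: the target
+ * number lands in the top three bits of byte 0, the 21-bit block address
+ * fills byte 1 bits 0-4 and bytes 2-3, and the sector count goes in byte 4.
+ */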
+
+/* ACSI errors (from REQUEST SENSE); There are two tables, one for the
+ * old Atari disks and one for SCSI on ACSI disks.
+ */
+
+struct acsi_error {
+	unsigned char	code;
+	const char		*text;
+} atari_acsi_errors[] = {
+	{ 0x00, "No error (??)" },
+	{ 0x01, "No index pulses" },
+	{ 0x02, "Seek not complete" },
+	{ 0x03, "Write fault" },
+	{ 0x04, "Drive not ready" },
+	{ 0x06, "No Track 00 signal" },
+	{ 0x10, "ECC error in ID field" },
+	{ 0x11, "Uncorrectable data error" },
+	{ 0x12, "ID field address mark not found" },
+	{ 0x13, "Data field address mark not found" },
+	{ 0x14, "Record not found" },
+	{ 0x15, "Seek error" },
+	{ 0x18, "Data check in no retry mode" },
+	{ 0x19, "ECC error during verify" },
+	{ 0x1a, "Access to bad block" },
+	{ 0x1c, "Unformatted or bad format" },
+	{ 0x20, "Invalid command" },
+	{ 0x21, "Invalid block address" },
+	{ 0x23, "Volume overflow" },
+	{ 0x24, "Invalid argument" },
+	{ 0x25, "Invalid drive number" },
+	{ 0x26, "Byte zero parity check" },
+	{ 0x28, "Cartride changed" },
+	{ 0x2c, "Error count overflow" },
+	{ 0x30, "Controller selftest failed" }
+},
+
+	scsi_acsi_errors[] = {
+	{ 0x00, "No error (??)" },
+	{ 0x01, "Recovered error" },
+	{ 0x02, "Drive not ready" },
+	{ 0x03, "Uncorrectable medium error" },
+	{ 0x04, "Hardware error" },
+	{ 0x05, "Illegal request" },
+	{ 0x06, "Unit attention (Reset or cartridge changed)" },
+	{ 0x07, "Data protection" },
+	{ 0x08, "Blank check" },
+	{ 0x0b, "Aborted Command" },
+	{ 0x0d, "Volume overflow" }
+};
+
+
+
+/***************************** Prototypes *****************************/
+
+static int acsicmd_dma( const char *cmd, char *buffer, int blocks, int
+                        rwflag, int enable);
+static int acsi_reqsense( char *buffer, int targ, int lun);
+static void acsi_print_error(const unsigned char *errblk, struct acsi_info_struct *aip);
+static irqreturn_t acsi_interrupt (int irq, void *data, struct pt_regs *fp);
+static void unexpected_acsi_interrupt( void );
+static void bad_rw_intr( void );
+static void read_intr( void );
+static void write_intr( void);
+static void acsi_times_out( unsigned long dummy );
+static void copy_to_acsibuffer( void );
+static void copy_from_acsibuffer( void );
+static void do_end_requests( void );
+static void do_acsi_request( request_queue_t * );
+static void redo_acsi_request( void );
+static int acsi_ioctl( struct inode *inode, struct file *file, unsigned int
+                       cmd, unsigned long arg );
+static int acsi_open( struct inode * inode, struct file * filp );
+static int acsi_release( struct inode * inode, struct file * file );
+static void acsi_prevent_removal(struct acsi_info_struct *aip, int flag );
+static int acsi_change_blk_size( int target, int lun);
+static int acsi_mode_sense( int target, int lun, SENSE_DATA *sd );
+static int acsi_revalidate (struct gendisk *disk);
+
+/************************* End of Prototypes **************************/
+
+
+struct timer_list acsi_timer = TIMER_INITIALIZER(acsi_times_out, 0, 0);
+
+
+#ifdef CONFIG_ATARI_SLM
+
+extern int attach_slm( int target, int lun );
+extern int slm_init( void );
+
+#endif
+
+
+
+/***********************************************************************
+ *
+ *   ACSI primitives
+ *
+ **********************************************************************/
+
+
+/*
+ * The following two functions wait for _IRQ to become Low or High,
+ * resp., with a timeout. The 'timeout' parameter is in jiffies
+ * (10ms).
+ * If the functions are called with timer interrupts on (int level <
+ * 6), the timeout is based on the 'jiffies' variable to provide exact
+ * timeouts for device probing etc.
+ * If interrupts are disabled, the number of tries is based on the
+ * 'loops_per_jiffy' variable. A rough estimation is sufficient here...
+ */
+
+#define INT_LEVEL													\
+	({	unsigned __sr;												\
+		__asm__ __volatile__ ( "movew	%/sr,%0" : "=dm" (__sr) );	\
+		(__sr >> 8) & 7;											\
+	})
+
+int acsi_wait_for_IRQ( unsigned timeout )
+
+{
+	if (INT_LEVEL < 6) {
+		unsigned long maxjif = jiffies + timeout;
+		while (time_before(jiffies, maxjif))
+			if (!(mfp.par_dt_reg & 0x20)) return( 1 );
+	}
+	else {
+		long tries = loops_per_jiffy / 8 * timeout;
+		while( --tries >= 0 )
+			if (!(mfp.par_dt_reg & 0x20)) return( 1 );
+	}		
+	return( 0 ); /* timeout! */
+}
+
+
+int acsi_wait_for_noIRQ( unsigned timeout )
+
+{
+	if (INT_LEVEL < 6) {
+		unsigned long maxjif = jiffies + timeout;
+		while (time_before(jiffies, maxjif))
+			if (mfp.par_dt_reg & 0x20) return( 1 );
+	}
+	else {
+		long tries = loops_per_jiffy * timeout / 8;
+		while( tries-- >= 0 )
+			if (mfp.par_dt_reg & 0x20) return( 1 );
+	}		
+	return( 0 ); /* timeout! */
+}
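+
+/* Illustrative sketch (not driver code) of how the two wait functions
+ * above are typically used: a command byte is handed to the ST-DMA chip
+ * and the caller then waits up to half a second for the target to
+ * assert _IRQ:
+ *
+ *	DMA_LONG_WRITE( *cmd++, 0x8a );
+ *	udelay(20);
+ *	if (!acsi_wait_for_IRQ( HZ/2 ))
+ *		return( 0 );	(timeout)
+ */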
+
+static struct timeval start_time;
+
+void
+acsi_delay_start(void)
+{
+	do_gettimeofday(&start_time);
+}
+
+/* Wait until 'usec' microseconds (< 1E6) have elapsed since acsi_delay_start() */
+
+void
+acsi_delay_end(long usec)
+{
+	struct timeval end_time;
+	long deltau,deltas;
+	do_gettimeofday(&end_time);
+	deltau=end_time.tv_usec - start_time.tv_usec;
+	deltas=end_time.tv_sec - start_time.tv_sec;
+	if (deltas > 1 || deltas < 0)
+		return;
+	if (deltas > 0)
+		deltau += 1000*1000;
+	if (deltau >= usec)
+		return;
+	udelay(usec-deltau);
+}
+
+/* acsicmd_dma() sends an ACSI command and sets up the DMA to transfer
+ * 'blocks' blocks of 512 bytes from/to 'buffer'.
+ * Because the _IRQ signal is used for handshaking the command bytes,
+ * the ACSI interrupt has to be disabled in this function. If the end
+ * of the operation should be signalled by a real interrupt, it has to be
+ * reenabled afterwards.
+ */
+
+static int acsicmd_dma( const char *cmd, char *buffer, int blocks, int rwflag, int enable)
+
+{	unsigned long	flags, paddr;
+	int				i;
+
+#ifdef NO_WRITE
+	if (rwflag || *cmd == 0x0a) {
+		printk( "ACSI: Write commands disabled!\n" );
+		return( 0 );
+	}
+#endif
+	
+	rwflag = rwflag ? 0x100 : 0;
+	paddr = virt_to_phys( buffer );
+
+	acsi_delay_end(COMMAND_DELAY);
+	DISABLE_IRQ();
+
+	local_irq_save(flags);
+	/* Low on A1 */
+	dma_wd.dma_mode_status = 0x88 | rwflag;
+	MFPDELAY();
+
+	/* set DMA address */
+	dma_wd.dma_lo = (unsigned char)paddr;
+	paddr >>= 8;
+	MFPDELAY();
+	dma_wd.dma_md = (unsigned char)paddr;
+	paddr >>= 8;
+	MFPDELAY();
+	if (ATARIHW_PRESENT(EXTD_DMA))
+		st_dma_ext_dmahi = (unsigned short)paddr;
+	else
+		dma_wd.dma_hi = (unsigned char)paddr;
+	MFPDELAY();
+	local_irq_restore(flags);
+
+	/* send the command bytes except the last */
+	for( i = 0; i < 5; ++i ) {
+		DMA_LONG_WRITE( *cmd++, 0x8a | rwflag );
+		udelay(20);
+		if (!acsi_wait_for_IRQ( HZ/2 )) return( 0 ); /* timeout */
+	}
+
+	/* Clear FIFO and switch DMA to correct direction */  
+	dma_wd.dma_mode_status = 0x92 | (rwflag ^ 0x100);  
+	MFPDELAY();
+	dma_wd.dma_mode_status = 0x92 | rwflag;
+	MFPDELAY();
+
+	/* How many sectors for DMA */
+	dma_wd.fdc_acces_seccount = blocks;
+	MFPDELAY();
+	
+	/* send last command byte */
+	dma_wd.dma_mode_status = 0x8a | rwflag;
+	MFPDELAY();
+	DMA_LONG_WRITE( *cmd++, 0x0a | rwflag );
+	if (enable)
+		ENABLE_IRQ();
+	udelay(80);
+
+	return( 1 );
+}
+
+
+/*
+ * acsicmd_nodma() sends an ACSI command that requires no DMA.
+ */
+
+int acsicmd_nodma( const char *cmd, int enable)
+
+{	int	i;
+
+	acsi_delay_end(COMMAND_DELAY);
+	DISABLE_IRQ();
+
+	/* send first command byte */
+	dma_wd.dma_mode_status = 0x88;
+	MFPDELAY();
+	DMA_LONG_WRITE( *cmd++, 0x8a );
+	udelay(20);
+	if (!acsi_wait_for_IRQ( HZ/2 )) return( 0 ); /* timeout */
+
+	/* send the intermediate command bytes */
+	for( i = 0; i < 4; ++i ) {
+		DMA_LONG_WRITE( *cmd++, 0x8a );
+		udelay(20);
+		if (!acsi_wait_for_IRQ( HZ/2 )) return( 0 ); /* timeout */
+	}
+
+	/* send last command byte */
+	DMA_LONG_WRITE( *cmd++, 0x0a );
+	if (enable)
+		ENABLE_IRQ();
+	udelay(80);
+	
+	return( 1 );
+	/* Note that the ACSI interrupt is still disabled after this
+	 * function. If you want to get the IRQ delivered, enable it manually!
+	 */
+}
+
+
+static int acsi_reqsense( char *buffer, int targ, int lun)
+
+{
+	CMDSET_TARG_LUN( reqsense_cmd, targ, lun);
+	if (!acsicmd_dma( reqsense_cmd, buffer, 1, 0, 0 )) return( 0 );
+	if (!acsi_wait_for_IRQ( 10 )) return( 0 );
+	acsi_getstatus();
+	if (!acsicmd_nodma( reqsense_cmd, 0 )) return( 0 );
+	if (!acsi_wait_for_IRQ( 10 )) return( 0 );
+	acsi_getstatus();
+	if (!acsicmd_nodma( reqsense_cmd, 0 )) return( 0 );
+	if (!acsi_wait_for_IRQ( 10 )) return( 0 );
+	acsi_getstatus();
+	if (!acsicmd_nodma( reqsense_cmd, 0 )) return( 0 );
+	if (!acsi_wait_for_IRQ( 10 )) return( 0 );
+	acsi_getstatus();
+	dma_cache_maintenance( virt_to_phys(buffer), 16, 0 );
+	
+	return( 1 );
+}	
+
+
+/*
+ * ACSI status phase: get the status byte from the bus
+ *
+ * I've seen several times that a 0xff status is read, probably due to
+ * a timing error. In this case, the procedure is repeated after the
+ * next _IRQ edge.
+ */
+
+int acsi_getstatus( void )
+
+{	int	status;
+
+	DISABLE_IRQ();
+	for(;;) {
+		if (!acsi_wait_for_IRQ( 100 )) {
+			acsi_delay_start();
+			return( -1 );
+		}
+		dma_wd.dma_mode_status = 0x8a;
+		MFPDELAY();
+		status = dma_wd.fdc_acces_seccount;
+		if (status != 0xff) break;
+#ifdef DEBUG
+		printk("ACSI: skipping 0xff status byte\n" );
+#endif
+		udelay(40);
+		acsi_wait_for_noIRQ( 20 );
+	}
+	dma_wd.dma_mode_status = 0x80;
+	udelay(40);
+	acsi_wait_for_noIRQ( 20 );
+
+	acsi_delay_start();
+	return( status & 0x1f ); /* mask off the device# */
+}
+
+
+#if (defined(CONFIG_ATARI_SLM) || defined(CONFIG_ATARI_SLM_MODULE))
+
+/* Receive data in an extended status phase. Needed by SLM printer. */
+
+int acsi_extstatus( char *buffer, int cnt )
+
+{	int	status;
+
+	DISABLE_IRQ();
+	udelay(80);
+	while( cnt-- > 0 ) {
+		if (!acsi_wait_for_IRQ( 40 )) return( 0 );
+		dma_wd.dma_mode_status = 0x8a;
+		MFPDELAY();
+		status = dma_wd.fdc_acces_seccount;
+		MFPDELAY();
+		*buffer++ = status & 0xff;
+		udelay(40);
+	}
+	return( 1 );
+}
+
+
+/* Finish an extended status phase */
+
+void acsi_end_extstatus( void )
+
+{
+	dma_wd.dma_mode_status = 0x80;
+	udelay(40);
+	acsi_wait_for_noIRQ( 20 );
+	acsi_delay_start();
+}
+
+
+/* Send data in an extended command phase */
+
+int acsi_extcmd( unsigned char *buffer, int cnt )
+
+{
+	while( cnt-- > 0 ) {
+		DMA_LONG_WRITE( *buffer++, 0x8a );
+		udelay(20);
+		if (!acsi_wait_for_IRQ( HZ/2 )) return( 0 ); /* timeout */
+	}
+	return( 1 );
+}
+
+#endif
+
+
+static void acsi_print_error(const unsigned char *errblk, struct acsi_info_struct *aip)
+
+{	int atari_err, i, errcode;
+	struct acsi_error *arr;
+
+	atari_err = aip->old_atari_disk;
+	if (atari_err)
+		errcode = errblk[0] & 0x7f;
+	else
+		if ((errblk[0] & 0x70) == 0x70)
+			errcode = errblk[2] & 0x0f;
+		else
+			errcode = errblk[0] & 0x0f;
+	
+	printk( KERN_ERR "ACSI error 0x%02x", errcode );
+
+	if (errblk[0] & 0x80)
+		printk( " for sector %d",
+				((errblk[1] & 0x1f) << 16) |
+				(errblk[2] << 8) | errblk[3] );
+
+	arr = atari_err ? atari_acsi_errors : scsi_acsi_errors;
+	i = atari_err ? sizeof(atari_acsi_errors)/sizeof(*atari_acsi_errors) :
+		            sizeof(scsi_acsi_errors)/sizeof(*scsi_acsi_errors);
+	
+	for( --i; i >= 0; --i )
+		if (arr[i].code == errcode) break;
+	if (i >= 0)
+		printk( ": %s\n", arr[i].text );
+}
+
+/*******************************************************************
+ *
+ * ACSI interrupt routine
+ *   Test if this is an ACSI interrupt and call the IRQ handler;
+ *   otherwise ignore this interrupt.
+ *
+ *******************************************************************/
+
+static irqreturn_t acsi_interrupt(int irq, void *data, struct pt_regs *fp )
+
+{	void (*acsi_irq_handler)(void) = do_acsi;
+
+	do_acsi = NULL;
+	CLEAR_TIMER();
+
+	if (!acsi_irq_handler)
+		acsi_irq_handler = unexpected_acsi_interrupt;
+	acsi_irq_handler();
+	return IRQ_HANDLED;
+}
+
+
+/******************************************************************
+ *
+ * The Interrupt handlers
+ *
+ *******************************************************************/
+
+
+static void unexpected_acsi_interrupt( void )
+
+{
+	printk( KERN_WARNING "Unexpected ACSI interrupt\n" );
+}
+
+
+/* This function is called in case of errors. Because we cannot reset
+ * the ACSI bus or a single device, there is no other choice than
+ * retrying several times :-(
+ */
+
+static void bad_rw_intr( void )
+
+{
+	if (!CURRENT)
+		return;
+
+	if (++CURRENT->errors >= MAX_ERRORS)
+		end_request(CURRENT, 0);
+	/* Otherwise just retry */
+}
+
+
+static void read_intr( void )
+
+{	int		status;
+	
+	status = acsi_getstatus();
+	if (status != 0) {
+		struct gendisk *disk = CURRENT->rq_disk;
+		struct acsi_info_struct *aip = disk->private_data;
+		printk(KERN_ERR "%s: ", disk->disk_name);
+		if (!acsi_reqsense(acsi_buffer, aip->target, aip->lun))
+			printk( "ACSI error and REQUEST SENSE failed (status=0x%02x)\n", status );
+		else {
+			acsi_print_error(acsi_buffer, aip);
+			if (CARTRCH_STAT(aip, acsi_buffer))
+				aip->changed = 1;
+		}
+		ENABLE_IRQ();
+		bad_rw_intr();
+		redo_acsi_request();
+		return;
+	}
+
+	dma_cache_maintenance( virt_to_phys(CurrentBuffer), CurrentNSect*512, 0 );
+	if (CurrentBuffer == acsi_buffer)
+		copy_from_acsibuffer();
+
+	do_end_requests();
+	redo_acsi_request();
+}
+
+
+static void write_intr(void)
+
+{	int	status;
+
+	status = acsi_getstatus();
+	if (status != 0) {
+		struct gendisk *disk = CURRENT->rq_disk;
+		struct acsi_info_struct *aip = disk->private_data;
+		printk( KERN_ERR "%s: ", disk->disk_name);
+		if (!acsi_reqsense( acsi_buffer, aip->target, aip->lun))
+			printk( "ACSI error and REQUEST SENSE failed (status=0x%02x)\n", status );
+		else {
+			acsi_print_error(acsi_buffer, aip);
+			if (CARTRCH_STAT(aip, acsi_buffer))
+				aip->changed = 1;
+		}
+		bad_rw_intr();
+		redo_acsi_request();
+		return;
+	}
+
+	do_end_requests();
+	redo_acsi_request();
+}
+
+
+static void acsi_times_out( unsigned long dummy )
+
+{
+	DISABLE_IRQ();
+	if (!do_acsi) return;
+
+	do_acsi = NULL;
+	printk( KERN_ERR "ACSI timeout\n" );
+	if (!CURRENT)
+	    return;
+	if (++CURRENT->errors >= MAX_ERRORS) {
+#ifdef DEBUG
+		printk( KERN_ERR "ACSI: too many errors.\n" );
+#endif
+		end_request(CURRENT, 0);
+	}
+
+	redo_acsi_request();
+}
+
+
+
+/***********************************************************************
+ *
+ *  Scatter-gather utility functions
+ *
+ ***********************************************************************/
+
+
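+/* Copy the data of the current request into the ST-RAM bounce buffer;
+ * the ST-DMA can only access ST-RAM, so requests residing in other memory
+ * are staged through acsi_buffer (see the STRAM_ADDR checks below).
+ */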
+static void copy_to_acsibuffer( void )
+
+{	int					i;
+	char				*src, *dst;
+	struct buffer_head	*bh;
+	
+	src = CURRENT->buffer;
+	dst = acsi_buffer;
+	bh = CURRENT->bh;
+
+	if (!bh)
+		memcpy( dst, src, CurrentNSect*512 );
+	else
+		for( i = 0; i < CurrentNReq; ++i ) {
+			memcpy( dst, src, bh->b_size );
+			dst += bh->b_size;
+			if ((bh = bh->b_reqnext))
+				src = bh->b_data;
+		}
+}
+
+
+static void copy_from_acsibuffer( void )
+
+{	int					i;
+	char				*src, *dst;
+	struct buffer_head	*bh;
+	
+	dst = CURRENT->buffer;
+	src = acsi_buffer;
+	bh = CURRENT->bh;
+
+	if (!bh)
+		memcpy( dst, src, CurrentNSect*512 );
+	else
+		for( i = 0; i < CurrentNReq; ++i ) {
+			memcpy( dst, src, bh->b_size );
+			src += bh->b_size;
+			if ((bh = bh->b_reqnext))
+				dst = bh->b_data;
+		}
+}
+
+
+static void do_end_requests( void )
+
+{	int		i, n;
+
+	if (!CURRENT->bh) {
+		CURRENT->nr_sectors -= CurrentNSect;
+		CURRENT->current_nr_sectors -= CurrentNSect;
+		CURRENT->sector += CurrentNSect;
+		if (CURRENT->nr_sectors == 0)
+			end_request(CURRENT, 1);
+	}
+	else {
+		for( i = 0; i < CurrentNReq; ++i ) {
+			n = CURRENT->bh->b_size >> 9;
+			CURRENT->nr_sectors -= n;
+			CURRENT->current_nr_sectors -= n;
+			CURRENT->sector += n;
+			end_request(CURRENT, 1);
+		}
+	}
+}
+
+
+
+
+/***********************************************************************
+ *
+ *  do_acsi_request and friends
+ *
+ ***********************************************************************/
+
+static void do_acsi_request( request_queue_t * q )
+
+{
+	stdma_lock( acsi_interrupt, NULL );
+	redo_acsi_request();
+}
+
+
+static void redo_acsi_request( void )
+{
+	unsigned			block, target, lun, nsect;
+	char 				*buffer;
+	unsigned long		pbuffer;
+	struct buffer_head	*bh;
+	struct gendisk *disk;
+	struct acsi_info_struct *aip;
+
+  repeat:
+	CLEAR_TIMER();
+
+	if (do_acsi)
+		return;
+
+	if (!CURRENT) {
+		do_acsi = NULL;
+		ENABLE_IRQ();
+		stdma_release();
+		return;
+	}
+
+	disk = CURRENT->rq_disk;
+	aip = disk->private_data;
+	if (CURRENT->bh) {
+		if (!buffer_locked(CURRENT->bh))
+			panic("ACSI: block not locked");
+	}
+
+	block = CURRENT->sector;
+	if (block+CURRENT->nr_sectors >= get_capacity(disk)) {
+#ifdef DEBUG
+		printk( "%s: attempted access for blocks %d...%ld past end of device at block %ld.\n",
+		       disk->disk_name,
+		       block, block + CURRENT->nr_sectors - 1,
+		       get_capacity(disk));
+#endif
+		end_request(CURRENT, 0);
+		goto repeat;
+	}
+	if (aip->changed) {
+		printk( KERN_NOTICE "%s: request denied because cartridge has "
+				"been changed.\n", disk->disk_name);
+		end_request(CURRENT, 0);
+		goto repeat;
+	}
+	
+	target = aip->target;
+	lun    = aip->lun;
+
+	/* Find out how many sectors should be transferred from/to
+	 * consecutive buffers and thus can be done with a single command.
+	 */
+	buffer      = CURRENT->buffer;
+	pbuffer     = virt_to_phys(buffer);
+	nsect       = CURRENT->current_nr_sectors;
+	CurrentNReq = 1;
+
+	if ((bh = CURRENT->bh) && bh != CURRENT->bhtail) {
+		if (!STRAM_ADDR(pbuffer)) {
+			/* If transfer is done via the ACSI buffer anyway, we can
+			 * assemble as many bh's as fit in the buffer.
+			 */
+			while( (bh = bh->b_reqnext) ) {
+				if (nsect + (bh->b_size>>9) > ACSI_BUFFER_SECTORS) break;
+				nsect += bh->b_size >> 9;
+				++CurrentNReq;
+				if (bh == CURRENT->bhtail) break;
+			}
+			buffer = acsi_buffer;
+			pbuffer = phys_acsi_buffer;
+		}
+		else {
+			unsigned long pendadr, pnewadr;
+			pendadr = pbuffer + nsect*512;
+			while( (bh = bh->b_reqnext) ) {
+				pnewadr = virt_to_phys(bh->b_data);
+				if (!STRAM_ADDR(pnewadr) || pendadr != pnewadr) break;
+				nsect += bh->b_size >> 9;
+				pendadr = pnewadr + bh->b_size;
+				++CurrentNReq;
+				if (bh == CURRENT->bhtail) break;
+			}
+		}
+	}
+	else {
+		if (!STRAM_ADDR(pbuffer)) {
+			buffer = acsi_buffer;
+			pbuffer = phys_acsi_buffer;
+			if (nsect > ACSI_BUFFER_SECTORS)
+				nsect = ACSI_BUFFER_SECTORS;
+		}
+	}
+	CurrentBuffer = buffer;
+	CurrentNSect  = nsect;
+
+	if (rq_data_dir(CURRENT) == WRITE) {
+		CMDSET_TARG_LUN( write_cmd, target, lun );
+		CMDSET_BLOCK( write_cmd, block );
+		CMDSET_LEN( write_cmd, nsect );
+		if (buffer == acsi_buffer)
+			copy_to_acsibuffer();
+		dma_cache_maintenance( pbuffer, nsect*512, 1 );
+		do_acsi = write_intr;
+		if (!acsicmd_dma( write_cmd, buffer, nsect, 1, 1)) {
+			do_acsi = NULL;
+			printk( KERN_ERR "ACSI (write): Timeout in command block\n" );
+			bad_rw_intr();
+			goto repeat;
+		}
+		SET_TIMER();
+		return;
+	}
+	if (rq_data_dir(CURRENT) == READ) {
+		CMDSET_TARG_LUN( read_cmd, target, lun );
+		CMDSET_BLOCK( read_cmd, block );
+		CMDSET_LEN( read_cmd, nsect );
+		do_acsi = read_intr;
+		if (!acsicmd_dma( read_cmd, buffer, nsect, 0, 1)) {
+			do_acsi = NULL;
+			printk( KERN_ERR "ACSI (read): Timeout in command block\n" );
+			bad_rw_intr();
+			goto repeat;
+		}
+		SET_TIMER();
+		return;
+	}
+	panic("unknown ACSI command");
+}
+
+
+
+/***********************************************************************
+ *
+ *  Misc functions: ioctl, open, release, check_change, ...
+ *
+ ***********************************************************************/
+
+
+static int acsi_ioctl( struct inode *inode, struct file *file,
+					   unsigned int cmd, unsigned long arg )
+{
+	struct gendisk *disk = inode->i_bdev->bd_disk;
+	struct acsi_info_struct *aip = disk->private_data;
+	switch (cmd) {
+	  case HDIO_GETGEO:
+		/* HDIO_GETGEO is supported mainly for getting the partition's
+		 * start sector... */
+	  { struct hd_geometry __user *geo = (struct hd_geometry __user *)arg;
+	    /* just fake some geometry here, it's nonsense anyway; to make it
+	     * easy, use Adaptec's usual 64/32 mapping */
+	    put_user( 64, &geo->heads );
+	    put_user( 32, &geo->sectors );
+	    put_user( aip->size >> 11, &geo->cylinders );
+	    put_user( get_start_sect(inode->i_bdev), &geo->start );
+	    return 0;
+	  }
+	  case SCSI_IOCTL_GET_IDLUN:
+		/* SCSI compatible GET_IDLUN call to get target's ID and LUN number */
+		put_user( aip->target | (aip->lun << 8),
+				  &((Scsi_Idlun __user *)arg)->dev_id );
+		put_user( 0, &((Scsi_Idlun __user *)arg)->host_unique_id );
+		return 0;
+	  default:
+		return -EINVAL;
+	}
+}
+
+
+/*
+ * Open a device, check for read-only and lock the medium if it is
+ * removable.
+ *
+ * Changes by Martin Rogge, 9th Aug 1995:
+ * Check whether check_disk_change (and therefore revalidate_acsidisk)
+ * was successful. They fail when there is no medium in the drive.
+ *
+ * The problem of media being changed during an operation can be 
+ * ignored because of the prevent_removal code.
+ *
+ * Added check for the validity of the device number.
+ *
+ */
+
+static int acsi_open( struct inode * inode, struct file * filp )
+{
+	struct gendisk *disk = inode->i_bdev->bd_disk;
+	struct acsi_info_struct *aip = disk->private_data;
+
+	if (aip->access_count == 0 && aip->removable) {
+#if 0
+		aip->changed = 1;	/* safety first */
+#endif
+		check_disk_change( inode->i_bdev );
+		if (aip->changed)	/* revalidate was not successful (no medium) */
+			return -ENXIO;
+		acsi_prevent_removal(aip, 1);
+	}
+	aip->access_count++;
+
+	if (filp && filp->f_mode) {
+		check_disk_change( inode->i_bdev );
+		if (filp->f_mode & 2) {
+			if (aip->read_only) {
+				acsi_release( inode, filp );
+				return -EROFS;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Releasing a block device means we sync() it, so that it can safely
+ * be forgotten about...
+ */
+
+static int acsi_release( struct inode * inode, struct file * file )
+{
+	struct gendisk *disk = inode->i_bdev->bd_disk;
+	struct acsi_info_struct *aip = disk->private_data;
+	if (--aip->access_count == 0 && aip->removable)
+		acsi_prevent_removal(aip, 0);
+	return( 0 );
+}
+
+/*
+ * Prevent or allow a media change for removable devices.
+ */
+
+static void acsi_prevent_removal(struct acsi_info_struct *aip, int flag)
+{
+	stdma_lock( NULL, NULL );
+	
+	CMDSET_TARG_LUN(pa_med_rem_cmd, aip->target, aip->lun);
+	CMDSET_LEN( pa_med_rem_cmd, flag );
+	
+	if (acsicmd_nodma(pa_med_rem_cmd, 0) && acsi_wait_for_IRQ(3*HZ))
+		acsi_getstatus();
+	/* Do not report errors -- some devices may not know this command. */
+
+	ENABLE_IRQ();
+	stdma_release();
+}
+
+static int acsi_media_change(struct gendisk *disk)
+{
+	struct acsi_info_struct *aip = disk->private_data;
+
+	if (!aip->removable) 
+		return 0;
+
+	if (aip->changed)
+		/* We can be sure that the medium has been changed -- REQUEST
+		 * SENSE has reported this earlier.
+		 */
+		return 1;
+
+	/* If the flag isn't set, make a test by reading block 0.
+	 * If errors happen, it seems to be better to say "changed"...
+	 */
+	stdma_lock( NULL, NULL );
+	CMDSET_TARG_LUN(read_cmd, aip->target, aip->lun);
+	CMDSET_BLOCK( read_cmd, 0 );
+	CMDSET_LEN( read_cmd, 1 );
+	if (acsicmd_dma(read_cmd, acsi_buffer, 1, 0, 0) &&
+	    acsi_wait_for_IRQ(3*HZ)) {
+		if (acsi_getstatus()) {
+			if (acsi_reqsense(acsi_buffer, aip->target, aip->lun)) {
+				if (CARTRCH_STAT(aip, acsi_buffer))
+					aip->changed = 1;
+			}
+			else {
+				printk( KERN_ERR "%s: REQUEST SENSE failed in test for "
+				       "medium change; assuming a change\n", disk->disk_name );
+				aip->changed = 1;
+			}
+		}
+	}
+	else {
+		printk( KERN_ERR "%s: Test for medium change timed out; "
+				"assuming a change\n", disk->disk_name);
+		aip->changed = 1;
+	}
+	ENABLE_IRQ();
+	stdma_release();
+
+	/* Now, after reading a block, the changed status is surely valid. */
+	return aip->changed;
+}
+
+
+static int acsi_change_blk_size( int target, int lun)
+
+{	int i;
+
+	for (i=0; i<12; i++)
+		acsi_buffer[i] = 0;
+
+	acsi_buffer[3] = 8;
+	acsi_buffer[10] = 2;
+	CMDSET_TARG_LUN( modeselect_cmd, target, lun);
+
+	if (!acsicmd_dma( modeselect_cmd, acsi_buffer, 1,1,0) ||
+		!acsi_wait_for_IRQ( 3*HZ ) ||
+		acsi_getstatus() != 0 ) {
+		return(0);
+	}
+	return(1);
+}
+
+
+static int acsi_mode_sense( int target, int lun, SENSE_DATA *sd )
+
+{
+	int page;
+
+	CMDSET_TARG_LUN( modesense_cmd, target, lun );
+	for (page=0; page<4; page++) {
+		modesense_cmd[2] = page;
+		if (!acsicmd_dma( modesense_cmd, acsi_buffer, 1, 0, 0 ) ||
+		    !acsi_wait_for_IRQ( 3*HZ ) ||
+		    acsi_getstatus())
+			continue;
+
+		/* read twice to jump over the second 16-byte border! */
+		udelay(300);
+		if (acsi_wait_for_noIRQ( 20 ) &&
+		    acsicmd_nodma( modesense_cmd, 0 ) &&
+		    acsi_wait_for_IRQ( 3*HZ ) &&
+		    acsi_getstatus() == 0)
+			break;
+	}
+	if (page == 4) {
+		return(0);
+	}
+
+	dma_cache_maintenance( phys_acsi_buffer, sizeof(SENSE_DATA), 0 );
+	*sd = *(SENSE_DATA *)acsi_buffer;
+
+	/* Validity check, depending on type of data */
+	
+	switch( SENSE_TYPE(*sd) ) {
+
+	  case SENSE_TYPE_ATARI:
+		if (CAPACITY(*sd) == 0)
+			goto invalid_sense;
+		break;
+
+	  case SENSE_TYPE_SCSI:
+		if (sd->scsi.descriptor_size != 8)
+			goto invalid_sense;
+		break;
+
+	  case SENSE_TYPE_UNKNOWN:
+
+		printk( KERN_ERR "ACSI target %d, lun %d: Cannot interpret "
+				"sense data\n", target, lun ); 
+		
+	  invalid_sense:
+
+#ifdef DEBUG
+		{	int i;
+		printk( "Mode sense data for ACSI target %d, lun %d seem not valid:",
+				target, lun );
+		for( i = 0; i < sizeof(SENSE_DATA); ++i )
+			printk( "%02x ", (unsigned char)acsi_buffer[i] );
+		printk( "\n" );
+		}
+#endif
+		return( 0 );
+	}
+		
+	return( 1 );
+}
+
+
+
+/*******************************************************************
+ *
+ *  Initialization
+ *
+ ********************************************************************/
+
+
+extern struct block_device_operations acsi_fops;
+
+static struct gendisk *acsi_gendisk[MAX_DEV];
+
+#define MAX_SCSI_DEVICE_CODE 10
+
+static const char *const scsi_device_types[MAX_SCSI_DEVICE_CODE] =
+{
+ "Direct-Access    ",
+ "Sequential-Access",
+ "Printer          ",
+ "Processor        ",
+ "WORM             ",
+ "CD-ROM           ",
+ "Scanner          ",
+ "Optical Device   ",
+ "Medium Changer   ",
+ "Communications   "
+};
+
+static void print_inquiry(unsigned char *data)
+{
+	int i;
+
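+	/* standard SCSI INQUIRY layout: bytes 8-15 hold the vendor id,
+	 * bytes 16-31 the product id and bytes 32-35 the revision; data[4]
+	 * is the additional-length field, so only bytes below data[4]+5
+	 * are valid */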
+	printk(KERN_INFO "  Vendor: ");
+	for (i = 8; i < 16; i++) {
+		if (data[i] >= 0x20 && i < data[4] + 5)
+			printk("%c", data[i]);
+		else
+			printk(" ");
+	}
+
+	printk("  Model: ");
+	for (i = 16; i < 32; i++) {
+		if (data[i] >= 0x20 && i < data[4] + 5)
+			printk("%c", data[i]);
+		else
+			printk(" ");
+	}
+
+	printk("  Rev: ");
+	for (i = 32; i < 36; i++) {
+		if (data[i] >= 0x20 && i < data[4] + 5)
+			printk("%c", data[i]);
+		else
+			printk(" ");
+	}
+
+	printk("\n");
+
+	i = data[0] & 0x1f;
+
+	printk(KERN_INFO "  Type:   %s ", (i < MAX_SCSI_DEVICE_CODE
+									   ? scsi_device_types[i]
+									   : "Unknown          "));
+	printk("                 ANSI SCSI revision: %02x", data[2] & 0x07);
+	if ((data[2] & 0x07) == 1 && (data[3] & 0x0f) == 1)
+	  printk(" CCS\n");
+	else
+	  printk("\n");
+}
+
+
+/* 
+ * Changes by Martin Rogge, 9th Aug 1995: 
+ * acsi_devinit has been taken out of acsi_geninit, because it needs 
+ * to be called from revalidate_acsidisk. The result of request sense 
+ * is now checked for DRIVE NOT READY.
+ *
+ * The structure *aip is only valid when acsi_devinit returns 
+ * DEV_SUPPORTED. 
+ *
+ */
+	
+#define DEV_NONE	0
+#define DEV_UNKNOWN	1
+#define DEV_SUPPORTED	2
+#define DEV_SLM		3
+
+static int acsi_devinit(struct acsi_info_struct *aip)
+{
+	int status, got_inquiry;
+	SENSE_DATA sense;
+	unsigned char reqsense, extsense;
+
+	/*****************************************************************/
+	/* Do a TEST UNIT READY command to test the presence of a device */
+	/*****************************************************************/
+
+	CMDSET_TARG_LUN(tur_cmd, aip->target, aip->lun);
+	if (!acsicmd_nodma(tur_cmd, 0)) {
+		/* timed out -> no device here */
+#ifdef DEBUG_DETECT
+		printk("target %d lun %d: timeout\n", aip->target, aip->lun);
+#endif
+		return DEV_NONE;
+	}
+		
+	/*************************/
+	/* Read the ACSI status. */
+	/*************************/
+
+	status = acsi_getstatus();
+	if (status) {
+		if (status == 0x12) {
+			/* The SLM printer should be the only device that
+			 * responds with the error code in the status byte. In
+			 * correct status bytes, bit 4 is never set.
+			 */
+			printk( KERN_INFO "Detected SLM printer at id %d lun %d\n",
+			       aip->target, aip->lun);
+			return DEV_SLM;
+		}
+		/* ignore CHECK CONDITION, since some devices send a
+		   UNIT ATTENTION */
+		if ((status & 0x1e) != 0x2) {
+#ifdef DEBUG_DETECT
+			printk("target %d lun %d: status %d\n",
+			       aip->target, aip->lun, status);
+#endif
+			return DEV_UNKNOWN;
+		}
+	}
+
+	/*******************************/
+	/* Do a REQUEST SENSE command. */
+	/*******************************/
+
+	if (!acsi_reqsense(acsi_buffer, aip->target, aip->lun)) {
+		printk( KERN_WARNING "acsi_reqsense failed\n");
+		acsi_buffer[0] = 0;
+		acsi_buffer[2] = UNIT_ATTENTION;
+	}
+	reqsense = acsi_buffer[0];
+	extsense = acsi_buffer[2] & 0xf;
+	if (status) {
+		if ((reqsense & 0x70) == 0x70) {	/* extended sense */
+			if (extsense != UNIT_ATTENTION &&
+			    extsense != NOT_READY) {
+#ifdef DEBUG_DETECT
+				printk("target %d lun %d: extended sense %d\n",
+				       aip->target, aip->lun, extsense);
+#endif
+				return DEV_UNKNOWN;
+			}
+		}
+		else {
+			if (reqsense & 0x7f) {
+#ifdef DEBUG_DETECT
+				printk("target %d lun %d: sense %d\n",
+				       aip->target, aip->lun, reqsense);
+#endif
+				return DEV_UNKNOWN;
+			}
+		}
+	}
+	else 
+		if (reqsense == 0x4) {	/* SH204 Bug workaround */
+#ifdef DEBUG_DETECT
+			printk("target %d lun %d status=0 sense=4\n",
+			       aip->target, aip->lun);
+#endif
+			return DEV_UNKNOWN;
+		}
+
+	/***********************************************************/
+	/* Do an INQUIRY command to get more infos on this device. */
+	/***********************************************************/
+
+	/* Assume default values */
+	aip->removable = 1;
+	aip->read_only = 0;
+	aip->old_atari_disk = 0;
+	aip->changed = (extsense == NOT_READY);	/* medium inserted? */
+	aip->size = DEFAULT_SIZE;
+	got_inquiry = 0;
+	/* Fake inquiry result for old atari disks */
+	memcpy(acsi_buffer, "\000\000\001\000    Adaptec 40xx"
+	       "                    ", 40);
+	CMDSET_TARG_LUN(inquiry_cmd, aip->target, aip->lun);
+	if (acsicmd_dma(inquiry_cmd, acsi_buffer, 1, 0, 0) &&
+	    acsi_getstatus() == 0) {
+		acsicmd_nodma(inquiry_cmd, 0);
+		acsi_getstatus();
+		dma_cache_maintenance( phys_acsi_buffer, 256, 0 );
+		got_inquiry = 1;
+		aip->removable = !!(acsi_buffer[1] & 0x80);
+	}
+	if (aip->type == NONE)	/* only at boot time */
+		print_inquiry(acsi_buffer);
+	switch(acsi_buffer[0]) {
+	  case TYPE_DISK:
+		aip->type = HARDDISK;
+		break;
+	  case TYPE_ROM:
+		aip->type = CDROM;
+		aip->read_only = 1;
+		break;
+	  default:
+		return DEV_UNKNOWN;
+	}
+	/****************************/
+	/* Do a MODE SENSE command. */
+	/****************************/
+
+	if (!acsi_mode_sense(aip->target, aip->lun, &sense)) {
+		printk( KERN_WARNING "No mode sense data.\n" );
+		return DEV_UNKNOWN;
+	}
+	if ((SECTOR_SIZE(sense) != 512) &&
+	    ((aip->type != CDROM) ||
+	     !acsi_change_blk_size(aip->target, aip->lun) ||
+	     !acsi_mode_sense(aip->target, aip->lun, &sense) ||
+	     (SECTOR_SIZE(sense) != 512))) {
+		printk( KERN_WARNING "Sector size != 512 not supported.\n" );
+		return DEV_UNKNOWN;
+	}
+	/* There are disks out there that claim to have 0 sectors... */
+	if (CAPACITY(sense))
+		aip->size = CAPACITY(sense);	/* else keep DEFAULT_SIZE */
+	if (!got_inquiry && SENSE_TYPE(sense) == SENSE_TYPE_ATARI) {
+		/* If INQUIRY failed and the sense data suggest an old
+		 * Atari disk (SH20x, Megafile), the disk is not removable
+		 */
+		aip->removable = 0;
+		aip->old_atari_disk = 1;
+	}
+	
+	/******************/
+	/* We've done it. */
+	/******************/
+	
+	return DEV_SUPPORTED;
+}
+
+EXPORT_SYMBOL(acsi_delay_start);
+EXPORT_SYMBOL(acsi_delay_end);
+EXPORT_SYMBOL(acsi_wait_for_IRQ);
+EXPORT_SYMBOL(acsi_wait_for_noIRQ);
+EXPORT_SYMBOL(acsicmd_nodma);
+EXPORT_SYMBOL(acsi_getstatus);
+EXPORT_SYMBOL(acsi_buffer);
+EXPORT_SYMBOL(phys_acsi_buffer);
+
+#ifdef CONFIG_ATARI_SLM_MODULE
+void acsi_attach_SLMs( int (*attach_func)( int, int ) );
+
+EXPORT_SYMBOL(acsi_extstatus);
+EXPORT_SYMBOL(acsi_end_extstatus);
+EXPORT_SYMBOL(acsi_extcmd);
+EXPORT_SYMBOL(acsi_attach_SLMs);
+
+/* to remember IDs of SLM devices, since the SLM module is loaded later
+ * (index is target#, contents is lun#, -1 means "no SLM") */
+int SLM_devices[8];
+#endif
+
+static struct block_device_operations acsi_fops = {
+	.owner		= THIS_MODULE,
+	.open		= acsi_open,
+	.release	= acsi_release,
+	.ioctl		= acsi_ioctl,
+	.media_changed	= acsi_media_change,
+	.revalidate_disk= acsi_revalidate,
+};
+
+#ifdef CONFIG_ATARI_SLM_MODULE
+/* call attach_slm() for each device that is a printer; needed to init the
+ * SLM driver as a module, since it is not yet present when acsi.c is
+ * initialized and the bus gets scanned. */
+void acsi_attach_SLMs( int (*attach_func)( int, int ) )
+{
+	int i, n = 0;
+
+	for( i = 0; i < 8; ++i )
+		if (SLM_devices[i] >= 0)
+			n += (*attach_func)( i, SLM_devices[i] );
+	printk( KERN_INFO "Found %d SLM printer(s) total.\n", n );
+}
+#endif /* CONFIG_ATARI_SLM_MODULE */
+
+
+int acsi_init( void )
+{
+	int err = 0;
+	int i, target, lun;
+	struct acsi_info_struct *aip;
+#ifdef CONFIG_ATARI_SLM
+	int n_slm = 0;
+#endif
+	if (!MACH_IS_ATARI || !ATARIHW_PRESENT(ACSI))
+		return 0;
+	if (register_blkdev(ACSI_MAJOR, "ad")) {
+		err = -EBUSY;
+		goto out1;
+	}
+	if (!(acsi_buffer =
+		  (char *)atari_stram_alloc(ACSI_BUFFER_SIZE, "acsi"))) {
+		err = -ENOMEM;
+		printk( KERN_ERR "Unable to get ACSI ST-Ram buffer.\n" );
+		goto out2;
+	}
+	phys_acsi_buffer = virt_to_phys( acsi_buffer );
+	STramMask = ATARIHW_PRESENT(EXTD_DMA) ? 0x00000000 : 0xff000000;
+	
+	acsi_queue = blk_init_queue(do_acsi_request, &acsi_lock);
+	if (!acsi_queue) {
+		err = -ENOMEM;
+		goto out2a;
+	}
+#ifdef CONFIG_ATARI_SLM
+	err = slm_init();
+#endif
+	if (err)
+		goto out3;
+
+	printk( KERN_INFO "Probing ACSI devices:\n" );
+	NDevices = 0;
+#ifdef CONFIG_ATARI_SLM_MODULE
+	for( i = 0; i < 8; ++i )
+		SLM_devices[i] = -1;
+#endif
+	stdma_lock(NULL, NULL);
+
+	for (target = 0; target < 8 && NDevices < MAX_DEV; ++target) {
+		lun = 0;
+		do {
+			aip = &acsi_info[NDevices];
+			aip->type = NONE;
+			aip->target = target;
+			aip->lun = lun;
+			i = acsi_devinit(aip);
+			switch (i) {
+			  case DEV_SUPPORTED:
+				printk( KERN_INFO "Detected ");
+				switch (aip->type) {
+				  case HARDDISK:
+					printk("disk");
+					break;
+				  case CDROM:
+					printk("cdrom");
+					break;
+				  default:
+					break;
+				}
+				printk(" ad%c at id %d lun %d ",
+				       'a' + NDevices, target, lun);
+				if (aip->removable) 
+					printk("(removable) ");
+				if (aip->read_only) 
+					printk("(read-only) ");
+				if (aip->size == DEFAULT_SIZE)
+					printk(" unkown size, using default ");
+				printk("%ld MByte\n",
+				       (aip->size*512+1024*1024/2)/(1024*1024));
+				NDevices++;
+				break;
+			  case DEV_SLM:
+#ifdef CONFIG_ATARI_SLM
+				n_slm += attach_slm( target, lun );
+				break;
+#endif
+#ifdef CONFIG_ATARI_SLM_MODULE
+				SLM_devices[target] = lun;
+				break;
+#endif
+				/* neither of the above: fall through to unknown device */
+			  case DEV_UNKNOWN:
+				printk( KERN_INFO "Detected unsupported device at "
+						"id %d lun %d\n", target, lun);
+				break;
+			}
+		}
+#ifdef CONFIG_ACSI_MULTI_LUN
+		while (i != DEV_NONE && ++lun < MAX_LUN);
+#else
+		while (0);
+#endif
+	}
+
+	/* reenable interrupt */
+	ENABLE_IRQ();
+	stdma_release();
+
+#ifndef CONFIG_ATARI_SLM
+	printk( KERN_INFO "Found %d ACSI device(s) total.\n", NDevices );
+#else
+	printk( KERN_INFO "Found %d ACSI device(s) and %d SLM printer(s) total.\n",
+			NDevices, n_slm );
+#endif
+	err = -ENOMEM;
+	for( i = 0; i < NDevices; ++i ) {
+		acsi_gendisk[i] = alloc_disk(16);
+		if (!acsi_gendisk[i])
+			goto out4;
+	}
+
+	for( i = 0; i < NDevices; ++i ) {
+		struct gendisk *disk = acsi_gendisk[i];
+		sprintf(disk->disk_name, "ad%c", 'a'+i);
+		aip = &acsi_info[i];
+		sprintf(disk->devfs_name, "ad/target%d/lun%d", aip->target, aip->lun);
+		disk->major = ACSI_MAJOR;
+		disk->first_minor = i << 4;
+		if (acsi_info[i].type != HARDDISK) {
+			disk->minors = 1;
+			strcat(disk->devfs_name, "/disc");
+		}
+		disk->fops = &acsi_fops;
+		disk->private_data = &acsi_info[i];
+		set_capacity(disk, acsi_info[i].size);
+		disk->queue = acsi_queue;
+		add_disk(disk);
+	}
+	return 0;
+out4:
+	while (i--)
+		put_disk(acsi_gendisk[i]);
+out3:
+	blk_cleanup_queue(acsi_queue);
+out2a:
+	atari_stram_free( acsi_buffer );
+out2:
+	unregister_blkdev( ACSI_MAJOR, "ad" );
+out1:
+	return err;
+}
+
+
+#ifdef MODULE
+
+MODULE_LICENSE("GPL");
+
+int init_module(void)
+{
+	int err;
+
+	if ((err = acsi_init()))
+		return( err );
+	printk( KERN_INFO "ACSI driver loaded as module.\n");
+	return( 0 );
+}
+
+void cleanup_module(void)
+{
+	int i;
+	del_timer( &acsi_timer );
+	blk_cleanup_queue(acsi_queue);
+	atari_stram_free( acsi_buffer );
+
+	if (unregister_blkdev( ACSI_MAJOR, "ad" ) != 0)
+		printk( KERN_ERR "acsi: cleanup_module failed\n");
+
+	for (i = 0; i < NDevices; i++) {
+		del_gendisk(acsi_gendisk[i]);
+		put_disk(acsi_gendisk[i]);
+	}
+}
+#endif
+
+/*
+ * This routine is called to flush all partitions and partition tables
+ * for a changed scsi disk, and then re-read the new partition table.
+ * If we are revalidating a disk because of a media change, then we
+ * enter with usage == 0.  If we are using an ioctl, we automatically have
+ * usage == 1 (we need an open channel to use an ioctl :-), so this
+ * is our limit.
+ *
+ * Changes by Martin Rogge, 9th Aug 1995: 
+ * got cd-roms to work by calling acsi_devinit. There are only two problems:
+ * First, if there is no medium inserted, the status will remain "changed".
+ * That is no problem at all, but it breaks our design of three-valued
+ * logic (medium changed, medium not changed, no medium inserted).
+ * Secondly, the check could fail completely and the drive could deliver
+ * nonsensical data, which could mess up the acsi_info[] structure. In
+ * that case we try to make the entry safe.
+ *
+ */
+
+static int acsi_revalidate(struct gendisk *disk)
+{
+	struct acsi_info_struct *aip = disk->private_data;
+	stdma_lock( NULL, NULL );
+	if (acsi_devinit(aip) != DEV_SUPPORTED) {
+		printk( KERN_ERR "ACSI: revalidate failed for target %d lun %d\n",
+		       aip->target, aip->lun);
+		aip->size = 0;
+		aip->read_only = 1;
+		aip->removable = 1;
+		aip->changed = 1; /* next acsi_open will try again... */
+	}
+
+	ENABLE_IRQ();
+	stdma_release();
+	set_capacity(disk, aip->size);
+	return 0;
+}
diff --git a/drivers/block/acsi_slm.c b/drivers/block/acsi_slm.c
new file mode 100644
index 0000000..e3be8c3
--- /dev/null
+++ b/drivers/block/acsi_slm.c
@@ -0,0 +1,1045 @@
+/*
+ * acsi_slm.c -- Device driver for the Atari SLM laser printer
+ *
+ * Copyright 1995 Roman Hodek <Roman.Hodek@informatik.uni-erlangen.de>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file COPYING in the main directory of this archive for
+ * more details.
+ * 
+ */
+
+/*
+
+Notes:
+
+The major number for SLM printers is 28 (like ACSI), but as a character
+device, not block device. The minor number is the number of the printer (if
+you have more than one SLM; currently max. 2 (#define-constant) SLMs are
+supported). The device can be opened for reading and writing. If reading it,
+you get some status infos (MODE SENSE data). Writing mode is used for the data
+to be printed. Some ioctls allow to get the printer status and to tune printer
+modes and some internal variables.
+
+A special problem of the SLM driver is the timing and thus the buffering of
+the print data. The problem is that all the data for one page must be present
+in memory when printing starts, else --when swapping occurs-- the timing could
+not be guaranteed. There are several ways to assure this:
+
+ 1) Reserve a buffer of 1196k (maximum page size) statically by
+    atari_stram_alloc(). The data are collected there until they're complete,
+	and then printing starts. Since the buffer is reserved, no further
+	considerations about memory and swapping are needed. So this is the
+	simplest method, but it needs a lot of memory for just the SLM.
+
+    A striking advantage of this method is (supposing the SLM_CONT_CNT_REPROG
+	method works, see there) that there are no timing problems with the DMA
+	anymore.
+	
+ 2) The other method would be to reserve the buffer dynamically each time
+    printing is required. I could think of looking at mem_map where the
+	largest unallocted ST-RAM area is, taking the area, and then extending it
+	by swapping out the neighbored pages, until the needed size is reached.
+	This requires some mm hacking, but seems possible. The only obstacle could
+	be pages that cannot be swapped out (reserved pages)...
+
+ 3) Another possibility would be to leave the real data in user space and to
+    work with two dribble buffers of about 32k in the driver: While the one
+	buffer is DMAed to the SLM, the other can be filled with new data. But
+	to keep the timing, that requires that the user data remain in memory and
+	are not swapped out. Requires mm hacking, too, but maybe not so bad as
+	method 2).
+
+*/
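+/*
+ * Usage sketch, assuming a character device node /dev/slm0 exists for
+ * minor 0 (the name is illustrative, any node on the SLM major works):
+ *
+ *	cat /dev/slm0			# read back status/MODE SENSE info
+ *	cat page.bitmap > /dev/slm0	# feed the bitmap data of one page
+ *
+ * Printing starts only once a full page (w*h/8 bytes) has been written.
+ */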
+
+#include <linux/module.h>
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/fs.h>
+#include <linux/major.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/devfs_fs_kernel.h>
+#include <linux/smp_lock.h>
+
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/atarihw.h>
+#include <asm/atariints.h>
+#include <asm/atari_acsi.h>
+#include <asm/atari_stdma.h>
+#include <asm/atari_stram.h>
+#include <asm/atari_SLM.h>
+
+
+#undef	DEBUG
+
+/* Define this if the page data are continuous in physical memory. That
+ * requires less reprogramming of the ST-DMA */
+#define	SLM_CONTINUOUS_DMA
+
+/* Use continuous reprogramming of the ST-DMA counter register. This is
+ * --strictly speaking-- not allowed, Atari recommends not to look at the
+ * counter register while a DMA is going on. But I don't know if that applies
+ * only for reading the register, or also writing to it. Writing only works
+ * fine for me... The advantage is that the timing becomes absolutely
+ * uncritical: just update the counter reg to its maximum every, say, 200ms,
+ * and the DMA will work until the status byte interrupt occurs.
+ */
+#define	SLM_CONT_CNT_REPROG
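+/* (the periodic reprogramming of the counter register is done in
+ * slm_test_ready() below) */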
+
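+/* patch the ACSI target and LUN into the top three bits of the first two
+ * command bytes */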
+#define CMDSET_TARG_LUN(cmd,targ,lun)			\
+    do {										\
+		cmd[0] = (cmd[0] & ~0xe0) | (targ)<<5;	\
+		cmd[1] = (cmd[1] & ~0xe0) | (lun)<<5;	\
+	} while(0)
+
+#define	START_TIMER(to)	mod_timer(&slm_timer, jiffies + (to))
+#define	STOP_TIMER()	del_timer(&slm_timer)
+
+
+static char slmreqsense_cmd[6] = { 0x03, 0, 0, 0, 0, 0 };
+static char slmprint_cmd[6]    = { 0x0a, 0, 0, 0, 0, 0 };
+static char slminquiry_cmd[6]  = { 0x12, 0, 0, 0, 0, 0x80 };
+static char slmmsense_cmd[6]   = { 0x1a, 0, 0, 0, 255, 0 };
+#if 0
+static char slmmselect_cmd[6]  = { 0x15, 0, 0, 0, 0, 0 };
+#endif
+
+
+#define	MAX_SLM		2
+
+static struct slm {
+	unsigned	target;			/* target number */
+	unsigned	lun;			/* LUN in target controller */
+	atomic_t	wr_ok; 			/* set to 0 if output part busy */
+	atomic_t	rd_ok;			/* set to 0 if status part busy */
+} slm_info[MAX_SLM];
+
+int N_SLM_Printers = 0;
+
+/* printer buffer */
+static unsigned char	*SLMBuffer;	/* start of buffer */
+static unsigned char	*BufferP;	/* current position in buffer */
+static int				BufferSize;	/* length of buffer for page size */
+
+typedef enum { IDLE, FILLING, PRINTING } SLMSTATE;
+static SLMSTATE			SLMState;
+static int				SLMBufOwner;	/* SLM# currently using the buffer */
+
+/* DMA variables */
+#ifndef SLM_CONT_CNT_REPROG
+static unsigned long	SLMCurAddr;		/* current base addr of DMA chunk */
+static unsigned long	SLMEndAddr;		/* expected end addr */
+static unsigned long	SLMSliceSize;	/* size of one DMA chunk */
+#endif
+static int				SLMError;
+
+/* wait queues */
+static DECLARE_WAIT_QUEUE_HEAD(slm_wait);	/* waiting for buffer */
+static DECLARE_WAIT_QUEUE_HEAD(print_wait);	/* waiting for printing finished */
+
+/* status codes */
+#define	SLMSTAT_OK		0x00
+#define	SLMSTAT_ORNERY	0x02
+#define	SLMSTAT_TONER	0x03
+#define	SLMSTAT_WARMUP	0x04
+#define	SLMSTAT_PAPER	0x05
+#define	SLMSTAT_DRUM	0x06
+#define	SLMSTAT_INJAM	0x07
+#define	SLMSTAT_THRJAM	0x08
+#define	SLMSTAT_OUTJAM	0x09
+#define	SLMSTAT_COVER	0x0a
+#define	SLMSTAT_FUSER	0x0b
+#define	SLMSTAT_IMAGER	0x0c
+#define	SLMSTAT_MOTOR	0x0d
+#define	SLMSTAT_VIDEO	0x0e
+#define	SLMSTAT_SYSTO	0x10
+#define	SLMSTAT_OPCODE	0x12
+#define	SLMSTAT_DEVNUM	0x15
+#define	SLMSTAT_PARAM	0x1a
+#define	SLMSTAT_ACSITO	0x1b	/* driver defined */
+#define	SLMSTAT_NOTALL	0x1c	/* driver defined */
+
+static char *SLMErrors[] = {
+	/* 0x00 */	"OK and ready",
+	/* 0x01 */	NULL,
+	/* 0x02 */	"ornery printer",
+	/* 0x03 */	"toner empty",
+	/* 0x04 */	"warming up",
+	/* 0x05 */	"paper empty",
+	/* 0x06 */	"drum empty",
+	/* 0x07 */	"input jam",
+	/* 0x08 */	"through jam",
+	/* 0x09 */	"output jam",
+	/* 0x0a */	"cover open",
+	/* 0x0b */	"fuser malfunction",
+	/* 0x0c */	"imager malfunction",
+	/* 0x0d */	"motor malfunction",
+	/* 0x0e */	"video malfunction",
+	/* 0x0f */	NULL,
+	/* 0x10 */	"printer system timeout",
+	/* 0x11 */	NULL,
+	/* 0x12 */	"invalid operation code",
+	/* 0x13 */	NULL,
+	/* 0x14 */	NULL,
+	/* 0x15 */	"invalid device number",
+	/* 0x16 */	NULL,
+	/* 0x17 */	NULL,
+	/* 0x18 */	NULL,
+	/* 0x19 */	NULL,
+	/* 0x1a */	"invalid parameter list",
+	/* 0x1b */	"ACSI timeout",
+	/* 0x1c */	"not all printed"
+};
+
+#define	N_ERRORS	(sizeof(SLMErrors)/sizeof(*SLMErrors))
+
+/* real (driver caused) error? */
+#define	IS_REAL_ERROR(x)	(x > 0x10)
+
+
+static struct {
+	char	*name;
+	int 	w, h;
+} StdPageSize[] = {
+	{ "Letter", 2400, 3180 },
+	{ "Legal",  2400, 4080 },
+	{ "A4",     2336, 3386 },
+	{ "B5",     2016, 2914 }
+};
+
+#define	N_STD_SIZES		(sizeof(StdPageSize)/sizeof(*StdPageSize))
+
+#define	SLM_BUFFER_SIZE	(2336*3386/8)	/* A4 for now */
+#define	SLM_DMA_AMOUNT	255				/* #sectors to program the DMA for */
+
+#ifdef	SLM_CONTINUOUS_DMA
+# define	SLM_DMA_INT_OFFSET	0		/* DMA goes until seccnt 0, no offs */
+# define	SLM_DMA_END_OFFSET	32		/* 32 Byte ST-DMA FIFO */
+# define	SLM_SLICE_SIZE(w) 	(255*512)
+#else
+# define	SLM_DMA_INT_OFFSET	32		/* 32 Byte ST-DMA FIFO */
+# define	SLM_DMA_END_OFFSET	32		/* 32 Byte ST-DMA FIFO */
+# define	SLM_SLICE_SIZE(w)	((254*512)/(w/8)*(w/8))
+#endif
+
+/* calculate the number of jiffies to wait for 'n' bytes */
+#ifdef SLM_CONT_CNT_REPROG
+#define	DMA_TIME_FOR(n)		50
+#define	DMA_STARTUP_TIME	0
+#else
+#define	DMA_TIME_FOR(n)		(n/1400-1)
+#define	DMA_STARTUP_TIME	650
+#endif
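+/* (in the non-reprogramming case the constant 1400 bytes per jiffy above
+ * amounts to roughly 140 kB/s at HZ=100) */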
+
+/***************************** Prototypes *****************************/
+
+static char *slm_errstr( int stat );
+static int slm_getstats( char *buffer, int device );
+static ssize_t slm_read( struct file* file, char *buf, size_t count, loff_t
+                         *ppos );
+static void start_print( int device );
+static irqreturn_t slm_interrupt(int irc, void *data, struct pt_regs *fp);
+static void slm_test_ready( unsigned long dummy );
+static void set_dma_addr( unsigned long paddr );
+static unsigned long get_dma_addr( void );
+static ssize_t slm_write( struct file *file, const char *buf, size_t count,
+                          loff_t *ppos );
+static int slm_ioctl( struct inode *inode, struct file *file, unsigned int
+                      cmd, unsigned long arg );
+static int slm_open( struct inode *inode, struct file *file );
+static int slm_release( struct inode *inode, struct file *file );
+static int slm_req_sense( int device );
+static int slm_mode_sense( int device, char *buffer, int abs_flag );
+#if 0
+static int slm_mode_select( int device, char *buffer, int len, int
+                            default_flag );
+#endif
+static int slm_get_pagesize( int device, int *w, int *h );
+
+/************************* End of Prototypes **************************/
+
+
+static struct timer_list slm_timer = TIMER_INITIALIZER(slm_test_ready, 0, 0);
+
+static struct file_operations slm_fops = {
+	.owner =	THIS_MODULE,
+	.read =		slm_read,
+	.write =	slm_write,
+	.ioctl =	slm_ioctl,
+	.open =		slm_open,
+	.release =	slm_release,
+};
+
+
+/* ---------------------------------------------------------------------- */
+/*							   Status Functions							  */
+
+
+static char *slm_errstr( int stat )
+
+{	char *p;
+	static char	str[22];
+
+	stat &= 0x1f;
+	if (stat >= 0 && stat < N_ERRORS && (p = SLMErrors[stat]))
+		return( p );
+	sprintf( str, "unknown status 0x%02x", stat );
+	return( str );
+}
+
+
+static int slm_getstats( char *buffer, int device )
+
+{	int 			len = 0, stat, i, w, h;
+	unsigned char	buf[256];
+	
+	stat = slm_mode_sense( device, buf, 0 );
+	if (IS_REAL_ERROR(stat))
+		return( -EIO );
+	
+#define SHORTDATA(i)		((buf[i] << 8) | buf[i+1])
+#define	BOOLDATA(i,mask)	((buf[i] & mask) ? "on" : "off")
+
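+	/* 16-bit quantities in the MODE SENSE block are big-endian, hence
+	 * the byte shuffling in SHORTDATA() above */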
+	w = SHORTDATA( 3 );
+	h = SHORTDATA( 1 );
+		
+	len += sprintf( buffer+len, "Status\t\t%s\n",
+					slm_errstr( stat ) );
+	len += sprintf( buffer+len, "Page Size\t%dx%d",
+					w, h );
+
+	for( i = 0; i < N_STD_SIZES; ++i ) {
+		if (w == StdPageSize[i].w && h == StdPageSize[i].h)
+			break;
+	}
+	if (i < N_STD_SIZES)
+		len += sprintf( buffer+len, " (%s)", StdPageSize[i].name );
+	buffer[len++] = '\n';
+
+	len += sprintf( buffer+len, "Top/Left Margin\t%d/%d\n",
+					SHORTDATA( 5 ), SHORTDATA( 7 ) );
+	len += sprintf( buffer+len, "Manual Feed\t%s\n",
+					BOOLDATA( 9, 0x01 ) );
+	len += sprintf( buffer+len, "Input Select\t%d\n",
+					(buf[9] >> 1) & 7 );
+	len += sprintf( buffer+len, "Auto Select\t%s\n",
+					BOOLDATA( 9, 0x10 ) );
+	len += sprintf( buffer+len, "Prefeed Paper\t%s\n",
+					BOOLDATA( 9, 0x20 ) );
+	len += sprintf( buffer+len, "Thick Pixels\t%s\n",
+					BOOLDATA( 9, 0x40 ) );
+	len += sprintf( buffer+len, "H/V Resol.\t%d/%d dpi\n",
+					SHORTDATA( 12 ), SHORTDATA( 10 ) );
+	len += sprintf( buffer+len, "System Timeout\t%d\n",
+					buf[14] );
+	len += sprintf( buffer+len, "Scan Time\t%d\n",
+					SHORTDATA( 15 ) );
+	len += sprintf( buffer+len, "Page Count\t%d\n",
+					SHORTDATA( 17 ) );
+	len += sprintf( buffer+len, "In/Out Cap.\t%d/%d\n",
+					SHORTDATA( 19 ), SHORTDATA( 21 ) );
+	len += sprintf( buffer+len, "Stagger Output\t%s\n",
+					BOOLDATA( 23, 0x01 ) );
+	len += sprintf( buffer+len, "Output Select\t%d\n",
+					(buf[23] >> 1) & 7 );
+	len += sprintf( buffer+len, "Duplex Print\t%s\n",
+					BOOLDATA( 23, 0x10 ) );
+	len += sprintf( buffer+len, "Color Sep.\t%s\n",
+					BOOLDATA( 23, 0x20 ) );
+
+	return( len );
+}
+
+
+static ssize_t slm_read( struct file *file, char *buf, size_t count,
+						 loff_t *ppos )
+
+{
+	struct inode *node = file->f_dentry->d_inode;
+	unsigned long page;
+	int length;
+	int end;
+
+	if (count < 0)
+		return( -EINVAL );
+	if (!(page = __get_free_page( GFP_KERNEL )))
+		return( -ENOMEM );
+	
+	length = slm_getstats( (char *)page, iminor(node) );
+	if (length < 0) {
+		count = length;
+		goto out;
+	}
+	if (file->f_pos >= length) {
+		count = 0;
+		goto out;
+	}
+	if (count + file->f_pos > length)
+		count = length - file->f_pos;
+	end = count + file->f_pos;
+	if (copy_to_user(buf, (char *)page + file->f_pos, count)) {
+		count = -EFAULT;
+		goto out;
+	}
+	file->f_pos = end;
+out:	free_page( page );
+	return( count );
+}
+
+
+/* ---------------------------------------------------------------------- */
+/*								   Printing								  */
+
+
+static void start_print( int device )
+
+{	struct slm *sip = &slm_info[device];
+	unsigned char	*cmd;
+	unsigned long	paddr;
+	int				i;
+	
+	stdma_lock( slm_interrupt, NULL );
+
+	CMDSET_TARG_LUN( slmprint_cmd, sip->target, sip->lun );
+	cmd = slmprint_cmd;
+	paddr = virt_to_phys( SLMBuffer );
+	dma_cache_maintenance( paddr, virt_to_phys(BufferP)-paddr, 1 );
+	DISABLE_IRQ();
+
+	/* Low on A1 */
+	dma_wd.dma_mode_status = 0x88;
+	MFPDELAY();
+
+	/* send the command bytes except the last */
+	for( i = 0; i < 5; ++i ) {
+		DMA_LONG_WRITE( *cmd++, 0x8a );
+		udelay(20);
+		if (!acsi_wait_for_IRQ( HZ/2 )) {
+			SLMError = 1;
+			return; /* timeout */
+		}
+	}
+	/* last command byte */
+	DMA_LONG_WRITE( *cmd++, 0x82 );
+	MFPDELAY();
+	/* set DMA address */
+	set_dma_addr( paddr );
+	/* program DMA for write and select sector counter reg */
+	dma_wd.dma_mode_status = 0x192;
+	MFPDELAY();
+	/* program for 255*512 bytes and start DMA */
+	DMA_LONG_WRITE( SLM_DMA_AMOUNT, 0x112 );
+
+#ifndef SLM_CONT_CNT_REPROG
+	SLMCurAddr = paddr;
+	SLMEndAddr = paddr + SLMSliceSize + SLM_DMA_INT_OFFSET;
+#endif
+	START_TIMER( DMA_STARTUP_TIME + DMA_TIME_FOR( SLMSliceSize ));
+#if !defined(SLM_CONT_CNT_REPROG) && defined(DEBUG)
+	printk( "SLM: CurAddr=%#lx EndAddr=%#lx timer=%ld\n",
+			SLMCurAddr, SLMEndAddr, DMA_TIME_FOR( SLMSliceSize ) );
+#endif
+	
+	ENABLE_IRQ();
+}
+
+
+/* Only called when an error happened or at the end of a page */
+
+static irqreturn_t slm_interrupt(int irc, void *data, struct pt_regs *fp)
+
+{	unsigned long	addr;
+	int				stat;
+	
+	STOP_TIMER();
+	addr = get_dma_addr();
+	stat = acsi_getstatus();
+	SLMError = (stat < 0)                     ? SLMSTAT_ACSITO :
+		   (addr < virt_to_phys(BufferP)) ? SLMSTAT_NOTALL :
+						    stat;
+
+	dma_wd.dma_mode_status = 0x80;
+	MFPDELAY();
+#ifdef DEBUG
+	printk( "SLM: interrupt, addr=%#lx, error=%d\n", addr, SLMError );
+#endif
+
+	wake_up( &print_wait );
+	stdma_release();
+	ENABLE_IRQ();
+	return IRQ_HANDLED;
+}
+
+
+static void slm_test_ready( unsigned long dummy )
+
+{
+#ifdef SLM_CONT_CNT_REPROG
+	/* program for 255*512 bytes again */
+	dma_wd.fdc_acces_seccount = SLM_DMA_AMOUNT;
+	START_TIMER( DMA_TIME_FOR(0) );
+#ifdef DEBUG
+	printk( "SLM: reprogramming timer for %d jiffies, addr=%#lx\n",
+			DMA_TIME_FOR(0), get_dma_addr() );
+#endif
+	
+#else /* !SLM_CONT_CNT_REPROG */
+
+	unsigned long	flags, addr;
+	int				d, ti;
+#ifdef DEBUG
+	struct timeval start_tm, end_tm;
+	int			   did_wait = 0;
+#endif
+
+	local_irq_save(flags);
+
+	addr = get_dma_addr();
+	if ((d = SLMEndAddr - addr) > 0) {
+		local_irq_restore(flags);
+		
+		/* slice not yet finished, decide whether to start another timer or to
+		 * busy-wait */
+		ti = DMA_TIME_FOR( d );
+		if (ti > 0) {
+#ifdef DEBUG
+			printk( "SLM: reprogramming timer for %d jiffies, rest %d bytes\n",
+					ti, d );
+#endif
+			START_TIMER( ti );
+			return;
+		}
+		/* wait for desired end address to be reached */
+#ifdef DEBUG
+		do_gettimeofday( &start_tm );
+		did_wait = 1;
+#endif
+		local_irq_disable();
+		while( get_dma_addr() < SLMEndAddr )
+			barrier();
+	}
+
+	/* slice finished, start next one */
+	SLMCurAddr += SLMSliceSize;
+
+#ifdef SLM_CONTINUOUS_DMA
+	/* program for 255*512 bytes again */
+	dma_wd.fdc_acces_seccount = SLM_DMA_AMOUNT;
+#else
+	/* set DMA address;
+	 * add 2 bytes for the ones in the SLM controller FIFO! */
+	set_dma_addr( SLMCurAddr + 2 );
+	/* toggle DMA to write and select sector counter reg */
+	dma_wd.dma_mode_status = 0x92;
+	MFPDELAY();
+	dma_wd.dma_mode_status = 0x192;
+	MFPDELAY();
+	/* program for 255*512 bytes and start DMA */
+	DMA_LONG_WRITE( SLM_DMA_AMOUNT, 0x112 );
+#endif
+	
+	local_irq_restore(flags);
+
+#ifdef DEBUG
+	if (did_wait) {
+		int ms;
+		do_gettimeofday( &end_tm );
+		ms = (end_tm.tv_sec*1000000+end_tm.tv_usec) -
+			 (start_tm.tv_sec*1000000+start_tm.tv_usec); 
+		printk( "SLM: did %ld.%ld ms busy waiting for %d bytes\n",
+				ms/1000, ms%1000, d );
+	}
+	else
+		printk( "SLM: didn't wait (!)\n" );
+#endif
+
+	if ((unsigned char *)PTOV( SLMCurAddr + SLMSliceSize ) >= BufferP) {
+		/* will be last slice, no timer necessary */
+#ifdef DEBUG
+		printk( "SLM: CurAddr=%#lx EndAddr=%#lx last slice -> no timer\n",
+				SLMCurAddr, SLMEndAddr );
+#endif
+	}
+	else {
+		/* not last slice */
+		SLMEndAddr = SLMCurAddr + SLMSliceSize + SLM_DMA_INT_OFFSET;
+		START_TIMER( DMA_TIME_FOR( SLMSliceSize ));
+#ifdef DEBUG
+		printk( "SLM: CurAddr=%#lx EndAddr=%#lx timer=%ld\n",
+				SLMCurAddr, SLMEndAddr, DMA_TIME_FOR( SLMSliceSize ) );
+#endif
+	}
+#endif /* SLM_CONT_CNT_REPROG */
+}
+
+
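+/* The ST-DMA base address register is accessed as three 8-bit latches
+ * (low/mid/high byte); the MFPDELAY()s give the slow I/O bus time to
+ * settle between the accesses.
+ */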
+static void set_dma_addr( unsigned long paddr )
+
+{	unsigned long flags;
+
+	local_irq_save(flags);
+	dma_wd.dma_lo = (unsigned char)paddr;
+	paddr >>= 8;
+	MFPDELAY();
+	dma_wd.dma_md = (unsigned char)paddr;
+	paddr >>= 8;
+	MFPDELAY();
+	if (ATARIHW_PRESENT( EXTD_DMA ))
+		st_dma_ext_dmahi = (unsigned short)paddr;
+	else
+		dma_wd.dma_hi = (unsigned char)paddr;
+	MFPDELAY();
+	local_irq_restore(flags);
+}
+
+
+static unsigned long get_dma_addr( void )
+
+{	unsigned long	addr;
+	
+	addr = dma_wd.dma_lo & 0xff;
+	MFPDELAY();
+	addr |= (dma_wd.dma_md & 0xff) << 8;
+	MFPDELAY();
+	addr |= (dma_wd.dma_hi & 0xff) << 16;
+	MFPDELAY();
+
+	return( addr );
+}
+
+
+static ssize_t slm_write( struct file *file, const char *buf, size_t count,
+						  loff_t *ppos )
+
+{
+	struct inode *node = file->f_dentry->d_inode;
+	int		device = iminor(node);
+	int		n, filled, w, h;
+
+	while( SLMState == PRINTING ||
+		   (SLMState == FILLING && SLMBufOwner != device) ) {
+		interruptible_sleep_on( &slm_wait );
+		if (signal_pending(current))
+			return( -ERESTARTSYS );
+	}
+	if (SLMState == IDLE) {
+		/* first data of page: get current page size  */
+		if (slm_get_pagesize( device, &w, &h ))
+			return( -EIO );
+		BufferSize = w*h/8;
+		if (BufferSize > SLM_BUFFER_SIZE)
+			return( -ENOMEM );
+
+		SLMState = FILLING;
+		SLMBufOwner = device;
+	}
+
+	n = count;
+	filled = BufferP - SLMBuffer;
+	if (filled + n > BufferSize)
+		n = BufferSize - filled;
+
+	if (copy_from_user(BufferP, buf, n))
+		return -EFAULT;
+	BufferP += n;
+	filled += n;
+
+	if (filled == BufferSize) {
+		/* Check the paper size again! The user may have switched it in the
+		 * time between starting the data and finishing them. Would end up in
+		 * a trashy page... */
+		if (slm_get_pagesize( device, &w, &h ))
+			return( -EIO );
+		if (BufferSize != w*h/8) {
+			printk( KERN_NOTICE "slm%d: page size changed while printing\n",
+					device );
+			return( -EAGAIN );
+		}
+
+		SLMState = PRINTING;
+		/* choose a slice size that is a multiple of the line size */
+#ifndef SLM_CONT_CNT_REPROG
+		SLMSliceSize = SLM_SLICE_SIZE(w);
+#endif
+		
+		start_print( device );
+		sleep_on( &print_wait );
+		if (SLMError && IS_REAL_ERROR(SLMError)) {
+			printk( KERN_ERR "slm%d: %s\n", device, slm_errstr(SLMError) );
+			n = -EIO;
+		}
+
+		SLMState = IDLE;
+		BufferP = SLMBuffer;
+		wake_up_interruptible( &slm_wait );
+	}
+	
+	return( n );
+}
+
+
+/* ---------------------------------------------------------------------- */
+/*							   ioctl Functions							  */
+
+
+static int slm_ioctl( struct inode *inode, struct file *file,
+					  unsigned int cmd, unsigned long arg )
+
+{	int		device = iminor(inode), err;
+	
+	/* I can think of setting:
+	 *  - manual feed
+	 *  - paper format
+	 *  - copy count
+	 *  - ...
+	 * but haven't implemented that yet :-)
+	 * BTW, has anybody better docs about the MODE SENSE/MODE SELECT data?
+	 */
+	switch( cmd ) {
+
+	  case SLMIORESET:		/* reset buffer, i.e. empty the buffer */
+		if (!(file->f_mode & 2))
+			return( -EINVAL );
+		if (SLMState == PRINTING)
+			return( -EBUSY );
+		SLMState = IDLE;
+		BufferP = SLMBuffer;
+		wake_up_interruptible( &slm_wait );
+		return( 0 );
+		
+	  case SLMIOGSTAT: {	/* get status */
+		int stat;
+		char *str;
+
+		stat = slm_req_sense( device );
+		if (arg) {
+			str = slm_errstr( stat );
+			if (put_user(stat,
+				     (long *)&((struct SLM_status *)arg)->stat))
+				return -EFAULT;
+			if (copy_to_user( ((struct SLM_status *)arg)->str, str,
+						 strlen(str) + 1))
+				return -EFAULT;
+		}
+		return( stat );
+	  }
+		
+	  case SLMIOGPSIZE: {	/* get paper size */
+		int w, h;
+		
+		if ((err = slm_get_pagesize( device, &w, &h ))) return( err );
+		
+		if (put_user(w, (long *)&((struct SLM_paper_size *)arg)->width))
+			return -EFAULT;
+		if (put_user(h, (long *)&((struct SLM_paper_size *)arg)->height))
+			return -EFAULT;
+		return( 0 );
+	  }
+		
+	  case SLMIOGMFEED:	/* get manual feed */
+		return( -EINVAL );
+
+	  case SLMIOSPSIZE:	/* set paper size */
+		return( -EINVAL );
+
+	  case SLMIOSMFEED:	/* set manual feed */
+		return( -EINVAL );
+
+	}
+	return( -EINVAL );
+}
+
+
+/* ---------------------------------------------------------------------- */
+/*							 Opening and Closing						  */
+
+
+static int slm_open( struct inode *inode, struct file *file )
+
+{	int device;
+	struct slm *sip;
+	
+	device = iminor(inode);
+	if (device >= N_SLM_Printers)
+		return( -ENXIO );
+	sip = &slm_info[device];
+
+	if (file->f_mode & 2) {
+		/* open for writing is exclusive */
+		if ( !atomic_dec_and_test(&sip->wr_ok) ) {
+			atomic_inc(&sip->wr_ok);	
+			return( -EBUSY );
+		}
+	}
+	if (file->f_mode & 1) {
+		/* open for reading is exclusive */
+		if ( !atomic_dec_and_test(&sip->rd_ok) ) {
+			atomic_inc(&sip->rd_ok);
+			return( -EBUSY );
+		}
+	}
+
+	return( 0 );
+}
+
+
+static int slm_release( struct inode *inode, struct file *file )
+
+{	int device;
+	struct slm *sip;
+	
+	device = iminor(inode);
+	sip = &slm_info[device];
+
+	if (file->f_mode & 2)
+		atomic_inc( &sip->wr_ok );
+	if (file->f_mode & 1)
+		atomic_inc( &sip->rd_ok );
+	
+	return( 0 );
+}
+
+
+/* ---------------------------------------------------------------------- */
+/*						 ACSI Primitives for the SLM					  */
+
+
+static int slm_req_sense( int device )
+
+{	int			stat, rv;
+	struct slm *sip = &slm_info[device];
+	
+	stdma_lock( NULL, NULL );
+
+	CMDSET_TARG_LUN( slmreqsense_cmd, sip->target, sip->lun );
+	if (!acsicmd_nodma( slmreqsense_cmd, 0 ) ||
+		(stat = acsi_getstatus()) < 0)
+		rv = SLMSTAT_ACSITO;
+	else
+		rv = stat & 0x1f;
+
+	ENABLE_IRQ();
+	stdma_release();
+	return( rv );
+}
+
+
+static int slm_mode_sense( int device, char *buffer, int abs_flag )
+
+{	unsigned char	stat, len;
+	int				rv = 0;
+	struct slm		*sip = &slm_info[device];
+	
+	stdma_lock( NULL, NULL );
+
+	CMDSET_TARG_LUN( slmmsense_cmd, sip->target, sip->lun );
+	slmmsense_cmd[5] = abs_flag ? 0x80 : 0;
+	if (!acsicmd_nodma( slmmsense_cmd, 0 )) {
+		rv = SLMSTAT_ACSITO;
+		goto the_end;
+	}
+
+	if (!acsi_extstatus( &stat, 1 )) {
+		acsi_end_extstatus();
+		rv = SLMSTAT_ACSITO;
+		goto the_end;
+	}
+	
+	if (!acsi_extstatus( &len, 1 )) {
+		acsi_end_extstatus();
+		rv = SLMSTAT_ACSITO;
+		goto the_end;
+	}
+	buffer[0] = len;
+	if (!acsi_extstatus( buffer+1, len )) {
+		acsi_end_extstatus();
+		rv = SLMSTAT_ACSITO;
+		goto the_end;
+	}
+	
+	acsi_end_extstatus();
+	rv = stat & 0x1f;
+
+  the_end:
+	ENABLE_IRQ();
+	stdma_release();
+	return( rv );
+}
+
+
+#if 0
+/* currently unused */
+static int slm_mode_select( int device, char *buffer, int len,
+							int default_flag )
+
+{	int			stat, rv;
+	struct slm	*sip = &slm_info[device];
+	
+	stdma_lock( NULL, NULL );
+
+	CMDSET_TARG_LUN( slmmselect_cmd, sip->target, sip->lun );
+	slmmselect_cmd[5] = default_flag ? 0x80 : 0;
+	if (!acsicmd_nodma( slmmselect_cmd, 0 )) {
+		rv = SLMSTAT_ACSITO;
+		goto the_end;
+	}
+
+	if (!default_flag) {
+		unsigned char c = len;
+		if (!acsi_extcmd( &c, 1 )) {
+			rv = SLMSTAT_ACSITO;
+			goto the_end;
+		}
+		if (!acsi_extcmd( buffer, len )) {
+			rv = SLMSTAT_ACSITO;
+			goto the_end;
+		}
+	}
+	
+	stat = acsi_getstatus();
+	rv = (stat < 0 ? SLMSTAT_ACSITO : stat);
+
+  the_end:
+	ENABLE_IRQ();
+	stdma_release();
+	return( rv );
+}
+#endif
+
+
+static int slm_get_pagesize( int device, int *w, int *h )
+
+{	char	buf[256];
+	int		stat;
+	
+	/* slm_mode_sense() enables the IRQ and releases the bus itself */
+	stat = slm_mode_sense( device, buf, 0 );
+
+	if (stat != SLMSTAT_OK)
+		return( -EIO );
+
+	*w = (buf[3] << 8) | buf[4];
+	*h = (buf[1] << 8) | buf[2];
+	return( 0 );
+}
+
+
+/* ---------------------------------------------------------------------- */
+/*								Initialization							  */
+
+
+int attach_slm( int target, int lun )
+
+{	static int	did_register;
+	int			len;
+
+	if (N_SLM_Printers >= MAX_SLM) {
+		printk( KERN_WARNING "Too many SLMs\n" );
+		return( 0 );
+	}
+	
+	/* do an INQUIRY */
+	udelay(100);
+	CMDSET_TARG_LUN( slminquiry_cmd, target, lun );
+	if (!acsicmd_nodma( slminquiry_cmd, 0 )) {
+	  inq_timeout:
+		printk( KERN_ERR "SLM inquiry command timed out.\n" );
+	  inq_fail:
+		acsi_end_extstatus();
+		return( 0 );
+	}
+	/* read status and header of return data */
+	if (!acsi_extstatus( SLMBuffer, 6 ))
+		goto inq_timeout;
+
+	if (SLMBuffer[1] != 2) { /* device type == printer? */
+		printk( KERN_ERR "SLM inquiry returned device type != printer\n" );
+		goto inq_fail;
+	}
+	len = SLMBuffer[5];
+	
+	/* read id string */
+	if (!acsi_extstatus( SLMBuffer, len ))
+		goto inq_timeout;
+	acsi_end_extstatus();
+	SLMBuffer[len] = 0;
+
+	if (!did_register) {
+		did_register = 1;
+	}
+
+	slm_info[N_SLM_Printers].target = target;
+	slm_info[N_SLM_Printers].lun    = lun;
+	atomic_set(&slm_info[N_SLM_Printers].wr_ok, 1 ); 
+	atomic_set(&slm_info[N_SLM_Printers].rd_ok, 1 );
+	
+	printk( KERN_INFO "  Printer: %s\n", SLMBuffer );
+	printk( KERN_INFO "Detected slm%d at id %d lun %d\n",
+			N_SLM_Printers, target, lun );
+	N_SLM_Printers++;
+	return( 1 );
+}
+
+int slm_init( void )
+
+{
+	int i;
+	if (register_chrdev( ACSI_MAJOR, "slm", &slm_fops )) {
+		printk( KERN_ERR "Unable to get major %d for ACSI SLM\n", ACSI_MAJOR );
+		return -EBUSY;
+	}
+	
+	if (!(SLMBuffer = atari_stram_alloc( SLM_BUFFER_SIZE, "SLM" ))) {
+		printk( KERN_ERR "Unable to get SLM ST-Ram buffer.\n" );
+		unregister_chrdev( ACSI_MAJOR, "slm" );
+		return -ENOMEM;
+	}
+	BufferP = SLMBuffer;
+	SLMState = IDLE;
+	
+	devfs_mk_dir("slm");
+	for (i = 0; i < MAX_SLM; i++) {
+		devfs_mk_cdev(MKDEV(ACSI_MAJOR, i),
+				S_IFCHR|S_IRUSR|S_IWUSR, "slm/%d", i);
+	}
+	return 0;
+}
+
+#ifdef MODULE
+
+/* from acsi.c */
+void acsi_attach_SLMs( int (*attach_func)( int, int ) );
+
+int init_module(void)
+{
+	int err;
+
+	if ((err = slm_init()))
+		return( err );
+	/* This calls attach_slm() for every target/lun where acsi.c detected a
+	 * printer */
+	acsi_attach_SLMs( attach_slm );
+	return( 0 );
+}
+
+void cleanup_module(void)
+{
+	int i;
+	for (i = 0; i < MAX_SLM; i++)
+		devfs_remove("slm/%d", i);
+	devfs_remove("slm");
+	if (unregister_chrdev( ACSI_MAJOR, "slm" ) != 0)
+		printk( KERN_ERR "acsi_slm: cleanup_module failed\n");
+	atari_stram_free( SLMBuffer );
+}
+#endif
diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c
new file mode 100644
index 0000000..1468e8c
--- /dev/null
+++ b/drivers/block/amiflop.c
@@ -0,0 +1,1850 @@
+/*
+ *  linux/amiga/amiflop.c
+ *
+ *  Copyright (C) 1993  Greg Harp
+ *  Portions of this driver are based on code contributed by Brad Pepers
+ *  
+ *  revised 28.5.95 by Joerg Dorchain
+ *  - now no bugs(?) any more for both HD & DD
+ *  - added support for 40 Track 5.25" drives, 80-track hopefully behaves
+ *    like 3.5" dd (no way to test - are there any 5.25" drives out there
+ *    that work on an A4000?)
+ *  - wrote formatting routine (maybe dirty, but works)
+ *
+ *  june/july 1995 added ms-dos support by Joerg Dorchain
+ *  (portions based on messydos.device and various contributors)
+ *  - currently only 9 and 18 sector disks
+ *
+ *  - fixed a bug with the internal trackbuffer when using multiple
+ *    disks at the same time
+ *  - made formatting a bit safer
+ *  - added command line and machine based default for "silent" df0
+ *
+ *  december 1995 adapted for 1.2.13pl4 by Joerg Dorchain
+ *  - works but I think it's inefficient. (look in redo_fd_request)
+ *    But the changes were very efficient. (only three and a half lines)
+ *
+ *  january 1996 added special ioctl for tracking down read/write problems
+ *  - usage: ioctl(d, RAW_TRACK, ptr); the raw track buffer (MFM-encoded
+ *    data) is copied to the area. (The area should be large enough since
+ *    no checking is done - 30K is currently sufficient.) Returns the
+ *    actual size of the trackbuffer.
+ *  - replaced udelays() by a timer (CIAA timer B) for the waits 
+ *    needed for the disk mechanic.
+ *
+ *  february 1996 fixed error recovery and multiple disk access
+ *  - both got broken the first time I tampered with the driver :-(
+ *  - still not safe, but better than before
+ *
+ *  revised March 3rd, 1996 by Jes Sorensen for use in the 1.3.28 kernel.
+ *  - Minor changes to accept the kdev_t.
+ *  - Replaced some more udelays with ms_delays. Udelay is just a loop,
+ *    and so the delay will be different depending on the given
+ *    processor :-(
+ *  - The driver could use a major cleanup because of the new
+ *    major/minor handling that came with kdev_t. It seems to work for
+ *    the time being, but I can't guarantee that it will stay like
+ *    that when we start using 16 (24?) bit minors.
+ *
+ * restructured jan 1997 by Joerg Dorchain
+ * - Fixed Bug accessing multiple disks
+ * - some code cleanup
+ * - added trackbuffer for each drive to speed things up
+ * - fixed some race conditions (who finds the next may send it to me ;-)
+ */
+
+#include <linux/module.h>
+
+#include <linux/fd.h>
+#include <linux/hdreg.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/amifdreg.h>
+#include <linux/amifd.h>
+#include <linux/buffer_head.h>
+#include <linux/blkdev.h>
+#include <linux/elevator.h>
+
+#include <asm/setup.h>
+#include <asm/uaccess.h>
+#include <asm/amigahw.h>
+#include <asm/amigaints.h>
+#include <asm/irq.h>
+
+#undef DEBUG /* print _LOTS_ of infos */
+
+#define RAW_IOCTL
+#ifdef RAW_IOCTL
+#define IOCTL_RAW_TRACK 0x5254524B  /* 'RTRK' */
+#endif
+
+/*
+ *  Defines
+ */
+
+/*
+ *  Error codes
+ */
+#define FD_OK		0	/* operation succeeded */
+#define FD_ERROR	-1	/* general error (seek, read, write, etc) */
+#define FD_NOUNIT	1	/* unit does not exist */
+#define FD_UNITBUSY	2	/* unit already active */
+#define FD_NOTACTIVE	3	/* unit is not active */
+#define FD_NOTREADY	4	/* unit is not ready (motor not on/no disk) */
+
+#define MFM_NOSYNC	1
+#define MFM_HEADER	2
+#define MFM_DATA	3
+#define MFM_TRACK	4
+
+/*
+ *  Floppy ID values
+ */
+#define FD_NODRIVE	0x00000000  /* response when no unit is present */
+#define FD_DD_3 	0xffffffff  /* double-density 3.5" (880K) drive */
+#define FD_HD_3 	0x55555555  /* high-density 3.5" (1760K) drive */
+#define FD_DD_5 	0xaaaaaaaa  /* double-density 5.25" (440K) drive */
+
+static unsigned long int fd_def_df0 = FD_DD_3;     /* default for df0 if it doesn't identify */
+
+module_param(fd_def_df0, ulong, 0);
+MODULE_LICENSE("GPL");
+
+static struct request_queue *floppy_queue;
+#define QUEUE (floppy_queue)
+#define CURRENT elv_next_request(floppy_queue)
+
+/*
+ *  Macros
+ */
+#define MOTOR_ON	(ciab.prb &= ~DSKMOTOR)
+#define MOTOR_OFF	(ciab.prb |= DSKMOTOR)
+#define SELECT(mask)    (ciab.prb &= ~mask)
+#define DESELECT(mask)  (ciab.prb |= mask)
+#define SELMASK(drive)  (1 << (3 + (drive & 3)))
+
+static struct fd_drive_type drive_types[] = {
+/*  code	name	   tr he   rdsz   wrsz sm pc1 pc2 sd  st st*/
+/*  warning: times are now in milliseconds (ms)                    */
+{ FD_DD_3,	"DD 3.5",  80, 2, 14716, 13630, 1, 80,161, 3, 18, 1},
+{ FD_HD_3,	"HD 3.5",  80, 2, 28344, 27258, 2, 80,161, 3, 18, 1},
+{ FD_DD_5,	"DD 5.25", 40, 2, 14716, 13630, 1, 40, 81, 6, 30, 2},
+{ FD_NODRIVE, "No Drive", 0, 0,     0,     0, 0,  0,  0,  0,  0, 0}
+};
+static int num_dr_types = sizeof(drive_types) / sizeof(drive_types[0]);
+
+static int amiga_read(int), dos_read(int);
+static void amiga_write(int), dos_write(int);
+static struct fd_data_type data_types[] = {
+	{ "Amiga", 11 , amiga_read, amiga_write},
+	{ "MS-Dos", 9, dos_read, dos_write}
+};
+
+/* current info on each unit */
+static struct amiga_floppy_struct unit[FD_MAX_UNITS];
+
+static struct timer_list flush_track_timer[FD_MAX_UNITS];
+static struct timer_list post_write_timer;
+static struct timer_list motor_on_timer;
+static struct timer_list motor_off_timer[FD_MAX_UNITS];
+static int on_attempts;
+
+/* Synchronization of FDC access */
+/* request loop (trackbuffer) */
+static volatile int fdc_busy = -1;
+static volatile int fdc_nested;
+static DECLARE_WAIT_QUEUE_HEAD(fdc_wait);
+ 
+static DECLARE_WAIT_QUEUE_HEAD(motor_wait);
+
+static volatile int selected = -1;	/* currently selected drive */
+
+static int writepending;
+static int writefromint;
+static char *raw_buf;
+
+static DEFINE_SPINLOCK(amiflop_lock);
+
+#define RAW_BUF_SIZE 30000  /* size of raw disk data */
+
+/*
+ * These are global variables, as that's the easiest way to give
+ * information to interrupts. They are the data used for the current
+ * request.
+ */
+static volatile char block_flag;
+static DECLARE_WAIT_QUEUE_HEAD(wait_fd_block);
+
+/* MS-Dos MFM Coding tables (should go quick and easy) */
+static unsigned char mfmencode[16]={
+	0x2a, 0x29, 0x24, 0x25, 0x12, 0x11, 0x14, 0x15,
+	0x4a, 0x49, 0x44, 0x45, 0x52, 0x51, 0x54, 0x55
+};
+static unsigned char mfmdecode[128];
+
+/* floppy internal millisecond timer stuff */
+static volatile int ms_busy = -1;
+static DECLARE_WAIT_QUEUE_HEAD(ms_wait);
+#define MS_TICKS ((amiga_eclock+50)/1000)
+
+/*
+ * Note that MAX_ERRORS=X doesn't imply that we retry every bad read
+ * max X times - some types of errors increase the errorcount by 2 or
+ * even 3, so we might actually retry only X/2 times before giving up.
+ */
+#define MAX_ERRORS 12
+
+/* Prevent "aliased" accesses. */
+static int fd_ref[4] = { 0,0,0,0 };
+static int fd_device[4] = { 0, 0, 0, 0 };
+
+/*
+ * Here come the actual hardware access and helper functions.
+ * They are not reentrant and single threaded because all drives
+ * share the same hardware and the same trackbuffer.
+ */
+
+/* Milliseconds timer */
+
+static irqreturn_t ms_isr(int irq, void *dummy, struct pt_regs *fp)
+{
+	ms_busy = -1;
+	wake_up(&ms_wait);
+	return IRQ_HANDLED;
+}
+
+/* all waits are queued up 
+   A more generic routine would do a schedule a la timer.device */
+static void ms_delay(int ms)
+{
+	unsigned long flags;
+	int ticks;
+	if (ms > 0) {
+		local_irq_save(flags);
+		while (ms_busy == 0)
+			sleep_on(&ms_wait);
+		ms_busy = 0;
+		local_irq_restore(flags);
+		ticks = MS_TICKS*ms-1;
+		ciaa.tblo=ticks%256;
+		ciaa.tbhi=ticks/256;
+		ciaa.crb=0x19; /*count eclock, force load, one-shoot, start */
+		sleep_on(&ms_wait);
+	}
+}
+
+/* Hardware semaphore */
+
+/* returns true when we would get the semaphore */
+static inline int try_fdc(int drive)
+{
+	drive &= 3;
+	return ((fdc_busy < 0) || (fdc_busy == drive));
+}
+
+static void get_fdc(int drive)
+{
+	unsigned long flags;
+
+	drive &= 3;
+#ifdef DEBUG
+	printk("get_fdc: drive %d  fdc_busy %d  fdc_nested %d\n",drive,fdc_busy,fdc_nested);
+#endif
+	local_irq_save(flags);
+	while (!try_fdc(drive))
+		sleep_on(&fdc_wait);
+	fdc_busy = drive;
+	fdc_nested++;
+	local_irq_restore(flags);
+}
+
+static inline void rel_fdc(void)
+{
+#ifdef DEBUG
+	if (fdc_nested == 0)
+		printk("fd: unmatched rel_fdc\n");
+	printk("rel_fdc: fdc_busy %d fdc_nested %d\n",fdc_busy,fdc_nested);
+#endif
+	fdc_nested--;
+	if (fdc_nested == 0) {
+		fdc_busy = -1;
+		wake_up(&fdc_wait);
+	}
+}
+
+static void fd_select (int drive)
+{
+	unsigned char prb = ~0;
+
+	drive&=3;
+#ifdef DEBUG
+	printk("selecting %d\n",drive);
+#endif
+	if (drive == selected)
+		return;
+	get_fdc(drive);
+	selected = drive;
+
+	if (unit[drive].track % 2 != 0)
+		prb &= ~DSKSIDE;
+	if (unit[drive].motor == 1)
+		prb &= ~DSKMOTOR;
+	ciab.prb |= (SELMASK(0)|SELMASK(1)|SELMASK(2)|SELMASK(3));
+	ciab.prb = prb;
+	prb &= ~SELMASK(drive);
+	ciab.prb = prb;
+	rel_fdc();
+}
+
+static void fd_deselect (int drive)
+{
+	unsigned char prb;
+	unsigned long flags;
+
+	drive&=3;
+#ifdef DEBUG
+	printk("deselecting %d\n",drive);
+#endif
+	if (drive != selected) {
+		printk(KERN_WARNING "Deselecting drive %d while %d was selected!\n",drive,selected);
+		return;
+	}
+
+	get_fdc(drive);
+	local_irq_save(flags);
+
+	selected = -1;
+
+	prb = ciab.prb;
+	prb |= (SELMASK(0)|SELMASK(1)|SELMASK(2)|SELMASK(3));
+	ciab.prb = prb;
+
+	local_irq_restore (flags);
+	rel_fdc();
+
+}
+
+static void motor_on_callback(unsigned long nr)
+{
+	if (!(ciaa.pra & DSKRDY) || --on_attempts == 0) {
+		wake_up (&motor_wait);
+	} else {
+		motor_on_timer.expires = jiffies + HZ/10;
+		add_timer(&motor_on_timer);
+	}
+}
+
+static int fd_motor_on(int nr)
+{
+	nr &= 3;
+
+	del_timer(motor_off_timer + nr);
+
+	if (!unit[nr].motor) {
+		unit[nr].motor = 1;
+		fd_select(nr);
+
+		motor_on_timer.data = nr;
+		mod_timer(&motor_on_timer, jiffies + HZ/2);
+
+		on_attempts = 10;
+		sleep_on (&motor_wait);
+		fd_deselect(nr);
+	}
+
+	if (on_attempts == 0) {
+		on_attempts = -1;
+#if 0
+		printk (KERN_ERR "motor_on failed, turning motor off\n");
+		fd_motor_off (nr);
+		return 0;
+#else
+		printk (KERN_WARNING "DSKRDY not set after 1.5 seconds - assuming the drive is spinning anyway\n");
+#endif
+	}
+
+	return 1;
+}
+
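+/* the drive argument doubles as a flag word: bit 31 means "called from
+ * interrupt", bit 30 (module only) means "decrement the module use count";
+ * see floppy_off() and floppy_release() for the callers that set them */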
+static void fd_motor_off(unsigned long drive)
+{
+	long calledfromint;
+#ifdef MODULE
+	long decusecount;
+
+	decusecount = drive & 0x40000000;
+#endif
+	calledfromint = drive & 0x80000000;
+	drive&=3;
+	if (calledfromint && !try_fdc(drive)) {
+		/* We would be blocked in an interrupt, so try again later */
+		motor_off_timer[drive].expires = jiffies + 1;
+		add_timer(motor_off_timer + drive);
+		return;
+	}
+	unit[drive].motor = 0;
+	fd_select(drive);
+	udelay (1);
+	fd_deselect(drive);
+}
+
+static void floppy_off (unsigned int nr)
+{
+	int drive;
+
+	drive = nr & 3;
+	/* called this way it is always from interrupt */
+	motor_off_timer[drive].data = nr | 0x80000000;
+	mod_timer(motor_off_timer + drive, jiffies + 3*HZ);
+}
+
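+/* find track 0: first back the head away from the track-0 sensor, then
+ * step towards it until DSKTRACK0 triggers, giving up after tracks+20
+ * steps */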
+static int fd_calibrate(int drive)
+{
+	unsigned char prb;
+	int n;
+
+	drive &= 3;
+	get_fdc(drive);
+	if (!fd_motor_on (drive))
+		return 0;
+	fd_select (drive);
+	prb = ciab.prb;
+	prb |= DSKSIDE;
+	prb &= ~DSKDIREC;
+	ciab.prb = prb;
+	for (n = unit[drive].type->tracks/2; n != 0; --n) {
+		if (ciaa.pra & DSKTRACK0)
+			break;
+		prb &= ~DSKSTEP;
+		ciab.prb = prb;
+		prb |= DSKSTEP;
+		udelay (2);
+		ciab.prb = prb;
+		ms_delay(unit[drive].type->step_delay);
+	}
+	ms_delay (unit[drive].type->settle_time);
+	prb |= DSKDIREC;
+	n = unit[drive].type->tracks + 20;
+	for (;;) {
+		prb &= ~DSKSTEP;
+		ciab.prb = prb;
+		prb |= DSKSTEP;
+		udelay (2);
+		ciab.prb = prb;
+		ms_delay(unit[drive].type->step_delay + 1);
+		if ((ciaa.pra & DSKTRACK0) == 0)
+			break;
+		if (--n == 0) {
+			printk (KERN_ERR "fd%d: calibrate failed, turning motor off\n", drive);
+			fd_motor_off (drive);
+			unit[drive].track = -1;
+			rel_fdc();
+			return 0;
+		}
+	}
+	unit[drive].track = 0;
+	ms_delay(unit[drive].type->settle_time);
+
+	rel_fdc();
+	fd_deselect(drive);
+	return 1;
+}
+
+static int fd_seek(int drive, int track)
+{
+	unsigned char prb;
+	int cnt;
+
+#ifdef DEBUG
+	printk("seeking drive %d to track %d\n",drive,track);
+#endif
+	drive &= 3;
+	get_fdc(drive);
+	if (unit[drive].track == track) {
+		rel_fdc();
+		return 1;
+	}
+	if (!fd_motor_on(drive)) {
+		rel_fdc();
+		return 0;
+	}
+	if (unit[drive].track < 0 && !fd_calibrate(drive)) {
+		rel_fdc();
+		return 0;
+	}
+
+	fd_select (drive);
+	cnt = unit[drive].track/2 - track/2;
+	prb = ciab.prb;
+	prb |= DSKSIDE | DSKDIREC;
+	if (track % 2 != 0)
+		prb &= ~DSKSIDE;
+	if (cnt < 0) {
+		cnt = - cnt;
+		prb &= ~DSKDIREC;
+	}
+	ciab.prb = prb;
+	if (track % 2 != unit[drive].track % 2)
+		ms_delay (unit[drive].type->side_time);
+	unit[drive].track = track;
+	if (cnt == 0) {
+		rel_fdc();
+		fd_deselect(drive);
+		return 1;
+	}
+	do {
+		prb &= ~DSKSTEP;
+		ciab.prb = prb;
+		prb |= DSKSTEP;
+		udelay (1);
+		ciab.prb = prb;
+		ms_delay (unit[drive].type->step_delay);
+	} while (--cnt != 0);
+	ms_delay (unit[drive].type->settle_time);
+
+	rel_fdc();
+	fd_deselect(drive);
+	return 1;
+}
+
+static unsigned long fd_get_drive_id(int drive)
+{
+	int i;
+	ulong id = 0;
+
+  	drive&=3;
+  	get_fdc(drive);
+	/* set up for ID */
+	MOTOR_ON;
+	udelay(2);
+	SELECT(SELMASK(drive));
+	udelay(2);
+	DESELECT(SELMASK(drive));
+	udelay(2);
+	MOTOR_OFF;
+	udelay(2);
+	SELECT(SELMASK(drive));
+	udelay(2);
+	DESELECT(SELMASK(drive));
+	udelay(2);
+
+	/* loop and read disk ID */
+	for (i=0; i<32; i++) {
+		SELECT(SELMASK(drive));
+		udelay(2);
+
+		/* read and store value of DSKRDY */
+		id <<= 1;
+		id |= (ciaa.pra & DSKRDY) ? 0 : 1;	/* cia regs are low-active! */
+
+		DESELECT(SELMASK(drive));
+	}
+
+	rel_fdc();
+
+	/*
+	 * RB: At least A500/A2000's df0: don't identify themselves.
+	 * As every (real) Amiga has at least a 3.5" DD drive as df0:
+	 * we default to that if df0: doesn't identify as a certain
+	 * type.
+	 */
+	if (drive == 0 && id == FD_NODRIVE) {
+		id = fd_def_df0;
+		printk(KERN_NOTICE "fd: drive 0 didn't identify, setting default %08lx\n", (ulong)fd_def_df0);
+	}
+	/* return the ID value */
+	return (id);
+}
+
+static irqreturn_t fd_block_done(int irq, void *dummy, struct pt_regs *fp)
+{
+	if (block_flag)
+		custom.dsklen = 0x4000;
+
+	if (block_flag == 2) { /* writing */
+		writepending = 2;
+		post_write_timer.expires = jiffies + 1; /* at least 2 ms */
+		post_write_timer.data = selected;
+		add_timer(&post_write_timer);
+	}
+	else {                /* reading */
+		block_flag = 0;
+		wake_up (&wait_fd_block);
+	}
+	return IRQ_HANDLED;
+}
+
+static void raw_read(int drive)
+{
+	drive&=3;
+	get_fdc(drive);
+	while (block_flag)
+		sleep_on(&wait_fd_block);
+	fd_select(drive);
+	/* setup adkcon bits correctly */
+	custom.adkcon = ADK_MSBSYNC;
+	custom.adkcon = ADK_SETCLR|ADK_WORDSYNC|ADK_FAST;
+
+	custom.dsksync = MFM_SYNC;
+
+	custom.dsklen = 0;
+	custom.dskptr = (u_char *)ZTWO_PADDR((u_char *)raw_buf);
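+	/* the hardware wants DSKLEN written twice before it actually
+	 * starts the disk DMA (the second write arms the transfer) */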
+	custom.dsklen = unit[drive].type->read_size/sizeof(short) | DSKLEN_DMAEN;
+	custom.dsklen = unit[drive].type->read_size/sizeof(short) | DSKLEN_DMAEN;
+
+	block_flag = 1;
+
+	while (block_flag)
+		sleep_on (&wait_fd_block);
+
+	custom.dsklen = 0;
+	fd_deselect(drive);
+	rel_fdc();
+}
+
+static int raw_write(int drive)
+{
+	ushort adk;
+
+	drive&=3;
+	get_fdc(drive); /* corresponds to rel_fdc() in post_write() */
+	if ((ciaa.pra & DSKPROT) == 0) {
+		rel_fdc();
+		return 0;
+	}
+	while (block_flag)
+		sleep_on(&wait_fd_block);
+	fd_select(drive);
+	/* clear adkcon bits */
+	custom.adkcon = ADK_PRECOMP1|ADK_PRECOMP0|ADK_WORDSYNC|ADK_MSBSYNC;
+	/* set appropriate adkcon bits */
+	adk = ADK_SETCLR|ADK_FAST;
+	if ((ulong)unit[drive].track >= unit[drive].type->precomp2)
+		adk |= ADK_PRECOMP1;
+	else if ((ulong)unit[drive].track >= unit[drive].type->precomp1)
+		adk |= ADK_PRECOMP0;
+	custom.adkcon = adk;
+
+	custom.dsklen = DSKLEN_WRITE;
+	custom.dskptr = (u_char *)ZTWO_PADDR((u_char *)raw_buf);
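+	/* as in raw_read(): the length register must be written twice
+	 * to arm the DMA transfer */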
+	custom.dsklen = unit[drive].type->write_size/sizeof(short) | DSKLEN_DMAEN|DSKLEN_WRITE;
+	custom.dsklen = unit[drive].type->write_size/sizeof(short) | DSKLEN_DMAEN|DSKLEN_WRITE;
+
+	block_flag = 2;
+	return 1;
+}
+
+/*
+ * to be called at least 2ms after the write has finished but before any
+ * other access to the hardware.
+ */
+static void post_write (unsigned long drive)
+{
+#ifdef DEBUG
+	printk("post_write for drive %ld\n",drive);
+#endif
+	drive &= 3;
+	custom.dsklen = 0;
+	block_flag = 0;
+	writepending = 0;
+	writefromint = 0;
+	unit[drive].dirty = 0;
+	wake_up(&wait_fd_block);
+	fd_deselect(drive);
+	rel_fdc(); /* corresponds to get_fdc() in raw_write */
+}
+
+
+/*
+ * The following functions are to convert the block contents into raw data
+ * written to disk and vice versa.
+ * (Add other formats here ;-))
+ */
+
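+/* advance past the next run of 0x4489 MFM sync words; returns 0 if no
+ * sync mark is found before the end of the buffer */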
+static unsigned long scan_sync(unsigned long raw, unsigned long end)
+{
+	ushort *ptr = (ushort *)raw, *endp = (ushort *)end;
+
+	while (ptr < endp && *ptr++ != 0x4489)
+		;
+	if (ptr < endp) {
+		while (*ptr == 0x4489 && ptr < endp)
+			ptr++;
+		return (ulong)ptr;
+	}
+	return 0;
+}
+
+static inline unsigned long checksum(unsigned long *addr, int len)
+{
+	unsigned long csum = 0;
+
+	len /= sizeof(*addr);
+	while (len-- > 0)
+		csum ^= *addr++;
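+	/* folding the odd bit positions onto the even ones is equivalent
+	 * to checksumming the MFM-encoded form and keeping only the data
+	 * bits */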
+	csum = ((csum>>1) & 0x55555555)  ^  (csum & 0x55555555);
+
+	return csum;
+}
+
+static unsigned long decode (unsigned long *data, unsigned long *raw,
+			     int len)
+{
+	ulong *odd, *even;
+
+	/* convert length from bytes to longwords */
+	len >>= 2;
+	odd = raw;
+	even = odd + len;
+
+	/* prepare return pointer */
+	raw += len * 2;
+
+	do {
+		*data++ = ((*odd++ & 0x55555555) << 1) | (*even++ & 0x55555555);
+	} while (--len != 0);
+
+	return (ulong)raw;
+}
+
+struct header {
+	unsigned char magic;
+	unsigned char track;
+	unsigned char sect;
+	unsigned char ord;
+	unsigned char labels[16];
+	unsigned long hdrchk;
+	unsigned long datachk;
+};
+
+static int amiga_read(int drive)
+{
+	unsigned long raw;
+	unsigned long end;
+	int scnt;
+	unsigned long csum;
+	struct header hdr;
+
+	drive&=3;
+	raw = (long) raw_buf;
+	end = raw + unit[drive].type->read_size;
+
+	for (scnt = 0;scnt < unit[drive].dtype->sects * unit[drive].type->sect_mult; scnt++) {
+		if (!(raw = scan_sync(raw, end))) {
+			printk (KERN_INFO "can't find sync for sector %d\n", scnt);
+			return MFM_NOSYNC;
+		}
+
+		raw = decode ((ulong *)&hdr.magic, (ulong *)raw, 4);
+		raw = decode ((ulong *)&hdr.labels, (ulong *)raw, 16);
+		raw = decode ((ulong *)&hdr.hdrchk, (ulong *)raw, 4);
+		raw = decode ((ulong *)&hdr.datachk, (ulong *)raw, 4);
+		csum = checksum((ulong *)&hdr,
+				(char *)&hdr.hdrchk-(char *)&hdr);
+
+#ifdef DEBUG
+		printk ("(%x,%d,%d,%d) (%lx,%lx,%lx,%lx) %lx %lx\n",
+			hdr.magic, hdr.track, hdr.sect, hdr.ord,
+			*(ulong *)&hdr.labels[0], *(ulong *)&hdr.labels[4],
+			*(ulong *)&hdr.labels[8], *(ulong *)&hdr.labels[12],
+			hdr.hdrchk, hdr.datachk);
+#endif
+
+		if (hdr.hdrchk != csum) {
+			printk(KERN_INFO "MFM_HEADER: %08lx,%08lx\n", hdr.hdrchk, csum);
+			return MFM_HEADER;
+		}
+
+		/* verify track */
+		if (hdr.track != unit[drive].track) {
+			printk(KERN_INFO "MFM_TRACK: %d, %d\n", hdr.track, unit[drive].track);
+			return MFM_TRACK;
+		}
+
+		raw = decode ((ulong *)(unit[drive].trackbuf + hdr.sect*512),
+			      (ulong *)raw, 512);
+		csum = checksum((ulong *)(unit[drive].trackbuf + hdr.sect*512), 512);
+
+		if (hdr.datachk != csum) {
+			printk(KERN_INFO "MFM_DATA: (%x:%d:%d:%d) sc=%d %lx, %lx\n",
+			       hdr.magic, hdr.track, hdr.sect, hdr.ord, scnt,
+			       hdr.datachk, csum);
+			printk (KERN_INFO "data=(%lx,%lx,%lx,%lx)\n",
+				((ulong *)(unit[drive].trackbuf+hdr.sect*512))[0],
+				((ulong *)(unit[drive].trackbuf+hdr.sect*512))[1],
+				((ulong *)(unit[drive].trackbuf+hdr.sect*512))[2],
+				((ulong *)(unit[drive].trackbuf+hdr.sect*512))[3]);
+			return MFM_DATA;
+		}
+	}
+
+	return 0;
+}
+
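+/*
+ * Insert the MFM clock bits into one longword of data bits (the data
+ * bits occupy the even bit positions, mask 0x55555555).  A clock bit is
+ * set only when both of its neighbouring data bits are zero; the last
+ * bit of the previously written longword decides the topmost clock bit.
+ */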
+static void encode(unsigned long data, unsigned long *dest)
+{
+	unsigned long data2;
+
+	data &= 0x55555555;
+	data2 = data ^ 0x55555555;
+	data |= ((data2 >> 1) | 0x80000000) & (data2 << 1);
+
+	if (*(dest - 1) & 0x00000001)
+		data &= 0x7FFFFFFF;
+
+	*dest = data;
+}
+
+static void encode_block(unsigned long *dest, unsigned long *src, int len)
+{
+	int cnt, to_cnt = 0;
+	unsigned long data;
+
+	/* odd bits */
+	for (cnt = 0; cnt < len / 4; cnt++) {
+		data = src[cnt] >> 1;
+		encode(data, dest + to_cnt++);
+	}
+
+	/* even bits */
+	for (cnt = 0; cnt < len / 4; cnt++) {
+		data = src[cnt];
+		encode(data, dest + to_cnt++);
+	}
+}
+
+static unsigned long *putsec(int disk, unsigned long *raw, int cnt)
+{
+	struct header hdr;
+	int i;
+
+	disk&=3;
+	*raw = (raw[-1]&1) ? 0x2AAAAAAA : 0xAAAAAAAA;
+	raw++;
+	*raw++ = 0x44894489;
+
+	hdr.magic = 0xFF;
+	hdr.track = unit[disk].track;
+	hdr.sect = cnt;
+	hdr.ord = unit[disk].dtype->sects * unit[disk].type->sect_mult - cnt;
+	for (i = 0; i < 16; i++)
+		hdr.labels[i] = 0;
+	hdr.hdrchk = checksum((ulong *)&hdr,
+			      (char *)&hdr.hdrchk-(char *)&hdr);
+	hdr.datachk = checksum((ulong *)(unit[disk].trackbuf+cnt*512), 512);
+
+	encode_block(raw, (ulong *)&hdr.magic, 4);
+	raw += 2;
+	encode_block(raw, (ulong *)&hdr.labels, 16);
+	raw += 8;
+	encode_block(raw, (ulong *)&hdr.hdrchk, 4);
+	raw += 2;
+	encode_block(raw, (ulong *)&hdr.datachk, 4);
+	raw += 2;
+	encode_block(raw, (ulong *)(unit[disk].trackbuf+cnt*512), 512);
+	raw += 256;
+
+	return raw;
+}
+
+static void amiga_write(int disk)
+{
+	unsigned int cnt;
+	unsigned long *ptr = (unsigned long *)raw_buf;
+
+	disk&=3;
+	/* gap space */
+	for (cnt = 0; cnt < 415 * unit[disk].type->sect_mult; cnt++)
+		*ptr++ = 0xaaaaaaaa;
+
+	/* sectors */
+	for (cnt = 0; cnt < unit[disk].dtype->sects * unit[disk].type->sect_mult; cnt++)
+		ptr = putsec (disk, ptr, cnt);
+	*(ushort *)ptr = (ptr[-1]&1) ? 0x2AA8 : 0xAAA8;
+}
+
+
+struct dos_header {
+	unsigned char track,   /* 0-80 */
+		side,    /* 0-1 */
+		sec,     /* 0-...*/
+		len_desc;/* 2 */
+	unsigned short crc;     /* on 68000 we got an alignment problem,
+				   but this compiler solves it by silently
+				   adding a pad byte, so data won't fit -
+				   and this took about 3h to discover.... */
+	unsigned char gap1[22];     /* for longword-alignedness (0x4e) */
+};
+
+/* crc routines are borrowed from the messydos-handler  */
+
+/* excerpt from the messydos-device           
+; The CRC is computed not only over the actual data, but including
+; the SYNC mark (3 * $a1) and the 'ID/DATA - Address Mark' ($fe/$fb).
+; As we don't read or encode these fields into our buffers, we have to
+; preload the registers containing the CRC with the values they would have
+; after stepping over these fields.
+;
+; How CRCs "really" work:
+;
+; First, you should regard a bitstring as a series of coefficients of
+; polynomials. We calculate with these polynomials in modulo-2
+; arithmetic, in which both add and subtract are done the same as
+; exclusive-or. Now, we modify our data (a very long polynomial) in
+; such a way that it becomes divisible by the CCITT-standard 16-bit
+;		 16   12   5
+; polynomial:	x  + x	+ x + 1, represented by $11021. The easiest
+; way to do this would be to multiply (using proper arithmetic) our
+; datablock with $11021. So we have:
+;   data * $11021		 =
+;   data * ($10000 + $1021)      =
+;   data * $10000 + data * $1021
+; The left part of this is simple: Just add two 0 bytes. But then
+; the right part (data $1021) remains difficult and even could have
+; a carry into the left part. The solution is to use a modified
+; multiplication, which has a result that is not correct, but with
+; a difference of any multiple of $11021. We then only need to keep
+; the 16 least significant bits of the result.
+;
+; The following algorithm does this for us:
+;
+;   unsigned char *data, c, crclo, crchi;
+;   while (not done) {
+;	c = *data++ + crchi;
+;	crchi = (@ c) >> 8 + crclo;
+;	crclo = @ c;
+;   }
+;
+; Remember, + is done with EOR, the @ operator is in two tables (high
+; and low byte separately), which is calculated as
+;
+;      $1021 * (c & $F0)
+;  xor $1021 * (c & $0F)
+;  xor $1021 * (c >> 4)         (* is regular multiplication)
+;
+;
+; Anyway, the end result is the same as the remainder of the division of
+; the data by $11021. I am afraid I need to study theory a bit more...
+
+
+my only work was to port this from Manx assembler to C....
+
+*/
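+
+/*
+ * For reference, a sketch (assuming the standard CCITT generator
+ * polynomial $1021) of how tables like CRCTable1/CRCTable2 below can be
+ * generated - they hold the high and low bytes of the 16-bit remainder
+ * for each possible input byte:
+ *
+ *	for (i = 0; i < 256; i++) {
+ *		unsigned short r = i << 8;
+ *		for (b = 0; b < 8; b++)
+ *			r = (r & 0x8000) ? (r << 1) ^ 0x1021 : (r << 1);
+ *		CRCTable1[i] = r >> 8;
+ *		CRCTable2[i] = r & 0xff;
+ *	}
+ */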
+
+static ushort dos_crc(void * data_a3, int data_d0, int data_d1, int data_d3)
+{
+	static unsigned char CRCTable1[] = {
+		0x00,0x10,0x20,0x30,0x40,0x50,0x60,0x70,0x81,0x91,0xa1,0xb1,0xc1,0xd1,0xe1,0xf1,
+		0x12,0x02,0x32,0x22,0x52,0x42,0x72,0x62,0x93,0x83,0xb3,0xa3,0xd3,0xc3,0xf3,0xe3,
+		0x24,0x34,0x04,0x14,0x64,0x74,0x44,0x54,0xa5,0xb5,0x85,0x95,0xe5,0xf5,0xc5,0xd5,
+		0x36,0x26,0x16,0x06,0x76,0x66,0x56,0x46,0xb7,0xa7,0x97,0x87,0xf7,0xe7,0xd7,0xc7,
+		0x48,0x58,0x68,0x78,0x08,0x18,0x28,0x38,0xc9,0xd9,0xe9,0xf9,0x89,0x99,0xa9,0xb9,
+		0x5a,0x4a,0x7a,0x6a,0x1a,0x0a,0x3a,0x2a,0xdb,0xcb,0xfb,0xeb,0x9b,0x8b,0xbb,0xab,
+		0x6c,0x7c,0x4c,0x5c,0x2c,0x3c,0x0c,0x1c,0xed,0xfd,0xcd,0xdd,0xad,0xbd,0x8d,0x9d,
+		0x7e,0x6e,0x5e,0x4e,0x3e,0x2e,0x1e,0x0e,0xff,0xef,0xdf,0xcf,0xbf,0xaf,0x9f,0x8f,
+		0x91,0x81,0xb1,0xa1,0xd1,0xc1,0xf1,0xe1,0x10,0x00,0x30,0x20,0x50,0x40,0x70,0x60,
+		0x83,0x93,0xa3,0xb3,0xc3,0xd3,0xe3,0xf3,0x02,0x12,0x22,0x32,0x42,0x52,0x62,0x72,
+		0xb5,0xa5,0x95,0x85,0xf5,0xe5,0xd5,0xc5,0x34,0x24,0x14,0x04,0x74,0x64,0x54,0x44,
+		0xa7,0xb7,0x87,0x97,0xe7,0xf7,0xc7,0xd7,0x26,0x36,0x06,0x16,0x66,0x76,0x46,0x56,
+		0xd9,0xc9,0xf9,0xe9,0x99,0x89,0xb9,0xa9,0x58,0x48,0x78,0x68,0x18,0x08,0x38,0x28,
+		0xcb,0xdb,0xeb,0xfb,0x8b,0x9b,0xab,0xbb,0x4a,0x5a,0x6a,0x7a,0x0a,0x1a,0x2a,0x3a,
+		0xfd,0xed,0xdd,0xcd,0xbd,0xad,0x9d,0x8d,0x7c,0x6c,0x5c,0x4c,0x3c,0x2c,0x1c,0x0c,
+		0xef,0xff,0xcf,0xdf,0xaf,0xbf,0x8f,0x9f,0x6e,0x7e,0x4e,0x5e,0x2e,0x3e,0x0e,0x1e
+	};
+
+	static unsigned char CRCTable2[] = {
+		0x00,0x21,0x42,0x63,0x84,0xa5,0xc6,0xe7,0x08,0x29,0x4a,0x6b,0x8c,0xad,0xce,0xef,
+		0x31,0x10,0x73,0x52,0xb5,0x94,0xf7,0xd6,0x39,0x18,0x7b,0x5a,0xbd,0x9c,0xff,0xde,
+		0x62,0x43,0x20,0x01,0xe6,0xc7,0xa4,0x85,0x6a,0x4b,0x28,0x09,0xee,0xcf,0xac,0x8d,
+		0x53,0x72,0x11,0x30,0xd7,0xf6,0x95,0xb4,0x5b,0x7a,0x19,0x38,0xdf,0xfe,0x9d,0xbc,
+		0xc4,0xe5,0x86,0xa7,0x40,0x61,0x02,0x23,0xcc,0xed,0x8e,0xaf,0x48,0x69,0x0a,0x2b,
+		0xf5,0xd4,0xb7,0x96,0x71,0x50,0x33,0x12,0xfd,0xdc,0xbf,0x9e,0x79,0x58,0x3b,0x1a,
+		0xa6,0x87,0xe4,0xc5,0x22,0x03,0x60,0x41,0xae,0x8f,0xec,0xcd,0x2a,0x0b,0x68,0x49,
+		0x97,0xb6,0xd5,0xf4,0x13,0x32,0x51,0x70,0x9f,0xbe,0xdd,0xfc,0x1b,0x3a,0x59,0x78,
+		0x88,0xa9,0xca,0xeb,0x0c,0x2d,0x4e,0x6f,0x80,0xa1,0xc2,0xe3,0x04,0x25,0x46,0x67,
+		0xb9,0x98,0xfb,0xda,0x3d,0x1c,0x7f,0x5e,0xb1,0x90,0xf3,0xd2,0x35,0x14,0x77,0x56,
+		0xea,0xcb,0xa8,0x89,0x6e,0x4f,0x2c,0x0d,0xe2,0xc3,0xa0,0x81,0x66,0x47,0x24,0x05,
+		0xdb,0xfa,0x99,0xb8,0x5f,0x7e,0x1d,0x3c,0xd3,0xf2,0x91,0xb0,0x57,0x76,0x15,0x34,
+		0x4c,0x6d,0x0e,0x2f,0xc8,0xe9,0x8a,0xab,0x44,0x65,0x06,0x27,0xc0,0xe1,0x82,0xa3,
+		0x7d,0x5c,0x3f,0x1e,0xf9,0xd8,0xbb,0x9a,0x75,0x54,0x37,0x16,0xf1,0xd0,0xb3,0x92,
+		0x2e,0x0f,0x6c,0x4d,0xaa,0x8b,0xe8,0xc9,0x26,0x07,0x64,0x45,0xa2,0x83,0xe0,0xc1,
+		0x1f,0x3e,0x5d,0x7c,0x9b,0xba,0xd9,0xf8,0x17,0x36,0x55,0x74,0x93,0xb2,0xd1,0xf0
+	};
+
+/* look at the asm code - what looks a bit strange in C is almost as good as handmade */
+	register int i;
+	register unsigned char *CRCT1, *CRCT2, *data, c, crch, crcl;
+
+	CRCT1=CRCTable1;
+	CRCT2=CRCTable2;
+	data=data_a3;
+	crcl=data_d1;
+	crch=data_d0;
+	for (i=data_d3; i>=0; i--) {
+		c = (*data++) ^ crch;
+		crch = CRCT1[c] ^ crcl;
+		crcl = CRCT2[c];
+	}
+	return (crch<<8)|crcl;
+}
+
+static inline ushort dos_hdr_crc (struct dos_header *hdr)
+{
+	return dos_crc(&(hdr->track), 0xb2, 0x30, 3); /* precomputed magic */
+}
+
+static inline ushort dos_data_crc(unsigned char *data)
+{
+	return dos_crc(data, 0xe2, 0x95 ,511); /* precomputed magic */
+}
+
+static inline unsigned char dos_decode_byte(ushort word)
+{
+	register ushort w2;
+	register unsigned char byte;
+	register unsigned char *dec = mfmdecode;
+
+	w2=word;
+	w2>>=8;
+	w2&=127;
+	byte = dec[w2];
+	byte <<= 4;
+	w2 = word & 127;
+	byte |= dec[w2];
+	return byte;
+}
+
+static unsigned long dos_decode(unsigned char *data, unsigned short *raw, int len)
+{
+	int i;
+
+	for (i = 0; i < len; i++)
+		*data++=dos_decode_byte(*raw++);
+	return ((ulong)raw);
+}
+
+#ifdef DEBUG
+static void dbg(unsigned long ptr)
+{
+	printk("raw data @%08lx: %08lx, %08lx ,%08lx, %08lx\n", ptr,
+	       ((ulong *)ptr)[0], ((ulong *)ptr)[1],
+	       ((ulong *)ptr)[2], ((ulong *)ptr)[3]);
+}
+#endif
+
+static int dos_read(int drive)
+{
+	unsigned long end;
+	unsigned long raw;
+	int scnt;
+	unsigned short crc,data_crc[2];
+	struct dos_header hdr;
+
+	drive&=3;
+	raw = (long) raw_buf;
+	end = raw + unit[drive].type->read_size;
+
+	for (scnt=0; scnt < unit[drive].dtype->sects * unit[drive].type->sect_mult; scnt++) {
+		do { /* search for the right sync of each sec-hdr */
+			if (!(raw = scan_sync (raw, end))) {
+				printk(KERN_INFO "dos_read: no hdr sync on "
+				       "track %d, unit %d for sector %d\n",
+				       unit[drive].track,drive,scnt);
+				return MFM_NOSYNC;
+			}
+#ifdef DEBUG
+			dbg(raw);
+#endif
+		} while (*((ushort *)raw)!=0x5554); /* loop is usually done only once */
+		raw+=2; /* skip over headermark */
+		raw = dos_decode((unsigned char *)&hdr,(ushort *) raw,8);
+		crc = dos_hdr_crc(&hdr);
+
+#ifdef DEBUG
+		printk("(%3d,%d,%2d,%d) %x\n", hdr.track, hdr.side,
+		       hdr.sec, hdr.len_desc, hdr.crc);
+#endif
+
+		if (crc != hdr.crc) {
+			printk(KERN_INFO "dos_read: MFM_HEADER %04x,%04x\n",
+			       hdr.crc, crc);
+			return MFM_HEADER;
+		}
+		if (hdr.track != unit[drive].track/unit[drive].type->heads) {
+			printk(KERN_INFO "dos_read: MFM_TRACK %d, %d\n",
+			       hdr.track,
+			       unit[drive].track/unit[drive].type->heads);
+			return MFM_TRACK;
+		}
+
+		if (hdr.side != unit[drive].track%unit[drive].type->heads) {
+			printk(KERN_INFO "dos_read: MFM_SIDE %d, %d\n",
+			       hdr.side,
+			       unit[drive].track%unit[drive].type->heads);
+			return MFM_TRACK;
+		}
+
+		if (hdr.len_desc != 2) {
+			printk(KERN_INFO "dos_read: unknown sector len "
+			       "descriptor %d\n", hdr.len_desc);
+			return MFM_DATA;
+		}
+#ifdef DEBUG
+		printk("hdr accepted\n");
+#endif
+		if (!(raw = scan_sync (raw, end))) {
+			printk(KERN_INFO "dos_read: no data sync on track "
+			       "%d, unit %d for sector %d, disk sector %d\n",
+			       unit[drive].track, drive, scnt, hdr.sec);
+			return MFM_NOSYNC;
+		}
+#ifdef DEBUG
+		dbg(raw);
+#endif
+
+		if (*((ushort *)raw)!=0x5545) {
+			printk(KERN_INFO "dos_read: no data mark after "
+			       "sync (%d,%d,%d,%d) sc=%d\n",
+			       hdr.track,hdr.side,hdr.sec,hdr.len_desc,scnt);
+			return MFM_NOSYNC;
+		}
+
+		raw+=2;  /* skip data mark (included in checksum) */
+		raw = dos_decode((unsigned char *)(unit[drive].trackbuf + (hdr.sec - 1) * 512), (ushort *) raw, 512);
+		raw = dos_decode((unsigned char  *)data_crc,(ushort *) raw,4);
+		crc = dos_data_crc(unit[drive].trackbuf + (hdr.sec - 1) * 512);
+
+		if (crc != data_crc[0]) {
+			printk(KERN_INFO "dos_read: MFM_DATA (%d,%d,%d,%d) "
+			       "sc=%d, %x %x\n", hdr.track, hdr.side,
+			       hdr.sec, hdr.len_desc, scnt,data_crc[0], crc);
+			printk(KERN_INFO "data=(%lx,%lx,%lx,%lx,...)\n",
+			       ((ulong *)(unit[drive].trackbuf+(hdr.sec-1)*512))[0],
+			       ((ulong *)(unit[drive].trackbuf+(hdr.sec-1)*512))[1],
+			       ((ulong *)(unit[drive].trackbuf+(hdr.sec-1)*512))[2],
+			       ((ulong *)(unit[drive].trackbuf+(hdr.sec-1)*512))[3]);
+			return MFM_DATA;
+		}
+	}
+	return 0;
+}
+
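+/* encode one byte as an MFM word: bit 7 is the clock bit between the two
+ * encoded nibbles and is set only when its neighbouring data bits (bits
+ * 8 and 6) are both zero */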
+static inline ushort dos_encode_byte(unsigned char byte)
+{
+	register unsigned char *enc, b2, b1;
+	register ushort word;
+
+	enc=mfmencode;
+	b1=byte;
+	b2=b1>>4;
+	b1&=15;
+	word=enc[b2] <<8 | enc [b1];
+	return (word|((word&(256|64)) ? 0: 128));
+}
+
+static void dos_encode_block(ushort *dest, unsigned char *src, int len)
+{
+	int i;
+
+	for (i = 0; i < len; i++) {
+		*dest=dos_encode_byte(*src++);
+		*dest|=((dest[-1]&1)||(*dest&0x4000))? 0: 0x8000;
+		dest++;
+	}
+}
+
+static unsigned long *ms_putsec(int drive, unsigned long *raw, int cnt)
+{
+	static struct dos_header hdr={0,0,0,2,0,
+	  {78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78,78}};
+	int i;
+	static ushort crc[2]={0,0x4e4e};
+
+	drive&=3;
+/* id gap 1 */
+/* the MFM word before is always 9254 */
+	for(i=0;i<6;i++)
+		*raw++=0xaaaaaaaa;
+/* 3 sync + 1 headermark */
+	*raw++=0x44894489;
+	*raw++=0x44895554;
+
+/* fill in the variable parts of the header */
+	hdr.track=unit[drive].track/unit[drive].type->heads;
+	hdr.side=unit[drive].track%unit[drive].type->heads;
+	hdr.sec=cnt+1;
+	hdr.crc=dos_hdr_crc(&hdr);
+
+/* header (without "magic") and id gap 2*/
+	dos_encode_block((ushort *)raw,(unsigned char *) &hdr.track,28);
+	raw+=14;
+
+/*id gap 3 */
+	for(i=0;i<6;i++)
+		*raw++=0xaaaaaaaa;
+
+/* 3 syncs and 1 datamark */
+	*raw++=0x44894489;
+	*raw++=0x44895545;
+
+/* data */
+	dos_encode_block((ushort *)raw,
+			 (unsigned char *)unit[drive].trackbuf+cnt*512,512);
+	raw+=256;
+
+/*data crc + jd's special gap (long words :-/) */
+	crc[0]=dos_data_crc(unit[drive].trackbuf+cnt*512);
+	dos_encode_block((ushort *) raw,(unsigned char *)crc,4);
+	raw+=2;
+
+/* data gap */
+	for(i=0;i<38;i++)
+		*raw++=0x92549254;
+
+	return raw; /* wrote 652 MFM words */
+}
+
+static void dos_write(int disk)
+{
+	int cnt;
+	unsigned long raw = (unsigned long) raw_buf;
+	unsigned long *ptr=(unsigned long *)raw;
+
+	disk&=3;
+/* really gap4 + indexgap , but we write it first and round it up */
+	for (cnt=0;cnt<425;cnt++)
+		*ptr++=0x92549254;
+
+/* the following is just guessed */
+	if (unit[disk].type->sect_mult==2)  /* check for HD-Disks */
+		for(cnt=0;cnt<473;cnt++)
+			*ptr++=0x92549254;
+
+/* now the index marks...*/
+	for (cnt=0;cnt<20;cnt++)
+		*ptr++=0x92549254;
+	for (cnt=0;cnt<6;cnt++)
+		*ptr++=0xaaaaaaaa;
+	*ptr++=0x52245224;
+	*ptr++=0x52245552;
+	for (cnt=0;cnt<20;cnt++)
+		*ptr++=0x92549254;
+
+/* sectors */
+	for(cnt = 0; cnt < unit[disk].dtype->sects * unit[disk].type->sect_mult; cnt++)
+		ptr=ms_putsec(disk,ptr,cnt);
+
+	*(ushort *)ptr = 0xaaa8; /* MFM word before is always 0x9254 */
+}
+
+/*
+ * Here comes the high level stuff (i.e. the filesystem interface)
+ * and helper functions.
+ * Normally this should be the only part that has to be adapted to
+ * different kernel versions.
+ */
+
+/* FIXME: this assumes the drive is still spinning -
+ * which is only true if we complete writing a track within three seconds
+ */
+static void flush_track_callback(unsigned long nr)
+{
+	nr&=3;
+	writefromint = 1;
+	if (!try_fdc(nr)) {
+		/* we might block in an interrupt, so try again later */
+		flush_track_timer[nr].expires = jiffies + 1;
+		add_timer(flush_track_timer + nr);
+		return;
+	}
+	get_fdc(nr);
+	(*unit[nr].dtype->write_fkt)(nr);
+	if (!raw_write(nr)) {
+		printk (KERN_NOTICE "floppy disk write protected\n");
+		writefromint = 0;
+		writepending = 0;
+	}
+	rel_fdc();
+}
+
+static int non_int_flush_track (unsigned long nr)
+{
+	unsigned long flags;
+
+	nr&=3;
+	writefromint = 0;
+	del_timer(&post_write_timer);
+	get_fdc(nr);
+	if (!fd_motor_on(nr)) {
+		writepending = 0;
+		rel_fdc();
+		return 0;
+	}
+	local_irq_save(flags);
+	if (writepending != 2) {
+		local_irq_restore(flags);
+		(*unit[nr].dtype->write_fkt)(nr);
+		if (!raw_write(nr)) {
+			printk (KERN_NOTICE "floppy disk write protected "
+				"in write!\n");
+			writepending = 0;
+			return 0;
+		}
+		while (block_flag == 2)
+			sleep_on (&wait_fd_block);
+	}
+	else {
+		local_irq_restore(flags);
+		ms_delay(2); /* 2 ms post_write delay */
+		post_write(nr);
+	}
+	rel_fdc();
+	return 1;
+}
+
+static int get_track(int drive, int track)
+{
+	int error, errcnt;
+
+	drive&=3;
+	if (unit[drive].track == track)
+		return 0;
+	get_fdc(drive);
+	if (!fd_motor_on(drive)) {
+		rel_fdc();
+		return -1;
+	}
+
+	if (unit[drive].dirty == 1) {
+		del_timer (flush_track_timer + drive);
+		non_int_flush_track (drive);
+	}
+	errcnt = 0;
+	while (errcnt < MAX_ERRORS) {
+		if (!fd_seek(drive, track))
+			return -1;
+		raw_read(drive);
+		error = (*unit[drive].dtype->read_fkt)(drive);
+		if (error == 0) {
+			rel_fdc();
+			return 0;
+		}
+		/* Read Error Handling: recalibrate and try again */
+		unit[drive].track = -1;
+		errcnt++;
+	}
+	rel_fdc();
+	return -1;
+}
+
+static void redo_fd_request(void)
+{
+	unsigned int cnt, block, track, sector;
+	int drive;
+	struct amiga_floppy_struct *floppy;
+	char *data;
+	unsigned long flags;
+
+ repeat:
+	if (!CURRENT) {
+		/* Nothing left to do */
+		return;
+	}
+
+	floppy = CURRENT->rq_disk->private_data;
+	drive = floppy - unit;
+
+	/* Here someone could investigate how to make this more efficient */
+	for (cnt = 0; cnt < CURRENT->current_nr_sectors; cnt++) { 
+#ifdef DEBUG
+		printk("fd: sector %ld + %d requested for %s\n",
+		       CURRENT->sector,cnt,
+		       (rq_data_dir(CURRENT) == READ) ? "read" : "write");
+#endif
+		block = CURRENT->sector + cnt;
+		if ((int)block > floppy->blocks) {
+			end_request(CURRENT, 0);
+			goto repeat;
+		}
+
+		track = block / (floppy->dtype->sects * floppy->type->sect_mult);
+		sector = block % (floppy->dtype->sects * floppy->type->sect_mult);
+		data = CURRENT->buffer + 512 * cnt;
+#ifdef DEBUG
+		printk("access to track %d, sector %d, with buffer at "
+		       "0x%08lx\n", track, sector, (unsigned long)data);
+#endif
+
+		if ((rq_data_dir(CURRENT) != READ) && (rq_data_dir(CURRENT) != WRITE)) {
+			printk(KERN_WARNING "do_fd_request: unknown command\n");
+			end_request(CURRENT, 0);
+			goto repeat;
+		}
+		if (get_track(drive, track) == -1) {
+			end_request(CURRENT, 0);
+			goto repeat;
+		}
+
+		switch (rq_data_dir(CURRENT)) {
+		case READ:
+			memcpy(data, floppy->trackbuf + sector * 512, 512);
+			break;
+
+		case WRITE:
+			memcpy(floppy->trackbuf + sector * 512, data, 512);
+
+			/* keep the drive spinning while writes are scheduled */
+			if (!fd_motor_on(drive)) {
+				end_request(CURRENT, 0);
+				goto repeat;
+			}
+			/*
+			 * setup a callback to write the track buffer
+			 * after a short (1 tick) delay.
+			 */
+			local_irq_save(flags);
+
+			floppy->dirty = 1;
+			/* reset the timer */
+			mod_timer (flush_track_timer + drive, jiffies + 1);
+			local_irq_restore(flags);
+			break;
+		}
+	}
+	CURRENT->nr_sectors -= CURRENT->current_nr_sectors;
+	CURRENT->sector += CURRENT->current_nr_sectors;
+
+	end_request(CURRENT, 1);
+	goto repeat;
+}
+
+static void do_fd_request(request_queue_t * q)
+{
+	redo_fd_request();
+}
+
+static int fd_ioctl(struct inode *inode, struct file *filp,
+		    unsigned int cmd, unsigned long param)
+{
+	int drive = iminor(inode) & 3;
+	static struct floppy_struct getprm;
+
+	switch(cmd){
+	case HDIO_GETGEO:
+	{
+		struct hd_geometry loc;
+		loc.heads = unit[drive].type->heads;
+		loc.sectors = unit[drive].dtype->sects * unit[drive].type->sect_mult;
+		loc.cylinders = unit[drive].type->tracks;
+		loc.start = 0;
+		if (copy_to_user((void *)param, (void *)&loc,
+				 sizeof(struct hd_geometry)))
+			return -EFAULT;
+		break;
+	}
+	case FDFMTBEG:
+		get_fdc(drive);
+		if (fd_ref[drive] > 1) {
+			rel_fdc();
+			return -EBUSY;
+		}
+		fsync_bdev(inode->i_bdev);
+		if (fd_motor_on(drive) == 0) {
+			rel_fdc();
+			return -ENODEV;
+		}
+		if (fd_calibrate(drive) == 0) {
+			rel_fdc();
+			return -ENXIO;
+		}
+		floppy_off(drive);
+		rel_fdc();
+		break;
+	case FDFMTTRK:
+		if (param < unit[drive].type->tracks * unit[drive].type->heads)
+		{
+			get_fdc(drive);
+			if (fd_seek(drive,param) != 0){
+				memset(unit[drive].trackbuf, FD_FILL_BYTE,
+				       unit[drive].dtype->sects * unit[drive].type->sect_mult * 512);
+				non_int_flush_track(drive);
+			}
+			floppy_off(drive);
+			rel_fdc();
+		}
+		else
+			return -EINVAL;
+		break;
+	case FDFMTEND:
+		floppy_off(drive);
+		invalidate_bdev(inode->i_bdev, 0);
+		break;
+	case FDGETPRM:
+		memset((void *)&getprm, 0, sizeof (getprm));
+		getprm.track=unit[drive].type->tracks;
+		getprm.head=unit[drive].type->heads;
+		getprm.sect=unit[drive].dtype->sects * unit[drive].type->sect_mult;
+		getprm.size=unit[drive].blocks;
+		if (copy_to_user((void *)param,
+				 (void *)&getprm,
+				 sizeof(struct floppy_struct)))
+			return -EFAULT;
+		break;
+	case FDSETPRM:
+	case FDDEFPRM:
+		return -EINVAL;
+	case FDFLUSH: /* unconditionally, even if not needed */
+		del_timer (flush_track_timer + drive);
+		non_int_flush_track(drive);
+		break;
+#ifdef RAW_IOCTL
+	case IOCTL_RAW_TRACK:
+		if (copy_to_user((void *)param, raw_buf,
+				 unit[drive].type->read_size))
+			return -EFAULT;
+		else
+			return unit[drive].type->read_size;
+#endif
+	default:
+		printk(KERN_DEBUG "fd_ioctl: unknown cmd %d for drive %d.\n",
+		       cmd, drive);
+		return -ENOSYS;
+	}
+	return 0;
+}
+
+static void fd_probe(int dev)
+{
+	unsigned long code;
+	int type;
+	int drive;
+
+	drive = dev & 3;
+	code = fd_get_drive_id(drive);
+
+	/* get drive type */
+	for (type = 0; type < num_dr_types; type++)
+		if (drive_types[type].code == code)
+			break;
+
+	if (type >= num_dr_types) {
+		printk(KERN_WARNING "fd_probe: unsupported drive type "
+		       "%08lx found\n", code);
+		unit[drive].type = &drive_types[num_dr_types-1]; /* FD_NODRIVE */
+		return;
+	}
+
+	unit[drive].type = drive_types + type;
+	unit[drive].track = -1;
+
+	unit[drive].disk = -1;
+	unit[drive].motor = 0;
+	unit[drive].busy = 0;
+	unit[drive].status = -1;
+}
+
+/*
+ * floppy_open checks for aliasing (/dev/fd0 can be the same as
+ * /dev/PS0 etc), and disallows simultaneous access to the same
+ * drive with different device numbers.
+ */
+static int floppy_open(struct inode *inode, struct file *filp)
+{
+	int drive = iminor(inode) & 3;
+	int system =  (iminor(inode) & 4) >> 2;
+	int old_dev;
+	unsigned long flags;
+
+	old_dev = fd_device[drive];
+
+	if (fd_ref[drive] && old_dev != system)
+		return -EBUSY;
+
+	if (filp && filp->f_mode & 3) {
+		check_disk_change(inode->i_bdev);
+		if (filp->f_mode & 2 ) {
+			int wrprot;
+
+			get_fdc(drive);
+			fd_select (drive);
+			wrprot = !(ciaa.pra & DSKPROT);
+			fd_deselect (drive);
+			rel_fdc();
+
+			if (wrprot)
+				return -EROFS;
+		}
+	}
+
+	local_irq_save(flags);
+	fd_ref[drive]++;
+	fd_device[drive] = system;
+	local_irq_restore(flags);
+
+	unit[drive].dtype=&data_types[system];
+	unit[drive].blocks=unit[drive].type->heads*unit[drive].type->tracks*
+		data_types[system].sects*unit[drive].type->sect_mult;
+	set_capacity(unit[drive].gendisk, unit[drive].blocks);
+
+	printk(KERN_INFO "fd%d: accessing %s-disk with %s-layout\n",drive,
+	       unit[drive].type->name, data_types[system].name);
+
+	return 0;
+}
+
+static int floppy_release(struct inode * inode, struct file * filp)
+{
+	int drive = iminor(inode) & 3;
+
+	if (unit[drive].dirty == 1) {
+		del_timer (flush_track_timer + drive);
+		non_int_flush_track (drive);
+	}
+  
+	if (!fd_ref[drive]--) {
+		printk(KERN_CRIT "floppy_release with fd_ref == 0\n");
+		fd_ref[drive] = 0;
+	}
+#ifdef MODULE
+/* the mod_use counter is handled this way */
+	floppy_off (drive | 0x40000000);
+#endif
+	return 0;
+}
+
+/*
+ * floppy-change is never called from an interrupt, so we can relax a bit
+ * here, sleep etc. Note that floppy-on tries to set current_DOR to point
+ * to the desired drive, but it will probably not survive the sleep if
+ * several floppies are used at the same time: thus the loop.
+ */
+static int amiga_floppy_change(struct gendisk *disk)
+{
+	struct amiga_floppy_struct *p = disk->private_data;
+	int drive = p - unit;
+	int changed;
+	static int first_time = 1;
+
+	if (first_time)
+		changed = first_time--;
+	else {
+		get_fdc(drive);
+		fd_select (drive);
+		changed = !(ciaa.pra & DSKCHANGE);
+		fd_deselect (drive);
+		rel_fdc();
+	}
+
+	if (changed) {
+		fd_probe(drive);
+		p->track = -1;
+		p->dirty = 0;
+		writepending = 0; /* if this was true before, too bad! */
+		writefromint = 0;
+		return 1;
+	}
+	return 0;
+}
+
+static struct block_device_operations floppy_fops = {
+	.owner		= THIS_MODULE,
+	.open		= floppy_open,
+	.release	= floppy_release,
+	.ioctl		= fd_ioctl,
+	.media_changed	= amiga_floppy_change,
+};
+
+void __init amiga_floppy_setup (char *str, int *ints)
+{
+	printk (KERN_INFO "amiflop: Setting default df0 to %x\n", ints[1]);
+	fd_def_df0 = ints[1];
+}
+
+static int __init fd_probe_drives(void)
+{
+	int drive,drives,nomem;
+
+	printk(KERN_INFO "FD: probing units\n" KERN_INFO "found ");
+	drives=0;
+	nomem=0;
+	for(drive=0;drive<FD_MAX_UNITS;drive++) {
+		struct gendisk *disk;
+		fd_probe(drive);
+		if (unit[drive].type->code == FD_NODRIVE)
+			continue;
+		disk = alloc_disk(1);
+		if (!disk) {
+			unit[drive].type->code = FD_NODRIVE;
+			continue;
+		}
+		unit[drive].gendisk = disk;
+		drives++;
+		if ((unit[drive].trackbuf = kmalloc(FLOPPY_MAX_SECTORS * 512, GFP_KERNEL)) == NULL) {
+			printk("no mem for ");
+			unit[drive].type = &drive_types[num_dr_types - 1]; /* FD_NODRIVE */
+			drives--;
+			nomem = 1;
+		}
+		printk("fd%d ",drive);
+		disk->major = FLOPPY_MAJOR;
+		disk->first_minor = drive;
+		disk->fops = &floppy_fops;
+		sprintf(disk->disk_name, "fd%d", drive);
+		disk->private_data = &unit[drive];
+		disk->queue = floppy_queue;
+		set_capacity(disk, 880*2);
+		add_disk(disk);
+	}
+	if ((drives > 0) || (nomem == 0)) {
+		if (drives == 0)
+			printk("no drives");
+		printk("\n");
+		return drives;
+	}
+	printk("\n");
+	return -ENOMEM;
+}
+ 
+static struct kobject *floppy_find(dev_t dev, int *part, void *data)
+{
+	int drive = *part & 3;
+	if (unit[drive].type->code == FD_NODRIVE)
+		return NULL;
+	*part = 0;
+	return get_disk(unit[drive].gendisk);
+}
+
+int __init amiga_floppy_init(void)
+{
+	int i, ret;
+
+	if (!AMIGAHW_PRESENT(AMI_FLOPPY))
+		return -ENXIO;
+
+	if (register_blkdev(FLOPPY_MAJOR,"fd"))
+		return -EBUSY;
+
+	/*
+	 *  We request DSKPTR, DSKLEN and DSKDATA only, because the other
+	 *  floppy registers are too spread out over the custom register space
+	 */
+	ret = -EBUSY;
+	if (!request_mem_region(CUSTOM_PHYSADDR+0x20, 8, "amiflop [Paula]")) {
+		printk("fd: cannot get floppy registers\n");
+		goto out_blkdev;
+	}
+
+	ret = -ENOMEM;
+	if ((raw_buf = (char *)amiga_chip_alloc (RAW_BUF_SIZE, "Floppy")) ==
+	    NULL) {
+		printk("fd: cannot get chip mem buffer\n");
+		goto out_memregion;
+	}
+
+	ret = -EBUSY;
+	if (request_irq(IRQ_AMIGA_DSKBLK, fd_block_done, 0, "floppy_dma", NULL)) {
+		printk("fd: cannot get irq for dma\n");
+		goto out_irq;
+	}
+
+	if (request_irq(IRQ_AMIGA_CIAA_TB, ms_isr, 0, "floppy_timer", NULL)) {
+		printk("fd: cannot get irq for timer\n");
+		goto out_irq2;
+	}
+
+	ret = -ENOMEM;
+	floppy_queue = blk_init_queue(do_fd_request, &amiflop_lock);
+	if (!floppy_queue)
+		goto out_queue;
+
+	ret = -ENXIO;
+	if (fd_probe_drives() < 1) /* No usable drives */
+		goto out_probe;
+
+	blk_register_region(MKDEV(FLOPPY_MAJOR, 0), 256, THIS_MODULE,
+				floppy_find, NULL, NULL);
+
+	/* initialize variables */
+	init_timer(&motor_on_timer);
+	motor_on_timer.expires = 0;
+	motor_on_timer.data = 0;
+	motor_on_timer.function = motor_on_callback;
+	for (i = 0; i < FD_MAX_UNITS; i++) {
+		init_timer(&motor_off_timer[i]);
+		motor_off_timer[i].expires = 0;
+		motor_off_timer[i].data = i|0x80000000;
+		motor_off_timer[i].function = fd_motor_off;
+		init_timer(&flush_track_timer[i]);
+		flush_track_timer[i].expires = 0;
+		flush_track_timer[i].data = i;
+		flush_track_timer[i].function = flush_track_callback;
+
+		unit[i].track = -1;
+	}
+
+	init_timer(&post_write_timer);
+	post_write_timer.expires = 0;
+	post_write_timer.data = 0;
+	post_write_timer.function = post_write;
+  
+	for (i = 0; i < 128; i++)
+		mfmdecode[i]=255;
+	for (i = 0; i < 16; i++)
+		mfmdecode[mfmencode[i]]=i;
+
+	/* make sure that disk DMA is enabled */
+	custom.dmacon = DMAF_SETCLR | DMAF_DISK;
+
+	/* init ms timer */
+	ciaa.crb = 8; /* one-shot, stop */
+	return 0;
+
+out_probe:
+	blk_cleanup_queue(floppy_queue);
+out_queue:
+	free_irq(IRQ_AMIGA_CIAA_TB, NULL);
+out_irq2:
+	free_irq(IRQ_AMIGA_DSKBLK, NULL);
+out_irq:
+	amiga_chip_free(raw_buf);
+out_memregion:
+	release_mem_region(CUSTOM_PHYSADDR+0x20, 8);
+out_blkdev:
+	unregister_blkdev(FLOPPY_MAJOR,"fd");
+	return ret;
+}
+
+#ifdef MODULE
+#include <linux/version.h>
+
+int init_module(void)
+{
+	if (!MACH_IS_AMIGA)
+		return -ENXIO;
+	return amiga_floppy_init();
+}
+
+#if 0 /* not safe to unload */
+void cleanup_module(void)
+{
+	int i;
+
+	for( i = 0; i < FD_MAX_UNITS; i++) {
+		if (unit[i].type->code != FD_NODRIVE) {
+			del_gendisk(unit[i].gendisk);
+			put_disk(unit[i].gendisk);
+			kfree(unit[i].trackbuf);
+		}
+	}
+	blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
+	free_irq(IRQ_AMIGA_CIAA_TB, NULL);
+	free_irq(IRQ_AMIGA_DSKBLK, NULL);
+	custom.dmacon = DMAF_DISK; /* disable DMA */
+	amiga_chip_free(raw_buf);
+	blk_cleanup_queue(floppy_queue);
+	release_mem_region(CUSTOM_PHYSADDR+0x20, 8);
+	unregister_blkdev(FLOPPY_MAJOR, "fd");
+}
+#endif
+#endif
diff --git a/drivers/block/aoe/Makefile b/drivers/block/aoe/Makefile
new file mode 100644
index 0000000..e76d997
--- /dev/null
+++ b/drivers/block/aoe/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for ATA over Ethernet
+#
+
+obj-$(CONFIG_ATA_OVER_ETH)	+= aoe.o
+aoe-objs := aoeblk.o aoechr.o aoecmd.o aoedev.o aoemain.o aoenet.o
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
new file mode 100644
index 0000000..db78f82
--- /dev/null
+++ b/drivers/block/aoe/aoe.h
@@ -0,0 +1,165 @@
+/* Copyright (c) 2004 Coraid, Inc.  See COPYING for GPL terms. */
+#define VERSION "5"
+#define AOE_MAJOR 152
+#define DEVICE_NAME "aoe"
+#ifndef AOE_PARTITIONS
+#define AOE_PARTITIONS 16
+#endif
+#define SYSMINOR(aoemajor, aoeminor) ((aoemajor) * 10 + (aoeminor))
+#define AOEMAJOR(sysminor) ((sysminor) / 10)
+#define AOEMINOR(sysminor) ((sysminor) % 10)
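+/* a system minor packs shelf (aoe major) and slot (aoe minor), with ten
+ * slots per shelf; cf. NPERSHELF below */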
+#define WHITESPACE " \t\v\f\n"
+
+enum {
+	AOECMD_ATA,
+	AOECMD_CFG,
+
+	AOEFL_RSP = (1<<3),
+	AOEFL_ERR = (1<<2),
+
+	AOEAFL_EXT = (1<<6),
+	AOEAFL_DEV = (1<<4),
+	AOEAFL_ASYNC = (1<<1),
+	AOEAFL_WRITE = (1<<0),
+
+	AOECCMD_READ = 0,
+	AOECCMD_TEST,
+	AOECCMD_PTEST,
+	AOECCMD_SET,
+	AOECCMD_FSET,
+
+	AOE_HVER = 0x10,
+};
+
+struct aoe_hdr {
+	unsigned char dst[6];
+	unsigned char src[6];
+	unsigned char type[2];
+	unsigned char verfl;
+	unsigned char err;
+	unsigned char major[2];
+	unsigned char minor;
+	unsigned char cmd;
+	unsigned char tag[4];
+};
+
+struct aoe_atahdr {
+	unsigned char aflags;
+	unsigned char errfeat;
+	unsigned char scnt;
+	unsigned char cmdstat;
+	unsigned char lba0;
+	unsigned char lba1;
+	unsigned char lba2;
+	unsigned char lba3;
+	unsigned char lba4;
+	unsigned char lba5;
+	unsigned char res[2];
+};
+
+struct aoe_cfghdr {
+	unsigned char bufcnt[2];
+	unsigned char fwver[2];
+	unsigned char res;
+	unsigned char aoeccmd;
+	unsigned char cslen[2];
+};
+
+enum {
+	DEVFL_UP = 1,	/* device is installed in system and ready for AoE->ATA commands */
+	DEVFL_TKILL = (1<<1),	/* flag for timer to know when to kill self */
+	DEVFL_EXT = (1<<2),	/* device accepts lba48 commands */
+	DEVFL_CLOSEWAIT = (1<<3), /* device is waiting for all closes to revalidate */
+	DEVFL_WC_UPDATE = (1<<4), /* this device needs to update write cache status */
+	DEVFL_WORKON = (1<<5),
+
+	BUFFL_FAIL = 1,
+};
+
+enum {
+	MAXATADATA = 1024,
+	NPERSHELF = 10,
+	FREETAG = -1,
+	MIN_BUFS = 8,
+};
+
+struct buf {
+	struct list_head bufs;
+	ulong flags;
+	ulong nframesout;
+	char *bufaddr;
+	ulong resid;
+	ulong bv_resid;
+	sector_t sector;
+	struct bio *bio;
+	struct bio_vec *bv;
+};
+
+struct frame {
+	int tag;
+	ulong waited;
+	struct buf *buf;
+	char *bufaddr;
+	int writedatalen;
+	int ndata;
+
+	/* largest possible */
+	unsigned char data[sizeof(struct aoe_hdr) + sizeof(struct aoe_atahdr)];
+};
+
+struct aoedev {
+	struct aoedev *next;
+	unsigned char addr[6];	/* remote mac addr */
+	ushort flags;
+	ulong sysminor;
+	ulong aoemajor;
+	ulong aoeminor;
+	ulong nopen;		/* (bd_openers isn't available without sleeping) */
+	ulong rttavg;		/* round trip average of requests/responses */
+	u16 fw_ver;		/* version of blade's firmware */
+	struct work_struct work;/* disk create work struct */
+	struct gendisk *gd;
+	request_queue_t blkq;
+	struct hd_geometry geo; 
+	sector_t ssize;
+	struct timer_list timer;
+	spinlock_t lock;
+	struct net_device *ifp;	/* interface ed is attached to */
+	struct sk_buff *skblist;/* packets needing to be sent */
+	mempool_t *bufpool;	/* for deadlock-free Buf allocation */
+	struct list_head bufq;	/* queue of bios to work on */
+	struct buf *inprocess;	/* the one we're currently working on */
+	ulong lasttag;		/* last tag sent */
+	ulong nframes;		/* number of frames below */
+	struct frame *frames;
+};
+
+
+int aoeblk_init(void);
+void aoeblk_exit(void);
+void aoeblk_gdalloc(void *);
+void aoedisk_rm_sysfs(struct aoedev *d);
+
+int aoechr_init(void);
+void aoechr_exit(void);
+void aoechr_error(char *);
+
+void aoecmd_work(struct aoedev *d);
+void aoecmd_cfg(ushort, unsigned char);
+void aoecmd_ata_rsp(struct sk_buff *);
+void aoecmd_cfg_rsp(struct sk_buff *);
+
+int aoedev_init(void);
+void aoedev_exit(void);
+struct aoedev *aoedev_bymac(unsigned char *);
+void aoedev_downdev(struct aoedev *d);
+struct aoedev *aoedev_set(ulong, unsigned char *, struct net_device *, ulong);
+int aoedev_busy(void);
+
+int aoenet_init(void);
+void aoenet_exit(void);
+void aoenet_xmit(struct sk_buff *);
+int is_aoe_netif(struct net_device *ifp);
+int set_aoe_iflist(const char __user *str, size_t size);
+
+u64 mac_addr(char addr[6]);
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
new file mode 100644
index 0000000..63561b2
--- /dev/null
+++ b/drivers/block/aoe/aoeblk.c
@@ -0,0 +1,267 @@
+/* Copyright (c) 2004 Coraid, Inc.  See COPYING for GPL terms. */
+/*
+ * aoeblk.c
+ * block device routines
+ */
+
+#include <linux/hdreg.h>
+#include <linux/blkdev.h>
+#include <linux/fs.h>
+#include <linux/ioctl.h>
+#include <linux/genhd.h>
+#include <linux/netdevice.h>
+#include "aoe.h"
+
+static kmem_cache_t *buf_pool_cache;
+
+/* add attributes for our block devices in sysfs */
+static ssize_t aoedisk_show_state(struct gendisk * disk, char *page)
+{
+	struct aoedev *d = disk->private_data;
+
+	return snprintf(page, PAGE_SIZE,
+			"%s%s\n",
+			(d->flags & DEVFL_UP) ? "up" : "down",
+			(d->flags & DEVFL_CLOSEWAIT) ? ",closewait" : "");
+}
+static ssize_t aoedisk_show_mac(struct gendisk * disk, char *page)
+{
+	struct aoedev *d = disk->private_data;
+
+	return snprintf(page, PAGE_SIZE, "%012llx\n",
+			(unsigned long long)mac_addr(d->addr));
+}
+static ssize_t aoedisk_show_netif(struct gendisk * disk, char *page)
+{
+	struct aoedev *d = disk->private_data;
+
+	return snprintf(page, PAGE_SIZE, "%s\n", d->ifp->name);
+}
+
+static struct disk_attribute disk_attr_state = {
+	.attr = {.name = "state", .mode = S_IRUGO },
+	.show = aoedisk_show_state
+};
+static struct disk_attribute disk_attr_mac = {
+	.attr = {.name = "mac", .mode = S_IRUGO },
+	.show = aoedisk_show_mac
+};
+static struct disk_attribute disk_attr_netif = {
+	.attr = {.name = "netif", .mode = S_IRUGO },
+	.show = aoedisk_show_netif
+};
+
+static void
+aoedisk_add_sysfs(struct aoedev *d)
+{
+	sysfs_create_file(&d->gd->kobj, &disk_attr_state.attr);
+	sysfs_create_file(&d->gd->kobj, &disk_attr_mac.attr);
+	sysfs_create_file(&d->gd->kobj, &disk_attr_netif.attr);
+}
+void
+aoedisk_rm_sysfs(struct aoedev *d)
+{
+	sysfs_remove_link(&d->gd->kobj, "state");
+	sysfs_remove_link(&d->gd->kobj, "mac");
+	sysfs_remove_link(&d->gd->kobj, "netif");
+}
+
+static int
+aoeblk_open(struct inode *inode, struct file *filp)
+{
+	struct aoedev *d;
+	ulong flags;
+
+	d = inode->i_bdev->bd_disk->private_data;
+
+	spin_lock_irqsave(&d->lock, flags);
+	if (d->flags & DEVFL_UP) {
+		d->nopen++;
+		spin_unlock_irqrestore(&d->lock, flags);
+		return 0;
+	}
+	spin_unlock_irqrestore(&d->lock, flags);
+	return -ENODEV;
+}
+
+static int
+aoeblk_release(struct inode *inode, struct file *filp)
+{
+	struct aoedev *d;
+	ulong flags;
+
+	d = inode->i_bdev->bd_disk->private_data;
+
+	spin_lock_irqsave(&d->lock, flags);
+
+	if (--d->nopen == 0 && (d->flags & DEVFL_CLOSEWAIT)) {
+		d->flags &= ~DEVFL_CLOSEWAIT;
+		spin_unlock_irqrestore(&d->lock, flags);
+		aoecmd_cfg(d->aoemajor, d->aoeminor);
+		return 0;
+	}
+	spin_unlock_irqrestore(&d->lock, flags);
+
+	return 0;
+}
+
+static int
+aoeblk_make_request(request_queue_t *q, struct bio *bio)
+{
+	struct aoedev *d;
+	struct buf *buf;
+	struct sk_buff *sl;
+	ulong flags;
+
+	blk_queue_bounce(q, &bio);
+
+	d = bio->bi_bdev->bd_disk->private_data;
+	buf = mempool_alloc(d->bufpool, GFP_NOIO);
+	if (buf == NULL) {
+		printk(KERN_INFO "aoe: aoeblk_make_request: buf allocation "
+			"failure\n");
+		bio_endio(bio, bio->bi_size, -ENOMEM);
+		return 0;
+	}
+	memset(buf, 0, sizeof(*buf));
+	INIT_LIST_HEAD(&buf->bufs);
+	buf->bio = bio;
+	buf->resid = bio->bi_size;
+	buf->sector = bio->bi_sector;
+	buf->bv = buf->bio->bi_io_vec;
+	buf->bv_resid = buf->bv->bv_len;
+	buf->bufaddr = page_address(buf->bv->bv_page) + buf->bv->bv_offset;
+
+	spin_lock_irqsave(&d->lock, flags);
+
+	if ((d->flags & DEVFL_UP) == 0) {
+		printk(KERN_INFO "aoe: aoeblk_make_request: device %ld.%ld is not up\n",
+			d->aoemajor, d->aoeminor);
+		spin_unlock_irqrestore(&d->lock, flags);
+		mempool_free(buf, d->bufpool);
+		bio_endio(bio, bio->bi_size, -ENXIO);
+		return 0;
+	}
+
+	list_add_tail(&buf->bufs, &d->bufq);
+	aoecmd_work(d);
+
+	sl = d->skblist;
+	d->skblist = NULL;
+
+	spin_unlock_irqrestore(&d->lock, flags);
+
+	aoenet_xmit(sl);
+	return 0;
+}
+
+/* This ioctl implementation expects userland to have the device node
+ * permissions set so that only privileged users can open an aoe
+ * block device directly.
+ */
+static int
+aoeblk_ioctl(struct inode *inode, struct file *filp, uint cmd, ulong arg)
+{
+	struct aoedev *d;
+
+	if (!arg)
+		return -EINVAL;
+
+	d = inode->i_bdev->bd_disk->private_data;
+	if ((d->flags & DEVFL_UP) == 0) {
+		printk(KERN_ERR "aoe: aoeblk_ioctl: disk not up\n");
+		return -ENODEV;
+	}
+
+	if (cmd == HDIO_GETGEO) {
+		d->geo.start = get_start_sect(inode->i_bdev);
+		if (!copy_to_user((void __user *) arg, &d->geo, sizeof d->geo))
+			return 0;
+		return -EFAULT;
+	}
+	printk(KERN_INFO "aoe: aoeblk_ioctl: unknown ioctl %d\n", cmd);
+	return -EINVAL;
+}
+
+static struct block_device_operations aoe_bdops = {
+	.open = aoeblk_open,
+	.release = aoeblk_release,
+	.ioctl = aoeblk_ioctl,
+	.owner = THIS_MODULE,
+};
+
+/* alloc_disk and add_disk can sleep */
+void
+aoeblk_gdalloc(void *vp)
+{
+	struct aoedev *d = vp;
+	struct gendisk *gd;
+	ulong flags;
+
+	gd = alloc_disk(AOE_PARTITIONS);
+	if (gd == NULL) {
+		printk(KERN_ERR "aoe: aoeblk_gdalloc: cannot allocate disk "
+			"structure for %ld.%ld\n", d->aoemajor, d->aoeminor);
+		spin_lock_irqsave(&d->lock, flags);
+		d->flags &= ~DEVFL_WORKON;
+		spin_unlock_irqrestore(&d->lock, flags);
+		return;
+	}
+
+	d->bufpool = mempool_create(MIN_BUFS,
+				    mempool_alloc_slab, mempool_free_slab,
+				    buf_pool_cache);
+	if (d->bufpool == NULL) {
+		printk(KERN_ERR "aoe: aoeblk_gdalloc: cannot allocate bufpool "
+			"for %ld.%ld\n", d->aoemajor, d->aoeminor);
+		put_disk(gd);
+		spin_lock_irqsave(&d->lock, flags);
+		d->flags &= ~DEVFL_WORKON;
+		spin_unlock_irqrestore(&d->lock, flags);
+		return;
+	}
+
+	spin_lock_irqsave(&d->lock, flags);
+	blk_queue_make_request(&d->blkq, aoeblk_make_request);
+	gd->major = AOE_MAJOR;
+	gd->first_minor = d->sysminor * AOE_PARTITIONS;
+	gd->fops = &aoe_bdops;
+	gd->private_data = d;
+	gd->capacity = d->ssize;
+	snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%ld",
+		d->aoemajor, d->aoeminor);
+
+	gd->queue = &d->blkq;
+	d->gd = gd;
+	d->flags &= ~DEVFL_WORKON;
+	d->flags |= DEVFL_UP;
+
+	spin_unlock_irqrestore(&d->lock, flags);
+
+	add_disk(gd);
+	aoedisk_add_sysfs(d);
+	
+	printk(KERN_INFO "aoe: %012llx e%lu.%lu v%04x has %llu "
+		"sectors\n", (unsigned long long)mac_addr(d->addr),
+		d->aoemajor, d->aoeminor,
+		d->fw_ver, (long long)d->ssize);
+}
+
+void
+aoeblk_exit(void)
+{
+	kmem_cache_destroy(buf_pool_cache);
+}
+
+int __init
+aoeblk_init(void)
+{
+	buf_pool_cache = kmem_cache_create("aoe_bufs", 
+					   sizeof(struct buf),
+					   0, 0, NULL, NULL);
+	if (buf_pool_cache == NULL)
+		return -ENOMEM;
+
+	return 0;
+}
+
diff --git a/drivers/block/aoe/aoechr.c b/drivers/block/aoe/aoechr.c
new file mode 100644
index 0000000..14aeca3
--- /dev/null
+++ b/drivers/block/aoe/aoechr.c
@@ -0,0 +1,244 @@
+/* Copyright (c) 2004 Coraid, Inc.  See COPYING for GPL terms. */
+/*
+ * aoechr.c
+ * AoE character device driver
+ */
+
+#include <linux/hdreg.h>
+#include <linux/blkdev.h>
+#include "aoe.h"
+
+enum {
+	//MINOR_STAT = 1, (moved to sysfs)
+	MINOR_ERR = 2,
+	MINOR_DISCOVER,
+	MINOR_INTERFACES,
+	MSGSZ = 2048,
+	NARGS = 10,
+	NMSG = 100,		/* message backlog to retain */
+};
+
+struct aoe_chardev {
+	ulong minor;
+	char name[32];
+};
+
+enum { EMFL_VALID = 1 };
+
+struct ErrMsg {
+	short flags;
+	short len;
+	char *msg;
+};
+
+static struct ErrMsg emsgs[NMSG];
+static int emsgs_head_idx, emsgs_tail_idx;
+static struct semaphore emsgs_sema;
+static spinlock_t emsgs_lock;
+static int nblocked_emsgs_readers;
+static struct class_simple *aoe_class;
+static struct aoe_chardev chardevs[] = {
+	{ MINOR_ERR, "err" },
+	{ MINOR_DISCOVER, "discover" },
+	{ MINOR_INTERFACES, "interfaces" },
+};
+
+static int
+discover(void)
+{
+	aoecmd_cfg(0xffff, 0xff);
+	return 0;
+}
+
+static int
+interfaces(const char __user *str, size_t size)
+{
+	if (set_aoe_iflist(str, size)) {
+		printk(KERN_CRIT
+		       "%s: could not set interface list: %s\n",
+		       __FUNCTION__, "too many interfaces");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+void
+aoechr_error(char *msg)
+{
+	struct ErrMsg *em;
+	char *mp;
+	ulong flags, n;
+
+	n = strlen(msg);
+
+	spin_lock_irqsave(&emsgs_lock, flags);
+
+	em = emsgs + emsgs_tail_idx;
+	if ((em->flags & EMFL_VALID)) {
+bail:		spin_unlock_irqrestore(&emsgs_lock, flags);
+		return;
+	}
+
+	mp = kmalloc(n, GFP_ATOMIC);
+	if (mp == NULL) {
+		printk(KERN_CRIT "aoe: aoechr_error: allocation failure, len=%ld\n", n);
+		goto bail;
+	}
+
+	memcpy(mp, msg, n);
+	em->msg = mp;
+	em->flags |= EMFL_VALID;
+	em->len = n;
+
+	emsgs_tail_idx++;
+	emsgs_tail_idx %= ARRAY_SIZE(emsgs);
+
+	spin_unlock_irqrestore(&emsgs_lock, flags);
+
+	if (nblocked_emsgs_readers)
+		up(&emsgs_sema);
+}
+
+static ssize_t
+aoechr_write(struct file *filp, const char __user *buf, size_t cnt, loff_t *offp)
+{
+	int ret = -EINVAL;
+
+	switch ((unsigned long) filp->private_data) {
+	default:
+		printk(KERN_INFO "aoe: aoechr_write: can't write to that file.\n");
+		break;
+	case MINOR_DISCOVER:
+		ret = discover();
+		break;
+	case MINOR_INTERFACES:
+		ret = interfaces(buf, cnt);
+		break;
+	}
+	if (ret == 0)
+		ret = cnt;
+	return ret;
+}
+
+static int
+aoechr_open(struct inode *inode, struct file *filp)
+{
+	int n, i;
+
+	n = MINOR(inode->i_rdev);
+	filp->private_data = (void *) (unsigned long) n;
+
+	for (i = 0; i < ARRAY_SIZE(chardevs); ++i)
+		if (chardevs[i].minor == n)
+			return 0;
+	return -EINVAL;
+}
+
+static int
+aoechr_rel(struct inode *inode, struct file *filp)
+{
+	return 0;
+}
+
+static ssize_t
+aoechr_read(struct file *filp, char __user *buf, size_t cnt, loff_t *off)
+{
+	unsigned long n;
+	char *mp;
+	struct ErrMsg *em;
+	ssize_t len;
+	ulong flags;
+
+	n = (unsigned long) filp->private_data;
+	switch (n) {
+	case MINOR_ERR:
+		spin_lock_irqsave(&emsgs_lock, flags);
+loop:
+		em = emsgs + emsgs_head_idx;
+		if ((em->flags & EMFL_VALID) == 0) {
+			if (filp->f_flags & O_NDELAY) {
+				spin_unlock_irqrestore(&emsgs_lock, flags);
+				return -EAGAIN;
+			}
+			nblocked_emsgs_readers++;
+
+			spin_unlock_irqrestore(&emsgs_lock, flags);
+
+			n = down_interruptible(&emsgs_sema);
+
+			spin_lock_irqsave(&emsgs_lock, flags);
+
+			nblocked_emsgs_readers--;
+
+			if (n) {
+				spin_unlock_irqrestore(&emsgs_lock, flags);
+				return -ERESTARTSYS;
+			}
+			goto loop;
+		}
+		if (em->len > cnt) {
+			spin_unlock_irqrestore(&emsgs_lock, flags);
+			return -EAGAIN;
+		}
+		mp = em->msg;
+		len = em->len;
+		em->msg = NULL;
+		em->flags &= ~EMFL_VALID;
+
+		emsgs_head_idx++;
+		emsgs_head_idx %= ARRAY_SIZE(emsgs);
+
+		spin_unlock_irqrestore(&emsgs_lock, flags);
+
+		n = copy_to_user(buf, mp, len);
+		kfree(mp);
+		return n == 0 ? len : -EFAULT;
+	default:
+		return -EFAULT;
+	}
+}
+
+static struct file_operations aoe_fops = {
+	.write = aoechr_write,
+	.read = aoechr_read,
+	.open = aoechr_open,
+	.release = aoechr_rel,
+	.owner = THIS_MODULE,
+};
+
+int __init
+aoechr_init(void)
+{
+	int n, i;
+
+	n = register_chrdev(AOE_MAJOR, "aoechr", &aoe_fops);
+	if (n < 0) { 
+		printk(KERN_ERR "aoe: aoechr_init: can't register char device\n");
+		return n;
+	}
+	sema_init(&emsgs_sema, 0);
+	spin_lock_init(&emsgs_lock);
+	aoe_class = class_simple_create(THIS_MODULE, "aoe");
+	if (IS_ERR(aoe_class)) {
+		unregister_chrdev(AOE_MAJOR, "aoechr");
+		return PTR_ERR(aoe_class);
+	}
+	for (i = 0; i < ARRAY_SIZE(chardevs); ++i)
+		class_simple_device_add(aoe_class,
+					MKDEV(AOE_MAJOR, chardevs[i].minor),
+					NULL, chardevs[i].name);
+
+	return 0;
+}
+
+void
+aoechr_exit(void)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(chardevs); ++i)
+		class_simple_device_remove(MKDEV(AOE_MAJOR, chardevs[i].minor));
+	class_simple_destroy(aoe_class);
+	unregister_chrdev(AOE_MAJOR, "aoechr");
+}
+
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
new file mode 100644
index 0000000..fb6d942
--- /dev/null
+++ b/drivers/block/aoe/aoecmd.c
@@ -0,0 +1,629 @@
+/* Copyright (c) 2004 Coraid, Inc.  See COPYING for GPL terms. */
+/*
+ * aoecmd.c
+ * Filesystem request handling methods
+ */
+
+#include <linux/hdreg.h>
+#include <linux/blkdev.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include "aoe.h"
+
+#define TIMERTICK (HZ / 10)
+#define MINTIMER (2 * TIMERTICK)
+#define MAXTIMER (HZ << 1)
+#define MAXWAIT (60 * 3)	/* After MAXWAIT seconds, give up and fail dev */
+
+static struct sk_buff *
+new_skb(struct net_device *if_dev, ulong len)
+{
+	struct sk_buff *skb;
+
+	skb = alloc_skb(len, GFP_ATOMIC);
+	if (skb) {
+		skb->nh.raw = skb->mac.raw = skb->data;
+		skb->dev = if_dev;
+		skb->protocol = __constant_htons(ETH_P_AOE);
+		skb->priority = 0;
+		skb_put(skb, len);
+		skb->next = skb->prev = NULL;
+
+		/* tell the network layer not to perform IP checksums
+		 * or to get the NIC to do it
+		 */
+		skb->ip_summed = CHECKSUM_NONE;
+	}
+	return skb;
+}
+
+static struct sk_buff *
+skb_prepare(struct aoedev *d, struct frame *f)
+{
+	struct sk_buff *skb;
+	char *p;
+
+	skb = new_skb(d->ifp, f->ndata + f->writedatalen);
+	if (!skb) {
+		printk(KERN_INFO "aoe: skb_prepare: failure to allocate skb\n");
+		return NULL;
+	}
+
+	p = skb->mac.raw;
+	memcpy(p, f->data, f->ndata);
+
+	if (f->writedatalen) {
+		p += sizeof(struct aoe_hdr) + sizeof(struct aoe_atahdr);
+		memcpy(p, f->bufaddr, f->writedatalen);
+	}
+
+	return skb;
+}
+
+static struct frame *
+getframe(struct aoedev *d, int tag)
+{
+	struct frame *f, *e;
+
+	f = d->frames;
+	e = f + d->nframes;
+	for (; f<e; f++)
+		if (f->tag == tag)
+			return f;
+	return NULL;
+}
+
+/*
+ * Leave the top bit clear so we have tagspace for userland.
+ * The bottom 16 bits are the xmit tick for rexmit/rttavg processing.
+ * This driver reserves tag -1 to mean "unused frame."
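+ * For example, if ++d->lasttag is 0x1234 and the low jiffies bits are
+ * 0xabcd when a frame goes out, its tag is 0x1234abcd.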
+ */
+static int
+newtag(struct aoedev *d)
+{
+	register ulong n;
+
+	n = jiffies & 0xffff;
+	return n |= (++d->lasttag & 0x7fff) << 16;
+}
+
+static int
+aoehdr_atainit(struct aoedev *d, struct aoe_hdr *h)
+{
+	u16 type = __constant_cpu_to_be16(ETH_P_AOE);
+	u16 aoemajor = __cpu_to_be16(d->aoemajor);
+	u32 host_tag = newtag(d);
+	u32 tag = __cpu_to_be32(host_tag);
+
+	memcpy(h->src, d->ifp->dev_addr, sizeof h->src);
+	memcpy(h->dst, d->addr, sizeof h->dst);
+	memcpy(h->type, &type, sizeof type);
+	h->verfl = AOE_HVER;
+	memcpy(h->major, &aoemajor, sizeof aoemajor);
+	h->minor = d->aoeminor;
+	h->cmd = AOECMD_ATA;
+	memcpy(h->tag, &tag, sizeof tag);
+
+	return host_tag;
+}
+
+static void
+aoecmd_ata_rw(struct aoedev *d, struct frame *f)
+{
+	struct aoe_hdr *h;
+	struct aoe_atahdr *ah;
+	struct buf *buf;
+	struct sk_buff *skb;
+	ulong bcnt;
+	register sector_t sector;
+	char writebit, extbit;
+
+	writebit = 0x10;
+	extbit = 0x4;
+
+	buf = d->inprocess;
+
+	sector = buf->sector;
+	bcnt = buf->bv_resid;
+	if (bcnt > MAXATADATA)
+		bcnt = MAXATADATA;
+
+	/* initialize the headers & frame */
+	h = (struct aoe_hdr *) f->data;
+	ah = (struct aoe_atahdr *) (h+1);
+	f->ndata = sizeof *h + sizeof *ah;
+	memset(h, 0, f->ndata);
+	f->tag = aoehdr_atainit(d, h);
+	f->waited = 0;
+	f->buf = buf;
+	f->bufaddr = buf->bufaddr;
+
+	/* set up ata header */
+	ah->scnt = bcnt >> 9;
+	ah->lba0 = sector;
+	ah->lba1 = sector >>= 8;
+	ah->lba2 = sector >>= 8;
+	ah->lba3 = sector >>= 8;
+	if (d->flags & DEVFL_EXT) {
+		ah->aflags |= AOEAFL_EXT;
+		ah->lba4 = sector >>= 8;
+		ah->lba5 = sector >>= 8;
+	} else {
+		extbit = 0;
+		ah->lba3 &= 0x0f;
+		ah->lba3 |= 0xe0;	/* LBA bit + obsolete 0xa0 */
+	}
+
+	if (bio_data_dir(buf->bio) == WRITE) {
+		ah->aflags |= AOEAFL_WRITE;
+		f->writedatalen = bcnt;
+	} else {
+		writebit = 0;
+		f->writedatalen = 0;
+	}
+
+	ah->cmdstat = WIN_READ | writebit | extbit;
+
+	/* mark all tracking fields and load out */
+	buf->nframesout += 1;
+	buf->bufaddr += bcnt;
+	buf->bv_resid -= bcnt;
+/* printk(KERN_INFO "aoe: bv_resid=%ld\n", buf->bv_resid); */
+	buf->resid -= bcnt;
+	buf->sector += bcnt >> 9;
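+	/* the whole bio is in flight once resid reaches zero; otherwise step to the next bio_vec when the current one is drained */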
+	if (buf->resid == 0) {
+		d->inprocess = NULL;
+	} else if (buf->bv_resid == 0) {
+		buf->bv++;
+		buf->bv_resid = buf->bv->bv_len;
+		buf->bufaddr = page_address(buf->bv->bv_page) + buf->bv->bv_offset;
+	}
+
+	skb = skb_prepare(d, f);
+	if (skb) {
+		skb->next = d->skblist;
+		d->skblist = skb;
+	}
+}
+
+/* enters with d->lock held */
+void
+aoecmd_work(struct aoedev *d)
+{
+	struct frame *f;
+	struct buf *buf;
+loop:
+	f = getframe(d, FREETAG);
+	if (f == NULL)
+		return;
+	if (d->inprocess == NULL) {
+		if (list_empty(&d->bufq))
+			return;
+		buf = container_of(d->bufq.next, struct buf, bufs);
+		list_del(d->bufq.next);
+/*printk(KERN_INFO "aoecmd_work: bi_size=%ld\n", buf->bio->bi_size); */
+		d->inprocess = buf;
+	}
+	aoecmd_ata_rw(d, f);
+	goto loop;
+}
+
+static void
+rexmit(struct aoedev *d, struct frame *f)
+{
+	struct sk_buff *skb;
+	struct aoe_hdr *h;
+	char buf[128];
+	u32 n;
+	u32 net_tag;
+
+	n = newtag(d);
+
+	snprintf(buf, sizeof buf,
+		"%15s e%ld.%ld oldtag=%08x@%08lx newtag=%08x\n",
+		"retransmit",
+		d->aoemajor, d->aoeminor, f->tag, jiffies, n);
+	aoechr_error(buf);
+
+	h = (struct aoe_hdr *) f->data;
+	f->tag = n;
+	net_tag = __cpu_to_be32(n);
+	memcpy(h->tag, &net_tag, sizeof net_tag);
+
+	skb = skb_prepare(d, f);
+	if (skb) {
+		skb->next = d->skblist;
+		d->skblist = skb;
+	}
+}
+
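+/*
+ * tsince returns how many jiffies (modulo 1<<16) have passed since the
+ * frame carrying this tag was sent; newtag stored the send tick in the
+ * tag's low 16 bits.
+ */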
+static int
+tsince(int tag)
+{
+	int n;
+
+	n = jiffies & 0xffff;
+	n -= tag & 0xffff;
+	if (n < 0)
+		n += 1<<16;
+	return n;
+}
+
+static void
+rexmit_timer(ulong vp)
+{
+	struct aoedev *d;
+	struct frame *f, *e;
+	struct sk_buff *sl;
+	register long timeout;
+	ulong flags, n;
+
+	d = (struct aoedev *) vp;
+	sl = NULL;
+
+	/* timeout is always ~150% of the moving average */
+	timeout = d->rttavg;
+	timeout += timeout >> 1;
+
+	spin_lock_irqsave(&d->lock, flags);
+
+	if (d->flags & DEVFL_TKILL) {
+tdie:		spin_unlock_irqrestore(&d->lock, flags);
+		return;
+	}
+	f = d->frames;
+	e = f + d->nframes;
+	for (; f<e; f++) {
+		if (f->tag != FREETAG && tsince(f->tag) >= timeout) {
+			n = f->waited += timeout;
+			n /= HZ;
+			if (n > MAXWAIT) { /* waited too long.  device failure. */
+				aoedev_downdev(d);
+				goto tdie;
+			}
+			rexmit(d, f);
+		}
+	}
+
+	sl = d->skblist;
+	d->skblist = NULL;
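+	/* anything on the list was just retransmitted, so back off by doubling the round trip average */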
+	if (sl) {
+		n = d->rttavg <<= 1;
+		if (n > MAXTIMER)
+			d->rttavg = MAXTIMER;
+	}
+
+	d->timer.expires = jiffies + TIMERTICK;
+	add_timer(&d->timer);
+
+	spin_unlock_irqrestore(&d->lock, flags);
+
+	aoenet_xmit(sl);
+}
+
+static void
+ataid_complete(struct aoedev *d, unsigned char *id)
+{
+	u64 ssize;
+	u16 n;
+
+	/* word 83: command set supported */
+	n = __le16_to_cpu(*((u16 *) &id[83<<1]));
+
+	/* word 86: command set/feature enabled */
+	n |= __le16_to_cpu(*((u16 *) &id[86<<1]));
+
+	if (n & (1<<10)) {	/* bit 10: LBA 48 */
+		d->flags |= DEVFL_EXT;
+
+		/* word 100: number lba48 sectors */
+		ssize = __le64_to_cpu(*((u64 *) &id[100<<1]));
+
+		/* set as in ide-disk.c:init_idedisk_capacity */
+		d->geo.cylinders = ssize;
+		d->geo.cylinders /= (255 * 63);
+		d->geo.heads = 255;
+		d->geo.sectors = 63;
+	} else {
+		d->flags &= ~DEVFL_EXT;
+
+		/* number lba28 sectors */
+		ssize = __le32_to_cpu(*((u32 *) &id[60<<1]));
+
+		/* NOTE: obsolete in ATA 6 */
+		d->geo.cylinders = __le16_to_cpu(*((u16 *) &id[54<<1]));
+		d->geo.heads = __le16_to_cpu(*((u16 *) &id[55<<1]));
+		d->geo.sectors = __le16_to_cpu(*((u16 *) &id[56<<1]));
+	}
+	d->ssize = ssize;
+	d->geo.start = 0;
+	if (d->gd != NULL) {
+		d->gd->capacity = ssize;
+		d->flags |= DEVFL_UP;
+		return;
+	}
+	if (d->flags & DEVFL_WORKON) {
+		printk(KERN_INFO "aoe: ataid_complete: can't schedule work, it's already on!  "
+			"(This really shouldn't happen).\n");
+		return;
+	}
+	INIT_WORK(&d->work, aoeblk_gdalloc, d);
+	schedule_work(&d->work);
+	d->flags |= DEVFL_WORKON;
+}
+
+static void
+calc_rttavg(struct aoedev *d, int rtt)
+{
+	register long n;
+
+	n = rtt;
+	if (n < MINTIMER)
+		n = MINTIMER;
+	else if (n > MAXTIMER)
+		n = MAXTIMER;
+
+	/* g == .25; cf. Congestion Avoidance and Control, Jacobson & Karels; 1988 */
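+	/* e.g. rttavg 100 and a clamped sample of 180: 100 + ((180 - 100) >> 2) = 120 */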
+	n -= d->rttavg;
+	d->rttavg += n >> 2;
+}
+
+void
+aoecmd_ata_rsp(struct sk_buff *skb)
+{
+	struct aoedev *d;
+	struct aoe_hdr *hin;
+	struct aoe_atahdr *ahin, *ahout;
+	struct frame *f;
+	struct buf *buf;
+	struct sk_buff *sl;
+	register long n;
+	ulong flags;
+	char ebuf[128];
+	
+	hin = (struct aoe_hdr *) skb->mac.raw;
+	d = aoedev_bymac(hin->src);
+	if (d == NULL) {
+		snprintf(ebuf, sizeof ebuf, "aoecmd_ata_rsp: ata response "
+			"for unknown device %d.%d\n",
+			 __be16_to_cpu(*((u16 *) hin->major)),
+			hin->minor);
+		aoechr_error(ebuf);
+		return;
+	}
+
+	spin_lock_irqsave(&d->lock, flags);
+
+	f = getframe(d, __be32_to_cpu(*((u32 *) hin->tag)));
+	if (f == NULL) {
+		spin_unlock_irqrestore(&d->lock, flags);
+		snprintf(ebuf, sizeof ebuf,
+			"%15s e%d.%d    tag=%08x@%08lx\n",
+			"unexpected rsp",
+			__be16_to_cpu(*((u16 *) hin->major)),
+			hin->minor,
+			__be32_to_cpu(*((u32 *) hin->tag)),
+			jiffies);
+		aoechr_error(ebuf);
+		return;
+	}
+
+	calc_rttavg(d, tsince(f->tag));
+
+	ahin = (struct aoe_atahdr *) (hin+1);
+	ahout = (struct aoe_atahdr *) (f->data + sizeof(struct aoe_hdr));
+	buf = f->buf;
+
+	if (ahin->cmdstat & 0xa9) {	/* these bits cleared on success */
+		printk(KERN_CRIT "aoe: aoecmd_ata_rsp: ata error cmd=%2.2Xh "
+			"stat=%2.2Xh from e%ld.%ld\n", 
+			ahout->cmdstat, ahin->cmdstat,
+			d->aoemajor, d->aoeminor);
+		if (buf)
+			buf->flags |= BUFFL_FAIL;
+	} else {
+		switch (ahout->cmdstat) {
+		case WIN_READ:
+		case WIN_READ_EXT:
+			n = ahout->scnt << 9;
+			if (skb->len - sizeof *hin - sizeof *ahin < n) {
+				printk(KERN_CRIT "aoe: aoecmd_ata_rsp: runt "
+					"ata data size in read.  skb->len=%d\n",
+					skb->len);
+				/* fail frame f?  just returning will rexmit. */
+				spin_unlock_irqrestore(&d->lock, flags);
+				return;
+			}
+			memcpy(f->bufaddr, ahin+1, n);
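+			/* fall through: successful reads share the write cases' exit */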
+		case WIN_WRITE:
+		case WIN_WRITE_EXT:
+			break;
+		case WIN_IDENTIFY:
+			if (skb->len - sizeof *hin - sizeof *ahin < 512) {
+				printk(KERN_INFO "aoe: aoecmd_ata_rsp: runt data size "
+					"in ataid.  skb->len=%d\n", skb->len);
+				spin_unlock_irqrestore(&d->lock, flags);
+				return;
+			}
+			ataid_complete(d, (char *) (ahin+1));
+			/* d->flags |= DEVFL_WC_UPDATE; */
+			break;
+		default:
+			printk(KERN_INFO "aoe: aoecmd_ata_rsp: unrecognized "
+			       "outbound ata command %2.2Xh for %d.%d\n", 
+			       ahout->cmdstat,
+			       __be16_to_cpu(*((u16 *) hin->major)),
+			       hin->minor);
+		}
+	}
+
+	if (buf) {
+		buf->nframesout -= 1;
+		if (buf->nframesout == 0 && buf->resid == 0) {
+			n = (buf->flags & BUFFL_FAIL) ? -EIO : 0;
+			bio_endio(buf->bio, buf->bio->bi_size, n);
+			mempool_free(buf, d->bufpool);
+		}
+	}
+
+	f->buf = NULL;
+	f->tag = FREETAG;
+
+	aoecmd_work(d);
+
+	sl = d->skblist;
+	d->skblist = NULL;
+
+	spin_unlock_irqrestore(&d->lock, flags);
+
+	aoenet_xmit(sl);
+}
+
+void
+aoecmd_cfg(ushort aoemajor, unsigned char aoeminor)
+{
+	struct aoe_hdr *h;
+	struct aoe_cfghdr *ch;
+	struct sk_buff *skb, *sl;
+	struct net_device *ifp;
+	u16 aoe_type = __constant_cpu_to_be16(ETH_P_AOE);
+	u16 net_aoemajor = __cpu_to_be16(aoemajor);
+
+	sl = NULL;
+
+	read_lock(&dev_base_lock);
+	for (ifp = dev_base; ifp; dev_put(ifp), ifp = ifp->next) {
+		dev_hold(ifp);
+		if (!is_aoe_netif(ifp))
+			continue;
+
+		skb = new_skb(ifp, sizeof *h + sizeof *ch);
+		if (skb == NULL) {
+			printk(KERN_INFO "aoe: aoecmd_cfg: skb alloc failure\n");
+			continue;
+		}
+		h = (struct aoe_hdr *) skb->mac.raw;
+		memset(h, 0, sizeof *h + sizeof *ch);
+
+		memset(h->dst, 0xff, sizeof h->dst);
+		memcpy(h->src, ifp->dev_addr, sizeof h->src);
+		memcpy(h->type, &aoe_type, sizeof aoe_type);
+		h->verfl = AOE_HVER;
+		memcpy(h->major, &net_aoemajor, sizeof net_aoemajor);
+		h->minor = aoeminor;
+		h->cmd = AOECMD_CFG;
+
+		skb->next = sl;
+		sl = skb;
+	}
+	read_unlock(&dev_base_lock);
+
+	aoenet_xmit(sl);
+}
+ 
+/*
+ * Since we only call this in one place (and it only prepares one frame)
+ * we just return the skb.  Usually we'd chain it up to the d->skblist.
+ */
+static struct sk_buff *
+aoecmd_ata_id(struct aoedev *d)
+{
+	struct aoe_hdr *h;
+	struct aoe_atahdr *ah;
+	struct frame *f;
+	struct sk_buff *skb;
+
+	f = getframe(d, FREETAG);
+	if (f == NULL) {
+		printk(KERN_CRIT "aoe: aoecmd_ata_id: can't get a frame.  "
+			"This shouldn't happen.\n");
+		return NULL;
+	}
+
+	/* initialize the headers & frame */
+	h = (struct aoe_hdr *) f->data;
+	ah = (struct aoe_atahdr *) (h+1);
+	f->ndata = sizeof *h + sizeof *ah;
+	memset(h, 0, f->ndata);
+	f->tag = aoehdr_atainit(d, h);
+	f->waited = 0;
+	f->writedatalen = 0;
+
+	/* this message initializes the device, so we reset the rttavg */
+	d->rttavg = MAXTIMER;
+
+	/* set up ata header */
+	ah->scnt = 1;
+	ah->cmdstat = WIN_IDENTIFY;
+	ah->lba3 = 0xa0;
+
+	skb = skb_prepare(d, f);
+
+	/* we now want to start the rexmit tracking */
+	d->flags &= ~DEVFL_TKILL;
+	d->timer.data = (ulong) d;
+	d->timer.function = rexmit_timer;
+	d->timer.expires = jiffies + TIMERTICK;
+	add_timer(&d->timer);
+
+	return skb;
+}
+ 
+void
+aoecmd_cfg_rsp(struct sk_buff *skb)
+{
+	struct aoedev *d;
+	struct aoe_hdr *h;
+	struct aoe_cfghdr *ch;
+	ulong flags, bufcnt, sysminor, aoemajor;
+	struct sk_buff *sl;
+	enum { MAXFRAMES = 8, MAXSYSMINOR = 255 };
+
+	h = (struct aoe_hdr *) skb->mac.raw;
+	ch = (struct aoe_cfghdr *) (h+1);
+
+	/*
+	 * Enough people have their dip switches set backwards to
+	 * warrant a loud message for this special case.
+	 */
+	aoemajor = __be16_to_cpu(*((u16 *) h->major));
+	if (aoemajor == 0xfff) {
+		printk(KERN_CRIT "aoe: aoecmd_cfg_rsp: Warning: shelf "
+			"address is all ones.  Check shelf dip switches\n");
+		return;
+	}
+
+	sysminor = SYSMINOR(aoemajor, h->minor);
+	if (sysminor > MAXSYSMINOR) {
+		printk(KERN_INFO "aoe: aoecmd_cfg_rsp: sysminor %ld too "
+			"large\n", sysminor);
+		return;
+	}
+
+	bufcnt = __be16_to_cpu(*((u16 *) ch->bufcnt));
+	if (bufcnt > MAXFRAMES)	/* keep it reasonable */
+		bufcnt = MAXFRAMES;
+
+	d = aoedev_set(sysminor, h->src, skb->dev, bufcnt);
+	if (d == NULL) {
+		printk(KERN_INFO "aoe: aoecmd_cfg_rsp: device set failure\n");
+		return;
+	}
+
+	spin_lock_irqsave(&d->lock, flags);
+
+	if (d->flags & (DEVFL_UP | DEVFL_CLOSEWAIT)) {
+		spin_unlock_irqrestore(&d->lock, flags);
+		return;
+	}
+
+	d->fw_ver = __be16_to_cpu(*((u16 *) ch->fwver));
+
+	/* we get here only if the device is new */
+	sl = aoecmd_ata_id(d);
+
+	spin_unlock_irqrestore(&d->lock, flags);
+
+	aoenet_xmit(sl);
+}
+
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c
new file mode 100644
index 0000000..240abae
--- /dev/null
+++ b/drivers/block/aoe/aoedev.c
@@ -0,0 +1,180 @@
+/* Copyright (c) 2004 Coraid, Inc.  See COPYING for GPL terms. */
+/*
+ * aoedev.c
+ * AoE device utility functions; maintains device list.
+ */
+
+#include <linux/hdreg.h>
+#include <linux/blkdev.h>
+#include <linux/netdevice.h>
+#include "aoe.h"
+
+static struct aoedev *devlist;
+static spinlock_t devlist_lock;
+
+struct aoedev *
+aoedev_bymac(unsigned char *macaddr)
+{
+	struct aoedev *d;
+	ulong flags;
+
+	spin_lock_irqsave(&devlist_lock, flags);
+
+	for (d=devlist; d; d=d->next)
+		if (!memcmp(d->addr, macaddr, 6))
+			break;
+
+	spin_unlock_irqrestore(&devlist_lock, flags);
+	return d;
+}
+
+/* called with devlist lock held */
+static struct aoedev *
+aoedev_newdev(ulong nframes)
+{
+	struct aoedev *d;
+	struct frame *f, *e;
+
+	d = kcalloc(1, sizeof *d, GFP_ATOMIC);
+	if (d == NULL)
+		return NULL;
+	f = kcalloc(nframes, sizeof *f, GFP_ATOMIC);
+	if (f == NULL) {
+		kfree(d);
+		return NULL;
+	}
+
+	d->nframes = nframes;
+	d->frames = f;
+	e = f + nframes;
+	for (; f<e; f++)
+		f->tag = FREETAG;
+
+	spin_lock_init(&d->lock);
+	init_timer(&d->timer);
+	d->bufpool = NULL;	/* defer to aoeblk_gdalloc */
+	INIT_LIST_HEAD(&d->bufq);
+	d->next = devlist;
+	devlist = d;
+
+	return d;
+}
+
+void
+aoedev_downdev(struct aoedev *d)
+{
+	struct frame *f, *e;
+	struct buf *buf;
+	struct bio *bio;
+
+	d->flags |= DEVFL_TKILL;
+	del_timer(&d->timer);
+
+	f = d->frames;
+	e = f + d->nframes;
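+	/* fail the buf on each outstanding frame; the loop increment clears every frame as it passes */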
+	for (; f<e; f->tag = FREETAG, f->buf = NULL, f++) {
+		if (f->tag == FREETAG || f->buf == NULL)
+			continue;
+		buf = f->buf;
+		bio = buf->bio;
+		if (--buf->nframesout == 0) {
+			mempool_free(buf, d->bufpool);
+			bio_endio(bio, bio->bi_size, -EIO);
+		}
+	}
+	d->inprocess = NULL;
+
+	while (!list_empty(&d->bufq)) {
+		buf = container_of(d->bufq.next, struct buf, bufs);
+		list_del(d->bufq.next);
+		bio = buf->bio;
+		mempool_free(buf, d->bufpool);
+		bio_endio(bio, bio->bi_size, -EIO);
+	}
+
+	if (d->nopen)
+		d->flags |= DEVFL_CLOSEWAIT;
+	if (d->gd)
+		d->gd->capacity = 0;
+
+	d->flags &= ~DEVFL_UP;
+}
+
+struct aoedev *
+aoedev_set(ulong sysminor, unsigned char *addr, struct net_device *ifp, ulong bufcnt)
+{
+	struct aoedev *d;
+	ulong flags;
+
+	spin_lock_irqsave(&devlist_lock, flags);
+
+	for (d=devlist; d; d=d->next)
+		if (d->sysminor == sysminor
+		|| memcmp(d->addr, addr, sizeof d->addr) == 0)
+			break;
+
+	if (d == NULL && (d = aoedev_newdev(bufcnt)) == NULL) {
+		spin_unlock_irqrestore(&devlist_lock, flags);
+		printk(KERN_INFO "aoe: aoedev_set: aoedev_newdev failure.\n");
+		return NULL;
+	}
+
+	spin_unlock_irqrestore(&devlist_lock, flags);
+	spin_lock_irqsave(&d->lock, flags);
+
+	d->ifp = ifp;
+
+	if (d->sysminor != sysminor
+	|| memcmp(d->addr, addr, sizeof d->addr)
+	|| (d->flags & DEVFL_UP) == 0) {
+		aoedev_downdev(d); /* flushes outstanding frames */
+		memcpy(d->addr, addr, sizeof d->addr);
+		d->sysminor = sysminor;
+		d->aoemajor = AOEMAJOR(sysminor);
+		d->aoeminor = AOEMINOR(sysminor);
+	}
+
+	spin_unlock_irqrestore(&d->lock, flags);
+	return d;
+}
+
+static void
+aoedev_freedev(struct aoedev *d)
+{
+	if (d->gd) {
+		aoedisk_rm_sysfs(d);
+		del_gendisk(d->gd);
+		put_disk(d->gd);
+	}
+	kfree(d->frames);
+	mempool_destroy(d->bufpool);
+	kfree(d);
+}
+
+void
+aoedev_exit(void)
+{
+	struct aoedev *d;
+	ulong flags;
+
+	flush_scheduled_work();
+
+	while ((d = devlist)) {
+		devlist = d->next;
+
+		spin_lock_irqsave(&d->lock, flags);
+		aoedev_downdev(d);
+		spin_unlock_irqrestore(&d->lock, flags);
+
+		del_timer_sync(&d->timer);
+		aoedev_freedev(d);
+	}
+}
+
+int __init
+aoedev_init(void)
+{
+	spin_lock_init(&devlist_lock);
+	return 0;
+}
+
diff --git a/drivers/block/aoe/aoemain.c b/drivers/block/aoe/aoemain.c
new file mode 100644
index 0000000..387588a
--- /dev/null
+++ b/drivers/block/aoe/aoemain.c
@@ -0,0 +1,112 @@
+/* Copyright (c) 2004 Coraid, Inc.  See COPYING for GPL terms. */
+/*
+ * aoemain.c
+ * Module initialization routines, discover timer
+ */
+
+#include <linux/hdreg.h>
+#include <linux/blkdev.h>
+#include <linux/module.h>
+#include "aoe.h"
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sam Hopkins <sah@coraid.com>");
+MODULE_DESCRIPTION("AoE block/char driver for 2.6.[0-9]+");
+MODULE_VERSION(VERSION);
+
+enum { TINIT, TRUN, TKILL };
+
+static void
+discover_timer(ulong vp)
+{
+	static struct timer_list t;
+	static volatile ulong die;
+	static spinlock_t lock;
+	ulong flags;
+	enum { DTIMERTICK = HZ * 60 }; /* one minute */
+
+	switch (vp) {
+	case TINIT:
+		init_timer(&t);
+		spin_lock_init(&lock);
+		t.data = TRUN;
+		t.function = discover_timer;
+		die = 0;
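+		/* fall through to TRUN to arm the first tick */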
+	case TRUN:
+		spin_lock_irqsave(&lock, flags);
+		if (!die) {
+			t.expires = jiffies + DTIMERTICK;
+			add_timer(&t);
+		}
+		spin_unlock_irqrestore(&lock, flags);
+
+		aoecmd_cfg(0xffff, 0xff);
+		return;
+	case TKILL:
+		spin_lock_irqsave(&lock, flags);
+		die = 1;
+		spin_unlock_irqrestore(&lock, flags);
+
+		del_timer_sync(&t);
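+		/* fall through */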
+	default:
+		return;
+	}
+}
+
+static void
+aoe_exit(void)
+{
+	discover_timer(TKILL);
+
+	aoenet_exit();
+	unregister_blkdev(AOE_MAJOR, DEVICE_NAME);
+	aoechr_exit();
+	aoedev_exit();
+	aoeblk_exit();		/* free cache after de-allocating bufs */
+}
+
+static int __init
+aoe_init(void)
+{
+	int ret;
+
+	ret = aoedev_init();
+	if (ret)
+		return ret;
+	ret = aoechr_init();
+	if (ret)
+		goto chr_fail;
+	ret = aoeblk_init();
+	if (ret)
+		goto blk_fail;
+	ret = aoenet_init();
+	if (ret)
+		goto net_fail;
+	ret = register_blkdev(AOE_MAJOR, DEVICE_NAME);
+	if (ret < 0) {
+		printk(KERN_ERR "aoe: aoeblk_init: can't register major\n");
+		goto blkreg_fail;
+	}
+
+	printk(KERN_INFO
+	       "aoe: aoe_init: AoE v2.6-%s initialised.\n",
+	       VERSION);
+	discover_timer(TINIT);
+	return 0;
+
+ blkreg_fail:
+	aoenet_exit();
+ net_fail:
+	aoeblk_exit();
+ blk_fail:
+	aoechr_exit();
+ chr_fail:
+	aoedev_exit();
+	
+	printk(KERN_INFO "aoe: aoe_init: initialisation failure.\n");
+	return ret;
+}
+
+module_init(aoe_init);
+module_exit(aoe_exit);
+
diff --git a/drivers/block/aoe/aoenet.c b/drivers/block/aoe/aoenet.c
new file mode 100644
index 0000000..cc1945b
--- /dev/null
+++ b/drivers/block/aoe/aoenet.c
@@ -0,0 +1,172 @@
+/* Copyright (c) 2004 Coraid, Inc.  See COPYING for GPL terms. */
+/*
+ * aoenet.c
+ * Ethernet portion of AoE driver
+ */
+
+#include <linux/hdreg.h>
+#include <linux/blkdev.h>
+#include <linux/netdevice.h>
+#include "aoe.h"
+
+#define NECODES 5
+
+static char *aoe_errlist[] =
+{
+	"no such error",
+	"unrecognized command code",
+	"bad argument parameter",
+	"device unavailable",
+	"config string present",
+	"unsupported version"
+};
+
+enum {
+	IFLISTSZ = 1024,
+};
+
+static char aoe_iflist[IFLISTSZ];
+
+int
+is_aoe_netif(struct net_device *ifp)
+{
+	register char *p, *q;
+	register int len;
+
+	if (aoe_iflist[0] == '\0')
+		return 1;
+
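+	/* aoe_iflist holds whitespace-separated interface names, e.g. "eth2 eth3" */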
+	for (p = aoe_iflist; *p; p = q + strspn(q, WHITESPACE)) {
+		q = p + strcspn(p, WHITESPACE);
+		if (q != p)
+			len = q - p;
+		else
+			len = strlen(p); /* last token in aoe_iflist */
+
+		if (strlen(ifp->name) == len && !strncmp(ifp->name, p, len))
+			return 1;
+		if (q == p)
+			break;
+	}
+
+	return 0;
+}
+
+int
+set_aoe_iflist(const char __user *user_str, size_t size)
+{
+	if (size >= IFLISTSZ)
+		return -EINVAL;
+
+	if (copy_from_user(aoe_iflist, user_str, size)) {
+		printk(KERN_INFO "aoe: %s: copy from user failed\n", __FUNCTION__);
+		return -EFAULT;
+	}
+	aoe_iflist[size] = 0x00;
+	return 0;
+}
+
+u64
+mac_addr(char addr[6])
+{
+	u64 n = 0;
+	char *p = (char *) &n;
+
+	memcpy(p + 2, addr, 6);	/* can't use sizeof addr: the array parameter decays to a pointer */
+
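+	/* the six octets occupy the low 48 bits: aa:bb:cc:dd:ee:ff yields 0x0000aabbccddeeff */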
+	return __be64_to_cpu(n);
+}
+
+static struct sk_buff *
+skb_check(struct sk_buff *skb)
+{
+	if (skb_is_nonlinear(skb)) {
+		skb = skb_share_check(skb, GFP_ATOMIC);
+		if (skb && skb_linearize(skb, GFP_ATOMIC) < 0) {
+			dev_kfree_skb(skb);
+			return NULL;
+		}
+	}
+	return skb;
+}
+
+void
+aoenet_xmit(struct sk_buff *sl)
+{
+	struct sk_buff *skb;
+
+	while ((skb = sl)) {
+		sl = sl->next;
+		skb->next = skb->prev = NULL;
+		dev_queue_xmit(skb);
+	}
+}
+
+/*
+ * (1) skb->len doesn't include the hardware header by default; push it
+ * back on so the length checks in the response handlers see the whole frame.
+ */
+static int
+aoenet_rcv(struct sk_buff *skb, struct net_device *ifp, struct packet_type *pt)
+{
+	struct aoe_hdr *h;
+	ulong n;
+
+	skb = skb_check(skb);
+	if (!skb)
+		return 0;
+
+	if (!is_aoe_netif(ifp))
+		goto exit;
+
+	//skb->len += ETH_HLEN;	/* (1) */
+	skb_push(skb, ETH_HLEN);	/* (1) */
+
+	h = (struct aoe_hdr *) skb->mac.raw;
+	n = __be32_to_cpu(*((u32 *) h->tag));
+	if ((h->verfl & AOEFL_RSP) == 0 || (n & 1<<31))
+		goto exit;
+
+	if (h->verfl & AOEFL_ERR) {
+		n = h->err;
+		if (n > NECODES)
+			n = 0;
+		if (net_ratelimit())
+			printk(KERN_ERR "aoe: aoenet_rcv: error packet from %d.%d; "
+			       "ecode=%d '%s'\n",
+			       __be16_to_cpu(*((u16 *) h->major)), h->minor, 
+			       h->err, aoe_errlist[n]);
+		goto exit;
+	}
+
+	switch (h->cmd) {
+	case AOECMD_ATA:
+		aoecmd_ata_rsp(skb);
+		break;
+	case AOECMD_CFG:
+		aoecmd_cfg_rsp(skb);
+		break;
+	default:
+		printk(KERN_INFO "aoe: aoenet_rcv: unknown cmd %d\n", h->cmd);
+	}
+exit:
+	dev_kfree_skb(skb);
+	return 0;
+}
+
+static struct packet_type aoe_pt = {
+	.type = __constant_htons(ETH_P_AOE),
+	.func = aoenet_rcv,
+};
+
+int __init
+aoenet_init(void)
+{
+	dev_add_pack(&aoe_pt);
+	return 0;
+}
+
+void
+aoenet_exit(void)
+{
+	dev_remove_pack(&aoe_pt);
+}
+
diff --git a/drivers/block/as-iosched.c b/drivers/block/as-iosched.c
new file mode 100644
index 0000000..a9575bb
--- /dev/null
+++ b/drivers/block/as-iosched.c
@@ -0,0 +1,2136 @@
+/*
+ *  linux/drivers/block/as-iosched.c
+ *
+ *  Anticipatory & deadline i/o scheduler.
+ *
+ *  Copyright (C) 2002 Jens Axboe <axboe@suse.de>
+ *                     Nick Piggin <piggin@cyberone.com.au>
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <linux/elevator.h>
+#include <linux/bio.h>
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/compiler.h>
+#include <linux/hash.h>
+#include <linux/rbtree.h>
+#include <linux/interrupt.h>
+
+#define REQ_SYNC	1
+#define REQ_ASYNC	0
+
+/*
+ * See Documentation/block/as-iosched.txt
+ */
+
+/*
+ * max time before a read is submitted.
+ */
+#define default_read_expire (HZ / 8)
+
+/*
+ * ditto for writes, these limits are not hard, even
+ * if the disk is capable of satisfying them.
+ */
+#define default_write_expire (HZ / 4)
+
+/*
+ * read_batch_expire describes how long we will allow a stream of reads to
+ * persist before looking to see whether it is time to switch over to writes.
+ */
+#define default_read_batch_expire (HZ / 2)
+
+/*
+ * write_batch_expire describes how long we want a stream of writes to run for.
+ * This is not a hard limit, but a target we set for the auto-tuning thingy.
+ * See, the problem is: we can send a lot of writes to disk cache / TCQ in
+ * a short amount of time...
+ */
+#define default_write_batch_expire (HZ / 8)
+
+/*
+ * max time we may wait to anticipate a read (default around 6ms)
+ */
+#define default_antic_expire ((HZ / 150) ? HZ / 150 : 1)
+
+/*
+ * Keep track of up to 20ms thinktimes. We can go as big as we like here,
+ * however huge values tend to interfere and not decay fast enough. A program
+ * might be in a non-io phase of operation. Waiting on user input for example,
+ * or doing a lengthy computation. A small penalty can be justified there, and
+ * will still catch out those processes that constantly have large thinktimes.
+ */
+#define MAX_THINKTIME (HZ/50UL)
+
+/* Bits in as_io_context.state */
+enum as_io_states {
+	AS_TASK_RUNNING=0,	/* Process has not exited */
+	AS_TASK_IOSTARTED,	/* Process has started some IO */
+	AS_TASK_IORUNNING,	/* Process has completed some IO */
+};
+
+enum anticipation_status {
+	ANTIC_OFF=0,		/* Not anticipating (normal operation)	*/
+	ANTIC_WAIT_REQ,		/* The last read has not yet completed  */
+	ANTIC_WAIT_NEXT,	/* Currently anticipating a request vs
+				   last read (which has completed) */
+	ANTIC_FINISHED,		/* Anticipating but have found a candidate
+				 * or timed out */
+};
+
+struct as_data {
+	/*
+	 * run time data
+	 */
+
+	struct request_queue *q;	/* the "owner" queue */
+
+	/*
+	 * requests (as_rq s) are present on both sort_list and fifo_list
+	 */
+	struct rb_root sort_list[2];
+	struct list_head fifo_list[2];
+
+	struct as_rq *next_arq[2];	/* next in sort order */
+	sector_t last_sector[2];	/* last REQ_SYNC & REQ_ASYNC sectors */
+	struct list_head *dispatch;	/* driver dispatch queue */
+	struct list_head *hash;		/* request hash */
+
+	unsigned long exit_prob;	/* probability a task will exit while
+					   being waited on */
+	unsigned long new_ttime_total; 	/* mean thinktime on new proc */
+	unsigned long new_ttime_mean;
+	u64 new_seek_total;		/* mean seek on new proc */
+	sector_t new_seek_mean;
+
+	unsigned long current_batch_expires;
+	unsigned long last_check_fifo[2];
+	int changed_batch;		/* 1: waiting for old batch to end */
+	int new_batch;			/* 1: waiting on first read complete */
+	int batch_data_dir;		/* current batch REQ_SYNC / REQ_ASYNC */
+	int write_batch_count;		/* max # of reqs in a write batch */
+	int current_write_count;	/* how many requests left this batch */
+	int write_batch_idled;		/* has the write batch gone idle? */
+	mempool_t *arq_pool;
+
+	enum anticipation_status antic_status;
+	unsigned long antic_start;	/* jiffies: when it started */
+	struct timer_list antic_timer;	/* anticipatory scheduling timer */
+	struct work_struct antic_work;	/* Deferred unplugging */
+	struct io_context *io_context;	/* Identify the expected process */
+	int ioc_finished; /* IO associated with io_context is finished */
+	int nr_dispatched;
+
+	/*
+	 * settings that change how the i/o scheduler behaves
+	 */
+	unsigned long fifo_expire[2];
+	unsigned long batch_expire[2];
+	unsigned long antic_expire;
+};
+
+#define list_entry_fifo(ptr)	list_entry((ptr), struct as_rq, fifo)
+
+/*
+ * per-request data.
+ */
+enum arq_state {
+	AS_RQ_NEW=0,		/* New - not referenced and not on any lists */
+	AS_RQ_QUEUED,		/* In the request queue. It belongs to the
+				   scheduler */
+	AS_RQ_DISPATCHED,	/* On the dispatch list. It belongs to the
+				   driver now */
+	AS_RQ_PRESCHED,		/* Debug poisoning for requests being used */
+	AS_RQ_REMOVED,
+	AS_RQ_MERGED,
+	AS_RQ_POSTSCHED,	/* when they shouldn't be */
+};
+
+struct as_rq {
+	/*
+	 * rbtree index, key is the starting offset
+	 */
+	struct rb_node rb_node;
+	sector_t rb_key;
+
+	struct request *request;
+
+	struct io_context *io_context;	/* The submitting task */
+
+	/*
+	 * request hash, key is the ending offset (for back merge lookup)
+	 */
+	struct list_head hash;
+	unsigned int on_hash;
+
+	/*
+	 * expire fifo
+	 */
+	struct list_head fifo;
+	unsigned long expires;
+
+	unsigned int is_sync;
+	enum arq_state state;
+};
+
+#define RQ_DATA(rq)	((struct as_rq *) (rq)->elevator_private)
+
+static kmem_cache_t *arq_pool;
+
+/*
+ * IO Context helper functions
+ */
+
+/* Called to deallocate the as_io_context */
+static void free_as_io_context(struct as_io_context *aic)
+{
+	kfree(aic);
+}
+
+/* Called when the task exits */
+static void exit_as_io_context(struct as_io_context *aic)
+{
+	WARN_ON(!test_bit(AS_TASK_RUNNING, &aic->state));
+	clear_bit(AS_TASK_RUNNING, &aic->state);
+}
+
+static struct as_io_context *alloc_as_io_context(void)
+{
+	struct as_io_context *ret;
+
+	ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
+	if (ret) {
+		ret->dtor = free_as_io_context;
+		ret->exit = exit_as_io_context;
+		ret->state = 1 << AS_TASK_RUNNING;
+		atomic_set(&ret->nr_queued, 0);
+		atomic_set(&ret->nr_dispatched, 0);
+		spin_lock_init(&ret->lock);
+		ret->ttime_total = 0;
+		ret->ttime_samples = 0;
+		ret->ttime_mean = 0;
+		ret->seek_total = 0;
+		ret->seek_samples = 0;
+		ret->seek_mean = 0;
+	}
+
+	return ret;
+}
+
+/*
+ * If the current task has no AS IO context then create one and initialise it.
+ * Then take a ref on the task's io context and return it.
+ */
+static struct io_context *as_get_io_context(void)
+{
+	struct io_context *ioc = get_io_context(GFP_ATOMIC);
+	if (ioc && !ioc->aic) {
+		ioc->aic = alloc_as_io_context();
+		if (!ioc->aic) {
+			put_io_context(ioc);
+			ioc = NULL;
+		}
+	}
+	return ioc;
+}
+
+/*
+ * the back merge hash support functions
+ */
+static const int as_hash_shift = 6;
+#define AS_HASH_BLOCK(sec)	((sec) >> 3)
+#define AS_HASH_FN(sec)		(hash_long(AS_HASH_BLOCK((sec)), as_hash_shift))
+#define AS_HASH_ENTRIES		(1 << as_hash_shift)
+#define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
+#define list_entry_hash(ptr)	list_entry((ptr), struct as_rq, hash)
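+
+/*
+ * A request is hashed on the sector just past its end (rq_hash_key), so
+ * a new request that starts where a queued one ends can be found and
+ * back merged with it.
+ */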
+
+static inline void __as_del_arq_hash(struct as_rq *arq)
+{
+	arq->on_hash = 0;
+	list_del_init(&arq->hash);
+}
+
+static inline void as_del_arq_hash(struct as_rq *arq)
+{
+	if (arq->on_hash)
+		__as_del_arq_hash(arq);
+}
+
+static void as_remove_merge_hints(request_queue_t *q, struct as_rq *arq)
+{
+	as_del_arq_hash(arq);
+
+	if (q->last_merge == arq->request)
+		q->last_merge = NULL;
+}
+
+static void as_add_arq_hash(struct as_data *ad, struct as_rq *arq)
+{
+	struct request *rq = arq->request;
+
+	BUG_ON(arq->on_hash);
+
+	arq->on_hash = 1;
+	list_add(&arq->hash, &ad->hash[AS_HASH_FN(rq_hash_key(rq))]);
+}
+
+/*
+ * move hot entry to front of chain
+ */
+static inline void as_hot_arq_hash(struct as_data *ad, struct as_rq *arq)
+{
+	struct request *rq = arq->request;
+	struct list_head *head = &ad->hash[AS_HASH_FN(rq_hash_key(rq))];
+
+	if (!arq->on_hash) {
+		WARN_ON(1);
+		return;
+	}
+
+	if (arq->hash.prev != head) {
+		list_del(&arq->hash);
+		list_add(&arq->hash, head);
+	}
+}
+
+static struct request *as_find_arq_hash(struct as_data *ad, sector_t offset)
+{
+	struct list_head *hash_list = &ad->hash[AS_HASH_FN(offset)];
+	struct list_head *entry, *next = hash_list->next;
+
+	while ((entry = next) != hash_list) {
+		struct as_rq *arq = list_entry_hash(entry);
+		struct request *__rq = arq->request;
+
+		next = entry->next;
+
+		BUG_ON(!arq->on_hash);
+
+		if (!rq_mergeable(__rq)) {
+			as_remove_merge_hints(ad->q, arq);
+			continue;
+		}
+
+		if (rq_hash_key(__rq) == offset)
+			return __rq;
+	}
+
+	return NULL;
+}
+
+/*
+ * rb tree support functions
+ */
+#define RB_NONE		(2)
+#define RB_EMPTY(root)	((root)->rb_node == NULL)
+#define ON_RB(node)	((node)->rb_color != RB_NONE)
+#define RB_CLEAR(node)	((node)->rb_color = RB_NONE)
+#define rb_entry_arq(node)	rb_entry((node), struct as_rq, rb_node)
+#define ARQ_RB_ROOT(ad, arq)	(&(ad)->sort_list[(arq)->is_sync])
+#define rq_rb_key(rq)		(rq)->sector
+
+/*
+ * as_find_first_arq finds the first (lowest sector numbered) request
+ * for the specified data_dir. Used to sweep back to the start of the disk
+ * (1-way elevator) after we process the last (highest sector) request.
+ */
+static struct as_rq *as_find_first_arq(struct as_data *ad, int data_dir)
+{
+	struct rb_node *n = ad->sort_list[data_dir].rb_node;
+
+	if (n == NULL)
+		return NULL;
+
+	for (;;) {
+		if (n->rb_left == NULL)
+			return rb_entry_arq(n);
+
+		n = n->rb_left;
+	}
+}
+
+/*
+ * Add the request to the rb tree if it is unique.  If there is an alias (an
+ * existing request against the same sector), which can happen when using
+ * direct IO, then return the alias.
+ */
+static struct as_rq *as_add_arq_rb(struct as_data *ad, struct as_rq *arq)
+{
+	struct rb_node **p = &ARQ_RB_ROOT(ad, arq)->rb_node;
+	struct rb_node *parent = NULL;
+	struct as_rq *__arq;
+	struct request *rq = arq->request;
+
+	arq->rb_key = rq_rb_key(rq);
+
+	while (*p) {
+		parent = *p;
+		__arq = rb_entry_arq(parent);
+
+		if (arq->rb_key < __arq->rb_key)
+			p = &(*p)->rb_left;
+		else if (arq->rb_key > __arq->rb_key)
+			p = &(*p)->rb_right;
+		else
+			return __arq;
+	}
+
+	rb_link_node(&arq->rb_node, parent, p);
+	rb_insert_color(&arq->rb_node, ARQ_RB_ROOT(ad, arq));
+
+	return NULL;
+}
+
+static inline void as_del_arq_rb(struct as_data *ad, struct as_rq *arq)
+{
+	if (!ON_RB(&arq->rb_node)) {
+		WARN_ON(1);
+		return;
+	}
+
+	rb_erase(&arq->rb_node, ARQ_RB_ROOT(ad, arq));
+	RB_CLEAR(&arq->rb_node);
+}
+
+static struct request *
+as_find_arq_rb(struct as_data *ad, sector_t sector, int data_dir)
+{
+	struct rb_node *n = ad->sort_list[data_dir].rb_node;
+	struct as_rq *arq;
+
+	while (n) {
+		arq = rb_entry_arq(n);
+
+		if (sector < arq->rb_key)
+			n = n->rb_left;
+		else if (sector > arq->rb_key)
+			n = n->rb_right;
+		else
+			return arq->request;
+	}
+
+	return NULL;
+}
+
+/*
+ * IO Scheduler proper
+ */
+
+#define MAXBACK (1024 * 1024)	/*
+				 * Maximum distance the disk will go backward
+				 * for a request.
+				 */
+
+#define BACK_PENALTY	2
+
+/*
+ * as_choose_req selects the preferred one of two requests of the same data_dir
+ * ignoring time - e.g. timeouts, which is the job of as_dispatch_request
+ */
+static struct as_rq *
+as_choose_req(struct as_data *ad, struct as_rq *arq1, struct as_rq *arq2)
+{
+	int data_dir;
+	sector_t last, s1, s2, d1, d2;
+	int r1_wrap=0, r2_wrap=0;	/* requests are behind the disk head */
+	const sector_t maxback = MAXBACK;
+
+	if (arq1 == NULL || arq1 == arq2)
+		return arq2;
+	if (arq2 == NULL)
+		return arq1;
+
+	data_dir = arq1->is_sync;
+
+	last = ad->last_sector[data_dir];
+	s1 = arq1->request->sector;
+	s2 = arq2->request->sector;
+
+	BUG_ON(data_dir != arq2->is_sync);
+
+	/*
+	 * Strict one way elevator _except_ in the case where we allow
+	 * short backward seeks which are biased as twice the cost of a
+	 * similar forward seek.
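+	 * For example, a 100 sector backward seek costs the same as a 200 sector forward seek.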
+	 */
+	if (s1 >= last)
+		d1 = s1 - last;
+	else if (s1+maxback >= last)
+		d1 = (last - s1)*BACK_PENALTY;
+	else {
+		r1_wrap = 1;
+		d1 = 0; /* shut up, gcc */
+	}
+
+	if (s2 >= last)
+		d2 = s2 - last;
+	else if (s2+maxback >= last)
+		d2 = (last - s2)*BACK_PENALTY;
+	else {
+		r2_wrap = 1;
+		d2 = 0;
+	}
+
+	/* Found required data */
+	if (!r1_wrap && r2_wrap)
+		return arq1;
+	else if (!r2_wrap && r1_wrap)
+		return arq2;
+	else if (r1_wrap && r2_wrap) {
+		/* both behind the head */
+		if (s1 <= s2)
+			return arq1;
+		else
+			return arq2;
+	}
+
+	/* Both requests in front of the head */
+	if (d1 < d2)
+		return arq1;
+	else if (d2 < d1)
+		return arq2;
+	else {
+		if (s1 >= s2)
+			return arq1;
+		else
+			return arq2;
+	}
+}
+
+/*
+ * as_find_next_arq finds the next request after @prev in elevator order.
+ * this and as_choose_req form the basis for how the scheduler chooses
+ * what request to process next. Anticipation works on top of this.
+ */
+static struct as_rq *as_find_next_arq(struct as_data *ad, struct as_rq *last)
+{
+	const int data_dir = last->is_sync;
+	struct as_rq *ret;
+	struct rb_node *rbnext = rb_next(&last->rb_node);
+	struct rb_node *rbprev = rb_prev(&last->rb_node);
+	struct as_rq *arq_next, *arq_prev;
+
+	BUG_ON(!ON_RB(&last->rb_node));
+
+	if (rbprev)
+		arq_prev = rb_entry_arq(rbprev);
+	else
+		arq_prev = NULL;
+
+	if (rbnext)
+		arq_next = rb_entry_arq(rbnext);
+	else {
+		arq_next = as_find_first_arq(ad, data_dir);
+		if (arq_next == last)
+			arq_next = NULL;
+	}
+
+	ret = as_choose_req(ad,	arq_next, arq_prev);
+
+	return ret;
+}
+
+/*
+ * anticipatory scheduling functions follow
+ */
+
+/*
+ * as_antic_expired tells us when we have anticipated too long.
+ * The funny "absolute difference" math on the elapsed time is to handle
+ * jiffy wraps, and disks which have been idle for 0x80000000 jiffies.
+ */
+static int as_antic_expired(struct as_data *ad)
+{
+	long delta_jif;
+
+	delta_jif = jiffies - ad->antic_start;
+	if (unlikely(delta_jif < 0))
+		delta_jif = -delta_jif;
+	if (delta_jif < ad->antic_expire)
+		return 0;
+
+	return 1;
+}
+
+/*
+ * as_antic_waitnext starts anticipating that a nice request will soon be
+ * submitted. See also as_antic_waitreq
+ */
+static void as_antic_waitnext(struct as_data *ad)
+{
+	unsigned long timeout;
+
+	BUG_ON(ad->antic_status != ANTIC_OFF
+			&& ad->antic_status != ANTIC_WAIT_REQ);
+
+	timeout = ad->antic_start + ad->antic_expire;
+
+	mod_timer(&ad->antic_timer, timeout);
+
+	ad->antic_status = ANTIC_WAIT_NEXT;
+}
+
+/*
+ * as_antic_waitreq starts anticipating. We don't start timing the anticipation
+ * until the request that we're anticipating on has finished. This means we
+ * are, with luck, timing from the moment the candidate process wakes up.
+ */
+static void as_antic_waitreq(struct as_data *ad)
+{
+	BUG_ON(ad->antic_status == ANTIC_FINISHED);
+	if (ad->antic_status == ANTIC_OFF) {
+		if (!ad->io_context || ad->ioc_finished)
+			as_antic_waitnext(ad);
+		else
+			ad->antic_status = ANTIC_WAIT_REQ;
+	}
+}
+
+/*
+ * This is called directly by the functions in this file to stop anticipation.
+ * We kill the timer and schedule a call to the request_fn asap.
+ */
+static void as_antic_stop(struct as_data *ad)
+{
+	int status = ad->antic_status;
+
+	if (status == ANTIC_WAIT_REQ || status == ANTIC_WAIT_NEXT) {
+		if (status == ANTIC_WAIT_NEXT)
+			del_timer(&ad->antic_timer);
+		ad->antic_status = ANTIC_FINISHED;
+		/* see as_work_handler */
+		kblockd_schedule_work(&ad->antic_work);
+	}
+}
+
+/*
+ * as_antic_timeout is the timer function set by as_antic_waitnext.
+ */
+static void as_antic_timeout(unsigned long data)
+{
+	struct request_queue *q = (struct request_queue *)data;
+	struct as_data *ad = q->elevator->elevator_data;
+	unsigned long flags;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	if (ad->antic_status == ANTIC_WAIT_REQ
+			|| ad->antic_status == ANTIC_WAIT_NEXT) {
+		struct as_io_context *aic = ad->io_context->aic;
+
+		ad->antic_status = ANTIC_FINISHED;
+		kblockd_schedule_work(&ad->antic_work);
+
+		if (aic->ttime_samples == 0) {
+			/* process anticipated on has exited or timed out */
+			ad->exit_prob = (7*ad->exit_prob + 256)/8;
+		}
+	}
+	spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+/*
+ * as_close_req decides if one request is considered "close" to the
+ * previous one issued.
+ */
+static int as_close_req(struct as_data *ad, struct as_rq *arq)
+{
+	unsigned long delay;	/* milliseconds */
+	sector_t last = ad->last_sector[ad->batch_data_dir];
+	sector_t next = arq->request->sector;
+	sector_t delta; /* acceptable close offset (in sectors) */
+
+	if (ad->antic_status == ANTIC_OFF || !ad->ioc_finished)
+		delay = 0;
+	else
+		delay = ((jiffies - ad->antic_start) * 1000) / HZ;
+
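+	/* the acceptable window starts at 64 sectors and roughly doubles for each millisecond spent anticipating */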
+	if (delay <= 1)
+		delta = 64;
+	else if (delay <= 20 && delay <= ad->antic_expire)
+		delta = 64 << (delay-1);
+	else
+		return 1;
+
+	return (last - (delta>>1) <= next) && (next <= last + delta);
+}
+
+/*
+ * as_can_break_anticipation returns true if we have been anticipating this
+ * request.
+ *
+ * It also returns true if the process against which we are anticipating
+ * submits a write - that's presumably an fsync, O_SYNC write, etc. We want to
+ * dispatch it ASAP, because we know that application will not be submitting
+ * any new reads.
+ *
+ * If the task which has submitted the request has exited, break anticipation.
+ *
+ * If this task has queued some other IO, do not enter anticipation.
+ */
+static int as_can_break_anticipation(struct as_data *ad, struct as_rq *arq)
+{
+	struct io_context *ioc;
+	struct as_io_context *aic;
+	sector_t s;
+
+	ioc = ad->io_context;
+	BUG_ON(!ioc);
+
+	if (arq && ioc == arq->io_context) {
+		/* request from same process */
+		return 1;
+	}
+
+	if (ad->ioc_finished && as_antic_expired(ad)) {
+		/*
+		 * In this situation status should really be FINISHED,
+		 * however the timer hasn't had the chance to run yet.
+		 */
+		return 1;
+	}
+
+	aic = ioc->aic;
+	if (!aic)
+		return 0;
+
+	if (!test_bit(AS_TASK_RUNNING, &aic->state)) {
+		/* process anticipated on has exited */
+		if (aic->ttime_samples == 0)
+			ad->exit_prob = (7*ad->exit_prob + 256)/8;
+		return 1;
+	}
+
+	if (atomic_read(&aic->nr_queued) > 0) {
+		/* process has more requests queued */
+		return 1;
+	}
+
+	if (atomic_read(&aic->nr_dispatched) > 0) {
+		/* process has more requests dispatched */
+		return 1;
+	}
+
+	if (arq && arq->is_sync == REQ_SYNC && as_close_req(ad, arq)) {
+		/*
+		 * Found a close request that is not one of ours.
+		 *
+		 * This makes close requests from another process reset
+		 * our thinktime delay. This is generally useful when there
+		 * are two or more cooperating processes working in the
+		 * same area.
+		 */
+		spin_lock(&aic->lock);
+		aic->last_end_request = jiffies;
+		spin_unlock(&aic->lock);
+		return 1;
+	}
+
+
+	if (aic->ttime_samples == 0) {
+		if (ad->new_ttime_mean > ad->antic_expire)
+			return 1;
+		if (ad->exit_prob > 128)
+			return 1;
+	} else if (aic->ttime_mean > ad->antic_expire) {
+		/* the process thinks too much between requests */
+		return 1;
+	}
+
+	if (!arq)
+		return 0;
+
+	if (ad->last_sector[REQ_SYNC] < arq->request->sector)
+		s = arq->request->sector - ad->last_sector[REQ_SYNC];
+	else
+		s = ad->last_sector[REQ_SYNC] - arq->request->sector;
+
+	if (aic->seek_samples == 0) {
+		/*
+		 * Process has just started IO. Use past statistics to
+		 * gauge the likelihood of success
+		 */
+		if (ad->new_seek_mean > s) {
+			/* this request is better than what we're expecting */
+			return 1;
+		}
+
+	} else {
+		if (aic->seek_mean > s) {
+			/* this request is better than what we're expecting */
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * as_can_anticipate indicates whether we should run arq now, or keep
+ * anticipating a better request.
+ */
+static int as_can_anticipate(struct as_data *ad, struct as_rq *arq)
+{
+	if (!ad->io_context)
+		/*
+		 * Last request submitted was a write
+		 */
+		return 0;
+
+	if (ad->antic_status == ANTIC_FINISHED)
+		/*
+		 * Don't restart if we have just finished. Run the next request
+		 */
+		return 0;
+
+	if (as_can_break_anticipation(ad, arq))
+		/*
+		 * This request is a good candidate. Don't keep anticipating,
+		 * run it.
+		 */
+		return 0;
+
+	/*
+	 * OK from here, we haven't finished, and don't have a decent request!
+	 * Status is either ANTIC_OFF so start waiting,
+	 * ANTIC_WAIT_REQ so continue waiting for request to finish
+	 * or ANTIC_WAIT_NEXT so continue waiting for an acceptable request.
+	 *
+	 */
+
+	return 1;
+}
+
+static void as_update_thinktime(struct as_data *ad, struct as_io_context *aic, unsigned long ttime)
+{
+	/* fixed point: 1.0 == 1<<8 */
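+	/* samples and totals decay by 7/8 each update; both carry the 1<<8 scale, so ttime_mean comes out in plain jiffies */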
+	if (aic->ttime_samples == 0) {
+		ad->new_ttime_total = (7*ad->new_ttime_total + 256*ttime) / 8;
+		ad->new_ttime_mean = ad->new_ttime_total / 256;
+
+		ad->exit_prob = (7*ad->exit_prob)/8;
+	}
+	aic->ttime_samples = (7*aic->ttime_samples + 256) / 8;
+	aic->ttime_total = (7*aic->ttime_total + 256*ttime) / 8;
+	aic->ttime_mean = (aic->ttime_total + 128) / aic->ttime_samples;
+}
+
+static void as_update_seekdist(struct as_data *ad, struct as_io_context *aic, sector_t sdist)
+{
+	u64 total;
+
+	if (aic->seek_samples == 0) {
+		ad->new_seek_total = (7*ad->new_seek_total + 256*(u64)sdist)/8;
+		ad->new_seek_mean = ad->new_seek_total / 256;
+	}
+
+	/*
+	 * Don't allow the seek distance to get too large from the
+	 * odd fragment, pagein, etc
+	 */
+	if (aic->seek_samples <= 60) /* second&third seek */
+		sdist = min(sdist, (aic->seek_mean * 4) + 2*1024*1024);
+	else
+		sdist = min(sdist, (aic->seek_mean * 4)	+ 2*1024*64);
+
+	aic->seek_samples = (7*aic->seek_samples + 256) / 8;
+	aic->seek_total = (7*aic->seek_total + (u64)256*sdist) / 8;
+	total = aic->seek_total + (aic->seek_samples/2);
+	do_div(total, aic->seek_samples);
+	aic->seek_mean = (sector_t)total;
+}
+
+/*
+ * as_update_iohist keeps decaying statistics of IO thinktimes and seek
+ * distances, and updates @aic->ttime_mean and @aic->seek_mean based on
+ * them. It is called when a new request is queued.
+ */
+static void as_update_iohist(struct as_data *ad, struct as_io_context *aic, struct request *rq)
+{
+	struct as_rq *arq = RQ_DATA(rq);
+	int data_dir = arq->is_sync;
+	unsigned long thinktime;
+	sector_t seek_dist;
+
+	if (aic == NULL)
+		return;
+
+	if (data_dir == REQ_SYNC) {
+		unsigned long in_flight = atomic_read(&aic->nr_queued)
+					+ atomic_read(&aic->nr_dispatched);
+		spin_lock(&aic->lock);
+		if (test_bit(AS_TASK_IORUNNING, &aic->state) ||
+			test_bit(AS_TASK_IOSTARTED, &aic->state)) {
+			/* Calculate read -> read thinktime */
+			if (test_bit(AS_TASK_IORUNNING, &aic->state)
+							&& in_flight == 0) {
+				thinktime = jiffies - aic->last_end_request;
+				thinktime = min(thinktime, MAX_THINKTIME-1);
+			} else
+				thinktime = 0;
+			as_update_thinktime(ad, aic, thinktime);
+
+			/* Calculate read -> read seek distance */
+			if (aic->last_request_pos < rq->sector)
+				seek_dist = rq->sector - aic->last_request_pos;
+			else
+				seek_dist = aic->last_request_pos - rq->sector;
+			as_update_seekdist(ad, aic, seek_dist);
+		}
+		aic->last_request_pos = rq->sector + rq->nr_sectors;
+		set_bit(AS_TASK_IOSTARTED, &aic->state);
+		spin_unlock(&aic->lock);
+	}
+}
+
+/*
+ * as_update_arq must be called whenever a request (arq) is added to
+ * the sort_list. This function keeps caches up to date, and checks if the
+ * request might be one we are "anticipating"
+ */
+static void as_update_arq(struct as_data *ad, struct as_rq *arq)
+{
+	const int data_dir = arq->is_sync;
+
+	/* keep the next_arq cache up to date */
+	ad->next_arq[data_dir] = as_choose_req(ad, arq, ad->next_arq[data_dir]);
+
+	/*
+	 * have we been anticipating this request?
+	 * or does it come from the same process as the one we are anticipating
+	 * for?
+	 */
+	if (ad->antic_status == ANTIC_WAIT_REQ
+			|| ad->antic_status == ANTIC_WAIT_NEXT) {
+		if (as_can_break_anticipation(ad, arq))
+			as_antic_stop(ad);
+	}
+}
+
+/*
+ * Gathers timings and resizes the write batch automatically: the batch
+ * count is shrunk if the last write batch ran over its time allotment,
+ * and grown if the full count was written with time to spare.
+ */
+static void update_write_batch(struct as_data *ad)
+{
+	unsigned long batch = ad->batch_expire[REQ_ASYNC];
+	long write_time;
+
+	write_time = (jiffies - ad->current_batch_expires) + batch;
+	if (write_time < 0)
+		write_time = 0;
+
+	if (write_time > batch && !ad->write_batch_idled) {
+		if (write_time > batch * 3)
+			ad->write_batch_count /= 2;
+		else
+			ad->write_batch_count--;
+	} else if (write_time < batch && ad->current_write_count == 0) {
+		if (batch > write_time * 3)
+			ad->write_batch_count *= 2;
+		else
+			ad->write_batch_count++;
+	}
+
+	if (ad->write_batch_count < 1)
+		ad->write_batch_count = 1;
+}
+
+/*
+ * as_completed_request is to be called when a request has completed and
+ * returned something to the requesting process, be it an error or data.
+ */
+static void as_completed_request(request_queue_t *q, struct request *rq)
+{
+	struct as_data *ad = q->elevator->elevator_data;
+	struct as_rq *arq = RQ_DATA(rq);
+
+	WARN_ON(!list_empty(&rq->queuelist));
+
+	if (arq->state == AS_RQ_PRESCHED) {
+		WARN_ON(arq->io_context);
+		goto out;
+	}
+
+	if (arq->state == AS_RQ_MERGED)
+		goto out_ioc;
+
+	if (arq->state != AS_RQ_REMOVED) {
+		printk("arq->state %d\n", arq->state);
+		WARN_ON(1);
+		goto out;
+	}
+
+	if (!blk_fs_request(rq))
+		goto out;
+
+	if (ad->changed_batch && ad->nr_dispatched == 1) {
+		kblockd_schedule_work(&ad->antic_work);
+		ad->changed_batch = 0;
+
+		if (ad->batch_data_dir == REQ_SYNC)
+			ad->new_batch = 1;
+	}
+	WARN_ON(ad->nr_dispatched == 0);
+	ad->nr_dispatched--;
+
+	/*
+	 * Start counting the batch from when a request of that direction is
+	 * actually serviced. This should help devices with big TCQ windows
+	 * and writeback caches
+	 */
+	if (ad->new_batch && ad->batch_data_dir == arq->is_sync) {
+		update_write_batch(ad);
+		ad->current_batch_expires = jiffies +
+				ad->batch_expire[REQ_SYNC];
+		ad->new_batch = 0;
+	}
+
+	if (ad->io_context == arq->io_context && ad->io_context) {
+		ad->antic_start = jiffies;
+		ad->ioc_finished = 1;
+		if (ad->antic_status == ANTIC_WAIT_REQ) {
+			/*
+			 * We were waiting on this request, now anticipate
+			 * the next one
+			 */
+			as_antic_waitnext(ad);
+		}
+	}
+
+out_ioc:
+	if (!arq->io_context)
+		goto out;
+
+	if (arq->is_sync == REQ_SYNC) {
+		struct as_io_context *aic = arq->io_context->aic;
+		if (aic) {
+			spin_lock(&aic->lock);
+			set_bit(AS_TASK_IORUNNING, &aic->state);
+			aic->last_end_request = jiffies;
+			spin_unlock(&aic->lock);
+		}
+	}
+
+	put_io_context(arq->io_context);
+out:
+	arq->state = AS_RQ_POSTSCHED;
+}
+
+/*
+ * as_remove_queued_request removes a request from the pre dispatch queue
+ * without updating refcounts. It is expected the caller will drop the
+ * reference unless it replaces the request at some part of the elevator
+ * (ie. the dispatch queue)
+ */
+static void as_remove_queued_request(request_queue_t *q, struct request *rq)
+{
+	struct as_rq *arq = RQ_DATA(rq);
+	const int data_dir = arq->is_sync;
+	struct as_data *ad = q->elevator->elevator_data;
+
+	WARN_ON(arq->state != AS_RQ_QUEUED);
+
+	if (arq->io_context && arq->io_context->aic) {
+		BUG_ON(!atomic_read(&arq->io_context->aic->nr_queued));
+		atomic_dec(&arq->io_context->aic->nr_queued);
+	}
+
+	/*
+	 * Update the "next_arq" cache if we are about to remove its
+	 * entry
+	 */
+	if (ad->next_arq[data_dir] == arq)
+		ad->next_arq[data_dir] = as_find_next_arq(ad, arq);
+
+	list_del_init(&arq->fifo);
+	as_remove_merge_hints(q, arq);
+	as_del_arq_rb(ad, arq);
+}
+
+/*
+ * as_remove_dispatched_request is called to remove a request which has gone
+ * to the dispatch list.
+ */
+static void as_remove_dispatched_request(request_queue_t *q, struct request *rq)
+{
+	struct as_rq *arq = RQ_DATA(rq);
+	struct as_io_context *aic;
+
+	if (!arq) {
+		WARN_ON(1);
+		return;
+	}
+
+	WARN_ON(arq->state != AS_RQ_DISPATCHED);
+	WARN_ON(ON_RB(&arq->rb_node));
+	if (arq->io_context && arq->io_context->aic) {
+		aic = arq->io_context->aic;
+		if (aic) {
+			WARN_ON(!atomic_read(&aic->nr_dispatched));
+			atomic_dec(&aic->nr_dispatched);
+		}
+	}
+}
+
+/*
+ * as_remove_request is called when a driver has finished with a request.
+ * This should only be called for dispatched requests, but for some reason
+ * on a POWER4 box running hwscan it is not.
+ */
+static void as_remove_request(request_queue_t *q, struct request *rq)
+{
+	struct as_rq *arq = RQ_DATA(rq);
+
+	if (unlikely(arq->state == AS_RQ_NEW))
+		goto out;
+
+	if (ON_RB(&arq->rb_node)) {
+		if (arq->state != AS_RQ_QUEUED) {
+			printk("arq->state %d\n", arq->state);
+			WARN_ON(1);
+			goto out;
+		}
+		/*
+		 * We'll lose the aliased request(s) here. I don't think this
+		 * will ever happen, but if it does, hopefully someone will
+		 * report it.
+		 */
+		WARN_ON(!list_empty(&rq->queuelist));
+		as_remove_queued_request(q, rq);
+	} else {
+		if (arq->state != AS_RQ_DISPATCHED) {
+			printk("arq->state %d\n", arq->state);
+			WARN_ON(1);
+			goto out;
+		}
+		as_remove_dispatched_request(q, rq);
+	}
+out:
+	arq->state = AS_RQ_REMOVED;
+}
+
+/*
+ * as_fifo_expired returns 0 if there are no expired requests on the fifo,
+ * 1 otherwise.  It is ratelimited so that we only perform the check once per
+ * `fifo_expire' interval.  Otherwise a large number of expired requests
+ * would create a hopeless seekstorm.
+ *
+ * See as_antic_expired comment.
+ */
+static int as_fifo_expired(struct as_data *ad, int adir)
+{
+	struct as_rq *arq;
+	long delta_jif;
+
+	delta_jif = jiffies - ad->last_check_fifo[adir];
+	if (unlikely(delta_jif < 0))
+		delta_jif = -delta_jif;
+	if (delta_jif < ad->fifo_expire[adir])
+		return 0;
+
+	ad->last_check_fifo[adir] = jiffies;
+
+	if (list_empty(&ad->fifo_list[adir]))
+		return 0;
+
+	arq = list_entry_fifo(ad->fifo_list[adir].next);
+
+	return time_after(jiffies, arq->expires);
+}
+
+/*
+ * as_batch_expired returns true if the current batch has expired. A batch
+ * is a set of reads or a set of writes.
+ */
+static inline int as_batch_expired(struct as_data *ad)
+{
+	if (ad->changed_batch || ad->new_batch)
+		return 0;
+
+	if (ad->batch_data_dir == REQ_SYNC)
+		/* TODO! add a check so a complete fifo gets written? */
+		return time_after(jiffies, ad->current_batch_expires);
+
+	return time_after(jiffies, ad->current_batch_expires)
+		|| ad->current_write_count == 0;
+}
+
+/*
+ * move an entry to dispatch queue
+ */
+static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq)
+{
+	struct request *rq = arq->request;
+	struct list_head *insert;
+	const int data_dir = arq->is_sync;
+
+	BUG_ON(!ON_RB(&arq->rb_node));
+
+	as_antic_stop(ad);
+	ad->antic_status = ANTIC_OFF;
+
+	/*
+	 * This has to be set in order to be correctly updated by
+	 * as_find_next_arq
+	 */
+	ad->last_sector[data_dir] = rq->sector + rq->nr_sectors;
+
+	if (data_dir == REQ_SYNC) {
+		/* In case we have to anticipate after this */
+		copy_io_context(&ad->io_context, &arq->io_context);
+	} else {
+		if (ad->io_context) {
+			put_io_context(ad->io_context);
+			ad->io_context = NULL;
+		}
+
+		if (ad->current_write_count != 0)
+			ad->current_write_count--;
+	}
+	ad->ioc_finished = 0;
+
+	ad->next_arq[data_dir] = as_find_next_arq(ad, arq);
+
+	/*
+	 * take it off the sort and fifo list, add to dispatch queue
+	 */
+	insert = ad->dispatch->prev;
+
+	while (!list_empty(&rq->queuelist)) {
+		struct request *__rq = list_entry_rq(rq->queuelist.next);
+		struct as_rq *__arq = RQ_DATA(__rq);
+
+		list_move_tail(&__rq->queuelist, ad->dispatch);
+
+		if (__arq->io_context && __arq->io_context->aic)
+			atomic_inc(&__arq->io_context->aic->nr_dispatched);
+
+		WARN_ON(__arq->state != AS_RQ_QUEUED);
+		__arq->state = AS_RQ_DISPATCHED;
+
+		ad->nr_dispatched++;
+	}
+
+	as_remove_queued_request(ad->q, rq);
+	WARN_ON(arq->state != AS_RQ_QUEUED);
+
+	list_add(&rq->queuelist, insert);
+	arq->state = AS_RQ_DISPATCHED;
+	if (arq->io_context && arq->io_context->aic)
+		atomic_inc(&arq->io_context->aic->nr_dispatched);
+	ad->nr_dispatched++;
+}
+
+/*
+ * as_dispatch_request selects the best request according to
+ * read/write expire, batch expire, etc, and moves it to the dispatch
+ * queue. Returns 1 if a request was found, 0 otherwise.
+ */
+static int as_dispatch_request(struct as_data *ad)
+{
+	struct as_rq *arq;
+	const int reads = !list_empty(&ad->fifo_list[REQ_SYNC]);
+	const int writes = !list_empty(&ad->fifo_list[REQ_ASYNC]);
+
+	/* Signal that the write batch was uncontended, so we can't time it */
+	if (ad->batch_data_dir == REQ_ASYNC && !reads) {
+		if (ad->current_write_count == 0 || !writes)
+			ad->write_batch_idled = 1;
+	}
+
+	if (!(reads || writes)
+		|| ad->antic_status == ANTIC_WAIT_REQ
+		|| ad->antic_status == ANTIC_WAIT_NEXT
+		|| ad->changed_batch)
+		return 0;
+
+	if (!(reads && writes && as_batch_expired(ad)) ) {
+		/*
+		 * batch is still running or no reads or no writes
+		 */
+		arq = ad->next_arq[ad->batch_data_dir];
+
+		if (ad->batch_data_dir == REQ_SYNC && ad->antic_expire) {
+			if (as_fifo_expired(ad, REQ_SYNC))
+				goto fifo_expired;
+
+			if (as_can_anticipate(ad, arq)) {
+				as_antic_waitreq(ad);
+				return 0;
+			}
+		}
+
+		if (arq) {
+			/* we have a "next request" */
+			if (reads && !writes)
+				ad->current_batch_expires =
+					jiffies + ad->batch_expire[REQ_SYNC];
+			goto dispatch_request;
+		}
+	}
+
+	/*
+	 * at this point we are not running a batch. select the appropriate
+	 * data direction (read / write)
+	 */
+
+	if (reads) {
+		BUG_ON(RB_EMPTY(&ad->sort_list[REQ_SYNC]));
+
+		if (writes && ad->batch_data_dir == REQ_SYNC)
+			/*
+			 * Last batch was a read, switch to writes
+			 */
+			goto dispatch_writes;
+
+		if (ad->batch_data_dir == REQ_ASYNC) {
+			WARN_ON(ad->new_batch);
+			ad->changed_batch = 1;
+		}
+		ad->batch_data_dir = REQ_SYNC;
+		arq = list_entry_fifo(ad->fifo_list[ad->batch_data_dir].next);
+		ad->last_check_fifo[ad->batch_data_dir] = jiffies;
+		goto dispatch_request;
+	}
+
+	/*
+	 * the last batch was a read
+	 */
+
+	if (writes) {
+dispatch_writes:
+		BUG_ON(RB_EMPTY(&ad->sort_list[REQ_ASYNC]));
+
+		if (ad->batch_data_dir == REQ_SYNC) {
+			ad->changed_batch = 1;
+
+			/*
+			 * new_batch might be 1 when the queue runs out of
+			 * reads. A subsequent submission of a write might
+			 * cause a change of batch before the read is finished.
+			 */
+			ad->new_batch = 0;
+		}
+		ad->batch_data_dir = REQ_ASYNC;
+		ad->current_write_count = ad->write_batch_count;
+		ad->write_batch_idled = 0;
+		arq = ad->next_arq[ad->batch_data_dir];
+		goto dispatch_request;
+	}
+
+	BUG();
+	return 0;
+
+dispatch_request:
+	/*
+	 * If a request has expired, service it.
+	 */
+
+	if (as_fifo_expired(ad, ad->batch_data_dir)) {
+fifo_expired:
+		arq = list_entry_fifo(ad->fifo_list[ad->batch_data_dir].next);
+		BUG_ON(arq == NULL);
+	}
+
+	if (ad->changed_batch) {
+		WARN_ON(ad->new_batch);
+
+		if (ad->nr_dispatched)
+			return 0;
+
+		if (ad->batch_data_dir == REQ_ASYNC)
+			ad->current_batch_expires = jiffies +
+					ad->batch_expire[REQ_ASYNC];
+		else
+			ad->new_batch = 1;
+
+		ad->changed_batch = 0;
+	}
+
+	/*
+	 * arq is the selected appropriate request.
+	 */
+	as_move_to_dispatch(ad, arq);
+
+	return 1;
+}
+
+static struct request *as_next_request(request_queue_t *q)
+{
+	struct as_data *ad = q->elevator->elevator_data;
+	struct request *rq = NULL;
+
+	/*
+	 * if there are still requests on the dispatch queue, grab the first
+	 */
+	if (!list_empty(ad->dispatch) || as_dispatch_request(ad))
+		rq = list_entry_rq(ad->dispatch->next);
+
+	return rq;
+}
+
+/*
+ * Add arq to a list behind alias
+ */
+static inline void
+as_add_aliased_request(struct as_data *ad, struct as_rq *arq, struct as_rq *alias)
+{
+	struct request  *req = arq->request;
+	struct list_head *insert = alias->request->queuelist.prev;
+
+	/*
+	 * Transfer list of aliases
+	 */
+	while (!list_empty(&req->queuelist)) {
+		struct request *__rq = list_entry_rq(req->queuelist.next);
+		struct as_rq *__arq = RQ_DATA(__rq);
+
+		list_move_tail(&__rq->queuelist, &alias->request->queuelist);
+
+		WARN_ON(__arq->state != AS_RQ_QUEUED);
+	}
+
+	/*
+	 * Another request with the same start sector on the rbtree.
+	 * Link this request to that sector. They are untangled in
+	 * as_move_to_dispatch
+	 */
+	list_add(&arq->request->queuelist, insert);
+
+	/*
+	 * Don't want to have to handle merges.
+	 */
+	as_remove_merge_hints(ad->q, arq);
+}
+
+/*
+ * add arq to rbtree and fifo
+ */
+static void as_add_request(struct as_data *ad, struct as_rq *arq)
+{
+	struct as_rq *alias;
+	int data_dir;
+
+	if (rq_data_dir(arq->request) == READ
+			|| current->flags&PF_SYNCWRITE)
+		arq->is_sync = 1;
+	else
+		arq->is_sync = 0;
+	data_dir = arq->is_sync;
+
+	arq->io_context = as_get_io_context();
+
+	if (arq->io_context) {
+		as_update_iohist(ad, arq->io_context->aic, arq->request);
+		atomic_inc(&arq->io_context->aic->nr_queued);
+	}
+
+	alias = as_add_arq_rb(ad, arq);
+	if (!alias) {
+		/*
+		 * set expire time (only used for reads) and add to fifo list
+		 */
+		arq->expires = jiffies + ad->fifo_expire[data_dir];
+		list_add_tail(&arq->fifo, &ad->fifo_list[data_dir]);
+
+		if (rq_mergeable(arq->request)) {
+			as_add_arq_hash(ad, arq);
+
+			if (!ad->q->last_merge)
+				ad->q->last_merge = arq->request;
+		}
+		as_update_arq(ad, arq); /* keep state machine up to date */
+
+	} else {
+		as_add_aliased_request(ad, arq, alias);
+
+		/*
+		 * have we been anticipating this request?
+		 * or does it come from the same process as the one we are
+		 * anticipating for?
+		 */
+		if (ad->antic_status == ANTIC_WAIT_REQ
+				|| ad->antic_status == ANTIC_WAIT_NEXT) {
+			if (as_can_break_anticipation(ad, arq))
+				as_antic_stop(ad);
+		}
+	}
+
+	arq->state = AS_RQ_QUEUED;
+}
+
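+/*
+ * as_deactivate_request is called when the driver hands a request back to
+ * the elevator without having completed it. Restore the DISPATCHED state
+ * and per-process accounting, and stop anticipating so that the request
+ * is not held up.
+ */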
+static void as_deactivate_request(request_queue_t *q, struct request *rq)
+{
+	struct as_data *ad = q->elevator->elevator_data;
+	struct as_rq *arq = RQ_DATA(rq);
+
+	if (arq) {
+		if (arq->state == AS_RQ_REMOVED) {
+			arq->state = AS_RQ_DISPATCHED;
+			if (arq->io_context && arq->io_context->aic)
+				atomic_inc(&arq->io_context->aic->nr_dispatched);
+		}
+	} else
+		WARN_ON(blk_fs_request(rq)
+			&& (!(rq->flags & (REQ_HARDBARRIER|REQ_SOFTBARRIER))) );
+
+	/* Stop anticipating - let this request get through */
+	as_antic_stop(ad);
+}
+
+/*
+ * requeue the request. The request has not been completed, nor is it a
+ * new request, so don't touch accounting.
+ */
+static void as_requeue_request(request_queue_t *q, struct request *rq)
+{
+	as_deactivate_request(q, rq);
+	list_add(&rq->queuelist, &q->queue_head);
+}
+
+/*
+ * Account a request that is inserted directly onto the dispatch queue.
+ * arq->io_context->aic->nr_dispatched should not need to be incremented
+ * because only new requests should come through here: requeues go through
+ * our explicit requeue handler.
+ */
+static void as_account_queued_request(struct as_data *ad, struct request *rq)
+{
+	if (blk_fs_request(rq)) {
+		struct as_rq *arq = RQ_DATA(rq);
+		arq->state = AS_RQ_DISPATCHED;
+		ad->nr_dispatched++;
+	}
+}
+
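+/*
+ * as_insert_request is the elevator add_req hook. Sorted inserts go
+ * through the full AS machinery; front and back inserts bypass it and go
+ * straight onto the dispatch list. A back insert first drains everything
+ * we are holding, which is what barrier requests require.
+ */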
+static void
+as_insert_request(request_queue_t *q, struct request *rq, int where)
+{
+	struct as_data *ad = q->elevator->elevator_data;
+	struct as_rq *arq = RQ_DATA(rq);
+
+	if (arq) {
+		if (arq->state != AS_RQ_PRESCHED) {
+			printk("arq->state: %d\n", arq->state);
+			WARN_ON(1);
+		}
+		arq->state = AS_RQ_NEW;
+	}
+
+	/* barriers must flush the reorder queue */
+	if (unlikely(rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)
+			&& where == ELEVATOR_INSERT_SORT)) {
+		WARN_ON(1);
+		where = ELEVATOR_INSERT_BACK;
+	}
+
+	switch (where) {
+		case ELEVATOR_INSERT_BACK:
+			while (ad->next_arq[REQ_SYNC])
+				as_move_to_dispatch(ad, ad->next_arq[REQ_SYNC]);
+
+			while (ad->next_arq[REQ_ASYNC])
+				as_move_to_dispatch(ad, ad->next_arq[REQ_ASYNC]);
+
+			list_add_tail(&rq->queuelist, ad->dispatch);
+			as_account_queued_request(ad, rq);
+			as_antic_stop(ad);
+			break;
+		case ELEVATOR_INSERT_FRONT:
+			list_add(&rq->queuelist, ad->dispatch);
+			as_account_queued_request(ad, rq);
+			as_antic_stop(ad);
+			break;
+		case ELEVATOR_INSERT_SORT:
+			BUG_ON(!blk_fs_request(rq));
+			as_add_request(ad, arq);
+			break;
+		default:
+			BUG();
+			return;
+	}
+}
+
+/*
+ * as_queue_empty tells us if there are requests left in the device. Note
+ * that a driver may not be able to get the next request even when the
+ * queue is not empty - the block layer uses this to check for plugging
+ * and merging opportunities.
+ */
+static int as_queue_empty(request_queue_t *q)
+{
+	struct as_data *ad = q->elevator->elevator_data;
+
+	if (!list_empty(&ad->fifo_list[REQ_ASYNC])
+		|| !list_empty(&ad->fifo_list[REQ_SYNC])
+		|| !list_empty(ad->dispatch))
+			return 0;
+
+	return 1;
+}
+
+static struct request *
+as_former_request(request_queue_t *q, struct request *rq)
+{
+	struct as_rq *arq = RQ_DATA(rq);
+	struct rb_node *rbprev = rb_prev(&arq->rb_node);
+	struct request *ret = NULL;
+
+	if (rbprev)
+		ret = rb_entry_arq(rbprev)->request;
+
+	return ret;
+}
+
+static struct request *
+as_latter_request(request_queue_t *q, struct request *rq)
+{
+	struct as_rq *arq = RQ_DATA(rq);
+	struct rb_node *rbnext = rb_next(&arq->rb_node);
+	struct request *ret = NULL;
+
+	if (rbnext)
+		ret = rb_entry_arq(rbnext)->request;
+
+	return ret;
+}
+
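+/*
+ * as_merge decides whether a bio can be merged into an existing request:
+ * first try the cached last_merge hint, then the hash for a back merge,
+ * then the rbtree for a front merge.
+ */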
+static int
+as_merge(request_queue_t *q, struct request **req, struct bio *bio)
+{
+	struct as_data *ad = q->elevator->elevator_data;
+	sector_t rb_key = bio->bi_sector + bio_sectors(bio);
+	struct request *__rq;
+	int ret;
+
+	/*
+	 * try last_merge to avoid going to hash
+	 */
+	ret = elv_try_last_merge(q, bio);
+	if (ret != ELEVATOR_NO_MERGE) {
+		__rq = q->last_merge;
+		goto out_insert;
+	}
+
+	/*
+	 * see if the merge hash can satisfy a back merge
+	 */
+	__rq = as_find_arq_hash(ad, bio->bi_sector);
+	if (__rq) {
+		BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector);
+
+		if (elv_rq_merge_ok(__rq, bio)) {
+			ret = ELEVATOR_BACK_MERGE;
+			goto out;
+		}
+	}
+
+	/*
+	 * check for front merge
+	 */
+	__rq = as_find_arq_rb(ad, rb_key, bio_data_dir(bio));
+	if (__rq) {
+		BUG_ON(rb_key != rq_rb_key(__rq));
+
+		if (elv_rq_merge_ok(__rq, bio)) {
+			ret = ELEVATOR_FRONT_MERGE;
+			goto out;
+		}
+	}
+
+	return ELEVATOR_NO_MERGE;
+out:
+	if (rq_mergeable(__rq))
+		q->last_merge = __rq;
+out_insert:
+	if (ret) {
+		if (rq_mergeable(__rq))
+			as_hot_arq_hash(ad, RQ_DATA(__rq));
+	}
+	*req = __rq;
+	return ret;
+}
+
+static void as_merged_request(request_queue_t *q, struct request *req)
+{
+	struct as_data *ad = q->elevator->elevator_data;
+	struct as_rq *arq = RQ_DATA(req);
+
+	/*
+	 * hash always needs to be repositioned, key is end sector
+	 */
+	as_del_arq_hash(arq);
+	as_add_arq_hash(ad, arq);
+
+	/*
+	 * if the merge was a front merge, we need to reposition request
+	 */
+	if (rq_rb_key(req) != arq->rb_key) {
+		struct as_rq *alias, *next_arq = NULL;
+
+		if (ad->next_arq[arq->is_sync] == arq)
+			next_arq = as_find_next_arq(ad, arq);
+
+		/*
+		 * Note! We should really be moving any old aliased requests
+		 * off this request and try to insert them into the rbtree. We
+		 * currently don't bother. Ditto the next function.
+		 */
+		as_del_arq_rb(ad, arq);
+		if ((alias = as_add_arq_rb(ad, arq)) ) {
+			list_del_init(&arq->fifo);
+			as_add_aliased_request(ad, arq, alias);
+			if (next_arq)
+				ad->next_arq[arq->is_sync] = next_arq;
+		}
+		/*
+		 * Note! At this stage of this and the next function, our next
+		 * request may not be optimal - eg the request may have "grown"
+		 * behind the disk head. We currently don't bother adjusting.
+		 */
+	}
+
+	if (arq->on_hash)
+		q->last_merge = req;
+}
+
+static void
+as_merged_requests(request_queue_t *q, struct request *req,
+			 struct request *next)
+{
+	struct as_data *ad = q->elevator->elevator_data;
+	struct as_rq *arq = RQ_DATA(req);
+	struct as_rq *anext = RQ_DATA(next);
+
+	BUG_ON(!arq);
+	BUG_ON(!anext);
+
+	/*
+	 * reposition arq (this is the merged request) in hash, and in rbtree
+	 * in case of a front merge
+	 */
+	as_del_arq_hash(arq);
+	as_add_arq_hash(ad, arq);
+
+	if (rq_rb_key(req) != arq->rb_key) {
+		struct as_rq *alias, *next_arq = NULL;
+
+		if (ad->next_arq[arq->is_sync] == arq)
+			next_arq = as_find_next_arq(ad, arq);
+
+		as_del_arq_rb(ad, arq);
+		if ((alias = as_add_arq_rb(ad, arq)) ) {
+			list_del_init(&arq->fifo);
+			as_add_aliased_request(ad, arq, alias);
+			if (next_arq)
+				ad->next_arq[arq->is_sync] = next_arq;
+		}
+	}
+
+	/*
+	 * if anext expires before arq, assign its expire time to arq
+	 * and move into anext position (anext will be deleted) in fifo
+	 */
+	if (!list_empty(&arq->fifo) && !list_empty(&anext->fifo)) {
+		if (time_before(anext->expires, arq->expires)) {
+			list_move(&arq->fifo, &anext->fifo);
+			arq->expires = anext->expires;
+			/*
+			 * Don't copy here but swap, because when anext is
+			 * removed below, it must contain the unused context
+			 */
+			swap_io_context(&arq->io_context, &anext->io_context);
+		}
+	}
+
+	/*
+	 * Transfer list of aliases
+	 */
+	while (!list_empty(&next->queuelist)) {
+		struct request *__rq = list_entry_rq(next->queuelist.next);
+		struct as_rq *__arq = RQ_DATA(__rq);
+
+		list_move_tail(&__rq->queuelist, &req->queuelist);
+
+		WARN_ON(__arq->state != AS_RQ_QUEUED);
+	}
+
+	/*
+	 * kill knowledge of next, this one is a goner
+	 */
+	as_remove_queued_request(q, next);
+
+	anext->state = AS_RQ_MERGED;
+}
+
+/*
+ * This is executed in a "deferred" process context, by kblockd. It calls the
+ * driver's request_fn so the driver can submit that request.
+ *
+ * IMPORTANT! This guy will reenter the elevator, so set up all queue global
+ * state before calling, and don't rely on any state over calls.
+ *
+ * FIXME! dispatch queue is not a queue at all!
+ */
+static void as_work_handler(void *data)
+{
+	struct request_queue *q = data;
+	unsigned long flags;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	if (as_next_request(q))
+		q->request_fn(q);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+static void as_put_request(request_queue_t *q, struct request *rq)
+{
+	struct as_data *ad = q->elevator->elevator_data;
+	struct as_rq *arq = RQ_DATA(rq);
+
+	if (!arq) {
+		WARN_ON(1);
+		return;
+	}
+
+	if (arq->state != AS_RQ_POSTSCHED && arq->state != AS_RQ_PRESCHED) {
+		printk("arq->state %d\n", arq->state);
+		WARN_ON(1);
+	}
+
+	mempool_free(arq, ad->arq_pool);
+	rq->elevator_private = NULL;
+}
+
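+/*
+ * as_set_request allocates the per-request scheduler data (struct as_rq)
+ * from the mempool and attaches it to the request. Returns 0 on success,
+ * 1 if the allocation failed.
+ */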
+static int as_set_request(request_queue_t *q, struct request *rq, int gfp_mask)
+{
+	struct as_data *ad = q->elevator->elevator_data;
+	struct as_rq *arq = mempool_alloc(ad->arq_pool, gfp_mask);
+
+	if (arq) {
+		memset(arq, 0, sizeof(*arq));
+		RB_CLEAR(&arq->rb_node);
+		arq->request = rq;
+		arq->state = AS_RQ_PRESCHED;
+		arq->io_context = NULL;
+		INIT_LIST_HEAD(&arq->hash);
+		arq->on_hash = 0;
+		INIT_LIST_HEAD(&arq->fifo);
+		rq->elevator_private = arq;
+		return 0;
+	}
+
+	return 1;
+}
+
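+/*
+ * as_may_queue: if the process asking to allocate a request is the one we
+ * are currently anticipating, answer ELV_MQUEUE_MUST so it is not blocked
+ * by queue limits while we sit idle waiting for it.
+ */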
+static int as_may_queue(request_queue_t *q, int rw)
+{
+	int ret = ELV_MQUEUE_MAY;
+	struct as_data *ad = q->elevator->elevator_data;
+	struct io_context *ioc;
+	if (ad->antic_status == ANTIC_WAIT_REQ ||
+			ad->antic_status == ANTIC_WAIT_NEXT) {
+		ioc = as_get_io_context();
+		if (ad->io_context == ioc)
+			ret = ELV_MQUEUE_MUST;
+		put_io_context(ioc);
+	}
+
+	return ret;
+}
+
+static void as_exit_queue(elevator_t *e)
+{
+	struct as_data *ad = e->elevator_data;
+
+	del_timer_sync(&ad->antic_timer);
+	kblockd_flush();
+
+	BUG_ON(!list_empty(&ad->fifo_list[REQ_SYNC]));
+	BUG_ON(!list_empty(&ad->fifo_list[REQ_ASYNC]));
+
+	mempool_destroy(ad->arq_pool);
+	put_io_context(ad->io_context);
+	kfree(ad->hash);
+	kfree(ad);
+}
+
+/*
+ * initialize elevator private data (as_data), and alloc a arq for
+ * each request on the free lists
+ */
+static int as_init_queue(request_queue_t *q, elevator_t *e)
+{
+	struct as_data *ad;
+	int i;
+
+	if (!arq_pool)
+		return -ENOMEM;
+
+	ad = kmalloc(sizeof(*ad), GFP_KERNEL);
+	if (!ad)
+		return -ENOMEM;
+	memset(ad, 0, sizeof(*ad));
+
+	ad->q = q; /* Identify what queue the data belongs to */
+
+	ad->hash = kmalloc(sizeof(struct list_head)*AS_HASH_ENTRIES,GFP_KERNEL);
+	if (!ad->hash) {
+		kfree(ad);
+		return -ENOMEM;
+	}
+
+	ad->arq_pool = mempool_create(BLKDEV_MIN_RQ, mempool_alloc_slab, mempool_free_slab, arq_pool);
+	if (!ad->arq_pool) {
+		kfree(ad->hash);
+		kfree(ad);
+		return -ENOMEM;
+	}
+
+	/* anticipatory scheduling helpers */
+	ad->antic_timer.function = as_antic_timeout;
+	ad->antic_timer.data = (unsigned long)q;
+	init_timer(&ad->antic_timer);
+	INIT_WORK(&ad->antic_work, as_work_handler, q);
+
+	for (i = 0; i < AS_HASH_ENTRIES; i++)
+		INIT_LIST_HEAD(&ad->hash[i]);
+
+	INIT_LIST_HEAD(&ad->fifo_list[REQ_SYNC]);
+	INIT_LIST_HEAD(&ad->fifo_list[REQ_ASYNC]);
+	ad->sort_list[REQ_SYNC] = RB_ROOT;
+	ad->sort_list[REQ_ASYNC] = RB_ROOT;
+	ad->dispatch = &q->queue_head;
+	ad->fifo_expire[REQ_SYNC] = default_read_expire;
+	ad->fifo_expire[REQ_ASYNC] = default_write_expire;
+	ad->antic_expire = default_antic_expire;
+	ad->batch_expire[REQ_SYNC] = default_read_batch_expire;
+	ad->batch_expire[REQ_ASYNC] = default_write_batch_expire;
+	e->elevator_data = ad;
+
+	ad->current_batch_expires = jiffies + ad->batch_expire[REQ_SYNC];
+	ad->write_batch_count = ad->batch_expire[REQ_ASYNC] / 10;
+	if (ad->write_batch_count < 2)
+		ad->write_batch_count = 2;
+
+	return 0;
+}
+
+/*
+ * sysfs parts below
+ */
+struct as_fs_entry {
+	struct attribute attr;
+	ssize_t (*show)(struct as_data *, char *);
+	ssize_t (*store)(struct as_data *, const char *, size_t);
+};
+
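+/*
+ * The tunables are stored in jiffies but exported in milliseconds; the
+ * SHOW_FUNCTION/STORE_FUNCTION wrappers below do the jiffies <->
+ * milliseconds conversion, so these two helpers only format and parse
+ * plain decimal values.
+ */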
+static ssize_t
+as_var_show(unsigned int var, char *page)
+{
+	return sprintf(page, "%d\n", var);
+}
+
+static ssize_t
+as_var_store(unsigned long *var, const char *page, size_t count)
+{
+	char *p = (char *) page;
+
+	*var = simple_strtoul(p, &p, 10);
+	return count;
+}
+
+static ssize_t as_est_show(struct as_data *ad, char *page)
+{
+	int pos = 0;
+
+	pos += sprintf(page+pos, "%lu %% exit probability\n", 100*ad->exit_prob/256);
+	pos += sprintf(page+pos, "%lu ms new thinktime\n", ad->new_ttime_mean);
+	pos += sprintf(page+pos, "%llu sectors new seek distance\n", (unsigned long long)ad->new_seek_mean);
+
+	return pos;
+}
+
+#define SHOW_FUNCTION(__FUNC, __VAR)				\
+static ssize_t __FUNC(struct as_data *ad, char *page)		\
+{								\
+	return as_var_show(jiffies_to_msecs((__VAR)), (page));	\
+}
+SHOW_FUNCTION(as_readexpire_show, ad->fifo_expire[REQ_SYNC]);
+SHOW_FUNCTION(as_writeexpire_show, ad->fifo_expire[REQ_ASYNC]);
+SHOW_FUNCTION(as_anticexpire_show, ad->antic_expire);
+SHOW_FUNCTION(as_read_batchexpire_show, ad->batch_expire[REQ_SYNC]);
+SHOW_FUNCTION(as_write_batchexpire_show, ad->batch_expire[REQ_ASYNC]);
+#undef SHOW_FUNCTION
+
+#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX)				\
+static ssize_t __FUNC(struct as_data *ad, const char *page, size_t count)	\
+{									\
+	int ret = as_var_store(__PTR, (page), count);		\
+	if (*(__PTR) < (MIN))						\
+		*(__PTR) = (MIN);					\
+	else if (*(__PTR) > (MAX))					\
+		*(__PTR) = (MAX);					\
+	*(__PTR) = msecs_to_jiffies(*(__PTR));				\
+	return ret;							\
+}
+STORE_FUNCTION(as_readexpire_store, &ad->fifo_expire[REQ_SYNC], 0, INT_MAX);
+STORE_FUNCTION(as_writeexpire_store, &ad->fifo_expire[REQ_ASYNC], 0, INT_MAX);
+STORE_FUNCTION(as_anticexpire_store, &ad->antic_expire, 0, INT_MAX);
+STORE_FUNCTION(as_read_batchexpire_store,
+			&ad->batch_expire[REQ_SYNC], 0, INT_MAX);
+STORE_FUNCTION(as_write_batchexpire_store,
+			&ad->batch_expire[REQ_ASYNC], 0, INT_MAX);
+#undef STORE_FUNCTION
+
+static struct as_fs_entry as_est_entry = {
+	.attr = {.name = "est_time", .mode = S_IRUGO },
+	.show = as_est_show,
+};
+static struct as_fs_entry as_readexpire_entry = {
+	.attr = {.name = "read_expire", .mode = S_IRUGO | S_IWUSR },
+	.show = as_readexpire_show,
+	.store = as_readexpire_store,
+};
+static struct as_fs_entry as_writeexpire_entry = {
+	.attr = {.name = "write_expire", .mode = S_IRUGO | S_IWUSR },
+	.show = as_writeexpire_show,
+	.store = as_writeexpire_store,
+};
+static struct as_fs_entry as_anticexpire_entry = {
+	.attr = {.name = "antic_expire", .mode = S_IRUGO | S_IWUSR },
+	.show = as_anticexpire_show,
+	.store = as_anticexpire_store,
+};
+static struct as_fs_entry as_read_batchexpire_entry = {
+	.attr = {.name = "read_batch_expire", .mode = S_IRUGO | S_IWUSR },
+	.show = as_read_batchexpire_show,
+	.store = as_read_batchexpire_store,
+};
+static struct as_fs_entry as_write_batchexpire_entry = {
+	.attr = {.name = "write_batch_expire", .mode = S_IRUGO | S_IWUSR },
+	.show = as_write_batchexpire_show,
+	.store = as_write_batchexpire_store,
+};
+
+static struct attribute *default_attrs[] = {
+	&as_est_entry.attr,
+	&as_readexpire_entry.attr,
+	&as_writeexpire_entry.attr,
+	&as_anticexpire_entry.attr,
+	&as_read_batchexpire_entry.attr,
+	&as_write_batchexpire_entry.attr,
+	NULL,
+};
+
+#define to_as(atr) container_of((atr), struct as_fs_entry, attr)
+
+static ssize_t
+as_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
+{
+	elevator_t *e = container_of(kobj, elevator_t, kobj);
+	struct as_fs_entry *entry = to_as(attr);
+
+	if (!entry->show)
+		return 0;
+
+	return entry->show(e->elevator_data, page);
+}
+
+static ssize_t
+as_attr_store(struct kobject *kobj, struct attribute *attr,
+		    const char *page, size_t length)
+{
+	elevator_t *e = container_of(kobj, elevator_t, kobj);
+	struct as_fs_entry *entry = to_as(attr);
+
+	if (!entry->store)
+		return -EINVAL;
+
+	return entry->store(e->elevator_data, page, length);
+}
+
+static struct sysfs_ops as_sysfs_ops = {
+	.show	= as_attr_show,
+	.store	= as_attr_store,
+};
+
+static struct kobj_type as_ktype = {
+	.sysfs_ops	= &as_sysfs_ops,
+	.default_attrs	= default_attrs,
+};
+
+static struct elevator_type iosched_as = {
+	.ops = {
+		.elevator_merge_fn = 		as_merge,
+		.elevator_merged_fn =		as_merged_request,
+		.elevator_merge_req_fn =	as_merged_requests,
+		.elevator_next_req_fn =		as_next_request,
+		.elevator_add_req_fn =		as_insert_request,
+		.elevator_remove_req_fn =	as_remove_request,
+		.elevator_requeue_req_fn = 	as_requeue_request,
+		.elevator_deactivate_req_fn = 	as_deactivate_request,
+		.elevator_queue_empty_fn =	as_queue_empty,
+		.elevator_completed_req_fn =	as_completed_request,
+		.elevator_former_req_fn =	as_former_request,
+		.elevator_latter_req_fn =	as_latter_request,
+		.elevator_set_req_fn =		as_set_request,
+		.elevator_put_req_fn =		as_put_request,
+		.elevator_may_queue_fn =	as_may_queue,
+		.elevator_init_fn =		as_init_queue,
+		.elevator_exit_fn =		as_exit_queue,
+	},
+
+	.elevator_ktype = &as_ktype,
+	.elevator_name = "anticipatory",
+	.elevator_owner = THIS_MODULE,
+};
+
+static int __init as_init(void)
+{
+	int ret;
+
+	arq_pool = kmem_cache_create("as_arq", sizeof(struct as_rq),
+				     0, 0, NULL, NULL);
+	if (!arq_pool)
+		return -ENOMEM;
+
+	ret = elv_register(&iosched_as);
+	if (!ret) {
+		/*
+		 * don't allow AS to get unregistered, since we would have
+		 * to browse all tasks in the system and release their
+		 * as_io_context first
+		 */
+		__module_get(THIS_MODULE);
+		return 0;
+	}
+
+	kmem_cache_destroy(arq_pool);
+	return ret;
+}
+
+static void __exit as_exit(void)
+{
+	elv_unregister(&iosched_as);
+	kmem_cache_destroy(arq_pool);
+}
+
+module_init(as_init);
+module_exit(as_exit);
+
+MODULE_AUTHOR("Nick Piggin");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("anticipatory IO scheduler");
diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c
new file mode 100644
index 0000000..db05a5a
--- /dev/null
+++ b/drivers/block/ataflop.c
@@ -0,0 +1,2006 @@
+/*
+ *  drivers/block/ataflop.c
+ *
+ *  Copyright (C) 1993  Greg Harp
+ *  Atari Support by Bjoern Brauel, Roman Hodek
+ *
+ *  Big cleanup Sep 11..14 1994 Roman Hodek:
+ *   - Driver now works interrupt driven
+ *   - Support for two drives; should work, but I cannot test that :-(
+ *   - Reading is done in whole tracks and buffered to speed up things
+ *   - Disk change detection and drive deselecting after motor-off
+ *     similar to TOS
+ *   - Autodetection of disk format (DD/HD); untested yet, because I
+ *     don't have an HD drive :-(
+ *
+ *  Fixes Nov 13 1994 Martin Schaller:
+ *   - Autodetection works now
+ *   - Support for 5 1/4'' disks
+ *   - Removed drive type (unknown on atari)
+ *   - Do seeks with 8 Mhz
+ *
+ *  Changes by Andreas Schwab:
+ *   - After errors in multiple read mode try again reading single sectors
+ *  (Feb 1995):
+ *   - Clean up error handling
+ *   - Set blk_size for proper size checking
+ *   - Initialize track register when testing presence of floppy
+ *   - Implement some ioctl's
+ *
+ *  Changes by Torsten Lang:
+ *   - When probing the floppies we should add the FDCCMDADD_H flag since
+ *     the FDC will otherwise wait forever when no disk is inserted...
+ *
+ * ++ Freddi Aschwanden (fa) 20.9.95 fixes for medusa:
+ *  - MFPDELAY() after each FDC access -> atari 
+ *  - more/other disk formats
+ *  - DMA to the block buffer directly if we have a 32bit DMA
+ *  - for medusa, the step rate is always 3ms
+ *  - on medusa, use only cache_push()
+ * Roman:
+ *  - Make disk format numbering independent from minors
+ *  - Let user set max. supported drive type (speeds up format
+ *    detection, saves buffer space)
+ *
+ * Roman 10/15/95:
+ *  - implement some more ioctls
+ *  - disk formatting
+ *  
+ * Andreas 95/12/12:
+ *  - increase gap size at start of track for HD/ED disks
+ *
+ * Michael (MSch) 11/07/96:
+ *  - implemented FDSETPRM and FDDEFPRM ioctl
+ *
+ * Andreas (97/03/19):
+ *  - implemented missing BLK* ioctls
+ *
+ *  Things left to do:
+ *   - Formatting
+ *   - Maybe a better strategy for disk change detection (does anyone
+ *     know one?)
+ */
+
+#include <linux/module.h>
+
+#include <linux/fd.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+
+#include <asm/atafd.h>
+#include <asm/atafdreg.h>
+#include <asm/atariints.h>
+#include <asm/atari_stdma.h>
+#include <asm/atari_stram.h>
+
+#define	FD_MAX_UNITS 2
+
+#undef DEBUG
+
+static struct request_queue *floppy_queue;
+
+#define QUEUE (floppy_queue)
+#define CURRENT elv_next_request(floppy_queue)
+
+/* Disk types: DD, HD, ED */
+static struct atari_disk_type {
+	const char	*name;
+	unsigned	spt;		/* sectors per track */
+	unsigned	blocks;		/* total number of blocks */
+	unsigned	fdc_speed;	/* fdc_speed setting */
+	unsigned 	stretch;	/* track doubling ? */
+} disk_type[] = {
+	{ "d360",  9, 720, 0, 0},	/*  0: 360kB diskette */
+	{ "D360",  9, 720, 0, 1},	/*  1: 360kb in 720k or 1.2MB drive */
+	{ "D720",  9,1440, 0, 0},	/*  2: 720kb in 720k or 1.2MB drive */
+	{ "D820", 10,1640, 0, 0},	/*  3: DD disk with 82 tracks/10 sectors */
+/* formats above are probed for type DD */
+#define	MAX_TYPE_DD 3
+	{ "h1200",15,2400, 3, 0},	/*  4: 1.2MB diskette */
+	{ "H1440",18,2880, 3, 0},	/*  5: 1.4 MB diskette (HD) */
+	{ "H1640",20,3280, 3, 0},	/*  6: 1.64MB diskette (fat HD) 82 tr 20 sec */
+/* formats above are probed for types DD and HD */
+#define	MAX_TYPE_HD 6
+	{ "E2880",36,5760, 3, 0},	/*  7: 2.8 MB diskette (ED) */
+	{ "E3280",40,6560, 3, 0},	/*  8: 3.2 MB diskette (fat ED) 82 tr 40 sec */
+/* formats above are probed for types DD, HD and ED */
+#define	MAX_TYPE_ED 8
+/* types below are never autoprobed */
+	{ "H1680",21,3360, 3, 0},	/*  9: 1.68MB diskette (fat HD) 80 tr 21 sec */
+	{ "h410",10,820, 0, 1},		/* 10: 410k diskette 41 tr 10 sec, stretch */
+	{ "h1476",18,2952, 3, 0},	/* 11: 1.48MB diskette 82 tr 18 sec */
+	{ "H1722",21,3444, 3, 0},	/* 12: 1.72MB diskette 82 tr 21 sec */
+	{ "h420",10,840, 0, 1},		/* 13: 420k diskette 42 tr 10 sec, stretch */
+	{ "H830",10,1660, 0, 0},	/* 14: 820k diskette 83 tr 10 sec */
+	{ "h1494",18,2952, 3, 0},	/* 15: 1.49MB diskette 83 tr 18 sec */
+	{ "H1743",21,3486, 3, 0},	/* 16: 1.74MB diskette 83 tr 21 sec */
+	{ "h880",11,1760, 0, 0},	/* 17: 880k diskette 80 tr 11 sec */
+	{ "D1040",13,2080, 0, 0},	/* 18: 1.04MB diskette 80 tr 13 sec */
+	{ "D1120",14,2240, 0, 0},	/* 19: 1.12MB diskette 80 tr 14 sec */
+	{ "h1600",20,3200, 3, 0},	/* 20: 1.60MB diskette 80 tr 20 sec */
+	{ "H1760",22,3520, 3, 0},	/* 21: 1.76MB diskette 80 tr 22 sec */
+	{ "H1920",24,3840, 3, 0},	/* 22: 1.92MB diskette 80 tr 24 sec */
+	{ "E3200",40,6400, 3, 0},	/* 23: 3.2MB diskette 80 tr 40 sec */
+	{ "E3520",44,7040, 3, 0},	/* 24: 3.52MB diskette 80 tr 44 sec */
+	{ "E3840",48,7680, 3, 0},	/* 25: 3.84MB diskette 80 tr 48 sec */
+	{ "H1840",23,3680, 3, 0},	/* 26: 1.84MB diskette 80 tr 23 sec */
+	{ "D800",10,1600, 0, 0},	/* 27: 800k diskette 80 tr 10 sec */
+};
+
+static int StartDiskType[] = {
+	MAX_TYPE_DD,
+	MAX_TYPE_HD,
+	MAX_TYPE_ED
+};
+
+#define	TYPE_DD		0
+#define	TYPE_HD		1
+#define	TYPE_ED		2
+
+static int DriveType = TYPE_HD;
+
+static DEFINE_SPINLOCK(ataflop_lock);
+
+/* Array for translating minors into disk formats */
+static struct {
+	int 	 index;
+	unsigned drive_types;
+} minor2disktype[] = {
+	{  0, TYPE_DD },	/*  1: d360 */
+	{  4, TYPE_HD },	/*  2: h1200 */
+	{  1, TYPE_DD },	/*  3: D360 */
+	{  2, TYPE_DD },	/*  4: D720 */
+	{  1, TYPE_DD },	/*  5: h360 = D360 */
+	{  2, TYPE_DD },	/*  6: h720 = D720 */
+	{  5, TYPE_HD },	/*  7: H1440 */
+	{  7, TYPE_ED },	/*  8: E2880 */
+/* some PC formats :-) */
+	{  8, TYPE_ED },	/*  9: E3280    <- was "CompaQ" == E2880 for PC */
+	{  5, TYPE_HD },	/* 10: h1440 = H1440 */
+	{  9, TYPE_HD },	/* 11: H1680 */
+	{ 10, TYPE_DD },	/* 12: h410  */
+	{  3, TYPE_DD },	/* 13: H820     <- == D820, 82x10 */
+	{ 11, TYPE_HD },	/* 14: h1476 */
+	{ 12, TYPE_HD },	/* 15: H1722 */
+	{ 13, TYPE_DD },	/* 16: h420  */
+	{ 14, TYPE_DD },	/* 17: H830  */
+	{ 15, TYPE_HD },	/* 18: h1494 */
+	{ 16, TYPE_HD },	/* 19: H1743 */
+	{ 17, TYPE_DD },	/* 20: h880  */
+	{ 18, TYPE_DD },	/* 21: D1040 */
+	{ 19, TYPE_DD },	/* 22: D1120 */
+	{ 20, TYPE_HD },	/* 23: h1600 */
+	{ 21, TYPE_HD },	/* 24: H1760 */
+	{ 22, TYPE_HD },	/* 25: H1920 */
+	{ 23, TYPE_ED },	/* 26: E3200 */
+	{ 24, TYPE_ED },	/* 27: E3520 */
+	{ 25, TYPE_ED },	/* 28: E3840 */
+	{ 26, TYPE_HD },	/* 29: H1840 */
+	{ 27, TYPE_DD },	/* 30: D800  */
+	{  6, TYPE_HD },	/* 31: H1640    <- was H1600 == h1600 for PC */
+};
+
+#define NUM_DISK_MINORS (sizeof(minor2disktype)/sizeof(*minor2disktype))
+
+/*
+ * Maximum disk size (in kilobytes). This default is used whenever the
+ * current disk size is unknown.
+ */
+#define MAX_DISK_SIZE 3280
+
+/*
+ * MSch: User-provided type information. 'drive' points to
+ * the respective entry of this array. Set by FDSETPRM ioctls.
+ */
+static struct atari_disk_type user_params[FD_MAX_UNITS];
+
+/*
+ * User-provided permanent type information. 'drive' points to
+ * the respective entry of this array.  Set by FDDEFPRM ioctls, 
+ * restored upon disk change by floppy_revalidate() if valid (as seen by
+ * default_params[].blocks > 0 - a bit in unit[].flags might be used for this?)
+ */
+static struct atari_disk_type default_params[FD_MAX_UNITS];
+
+/* current info on each unit */
+static struct atari_floppy_struct {
+	int connected;				/* !=0 : drive is connected */
+	int autoprobe;				/* !=0 : do autoprobe	    */
+
+	struct atari_disk_type	*disktype;	/* current type of disk */
+
+	int track;		/* current head position or -1 if
+				   unknown */
+	unsigned int steprate;	/* steprate setting */
+	unsigned int wpstat;	/* current state of WP signal (for
+				   disk change detection) */
+	int flags;		/* flags */
+	struct gendisk *disk;
+	int ref;
+	int type;
+} unit[FD_MAX_UNITS];
+
+#define	UD	unit[drive]
+#define	UDT	unit[drive].disktype
+#define	SUD	unit[SelectedDrive]
+#define	SUDT	unit[SelectedDrive].disktype
+
+
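+/* Low-level FDC register access goes through the ST DMA chip: first the
+ * wanted FDC register is selected via dma_mode_status, then the data is
+ * transferred through fdc_acces_seccount. The udelay(25) is presumably
+ * there to give the slow FDC time to settle between the two accesses.
+ */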
+#define FDC_READ(reg) ({			\
+    /* unsigned long __flags; */		\
+    unsigned short __val;			\
+    /* local_irq_save(__flags); */		\
+    dma_wd.dma_mode_status = 0x80 | (reg);	\
+    udelay(25);					\
+    __val = dma_wd.fdc_acces_seccount;		\
+    MFPDELAY();					\
+    /* local_irq_restore(__flags); */		\
+    __val & 0xff;				\
+})
+
+#define FDC_WRITE(reg,val)			\
+    do {					\
+	/* unsigned long __flags; */		\
+	/* local_irq_save(__flags); */		\
+	dma_wd.dma_mode_status = 0x80 | (reg);	\
+	udelay(25);				\
+	dma_wd.fdc_acces_seccount = (val);	\
+	MFPDELAY();				\
+        /* local_irq_restore(__flags); */	\
+    } while(0)
+
+
+/* Buffering variables:
+ * First, there is a DMA buffer in ST-RAM that is used for floppy DMA
+ * operations. Second, a track buffer is used to cache a whole track
+ * of the disk to save read operations. These are two separate buffers
+ * because that allows write operations without clearing the track buffer.
+ */
+
+static int MaxSectors[] = {
+	11, 22, 44
+};
+static int BufferSize[] = {
+	15*512, 30*512, 60*512
+};
+
+#define	BUFFER_SIZE	(BufferSize[DriveType])
+
+unsigned char *DMABuffer;			  /* buffer for writes */
+static unsigned long PhysDMABuffer;   /* physical address */
+
+static int UseTrackbuffer = -1;		  /* Do track buffering? */
+MODULE_PARM(UseTrackbuffer, "i");
+
+unsigned char *TrackBuffer;			  /* buffer for reads */
+static unsigned long PhysTrackBuffer; /* physical address */
+static int BufferDrive, BufferSide, BufferTrack;
+static int read_track;		/* non-zero if we are reading whole tracks */
+
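+/* Address of a sector within the track buffer, and a test for whether a
+ * given (drive,side,track) is the one currently held in the track buffer.
+ */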
+#define	SECTOR_BUFFER(sec)	(TrackBuffer + ((sec)-1)*512)
+#define	IS_BUFFERED(drive,side,track) \
+    (BufferDrive == (drive) && BufferSide == (side) && BufferTrack == (track))
+
+/*
+ * These are global variables, as that's the easiest way to give
+ * information to interrupts. They are the data used for the current
+ * request.
+ */
+static int SelectedDrive = 0;
+static int ReqCmd, ReqBlock;
+static int ReqSide, ReqTrack, ReqSector, ReqCnt;
+static int HeadSettleFlag = 0;
+static unsigned char *ReqData, *ReqBuffer;
+static int MotorOn = 0, MotorOffTrys;
+static int IsFormatting = 0, FormatError;
+
+static int UserSteprate[FD_MAX_UNITS] = { -1, -1 };
+MODULE_PARM(UserSteprate, "1-" __MODULE_STRING(FD_MAX_UNITS) "i");
+
+/* Synchronization of FDC access. */
+static volatile int fdc_busy = 0;
+static DECLARE_WAIT_QUEUE_HEAD(fdc_wait);
+static DECLARE_WAIT_QUEUE_HEAD(format_wait);
+
+static unsigned long changed_floppies = 0xff, fake_change = 0;
+#define	CHECK_CHANGE_DELAY	HZ/2
+
+#define	FD_MOTOR_OFF_DELAY	(3*HZ)
+#define	FD_MOTOR_OFF_MAXTRY	(10*20)
+
+#define FLOPPY_TIMEOUT		(6*HZ)
+#define RECALIBRATE_ERRORS	4	/* After this many errors the drive
+					 * will be recalibrated. */
+#define MAX_ERRORS		8	/* After this many errors the driver
+					 * will give up. */
+
+
+/*
+ * The driver is trying to determine the correct media format
+ * while Probing is set. fd_rwsec_done() clears it after a
+ * successful access.
+ */
+static int Probing = 0;
+
+/* This flag is set when a dummy seek is necessary to make the WP
+ * status bit accessible.
+ */
+static int NeedSeek = 0;
+
+
+#ifdef DEBUG
+#define DPRINT(a)	printk a
+#else
+#define DPRINT(a)
+#endif
+
+/***************************** Prototypes *****************************/
+
+static void fd_select_side( int side );
+static void fd_select_drive( int drive );
+static void fd_deselect( void );
+static void fd_motor_off_timer( unsigned long dummy );
+static void check_change( unsigned long dummy );
+static irqreturn_t floppy_irq (int irq, void *dummy, struct pt_regs *fp);
+static void fd_error( void );
+static int do_format(int drive, int type, struct atari_format_descr *desc);
+static void do_fd_action( int drive );
+static void fd_calibrate( void );
+static void fd_calibrate_done( int status );
+static void fd_seek( void );
+static void fd_seek_done( int status );
+static void fd_rwsec( void );
+static void fd_readtrack_check( unsigned long dummy );
+static void fd_rwsec_done( int status );
+static void fd_rwsec_done1(int status);
+static void fd_writetrack( void );
+static void fd_writetrack_done( int status );
+static void fd_times_out( unsigned long dummy );
+static void finish_fdc( void );
+static void finish_fdc_done( int dummy );
+static void setup_req_params( int drive );
+static void redo_fd_request( void);
+static int fd_ioctl( struct inode *inode, struct file *filp, unsigned int
+                     cmd, unsigned long param);
+static void fd_probe( int drive );
+static int fd_test_drive_present( int drive );
+static void config_types( void );
+static int floppy_open( struct inode *inode, struct file *filp );
+static int floppy_release( struct inode * inode, struct file * filp );
+
+/************************* End of Prototypes **************************/
+
+static struct timer_list motor_off_timer =
+	TIMER_INITIALIZER(fd_motor_off_timer, 0, 0);
+static struct timer_list readtrack_timer =
+	TIMER_INITIALIZER(fd_readtrack_check, 0, 0);
+
+static struct timer_list timeout_timer =
+	TIMER_INITIALIZER(fd_times_out, 0, 0);
+
+static struct timer_list fd_timer =
+	TIMER_INITIALIZER(check_change, 0, 0);
+	
+static inline void start_motor_off_timer(void)
+{
+	mod_timer(&motor_off_timer, jiffies + FD_MOTOR_OFF_DELAY);
+	MotorOffTrys = 0;
+}
+
+static inline void start_check_change_timer( void )
+{
+	mod_timer(&fd_timer, jiffies + CHECK_CHANGE_DELAY);
+}
+
+static inline void start_timeout(void)
+{
+	mod_timer(&timeout_timer, jiffies + FLOPPY_TIMEOUT);
+}
+
+static inline void stop_timeout(void)
+{
+	del_timer(&timeout_timer);
+}
+
+/* Select the side to use. */
+
+static void fd_select_side( int side )
+{
+	unsigned long flags;
+
+	/* protect against various other ints mucking around with the PSG */
+	local_irq_save(flags);
+  
+	sound_ym.rd_data_reg_sel = 14; /* Select PSG Port A */
+	sound_ym.wd_data = (side == 0) ? sound_ym.rd_data_reg_sel | 0x01 :
+	                                 sound_ym.rd_data_reg_sel & 0xfe;
+
+	local_irq_restore(flags);
+}
+
+
+/* Select a drive, update the FDC's track register and set the correct
+ * clock speed for this disk's type.
+ */
+
+static void fd_select_drive( int drive )
+{
+	unsigned long flags;
+	unsigned char tmp;
+  
+	if (drive == SelectedDrive)
+	  return;
+
+	/* protect against various other ints mucking around with the PSG */
+	local_irq_save(flags);
+	sound_ym.rd_data_reg_sel = 14; /* Select PSG Port A */
+	tmp = sound_ym.rd_data_reg_sel;
+	sound_ym.wd_data = (tmp | DSKDRVNONE) & ~(drive == 0 ? DSKDRV0 : DSKDRV1);
+	atari_dont_touch_floppy_select = 1;
+	local_irq_restore(flags);
+
+	/* restore track register to saved value */
+	FDC_WRITE( FDCREG_TRACK, UD.track );
+	udelay(25);
+
+	/* select 8/16 MHz */
+	if (UDT)
+		if (ATARIHW_PRESENT(FDCSPEED))
+			dma_wd.fdc_speed = UDT->fdc_speed;
+	
+	SelectedDrive = drive;
+}
+
+
+/* Deselect both drives. */
+
+static void fd_deselect( void )
+{
+	unsigned long flags;
+
+	/* protect against various other ints mucking around with the PSG */
+	local_irq_save(flags);
+	atari_dont_touch_floppy_select = 0;
+	sound_ym.rd_data_reg_sel=14;	/* Select PSG Port A */
+	sound_ym.wd_data = (sound_ym.rd_data_reg_sel |
+			    (MACH_IS_FALCON ? 3 : 7)); /* no drives selected */
+	/* On Falcon, the drive B select line is used on the printer port, so
+	 * leave it alone... */
+	SelectedDrive = -1;
+	local_irq_restore(flags);
+}
+
+
+/* This timer function deselects the drives when the FDC has switched the
+ * motor off. The deselection cannot happen earlier because the FDC
+ * counts the index signals, which arrive only if one drive is selected.
+ */
+
+static void fd_motor_off_timer( unsigned long dummy )
+{
+	unsigned char status;
+
+	if (SelectedDrive < 0)
+		/* no drive selected, needn't deselect anyone */
+		return;
+
+	if (stdma_islocked())
+		goto retry;
+
+	status = FDC_READ( FDCREG_STATUS );
+
+	if (!(status & 0x80)) {
+		/* motor already turned off by FDC -> deselect drives */
+		MotorOn = 0;
+		fd_deselect();
+		return;
+	}
+	/* not yet off, try again */
+
+  retry:
+	/* Test again later; if tested too often, it seems there is no disk
+	 * in the drive and the FDC will leave the motor on forever (or,
+	 * at least until a disk is inserted). So we'll test only twice
+	 * per second from then on...
+	 */
+	mod_timer(&motor_off_timer,
+		  jiffies + (MotorOffTrys++ < FD_MOTOR_OFF_MAXTRY ? HZ/20 : HZ/2));
+}
+
+
+/* This function is repeatedly called to detect disk changes (as well as
+ * possible) and keep track of the current state of the write protection.
+ */
+
+static void check_change( unsigned long dummy )
+{
+	static int    drive = 0;
+
+	unsigned long flags;
+	unsigned char old_porta;
+	int			  stat;
+
+	if (++drive > 1 || !UD.connected)
+		drive = 0;
+
+	/* protect against various other ints mucking around with the PSG */
+	local_irq_save(flags);
+
+	if (!stdma_islocked()) {
+		sound_ym.rd_data_reg_sel = 14;
+		old_porta = sound_ym.rd_data_reg_sel;
+		sound_ym.wd_data = (old_porta | DSKDRVNONE) &
+			               ~(drive == 0 ? DSKDRV0 : DSKDRV1);
+		stat = !!(FDC_READ( FDCREG_STATUS ) & FDCSTAT_WPROT);
+		sound_ym.wd_data = old_porta;
+
+		if (stat != UD.wpstat) {
+			DPRINT(( "wpstat[%d] = %d\n", drive, stat ));
+			UD.wpstat = stat;
+			set_bit (drive, &changed_floppies);
+		}
+	}
+	local_irq_restore(flags);
+
+	start_check_change_timer();
+}
+
+ 
+/* Handling of the Head Settling Flag: This flag should be set after each
+ * seek operation, because we don't use seeks with verify.
+ */
+
+static inline void set_head_settle_flag(void)
+{
+	HeadSettleFlag = FDCCMDADD_E;
+}
+
+static inline int get_head_settle_flag(void)
+{
+	int	tmp = HeadSettleFlag;
+	HeadSettleFlag = 0;
+	return( tmp );
+}
+
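+/* Copy exactly one 512-byte sector, using longword moves. */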
+static inline void copy_buffer(void *from, void *to)
+{
+	ulong *p1 = (ulong *)from, *p2 = (ulong *)to;
+	int cnt;
+
+	for (cnt = 512/4; cnt; cnt--)
+		*p2++ = *p1++;
+}
+
+/* General Interrupt Handling */
+
+static void (*FloppyIRQHandler)( int status ) = NULL;
+
+static irqreturn_t floppy_irq (int irq, void *dummy, struct pt_regs *fp)
+{
+	unsigned char status;
+	void (*handler)( int );
+
+	handler = xchg(&FloppyIRQHandler, NULL);
+
+	if (handler) {
+		nop();
+		status = FDC_READ( FDCREG_STATUS );
+		DPRINT(("FDC irq, status = %02x handler = %08lx\n",status,(unsigned long)handler));
+		handler( status );
+	}
+	else {
+		DPRINT(("FDC irq, no handler\n"));
+	}
+	return IRQ_HANDLED;
+}
+
+
+/* Error handling: If some error happened, retry a few times, then
+ * recalibrate, then try again, and fail after MAX_ERRORS.
+ */
+
+static void fd_error( void )
+{
+	if (IsFormatting) {
+		IsFormatting = 0;
+		FormatError = 1;
+		wake_up( &format_wait );
+		return;
+	}
+
+	if (!CURRENT)
+		return;
+
+	CURRENT->errors++;
+	if (CURRENT->errors >= MAX_ERRORS) {
+		printk(KERN_ERR "fd%d: too many errors.\n", SelectedDrive );
+		end_request(CURRENT, 0);
+	}
+	else if (CURRENT->errors == RECALIBRATE_ERRORS) {
+		printk(KERN_WARNING "fd%d: recalibrating\n", SelectedDrive );
+		if (SelectedDrive != -1)
+			SUD.track = -1;
+	}
+	redo_fd_request();
+}
+
+
+
+#define	SET_IRQ_HANDLER(proc) do { FloppyIRQHandler = (proc); } while(0)
+
+
+/* ---------- Formatting ---------- */
+
+#define FILL(n,val)		\
+    do {			\
+	memset( p, val, n );	\
+	p += n;			\
+    } while(0)
+
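+/* do_format builds a raw track image in TrackBuffer (gap bytes, ID fields
+ * and data fields filled with 0xe5, in the layout the FDC's write-track
+ * command expects) and then hands it to do_fd_action(), which ends up in
+ * fd_writetrack() because IsFormatting is set.
+ */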
+static int do_format(int drive, int type, struct atari_format_descr *desc)
+{
+	unsigned char	*p;
+	int sect, nsect;
+	unsigned long	flags;
+
+	DPRINT(("do_format( dr=%d tr=%d he=%d offs=%d )\n",
+		drive, desc->track, desc->head, desc->sect_offset ));
+
+	local_irq_save(flags);
+	while( fdc_busy ) sleep_on( &fdc_wait );
+	fdc_busy = 1;
+	stdma_lock(floppy_irq, NULL);
+	atari_turnon_irq( IRQ_MFP_FDC ); /* should be already, just to be sure */
+	local_irq_restore(flags);
+
+	if (type) {
+		if (--type >= NUM_DISK_MINORS ||
+		    minor2disktype[type].drive_types > DriveType) {
+			redo_fd_request();
+			return -EINVAL;
+		}
+		type = minor2disktype[type].index;
+		UDT = &disk_type[type];
+	}
+
+	if (!UDT || desc->track >= UDT->blocks/UDT->spt/2 || desc->head >= 2) {
+		redo_fd_request();
+		return -EINVAL;
+	}
+
+	nsect = UDT->spt;
+	p = TrackBuffer;
+	/* The track buffer is used for the raw track data, so its
+	   contents become invalid! */
+	BufferDrive = -1;
+	/* stop deselect timer */
+	del_timer( &motor_off_timer );
+
+	FILL( 60 * (nsect / 9), 0x4e );
+	for( sect = 0; sect < nsect; ++sect ) {
+		FILL( 12, 0 );
+		FILL( 3, 0xf5 );
+		*p++ = 0xfe;
+		*p++ = desc->track;
+		*p++ = desc->head;
+		*p++ = (nsect + sect - desc->sect_offset) % nsect + 1;
+		*p++ = 2;
+		*p++ = 0xf7;
+		FILL( 22, 0x4e );
+		FILL( 12, 0 );
+		FILL( 3, 0xf5 );
+		*p++ = 0xfb;
+		FILL( 512, 0xe5 );
+		*p++ = 0xf7;
+		FILL( 40, 0x4e );
+	}
+	FILL( TrackBuffer+BUFFER_SIZE-p, 0x4e );
+
+	IsFormatting = 1;
+	FormatError = 0;
+	ReqTrack = desc->track;
+	ReqSide  = desc->head;
+	do_fd_action( drive );
+
+	sleep_on( &format_wait );
+
+	redo_fd_request();
+	return( FormatError ? -EIO : 0 );	
+}
+
+
+/* do_fd_action() is the general procedure for a fd request: All
+ * required parameter settings (drive select, side select, track
+ * position) are checked and set if needed. For each of these
+ * parameters and for the actual reading or writing there are two
+ * functions: one that starts the setting (or skips it if possible) and one
+ * callback for the "done" interrupt. Each done func calls the next
+ * set function to propagate the request down to fd_rwsec_done().
+ */
+
+static void do_fd_action( int drive )
+{
+	DPRINT(("do_fd_action\n"));
+	
+	if (UseTrackbuffer && !IsFormatting) {
+	repeat:
+	    if (IS_BUFFERED( drive, ReqSide, ReqTrack )) {
+		if (ReqCmd == READ) {
+		    copy_buffer( SECTOR_BUFFER(ReqSector), ReqData );
+		    if (++ReqCnt < CURRENT->current_nr_sectors) {
+			/* read next sector */
+			setup_req_params( drive );
+			goto repeat;
+		    }
+		    else {
+			/* all sectors finished */
+			CURRENT->nr_sectors -= CURRENT->current_nr_sectors;
+			CURRENT->sector += CURRENT->current_nr_sectors;
+			end_request(CURRENT, 1);
+			redo_fd_request();
+			return;
+		    }
+		}
+		else {
+		    /* cmd == WRITE, pay attention to track buffer
+		     * consistency! */
+		    copy_buffer( ReqData, SECTOR_BUFFER(ReqSector) );
+		}
+	    }
+	}
+
+	if (SelectedDrive != drive)
+		fd_select_drive( drive );
+    
+	if (UD.track == -1)
+		fd_calibrate();
+	else if (UD.track != ReqTrack << UDT->stretch)
+		fd_seek();
+	else if (IsFormatting)
+		fd_writetrack();
+	else
+		fd_rwsec();
+}
+
+
+/* Seek to track 0 if the current track is unknown */
+
+static void fd_calibrate( void )
+{
+	if (SUD.track >= 0) {
+		fd_calibrate_done( 0 );
+		return;
+	}
+
+	if (ATARIHW_PRESENT(FDCSPEED))
+		dma_wd.fdc_speed = 0;	/* always seek with 8 MHz */
+	DPRINT(("fd_calibrate\n"));
+	SET_IRQ_HANDLER( fd_calibrate_done );
+	/* we can't verify, since the speed may be incorrect */
+	FDC_WRITE( FDCREG_CMD, FDCCMD_RESTORE | SUD.steprate );
+
+	NeedSeek = 1;
+	MotorOn = 1;
+	start_timeout();
+	/* wait for IRQ */
+}
+
+
+static void fd_calibrate_done( int status )
+{
+	DPRINT(("fd_calibrate_done()\n"));
+	stop_timeout();
+    
+	/* set the correct speed now */
+	if (ATARIHW_PRESENT(FDCSPEED))
+		dma_wd.fdc_speed = SUDT->fdc_speed;
+	if (status & FDCSTAT_RECNF) {
+		printk(KERN_ERR "fd%d: restore failed\n", SelectedDrive );
+		fd_error();
+	}
+	else {
+		SUD.track = 0;
+		fd_seek();
+	}
+}
+  
+  
+/* Seek the drive to the requested track. The drive must have been
+ * calibrated at some point before this.
+ */
+  
+static void fd_seek( void )
+{
+	if (SUD.track == ReqTrack << SUDT->stretch) {
+		fd_seek_done( 0 );
+		return;
+	}
+
+	if (ATARIHW_PRESENT(FDCSPEED)) {
+		dma_wd.fdc_speed = 0;	/* always seek with 8 MHz */
+		MFPDELAY();
+	}
+
+	DPRINT(("fd_seek() to track %d\n",ReqTrack));
+	FDC_WRITE( FDCREG_DATA, ReqTrack << SUDT->stretch);
+	udelay(25);
+	SET_IRQ_HANDLER( fd_seek_done );
+	FDC_WRITE( FDCREG_CMD, FDCCMD_SEEK | SUD.steprate );
+
+	MotorOn = 1;
+	set_head_settle_flag();
+	start_timeout();
+	/* wait for IRQ */
+}
+
+
+static void fd_seek_done( int status )
+{
+	DPRINT(("fd_seek_done()\n"));
+	stop_timeout();
+	
+	/* set the correct speed */
+	if (ATARIHW_PRESENT(FDCSPEED))
+		dma_wd.fdc_speed = SUDT->fdc_speed;
+	if (status & FDCSTAT_RECNF) {
+		printk(KERN_ERR "fd%d: seek error (to track %d)\n",
+				SelectedDrive, ReqTrack );
+		/* we don't know exactly which track we are on now! */
+		SUD.track = -1;
+		fd_error();
+	}
+	else {
+		SUD.track = ReqTrack << SUDT->stretch;
+		NeedSeek = 0;
+		if (IsFormatting)
+			fd_writetrack();
+		else
+			fd_rwsec();
+	}
+}
+
+
+/* This does the actual reading/writing after positioning the head
+ * over the correct track.
+ */
+
+static int MultReadInProgress = 0;
+
+
+static void fd_rwsec( void )
+{
+	unsigned long paddr, flags;
+	unsigned int  rwflag, old_motoron;
+	unsigned int track;
+	
+	DPRINT(("fd_rwsec(), Sec=%d, Access=%c\n",ReqSector, ReqCmd == WRITE ? 'w' : 'r' ));
+	if (ReqCmd == WRITE) {
+		if (ATARIHW_PRESENT(EXTD_DMA)) {
+			paddr = virt_to_phys(ReqData);
+		}
+		else {
+			copy_buffer( ReqData, DMABuffer );
+			paddr = PhysDMABuffer;
+		}
+		dma_cache_maintenance( paddr, 512, 1 );
+		rwflag = 0x100;
+	}
+	else {
+		if (read_track)
+			paddr = PhysTrackBuffer;
+		else
+			paddr = ATARIHW_PRESENT(EXTD_DMA) ? 
+				virt_to_phys(ReqData) : PhysDMABuffer;
+		rwflag = 0;
+	}
+
+	fd_select_side( ReqSide );
+  
+	/* Start sector of this operation */
+	FDC_WRITE( FDCREG_SECTOR, read_track ? 1 : ReqSector );
+	MFPDELAY();
+	/* Cheat for track if stretch != 0 */
+	if (SUDT->stretch) {
+		track = FDC_READ( FDCREG_TRACK);
+		MFPDELAY();
+		FDC_WRITE( FDCREG_TRACK, track >> SUDT->stretch);
+	}
+	udelay(25);
+  
+	/* Setup DMA */
+	local_irq_save(flags);
+	dma_wd.dma_lo = (unsigned char)paddr;
+	MFPDELAY();
+	paddr >>= 8;
+	dma_wd.dma_md = (unsigned char)paddr;
+	MFPDELAY();
+	paddr >>= 8;
+	if (ATARIHW_PRESENT(EXTD_DMA))
+		st_dma_ext_dmahi = (unsigned short)paddr;
+	else
+		dma_wd.dma_hi = (unsigned char)paddr;
+	MFPDELAY();
+	local_irq_restore(flags);
+  
+	/* Clear FIFO and switch DMA to correct mode */  
+	dma_wd.dma_mode_status = 0x90 | rwflag;  
+	MFPDELAY();
+	dma_wd.dma_mode_status = 0x90 | (rwflag ^ 0x100);  
+	MFPDELAY();
+	dma_wd.dma_mode_status = 0x90 | rwflag;
+	MFPDELAY();
+  
+	/* How many sectors for DMA */
+	dma_wd.fdc_acces_seccount = read_track ? SUDT->spt : 1;
+  
+	udelay(25);  
+  
+	/* Start operation */
+	dma_wd.dma_mode_status = FDCSELREG_STP | rwflag;
+	udelay(25);
+	SET_IRQ_HANDLER( fd_rwsec_done );
+	dma_wd.fdc_acces_seccount =
+	  (get_head_settle_flag() |
+	   (rwflag ? FDCCMD_WRSEC : (FDCCMD_RDSEC | (read_track ? FDCCMDADD_M : 0))));
+
+	old_motoron = MotorOn;
+	MotorOn = 1;
+	NeedSeek = 1;
+	/* wait for interrupt */
+
+	if (read_track) {
+		/* If reading a whole track, wait about one disk rotation and
+		 * then check if all sectors have been read. The FDC will even
+		 * search for the first non-existent sector and needs about a
+		 * second to recognise that it isn't present :-(
+		 */
+		MultReadInProgress = 1;
+		mod_timer(&readtrack_timer,
+			  /* 1 rot. + 5 rot.s if motor was off  */
+			  jiffies + HZ/5 + (old_motoron ? 0 : HZ));
+	}
+	start_timeout();
+}
+
+    
+static void fd_readtrack_check( unsigned long dummy )
+{
+	unsigned long flags, addr, addr2;
+
+	local_irq_save(flags);
+
+	if (!MultReadInProgress) {
+		/* This prevents a race condition that could arise if the
+		 * interrupt fires while this timer callback is being invoked:
+		 * the IRQ handler has then already cleared
+		 * 'MultReadInProgress' by the time control gets here.
+		 */
+		local_irq_restore(flags);
+		return;
+	}
+
+	/* get the current DMA address */
+	/* ++ f.a. read twice to avoid being fooled by switcher */
+	addr = 0;
+	do {
+		addr2 = addr;
+		addr = dma_wd.dma_lo & 0xff;
+		MFPDELAY();
+		addr |= (dma_wd.dma_md & 0xff) << 8;
+		MFPDELAY();
+		if (ATARIHW_PRESENT( EXTD_DMA ))
+			addr |= (st_dma_ext_dmahi & 0xffff) << 16;
+		else
+			addr |= (dma_wd.dma_hi & 0xff) << 16;
+		MFPDELAY();
+	} while(addr != addr2);
+  
+	if (addr >= PhysTrackBuffer + SUDT->spt*512) {
+		/* already read enough data, force an FDC interrupt to stop
+		 * the read operation
+		 */
+		SET_IRQ_HANDLER( NULL );
+		MultReadInProgress = 0;
+		local_irq_restore(flags);
+		DPRINT(("fd_readtrack_check(): done\n"));
+		FDC_WRITE( FDCREG_CMD, FDCCMD_FORCI );
+		udelay(25);
+
+		/* No error until now -- the FDC would have interrupted
+		 * otherwise!
+		 */
+		fd_rwsec_done1(0);
+	}
+	else {
+		/* not yet finished, wait another tenth rotation */
+		local_irq_restore(flags);
+		DPRINT(("fd_readtrack_check(): not yet finished\n"));
+		mod_timer(&readtrack_timer, jiffies + HZ/5/10);
+	}
+}
+
+
+static void fd_rwsec_done( int status )
+{
+	DPRINT(("fd_rwsec_done()\n"));
+
+	if (read_track) {
+		del_timer(&readtrack_timer);
+		if (!MultReadInProgress)
+			return;
+		MultReadInProgress = 0;
+	}
+	fd_rwsec_done1(status);
+}
+
+static void fd_rwsec_done1(int status)
+{
+	unsigned int track;
+
+	stop_timeout();
+	
+	/* Correct the track if stretch != 0 */
+	if (SUDT->stretch) {
+		track = FDC_READ( FDCREG_TRACK);
+		MFPDELAY();
+		FDC_WRITE( FDCREG_TRACK, track << SUDT->stretch);
+	}
+
+	if (!UseTrackbuffer) {
+		dma_wd.dma_mode_status = 0x90;
+		MFPDELAY();
+		if (!(dma_wd.dma_mode_status & 0x01)) {
+			printk(KERN_ERR "fd%d: DMA error\n", SelectedDrive );
+			goto err_end;
+		}
+	}
+	MFPDELAY();
+
+	if (ReqCmd == WRITE && (status & FDCSTAT_WPROT)) {
+		printk(KERN_NOTICE "fd%d: is write protected\n", SelectedDrive );
+		goto err_end;
+	}	
+	if ((status & FDCSTAT_RECNF) &&
+	    /* RECNF is no error after a multiple read when the FDC
+	       searched for a non-existent sector! */
+	    !(read_track && FDC_READ(FDCREG_SECTOR) > SUDT->spt)) {
+		if (Probing) {
+			if (SUDT > disk_type) {
+			    if (SUDT[-1].blocks > ReqBlock) {
+				/* try another disk type */
+				SUDT--;
+				set_capacity(unit[SelectedDrive].disk,
+							SUDT->blocks);
+			    } else
+				Probing = 0;
+			}
+			else {
+				if (SUD.flags & FTD_MSG)
+					printk(KERN_INFO "fd%d: Auto-detected floppy type %s\n",
+					       SelectedDrive, SUDT->name );
+				Probing=0;
+			}
+		} else {
+			/* Record not found, but not probing. Maybe the
+			 * stretch is wrong? Restart probing. */
+			if (SUD.autoprobe) {
+				SUDT = disk_type + StartDiskType[DriveType];
+				set_capacity(unit[SelectedDrive].disk,
+							SUDT->blocks);
+				Probing = 1;
+			}
+		}
+		if (Probing) {
+			if (ATARIHW_PRESENT(FDCSPEED)) {
+				dma_wd.fdc_speed = SUDT->fdc_speed;
+				MFPDELAY();
+			}
+			setup_req_params( SelectedDrive );
+			BufferDrive = -1;
+			do_fd_action( SelectedDrive );
+			return;
+		}
+
+		printk(KERN_ERR "fd%d: sector %d not found (side %d, track %d)\n",
+		       SelectedDrive, FDC_READ (FDCREG_SECTOR), ReqSide, ReqTrack );
+		goto err_end;
+	}
+	if (status & FDCSTAT_CRC) {
+		printk(KERN_ERR "fd%d: CRC error (side %d, track %d, sector %d)\n",
+		       SelectedDrive, ReqSide, ReqTrack, FDC_READ (FDCREG_SECTOR) );
+		goto err_end;
+	}
+	if (status & FDCSTAT_LOST) {
+		printk(KERN_ERR "fd%d: lost data (side %d, track %d, sector %d)\n",
+		       SelectedDrive, ReqSide, ReqTrack, FDC_READ (FDCREG_SECTOR) );
+		goto err_end;
+	}
+
+	Probing = 0;
+	
+	if (ReqCmd == READ) {
+		if (!read_track) {
+			void *addr;
+			addr = ATARIHW_PRESENT( EXTD_DMA ) ? ReqData : DMABuffer;
+			dma_cache_maintenance( virt_to_phys(addr), 512, 0 );
+			if (!ATARIHW_PRESENT( EXTD_DMA ))
+				copy_buffer (addr, ReqData);
+		} else {
+			dma_cache_maintenance( PhysTrackBuffer, MaxSectors[DriveType] * 512, 0 );
+			BufferDrive = SelectedDrive;
+			BufferSide  = ReqSide;
+			BufferTrack = ReqTrack;
+			copy_buffer (SECTOR_BUFFER (ReqSector), ReqData);
+		}
+	}
+  
+	if (++ReqCnt < CURRENT->current_nr_sectors) {
+		/* read next sector */
+		setup_req_params( SelectedDrive );
+		do_fd_action( SelectedDrive );
+	}
+	else {
+		/* all sectors finished */
+		CURRENT->nr_sectors -= CURRENT->current_nr_sectors;
+		CURRENT->sector += CURRENT->current_nr_sectors;
+		end_request(CURRENT, 1);
+		redo_fd_request();
+	}
+	return;
+  
+  err_end:
+	BufferDrive = -1;
+	fd_error();
+}
+
+
+static void fd_writetrack( void )
+{
+	unsigned long paddr, flags;
+	unsigned int track;
+	
+	DPRINT(("fd_writetrack() Tr=%d Si=%d\n", ReqTrack, ReqSide ));
+
+	paddr = PhysTrackBuffer;
+	dma_cache_maintenance( paddr, BUFFER_SIZE, 1 );
+
+	fd_select_side( ReqSide );
+  
+	/* Cheat for track if stretch != 0 */
+	if (SUDT->stretch) {
+		track = FDC_READ( FDCREG_TRACK);
+		MFPDELAY();
+		FDC_WRITE(FDCREG_TRACK,track >> SUDT->stretch);
+	}
+	udelay(40);
+  
+	/* Setup DMA */
+	local_irq_save(flags);
+	dma_wd.dma_lo = (unsigned char)paddr;
+	MFPDELAY();
+	paddr >>= 8;
+	dma_wd.dma_md = (unsigned char)paddr;
+	MFPDELAY();
+	paddr >>= 8;
+	if (ATARIHW_PRESENT( EXTD_DMA ))
+		st_dma_ext_dmahi = (unsigned short)paddr;
+	else
+		dma_wd.dma_hi = (unsigned char)paddr;
+	MFPDELAY();
+	local_irq_restore(flags);
+  
+	/* Clear FIFO and switch DMA to correct mode */  
+	dma_wd.dma_mode_status = 0x190;  
+	MFPDELAY();
+	dma_wd.dma_mode_status = 0x90;  
+	MFPDELAY();
+	dma_wd.dma_mode_status = 0x190;
+	MFPDELAY();
+  
+	/* How many sectors for DMA */
+	dma_wd.fdc_acces_seccount = BUFFER_SIZE/512;
+	udelay(40);  
+  
+	/* Start operation */
+	dma_wd.dma_mode_status = FDCSELREG_STP | 0x100;
+	udelay(40);
+	SET_IRQ_HANDLER( fd_writetrack_done );
+	dma_wd.fdc_acces_seccount = FDCCMD_WRTRA | get_head_settle_flag(); 
+
+	MotorOn = 1;
+	start_timeout();
+	/* wait for interrupt */
+}
+
+
+static void fd_writetrack_done( int status )
+{
+	DPRINT(("fd_writetrack_done()\n"));
+
+	stop_timeout();
+
+	if (status & FDCSTAT_WPROT) {
+		printk(KERN_NOTICE "fd%d: is write protected\n", SelectedDrive );
+		goto err_end;
+	}	
+	if (status & FDCSTAT_LOST) {
+		printk(KERN_ERR "fd%d: lost data (side %d, track %d)\n",
+				SelectedDrive, ReqSide, ReqTrack );
+		goto err_end;
+	}
+
+	wake_up( &format_wait );
+	return;
+
+  err_end:
+	fd_error();
+}
+
+static void fd_times_out( unsigned long dummy )
+{
+	atari_disable_irq( IRQ_MFP_FDC );
+	if (!FloppyIRQHandler) goto end; /* int occurred after timer was fired, but
+					  * before we came here... */
+
+	SET_IRQ_HANDLER( NULL );
+	/* If the timeout occurred while the readtrack_check timer was
+	 * active, we need to cancel it, else bad things will happen */
+	if (UseTrackbuffer)
+		del_timer( &readtrack_timer );
+	FDC_WRITE( FDCREG_CMD, FDCCMD_FORCI );
+	udelay( 25 );
+	
+	printk(KERN_ERR "floppy timeout\n" );
+	fd_error();
+  end:
+	atari_enable_irq( IRQ_MFP_FDC );
+}
+
+
+/* The (noop) seek operation here is needed to make the WP bit in the
+ * FDC status register accessible for check_change. If the last disk
+ * operation was a RDSEC, this bit would always read as 0 no matter
+ * what :-( To save time, the seek goes to the track we're already on.
+ */
+
+static void finish_fdc( void )
+{
+	if (!NeedSeek) {
+		finish_fdc_done( 0 );
+	}
+	else {
+		DPRINT(("finish_fdc: dummy seek started\n"));
+		FDC_WRITE (FDCREG_DATA, SUD.track);
+		SET_IRQ_HANDLER( finish_fdc_done );
+		FDC_WRITE (FDCREG_CMD, FDCCMD_SEEK);
+		MotorOn = 1;
+		start_timeout();
+		/* we must wait for the IRQ here, because the ST-DMA
+		   is released immediately afterwards and the interrupt
+		   may be delivered to the wrong driver. */
+	  }
+}
+
+
+static void finish_fdc_done( int dummy )
+{
+	unsigned long flags;
+
+	DPRINT(("finish_fdc_done entered\n"));
+	stop_timeout();
+	NeedSeek = 0;
+
+	if (timer_pending(&fd_timer) && time_before(fd_timer.expires, jiffies + 5))
+		/* If the check for a disk change is done too early after this
+		 * last seek command, the WP bit still reads wrong :-((
+		 */
+		mod_timer(&fd_timer, jiffies + 5);
+	else
+		start_check_change_timer();
+	start_motor_off_timer();
+
+	local_irq_save(flags);
+	stdma_release();
+	fdc_busy = 0;
+	wake_up( &fdc_wait );
+	local_irq_restore(flags);
+
+	DPRINT(("finish_fdc() finished\n"));
+}
+
+/* The detection of disk changes is a dark chapter in Atari history :-(
+ * Because the "Drive ready" signal isn't present in the Atari
+ * hardware, one has to rely on the "Write Protect". This works fine,
+ * as long as no write protected disks are used. TOS solves this
+ * problem by introducing tri-state logic ("maybe changed") and
+ * looking at the serial number in block 0. This isn't possible for
+ * Linux, since the floppy driver can't make assumptions about the
+ * filesystem used on the disk and thus the contents of block 0. I've
+ * chosen to always say "the disk was changed" whenever it is unsure
+ * whether it was. This implies that every open or mount
+ * invalidates the disk buffers if you work with write protected
+ * disks. But at least this is better than working with incorrect data
+ * due to unrecognised disk changes.
+ */
+
+static int check_floppy_change(struct gendisk *disk)
+{
+	struct atari_floppy_struct *p = disk->private_data;
+	unsigned int drive = p - unit;
+	if (test_bit (drive, &fake_change)) {
+		/* simulated change (e.g. after formatting) */
+		return 1;
+	}
+	if (test_bit (drive, &changed_floppies)) {
+		/* surely changed (the WP signal changed at least once) */
+		return 1;
+	}
+	if (UD.wpstat) {
+		/* WP is on -> could be changed: to be sure, buffers should be
+		 * invalidated...
+		 */
+		return 1;
+	}
+
+	return 0;
+}
+
+static int floppy_revalidate(struct gendisk *disk)
+{
+	struct atari_floppy_struct *p = disk->private_data;
+	unsigned int drive = p - unit;
+
+	if (test_bit(drive, &changed_floppies) ||
+	    test_bit(drive, &fake_change) ||
+	    p->disktype == 0) {
+		if (UD.flags & FTD_MSG)
+			printk(KERN_ERR "floppy: clear format %p!\n", UDT);
+		BufferDrive = -1;
+		clear_bit(drive, &fake_change);
+		clear_bit(drive, &changed_floppies);
+		/* MSch: clearing geometry makes sense only for autoprobe
+		   formats, for 'permanent user-defined' parameter:
+		   restore default_params[] here if flagged valid! */
+		if (default_params[drive].blocks == 0)
+			UDT = 0;
+		else
+			UDT = &default_params[drive];
+	}
+	return 0;
+}
+
+
+/* This sets up the global variables describing the current request. */
+
+static void setup_req_params( int drive )
+{
+	int block = ReqBlock + ReqCnt;
+
+	ReqTrack = block / UDT->spt;
+	ReqSector = block - ReqTrack * UDT->spt + 1;
+	ReqSide = ReqTrack & 1;
+	ReqTrack >>= 1;
+	ReqData = ReqBuffer + 512 * ReqCnt;
+
+	if (UseTrackbuffer)
+		read_track = (ReqCmd == READ && CURRENT->errors == 0);
+	else
+		read_track = 0;
+
+	DPRINT(("Request params: Si=%d Tr=%d Se=%d Data=%08lx\n",ReqSide,
+			ReqTrack, ReqSector, (unsigned long)ReqData ));
+}
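+
+/* Worked example (editor's note): on a 9-sectors-per-track format,
+ * linear block 20 gives ReqTrack = 20/9 = 2 and ReqSector = 20 - 18 + 1
+ * = 3; the side is then 2 & 1 = 0 and the cylinder 2 >> 1 = 1, i.e.
+ * cylinder 1, side 0, sector 3.
+ */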
+
+
+static void redo_fd_request(void)
+{
+	int drive, type;
+	struct atari_floppy_struct *floppy;
+
+	DPRINT(("redo_fd_request: CURRENT=%p dev=%s CURRENT->sector=%ld\n",
+		CURRENT, CURRENT ? CURRENT->rq_disk->disk_name : "",
+		CURRENT ? CURRENT->sector : 0 ));
+
+	IsFormatting = 0;
+
+repeat:
+
+	if (!CURRENT)
+		goto the_end;
+
+	floppy = CURRENT->rq_disk->private_data;
+	drive = floppy - unit;
+	type = floppy->type;
+	
+	if (!UD.connected) {
+		/* drive not connected */
+		printk(KERN_ERR "Unknown Device: fd%d\n", drive );
+		end_request(CURRENT, 0);
+		goto repeat;
+	}
+		
+	if (type == 0) {
+		if (!UDT) {
+			Probing = 1;
+			UDT = disk_type + StartDiskType[DriveType];
+			set_capacity(floppy->disk, UDT->blocks);
+			UD.autoprobe = 1;
+		}
+	} 
+	else {
+		/* user supplied disk type */
+		if (--type >= NUM_DISK_MINORS) {
+			printk(KERN_WARNING "fd%d: invalid disk format", drive );
+			end_request(CURRENT, 0);
+			goto repeat;
+		}
+		if (minor2disktype[type].drive_types > DriveType)  {
+			printk(KERN_WARNING "fd%d: unsupported disk format", drive );
+			end_request(CURRENT, 0);
+			goto repeat;
+		}
+		type = minor2disktype[type].index;
+		UDT = &disk_type[type];
+		set_capacity(floppy->disk, UDT->blocks);
+		UD.autoprobe = 0;
+	}
+	
+	if (CURRENT->sector + 1 > UDT->blocks) {
+		end_request(CURRENT, 0);
+		goto repeat;
+	}
+
+	/* stop deselect timer */
+	del_timer( &motor_off_timer );
+		
+	ReqCnt = 0;
+	ReqCmd = rq_data_dir(CURRENT);
+	ReqBlock = CURRENT->sector;
+	ReqBuffer = CURRENT->buffer;
+	setup_req_params( drive );
+	do_fd_action( drive );
+
+	return;
+
+  the_end:
+	finish_fdc();
+}
+
+
+void do_fd_request(request_queue_t * q)
+{
+ 	unsigned long flags;
+
+	DPRINT(("do_fd_request for pid %d\n",current->pid));
+	while( fdc_busy ) sleep_on( &fdc_wait );
+	fdc_busy = 1;
+	stdma_lock(floppy_irq, NULL);
+
+	atari_disable_irq( IRQ_MFP_FDC );
+	/* The request function is called with interrupts disabled, so the
+	 * IPL must be saved here to be restored later. */
+	local_save_flags(flags);
+	local_irq_disable();
+	redo_fd_request();
+	local_irq_restore(flags);
+	atari_enable_irq( IRQ_MFP_FDC );
+}
+
+static int fd_ioctl(struct inode *inode, struct file *filp,
+		    unsigned int cmd, unsigned long param)
+{
+	struct gendisk *disk = inode->i_bdev->bd_disk;
+	struct atari_floppy_struct *floppy = disk->private_data;
+	int drive = floppy - unit;
+	int type = floppy->type;
+	struct atari_format_descr fmt_desc;
+	struct atari_disk_type *dtp;
+	struct floppy_struct getprm;
+	int settype;
+	struct floppy_struct setprm;
+
+	switch (cmd) {
+	case FDGETPRM:
+		if (type) {
+			if (--type >= NUM_DISK_MINORS)
+				return -ENODEV;
+			if (minor2disktype[type].drive_types > DriveType)
+				return -ENODEV;
+			type = minor2disktype[type].index;
+			dtp = &disk_type[type];
+			if (UD.flags & FTD_MSG)
+			    printk (KERN_ERR "floppy%d: found dtp %p name %s!\n",
+			        drive, dtp, dtp->name);
+		}
+		else {
+			if (!UDT)
+				return -ENXIO;
+			else
+				dtp = UDT;
+		}
+		memset((void *)&getprm, 0, sizeof(getprm));
+		getprm.size = dtp->blocks;
+		getprm.sect = dtp->spt;
+		getprm.head = 2;
+		getprm.track = dtp->blocks/dtp->spt/2;
+		getprm.stretch = dtp->stretch;
+		if (copy_to_user((void *)param, &getprm, sizeof(getprm)))
+			return -EFAULT;
+		return 0;
+	}
+	switch (cmd) {
+	case FDSETPRM:
+	case FDDEFPRM:
+	        /* 
+		 * MSch 7/96: simple 'set geometry' case: just set the
+		 * 'default' device params (minor == 0).
+		 * Currently, the drive geometry is cleared after each
+		 * disk change and subsequent revalidate()! Simple
+		 * implementation of FDDEFPRM: save the geometry from a
+		 * FDDEFPRM call and restore it in floppy_revalidate().
+		 */
+
+		/* get the parameters from user space */
+		if (floppy->ref != 1 && floppy->ref != -1)
+			return -EBUSY;
+		if (copy_from_user(&setprm, (void *) param, sizeof(setprm)))
+			return -EFAULT;
+		/* 
+		 * first of all: check for floppy change and revalidate, 
+		 * or the next access will revalidate - and clear UDT :-(
+		 */
+
+		if (check_floppy_change(disk))
+		        floppy_revalidate(disk);
+
+		if (UD.flags & FTD_MSG)
+		    printk (KERN_INFO "floppy%d: setting size %d spt %d str %d!\n",
+			drive, setprm.size, setprm.sect, setprm.stretch);
+
+		/* what if type > 0 here? Overwrite specified entry ? */
+		if (type) {
+		        /* refuse to re-set a predefined type for now */
+			redo_fd_request();
+			return -EINVAL;
+		}
+
+		/* 
+		 * type == 0: first look for a matching entry in the type list,
+		 * and set the UD.disktype field to use the predefined entry.
+		 * TODO: add user-defined format to head of autoprobe list?
+		 * Useful to include the user-type for future autodetection!
+		 */
+
+		for (settype = 0; settype < NUM_DISK_MINORS; settype++) {
+			int setidx = 0;
+			if (minor2disktype[settype].drive_types > DriveType) {
+				/* skip this one, invalid for drive ... */
+				continue;
+			}
+			setidx = minor2disktype[settype].index;
+			dtp = &disk_type[setidx];
+
+			/* found matching entry ?? */
+			if (   dtp->blocks  == setprm.size 
+			    && dtp->spt     == setprm.sect
+			    && dtp->stretch == setprm.stretch ) {
+				if (UD.flags & FTD_MSG)
+				    printk (KERN_INFO "floppy%d: setting %s %p!\n",
+				        drive, dtp->name, dtp);
+				UDT = dtp;
+				set_capacity(floppy->disk, UDT->blocks);
+
+				if (cmd == FDDEFPRM) {
+				  /* save settings as permanent default type */
+				  default_params[drive].name    = dtp->name;
+				  default_params[drive].spt     = dtp->spt;
+				  default_params[drive].blocks  = dtp->blocks;
+				  default_params[drive].fdc_speed = dtp->fdc_speed;
+				  default_params[drive].stretch = dtp->stretch;
+				}
+				
+				return 0;
+			}
+
+		}
+
+		/* no matching disk type found above - setting user_params */
+
+	       	if (cmd == FDDEFPRM) {
+			/* set permanent type */
+			dtp = &default_params[drive];
+		} else
+			/* set user type (reset by disk change!) */
+			dtp = &user_params[drive];
+
+		dtp->name   = "user format";
+		dtp->blocks = setprm.size;
+		dtp->spt    = setprm.sect;
+		if (setprm.sect > 14) 
+			dtp->fdc_speed = 3;
+		else
+			dtp->fdc_speed = 0;
+		dtp->stretch = setprm.stretch;
+
+		if (UD.flags & FTD_MSG)
+			printk (KERN_INFO "floppy%d: blk %d spt %d str %d!\n",
+				drive, dtp->blocks, dtp->spt, dtp->stretch);
+
+		/* sanity check */
+		if (!dtp || setprm.track != dtp->blocks/dtp->spt/2 ||
+		    setprm.head != 2) {
+			redo_fd_request();
+			return -EINVAL;
+		}
+
+		UDT = dtp;
+		set_capacity(floppy->disk, UDT->blocks);
+
+		return 0;
+	case FDMSGON:
+		UD.flags |= FTD_MSG;
+		return 0;
+	case FDMSGOFF:
+		UD.flags &= ~FTD_MSG;
+		return 0;
+	case FDSETEMSGTRESH:
+		return -EINVAL;
+	case FDFMTBEG:
+		return 0;
+	case FDFMTTRK:
+		if (floppy->ref != 1 && floppy->ref != -1)
+			return -EBUSY;
+		if (copy_from_user(&fmt_desc, (void *) param, sizeof(fmt_desc)))
+			return -EFAULT;
+		return do_format(drive, type, &fmt_desc);
+	case FDCLRPRM:
+		UDT = NULL;
+		/* MSch: invalidate default_params */
+		default_params[drive].blocks  = 0;
+		set_capacity(floppy->disk, MAX_DISK_SIZE * 2);
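+		/* fall through: FDCLRPRM also invalidates the buffer track */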
+	case FDFMTEND:
+	case FDFLUSH:
+		/* invalidate the buffer track to force a reread */
+		BufferDrive = -1;
+		set_bit(drive, &fake_change);
+		check_disk_change(inode->i_bdev);
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+
+/* Initialize the 'unit' variable for drive 'drive' */
+
+static void __init fd_probe( int drive )
+{
+	UD.connected = 0;
+	UDT  = NULL;
+
+	if (!fd_test_drive_present( drive ))
+		return;
+
+	UD.connected = 1;
+	UD.track     = 0;
+	switch( UserSteprate[drive] ) {
+	case 2:
+		UD.steprate = FDCSTEP_2;
+		break;
+	case 3:
+		UD.steprate = FDCSTEP_3;
+		break;
+	case 6:
+		UD.steprate = FDCSTEP_6;
+		break;
+	case 12:
+		UD.steprate = FDCSTEP_12;
+		break;
+	default: /* should be -1 for "not set by user" */
+		if (ATARIHW_PRESENT( FDCSPEED ) || MACH_IS_MEDUSA)
+			UD.steprate = FDCSTEP_3;
+		else
+			UD.steprate = FDCSTEP_6;
+		break;
+	}
+	MotorOn = 1;	/* from probe restore operation! */
+}
+
+
+/* This function tests the physical presence of a floppy drive (not
+ * whether a disk is inserted). This is done by issuing a restore
+ * command, waiting max. 2 seconds (that should be enough to move the
+ * head across the whole disk) and looking at the state of the "TR00"
+ * signal. This should now be raised if there is a drive connected
+ * (and there is no hardware failure :-) Otherwise, the drive is
+ * declared absent.
+ */
+
+static int __init fd_test_drive_present( int drive )
+{
+	unsigned long timeout;
+	unsigned char status;
+	int ok;
+	
+	if (drive >= (MACH_IS_FALCON ? 1 : 2)) return( 0 );
+	fd_select_drive( drive );
+
+	/* disable interrupt temporarily */
+	atari_turnoff_irq( IRQ_MFP_FDC );
+	FDC_WRITE (FDCREG_TRACK, 0xff00);
+	FDC_WRITE( FDCREG_CMD, FDCCMD_RESTORE | FDCCMDADD_H | FDCSTEP_6 );
+
+	timeout = jiffies + 2*HZ+HZ/2;
+	while (time_before(jiffies, timeout))
+		if (!(mfp.par_dt_reg & 0x20))
+			break;
+
+	status = FDC_READ( FDCREG_STATUS );
+	ok = (status & FDCSTAT_TR00) != 0;
+
+	/* force interrupt to abort restore operation (FDC would try
+	 * about 50 seconds!) */
+	FDC_WRITE( FDCREG_CMD, FDCCMD_FORCI );
+	udelay(500);
+	status = FDC_READ( FDCREG_STATUS );
+	udelay(20);
+
+	if (ok) {
+		/* dummy seek command to make WP bit accessible */
+		FDC_WRITE( FDCREG_DATA, 0 );
+		FDC_WRITE( FDCREG_CMD, FDCCMD_SEEK );
+		while( mfp.par_dt_reg & 0x20 )
+			;
+		status = FDC_READ( FDCREG_STATUS );
+	}
+
+	atari_turnon_irq( IRQ_MFP_FDC );
+	return( ok );
+}
+
+
+/* Check how many and which kinds of drives are connected. If there are
+ * floppies, additionally start the disk-change and motor-off timers.
+ */
+
+static void __init config_types( void )
+{
+	int drive, cnt = 0;
+
+	/* for probing drives, set the FDC speed to 8 MHz */
+	if (ATARIHW_PRESENT(FDCSPEED))
+		dma_wd.fdc_speed = 0;
+
+	printk(KERN_INFO "Probing floppy drive(s):\n");
+	for( drive = 0; drive < FD_MAX_UNITS; drive++ ) {
+		fd_probe( drive );
+		if (UD.connected) {
+			printk(KERN_INFO "fd%d\n", drive);
+			++cnt;
+		}
+	}
+
+	if (FDC_READ( FDCREG_STATUS ) & FDCSTAT_BUSY) {
+		/* If FDC is still busy from probing, give it another FORCI
+		 * command to abort the operation. If this isn't done, the FDC
+		 * will interrupt later and its IRQ line stays low, because
+		 * the status register isn't read. And this will block any
+		 * interrupts on this IRQ line :-(
+		 */
+		FDC_WRITE( FDCREG_CMD, FDCCMD_FORCI );
+		udelay(500);
+		FDC_READ( FDCREG_STATUS );
+		udelay(20);
+	}
+	
+	if (cnt > 0) {
+		start_motor_off_timer();
+		if (cnt == 1) fd_select_drive( 0 );
+		start_check_change_timer();
+	}
+}
+
+/*
+ * floppy_open checks for aliasing (/dev/fd0 can be the same as
+ * /dev/PS0 etc.) and disallows simultaneous access to the same
+ * drive with different device numbers.
+ */
+
+static int floppy_open( struct inode *inode, struct file *filp )
+{
+	struct atari_floppy_struct *p = inode->i_bdev->bd_disk->private_data;
+	int type  = iminor(inode) >> 2;
+
+	DPRINT(("fd_open: type=%d\n",type));
+	if (p->ref && p->type != type)
+		return -EBUSY;
+
+	if (p->ref == -1 || (p->ref && filp->f_flags & O_EXCL))
+		return -EBUSY;
+
+	if (filp->f_flags & O_EXCL)
+		p->ref = -1;
+	else
+		p->ref++;
+
+	p->type = type;
+
+	if (filp->f_flags & O_NDELAY)
+		return 0;
+
+	if (filp->f_mode & 3) {
+		check_disk_change(inode->i_bdev);
+		if (filp->f_mode & 2) {
+			if (p->wpstat) {
+				if (p->ref < 0)
+					p->ref = 0;
+				else
+					p->ref--;
+				floppy_release(inode, filp);
+				return -EROFS;
+			}
+		}
+	}
+	return 0;
+}
+
+
+static int floppy_release( struct inode * inode, struct file * filp )
+{
+	struct atari_floppy_struct *p = inode->i_bdev->bd_disk->private_data;
+	if (p->ref < 0)
+		p->ref = 0;
+	else if (!p->ref--) {
+		printk(KERN_ERR "floppy_release with fd_ref == 0");
+		p->ref = 0;
+	}
+	return 0;
+}
+
+static struct block_device_operations floppy_fops = {
+	.owner		= THIS_MODULE,
+	.open		= floppy_open,
+	.release	= floppy_release,
+	.ioctl		= fd_ioctl,
+	.media_changed	= check_floppy_change,
+	.revalidate_disk= floppy_revalidate,
+};
+
+static struct kobject *floppy_find(dev_t dev, int *part, void *data)
+{
+	int drive = *part & 3;
+	int type  = *part >> 2;
+	if (drive >= FD_MAX_UNITS || type > NUM_DISK_MINORS)
+		return NULL;
+	*part = 0;
+	return get_disk(unit[drive].disk);
+}
+
+static int __init atari_floppy_init (void)
+{
+	int i;
+
+	if (!MACH_IS_ATARI)
+		/* Amiga, Mac, ... don't have Atari-compatible floppy :-) */
+		return -ENXIO;
+
+	if (MACH_IS_HADES)
+		/* Hades doesn't have Atari-compatible floppy */
+		return -ENXIO;
+
+	if (register_blkdev(FLOPPY_MAJOR,"fd"))
+		return -EBUSY;
+
+	for (i = 0; i < FD_MAX_UNITS; i++) {
+		unit[i].disk = alloc_disk(1);
+		if (!unit[i].disk)
+			goto Enomem;
+	}
+
+	if (UseTrackbuffer < 0)
+		/* not set by user -> use default: for now, we turn
+		   track buffering off for all Medusas, though it
+		   could be used with ones that have a counter
+		   card. But the test is too hard :-( */
+		UseTrackbuffer = !MACH_IS_MEDUSA;
+
+	/* initialize variables */
+	SelectedDrive = -1;
+	BufferDrive = -1;
+
+	DMABuffer = atari_stram_alloc(BUFFER_SIZE+512, "ataflop");
+	if (!DMABuffer) {
+		printk(KERN_ERR "atari_floppy_init: cannot get dma buffer\n");
+		goto Enomem;
+	}
+	TrackBuffer = DMABuffer + 512;
+	PhysDMABuffer = virt_to_phys(DMABuffer);
+	PhysTrackBuffer = virt_to_phys(TrackBuffer);
+	BufferDrive = BufferSide = BufferTrack = -1;
+
+	floppy_queue = blk_init_queue(do_fd_request, &ataflop_lock);
+	if (!floppy_queue)
+		goto Enomem;
+
+	for (i = 0; i < FD_MAX_UNITS; i++) {
+		unit[i].track = -1;
+		unit[i].flags = 0;
+		unit[i].disk->major = FLOPPY_MAJOR;
+		unit[i].disk->first_minor = i;
+		sprintf(unit[i].disk->disk_name, "fd%d", i);
+		unit[i].disk->fops = &floppy_fops;
+		unit[i].disk->private_data = &unit[i];
+		unit[i].disk->queue = floppy_queue;
+		set_capacity(unit[i].disk, MAX_DISK_SIZE * 2);
+		add_disk(unit[i].disk);
+	}
+
+	blk_register_region(MKDEV(FLOPPY_MAJOR, 0), 256, THIS_MODULE,
+				floppy_find, NULL, NULL);
+
+	printk(KERN_INFO "Atari floppy driver: max. %cD, %strack buffering\n",
+	       DriveType == 0 ? 'D' : DriveType == 1 ? 'H' : 'E',
+	       UseTrackbuffer ? "" : "no ");
+	config_types();
+
+	return 0;
+Enomem:
+	while (i--)
+		put_disk(unit[i].disk);
+	if (floppy_queue)
+		blk_cleanup_queue(floppy_queue);
+	unregister_blkdev(FLOPPY_MAJOR, "fd");
+	return -ENOMEM;
+}
+
+
+void __init atari_floppy_setup( char *str, int *ints )
+{
+	int i;
+	
+	if (ints[0] < 1) {
+		printk(KERN_ERR "ataflop_setup: no arguments!\n" );
+		return;
+	}
+	else if (ints[0] > 2+FD_MAX_UNITS) {
+		printk(KERN_ERR "ataflop_setup: too many arguments\n" );
+	}
+
+	if (ints[1] < 0 || ints[1] > 2)
+		printk(KERN_ERR "ataflop_setup: bad drive type\n" );
+	else
+		DriveType = ints[1];
+
+	if (ints[0] >= 2)
+		UseTrackbuffer = (ints[2] > 0);
+
+	for( i = 3; i <= ints[0] && i-3 < FD_MAX_UNITS; ++i ) {
+		if (ints[i] != 2 && ints[i] != 3 && ints[i] != 6 && ints[i] != 12)
+			printk(KERN_ERR "ataflop_setup: bad steprate\n" );
+		else
+			UserSteprate[i-3] = ints[i];
+	}
+}
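+
+/* Usage example (editor's note, assuming the usual wiring of this setup
+ * routine to a "floppy=" kernel option): "floppy=1,1,6" would set
+ * DriveType = 1 (HD), enable track buffering, and use a step rate of 6
+ * for the first unit, matching the parsing above (ints[1] = drive type
+ * 0..2, ints[2] = track buffering on/off, ints[3..] = per-drive step
+ * rates of 2, 3, 6 or 12).
+ */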
+
+static void atari_floppy_exit(void)
+{
+	int i;
+	blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
+	for (i = 0; i < FD_MAX_UNITS; i++) {
+		del_gendisk(unit[i].disk);
+		put_disk(unit[i].disk);
+	}
+	unregister_blkdev(FLOPPY_MAJOR, "fd");
+
+	blk_cleanup_queue(floppy_queue);
+	del_timer_sync(&fd_timer);
+	atari_stram_free( DMABuffer );
+}
+
+module_init(atari_floppy_init)
+module_exit(atari_floppy_exit)
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
new file mode 100644
index 0000000..8f7c1a1
--- /dev/null
+++ b/drivers/block/cciss.c
@@ -0,0 +1,2976 @@
+/*
+ *    Disk Array driver for HP SA 5xxx and 6xxx Controllers
+ *    Copyright 2000, 2002 Hewlett-Packard Development Company, L.P.
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2 of the License, or
+ *    (at your option) any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *    NON INFRINGEMENT.  See the GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
+ *
+ */
+
+#include <linux/config.h>	/* CONFIG_PROC_FS */
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/major.h>
+#include <linux/fs.h>
+#include <linux/bio.h>
+#include <linux/blkpg.h>
+#include <linux/timer.h>
+#include <linux/proc_fs.h>
+#include <linux/init.h> 
+#include <linux/hdreg.h>
+#include <linux/spinlock.h>
+#include <linux/compat.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+
+#include <linux/blkdev.h>
+#include <linux/genhd.h>
+#include <linux/completion.h>
+
+#define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
+#define DRIVER_NAME "HP CISS Driver (v 2.6.6)"
+#define DRIVER_VERSION CCISS_DRIVER_VERSION(2,6,6)
+
+/* Embedded module documentation macros - see modules.h */
+MODULE_AUTHOR("Hewlett-Packard Company");
+MODULE_DESCRIPTION("Driver for HP Controller SA5xxx SA6xxx version 2.6.6");
+MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400"
+			" SA6i P600 P800 E400");
+MODULE_LICENSE("GPL");
+
+#include "cciss_cmd.h"
+#include "cciss.h"
+#include <linux/cciss_ioctl.h>
+
+/* define the PCI info for the cards we can control */
+static const struct pci_device_id cciss_pci_device_id[] = {
+	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISS,
+			0x0E11, 0x4070, 0, 0, 0},
+	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
+                        0x0E11, 0x4080, 0, 0, 0},
+	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
+                        0x0E11, 0x4082, 0, 0, 0},
+	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSB,
+                        0x0E11, 0x4083, 0, 0, 0},
+	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
+		0x0E11, 0x409A, 0, 0, 0},
+	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
+		0x0E11, 0x409B, 0, 0, 0},
+	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
+		0x0E11, 0x409C, 0, 0, 0},
+	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
+		0x0E11, 0x409D, 0, 0, 0},
+	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_CISSC,
+		0x0E11, 0x4091, 0, 0, 0},
+	{ PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSA,
+		0x103C, 0x3225, 0, 0, 0},
+	{ PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSB,
+		0x103c, 0x3223, 0, 0, 0},
+	{ PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSB,
+		0x103c, 0x3231, 0, 0, 0},
+	{0,}
+};
+MODULE_DEVICE_TABLE(pci, cciss_pci_device_id);
+
+#define NR_PRODUCTS (sizeof(products)/sizeof(struct board_type))
+
+/*  board_id = Subsystem Device ID & Vendor ID
+ *  product = Marketing Name for the board
+ *  access = Address of the struct of function pointers 
+ */
+static struct board_type products[] = {
+	{ 0x40700E11, "Smart Array 5300", &SA5_access },
+	{ 0x40800E11, "Smart Array 5i", &SA5B_access},
+	{ 0x40820E11, "Smart Array 532", &SA5B_access},
+	{ 0x40830E11, "Smart Array 5312", &SA5B_access},
+	{ 0x409A0E11, "Smart Array 641", &SA5_access},
+	{ 0x409B0E11, "Smart Array 642", &SA5_access},
+	{ 0x409C0E11, "Smart Array 6400", &SA5_access},
+	{ 0x409D0E11, "Smart Array 6400 EM", &SA5_access},
+	{ 0x40910E11, "Smart Array 6i", &SA5_access},
+	{ 0x3225103C, "Smart Array P600", &SA5_access},
+	{ 0x3223103C, "Smart Array P800", &SA5_access},
+	{ 0x3231103C, "Smart Array E400", &SA5_access},
+};
+
+/* How long to wait (in milliseconds) for board to go into simple mode */
+#define MAX_CONFIG_WAIT 30000 
+#define MAX_IOCTL_CONFIG_WAIT 1000
+
+/* define how many times we will try a command because of bus resets */
+#define MAX_CMD_RETRIES 3
+
+#define READ_AHEAD 	 1024
+#define NR_CMDS		 384 /* #commands that can be outstanding */
+#define MAX_CTLR	32
+
+/* Originally the cciss driver only supported 8 major numbers */
+#define MAX_CTLR_ORIG 	8
+
+
+#define CCISS_DMA_MASK	0xFFFFFFFF	/* 32 bit DMA */
+
+static ctlr_info_t *hba[MAX_CTLR];
+
+static void do_cciss_request(request_queue_t *q);
+static int cciss_open(struct inode *inode, struct file *filep);
+static int cciss_release(struct inode *inode, struct file *filep);
+static int cciss_ioctl(struct inode *inode, struct file *filep, 
+		unsigned int cmd, unsigned long arg);
+
+static int revalidate_allvol(ctlr_info_t *host);
+static int cciss_revalidate(struct gendisk *disk);
+static int deregister_disk(struct gendisk *disk);
+static int register_new_disk(ctlr_info_t *h);
+
+static void cciss_getgeometry(int cntl_num);
+
+static void start_io( ctlr_info_t *h);
+static int sendcmd( __u8 cmd, int ctlr, void *buff, size_t size,
+	unsigned int use_unit_num, unsigned int log_unit, __u8 page_code,
+	unsigned char *scsi3addr, int cmd_type);
+
+#ifdef CONFIG_PROC_FS
+static int cciss_proc_get_info(char *buffer, char **start, off_t offset, 
+		int length, int *eof, void *data);
+static void cciss_procinit(int i);
+#else
+static void cciss_procinit(int i) {}
+#endif /* CONFIG_PROC_FS */
+
+#ifdef CONFIG_COMPAT
+static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg);
+#endif
+
+static struct block_device_operations cciss_fops  = {
+	.owner		= THIS_MODULE,
+	.open		= cciss_open, 
+	.release       	= cciss_release,
+        .ioctl		= cciss_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl   = cciss_compat_ioctl,
+#endif
+	.revalidate_disk= cciss_revalidate,
+};
+
+/*
+ * Enqueuing and dequeuing functions for cmdlists.
+ */
+static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c)
+{
+        if (*Qptr == NULL) {
+                *Qptr = c;
+                c->next = c->prev = c;
+        } else {
+                c->prev = (*Qptr)->prev;
+                c->next = (*Qptr);
+                (*Qptr)->prev->next = c;
+                (*Qptr)->prev = c;
+        }
+}
+
+static inline CommandList_struct *removeQ(CommandList_struct **Qptr, 
+						CommandList_struct *c)
+{
+        if (c && c->next != c) {
+                if (*Qptr == c) *Qptr = c->next;
+                c->prev->next = c->next;
+                c->next->prev = c->prev;
+        } else {
+                *Qptr = NULL;
+        }
+        return c;
+}
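+
+/* Usage sketch (editor's note): callers manipulate the queue under
+ * CCISS_LOCK, e.g.
+ *
+ *	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
+ *	addQ(&host->reqQ, c);
+ *	host->Qdepth++;
+ *	start_io(host);
+ *	spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+ *
+ * as done in the CCISS_PASSTHRU ioctl below. removeQ() unlinks a command
+ * from the circular doubly linked list, emptying the list when the
+ * command was its only element.
+ */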
+
+#include "cciss_scsi.c"		/* For SCSI tape support */
+
+#ifdef CONFIG_PROC_FS
+
+/*
+ * Report information about this controller.
+ */
+#define ENG_GIG 1000000000
+#define ENG_GIG_FACTOR (ENG_GIG/512)
+#define RAID_UNKNOWN 6
+static const char *raid_label[] = {"0","4","1(1+0)","5","5+1","ADG",
+	                                   "UNKNOWN"};
+
+static struct proc_dir_entry *proc_cciss;
+
+static int cciss_proc_get_info(char *buffer, char **start, off_t offset, 
+		int length, int *eof, void *data)
+{
+        off_t pos = 0;
+        off_t len = 0;
+        int size, i, ctlr;
+        ctlr_info_t *h = (ctlr_info_t*)data;
+        drive_info_struct *drv;
+	unsigned long flags;
+        sector_t vol_sz, vol_sz_frac;
+
+        ctlr = h->ctlr;
+
+	/* prevent displaying bogus info during configuration
+	 * or deconfiguration of a logical volume
+	 */
+	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
+	if (h->busy_configuring) {
+		spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+		return -EBUSY;
+	}
+	h->busy_configuring = 1;
+	spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+
+        size = sprintf(buffer, "%s: HP %s Controller\n"
+		"Board ID: 0x%08lx\n"
+		"Firmware Version: %c%c%c%c\n"
+		"IRQ: %d\n"
+		"Logical drives: %d\n"
+		"Current Q depth: %d\n"
+		"Current # commands on controller: %d\n"
+		"Max Q depth since init: %d\n"
+		"Max # commands on controller since init: %d\n"
+		"Max SG entries since init: %d\n\n",
+                h->devname,
+                h->product_name,
+                (unsigned long)h->board_id,
+		h->firm_ver[0], h->firm_ver[1], h->firm_ver[2], h->firm_ver[3],
+                (unsigned int)h->intr,
+                h->num_luns, 
+		h->Qdepth, h->commands_outstanding,
+		h->maxQsinceinit, h->max_outstanding, h->maxSG);
+
+        pos += size; len += size;
+	cciss_proc_tape_report(ctlr, buffer, &pos, &len);
+	for(i=0; i<=h->highest_lun; i++) {
+
+                drv = &h->drv[i];
+		if (drv->block_size == 0)
+			continue;
+
+		vol_sz = drv->nr_blocks;
+		vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR);
+		vol_sz_frac *= 100;
+		sector_div(vol_sz_frac, ENG_GIG_FACTOR);
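+		/* Example (editor's note): with ENG_GIG_FACTOR = 1953125,
+		 * a volume of 100000000 sectors yields vol_sz = 51 and
+		 * vol_sz_frac = (390625 * 100) / 1953125 = 20, printed as
+		 * "51.20GB" below; sector_div() divides in place and
+		 * returns the remainder.
+		 */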
+
+		if (drv->raid_level > 5)
+			drv->raid_level = RAID_UNKNOWN;
+		size = sprintf(buffer+len, "cciss/c%dd%d:"
+				"\t%4u.%02uGB\tRAID %s\n",
+				ctlr, i, (int)vol_sz, (int)vol_sz_frac,
+				raid_label[drv->raid_level]);
+                pos += size; len += size;
+        }
+
+        *eof = 1;
+        *start = buffer+offset;
+        len -= offset;
+        if (len>length)
+                len = length;
+	h->busy_configuring = 0;
+        return len;
+}
+
+static int 
+cciss_proc_write(struct file *file, const char __user *buffer, 
+			unsigned long count, void *data)
+{
+	unsigned char cmd[80];
+	int len;
+#ifdef CONFIG_CISS_SCSI_TAPE
+	ctlr_info_t *h = (ctlr_info_t *) data;
+	int rc;
+#endif
+
+	if (count > sizeof(cmd)-1) return -EINVAL;
+	if (copy_from_user(cmd, buffer, count)) return -EFAULT;
+	cmd[count] = '\0';
+	len = strlen(cmd);	// above 3 lines ensure safety
+	if (len && cmd[len-1] == '\n')
+		cmd[--len] = '\0';
+#	ifdef CONFIG_CISS_SCSI_TAPE
+		if (strcmp("engage scsi", cmd)==0) {
+			rc = cciss_engage_scsi(h->ctlr);
+			if (rc != 0) return -rc;
+			return count;
+		}
+		/* might be nice to have "disengage" too, but it's not 
+		   safely possible. (only 1 module use count, lock issues.) */
+#	endif
+	return -EINVAL;
+}
+
+/*
+ * Get us a file in /proc/cciss that says something about each controller.
+ * Create /proc/cciss if it doesn't exist yet.
+ */
+static void __devinit cciss_procinit(int i)
+{
+	struct proc_dir_entry *pde;
+
+        if (proc_cciss == NULL) {
+                proc_cciss = proc_mkdir("cciss", proc_root_driver);
+                if (!proc_cciss) 
+			return;
+        }
+
+	pde = create_proc_read_entry(hba[i]->devname, 
+		S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH, 
+		proc_cciss, cciss_proc_get_info, hba[i]);
+	pde->write_proc = cciss_proc_write;
+}
+#endif /* CONFIG_PROC_FS */
+
+/*
+ * For operations that cannot sleep, a command block is allocated at init,
+ * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track
+ * which ones are free or in use.  For operations that can wait for kmalloc
+ * to possibly sleep, this routine can be called with get_from_pool set to 0.
+ * cmd_free() MUST be called with got_from_pool set to 0 if cmd_alloc() was
+ * called with get_from_pool set to 0.
+ */
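+
+/* Pairing sketch (editor's note): a block taken from the pool must be
+ * returned to it with a matching flag, e.g.
+ *
+ *	c = cmd_alloc(h, 1);	... cmd_free(h, c, 1);	(bitmap pool)
+ *	c = cmd_alloc(h, 0);	... cmd_free(h, c, 0);	(pci_alloc_consistent)
+ *
+ * The pool path only sets a bit and memsets a preallocated block, so it
+ * is safe where sleeping is not allowed.
+ */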
+static CommandList_struct * cmd_alloc(ctlr_info_t *h, int get_from_pool)
+{
+	CommandList_struct *c;
+	int i; 
+	u64bit temp64;
+	dma_addr_t cmd_dma_handle, err_dma_handle;
+
+	if (!get_from_pool)
+	{
+		c = (CommandList_struct *) pci_alloc_consistent(
+			h->pdev, sizeof(CommandList_struct), &cmd_dma_handle); 
+        	if(c==NULL)
+                 	return NULL;
+		memset(c, 0, sizeof(CommandList_struct));
+
+		c->err_info = (ErrorInfo_struct *)pci_alloc_consistent(
+					h->pdev, sizeof(ErrorInfo_struct), 
+					&err_dma_handle);
+	
+		if (c->err_info == NULL)
+		{
+			pci_free_consistent(h->pdev, 
+				sizeof(CommandList_struct), c, cmd_dma_handle);
+			return NULL;
+		}
+		memset(c->err_info, 0, sizeof(ErrorInfo_struct));
+	} else /* get it out of the controllers pool */ 
+	{
+	     	do {
+                	i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS);
+                        if (i == NR_CMDS)
+                                return NULL;
+                } while(test_and_set_bit(i & (BITS_PER_LONG - 1), h->cmd_pool_bits+(i/BITS_PER_LONG)) != 0);
+#ifdef CCISS_DEBUG
+		printk(KERN_DEBUG "cciss: using command buffer %d\n", i);
+#endif
+                c = h->cmd_pool + i;
+		memset(c, 0, sizeof(CommandList_struct));
+		cmd_dma_handle = h->cmd_pool_dhandle 
+					+ i*sizeof(CommandList_struct);
+		c->err_info = h->errinfo_pool + i;
+		memset(c->err_info, 0, sizeof(ErrorInfo_struct));
+		err_dma_handle = h->errinfo_pool_dhandle 
+					+ i*sizeof(ErrorInfo_struct);
+                h->nr_allocs++;
+        }
+
+	c->busaddr = (__u32) cmd_dma_handle;
+	temp64.val = (__u64) err_dma_handle;	
+	c->ErrDesc.Addr.lower = temp64.val32.lower;
+	c->ErrDesc.Addr.upper = temp64.val32.upper;
+	c->ErrDesc.Len = sizeof(ErrorInfo_struct);
+	
+	c->ctlr = h->ctlr;
+        return c;
+
+
+}
+
+/* 
+ * Frees a command block that was previously allocated with cmd_alloc(). 
+ */
+static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool)
+{
+	int i;
+	u64bit temp64;
+
+	if( !got_from_pool)
+	{ 
+		temp64.val32.lower = c->ErrDesc.Addr.lower;
+		temp64.val32.upper = c->ErrDesc.Addr.upper;
+		pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct), 
+			c->err_info, (dma_addr_t) temp64.val);
+		pci_free_consistent(h->pdev, sizeof(CommandList_struct), 
+			c, (dma_addr_t) c->busaddr);
+	} else 
+	{
+		i = c - h->cmd_pool;
+		clear_bit(i&(BITS_PER_LONG-1), h->cmd_pool_bits+(i/BITS_PER_LONG));
+                h->nr_frees++;
+        }
+}
+
+static inline ctlr_info_t *get_host(struct gendisk *disk)
+{
+	return disk->queue->queuedata; 
+}
+
+static inline drive_info_struct *get_drv(struct gendisk *disk)
+{
+	return disk->private_data;
+}
+
+/*
+ * Open.  Make sure the device is really there.
+ */
+static int cciss_open(struct inode *inode, struct file *filep)
+{
+	ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
+	drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
+
+#ifdef CCISS_DEBUG
+	printk(KERN_DEBUG "cciss_open %s\n", inode->i_bdev->bd_disk->disk_name);
+#endif /* CCISS_DEBUG */ 
+
+	/*
+	 * Root is allowed to open raw volume zero even if it's not configured
+	 * so array config can still work. Root is also allowed to open any
+	 * volume that has a LUN ID, so it can issue IOCTLs to reread the
+	 * disk information.  I don't think I really like this,
+	 * but I'm already using way too many device nodes to claim another
+	 * one for "raw controller".
+	 */
+	if (drv->nr_blocks == 0) {
+		if (iminor(inode) != 0) {	/* not node 0? */
+			/* if not node 0, make sure it is partition 0 */
+			if (iminor(inode) & 0x0f) {
+				return -ENXIO;
+			/* if it is, make sure we have a LUN ID */
+			} else if (drv->LunID == 0) {
+				return -ENXIO;
+			}
+		}
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+	}
+	drv->usage_count++;
+	host->usage_count++;
+	return 0;
+}
+/*
+ * Close.  Sync first.
+ */
+static int cciss_release(struct inode *inode, struct file *filep)
+{
+	ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
+	drive_info_struct *drv = get_drv(inode->i_bdev->bd_disk);
+
+#ifdef CCISS_DEBUG
+	printk(KERN_DEBUG "cciss_release %s\n", inode->i_bdev->bd_disk->disk_name);
+#endif /* CCISS_DEBUG */
+
+	drv->usage_count--;
+	host->usage_count--;
+	return 0;
+}
+
+#ifdef CONFIG_COMPAT
+
+static int do_ioctl(struct file *f, unsigned cmd, unsigned long arg)
+{
+	int ret;
+	lock_kernel();
+	ret = cciss_ioctl(f->f_dentry->d_inode, f, cmd, arg);
+	unlock_kernel();
+	return ret;
+}
+
+static int cciss_ioctl32_passthru(struct file *f, unsigned cmd, unsigned long arg);
+static int cciss_ioctl32_big_passthru(struct file *f, unsigned cmd, unsigned long arg);
+
+static long cciss_compat_ioctl(struct file *f, unsigned cmd, unsigned long arg)
+{
+	switch (cmd) {
+	case CCISS_GETPCIINFO:
+	case CCISS_GETINTINFO:
+	case CCISS_SETINTINFO:
+	case CCISS_GETNODENAME:
+	case CCISS_SETNODENAME:
+	case CCISS_GETHEARTBEAT:
+	case CCISS_GETBUSTYPES:
+	case CCISS_GETFIRMVER:
+	case CCISS_GETDRIVVER:
+	case CCISS_REVALIDVOLS:
+	case CCISS_DEREGDISK:
+	case CCISS_REGNEWDISK:
+	case CCISS_REGNEWD:
+	case CCISS_RESCANDISK:
+	case CCISS_GETLUNINFO:
+		return do_ioctl(f, cmd, arg);
+
+	case CCISS_PASSTHRU32:
+		return cciss_ioctl32_passthru(f, cmd, arg);
+	case CCISS_BIG_PASSTHRU32:
+		return cciss_ioctl32_big_passthru(f, cmd, arg);
+
+	default:
+		return -ENOIOCTLCMD;
+	}
+}
+
+static int cciss_ioctl32_passthru(struct file *f, unsigned cmd, unsigned long arg)
+{
+	IOCTL32_Command_struct __user *arg32 =
+		(IOCTL32_Command_struct __user *) arg;
+	IOCTL_Command_struct arg64;
+	IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
+	int err;
+	u32 cp;
+
+	err = 0;
+	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, sizeof(arg64.LUN_info));
+	err |= copy_from_user(&arg64.Request, &arg32->Request, sizeof(arg64.Request));
+	err |= copy_from_user(&arg64.error_info, &arg32->error_info, sizeof(arg64.error_info));
+	err |= get_user(arg64.buf_size, &arg32->buf_size);
+	err |= get_user(cp, &arg32->buf);
+	arg64.buf = compat_ptr(cp);
+	err |= copy_to_user(p, &arg64, sizeof(arg64));
+
+	if (err)
+		return -EFAULT;
+
+	err = do_ioctl(f, CCISS_PASSTHRU, (unsigned long) p);
+	if (err)
+		return err;
+	err |= copy_in_user(&arg32->error_info, &p->error_info, sizeof(arg32->error_info));
+	if (err)
+		return -EFAULT;
+	return err;
+}
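+
+/* Editor's note: the translator above follows the common compat-ioctl
+ * pattern: copy the 32-bit layout field by field into a native struct
+ * placed in user-accessible space via compat_alloc_user_space(), forward
+ * it to the native ioctl handler, then copy the error_info result back
+ * into the caller's 32-bit struct. cciss_ioctl32_big_passthru() below
+ * does the same with the additional malloc_size field.
+ */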
+
+static int cciss_ioctl32_big_passthru(struct file *file, unsigned cmd, unsigned long arg)
+{
+	BIG_IOCTL32_Command_struct __user *arg32 =
+		(BIG_IOCTL32_Command_struct __user *) arg;
+	BIG_IOCTL_Command_struct arg64;
+	BIG_IOCTL_Command_struct __user *p = compat_alloc_user_space(sizeof(arg64));
+	int err;
+	u32 cp;
+
+	err = 0;
+	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, sizeof(arg64.LUN_info));
+	err |= copy_from_user(&arg64.Request, &arg32->Request, sizeof(arg64.Request));
+	err |= copy_from_user(&arg64.error_info, &arg32->error_info, sizeof(arg64.error_info));
+	err |= get_user(arg64.buf_size, &arg32->buf_size);
+	err |= get_user(arg64.malloc_size, &arg32->malloc_size);
+	err |= get_user(cp, &arg32->buf);
+	arg64.buf = compat_ptr(cp);
+	err |= copy_to_user(p, &arg64, sizeof(arg64));
+
+	if (err)
+		 return -EFAULT;
+
+	err = do_ioctl(file, CCISS_BIG_PASSTHRU, (unsigned long) p);
+	if (err)
+		return err;
+	err |= copy_in_user(&arg32->error_info, &p->error_info, sizeof(arg32->error_info));
+	if (err)
+		return -EFAULT;
+	return err;
+}
+#endif
+/*
+ * ioctl 
+ */
+static int cciss_ioctl(struct inode *inode, struct file *filep, 
+		unsigned int cmd, unsigned long arg)
+{
+	struct block_device *bdev = inode->i_bdev;
+	struct gendisk *disk = bdev->bd_disk;
+	ctlr_info_t *host = get_host(disk);
+	drive_info_struct *drv = get_drv(disk);
+	int ctlr = host->ctlr;
+	void __user *argp = (void __user *)arg;
+
+#ifdef CCISS_DEBUG
+	printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg);
+#endif /* CCISS_DEBUG */ 
+	
+	switch(cmd) {
+	case HDIO_GETGEO:
+	{
+                struct hd_geometry driver_geo;
+                if (drv->cylinders) {
+                        driver_geo.heads = drv->heads;
+                        driver_geo.sectors = drv->sectors;
+                        driver_geo.cylinders = drv->cylinders;
+                } else
+			return -ENXIO;
+                driver_geo.start= get_start_sect(inode->i_bdev);
+                if (copy_to_user(argp, &driver_geo, sizeof(struct hd_geometry)))
+                        return  -EFAULT;
+                return(0);
+	}
+
+	case CCISS_GETPCIINFO:
+	{
+		cciss_pci_info_struct pciinfo;
+
+		if (!arg) return -EINVAL;
+		pciinfo.bus = host->pdev->bus->number;
+		pciinfo.dev_fn = host->pdev->devfn;
+		pciinfo.board_id = host->board_id;
+		if (copy_to_user(argp, &pciinfo,  sizeof( cciss_pci_info_struct )))
+			return  -EFAULT;
+		return(0);
+	}	
+	case CCISS_GETINTINFO:
+	{
+		cciss_coalint_struct intinfo;
+		if (!arg) return -EINVAL;
+		intinfo.delay = readl(&host->cfgtable->HostWrite.CoalIntDelay);
+		intinfo.count = readl(&host->cfgtable->HostWrite.CoalIntCount);
+		if (copy_to_user(argp, &intinfo, sizeof( cciss_coalint_struct )))
+			return -EFAULT;
+                return(0);
+        }
+	case CCISS_SETINTINFO:
+        {
+                cciss_coalint_struct intinfo;
+		unsigned long flags;
+		int i;
+
+		if (!arg) return -EINVAL;	
+		if (!capable(CAP_SYS_ADMIN)) return -EPERM;
+		if (copy_from_user(&intinfo, argp, sizeof( cciss_coalint_struct)))
+			return -EFAULT;
+		if ( (intinfo.delay == 0 ) && (intinfo.count == 0))
+
+		{
+//			printk("cciss_ioctl: delay and count cannot be 0\n");
+			return( -EINVAL);
+		}
+		spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
+		/* Update the field, and then ring the doorbell */ 
+		writel( intinfo.delay, 
+			&(host->cfgtable->HostWrite.CoalIntDelay));
+		writel( intinfo.count, 
+                        &(host->cfgtable->HostWrite.CoalIntCount));
+		writel( CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
+
+		for(i=0;i<MAX_IOCTL_CONFIG_WAIT;i++) {
+			if (!(readl(host->vaddr + SA5_DOORBELL) 
+					& CFGTBL_ChangeReq))
+				break;
+			/* delay and try again */
+			udelay(1000);
+		}	
+		spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+		if (i >= MAX_IOCTL_CONFIG_WAIT)
+			return -EAGAIN;
+                return(0);
+        }
+	case CCISS_GETNODENAME:
+        {
+                NodeName_type NodeName;
+		int i; 
+
+		if (!arg) return -EINVAL;
+		for(i=0;i<16;i++)
+			NodeName[i] = readb(&host->cfgtable->ServerName[i]);
+                if (copy_to_user(argp, NodeName, sizeof( NodeName_type)))
+                	return  -EFAULT;
+                return(0);
+        }
+	case CCISS_SETNODENAME:
+	{
+		NodeName_type NodeName;
+		unsigned long flags;
+		int i;
+
+		if (!arg) return -EINVAL;
+		if (!capable(CAP_SYS_ADMIN)) return -EPERM;
+		
+		if (copy_from_user(NodeName, argp, sizeof( NodeName_type)))
+			return -EFAULT;
+
+		spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
+
+			/* Update the field, and then ring the doorbell */ 
+		for(i=0;i<16;i++)
+			writeb( NodeName[i], &host->cfgtable->ServerName[i]);
+			
+		writel( CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL);
+
+		for(i=0;i<MAX_IOCTL_CONFIG_WAIT;i++) {
+			if (!(readl(host->vaddr + SA5_DOORBELL) 
+					& CFGTBL_ChangeReq))
+				break;
+			/* delay and try again */
+			udelay(1000);
+		}	
+		spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+		if (i >= MAX_IOCTL_CONFIG_WAIT)
+			return -EAGAIN;
+                return(0);
+        }
+
+	case CCISS_GETHEARTBEAT:
+        {
+                Heartbeat_type heartbeat;
+
+		if (!arg) return -EINVAL;
+                heartbeat = readl(&host->cfgtable->HeartBeat);
+                if (copy_to_user(argp, &heartbeat, sizeof( Heartbeat_type)))
+                	return -EFAULT;
+                return(0);
+        }
+	case CCISS_GETBUSTYPES:
+        {
+                BusTypes_type BusTypes;
+
+		if (!arg) return -EINVAL;
+                BusTypes = readl(&host->cfgtable->BusTypes);
+                if (copy_to_user(argp, &BusTypes, sizeof( BusTypes_type) ))
+                	return  -EFAULT;
+                return(0);
+        }
+	case CCISS_GETFIRMVER:
+        {
+		FirmwareVer_type firmware;
+
+		if (!arg) return -EINVAL;
+		memcpy(firmware, host->firm_ver, 4);
+
+                if (copy_to_user(argp, firmware, sizeof( FirmwareVer_type)))
+                	return -EFAULT;
+                return(0);
+        }
+        case CCISS_GETDRIVVER:
+        {
+		DriverVer_type DriverVer = DRIVER_VERSION;
+
+                if (!arg) return -EINVAL;
+
+                if (copy_to_user(argp, &DriverVer, sizeof( DriverVer_type) ))
+                	return -EFAULT;
+                return(0);
+        }
+
+	case CCISS_REVALIDVOLS:
+		if (bdev != bdev->bd_contains || drv != host->drv)
+			return -ENXIO;
+                return revalidate_allvol(host);
+
+	case CCISS_GETLUNINFO: {
+		LogvolInfo_struct luninfo;
+		int i;
+
+		luninfo.LunID = drv->LunID;
+		luninfo.num_opens = drv->usage_count;
+		luninfo.num_parts = 0;
+		/* count partitions 1 to 15 with sizes > 0 */
+		for (i = 0; i < MAX_PART - 1; i++) {
+			if (!disk->part[i])
+				continue;
+			if (disk->part[i]->nr_sects != 0)
+				luninfo.num_parts++;
+		}
+		if (copy_to_user(argp, &luninfo,
+				sizeof(LogvolInfo_struct)))
+			return -EFAULT;
+		return 0;
+	}
+	case CCISS_DEREGDISK:
+		return deregister_disk(disk);
+
+	case CCISS_REGNEWD:
+		return register_new_disk(host);
+
+	case CCISS_PASSTHRU:
+	{
+		IOCTL_Command_struct iocommand;
+		CommandList_struct *c;
+		char 	*buff = NULL;
+		u64bit	temp64;
+		unsigned long flags;
+		DECLARE_COMPLETION(wait);
+
+		if (!arg) return -EINVAL;
+	
+		if (!capable(CAP_SYS_RAWIO)) return -EPERM;
+
+		if (copy_from_user(&iocommand, argp, sizeof( IOCTL_Command_struct) ))
+			return -EFAULT;
+		if ((iocommand.buf_size < 1) &&
+				(iocommand.Request.Type.Direction != XFER_NONE))
+		{
+			return -EINVAL;
+		}
+#if 0 /* 'buf_size' member is 16-bits, and always smaller than kmalloc limit */
+		/* Check kmalloc limits */
+		if(iocommand.buf_size > 128000)
+			return -EINVAL;
+#endif
+		if (iocommand.buf_size > 0)
+		{
+			buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
+			if (buff == NULL)
+				return -ENOMEM;
+		}
+		if (iocommand.Request.Type.Direction == XFER_WRITE)
+		{
+			/* Copy the data into the buffer we created */ 
+			if (copy_from_user(buff, iocommand.buf, iocommand.buf_size))
+			{
+				kfree(buff);
+				return -EFAULT;
+			}
+		} else if (iocommand.buf_size > 0) {
+			/* kmalloc does not zero the buffer */
+			memset(buff, 0, iocommand.buf_size);
+		}
+		if ((c = cmd_alloc(host, 0)) == NULL)
+		{
+			kfree(buff);
+			return -ENOMEM;
+		}
+		// Fill in the command type
+		c->cmd_type = CMD_IOCTL_PEND;
+		// Fill in Command Header
+		c->Header.ReplyQueue = 0;  // unused in simple mode
+		if (iocommand.buf_size > 0)	// buffer to fill
+		{
+			c->Header.SGList = 1;
+			c->Header.SGTotal = 1;
+		} else	// no buffers to fill
+		{
+			c->Header.SGList = 0;
+			c->Header.SGTotal = 0;
+		}
+		c->Header.LUN = iocommand.LUN_info;
+		c->Header.Tag.lower = c->busaddr;  // use the bus address of the cmd block for the tag
+		
+		// Fill in Request block 
+		c->Request = iocommand.Request; 
+	
+		// Fill in the scatter gather information
+		if (iocommand.buf_size > 0 ) 
+		{
+			temp64.val = pci_map_single(host->pdev, buff,
+					iocommand.buf_size,
+					PCI_DMA_BIDIRECTIONAL);
+			c->SG[0].Addr.lower = temp64.val32.lower;
+			c->SG[0].Addr.upper = temp64.val32.upper;
+			c->SG[0].Len = iocommand.buf_size;
+			c->SG[0].Ext = 0;  // we are not chaining
+		}
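+		/* Synchronous submission: c->waiting points at an on-stack
+		 * completion which do_cciss_intr() will complete() when the
+		 * command's tag comes back from the controller. */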
+		c->waiting = &wait;
+
+		/* Put the request on the tail of the request queue */
+		spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
+		addQ(&host->reqQ, c);
+		host->Qdepth++;
+		start_io(host);
+		spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+
+		wait_for_completion(&wait);
+
+		/* unlock the buffers from DMA (only if a mapping was made) */
+		if (iocommand.buf_size > 0) {
+			temp64.val32.lower = c->SG[0].Addr.lower;
+			temp64.val32.upper = c->SG[0].Addr.upper;
+			pci_unmap_single(host->pdev, (dma_addr_t) temp64.val,
+				iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
+		}
+
+		/* Copy the error information out */ 
+		iocommand.error_info = *(c->err_info);
+		if (copy_to_user(argp, &iocommand, sizeof(IOCTL_Command_struct)))
+		{
+			kfree(buff);
+			cmd_free(host, c, 0);
+			return -EFAULT;
+		}
+
+		if (iocommand.Request.Type.Direction == XFER_READ)
+		{
+			/* Copy the data out of the buffer we created */
+			if (copy_to_user(iocommand.buf, buff, iocommand.buf_size))
+			{
+				kfree(buff);
+				cmd_free(host, c, 0);
+				return -EFAULT;
+			}
+		}
+		kfree(buff);
+		cmd_free(host, c, 0);
+		return 0;
+	} 
+	case CCISS_BIG_PASSTHRU: {
+		BIG_IOCTL_Command_struct *ioc;
+		CommandList_struct *c;
+		unsigned char **buff = NULL;
+		int	*buff_size = NULL;
+		u64bit	temp64;
+		unsigned long flags;
+		BYTE sg_used = 0;
+		int status = 0;
+		int i;
+		DECLARE_COMPLETION(wait);
+		__u32   left;
+		__u32	sz;
+		BYTE    __user *data_ptr;
+
+		if (!arg)
+			return -EINVAL;
+		if (!capable(CAP_SYS_RAWIO))
+			return -EPERM;
+		ioc = kmalloc(sizeof(*ioc), GFP_KERNEL);
+		if (!ioc) {
+			status = -ENOMEM;
+			goto cleanup1;
+		}
+		if (copy_from_user(ioc, argp, sizeof(*ioc))) {
+			status = -EFAULT;
+			goto cleanup1;
+		}
+		if ((ioc->buf_size < 1) &&
+			(ioc->Request.Type.Direction != XFER_NONE)) {
+			status = -EINVAL;
+			goto cleanup1;
+		}
+		/* Check kmalloc limits  using all SGs */
+		if (ioc->malloc_size > MAX_KMALLOC_SIZE) {
+			status = -EINVAL;
+			goto cleanup1;
+		}
+		if (ioc->buf_size > ioc->malloc_size * MAXSGENTRIES) {
+			status = -EINVAL;
+			goto cleanup1;
+		}
+		buff = kmalloc(MAXSGENTRIES * sizeof(char *), GFP_KERNEL);
+		if (!buff) {
+			status = -ENOMEM;
+			goto cleanup1;
+		}
+		memset(buff, 0, MAXSGENTRIES * sizeof(char *));
+		buff_size = (int *) kmalloc(MAXSGENTRIES * sizeof(int), 
+					GFP_KERNEL);
+		if (!buff_size) {
+			status = -ENOMEM;
+			goto cleanup1;
+		}
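+		/* Split the user buffer into malloc_size-sized chunks, one
+		 * kernel buffer per scatter/gather entry, so a transfer can
+		 * span up to MAXSGENTRIES * malloc_size bytes. */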
+		left = ioc->buf_size;
+		data_ptr = ioc->buf;
+		while (left) {
+			sz = (left > ioc->malloc_size) ? ioc->malloc_size : left;
+			buff_size[sg_used] = sz;
+			buff[sg_used] = kmalloc(sz, GFP_KERNEL);
+			if (buff[sg_used] == NULL) {
+				status = -ENOMEM;
+				goto cleanup1;
+			}
+			if (ioc->Request.Type.Direction == XFER_WRITE) {
+				if (copy_from_user(buff[sg_used], data_ptr, sz)) {
+					status = -EFAULT;
+					goto cleanup1;
+				}
+			} else {
+				memset(buff[sg_used], 0, sz);
+			}
+			left -= sz;
+			data_ptr += sz;
+			sg_used++;
+		}
+		if ((c = cmd_alloc(host , 0)) == NULL) {
+			status = -ENOMEM;
+			goto cleanup1;	
+		}
+		c->cmd_type = CMD_IOCTL_PEND;
+		c->Header.ReplyQueue = 0;
+		
+		if (ioc->buf_size > 0) {
+			c->Header.SGList = sg_used;
+			c->Header.SGTotal = sg_used;
+		} else {
+			c->Header.SGList = 0;
+			c->Header.SGTotal = 0;
+		}
+		c->Header.LUN = ioc->LUN_info;
+		c->Header.Tag.lower = c->busaddr;
+		
+		c->Request = ioc->Request;
+		if (ioc->buf_size > 0 ) {
+			int i;
+			for(i=0; i<sg_used; i++) {
+				temp64.val = pci_map_single( host->pdev, buff[i],
+					buff_size[i],
+					PCI_DMA_BIDIRECTIONAL);
+				c->SG[i].Addr.lower = temp64.val32.lower;
+				c->SG[i].Addr.upper = temp64.val32.upper;
+				c->SG[i].Len = buff_size[i];
+				c->SG[i].Ext = 0;  /* we are not chaining */
+			}
+		}
+		c->waiting = &wait;
+		/* Put the request on the tail of the request queue */
+		spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
+		addQ(&host->reqQ, c);
+		host->Qdepth++;
+		start_io(host);
+		spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+		wait_for_completion(&wait);
+		/* unlock the buffers from DMA */
+		for(i=0; i<sg_used; i++) {
+			temp64.val32.lower = c->SG[i].Addr.lower;
+			temp64.val32.upper = c->SG[i].Addr.upper;
+			pci_unmap_single( host->pdev, (dma_addr_t) temp64.val,
+				buff_size[i], PCI_DMA_BIDIRECTIONAL);
+		}
+		/* Copy the error information out */
+		ioc->error_info = *(c->err_info);
+		if (copy_to_user(argp, ioc, sizeof(*ioc))) {
+			cmd_free(host, c, 0);
+			status = -EFAULT;
+			goto cleanup1;
+		}
+		if (ioc->Request.Type.Direction == XFER_READ) {
+			/* Copy the data out of the buffer we created */
+			BYTE __user *ptr = ioc->buf;
+	        	for(i=0; i< sg_used; i++) {
+				if (copy_to_user(ptr, buff[i], buff_size[i])) {
+					cmd_free(host, c, 0);
+					status = -EFAULT;
+					goto cleanup1;
+				}
+				ptr += buff_size[i];
+			}
+		}
+		cmd_free(host, c, 0);
+		status = 0;
+cleanup1:
+		if (buff) {
+			for (i = 0; i < sg_used; i++)
+				kfree(buff[i]);
+			kfree(buff);
+		}
+		kfree(buff_size);
+		kfree(ioc);
+		return status;
+	}
+	default:
+		return -ENOTTY;
+	}
+	
+}
+
+/*
+ * revalidate_allvol is for online array config utilities.  After a
+ * utility reconfigures the drives in the array, it can use this function
+ * (through an ioctl) to make the driver zap any previous disk structs for
+ * that controller and get new ones.
+ *
+ * Right now I'm using the getgeometry() function to do this, but this
+ * function should probably be finer grained and allow you to revalidate one
+ * particular logical volume (instead of all of them on a particular
+ * controller).
+ */
+static int revalidate_allvol(ctlr_info_t *host)
+{
+	int ctlr = host->ctlr, i;
+	unsigned long flags;
+
+	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
+	if (host->usage_count > 1) {
+		spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+		printk(KERN_WARNING "cciss: Device busy for volume"
+			" revalidation (usage=%d)\n", host->usage_count);
+		return -EBUSY;
+	}
+	host->usage_count++;
+	spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+
+	for(i=0; i< NWD; i++) {
+		struct gendisk *disk = host->gendisk[i];
+		if (disk->flags & GENHD_FL_UP)
+			del_gendisk(disk);
+	}
+
+	/*
+	 * Set the partition and block size structures for all volumes
+	 * on this controller to zero.  We will reread all of this data.
+	 */
+	memset(host->drv, 0, sizeof(drive_info_struct) * CISS_MAX_LUN);
+	/*
+	 * Tell the array controller not to give us any interrupts while
+	 * we check the new geometry.  Then turn interrupts back on when
+	 * we're done.
+	 */
+	host->access.set_intr_mask(host, CCISS_INTR_OFF);
+	cciss_getgeometry(ctlr);
+	host->access.set_intr_mask(host, CCISS_INTR_ON);
+
+	/* Loop through each real device */ 
+	for (i = 0; i < NWD; i++) {
+		struct gendisk *disk = host->gendisk[i];
+		drive_info_struct *drv = &(host->drv[i]);
+		/* we must register the controller even if no disks exist */
+		/* this is for the online array utilities */
+		if (!drv->heads && i)
+			continue;
+		blk_queue_hardsect_size(host->queue, drv->block_size);
+		set_capacity(disk, drv->nr_blocks);
+		add_disk(disk);
+	}
+	host->usage_count--;
+	return 0;
+}
+
+static int deregister_disk(struct gendisk *disk)
+{
+	unsigned long flags;
+	ctlr_info_t *h = get_host(disk);
+	drive_info_struct *drv = get_drv(disk);
+	int ctlr = h->ctlr;
+
+	if (!capable(CAP_SYS_RAWIO))
+		return -EPERM;
+
+	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
+	/* make sure logical volume is NOT in use */
+	if (drv->usage_count > 1) {
+		spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+		return -EBUSY;
+	}
+	drv->usage_count++;
+	spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+
+	/* invalidate the devices and deregister the disk */ 
+	if (disk->flags & GENHD_FL_UP)
+		del_gendisk(disk);
+	/* check to see if it was the last disk */
+	if (drv == h->drv + h->highest_lun) {
+		/* if so, find the new highest lun */
+		int i, newhighest = -1;
+		for(i=0; i<h->highest_lun; i++) {
+			/* if the disk has size > 0, it is available */
+			if (h->drv[i].nr_blocks)
+				newhighest = i;
+		}
+		h->highest_lun = newhighest;
+				
+	}
+	--h->num_luns;
+	/* zero out the disk size info */ 
+	drv->nr_blocks = 0;
+	drv->block_size = 0;
+	drv->cylinders = 0;
+	drv->LunID = 0;
+	return 0;
+}
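+
+/* Build a controller command: header, request block and (for commands
+ * that move data) a single scatter/gather entry.  Used for the
+ * driver-internal command types: INQUIRY, REPORT LOG/PHYS, READ
+ * CAPACITY, cache flush and the no-op message. */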
+static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff,
+	size_t size,
+	unsigned int use_unit_num, /* 0: address the controller,
+				      1: address logical volume log_unit,
+				      2: periph device address is scsi3addr */
+	unsigned int log_unit, __u8 page_code, unsigned char *scsi3addr,
+	int cmd_type)
+{
+	ctlr_info_t *h= hba[ctlr];
+	u64bit buff_dma_handle;
+	int status = IO_OK;
+
+	c->cmd_type = CMD_IOCTL_PEND;
+	c->Header.ReplyQueue = 0;
+	if (buff != NULL) {
+		c->Header.SGList = 1;
+		c->Header.SGTotal = 1;
+	} else {
+		c->Header.SGList = 0;
+		c->Header.SGTotal = 0;
+	}
+	c->Header.Tag.lower = c->busaddr;
+
+	c->Request.Type.Type = cmd_type;
+	if (cmd_type == TYPE_CMD) {
+		switch(cmd) {
+		case  CISS_INQUIRY:
+			/* If use_unit_num == 0, this command is addressed
+			   to the controller itself, so it is a physical
+			   command: mode = 0, target = 0, nothing to write.
+			   If use_unit_num == 1, use volume set addressing:
+			   mode = 1, target = LunID.
+			   If use_unit_num == 2, use peripheral device
+			   addressing: mode = 0, target = scsi3addr. */
+			if (use_unit_num == 1) {
+				c->Header.LUN.LogDev.VolId =
+					h->drv[log_unit].LunID;
+				c->Header.LUN.LogDev.Mode = 1;
+			} else if (use_unit_num == 2) {
+				memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8);
+				c->Header.LUN.LogDev.Mode = 0;
+			}
+			/* are we trying to read a vital product page */
+			if(page_code != 0) {
+				c->Request.CDB[1] = 0x01;
+				c->Request.CDB[2] = page_code;
+			}
+			c->Request.CDBLen = 6;
+			c->Request.Type.Attribute = ATTR_SIMPLE;  
+			c->Request.Type.Direction = XFER_READ;
+			c->Request.Timeout = 0;
+			c->Request.CDB[0] =  CISS_INQUIRY;
+			c->Request.CDB[4] = size  & 0xFF;  
+		break;
+		case CISS_REPORT_LOG:
+		case CISS_REPORT_PHYS:
+			/* Talking to the controller, so it is a physical
+			   command: mode = 0, target = 0.  Nothing to write.
+			*/
+			c->Request.CDBLen = 12;
+			c->Request.Type.Attribute = ATTR_SIMPLE;
+			c->Request.Type.Direction = XFER_READ;
+			c->Request.Timeout = 0;
+			c->Request.CDB[0] = cmd;
+			c->Request.CDB[6] = (size >> 24) & 0xFF;  //MSB
+			c->Request.CDB[7] = (size >> 16) & 0xFF;
+			c->Request.CDB[8] = (size >> 8) & 0xFF;
+			c->Request.CDB[9] = size & 0xFF;
+			break;
+
+		case CCISS_READ_CAPACITY:
+			c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID;
+			c->Header.LUN.LogDev.Mode = 1;
+			c->Request.CDBLen = 10;
+			c->Request.Type.Attribute = ATTR_SIMPLE;
+			c->Request.Type.Direction = XFER_READ;
+			c->Request.Timeout = 0;
+			c->Request.CDB[0] = cmd;
+		break;
+		case CCISS_CACHE_FLUSH:
+			c->Request.CDBLen = 12;
+			c->Request.Type.Attribute = ATTR_SIMPLE;
+			c->Request.Type.Direction = XFER_WRITE;
+			c->Request.Timeout = 0;
+			c->Request.CDB[0] = BMIC_WRITE;
+			c->Request.CDB[6] = BMIC_CACHE_FLUSH;
+		break;
+		default:
+			printk(KERN_WARNING
+				"cciss%d: unknown command 0x%02x\n", ctlr, cmd);
+			return IO_ERROR;
+		}
+	} else if (cmd_type == TYPE_MSG) {
+		switch (cmd) {
+		case 3:	/* No-Op message */
+			c->Request.CDBLen = 1;
+			c->Request.Type.Attribute = ATTR_SIMPLE;
+			c->Request.Type.Direction = XFER_WRITE;
+			c->Request.Timeout = 0;
+			c->Request.CDB[0] = cmd;
+			break;
+		default:
+			printk(KERN_WARNING
+				"cciss%d: unknown message type %d\n",
+				ctlr, cmd);
+			return IO_ERROR;
+		}
+	} else {
+		printk(KERN_WARNING
+			"cciss%d: unknown command type %d\n", ctlr, cmd_type);
+		return IO_ERROR;
+	}
+	/* Fill in the scatter gather information */
+	if (size > 0) {
+		buff_dma_handle.val = (__u64) pci_map_single(h->pdev,
+			buff, size, PCI_DMA_BIDIRECTIONAL);
+		c->SG[0].Addr.lower = buff_dma_handle.val32.lower;
+		c->SG[0].Addr.upper = buff_dma_handle.val32.upper;
+		c->SG[0].Len = size;
+		c->SG[0].Ext = 0;  /* we are not chaining */
+	}
+	return status;
+}
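+
+/* Issue a command built by fill_cmd() and sleep on a completion until
+ * the interrupt handler finishes it.  Unsolicited aborts are retried
+ * up to MAX_CMD_RETRIES times before giving up. */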
+static int sendcmd_withirq(__u8	cmd,
+	int	ctlr,
+	void	*buff,
+	size_t	size,
+	unsigned int use_unit_num,
+	unsigned int log_unit,
+	__u8	page_code,
+	int cmd_type)
+{
+	ctlr_info_t *h = hba[ctlr];
+	CommandList_struct *c;
+	u64bit	buff_dma_handle;
+	unsigned long flags;
+	int return_status;
+	DECLARE_COMPLETION(wait);
+	
+	if ((c = cmd_alloc(h , 0)) == NULL)
+		return -ENOMEM;
+	return_status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
+		log_unit, page_code, NULL, cmd_type);
+	if (return_status != IO_OK) {
+		cmd_free(h, c, 0);
+		return return_status;
+	}
+resend_cmd2:
+	c->waiting = &wait;
+	
+	/* Put the request on the tail of the queue and send it */
+	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
+	addQ(&h->reqQ, c);
+	h->Qdepth++;
+	start_io(h);
+	spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+	
+	wait_for_completion(&wait);
+
+	if(c->err_info->CommandStatus != 0) 
+	{ /* an error has occurred */ 
+		switch(c->err_info->CommandStatus)
+		{
+			case CMD_TARGET_STATUS:
+				printk(KERN_WARNING "cciss: cmd %p has "
+					"completed with errors\n", c);
+				if (c->err_info->ScsiStatus)
+				{
+					printk(KERN_WARNING "cciss: cmd %p "
+						"has SCSI Status = %x\n",
+						c, c->err_info->ScsiStatus);
+				}
+			break;
+			case CMD_DATA_UNDERRUN:
+			case CMD_DATA_OVERRUN:
+			/* expected for inquire and report lun commands */
+			break;
+			case CMD_INVALID:
+				printk(KERN_WARNING "cciss: Cmd %p is "
+					"reported invalid\n", c);
+				return_status = IO_ERROR;
+			break;
+			case CMD_PROTOCOL_ERR:
+				printk(KERN_WARNING "cciss: cmd %p has "
+					"protocol error\n", c);
+				return_status = IO_ERROR;
+			break;
+			case CMD_HARDWARE_ERR:
+				printk(KERN_WARNING "cciss: cmd %p had "
+					"hardware error\n", c);
+				return_status = IO_ERROR;
+			break;
+			case CMD_CONNECTION_LOST:
+				printk(KERN_WARNING "cciss: cmd %p had "
+					"connection lost\n", c);
+				return_status = IO_ERROR;
+			break;
+			case CMD_ABORTED:
+				printk(KERN_WARNING "cciss: cmd %p was "
+					"aborted\n", c);
+				return_status = IO_ERROR;
+			break;
+			case CMD_ABORT_FAILED:
+				printk(KERN_WARNING "cciss: cmd %p reports "
+					"abort failed\n", c);
+				return_status = IO_ERROR;
+			break;
+			case CMD_UNSOLICITED_ABORT:
+				printk(KERN_WARNING 
+					"cciss%d: unsolicited abort %p\n",
+					ctlr, c);
+				if (c->retry_count < MAX_CMD_RETRIES) {
+					printk(KERN_WARNING 
+						"cciss%d: retrying %p\n", 
+						ctlr, c);
+					c->retry_count++;
+					/* erase the old error information */
+					memset(c->err_info, 0,
+						sizeof(ErrorInfo_struct));
+					return_status = IO_OK;
+					INIT_COMPLETION(wait);
+					goto resend_cmd2;
+				}
+				return_status = IO_ERROR;
+			break;
+			default:
+				printk(KERN_WARNING "cciss: cmd %p returned "
+					"unknown status %x\n", c, 
+						c->err_info->CommandStatus); 
+				return_status = IO_ERROR;
+		}
+	}	
+	/* unlock the buffers from DMA; fill_cmd() recorded the mapping
+	 * in the command's first scatter/gather entry */
+	if (size > 0) {
+		buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
+		buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
+		pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val,
+				size, PCI_DMA_BIDIRECTIONAL);
+	}
+	cmd_free(h, c, 0);
+	return return_status;
+}
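+
+/* Read a logical drive's geometry via the vendor-specific INQUIRY
+ * page 0xC1.  If the volume does not report a geometry, fall back to
+ * a synthetic one of 255 heads and 32 sectors per track. */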
+static void cciss_geometry_inquiry(int ctlr, int logvol,
+			int withirq, unsigned int total_size,
+			unsigned int block_size, InquiryData_struct *inq_buff,
+			drive_info_struct *drv)
+{
+	int return_code;
+	memset(inq_buff, 0, sizeof(InquiryData_struct));
+	if (withirq)
+		return_code = sendcmd_withirq(CISS_INQUIRY, ctlr,
+			inq_buff, sizeof(*inq_buff), 1, logvol ,0xC1, TYPE_CMD);
+	else
+		return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff,
+			sizeof(*inq_buff), 1, logvol ,0xC1, NULL, TYPE_CMD);
+	if (return_code == IO_OK) {
+		if(inq_buff->data_byte[8] == 0xFF) {
+			printk(KERN_WARNING
+				"cciss: reading geometry failed, volume "
+				"does not support reading geometry\n");
+			drv->block_size = block_size;
+			drv->nr_blocks = total_size;
+			drv->heads = 255;
+			drv->sectors = 32; // Sectors per track
+			drv->cylinders = total_size / 255 / 32;
+		} else {
+			unsigned int t;
+
+			drv->block_size = block_size;
+			drv->nr_blocks = total_size;
+			drv->heads = inq_buff->data_byte[6];
+			drv->sectors = inq_buff->data_byte[7];
+			drv->cylinders = (inq_buff->data_byte[4] & 0xff) << 8;
+			drv->cylinders += inq_buff->data_byte[5];
+			drv->raid_level = inq_buff->data_byte[8];
+			t = drv->heads * drv->sectors;
+			if (t > 1) {
+				drv->cylinders = total_size/t;
+			}
+		}
+	} else { /* Get geometry failed */
+		printk(KERN_WARNING "cciss: reading geometry failed\n");
+	}
+	printk(KERN_INFO "      heads= %d, sectors= %d, cylinders= %d\n\n",
+		drv->heads, drv->sectors, drv->cylinders);
+}
+static void
+cciss_read_capacity(int ctlr, int logvol, ReadCapdata_struct *buf,
+		int withirq, unsigned int *total_size, unsigned int *block_size)
+{
+	int return_code;
+	memset(buf, 0, sizeof(*buf));
+	if (withirq)
+		return_code = sendcmd_withirq(CCISS_READ_CAPACITY,
+			ctlr, buf, sizeof(*buf), 1, logvol, 0, TYPE_CMD);
+	else
+		return_code = sendcmd(CCISS_READ_CAPACITY,
+			ctlr, buf, sizeof(*buf), 1, logvol, 0, NULL, TYPE_CMD);
+	if (return_code == IO_OK) {
+		*total_size = be32_to_cpu(*((__be32 *) &buf->total_size[0]))+1;
+		*block_size = be32_to_cpu(*((__be32 *) &buf->block_size[0]));
+	} else { /* read capacity command failed */
+		printk(KERN_WARNING "cciss: read capacity failed\n");
+		*total_size = 0;
+		*block_size = BLOCK_SIZE;
+	}
+	printk(KERN_INFO "      blocks= %u block_size= %d\n",
+		*total_size, *block_size);
+	return;
+}
+
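+/* Called for CCISS_REGNEWD: re-read the logical volume list from the
+ * controller, locate the LUN the driver does not know about yet, place
+ * it in the first free drv[] slot and register its gendisk. */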
+static int register_new_disk(ctlr_info_t *h)
+{
+        struct gendisk *disk;
+	int ctlr = h->ctlr;
+        int i;
+	int num_luns;
+	int logvol;
+	int new_lun_found = 0;
+	int new_lun_index = 0;
+	int free_index_found = 0;
+	int free_index = 0;
+	ReportLunData_struct *ld_buff = NULL;
+	ReadCapdata_struct *size_buff = NULL;
+	InquiryData_struct *inq_buff = NULL;
+	int return_code;
+	int listlength = 0;
+	__u32 lunid = 0;
+	unsigned int block_size;
+	unsigned int total_size;
+
+        if (!capable(CAP_SYS_RAWIO))
+                return -EPERM;
+	/* if we have no space in our disk array left to add anything */
+	if(  h->num_luns >= CISS_MAX_LUN)
+		return -EINVAL;
+	
+	ld_buff = kmalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
+	if (ld_buff == NULL)
+		goto mem_msg;
+	memset(ld_buff, 0, sizeof(ReportLunData_struct));
+	size_buff = kmalloc(sizeof( ReadCapdata_struct), GFP_KERNEL);
+        if (size_buff == NULL)
+		goto mem_msg;
+	inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
+        if (inq_buff == NULL)
+		goto mem_msg;
+	
+	return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff, 
+			sizeof(ReportLunData_struct), 0, 0, 0, TYPE_CMD);
+
+	if( return_code == IO_OK)
+	{
+		
+		// printk("LUN Data\n--------------------------\n");
+
+		listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
+		listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
+		listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;	
+		listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
+	} else /* reading number of logical volumes failed */
+	{
+		printk(KERN_WARNING "cciss: report logical volume"
+			" command failed\n");
+		listlength = 0;
+		goto free_err;
+	}
+	num_luns = listlength / 8; // 8 bytes per entry
+	if (num_luns > CISS_MAX_LUN)
+	{
+		num_luns = CISS_MAX_LUN;
+	}
+#ifdef CCISS_DEBUG
+	printk(KERN_DEBUG "Length = %x %x %x %x = %d\n", ld_buff->LUNListLength[0],
+		ld_buff->LUNListLength[1], ld_buff->LUNListLength[2],
+		ld_buff->LUNListLength[3],  num_luns);
+#endif 
+	for(i=0; i<  num_luns; i++)
+	{
+		int j;
+		int lunID_found = 0;
+
+		lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3])) << 24;
+		lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2])) << 16;
+		lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1])) << 8;
+		lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
+		
+ 		/* check to see if this is a new lun */ 
+		for(j=0; j <= h->highest_lun; j++)
+		{
+#ifdef CCISS_DEBUG
+			printk("Checking %d %x against %x\n", j,h->drv[j].LunID,
+						lunid);
+#endif /* CCISS_DEBUG */
+			if (h->drv[j].LunID == lunid)
+			{
+				lunID_found = 1;
+				break;
+			}
+			
+		}
+		if( lunID_found == 1)
+			continue;
+		else
+		{	/* It is the new lun we have been looking for */
+#ifdef CCISS_DEBUG
+			printk("new lun found at %d\n", i);
+#endif /* CCISS_DEBUG */
+			new_lun_index = i;
+			new_lun_found = 1;
+			break;	
+		}
+	}
+	if (!new_lun_found)
+	{
+		printk(KERN_WARNING "cciss: new logical volume not found\n");
+		goto free_err;
+	}
+	/* Now find the free index */
+	for(i=0; i <CISS_MAX_LUN; i++)
+	{
+#ifdef CCISS_DEBUG
+		printk("Checking Index %d\n", i);
+#endif /* CCISS_DEBUG */
+		if(h->drv[i].LunID == 0)
+		{
+#ifdef CCISS_DEBUG
+			printk("free index found at %d\n", i);
+#endif /* CCISS_DEBUG */
+			free_index_found = 1;
+			free_index = i;
+			break;
+		}
+	}
+	if (!free_index_found)
+	{
+		printk(KERN_WARNING "cciss: unable to find free slot for disk\n");
+		goto free_err;
+	}
+
+	logvol = free_index;
+	h->drv[logvol].LunID = lunid;
+	/* there could be gaps in lun numbers, track the highest index */
+	if (h->highest_lun < logvol)
+		h->highest_lun = logvol;
+	cciss_read_capacity(ctlr, logvol, size_buff, 1,
+		&total_size, &block_size);
+	cciss_geometry_inquiry(ctlr, logvol, 1, total_size, block_size,
+			inq_buff, &h->drv[logvol]);
+	h->drv[logvol].usage_count = 0;
+	++h->num_luns;
+	/* set up partitions per disk */
+	disk = h->gendisk[logvol];
+	set_capacity(disk, h->drv[logvol].nr_blocks);
+	/* if it's the controller it's already added */
+	if (logvol)
+		add_disk(disk);
+freeret:
+	kfree(ld_buff);
+	kfree(size_buff);
+	kfree(inq_buff);
+	return (logvol);
+mem_msg:
+	printk(KERN_ERR "cciss: out of memory\n");
+free_err:
+	logvol = -1;
+	goto freeret;
+}
+
+static int cciss_revalidate(struct gendisk *disk)
+{
+	ctlr_info_t *h = get_host(disk);
+	drive_info_struct *drv = get_drv(disk);
+	int logvol;
+	int FOUND=0;
+	unsigned int block_size;
+	unsigned int total_size;
+	ReadCapdata_struct *size_buff = NULL;
+	InquiryData_struct *inq_buff = NULL;
+
+	for(logvol=0; logvol < CISS_MAX_LUN; logvol++)
+	{
+		if(h->drv[logvol].LunID == drv->LunID) {
+			FOUND=1;
+			break;
+		}
+	}
+
+	if (!FOUND) return 1;
+
+	size_buff = kmalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
+	if (size_buff == NULL)
+	{
+		printk(KERN_WARNING "cciss: out of memory\n");
+		return 1;
+	}
+	inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
+	if (inq_buff == NULL)
+	{
+		printk(KERN_WARNING "cciss: out of memory\n");
+		kfree(size_buff);
+		return 1;
+	}
+
+	cciss_read_capacity(h->ctlr, logvol, size_buff, 1, &total_size, &block_size);
+	cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size, inq_buff, drv);
+
+	blk_queue_hardsect_size(h->queue, drv->block_size);
+	set_capacity(disk, drv->nr_blocks);
+
+	kfree(size_buff);
+	kfree(inq_buff);
+	return 0;
+}
+
+/*
+ *   Wait polling for a command to complete.
+ *   The memory mapped FIFO is polled for the completion.
+ *   Used only at init time, interrupts from the HBA are disabled.
+ */
+static unsigned long pollcomplete(int ctlr)
+{
+	unsigned long done;
+	int i;
+
+	/* Wait (up to 20 seconds) for a command to complete */
+
+	for (i = 20 * HZ; i > 0; i--) {
+		done = hba[ctlr]->access.command_completed(hba[ctlr]);
+		if (done == FIFO_EMPTY) {
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			schedule_timeout(1);
+		} else
+			return (done);
+	}
+	/* Invalid address to tell caller we ran out of time */
+	return 1;
+}
+/*
+ * Send a command to the controller, and wait for it to complete.  
+ * Only used at init time. 
+ */
+static int sendcmd(
+	__u8	cmd,
+	int	ctlr,
+	void	*buff,
+	size_t	size,
+	unsigned int use_unit_num, /* 0: address the controller,
+				      1: address logical volume log_unit, 
+				      2: periph device address is scsi3addr */
+	unsigned int log_unit,
+	__u8	page_code,
+	unsigned char *scsi3addr,
+	int cmd_type)
+{
+	CommandList_struct *c;
+	int i;
+	unsigned long complete;
+	ctlr_info_t *info_p= hba[ctlr];
+	u64bit buff_dma_handle;
+	int status;
+
+	if ((c = cmd_alloc(info_p, 1)) == NULL) {
+		printk(KERN_WARNING "cciss: unable to get memory");
+		return(IO_ERROR);
+	}
+	status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num,
+		log_unit, page_code, scsi3addr, cmd_type);
+	if (status != IO_OK) {
+		cmd_free(info_p, c, 1);
+		return status;
+	}
+resend_cmd1:
+	/*
+         * Disable interrupt
+         */
+#ifdef CCISS_DEBUG
+	printk(KERN_DEBUG "cciss: turning intr off\n");
+#endif /* CCISS_DEBUG */ 
+        info_p->access.set_intr_mask(info_p, CCISS_INTR_OFF);
+	
+	/* Make sure there is room in the command FIFO */
+	/* Actually it should be completely empty at this time. */
+	for (i = 200000; i > 0; i--)
+	{
+		/* if fifo isn't full go */
+		if (!(info_p->access.fifo_full(info_p)))
+			break;
+		udelay(10);
+		printk(KERN_WARNING "cciss%d: SendCmd FIFO full,"
+			" waiting!\n", ctlr);
+	}
+	/*
+	 * Send the cmd
+	 */
+	info_p->access.submit_command(info_p, c);
+	complete = pollcomplete(ctlr);
+
+#ifdef CCISS_DEBUG
+	printk(KERN_DEBUG "cciss: command completed\n");
+#endif /* CCISS_DEBUG */
+
+	if (complete != 1) {
+		if ((complete & CISS_ERROR_BIT)
+		    && (complete & ~CISS_ERROR_BIT) == c->busaddr)
+		{
+			/* if data overrun or underrun on Report command,
+				ignore it
+			*/
+			if (((c->Request.CDB[0] == CISS_REPORT_LOG) ||
+			     (c->Request.CDB[0] == CISS_REPORT_PHYS) ||
+			     (c->Request.CDB[0] == CISS_INQUIRY)) &&
+				((c->err_info->CommandStatus == 
+					CMD_DATA_OVERRUN) || 
+				 (c->err_info->CommandStatus == 
+					CMD_DATA_UNDERRUN)
+			 	))
+			{
+				complete = c->busaddr;
+			} else {
+				if (c->err_info->CommandStatus ==
+						CMD_UNSOLICITED_ABORT) {
+					printk(KERN_WARNING "cciss%d: "
+						"unsolicited abort %p\n",
+						ctlr, c);
+					if (c->retry_count < MAX_CMD_RETRIES) {
+						printk(KERN_WARNING
+						   "cciss%d: retrying %p\n",
+						   ctlr, c);
+						c->retry_count++;
+						/* erase the old error */
+						/* information */
+						memset(c->err_info, 0,
+						   sizeof(ErrorInfo_struct));
+						goto resend_cmd1;
+					} else {
+						printk(KERN_WARNING
+						   "cciss%d: retried %p too "
+						   "many times\n", ctlr, c);
+						status = IO_ERROR;
+						goto cleanup1;
+					}
+				}
+				printk(KERN_WARNING "cciss%d: sendcmd"
+				" error %x\n", ctlr,
+					c->err_info->CommandStatus);
+				printk(KERN_WARNING "cciss%d: sendcmd"
+				" offensive info\n"
+				"  size %x\n   num %x   value %x\n", ctlr,
+				  c->err_info->MoreErrInfo.Invalid_Cmd.offense_size,
+				  c->err_info->MoreErrInfo.Invalid_Cmd.offense_num,
+				  c->err_info->MoreErrInfo.Invalid_Cmd.offense_value);
+				status = IO_ERROR;
+				goto cleanup1;
+			}
+		}
+		if (complete != c->busaddr) {
+			printk(KERN_WARNING "cciss%d: SendCmd "
+				"invalid command list address returned! (%lx)\n",
+				ctlr, complete);
+			status = IO_ERROR;
+			goto cleanup1;
+		}
+	} else {
+		printk(KERN_WARNING
+			"cciss%d: SendCmd timed out, "
+			"no command list address returned!\n",
+			ctlr);
+		status = IO_ERROR;
+	}
+		
+cleanup1:
+	/* unlock the data buffer from DMA; fill_cmd() recorded the
+	 * mapping in the command's first scatter/gather entry */
+	if (size > 0) {
+		buff_dma_handle.val32.lower = c->SG[0].Addr.lower;
+		buff_dma_handle.val32.upper = c->SG[0].Addr.upper;
+		pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val,
+					size, PCI_DMA_BIDIRECTIONAL);
+	}
+	cmd_free(info_p, c, 1);
+	return status;
+}
+/*
+ * Map (physical) PCI mem into (virtual) kernel space
+ */
+static void __iomem *remap_pci_mem(ulong base, ulong size)
+{
+        ulong page_base        = ((ulong) base) & PAGE_MASK;
+        ulong page_offs        = ((ulong) base) - page_base;
+        void __iomem *page_remapped = ioremap(page_base, page_offs+size);
+
+        return page_remapped ? (page_remapped + page_offs) : NULL;
+}
+
+/*
+ * Takes jobs off the request Q and sends them to the hardware, then puts
+ * them on the completion Q to wait for completion.
+ */
+static void start_io( ctlr_info_t *h)
+{
+	CommandList_struct *c;
+	
+	while(( c = h->reqQ) != NULL )
+	{
+		/* can't do anything if fifo is full */
+		if ((h->access.fifo_full(h))) {
+			printk(KERN_WARNING "cciss: fifo full\n");
+			break;
+		}
+
+		/* Get the first entry from the Request Q */
+		removeQ(&(h->reqQ), c);
+		h->Qdepth--;
+	
+		/* Tell the controller to execute the command */
+		h->access.submit_command(h, c);
+		
+		/* Put job onto the completed Q */ 
+		addQ (&(h->cmpQ), c); 
+	}
+}
+
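+/* Walk the chain of bios belonging to a completed request and end
+ * each one, reporting success or -EIO via bio_endio(). */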
+static inline void complete_buffers(struct bio *bio, int status)
+{
+	while (bio) {
+		struct bio *xbh = bio->bi_next; 
+		int nr_sectors = bio_sectors(bio);
+
+		bio->bi_next = NULL; 
+		blk_finished_io(nr_sectors);
+		bio_endio(bio, nr_sectors << 9, status ? 0 : -EIO);
+		bio = xbh;
+	}
+
+} 
+/* Assumes that CCISS_LOCK(h->ctlr) is held. */
+/* Zeros out the error record and then resends the command back */
+/* to the controller */
+static inline void resend_cciss_cmd( ctlr_info_t *h, CommandList_struct *c)
+{
+	/* erase the old error information */
+	memset(c->err_info, 0, sizeof(ErrorInfo_struct));
+
+	/* add it to software queue and then send it to the controller */
+	addQ(&(h->reqQ),c);
+	h->Qdepth++;
+	if(h->Qdepth > h->maxQsinceinit)
+		h->maxQsinceinit = h->Qdepth;
+
+	start_io(h);
+}
+/* checks the status of the job and calls complete buffers to mark all 
+ * buffers for the completed job. 
+ */ 
+static inline void complete_command( ctlr_info_t *h, CommandList_struct *cmd,
+		int timeout)
+{
+	int status = 1;
+	int i;
+	int retry_cmd = 0;
+	u64bit temp64;
+		
+	if (timeout)
+		status = 0; 
+
+	if(cmd->err_info->CommandStatus != 0) 
+	{ /* an error has occurred */ 
+		switch(cmd->err_info->CommandStatus)
+		{
+			unsigned char sense_key;
+			case CMD_TARGET_STATUS:
+				status = 0;
+			
+				if (cmd->err_info->ScsiStatus == 0x02) {
+					printk(KERN_WARNING "cciss: cmd %p "
+						"has CHECK CONDITION, "
+						"byte 2 = 0x%x\n", cmd,
+						cmd->err_info->SenseInfo[2]);
+					/* check the sense key */
+					sense_key = 0xf &
+						cmd->err_info->SenseInfo[2];
+					/* no status or recovered error */
+					if ((sense_key == 0x0) ||
+					    (sense_key == 0x1))
+						status = 1;
+				} else {
+					printk(KERN_WARNING "cciss: cmd %p "
+						"has SCSI Status 0x%x\n",
+						cmd, cmd->err_info->ScsiStatus);
+				}
+			break;
+			case CMD_DATA_UNDERRUN:
+				printk(KERN_WARNING "cciss: cmd %p has"
+					" completed with data underrun "
+					"reported\n", cmd);
+			break;
+			case CMD_DATA_OVERRUN:
+				printk(KERN_WARNING "cciss: cmd %p has"
+					" completed with data overrun "
+					"reported\n", cmd);
+			break;
+			case CMD_INVALID:
+				printk(KERN_WARNING "cciss: cmd %p is "
+					"reported invalid\n", cmd);
+				status = 0;
+			break;
+			case CMD_PROTOCOL_ERR:
+				printk(KERN_WARNING "cciss: cmd %p has "
+					"protocol error\n", cmd);
+				status = 0;
+			break;
+			case CMD_HARDWARE_ERR:
+				printk(KERN_WARNING "cciss: cmd %p had "
+					"hardware error\n", cmd);
+				status = 0;
+			break;
+			case CMD_CONNECTION_LOST:
+				printk(KERN_WARNING "cciss: cmd %p had "
+					"connection lost\n", cmd);
+				status=0;
+			break;
+			case CMD_ABORTED:
+				printk(KERN_WARNING "cciss: cmd %p was "
+					"aborted\n", cmd);
+				status=0;
+			break;
+			case CMD_ABORT_FAILED:
+				printk(KERN_WARNING "cciss: cmd %p reports "
+					"abort failed\n", cmd);
+				status=0;
+			break;
+			case CMD_UNSOLICITED_ABORT:
+				printk(KERN_WARNING "cciss%d: unsolicited "
+					"abort %p\n", h->ctlr, cmd);
+				if (cmd->retry_count < MAX_CMD_RETRIES) {
+					retry_cmd=1;
+					printk(KERN_WARNING
+						"cciss%d: retrying %p\n",
+						h->ctlr, cmd);
+					cmd->retry_count++;
+				} else
+					printk(KERN_WARNING
+						"cciss%d: %p retried too "
+						"many times\n", h->ctlr, cmd);
+				status=0;
+			break;
+			case CMD_TIMEOUT:
+				printk(KERN_WARNING "cciss: cmd %p timed out\n",
+					cmd);
+				status=0;
+			break;
+			default:
+				printk(KERN_WARNING "cciss: cmd %p returned "
+					"unknown status %x\n", cmd, 
+						cmd->err_info->CommandStatus); 
+				status=0;
+		}
+	}
+	/* We need to return this command */
+	if(retry_cmd) {
+		resend_cciss_cmd(h,cmd);
+		return;
+	}	
+	/* command did not need to be retried */
+	/* unmap the DMA mapping for all the scatter gather elements */
+	for(i=0; i<cmd->Header.SGList; i++) {
+		temp64.val32.lower = cmd->SG[i].Addr.lower;
+		temp64.val32.upper = cmd->SG[i].Addr.upper;
+		pci_unmap_page(hba[cmd->ctlr]->pdev,
+			temp64.val, cmd->SG[i].Len,
+			(cmd->Request.Type.Direction == XFER_READ) ?
+				PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
+	}
+	complete_buffers(cmd->rq->bio, status);
+
+#ifdef CCISS_DEBUG
+	printk("Done with %p\n", cmd->rq);
+#endif /* CCISS_DEBUG */ 
+
+	end_that_request_last(cmd->rq);
+	cmd_free(h,cmd,1);
+}
+
+/* 
+ * Get a request and submit it to the controller. 
+ */
+static void do_cciss_request(request_queue_t *q)
+{
+	ctlr_info_t *h= q->queuedata; 
+	CommandList_struct *c;
+	int start_blk, seg;
+	struct request *creq;
+	u64bit temp64;
+	struct scatterlist tmp_sg[MAXSGENTRIES];
+	drive_info_struct *drv;
+	int i, dir;
+
+	/* We call start_io here in case there is a command waiting on the
+	 * queue that has not been sent.
+	*/
+	if (blk_queue_plugged(q))
+		goto startio;
+
+queue:
+	creq = elv_next_request(q);
+	if (!creq)
+		goto startio;
+
+	if (creq->nr_phys_segments > MAXSGENTRIES)
+                BUG();
+
+	if (( c = cmd_alloc(h, 1)) == NULL)
+		goto full;
+
+	blkdev_dequeue_request(creq);
+
+	spin_unlock_irq(q->queue_lock);
+
+	c->cmd_type = CMD_RWREQ;
+	c->rq = creq;
+	
+	/* fill in the request */ 
+	drv = creq->rq_disk->private_data;
+	c->Header.ReplyQueue = 0;  // unused in simple mode
+	c->Header.Tag.lower = c->busaddr;  // use the bus address of the cmd block for the tag
+	c->Header.LUN.LogDev.VolId= drv->LunID;
+	c->Header.LUN.LogDev.Mode = 1;
+	c->Request.CDBLen = 10; // 12 byte commands not in FW yet;
+	c->Request.Type.Type =  TYPE_CMD; // It is a command. 
+	c->Request.Type.Attribute = ATTR_SIMPLE; 
+	c->Request.Type.Direction = 
+		(rq_data_dir(creq) == READ) ? XFER_READ: XFER_WRITE; 
+	c->Request.Timeout = 0; // Don't time out	
+	c->Request.CDB[0] = (rq_data_dir(creq) == READ) ? CCISS_READ : CCISS_WRITE;
+	start_blk = creq->sector;
+#ifdef CCISS_DEBUG
+	printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n",(int) creq->sector,
+		(int) creq->nr_sectors);	
+#endif /* CCISS_DEBUG */
+
+	seg = blk_rq_map_sg(q, creq, tmp_sg);
+
+	/* get the DMA records for the setup */ 
+	if (c->Request.Type.Direction == XFER_READ)
+		dir = PCI_DMA_FROMDEVICE;
+	else
+		dir = PCI_DMA_TODEVICE;
+
+	for (i=0; i<seg; i++)
+	{
+		c->SG[i].Len = tmp_sg[i].length;
+		temp64.val = (__u64) pci_map_page(h->pdev, tmp_sg[i].page,
+			 		  tmp_sg[i].offset, tmp_sg[i].length,
+					  dir);
+		c->SG[i].Addr.lower = temp64.val32.lower;
+                c->SG[i].Addr.upper = temp64.val32.upper;
+                c->SG[i].Ext = 0;  // we are not chaining
+	}
+	/* track how many SG entries we are using */ 
+	if( seg > h->maxSG)
+		h->maxSG = seg; 
+
+#ifdef CCISS_DEBUG
+	printk(KERN_DEBUG "cciss: Submitting %d sectors in %d segments\n", creq->nr_sectors, seg);
+#endif /* CCISS_DEBUG */
+
+	c->Header.SGList = c->Header.SGTotal = seg;
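+	/* 10-byte READ/WRITE CDB: big-endian logical block address in
+	 * bytes 2-5, transfer length in blocks in bytes 7-8. */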
+	c->Request.CDB[1]= 0;
+	c->Request.CDB[2]= (start_blk >> 24) & 0xff;	//MSB
+	c->Request.CDB[3]= (start_blk >> 16) & 0xff;
+	c->Request.CDB[4]= (start_blk >>  8) & 0xff;
+	c->Request.CDB[5]= start_blk & 0xff;
+	c->Request.CDB[6]= 0; // (sect >> 24) & 0xff; MSB
+	c->Request.CDB[7]= (creq->nr_sectors >>  8) & 0xff; 
+	c->Request.CDB[8]= creq->nr_sectors & 0xff; 
+	c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
+
+	spin_lock_irq(q->queue_lock);
+
+	addQ(&(h->reqQ),c);
+	h->Qdepth++;
+	if(h->Qdepth > h->maxQsinceinit)
+		h->maxQsinceinit = h->Qdepth; 
+
+	goto queue;
+full:
+	blk_stop_queue(q);
+startio:
+	/* We already hold the driver lock here, so there is no need
+	 * to take it.
+	 */
+	start_io(h);
+}
+
+static irqreturn_t do_cciss_intr(int irq, void *dev_id, struct pt_regs *regs)
+{
+	ctlr_info_t *h = dev_id;
+	CommandList_struct *c;
+	unsigned long flags;
+	__u32 a, a1;
+	int j;
+	int start_queue = h->next_to_run;
+
+	/* Is this interrupt for us? */
+	if (( h->access.intr_pending(h) == 0) || (h->interrupts_enabled == 0))
+		return IRQ_NONE;
+
+	/*
+	 * If there are completed commands in the completion queue,
+	 * we had better do something about it.
+	 */
+	spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+	while( h->access.intr_pending(h))
+	{
+		while((a = h->access.command_completed(h)) != FIFO_EMPTY) 
+		{
+			a1 = a;
+			a &= ~3;
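+			/* The low two bits of a completed tag are status
+			 * flags (cf. CISS_ERROR_BIT in sendcmd()); masking
+			 * them off recovers the command's bus address so it
+			 * can be looked up on the completion queue. */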
+			if ((c = h->cmpQ) == NULL)
+			{  
+				printk(KERN_WARNING "cciss: Completion of %08lx ignored\n", (unsigned long)a1);
+				continue;	
+			} 
+			while(c->busaddr != a) {
+				c = c->next;
+				if (c == h->cmpQ) 
+					break;
+			}
+			/*
+			 * If we've found the command, take it off the
+			 * completion Q and free it
+			 */
+			 if (c->busaddr == a) {
+				removeQ(&h->cmpQ, c);
+				if (c->cmd_type == CMD_RWREQ) {
+					complete_command(h, c, 0);
+				} else if (c->cmd_type == CMD_IOCTL_PEND) {
+					complete(c->waiting);
+				}
+#				ifdef CONFIG_CISS_SCSI_TAPE
+				else if (c->cmd_type == CMD_SCSI)
+					complete_scsi_command(c, 0, a1);
+#				endif
+				continue;
+			}
+		}
+	}
+
+ 	/* check to see if we have maxed out the number of commands that can
+ 	 * be placed on the queue.  If so then exit.  We do this check here
+ 	 * in case the interrupt we serviced was from an ioctl and did not
+ 	 * free any new commands.
+	 */
+ 	if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
+ 		goto cleanup;
+
+ 	/* We have room on the queue for more commands.  Now we need to queue
+ 	 * them up.  We will also keep track of the next queue to run so
+ 	 * that every queue gets a chance to be started first.
+ 	*/
+ 	for (j=0; j < NWD; j++){
+ 		int curr_queue = (start_queue + j) % NWD;
+ 		/* make sure the disk has been added and the drive is real
+ 		 * because this can be called from the middle of init_one.
+ 		*/
+ 		if (!(h->gendisk[curr_queue]->queue) ||
+ 				!(h->drv[curr_queue].heads))
+ 			continue;
+ 		blk_start_queue(h->gendisk[curr_queue]->queue);
+
+ 		/* check to see if we have maxed out the number of commands
+ 		 * that can be placed on the queue.
+ 		*/
+ 		if ((find_first_zero_bit(h->cmd_pool_bits, NR_CMDS)) == NR_CMDS)
+ 		{
+ 			if (curr_queue == start_queue) {
+ 				h->next_to_run = (start_queue + 1) % NWD;
+ 				goto cleanup;
+ 			} else {
+ 				h->next_to_run = curr_queue;
+ 				goto cleanup;
+ 			}
+ 		} else {
+ 			curr_queue = (curr_queue + 1) % NWD;
+ 		}
+ 	}
+
+cleanup:
+	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+	return IRQ_HANDLED;
+}
+
+/*
+ *  We cannot read the structure directly; for portability we must use
+ *   the io functions.
+ *   This is for debug only.
+ */
+#ifdef CCISS_DEBUG
+static void print_cfg_table( CfgTable_struct *tb)
+{
+	int i;
+	char temp_name[17];
+
+	printk("Controller Configuration information\n");
+	printk("------------------------------------\n");
+	for(i=0;i<4;i++)
+		temp_name[i] = readb(&(tb->Signature[i]));
+	temp_name[4]='\0';
+	printk("   Signature = %s\n", temp_name); 
+	printk("   Spec Number = %d\n", readl(&(tb->SpecValence)));
+	printk("   Transport methods supported = 0x%x\n", 
+				readl(&(tb-> TransportSupport)));
+	printk("   Transport methods active = 0x%x\n", 
+				readl(&(tb->TransportActive)));
+	printk("   Requested transport Method = 0x%x\n", 
+			readl(&(tb->HostWrite.TransportRequest)));
+	printk("   Coalesce Interrupt Delay = 0x%x\n",
+			readl(&(tb->HostWrite.CoalIntDelay)));
+	printk("   Coalesce Interrupt Count = 0x%x\n",
+			readl(&(tb->HostWrite.CoalIntCount)));
+	printk("   Max outstanding commands = %d\n",
+			readl(&(tb->CmdsOutMax)));
+	printk("   Bus Types = 0x%x\n", readl(&(tb-> BusTypes)));
+	for(i=0;i<16;i++)
+		temp_name[i] = readb(&(tb->ServerName[i]));
+	temp_name[16] = '\0';
+	printk("   Server Name = %s\n", temp_name);
+	printk("   Heartbeat Counter = 0x%x\n\n\n", 
+			readl(&(tb->HeartBeat)));
+}
+#endif /* CCISS_DEBUG */ 
+
+static void release_io_mem(ctlr_info_t *c)
+{
+	/* if IO mem was not protected do nothing */
+	if( c->io_mem_addr == 0)
+		return;
+	release_region(c->io_mem_addr, c->io_mem_length);
+	c->io_mem_addr = 0;
+	c->io_mem_length = 0;
+}
+
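+/* Translate a config-space BAR offset (e.g. PCI_BASE_ADDRESS_2) into a
+ * resource index, allowing for 64-bit BARs that occupy two register
+ * slots. */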
+static int find_PCI_BAR_index(struct pci_dev *pdev,
+				unsigned long pci_bar_addr)
+{
+	int i, offset, mem_type, bar_type;
+	if (pci_bar_addr == PCI_BASE_ADDRESS_0) /* looking for BAR zero? */
+		return 0;
+	offset = 0;
+	for (i=0; i<DEVICE_COUNT_RESOURCE; i++) {
+		bar_type = pci_resource_flags(pdev, i) &
+			PCI_BASE_ADDRESS_SPACE;
+		if (bar_type == PCI_BASE_ADDRESS_SPACE_IO)
+			offset += 4;
+		else {
+			mem_type = pci_resource_flags(pdev, i) &
+				PCI_BASE_ADDRESS_MEM_TYPE_MASK;
+			switch (mem_type) {
+				case PCI_BASE_ADDRESS_MEM_TYPE_32:
+				case PCI_BASE_ADDRESS_MEM_TYPE_1M:
+					offset += 4; /* 32 bit */
+					break;
+				case PCI_BASE_ADDRESS_MEM_TYPE_64:
+					offset += 8;
+					break;
+				default: /* reserved in PCI 2.2 */
+					printk(KERN_WARNING "Base address is invalid\n");
+			       		return -1;
+				break;
+			}
+		}
+ 		if (offset == pci_bar_addr - PCI_BASE_ADDRESS_0)
+			return i+1;
+	}
+	return -1;
+}
+
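+/* One-time PCI setup: verify the board is enabled, map its registers
+ * and config table, identify the product, and switch the controller
+ * into simple (non-performant) transport mode. */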
+static int cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
+{
+	ushort subsystem_vendor_id, subsystem_device_id, command;
+	__u32 board_id, scratchpad = 0;
+	__u64 cfg_offset;
+	__u32 cfg_base_addr;
+	__u64 cfg_base_addr_index;
+	int i;
+
+	/* check to see if controller has been disabled */
+	/* BEFORE trying to enable it */
+	(void) pci_read_config_word(pdev, PCI_COMMAND, &command);
+	if (!(command & PCI_COMMAND_MEMORY))
+	{
+		printk(KERN_WARNING "cciss: controller appears to be disabled\n");
+		return -1;
+	}
+
+	if (pci_enable_device(pdev))
+	{
+		printk(KERN_ERR "cciss: Unable to enable PCI device\n");
+		return -1;
+	}
+	if (pci_set_dma_mask(pdev, CCISS_DMA_MASK) != 0)
+	{
+		printk(KERN_ERR "cciss: Unable to set DMA mask\n");
+		return -1;
+	}
+
+	subsystem_vendor_id = pdev->subsystem_vendor;
+	subsystem_device_id = pdev->subsystem_device;
+	board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) |
+					subsystem_vendor_id);
+
+	/* search for our IO range so we can protect it */
+	for(i=0; i<DEVICE_COUNT_RESOURCE; i++)
+	{
+		/* is this an IO range */ 
+		if( pci_resource_flags(pdev, i) & 0x01 ) {
+			c->io_mem_addr = pci_resource_start(pdev, i);
+			c->io_mem_length = pci_resource_end(pdev, i) -
+				pci_resource_start(pdev, i) +1;
+#ifdef CCISS_DEBUG
+			printk("IO value found base_addr[%d] %lx %lx\n", i,
+				c->io_mem_addr, c->io_mem_length);
+#endif /* CCISS_DEBUG */
+			/* register the IO range */ 
+			if(!request_region( c->io_mem_addr,
+                                        c->io_mem_length, "cciss"))
+			{
+				printk(KERN_WARNING "cciss I/O memory range already in use addr=%lx length=%ld\n",
+				c->io_mem_addr, c->io_mem_length);
+				c->io_mem_addr= 0;
+				c->io_mem_length = 0;
+			} 
+			break;
+		}
+	}
+
+#ifdef CCISS_DEBUG
+	printk("command = %x\n", command);
+	printk("irq = %x\n", pdev->irq);
+	printk("board_id = %x\n", board_id);
+#endif /* CCISS_DEBUG */ 
+
+	c->intr = pdev->irq;
+
+	/*
+	 * The memory base address is the first address; the second points
+	 * to the config table.
+	 */
+
+	c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */
+#ifdef CCISS_DEBUG
+	printk("address 0 = %x\n", c->paddr);
+#endif /* CCISS_DEBUG */ 
+	c->vaddr = remap_pci_mem(c->paddr, 200);
+
+	/* Wait for the board to become ready.  (PCI hotplug needs this.)
+	 * We poll for up to 120 secs, once per 100ms. */
+	for (i=0; i < 1200; i++) {
+		scratchpad = readl(c->vaddr + SA5_SCRATCHPAD_OFFSET);
+		if (scratchpad == CCISS_FIRMWARE_READY)
+			break;
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule_timeout(HZ / 10); /* wait 100ms */
+	}
+	if (scratchpad != CCISS_FIRMWARE_READY) {
+		printk(KERN_WARNING "cciss: Board not ready.  Timed out.\n");
+		return -1;
+	}
+
+	/* get the address index number */
+	cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET);
+	cfg_base_addr &= (__u32) 0x0000ffff;
+#ifdef CCISS_DEBUG
+	printk("cfg base address = %x\n", cfg_base_addr);
+#endif /* CCISS_DEBUG */
+	cfg_base_addr_index =
+		find_PCI_BAR_index(pdev, cfg_base_addr);
+#ifdef CCISS_DEBUG
+	printk("cfg base address index = %x\n", cfg_base_addr_index);
+#endif /* CCISS_DEBUG */
+	if (cfg_base_addr_index == -1) {
+		printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n");
+		release_io_mem(c);
+		return -1;
+	}
+
+	cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET);
+#ifdef CCISS_DEBUG
+	printk("cfg offset = %x\n", cfg_offset);
+#endif /* CCISS_DEBUG */
+	c->cfgtable =  remap_pci_mem(pci_resource_start(pdev,
+				cfg_base_addr_index) + cfg_offset,
+				sizeof(CfgTable_struct));
+	c->board_id = board_id;
+
+#ifdef CCISS_DEBUG
+	print_cfg_table(c->cfgtable); 
+#endif /* CCISS_DEBUG */
+
+	for(i=0; i<NR_PRODUCTS; i++) {
+		if (board_id == products[i].board_id) {
+			c->product_name = products[i].product_name;
+			c->access = *(products[i].access);
+			break;
+		}
+	}
+	if (i == NR_PRODUCTS) {
+		printk(KERN_WARNING "cciss: Sorry, I don't know how"
+			" to access the Smart Array controller %08lx\n", 
+				(unsigned long)board_id);
+		return -1;
+	}
+	if (  (readb(&c->cfgtable->Signature[0]) != 'C') ||
+	      (readb(&c->cfgtable->Signature[1]) != 'I') ||
+	      (readb(&c->cfgtable->Signature[2]) != 'S') ||
+	      (readb(&c->cfgtable->Signature[3]) != 'S') )
+	{
+		printk("Does not appear to be a valid CISS config table\n");
+		return -1;
+	}
+
+#ifdef CONFIG_X86
+{
+	/* Need to enable prefetch in the SCSI core for 6400 in x86 */
+	__u32 prefetch;
+	prefetch = readl(&(c->cfgtable->SCSI_Prefetch));
+	prefetch |= 0x100;
+	writel(prefetch, &(c->cfgtable->SCSI_Prefetch));
+}
+#endif
+
+#ifdef CCISS_DEBUG
+	printk("Trying to put board into Simple mode\n");
+#endif /* CCISS_DEBUG */ 
+	c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
+	/* Update the field, and then ring the doorbell */ 
+	writel( CFGTBL_Trans_Simple, 
+		&(c->cfgtable->HostWrite.TransportRequest));
+	writel( CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL);
+
+	/* Under certain very rare conditions, this can take a while.
+	 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
+	 * as we enter this code.) */
+	for (i = 0; i < MAX_CONFIG_WAIT; i++) {
+		if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
+			break;
+		/* delay and try again */
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule_timeout(10);
+	}
+
+#ifdef CCISS_DEBUG
+	printk(KERN_DEBUG "I counter got to %d %x\n", i, readl(c->vaddr + SA5_DOORBELL));
+#endif /* CCISS_DEBUG */
+#ifdef CCISS_DEBUG
+	print_cfg_table(c->cfgtable);	
+#endif /* CCISS_DEBUG */ 
+
+	if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple))
+	{
+		printk(KERN_WARNING "cciss: unable to get board into"
+					" simple mode\n");
+		return -1;
+	}
+	return 0;
+
+}
+
+/* 
+ * Gets information about the local volumes attached to the controller. 
+ */ 
+static void cciss_getgeometry(int cntl_num)
+{
+	ReportLunData_struct *ld_buff;
+	ReadCapdata_struct *size_buff;
+	InquiryData_struct *inq_buff;
+	int return_code;
+	int i;
+	int listlength = 0;
+	__u32 lunid = 0;
+	int block_size;
+	int total_size; 
+
+	ld_buff = kmalloc(sizeof(ReportLunData_struct), GFP_KERNEL);
+	if (ld_buff == NULL)
+	{
+		printk(KERN_ERR "cciss: out of memory\n");
+		return;
+	}
+	memset(ld_buff, 0, sizeof(ReportLunData_struct));
+	size_buff = kmalloc(sizeof(ReadCapdata_struct), GFP_KERNEL);
+	if (size_buff == NULL)
+	{
+		printk(KERN_ERR "cciss: out of memory\n");
+		kfree(ld_buff);
+		return;
+	}
+	inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
+	if (inq_buff == NULL)
+	{
+		printk(KERN_ERR "cciss: out of memory\n");
+		kfree(ld_buff);
+		kfree(size_buff);
+		return;
+	}
+	/* Get the firmware version */ 
+	return_code = sendcmd(CISS_INQUIRY, cntl_num, inq_buff, 
+		sizeof(InquiryData_struct), 0, 0 ,0, NULL, TYPE_CMD);
+	if (return_code == IO_OK)
+	{
+		hba[cntl_num]->firm_ver[0] = inq_buff->data_byte[32];
+		hba[cntl_num]->firm_ver[1] = inq_buff->data_byte[33];
+		hba[cntl_num]->firm_ver[2] = inq_buff->data_byte[34];
+		hba[cntl_num]->firm_ver[3] = inq_buff->data_byte[35];
+	} else /* send command failed */
+	{
+		printk(KERN_WARNING "cciss: unable to determine firmware"
+			" version of controller\n");
+	}
+	/* Get the number of logical volumes */ 
+	return_code = sendcmd(CISS_REPORT_LOG, cntl_num, ld_buff, 
+			sizeof(ReportLunData_struct), 0, 0, 0, NULL, TYPE_CMD);
+
+	if( return_code == IO_OK)
+	{
+#ifdef CCISS_DEBUG
+		printk("LUN Data\n--------------------------\n");
+#endif /* CCISS_DEBUG */ 
+
+		listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[0])) << 24;
+		listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[1])) << 16;
+		listlength |= (0xff & (unsigned int)(ld_buff->LUNListLength[2])) << 8;	
+		listlength |= 0xff & (unsigned int)(ld_buff->LUNListLength[3]);
+	} else /* reading number of logical volumes failed */
+	{
+		printk(KERN_WARNING "cciss: report logical volume"
+			" command failed\n");
+		listlength = 0;
+	}
+	hba[cntl_num]->num_luns = listlength / 8; // 8 bytes per entry
+	if (hba[cntl_num]->num_luns > CISS_MAX_LUN)
+	{
+		printk(KERN_ERR "cciss: only %d logical volumes supported\n",
+			CISS_MAX_LUN);
+		hba[cntl_num]->num_luns = CISS_MAX_LUN;
+	}
+#ifdef CCISS_DEBUG
+	printk(KERN_DEBUG "Length = %x %x %x %x = %d\n", ld_buff->LUNListLength[0],
+		ld_buff->LUNListLength[1], ld_buff->LUNListLength[2],
+		ld_buff->LUNListLength[3],  hba[cntl_num]->num_luns);
+#endif /* CCISS_DEBUG */
+
+	hba[cntl_num]->highest_lun = hba[cntl_num]->num_luns-1;
+	for(i=0; i<  hba[cntl_num]->num_luns; i++)
+	{
+
+		lunid = (0xff & (unsigned int)(ld_buff->LUN[i][3])) << 24;
+		lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][2])) << 16;
+		lunid |= (0xff & (unsigned int)(ld_buff->LUN[i][1])) << 8;
+		lunid |= 0xff & (unsigned int)(ld_buff->LUN[i][0]);
+		
+		hba[cntl_num]->drv[i].LunID = lunid;
+
+
+#ifdef CCISS_DEBUG
+	  	printk(KERN_DEBUG "LUN[%d]:  %x %x %x %x = %x\n", i, 
+		ld_buff->LUN[i][0], ld_buff->LUN[i][1],ld_buff->LUN[i][2], 
+		ld_buff->LUN[i][3], hba[cntl_num]->drv[i].LunID);
+#endif /* CCISS_DEBUG */
+		cciss_read_capacity(cntl_num, i, size_buff, 0,
+			&total_size, &block_size);
+		cciss_geometry_inquiry(cntl_num, i, 0, total_size, block_size,
+			inq_buff, &hba[cntl_num]->drv[i]);
+	}
+	kfree(ld_buff);
+	kfree(size_buff);
+	kfree(inq_buff);
+}	
+
+/* Function to find the first free pointer into our hba[] array */
+/* Returns -1 if no free entries are left.  */
+static int alloc_cciss_hba(void)
+{
+	struct gendisk *disk[NWD];
+	int i, n;
+	for (n = 0; n < NWD; n++) {
+		disk[n] = alloc_disk(1 << NWD_SHIFT);
+		if (!disk[n])
+			goto out;
+	}
+
+	for(i=0; i< MAX_CTLR; i++) {
+		if (!hba[i]) {
+			ctlr_info_t *p;
+			p = kmalloc(sizeof(ctlr_info_t), GFP_KERNEL);
+			if (!p)
+				goto Enomem;
+			memset(p, 0, sizeof(ctlr_info_t));
+			for (n = 0; n < NWD; n++)
+				p->gendisk[n] = disk[n];
+			hba[i] = p;
+			return i;
+		}
+	}
+	printk(KERN_WARNING "cciss: This driver supports a maximum"
+		" of %d controllers.\n", MAX_CTLR);
+	goto out;
+Enomem:
+	printk(KERN_ERR "cciss: out of memory.\n");
+out:
+	while (n--)
+		put_disk(disk[n]);
+	return -1;
+}
+
+static void free_hba(int i)
+{
+	ctlr_info_t *p = hba[i];
+	int n;
+
+	hba[i] = NULL;
+	for (n = 0; n < NWD; n++)
+		put_disk(p->gendisk[n]);
+	kfree(p);
+}
+
+/*
+ *  This is it.  Find all the controllers and register them.  I really hate
+ *  stealing all these major device numbers.
+ *  returns the number of block devices registered.
+ */
+static int __devinit cciss_init_one(struct pci_dev *pdev,
+	const struct pci_device_id *ent)
+{
+	request_queue_t *q;
+	int i;
+	int j;
+	int rc;
+
+	printk(KERN_DEBUG "cciss: Device 0x%x has been found at"
+			" bus %d dev %d func %d\n",
+		pdev->device, pdev->bus->number, PCI_SLOT(pdev->devfn),
+			PCI_FUNC(pdev->devfn));
+	i = alloc_cciss_hba();
+	if(i < 0)
+		return (-1);
+	if (cciss_pci_init(hba[i], pdev) != 0)
+		goto clean1;
+
+	sprintf(hba[i]->devname, "cciss%d", i);
+	hba[i]->ctlr = i;
+	hba[i]->pdev = pdev;
+
+	/* configure PCI DMA stuff */
+	if (!pci_set_dma_mask(pdev, 0xffffffffffffffffULL))
+		printk("cciss: using DAC cycles\n");
+	else if (!pci_set_dma_mask(pdev, 0xffffffff))
+		printk("cciss: not using DAC cycles\n");
+	else {
+		printk("cciss: no suitable DMA available\n");
+		goto clean1;
+	}
+
+	/*
+	 * register with the major number, or get a dynamic major number
+	 * by passing 0 as argument.  This is done for greater than
+	 * 8 controller support.
+	 */
+	if (i < MAX_CTLR_ORIG)
+		hba[i]->major = MAJOR_NR + i;
+	rc = register_blkdev(hba[i]->major, hba[i]->devname);
+	if(rc == -EBUSY || rc == -EINVAL) {
+		printk(KERN_ERR
+			"cciss:  Unable to get major number %d for %s "
+			"on hba %d\n", hba[i]->major, hba[i]->devname, i);
+		goto clean1;
+	}
+	else {
+		if (i >= MAX_CTLR_ORIG)
+			hba[i]->major = rc;
+	}
+
+	/* make sure the board interrupts are off */
+	hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
+	if( request_irq(hba[i]->intr, do_cciss_intr, 
+		SA_INTERRUPT | SA_SHIRQ | SA_SAMPLE_RANDOM, 
+			hba[i]->devname, hba[i])) {
+		printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
+			hba[i]->intr, hba[i]->devname);
+		goto clean2;
+	}
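+	/* One bit per command, rounded up to a whole number of longs;
+	   e.g. 100 commands on a 64-bit machine need two unsigned longs
+	   (128 bits). */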
+	hba[i]->cmd_pool_bits = kmalloc(((NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG)*sizeof(unsigned long), GFP_KERNEL);
+	hba[i]->cmd_pool = (CommandList_struct *)pci_alloc_consistent(
+		hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct), 
+		&(hba[i]->cmd_pool_dhandle));
+	hba[i]->errinfo_pool = (ErrorInfo_struct *)pci_alloc_consistent(
+		hba[i]->pdev, NR_CMDS * sizeof( ErrorInfo_struct), 
+		&(hba[i]->errinfo_pool_dhandle));
+	if((hba[i]->cmd_pool_bits == NULL) 
+		|| (hba[i]->cmd_pool == NULL)
+		|| (hba[i]->errinfo_pool == NULL)) {
+		printk(KERN_ERR "cciss: out of memory\n");
+		goto clean4;
+	}
+
+	spin_lock_init(&hba[i]->lock);
+	q = blk_init_queue(do_cciss_request, &hba[i]->lock);
+	if (!q)
+		goto clean4;
+
+	q->backing_dev_info.ra_pages = READ_AHEAD;
+	hba[i]->queue = q;
+	q->queuedata = hba[i];
+
+	/* Initialize the pdev driver private data; have it point to hba[i]. */
+	pci_set_drvdata(pdev, hba[i]);
+	/* command and error info recs zeroed out before they are used */
+        memset(hba[i]->cmd_pool_bits, 0, ((NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG)*sizeof(unsigned long));
+
+#ifdef CCISS_DEBUG	
+	printk(KERN_DEBUG "Scanning for drives on controller cciss%d\n",i);
+#endif /* CCISS_DEBUG */
+
+	cciss_getgeometry(i);
+
+	cciss_scsi_setup(i);
+
+	/* Turn the interrupts on so we can service requests */
+	hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
+
+	cciss_procinit(i);
+
+	blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
+
+	/* This is a hardware imposed limit. */
+	blk_queue_max_hw_segments(q, MAXSGENTRIES);
+
+	/* This is a limit in the driver and could be eliminated. */
+	blk_queue_max_phys_segments(q, MAXSGENTRIES);
+
+	blk_queue_max_sectors(q, 512);
+
+
+	for(j=0; j<NWD; j++) {
+		drive_info_struct *drv = &(hba[i]->drv[j]);
+		struct gendisk *disk = hba[i]->gendisk[j];
+
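+		/* Each logical drive gets a block of 1 << NWD_SHIFT (16)
+		   minor numbers: the whole disk plus its partitions. */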
+		sprintf(disk->disk_name, "cciss/c%dd%d", i, j);
+		sprintf(disk->devfs_name, "cciss/host%d/target%d", i, j);
+		disk->major = hba[i]->major;
+		disk->first_minor = j << NWD_SHIFT;
+		disk->fops = &cciss_fops;
+		disk->queue = hba[i]->queue;
+		disk->private_data = drv;
+		/* we must register the controller even if no disks exist */
+		/* this is for the online array utilities */
+		if(!drv->heads && j)
+			continue;
+		blk_queue_hardsect_size(hba[i]->queue, drv->block_size);
+		set_capacity(disk, drv->nr_blocks);
+		add_disk(disk);
+	}
+	return(1);
+
+clean4:
+	if(hba[i]->cmd_pool_bits)
+               	kfree(hba[i]->cmd_pool_bits);
+	if(hba[i]->cmd_pool)
+		pci_free_consistent(hba[i]->pdev,
+			NR_CMDS * sizeof(CommandList_struct),
+			hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
+	if(hba[i]->errinfo_pool)
+		pci_free_consistent(hba[i]->pdev,
+			NR_CMDS * sizeof( ErrorInfo_struct),
+			hba[i]->errinfo_pool,
+			hba[i]->errinfo_pool_dhandle);
+	free_irq(hba[i]->intr, hba[i]);
+clean2:
+	unregister_blkdev(hba[i]->major, hba[i]->devname);
+clean1:
+	release_io_mem(hba[i]);
+	free_hba(i);
+	return(-1);
+}
+
+static void __devexit cciss_remove_one (struct pci_dev *pdev)
+{
+	ctlr_info_t *tmp_ptr;
+	int i, j;
+	char flush_buf[4];
+	int return_code; 
+
+	if (pci_get_drvdata(pdev) == NULL)
+	{
+		printk( KERN_ERR "cciss: Unable to remove device \n");
+		return;
+	}
+	tmp_ptr = pci_get_drvdata(pdev);
+	i = tmp_ptr->ctlr;
+	if (hba[i] == NULL) 
+	{
+		printk(KERN_ERR "cciss: device appears to "
+			"already be removed \n");
+		return;
+	}
+	/* Turn board interrupts off and send the flush cache command.
+	 * sendcmd will turn off interrupts and send the flush so that
+	 * all data in the battery-backed cache is written to disk. */
+	memset(flush_buf, 0, 4);
+	return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL,
+				TYPE_CMD);
+	if(return_code != IO_OK)
+	{
+		printk(KERN_WARNING "Error Flushing cache on controller %d\n", 
+			i);
+	}
+	free_irq(hba[i]->intr, hba[i]);
+	pci_set_drvdata(pdev, NULL);
+	iounmap(hba[i]->vaddr);
+	cciss_unregister_scsi(i);  /* unhook from SCSI subsystem */
+	unregister_blkdev(hba[i]->major, hba[i]->devname);
+	remove_proc_entry(hba[i]->devname, proc_cciss);	
+	
+	/* remove it from the disk list */
+	for (j = 0; j < NWD; j++) {
+		struct gendisk *disk = hba[i]->gendisk[j];
+		if (disk->flags & GENHD_FL_UP)
+			del_gendisk(disk);
+	}
+
+	blk_cleanup_queue(hba[i]->queue);
+	pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof(CommandList_struct),
+			    hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
+	pci_free_consistent(hba[i]->pdev, NR_CMDS * sizeof( ErrorInfo_struct),
+		hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle);
+	kfree(hba[i]->cmd_pool_bits);
+ 	release_io_mem(hba[i]);
+	free_hba(i);
+}	
+
+static struct pci_driver cciss_pci_driver = {
+	.name =		"cciss",
+	.probe =	cciss_init_one,
+	.remove =	__devexit_p(cciss_remove_one),
+	.id_table =	cciss_pci_device_id,
+};
+
+/*
+ *  This is it.  Register the PCI driver information for the cards we control;
+ *  the OS will call our registered routines when it finds one of our cards.
+ */
+static int __init cciss_init(void)
+{
+	printk(KERN_INFO DRIVER_NAME "\n");
+
+	/* Register for our PCI devices */
+	return pci_module_init(&cciss_pci_driver);
+}
+
+static void __exit cciss_cleanup(void)
+{
+	int i;
+
+	pci_unregister_driver(&cciss_pci_driver);
+	/* double check that all controller entries have been removed */
+	for (i=0; i< MAX_CTLR; i++) 
+	{
+		if (hba[i] != NULL)
+		{
+			printk(KERN_WARNING "cciss: had to remove"
+					" controller %d\n", i);
+			cciss_remove_one(hba[i]->pdev);
+		}
+	}
+	remove_proc_entry("cciss", proc_root_driver);
+}
+
+module_init(cciss_init);
+module_exit(cciss_cleanup);
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
new file mode 100644
index 0000000..8fb1920
--- /dev/null
+++ b/drivers/block/cciss.h
@@ -0,0 +1,266 @@
+#ifndef CCISS_H
+#define CCISS_H
+
+#include <linux/genhd.h>
+
+#include "cciss_cmd.h"
+
+
+#define NWD		16
+#define NWD_SHIFT	4
+#define MAX_PART	(1 << NWD_SHIFT)
+
+#define IO_OK		0
+#define IO_ERROR	1
+
+#define MAJOR_NR COMPAQ_CISS_MAJOR
+
+struct ctlr_info;
+typedef struct ctlr_info ctlr_info_t;
+
+struct access_method {
+	void (*submit_command)(ctlr_info_t *h, CommandList_struct *c);
+	void (*set_intr_mask)(ctlr_info_t *h, unsigned long val);
+	unsigned long (*fifo_full)(ctlr_info_t *h);
+	unsigned long (*intr_pending)(ctlr_info_t *h);
+	unsigned long (*command_completed)(ctlr_info_t *h);
+};
+typedef struct _drive_info_struct
+{
+ 	__u32   LunID;	
+	int 	usage_count;
+	sector_t nr_blocks;
+	int	block_size;
+	int 	heads;
+	int	sectors;
+	int 	cylinders;
+	int	raid_level;
+} drive_info_struct;
+
+struct ctlr_info 
+{
+	int	ctlr;
+	char	devname[8];
+	char    *product_name;
+	char	firm_ver[4]; // Firmware version 
+	struct pci_dev *pdev;
+	__u32	board_id;
+	void __iomem *vaddr;
+	unsigned long paddr;
+	unsigned long io_mem_addr;
+	unsigned long io_mem_length;
+	CfgTable_struct __iomem *cfgtable;
+	unsigned int intr;
+	int	interrupts_enabled;
+	int	major;
+	int 	max_commands;
+	int	commands_outstanding;
+	int 	max_outstanding; /* Debug */ 
+	int	num_luns;
+	int 	highest_lun;
+	int	usage_count;  /* number of opens on all minor devices */
+
+	// information about each logical volume
+	drive_info_struct drv[CISS_MAX_LUN];
+
+	struct access_method access;
+
+	/* queue and queue Info */ 
+	CommandList_struct *reqQ;
+	CommandList_struct  *cmpQ;
+	unsigned int Qdepth;
+	unsigned int maxQsinceinit;
+	unsigned int maxSG;
+	spinlock_t lock;
+	struct request_queue *queue;
+
+	/* pointers to command and error info pool */
+	CommandList_struct 	*cmd_pool;
+	dma_addr_t		cmd_pool_dhandle; 
+	ErrorInfo_struct 	*errinfo_pool;
+	dma_addr_t		errinfo_pool_dhandle; 
+        unsigned long  		*cmd_pool_bits;
+	int			nr_allocs;
+	int			nr_frees; 
+	int			busy_configuring;
+
+	/* This element holds the zero based queue number of the last
+	 * queue to be started.  It is used for fairness.
+	*/
+	int			next_to_run;
+
+	// Disk structures we need to pass back
+	struct gendisk   *gendisk[NWD];
+#ifdef CONFIG_CISS_SCSI_TAPE
+	void *scsi_ctlr; /* ptr to structure containing scsi related stuff */
+#endif
+};
+
+/*  Defining the different access methods */
+/*
+ * Memory mapped FIFO interface (SMART 53xx cards)
+ */
+#define SA5_DOORBELL	0x20
+#define SA5_REQUEST_PORT_OFFSET	0x40
+#define SA5_REPLY_INTR_MASK_OFFSET	0x34
+#define SA5_REPLY_PORT_OFFSET		0x44
+#define SA5_INTR_STATUS		0x30
+#define SA5_SCRATCHPAD_OFFSET	0xB0
+
+#define SA5_CTCFG_OFFSET	0xB4
+#define SA5_CTMEM_OFFSET	0xB8
+
+#define SA5_INTR_OFF		0x08
+#define SA5B_INTR_OFF		0x04
+#define SA5_INTR_PENDING	0x08
+#define SA5B_INTR_PENDING	0x04
+#define FIFO_EMPTY		0xffffffff	
+#define CCISS_FIRMWARE_READY	0xffff0000 /* value in scratchpad register */
+
+#define  CISS_ERROR_BIT		0x02
+
+#define CCISS_INTR_ON 	1 
+#define CCISS_INTR_OFF	0
+/* 
+	Send the command to the hardware 
+*/
+static void SA5_submit_command( ctlr_info_t *h, CommandList_struct *c) 
+{
+#ifdef CCISS_DEBUG
+	 printk("Sending %x - down to controller\n", c->busaddr );
+#endif /* CCISS_DEBUG */ 
+         writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET);
+	 h->commands_outstanding++;
+	 if ( h->commands_outstanding > h->max_outstanding)
+		h->max_outstanding = h->commands_outstanding;
+}
+
+/*  
+ *  This card is the opposite of the other cards.  
+ *   0 turns interrupts on... 
+ *   0x08 turns them off... 
+ */
+static void SA5_intr_mask(ctlr_info_t *h, unsigned long val)
+{
+	if (val) 
+	{ /* Turn interrupts on */
+		h->interrupts_enabled = 1;
+		writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+	} else /* Turn them off */
+	{
+		h->interrupts_enabled = 0;
+        	writel( SA5_INTR_OFF, 
+			h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+	}
+}
+/*
+ *  This card is the opposite of the other cards.
+ *   0 turns interrupts on...
+ *   0x04 turns them off...
+ */
+static void SA5B_intr_mask(ctlr_info_t *h, unsigned long val)
+{
+        if (val)
+        { /* Turn interrupts on */
+		h->interrupts_enabled = 1;
+                writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+        } else /* Turn them off */
+        {
+		h->interrupts_enabled = 0;
+                writel( SA5B_INTR_OFF,
+                        h->vaddr + SA5_REPLY_INTR_MASK_OFFSET);
+        }
+}
+/*
+ *  Returns true if fifo is full.  
+ * 
+ */ 
+static unsigned long SA5_fifo_full(ctlr_info_t *h)
+{
+	if( h->commands_outstanding >= h->max_commands)
+		return(1);
+	else 
+		return(0);
+
+}
+/* 
+ *   returns value read from hardware. 
+ *     returns FIFO_EMPTY if there is nothing to read 
+ */ 
+static unsigned long SA5_completed(ctlr_info_t *h)
+{
+	unsigned long register_value 
+		= readl(h->vaddr + SA5_REPLY_PORT_OFFSET);
+	if(register_value != FIFO_EMPTY)
+	{
+		h->commands_outstanding--;
+#ifdef CCISS_DEBUG
+		printk("cciss:  Read %lx back from board\n", register_value);
+#endif /* CCISS_DEBUG */ 
+	} 
+#ifdef CCISS_DEBUG
+	else
+	{
+		printk("cciss:  FIFO Empty read\n");
+	}
+#endif 
+	return ( register_value); 
+
+}
+/*
+ *	Returns true if an interrupt is pending.
+ */
+static unsigned long SA5_intr_pending(ctlr_info_t *h)
+{
+	unsigned long register_value  = 
+		readl(h->vaddr + SA5_INTR_STATUS);
+#ifdef CCISS_DEBUG
+	printk("cciss: intr_pending %lx\n", register_value);
+#endif  /* CCISS_DEBUG */
+	if( register_value &  SA5_INTR_PENDING) 
+		return  1;	
+	return 0 ;
+}
+
+/*
+ *      Returns true if an interrupt is pending.
+ */
+static unsigned long SA5B_intr_pending(ctlr_info_t *h)
+{
+        unsigned long register_value  =
+                readl(h->vaddr + SA5_INTR_STATUS);
+#ifdef CCISS_DEBUG
+        printk("cciss: intr_pending %lx\n", register_value);
+#endif  /* CCISS_DEBUG */
+        if( register_value &  SA5B_INTR_PENDING)
+                return  1;
+        return 0 ;
+}
+
+
+static struct access_method SA5_access = {
+	SA5_submit_command,
+	SA5_intr_mask,
+	SA5_fifo_full,
+	SA5_intr_pending,
+	SA5_completed,
+};
+
+static struct access_method SA5B_access = {
+        SA5_submit_command,
+        SA5B_intr_mask,
+        SA5_fifo_full,
+        SA5B_intr_pending,
+        SA5_completed,
+};
+
+struct board_type {
+	__u32	board_id;
+	char	*product_name;
+	struct access_method *access;
+};
+
+#define CCISS_LOCK(i)	(hba[i]->queue->queue_lock)
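+/* The spinlock handed to blk_init_queue() in cciss.c becomes the request
+   queue's queue_lock, so the block and SCSI sides of the driver serialize
+   on the same per-controller lock. */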
+
+#endif /* CCISS_H */
+
diff --git a/drivers/block/cciss_cmd.h b/drivers/block/cciss_cmd.h
new file mode 100644
index 0000000..a88a888
--- /dev/null
+++ b/drivers/block/cciss_cmd.h
@@ -0,0 +1,271 @@
+#ifndef CCISS_CMD_H
+#define CCISS_CMD_H
+//###########################################################################
+//DEFINES
+//###########################################################################
+#define CISS_VERSION "1.00"
+
+// general boundary definitions
+#define SENSEINFOBYTES          32 // note that this value may vary between host implementations
+#define MAXSGENTRIES            31
+#define MAXREPLYQS              256
+
+//Command Status value
+#define CMD_SUCCESS             0x0000
+#define CMD_TARGET_STATUS       0x0001
+#define CMD_DATA_UNDERRUN       0x0002
+#define CMD_DATA_OVERRUN        0x0003
+#define CMD_INVALID             0x0004
+#define CMD_PROTOCOL_ERR        0x0005
+#define CMD_HARDWARE_ERR        0x0006
+#define CMD_CONNECTION_LOST     0x0007
+#define CMD_ABORTED             0x0008
+#define CMD_ABORT_FAILED        0x0009
+#define CMD_UNSOLICITED_ABORT   0x000A
+#define CMD_TIMEOUT             0x000B
+#define CMD_UNABORTABLE		0x000C
+
+//transfer direction
+#define XFER_NONE               0x00
+#define XFER_WRITE              0x01
+#define XFER_READ               0x02
+#define XFER_RSVD               0x03
+
+//task attribute
+#define ATTR_UNTAGGED           0x00
+#define ATTR_SIMPLE             0x04
+#define ATTR_HEADOFQUEUE        0x05
+#define ATTR_ORDERED            0x06
+#define ATTR_ACA                0x07
+
+//cdb type
+#define TYPE_CMD				0x00
+#define TYPE_MSG				0x01
+
+//config space register offsets
+#define CFG_VENDORID            0x00
+#define CFG_DEVICEID            0x02
+#define CFG_I2OBAR              0x10
+#define CFG_MEM1BAR             0x14
+
+//i2o space register offsets
+#define I2O_IBDB_SET            0x20
+#define I2O_IBDB_CLEAR          0x70
+#define I2O_INT_STATUS          0x30
+#define I2O_INT_MASK            0x34
+#define I2O_IBPOST_Q            0x40
+#define I2O_OBPOST_Q            0x44
+
+//Configuration Table
+#define CFGTBL_ChangeReq        0x00000001l
+#define CFGTBL_AccCmds          0x00000001l
+
+#define CFGTBL_Trans_Simple     0x00000002l
+
+#define CFGTBL_BusType_Ultra2   0x00000001l
+#define CFGTBL_BusType_Ultra3   0x00000002l
+#define CFGTBL_BusType_Fibre1G  0x00000100l
+#define CFGTBL_BusType_Fibre2G  0x00000200l
+typedef struct _vals32
+{
+        __u32   lower;
+        __u32   upper;
+} vals32;
+
+typedef union _u64bit
+{
+   vals32	val32;
+   __u64	val;
+} u64bit;
+
+// Type defs used in the following structs
+#define BYTE __u8
+#define WORD __u16
+#define HWORD __u16
+#define DWORD __u32
+#define QWORD vals32 
+
+//###########################################################################
+//STRUCTURES
+//###########################################################################
+#define CISS_MAX_LUN	16	
+#define CISS_MAX_PHYS_LUN	1024
+// SCSI-3 Commands
+
+#pragma pack(1)	
+
+#define CISS_INQUIRY 0x12
+// Data returned
+typedef struct _InquiryData_struct
+{
+  BYTE data_byte[36];
+} InquiryData_struct;
+
+#define CISS_REPORT_LOG 0xc2    /* Report Logical LUNs */
+#define CISS_REPORT_PHYS 0xc3   /* Report Physical LUNs */
+// Data returned
+typedef struct _ReportLUNdata_struct
+{
+  BYTE LUNListLength[4];
+  DWORD reserved;
+  BYTE LUN[CISS_MAX_LUN][8];
+} ReportLunData_struct;
+
+#define CCISS_READ_CAPACITY 0x25 /* Read Capacity */ 
+typedef struct _ReadCapdata_struct
+{
+  BYTE total_size[4];	// Total size in blocks
+  BYTE block_size[4];	// Size of blocks in bytes
+} ReadCapdata_struct;
+
+// 12 byte commands not implemented in firmware yet. 
+// #define CCISS_READ 	0xa8	// Read(12)
+// #define CCISS_WRITE	0xaa	// Write(12)
+ #define CCISS_READ   0x28    // Read(10)
+ #define CCISS_WRITE  0x2a    // Write(10)
+
+// BMIC commands 
+#define BMIC_READ 0x26
+#define BMIC_WRITE 0x27
+#define BMIC_CACHE_FLUSH 0xc2
+#define CCISS_CACHE_FLUSH 0x01	//C2 was already being used by CCISS
+
+//Command List Structure
+typedef union _SCSI3Addr_struct {
+   struct {
+    BYTE Dev;
+    BYTE Bus:6;
+    BYTE Mode:2;        // b00
+  } PeripDev;
+   struct {
+    BYTE DevLSB;
+    BYTE DevMSB:6;
+    BYTE Mode:2;        // b01
+  } LogDev;
+   struct {
+    BYTE Dev:5;
+    BYTE Bus:3;
+    BYTE Targ:6;
+    BYTE Mode:2;        // b10
+  } LogUnit;
+} SCSI3Addr_struct;
+
+typedef struct _PhysDevAddr_struct {
+  DWORD             TargetId:24;
+  DWORD             Bus:6;
+  DWORD             Mode:2;
+  SCSI3Addr_struct  Target[2]; //2 level target device addr
+} PhysDevAddr_struct;
+  
+typedef struct _LogDevAddr_struct {
+  DWORD            VolId:30;
+  DWORD            Mode:2;
+  BYTE             reserved[4];
+} LogDevAddr_struct;
+
+typedef union _LUNAddr_struct {
+  BYTE               LunAddrBytes[8];
+  SCSI3Addr_struct   SCSI3Lun[4];
+  PhysDevAddr_struct PhysDev;
+  LogDevAddr_struct  LogDev;
+} LUNAddr_struct;
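+/* The same 8 address bytes are interpreted according to the 2-bit Mode
+   field: b00 = peripheral device, b01 = logical device (volume), b10 =
+   logical unit; LunAddrBytes exposes the raw bytes for copies and
+   compares. */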
+
+typedef struct _CommandListHeader_struct {
+  BYTE              ReplyQueue;
+  BYTE              SGList;
+  HWORD             SGTotal;
+  QWORD             Tag;
+  LUNAddr_struct    LUN;
+} CommandListHeader_struct;
+typedef struct _RequestBlock_struct {
+  BYTE   CDBLen;
+  struct {
+    BYTE Type:3;
+    BYTE Attribute:3;
+    BYTE Direction:2;
+  } Type;
+  HWORD  Timeout;
+  BYTE   CDB[16];
+} RequestBlock_struct;
+typedef struct _ErrDescriptor_struct {
+  QWORD  Addr;
+  DWORD  Len;
+} ErrDescriptor_struct;
+typedef struct _SGDescriptor_struct {
+  QWORD  Addr;
+  DWORD  Len;
+  DWORD  Ext;
+} SGDescriptor_struct;
+
+typedef union _MoreErrInfo_struct{
+  struct {
+    BYTE  Reserved[3];
+    BYTE  Type;
+    DWORD ErrorInfo;
+  }Common_Info;
+  struct{
+    BYTE  Reserved[2];
+    BYTE  offense_size; // size of offending entry
+    BYTE  offense_num;  // byte # of offense, 0-based
+    DWORD offense_value;
+  }Invalid_Cmd;
+}MoreErrInfo_struct;
+typedef struct _ErrorInfo_struct {
+  BYTE               ScsiStatus;
+  BYTE               SenseLen;
+  HWORD              CommandStatus;
+  DWORD              ResidualCnt;
+  MoreErrInfo_struct MoreErrInfo;
+  BYTE               SenseInfo[SENSEINFOBYTES];
+} ErrorInfo_struct;
+
+/* Command types */
+#define CMD_RWREQ       0x00
+#define CMD_IOCTL_PEND  0x01
+#define CMD_SCSI	0x03
+#define CMD_MSG_DONE	0x04
+#define CMD_MSG_TIMEOUT 0x05
+
+typedef struct _CommandList_struct {
+  CommandListHeader_struct Header;
+  RequestBlock_struct      Request;
+  ErrDescriptor_struct     ErrDesc;
+  SGDescriptor_struct      SG[MAXSGENTRIES];
+	/* information associated with the command */ 
+  __u32			   busaddr; /* physical address of this record */
+  ErrorInfo_struct * 	   err_info; /* pointer to the allocated mem */ 
+  int			   ctlr;
+  int			   cmd_type; 
+  struct _CommandList_struct *prev;
+  struct _CommandList_struct *next;
+  struct request *	   rq;
+  struct completion *waiting;
+  int	 retry_count;
+#ifdef CONFIG_CISS_SCSI_TAPE
+  void * scsi_cmd;
+#endif
+} CommandList_struct;
+
+//Configuration Table Structure
+typedef struct _HostWrite_struct {
+  DWORD TransportRequest;
+  DWORD Reserved;
+  DWORD CoalIntDelay;
+  DWORD CoalIntCount;
+} HostWrite_struct;
+
+typedef struct _CfgTable_struct {
+  BYTE             Signature[4];
+  DWORD            SpecValence;
+  DWORD            TransportSupport;
+  DWORD            TransportActive;
+  HostWrite_struct HostWrite;
+  DWORD            CmdsOutMax;
+  DWORD            BusTypes;
+  DWORD            Reserved; 
+  BYTE             ServerName[16];
+  DWORD            HeartBeat;
+  DWORD            SCSI_Prefetch;
+} CfgTable_struct;
+#pragma pack()	 
+#endif // CCISS_CMD_H
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
new file mode 100644
index 0000000..f16e3ca
--- /dev/null
+++ b/drivers/block/cciss_scsi.c
@@ -0,0 +1,1417 @@
+/*
+ *    Disk Array driver for Compaq SA53xx Controllers, SCSI Tape module
+ *    Copyright 2001 Compaq Computer Corporation
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2 of the License, or
+ *    (at your option) any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *    NON INFRINGEMENT.  See the GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
+ *    
+ *    Author: Stephen M. Cameron
+ */
+#ifdef CONFIG_CISS_SCSI_TAPE
+
+/* Here we have code to present the driver as a scsi driver 
+   as it is simultaneously presented as a block driver.  The 
+   reason for doing this is to allow access to SCSI tape drives
+   through the array controller.  Note in particular, neither 
+   physical nor logical disks are presented through the scsi layer. */
+
+#include <scsi/scsi.h> 
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h> 
+#include <asm/atomic.h>
+#include <linux/timer.h>
+#include <linux/completion.h>
+
+#include "cciss_scsi.h"
+
+/* some prototypes... */ 
+static int sendcmd(
+	__u8	cmd,
+	int	ctlr,
+	void	*buff,
+	size_t	size,
+	unsigned int use_unit_num, /* 0: address the controller,
+				      1: address logical volume log_unit, 
+				      2: address is in scsi3addr */
+	unsigned int log_unit,
+	__u8	page_code,
+	unsigned char *scsi3addr,
+	int cmd_type);
+
+
+static int cciss_scsi_proc_info(
+		struct Scsi_Host *sh,
+		char *buffer, /* data buffer */
+		char **start, 	   /* where data in buffer starts */
+		off_t offset,	   /* offset from start of imaginary file */
+		int length, 	   /* length of data in buffer */
+		int func);	   /* 0 == read, 1 == write */
+
+static int cciss_scsi_queue_command (struct scsi_cmnd *cmd,
+		void (* done)(struct scsi_cmnd *));
+
+static struct cciss_scsi_hba_t ccissscsi[MAX_CTLR] = {
+	{ .name = "cciss0", .ndevices = 0 },
+	{ .name = "cciss1", .ndevices = 0 },
+	{ .name = "cciss2", .ndevices = 0 },
+	{ .name = "cciss3", .ndevices = 0 },
+	{ .name = "cciss4", .ndevices = 0 },
+	{ .name = "cciss5", .ndevices = 0 },
+	{ .name = "cciss6", .ndevices = 0 },
+	{ .name = "cciss7", .ndevices = 0 },
+};
+
+static struct scsi_host_template cciss_driver_template = {
+	.module			= THIS_MODULE,
+	.name			= "cciss",
+	.proc_name		= "cciss",
+	.proc_info		= cciss_scsi_proc_info,
+	.queuecommand		= cciss_scsi_queue_command,
+	.can_queue		= SCSI_CCISS_CAN_QUEUE,
+	.this_id		= 7,
+	.sg_tablesize		= MAXSGENTRIES,
+	.cmd_per_lun		= 1,
+	.use_clustering		= DISABLE_CLUSTERING,
+};
+
+#pragma pack(1)
+struct cciss_scsi_cmd_stack_elem_t {
+	CommandList_struct cmd;
+	ErrorInfo_struct Err;
+	__u32 busaddr;
+};
+
+#pragma pack()
+
+#define CMD_STACK_SIZE (SCSI_CCISS_CAN_QUEUE * \
+		CCISS_MAX_SCSI_DEVS_PER_HBA + 2)
+			// plus two for init time usage
+
+#pragma pack(1)
+struct cciss_scsi_cmd_stack_t {
+	struct cciss_scsi_cmd_stack_elem_t *pool;
+	struct cciss_scsi_cmd_stack_elem_t *elem[CMD_STACK_SIZE];
+	dma_addr_t cmd_pool_handle;
+	int top;
+};
+#pragma pack()
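+/* The command stack is a LIFO of pointers into a single DMA-consistent
+   pool; "top" indexes the highest free element, and the stack is empty
+   when top goes negative. */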
+
+struct cciss_scsi_adapter_data_t {
+	struct Scsi_Host *scsi_host;
+	struct cciss_scsi_cmd_stack_t cmd_stack;
+	int registered;
+	spinlock_t lock; // to protect ccissscsi[ctlr]; 
+};
+
+#define CPQ_TAPE_LOCK(ctlr, flags) spin_lock_irqsave( \
+	&(((struct cciss_scsi_adapter_data_t *) \
+	hba[ctlr]->scsi_ctlr)->lock), flags);
+#define CPQ_TAPE_UNLOCK(ctlr, flags) spin_unlock_irqrestore( \
+	&(((struct cciss_scsi_adapter_data_t *) \
+	hba[ctlr]->scsi_ctlr)->lock), flags);
+
+static CommandList_struct *
+scsi_cmd_alloc(ctlr_info_t *h)
+{
+	/* Assume only one process in here at a time; locking is done by the
+	 * caller (use CCISS_LOCK(ctlr)).  It might be better to rewrite how
+	 * we allocate scsi commands in a way that needs no locking at all. */
+
+	/* take the top memory chunk off the stack and return it, if any. */
+	struct cciss_scsi_cmd_stack_elem_t *c;
+	struct cciss_scsi_adapter_data_t *sa;
+	struct cciss_scsi_cmd_stack_t *stk;
+	u64bit temp64;
+
+	sa = (struct cciss_scsi_adapter_data_t *) h->scsi_ctlr;
+	stk = &sa->cmd_stack; 
+
+	if (stk->top < 0) 
+		return NULL;
+	c = stk->elem[stk->top]; 	
+	/* memset(c, 0, sizeof(*c)); */
+	memset(&c->cmd, 0, sizeof(c->cmd));
+	memset(&c->Err, 0, sizeof(c->Err));
+	/* set physical addr of cmd and addr of scsi parameters */
+	c->cmd.busaddr = c->busaddr; 
+	/* (__u32) (stk->cmd_pool_handle + 
+		(sizeof(struct cciss_scsi_cmd_stack_elem_t)*stk->top)); */
+
+	temp64.val = (__u64) (c->busaddr + sizeof(CommandList_struct));
+	/* (__u64) (stk->cmd_pool_handle + 
+		(sizeof(struct cciss_scsi_cmd_stack_elem_t)*stk->top) +
+		 sizeof(CommandList_struct)); */
+	stk->top--;
+	c->cmd.ErrDesc.Addr.lower = temp64.val32.lower;
+	c->cmd.ErrDesc.Addr.upper = temp64.val32.upper;
+	c->cmd.ErrDesc.Len = sizeof(ErrorInfo_struct);
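+	/* Each stack element lays the ErrorInfo_struct immediately after
+	   the CommandList_struct (see cciss_scsi_cmd_stack_elem_t), so the
+	   error buffer's bus address is busaddr + sizeof(CommandList_struct). */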
+	
+	c->cmd.ctlr = h->ctlr;
+	c->cmd.err_info = &c->Err;
+
+	return (CommandList_struct *) c;
+}
+
+static void 
+scsi_cmd_free(ctlr_info_t *h, CommandList_struct *cmd)
+{
+	/* assume only one process in here at a time, locking done by caller. */
+	/* use CCISS_LOCK(ctlr) */
+	/* drop the free memory chunk on top of the stack. */
+
+	struct cciss_scsi_adapter_data_t *sa;
+	struct cciss_scsi_cmd_stack_t *stk;
+
+	sa = (struct cciss_scsi_adapter_data_t *) h->scsi_ctlr;
+	stk = &sa->cmd_stack; 
+	if (stk->top >= CMD_STACK_SIZE) {
+		printk("cciss: scsi_cmd_free called too many times.\n");
+		BUG();
+	}
+	stk->top++;
+	stk->elem[stk->top] = (struct cciss_scsi_cmd_stack_elem_t *) cmd;
+}
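+/*
+ * Usage sketch (mirroring the callers below): allocation and free must be
+ * serialized by the caller with the per-controller lock, e.g.
+ *
+ *	spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+ *	cp = scsi_cmd_alloc(h);
+ *	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+ *	... build and issue the command, wait for completion ...
+ *	spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
+ *	scsi_cmd_free(h, cp);
+ *	spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
+ */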
+
+static int
+scsi_cmd_stack_setup(int ctlr, struct cciss_scsi_adapter_data_t *sa)
+{
+	int i;
+	struct cciss_scsi_cmd_stack_t *stk;
+	size_t size;
+
+	stk = &sa->cmd_stack; 
+	size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * CMD_STACK_SIZE;
+
+	// pci_alloc_consistent guarantees 32-bit DMA address will
+	// be used
+
+	stk->pool = (struct cciss_scsi_cmd_stack_elem_t *)
+		pci_alloc_consistent(hba[ctlr]->pdev, size, &stk->cmd_pool_handle);
+
+	if (stk->pool == NULL) {
+		printk("stk->pool is null\n");
+		return -1;
+	}
+
+	for (i=0; i<CMD_STACK_SIZE; i++) {
+		stk->elem[i] = &stk->pool[i];
+		stk->elem[i]->busaddr = (__u32) (stk->cmd_pool_handle + 
+			(sizeof(struct cciss_scsi_cmd_stack_elem_t) * i));
+	}
+	stk->top = CMD_STACK_SIZE-1;
+	return 0;
+}
+
+static void
+scsi_cmd_stack_free(int ctlr)
+{
+	struct cciss_scsi_adapter_data_t *sa;
+	struct cciss_scsi_cmd_stack_t *stk;
+	size_t size;
+
+	sa = (struct cciss_scsi_adapter_data_t *) hba[ctlr]->scsi_ctlr;
+	stk = &sa->cmd_stack; 
+	if (stk->top != CMD_STACK_SIZE-1) {
+		printk( "cciss: %d scsi commands are still outstanding.\n",
+			CMD_STACK_SIZE - stk->top);
+		// BUG();
+		printk("WE HAVE A BUG HERE!!! stk=0x%p\n", stk);
+	}
+	size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * CMD_STACK_SIZE;
+
+	pci_free_consistent(hba[ctlr]->pdev, size, stk->pool, stk->cmd_pool_handle);
+	stk->pool = NULL;
+}
+
+/* scsi_device_types comes from scsi.h */
+#define DEVICETYPE(n) (((n) < 0 || (n) >= MAX_SCSI_DEVICE_CODE) ? \
+	"Unknown" : scsi_device_types[n])
+
+#if 0
+static int xmargin=8;
+static int amargin=60;
+
+static void
+print_bytes (unsigned char *c, int len, int hex, int ascii)
+{
+
+	int i;
+	unsigned char *x;
+
+	if (hex)
+	{
+		x = c;
+		for (i=0;i<len;i++)
+		{
+			if ((i % xmargin) == 0 && i>0) printk("\n");
+			if ((i % xmargin) == 0) printk("0x%04x:", i);
+			printk(" %02x", *x);
+			x++;
+		}
+		printk("\n");
+	}
+	if (ascii)
+	{
+		x = c;
+		for (i=0;i<len;i++)
+		{
+			if ((i % amargin) == 0 && i>0) printk("\n");
+			if ((i % amargin) == 0) printk("0x%04x:", i);
+			if (*x > 26 && *x < 128) printk("%c", *x);
+			else printk(".");
+			x++;
+		}
+		printk("\n");
+	}
+}
+
+static void
+print_cmd(CommandList_struct *cp)
+{
+	printk("queue:%d\n", cp->Header.ReplyQueue);
+	printk("sglist:%d\n", cp->Header.SGList);
+	printk("sgtot:%d\n", cp->Header.SGTotal);
+	printk("Tag:0x%08x/0x%08x\n", cp->Header.Tag.upper, 
+			cp->Header.Tag.lower);
+	printk("LUN:0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+		cp->Header.LUN.LunAddrBytes[0],
+		cp->Header.LUN.LunAddrBytes[1],
+		cp->Header.LUN.LunAddrBytes[2],
+		cp->Header.LUN.LunAddrBytes[3],
+		cp->Header.LUN.LunAddrBytes[4],
+		cp->Header.LUN.LunAddrBytes[5],
+		cp->Header.LUN.LunAddrBytes[6],
+		cp->Header.LUN.LunAddrBytes[7]);
+	printk("CDBLen:%d\n", cp->Request.CDBLen);
+	printk("Type:%d\n",cp->Request.Type.Type);
+	printk("Attr:%d\n",cp->Request.Type.Attribute);
+	printk(" Dir:%d\n",cp->Request.Type.Direction);
+	printk("Timeout:%d\n",cp->Request.Timeout);
+	printk( "CDB: %02x %02x %02x %02x %02x %02x %02x %02x"
+		" %02x %02x %02x %02x %02x %02x %02x %02x\n",
+		cp->Request.CDB[0], cp->Request.CDB[1],
+		cp->Request.CDB[2], cp->Request.CDB[3],
+		cp->Request.CDB[4], cp->Request.CDB[5],
+		cp->Request.CDB[6], cp->Request.CDB[7],
+		cp->Request.CDB[8], cp->Request.CDB[9],
+		cp->Request.CDB[10], cp->Request.CDB[11],
+		cp->Request.CDB[12], cp->Request.CDB[13],
+		cp->Request.CDB[14], cp->Request.CDB[15]);
+	printk("edesc.Addr: 0x%08x/0x%08x, Len = %d\n",
+		cp->ErrDesc.Addr.upper, cp->ErrDesc.Addr.lower,
+			cp->ErrDesc.Len);
+	printk("sgs..........Errorinfo:\n");
+	printk("scsistatus:%d\n", cp->err_info->ScsiStatus);
+	printk("senselen:%d\n", cp->err_info->SenseLen);
+	printk("cmd status:%d\n", cp->err_info->CommandStatus);
+	printk("resid cnt:%d\n", cp->err_info->ResidualCnt);
+	printk("offense size:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_size);
+	printk("offense byte:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_num);
+	printk("offense value:%d\n", cp->err_info->MoreErrInfo.Invalid_Cmd.offense_value);
+			
+}
+
+#endif
+
+static int 
+find_bus_target_lun(int ctlr, int *bus, int *target, int *lun)
+{
+	/* finds an unused bus, target, lun for a new device */
+	/* assumes hba[ctlr]->scsi_ctlr->lock is held */ 
+	int i, found=0;
+	unsigned char target_taken[CCISS_MAX_SCSI_DEVS_PER_HBA];
+
+	memset(&target_taken[0], 0, CCISS_MAX_SCSI_DEVS_PER_HBA);
+
+	target_taken[SELF_SCSI_ID] = 1;	
+	for (i=0;i<ccissscsi[ctlr].ndevices;i++)
+		target_taken[ccissscsi[ctlr].dev[i].target] = 1;
+	
+	for (i=0;i<CCISS_MAX_SCSI_DEVS_PER_HBA;i++) {
+		if (!target_taken[i]) {
+			*bus = 0; *target=i; *lun = 0; found=1;
+			break;
+		}
+	}
+	return (!found);	
+}
+
+static int 
+cciss_scsi_add_entry(int ctlr, int hostno, 
+		unsigned char *scsi3addr, int devtype)
+{
+	/* assumes hba[ctlr]->scsi_ctlr->lock is held */ 
+	int n = ccissscsi[ctlr].ndevices;
+	struct cciss_scsi_dev_t *sd;
+
+	if (n >= CCISS_MAX_SCSI_DEVS_PER_HBA) {
+		printk("cciss%d: Too many devices, "
+			"some will be inaccessible.\n", ctlr);
+		return -1;
+	}
+	sd = &ccissscsi[ctlr].dev[n];
+	if (find_bus_target_lun(ctlr, &sd->bus, &sd->target, &sd->lun) != 0)
+		return -1;
+	memcpy(&sd->scsi3addr[0], scsi3addr, 8);
+	sd->devtype = devtype;
+	ccissscsi[ctlr].ndevices++;
+
+	/* initially, (before registering with scsi layer) we don't 
+	   know our hostno and we don't want to print anything first 
+	   time anyway (the scsi layer's inquiries will show that info) */
+	if (hostno != -1)
+		printk("cciss%d: %s device c%db%dt%dl%d added.\n", 
+			ctlr, DEVICETYPE(sd->devtype), hostno, 
+			sd->bus, sd->target, sd->lun);
+	return 0;
+}
+
+static void
+cciss_scsi_remove_entry(int ctlr, int hostno, int entry)
+{
+	/* assumes hba[ctlr]->scsi_ctlr->lock is held */ 
+	int i;
+	struct cciss_scsi_dev_t sd;
+
+	if (entry < 0 || entry >= CCISS_MAX_SCSI_DEVS_PER_HBA) return;
+	sd = ccissscsi[ctlr].dev[entry];
+	for (i=entry;i<ccissscsi[ctlr].ndevices-1;i++)
+		ccissscsi[ctlr].dev[i] = ccissscsi[ctlr].dev[i+1];
+	ccissscsi[ctlr].ndevices--;
+	printk("cciss%d: %s device c%db%dt%dl%d removed.\n",
+		ctlr, DEVICETYPE(sd.devtype), hostno, 
+			sd.bus, sd.target, sd.lun);
+}
+
+
+#define SCSI3ADDR_EQ(a,b) ( \
+	(a)[7] == (b)[7] && \
+	(a)[6] == (b)[6] && \
+	(a)[5] == (b)[5] && \
+	(a)[4] == (b)[4] && \
+	(a)[3] == (b)[3] && \
+	(a)[2] == (b)[2] && \
+	(a)[1] == (b)[1] && \
+	(a)[0] == (b)[0])
+
+static int
+adjust_cciss_scsi_table(int ctlr, int hostno,
+	struct cciss_scsi_dev_t sd[], int nsds)
+{
+	/* sd contains scsi3 addresses and devtypes, but
+	   bus, target and lun are not filled in.  This function
+	   takes what's in sd to be the current list and adjusts
+	   ccissscsi[] to be in line with it. */
+
+	int i,j, found, changes=0;
+	struct cciss_scsi_dev_t *csd;
+	unsigned long flags;
+
+	CPQ_TAPE_LOCK(ctlr, flags);
+
+	/* find any devices in ccissscsi[] that are not in 
+	   sd[] and remove them from ccissscsi[] */
+
+	i = 0;
+	while(i<ccissscsi[ctlr].ndevices) {
+		csd = &ccissscsi[ctlr].dev[i];
+		found=0;
+		for (j=0;j<nsds;j++) {
+			if (SCSI3ADDR_EQ(sd[j].scsi3addr,
+				csd->scsi3addr)) {
+				if (sd[j].devtype == csd->devtype)
+					found=2;
+				else
+					found=1;
+				break;
+			}
+		}
+
+		if (found == 0) { /* device no longer present. */ 
+			changes++;
+			/* printk("cciss%d: %s device c%db%dt%dl%d removed.\n",
+				ctlr, DEVICETYPE(csd->devtype), hostno, 
+					csd->bus, csd->target, csd->lun); */
+			cciss_scsi_remove_entry(ctlr, hostno, i);
+			/* note, i not incremented */
+		} 
+		else if (found == 1) { /* device is different kind */
+			changes++;
+			printk("cciss%d: device c%db%dt%dl%d type changed "
+				"(device type now %s).\n",
+				ctlr, hostno, csd->bus, csd->target, csd->lun,
+					DEVICETYPE(csd->devtype));
+			csd->devtype = sd[j].devtype;
+			i++;	/* so just move along. */
+		} else 		/* device is same as it ever was, */
+			i++;	/* so just move along. */
+	}
+
+	/* Now, make sure every device listed in sd[] is also
+ 	   listed in ccissscsi[], adding them if they aren't found */
+
+	for (i=0;i<nsds;i++) {
+		found=0;
+		for (j=0;j<ccissscsi[ctlr].ndevices;j++) {
+			csd = &ccissscsi[ctlr].dev[j];
+			if (SCSI3ADDR_EQ(sd[i].scsi3addr,
+				csd->scsi3addr)) {
+				if (sd[i].devtype == csd->devtype)
+					found=2;	/* found device */
+				else
+					found=1; 	/* found a bug. */
+				break;
+			}
+		}
+		if (!found) {
+			changes++;
+			if (cciss_scsi_add_entry(ctlr, hostno, 
+				&sd[i].scsi3addr[0], sd[i].devtype) != 0)
+				break;
+		} else if (found == 1) {
+			/* should never happen... */
+			changes++;
+			printk("cciss%d: device unexpectedly changed type\n",
+				ctlr);
+			/* but if it does happen, we just ignore that device */
+		}
+	}
+	CPQ_TAPE_UNLOCK(ctlr, flags);
+
+	if (!changes) 
+		printk("cciss%d: No device changes detected.\n", ctlr);
+
+	return 0;
+}
+
+static int
+lookup_scsi3addr(int ctlr, int bus, int target, int lun, char *scsi3addr)
+{
+	int i;
+	struct cciss_scsi_dev_t *sd;
+	unsigned long flags;
+
+	CPQ_TAPE_LOCK(ctlr, flags);
+	for (i=0;i<ccissscsi[ctlr].ndevices;i++) {
+		sd = &ccissscsi[ctlr].dev[i];
+		if (sd->bus == bus &&
+		    sd->target == target &&
+		    sd->lun == lun) {
+			memcpy(scsi3addr, &sd->scsi3addr[0], 8);
+			CPQ_TAPE_UNLOCK(ctlr, flags);
+			return 0;
+		}
+	}
+	CPQ_TAPE_UNLOCK(ctlr, flags);
+	return -1;
+}
+
+static void 
+cciss_scsi_setup(int cntl_num)
+{
+	struct cciss_scsi_adapter_data_t * shba;
+
+	ccissscsi[cntl_num].ndevices = 0;
+	shba = (struct cciss_scsi_adapter_data_t *)
+		kmalloc(sizeof(*shba), GFP_KERNEL);	
+	if (shba == NULL)
+		return;
+	shba->scsi_host = NULL;
+	spin_lock_init(&shba->lock);
+	shba->registered = 0;
+	if (scsi_cmd_stack_setup(cntl_num, shba) != 0) {
+		kfree(shba);
+		shba = NULL;
+	}
+	hba[cntl_num]->scsi_ctlr = (void *) shba;
+	return;
+}
+
+static void
+complete_scsi_command( CommandList_struct *cp, int timeout, __u32 tag)
+{
+	struct scsi_cmnd *cmd;
+	ctlr_info_t *ctlr;
+	u64bit addr64;
+	ErrorInfo_struct *ei;
+
+	ei = cp->err_info;
+
+	/* First, see if it was a message rather than a command */
+	if (cp->Request.Type.Type == TYPE_MSG)  {
+		cp->cmd_type = CMD_MSG_DONE;
+		return;
+	}
+
+	cmd = (struct scsi_cmnd *) cp->scsi_cmd;	
+	ctlr = hba[cp->ctlr];
+
+	/* undo the DMA mappings */
+
+	if (cmd->use_sg) {
+		pci_unmap_sg(ctlr->pdev,
+			cmd->buffer, cmd->use_sg,
+				cmd->sc_data_direction); 
+	}
+	else if (cmd->request_bufflen) {
+		addr64.val32.lower = cp->SG[0].Addr.lower;
+                addr64.val32.upper = cp->SG[0].Addr.upper;
+                pci_unmap_single(ctlr->pdev, (dma_addr_t) addr64.val,
+                	cmd->request_bufflen, 
+				cmd->sc_data_direction);
+	}
+
+	cmd->result = (DID_OK << 16); 		/* host byte */
+	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */
+	/* cmd->result |= (GOOD < 1); */		/* status byte */
+
+	cmd->result |= (ei->ScsiStatus);
+	/* printk("Scsistatus is 0x%02x\n", ei->ScsiStatus);  */
+
+	/* copy the sense data whether we need to or not. */
+
+	memcpy(cmd->sense_buffer, ei->SenseInfo, 
+		ei->SenseLen > SCSI_SENSE_BUFFERSIZE ?
+			SCSI_SENSE_BUFFERSIZE : 
+			ei->SenseLen);
+	cmd->resid = ei->ResidualCnt;
+
+	if(ei->CommandStatus != 0) 
+	{ /* an error has occurred */ 
+		switch(ei->CommandStatus)
+		{
+			case CMD_TARGET_STATUS:
+				/* Pass it up to the upper layers... */
+				if( ei->ScsiStatus)
+                		{
+#if 0
+                    			printk(KERN_WARNING "cciss: cmd %p "
+					"has SCSI Status = %x\n",
+                        			cp,  
+						ei->ScsiStatus); 
+#endif
+					cmd->result |= ei->ScsiStatus;	/* raw status byte */
+                		}
+				else {  /* scsi status is zero??? How??? */
+					
+	/* Ordinarily, this case should never happen, but there is a bug
+	   in some released firmware revisions that allows it to happen
+	   if, for example, a 4100 backplane loses power and the tape
+	   drive is in it.  We assume that it's a fatal error of some
+	   kind because we can't show that it wasn't. We will make it
+	   look like selection timeout since that is the most common
+	   reason for this to occur, and it's severe enough. */
+
+					cmd->result = DID_NO_CONNECT << 16;
+				}
+			break;
+			case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
+			break;
+			case CMD_DATA_OVERRUN:
+				printk(KERN_WARNING "cciss: cp %p has"
+					" completed with data overrun "
+					"reported\n", cp);
+			break;
+			case CMD_INVALID: {
+				/* print_bytes(cp, sizeof(*cp), 1, 0);
+				print_cmd(cp); */
+     /* We get CMD_INVALID if you address a non-existent tape drive instead
+	of a selection timeout (no response).  You will see this if you yank 
+	out a tape drive, then try to access it. This is kind of a shame
+	because it means that any other CMD_INVALID (e.g. driver bug) will
+	get interpreted as a missing target. */
+				cmd->result = DID_NO_CONNECT << 16;
+				}
+			break;
+			case CMD_PROTOCOL_ERR:
+                                printk(KERN_WARNING "cciss: cp %p has "
+					"protocol error \n", cp);
+                        break;
+			case CMD_HARDWARE_ERR:
+				cmd->result = DID_ERROR << 16;
+                                printk(KERN_WARNING "cciss: cp %p had " 
+                                        " hardware error\n", cp);
+                        break;
+			case CMD_CONNECTION_LOST:
+				cmd->result = DID_ERROR << 16;
+				printk(KERN_WARNING "cciss: cp %p had "
+					"connection lost\n", cp);
+			break;
+			case CMD_ABORTED:
+				cmd->result = DID_ABORT << 16;
+				printk(KERN_WARNING "cciss: cp %p was "
+					"aborted\n", cp);
+			break;
+			case CMD_ABORT_FAILED:
+				cmd->result = DID_ERROR << 16;
+				printk(KERN_WARNING "cciss: cp %p reports "
+					"abort failed\n", cp);
+			break;
+			case CMD_UNSOLICITED_ABORT:
+				cmd->result = DID_ABORT << 16;
+				printk(KERN_WARNING "cciss: cp %p aborted "
+					"do to an unsolicited abort\n", cp);
+			break;
+			case CMD_TIMEOUT:
+				cmd->result = DID_TIME_OUT << 16;
+				printk(KERN_WARNING "cciss: cp %p timedout\n",
+					cp);
+			break;
+			default:
+				cmd->result = DID_ERROR << 16;
+				printk(KERN_WARNING "cciss: cp %p returned "
+					"unknown status %x\n", cp, 
+						ei->CommandStatus); 
+		}
+	}
+	// printk("c:%p:c%db%dt%dl%d ", cmd, ctlr->ctlr, cmd->channel, 
+	//	cmd->target, cmd->lun);
+	cmd->scsi_done(cmd);
+	scsi_cmd_free(ctlr, cp);
+}
+
+static int
+cciss_scsi_detect(int ctlr)
+{
+	struct Scsi_Host *sh;
+	int error;
+
+	sh = scsi_host_alloc(&cciss_driver_template, sizeof(struct ctlr_info *));
+	if (sh == NULL)
+		goto fail;
+	sh->io_port = 0;	// good enough?  FIXME, 
+	sh->n_io_port = 0;	// I don't think we use these two...
+	sh->this_id = SELF_SCSI_ID;  
+
+	((struct cciss_scsi_adapter_data_t *) 
+		hba[ctlr]->scsi_ctlr)->scsi_host = (void *) sh;
+	sh->hostdata[0] = (unsigned long) hba[ctlr];
+	sh->irq = hba[ctlr]->intr;
+	sh->unique_id = sh->irq;
+	error = scsi_add_host(sh, &hba[ctlr]->pdev->dev);
+	if (error)
+		goto fail_host_put;
+	scsi_scan_host(sh);
+	return 1;
+
+ fail_host_put:
+	scsi_host_put(sh);
+ fail:
+	return 0;
+}
+
+static void
+cciss_unmap_one(struct pci_dev *pdev,
+		CommandList_struct *cp,
+		size_t buflen,
+		int data_direction)
+{
+	u64bit addr64;
+
+	addr64.val32.lower = cp->SG[0].Addr.lower;
+	addr64.val32.upper = cp->SG[0].Addr.upper;
+	pci_unmap_single(pdev, (dma_addr_t) addr64.val, buflen, data_direction);
+}
+
+static void
+cciss_map_one(struct pci_dev *pdev,
+		CommandList_struct *cp,
+		unsigned char *buf,
+		size_t buflen,
+		int data_direction)
+{
+	__u64 addr64;
+
+	addr64 = (__u64) pci_map_single(pdev, buf, buflen, data_direction);
+	cp->SG[0].Addr.lower = 
+	  (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
+	cp->SG[0].Addr.upper =
+	  (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
+	cp->SG[0].Len = buflen;
+	cp->Header.SGList = (__u8) 1;   /* no. SGs contig in this cmd */
+	cp->Header.SGTotal = (__u16) 1; /* total sgs in this cmd list */
+}
+
+static int
+cciss_scsi_do_simple_cmd(ctlr_info_t *c,
+			CommandList_struct *cp,
+			unsigned char *scsi3addr, 
+			unsigned char *cdb,
+			unsigned char cdblen,
+			unsigned char *buf, int bufsize,
+			int direction)
+{
+	unsigned long flags;
+	DECLARE_COMPLETION(wait);
+
+	cp->cmd_type = CMD_IOCTL_PEND;		// treat this like an ioctl 
+	cp->scsi_cmd = NULL;
+	cp->Header.ReplyQueue = 0;  // unused in simple mode
+	memcpy(&cp->Header.LUN, scsi3addr, sizeof(cp->Header.LUN));
+	cp->Header.Tag.lower = cp->busaddr;  // Use the cmd's bus address as tag
+	// Fill in the request block...
+
+	/* printk("Using scsi3addr 0x%02x%0x2%0x2%0x2%0x2%0x2%0x2%0x2\n", 
+		scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3],
+		scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]); */
+
+	memset(cp->Request.CDB, 0, sizeof(cp->Request.CDB));
+	memcpy(cp->Request.CDB, cdb, cdblen);
+	cp->Request.Timeout = 0;
+	cp->Request.CDBLen = cdblen;
+	cp->Request.Type.Type = TYPE_CMD;
+	cp->Request.Type.Attribute = ATTR_SIMPLE;
+	cp->Request.Type.Direction = direction;
+
+	/* Fill in the SG list and do dma mapping */
+	cciss_map_one(c->pdev, cp, (unsigned char *) buf,
+			bufsize, DMA_FROM_DEVICE); 
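+	/* Note: the buffer is always mapped DMA_FROM_DEVICE here; both
+	   callers in this file (inquiry and report physical LUNs) read
+	   from the controller, so the direction argument only sets the
+	   request block's transfer direction. */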
+
+	cp->waiting = &wait;
+
+	/* Put the request on the tail of the request queue */
+	spin_lock_irqsave(CCISS_LOCK(c->ctlr), flags);
+	addQ(&c->reqQ, cp);
+	c->Qdepth++;
+	start_io(c);
+	spin_unlock_irqrestore(CCISS_LOCK(c->ctlr), flags);
+
+	wait_for_completion(&wait);
+
+	/* undo the dma mapping */
+	cciss_unmap_one(c->pdev, cp, bufsize, DMA_FROM_DEVICE);
+	return(0);
+}
+
+static void 
+cciss_scsi_interpret_error(CommandList_struct *cp)
+{
+	ErrorInfo_struct *ei;
+
+	ei = cp->err_info; 
+	switch(ei->CommandStatus)
+	{
+		case CMD_TARGET_STATUS:
+			printk(KERN_WARNING "cciss: cmd %p has "
+				"completed with errors\n", cp);
+			printk(KERN_WARNING "cciss: cmd %p "
+				"has SCSI Status = %x\n",
+					cp,  
+					ei->ScsiStatus);
+			if (ei->ScsiStatus == 0)
+				printk(KERN_WARNING 
+				"cciss:SCSI status is abnormally zero.  "
+				"(probably indicates selection timeout "
+				"reported incorrectly due to a known "
+				"firmware bug, circa July, 2001.)\n");
+		break;
+		case CMD_DATA_UNDERRUN: /* let mid layer handle it. */
+			printk("UNDERRUN\n");
+		break;
+		case CMD_DATA_OVERRUN:
+			printk(KERN_WARNING "cciss: cp %p has"
+				" completed with data overrun "
+				"reported\n", cp);
+		break;
+		case CMD_INVALID: {
+			/* controller unfortunately reports SCSI passthru's */
+			/* to non-existent targets as invalid commands. */
+			printk(KERN_WARNING "cciss: cp %p is "
+				"reported invalid (probably means "
+				"target device no longer present)\n", 
+				cp); 
+			/* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0);
+			print_cmd(cp);  */
+			}
+		break;
+		case CMD_PROTOCOL_ERR:
+			printk(KERN_WARNING "cciss: cp %p has "
+				"protocol error \n", cp);
+		break;
+		case CMD_HARDWARE_ERR:
+			/* cmd->result = DID_ERROR << 16; */
+			printk(KERN_WARNING "cciss: cp %p had " 
+				" hardware error\n", cp);
+		break;
+		case CMD_CONNECTION_LOST:
+			printk(KERN_WARNING "cciss: cp %p had "
+				"connection lost\n", cp);
+		break;
+		case CMD_ABORTED:
+			printk(KERN_WARNING "cciss: cp %p was "
+				"aborted\n", cp);
+		break;
+		case CMD_ABORT_FAILED:
+			printk(KERN_WARNING "cciss: cp %p reports "
+				"abort failed\n", cp);
+		break;
+		case CMD_UNSOLICITED_ABORT:
+			printk(KERN_WARNING "cciss: cp %p aborted "
+				"do to an unsolicited abort\n", cp);
+		break;
+		case CMD_TIMEOUT:
+			printk(KERN_WARNING "cciss: cp %p timedout\n",
+				cp);
+		break;
+		default:
+			printk(KERN_WARNING "cciss: cp %p returned "
+				"unknown status %x\n", cp, 
+					ei->CommandStatus); 
+	}
+}
+
+static int
+cciss_scsi_do_inquiry(ctlr_info_t *c, unsigned char *scsi3addr, 
+		 InquiryData_struct *buf)
+{
+	int rc;
+	CommandList_struct *cp;
+	char cdb[6];
+	ErrorInfo_struct *ei;
+	unsigned long flags;
+
+	spin_lock_irqsave(CCISS_LOCK(c->ctlr), flags);
+	cp = scsi_cmd_alloc(c);
+	spin_unlock_irqrestore(CCISS_LOCK(c->ctlr), flags);
+
+	if (cp == NULL) {			/* trouble... */
+		printk("cmd_alloc returned NULL!\n");
+		return -1;
+	}
+
+	ei = cp->err_info; 
+
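+	/* Build a standard 6-byte INQUIRY CDB; byte 4 is the allocation
+	   length, i.e. the size of buffer the controller may fill. */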
+	cdb[0] = CISS_INQUIRY;
+	cdb[1] = 0;
+	cdb[2] = 0;
+	cdb[3] = 0;
+	cdb[4] = sizeof(*buf) & 0xff;
+	cdb[5] = 0;
+	rc = cciss_scsi_do_simple_cmd(c, cp, scsi3addr, cdb, 
+				6, (unsigned char *) buf, 
+				sizeof(*buf), XFER_READ);
+
+	if (rc != 0) return rc; /* something went wrong */
+
+	if (ei->CommandStatus != 0 && 
+	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
+		cciss_scsi_interpret_error(cp);
+		rc = -1;
+	}
+	spin_lock_irqsave(CCISS_LOCK(c->ctlr), flags);
+	scsi_cmd_free(c, cp);
+	spin_unlock_irqrestore(CCISS_LOCK(c->ctlr), flags);
+	return rc;	
+}
+
+static int
+cciss_scsi_do_report_phys_luns(ctlr_info_t *c, 
+		ReportLunData_struct *buf, int bufsize)
+{
+	int rc;
+	CommandList_struct *cp;
+	unsigned char cdb[12];
+	unsigned char scsi3addr[8]; 
+	ErrorInfo_struct *ei;
+	unsigned long flags;
+
+	spin_lock_irqsave(CCISS_LOCK(c->ctlr), flags);
+	cp = scsi_cmd_alloc(c);
+	spin_unlock_irqrestore(CCISS_LOCK(c->ctlr), flags);
+	if (cp == NULL) {			/* trouble... */
+		printk("cmd_alloc returned NULL!\n");
+		return -1;
+	}
+
+	memset(&scsi3addr[0], 0, 8); /* address the controller */
+	cdb[0] = CISS_REPORT_PHYS;
+	cdb[1] = 0;
+	cdb[2] = 0;
+	cdb[3] = 0;
+	cdb[4] = 0;
+	cdb[5] = 0;
+	cdb[6] = (bufsize >> 24) & 0xFF;  //MSB
+	cdb[7] = (bufsize >> 16) & 0xFF;
+	cdb[8] = (bufsize >> 8) & 0xFF;
+	cdb[9] = bufsize & 0xFF;
+	cdb[10] = 0;
+	cdb[11] = 0;
+
+	rc = cciss_scsi_do_simple_cmd(c, cp, scsi3addr, 
+				cdb, 12, 
+				(unsigned char *) buf, 
+				bufsize, XFER_READ);
+
+	if (rc != 0) return rc; /* something went wrong */
+
+	ei = cp->err_info; 
+	if (ei->CommandStatus != 0 && 
+	    ei->CommandStatus != CMD_DATA_UNDERRUN) {
+		cciss_scsi_interpret_error(cp);
+		rc = -1;
+	}
+	spin_lock_irqsave(CCISS_LOCK(c->ctlr), flags);
+	scsi_cmd_free(c, cp);
+	spin_unlock_irqrestore(CCISS_LOCK(c->ctlr), flags);
+	return rc;	
+}
+
+static void
+cciss_update_non_disk_devices(int cntl_num, int hostno)
+{
+	/* The idea here is we could get notified from /proc
+	   that some devices have changed, so we do a report
+	   physical luns cmd, and adjust our list of devices
+	   accordingly.  (We can't rely on the scsi mid-layer just
+	   doing inquiries, because the "busses" that the scsi
+	   mid-layer probes are totally fabricated by this driver,
+	   so new devices wouldn't show up.)
+
+	   The scsi3addr's of devices won't change so long as the
+	   adapter is not reset.  That means we can rescan and
+	   tell which devices we already know about, vs. new
+	   devices, vs. disappearing devices.
+
+	   Also, if you yank out a tape drive, then put a disk in
+	   its place (say, a configured volume from another array
+	   controller for instance), and _don't_ poke this driver
+	   (so it thinks it's still a tape), but _do_ poke the
+	   scsi mid layer (so it does an inquiry), the scsi mid
+	   layer will see the physical disk.  This would be bad.
+	   Need to think about how to prevent that.  One idea
+	   would be to snoop all scsi responses and if an inquiry
+	   response comes back that reports a disk, chuck it and
+	   return selection timeout instead and adjust our
+	   table... Not sure I like that though. */
+
+	ReportLunData_struct *ld_buff;
+	InquiryData_struct *inq_buff;
+	unsigned char scsi3addr[8];
+	ctlr_info_t *c;
+	__u32 num_luns=0;
+	unsigned char *ch;
+	/* unsigned char found[CCISS_MAX_SCSI_DEVS_PER_HBA]; */
+	struct cciss_scsi_dev_t currentsd[CCISS_MAX_SCSI_DEVS_PER_HBA];
+	int ncurrent=0;
+	int reportlunsize = sizeof(*ld_buff) + CISS_MAX_PHYS_LUN * 8;
+	int i;
+
+	c = (ctlr_info_t *) hba[cntl_num];	
+	ld_buff = kmalloc(reportlunsize, GFP_KERNEL);
+	if (ld_buff == NULL) {
+		printk(KERN_ERR "cciss: out of memory\n");
+		return;
+	}
+	memset(ld_buff, 0, reportlunsize);
+	inq_buff = kmalloc(sizeof( InquiryData_struct), GFP_KERNEL);
+        if (inq_buff == NULL) {
+                printk(KERN_ERR "cciss: out of memory\n");
+                kfree(ld_buff);
+                return;
+	}
+
+	if (cciss_scsi_do_report_phys_luns(c, ld_buff, reportlunsize) == 0) {
+		ch = &ld_buff->LUNListLength[0];
+		num_luns = ((ch[0]<<24) | (ch[1]<<16) | (ch[2]<<8) | ch[3]) / 8;
+		if (num_luns > CISS_MAX_PHYS_LUN) {
+			printk(KERN_WARNING 
+				"cciss: Maximum physical LUNs (%d) exceeded.  "
+				"%d LUNs ignored.\n", CISS_MAX_PHYS_LUN, 
+				num_luns - CISS_MAX_PHYS_LUN);
+			num_luns = CISS_MAX_PHYS_LUN;
+		}
+	}
+	else {
+		printk(KERN_ERR  "cciss: Report physical LUNs failed.\n");
+		goto out;
+	}
+
+
+	/* adjust our table of devices */	
+	for(i=0; i<num_luns; i++)
+	{
+		int devtype;
+
+		/* for each physical lun, do an inquiry */
+		if (ld_buff->LUN[i][3] & 0xC0) continue;
+		memset(inq_buff, 0, sizeof(InquiryData_struct));
+		memcpy(&scsi3addr[0], &ld_buff->LUN[i][0], 8);
+
+		if (cciss_scsi_do_inquiry(hba[cntl_num], 
+			scsi3addr, inq_buff) != 0)
+		{
+			/* Inquiry failed (msg printed already) */
+			devtype = 0; /* so we will skip this device. */
+		} else /* what kind of device is this? */
+			devtype = (inq_buff->data_byte[0] & 0x1f);
+
+		switch (devtype)
+		{
+		  case 0x01: /* sequential access, (tape) */
+		  case 0x08: /* medium changer */
+			if (ncurrent >= CCISS_MAX_SCSI_DEVS_PER_HBA) {
+				printk(KERN_INFO "cciss%d: %s ignored, "
+					"too many devices.\n", cntl_num,
+					DEVICETYPE(devtype));
+				break;
+			}
+			memcpy(&currentsd[ncurrent].scsi3addr[0], 
+				&scsi3addr[0], 8);
+			currentsd[ncurrent].devtype = devtype;
+			currentsd[ncurrent].bus = -1;
+			currentsd[ncurrent].target = -1;
+			currentsd[ncurrent].lun = -1;
+			ncurrent++;
+			break;
+		  default: 
+			break;
+		}
+	}
+
+	adjust_cciss_scsi_table(cntl_num, hostno, currentsd, ncurrent);
+out:
+	kfree(inq_buff);
+	kfree(ld_buff);
+	return;
+}
+
+static int
+is_keyword(char *ptr, int len, char *verb)  // Thanks to ncr53c8xx.c
+{
+	int verb_len = strlen(verb);
+	if (len >= verb_len && !memcmp(verb,ptr,verb_len))
+		return verb_len;
+	else
+		return 0;
+}
+
+static int
+cciss_scsi_user_command(int ctlr, int hostno, char *buffer, int length)
+{
+	int arg_len;
+
+	if ((arg_len = is_keyword(buffer, length, "rescan")) != 0)
+		cciss_update_non_disk_devices(ctlr, hostno);
+	else
+		return -EINVAL;
+	return length;
+}
+
+
+static int
+cciss_scsi_proc_info(struct Scsi_Host *sh,
+		char *buffer, /* data buffer */
+		char **start, 	   /* where data in buffer starts */
+		off_t offset,	   /* offset from start of imaginary file */
+		int length, 	   /* length of data in buffer */
+		int func)	   /* 0 == read, 1 == write */
+{
+
+	int buflen, datalen;
+	ctlr_info_t *ci;
+	int cntl_num;
+
+
+	ci = (ctlr_info_t *) sh->hostdata[0];
+	if (ci == NULL)  /* This really shouldn't ever happen. */
+		return -EINVAL;
+
+	cntl_num = ci->ctlr;	/* Get our index into the hba[] array */
+
+	if (func == 0) {	/* User is reading from /proc/scsi/cciss*?/?*  */
+		buflen = sprintf(buffer, "hostnum=%d\n", sh->host_no); 	
+
+		datalen = buflen - offset;
+		if (datalen < 0) { 	/* they're reading past EOF. */
+			datalen = 0;
+			*start = buffer+buflen;	
+		} else
+			*start = buffer + offset;
+		return(datalen);
+	} else 	/* User is writing to /proc/scsi/cciss*?/?*  ... */
+		return cciss_scsi_user_command(cntl_num, sh->host_no,
+			buffer, length);	
+} 
+
+/* cciss_scatter_gather takes a struct scsi_cmnd (cmd), does the pci
+   dma mapping, and fills in the scatter gather entries of the
+   cciss command (cp). */
+
+static void
+cciss_scatter_gather(struct pci_dev *pdev, 
+		CommandList_struct *cp,	
+		struct scsi_cmnd *cmd)
+{
+	unsigned int use_sg, nsegs=0, len;
+	struct scatterlist *scatter = (struct scatterlist *) cmd->buffer;
+	__u64 addr64;
+
+	/* is it just one virtual address? */	
+	if (!cmd->use_sg) {
+		if (cmd->request_bufflen) {	/* anything to xfer? */
+
+			addr64 = (__u64) pci_map_single(pdev, 
+				cmd->request_buffer, 
+				cmd->request_bufflen, 
+				cmd->sc_data_direction); 
+	
+			cp->SG[0].Addr.lower = 
+			  (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
+			cp->SG[0].Addr.upper =
+			  (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
+			cp->SG[0].Len = cmd->request_bufflen;
+			nsegs=1;
+		}
+	} /* else, must be a list of virtual addresses.... */
+	else if (cmd->use_sg <= MAXSGENTRIES) {	/* not too many addrs? */
+
+		use_sg = pci_map_sg(pdev, cmd->buffer, cmd->use_sg, 
+			cmd->sc_data_direction);
+
+		for (nsegs=0; nsegs < use_sg; nsegs++) {
+			addr64 = (__u64) sg_dma_address(&scatter[nsegs]);
+			len  = sg_dma_len(&scatter[nsegs]);
+			cp->SG[nsegs].Addr.lower =
+			  (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
+			cp->SG[nsegs].Addr.upper =
+			  (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
+			cp->SG[nsegs].Len = len;
+			cp->SG[nsegs].Ext = 0;  // we are not chaining
+		}
+	} else BUG();
+
+	cp->Header.SGList = (__u8) nsegs;   /* no. SGs contig in this cmd */
+	cp->Header.SGTotal = (__u16) nsegs; /* total sgs in this cmd list */
+	return;
+}
+
+
+static int
+cciss_scsi_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
+{
+	ctlr_info_t **c;
+	int ctlr, rc;
+	unsigned char scsi3addr[8];
+	CommandList_struct *cp;
+	unsigned long flags;
+
+	// Get the ptr to our adapter structure (hba[i]) out of cmd->host.
+	// We violate cmd->host privacy here.  (Is there another way?)
+	c = (ctlr_info_t **) &cmd->device->host->hostdata[0];	
+	ctlr = (*c)->ctlr;
+
+	rc = lookup_scsi3addr(ctlr, cmd->device->channel, cmd->device->id, 
+			cmd->device->lun, scsi3addr);
+	if (rc != 0) {
+		/* the scsi nexus does not match any that we presented... */
+		/* pretend to mid layer that we got selection timeout */
+		cmd->result = DID_NO_CONNECT << 16;
+		done(cmd);
+		/* we might want to think about registering controller itself
+		   as a processor device on the bus so sg binds to it. */
+		return 0;
+	}
+
+	/* printk("cciss_queue_command, p=%p, cmd=0x%02x, c%db%dt%dl%d\n", 
+		cmd, cmd->cmnd[0], ctlr, cmd->channel, cmd->target, cmd->lun);*/
+	// printk("q:%p:c%db%dt%dl%d ", cmd, ctlr, cmd->channel, 
+	//	cmd->target, cmd->lun);
+
+	/* Ok, we have a reasonable scsi nexus, so send the cmd down, and
+           see what the device thinks of it. */
+
+	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
+	cp = scsi_cmd_alloc(*c);
+	spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+	if (cp == NULL) {			/* trouble... */
+		printk("scsi_cmd_alloc returned NULL!\n");
+		/* FIXME: next 3 lines are -> BAD! <- */
+		cmd->result = DID_NO_CONNECT << 16;
+		done(cmd);
+		return 0;
+	}
+
+	// Fill in the command list header
+
+	cmd->scsi_done = done;    // save this for use by completion code 
+
+	// save cp in case we have to abort it 
+	cmd->host_scribble = (unsigned char *) cp; 
+
+	cp->cmd_type = CMD_SCSI;
+	cp->scsi_cmd = cmd;
+	cp->Header.ReplyQueue = 0;  // unused in simple mode
+	memcpy(&cp->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);
+	cp->Header.Tag.lower = cp->busaddr;  // use the command's bus address as the tag
+	
+	// Fill in the request block...
+
+	cp->Request.Timeout = 0;
+	memset(cp->Request.CDB, 0, sizeof(cp->Request.CDB));
+	if (cmd->cmd_len > sizeof(cp->Request.CDB)) BUG();
+	cp->Request.CDBLen = cmd->cmd_len;
+	memcpy(cp->Request.CDB, cmd->cmnd, cmd->cmd_len);
+	cp->Request.Type.Type = TYPE_CMD;
+	cp->Request.Type.Attribute = ATTR_SIMPLE;
+	switch(cmd->sc_data_direction)
+	{
+	  case DMA_TO_DEVICE: cp->Request.Type.Direction = XFER_WRITE; break;
+	  case DMA_FROM_DEVICE: cp->Request.Type.Direction = XFER_READ; break;
+	  case DMA_NONE: cp->Request.Type.Direction = XFER_NONE; break;
+	  case DMA_BIDIRECTIONAL:
+		// This can happen if a buggy application does a scsi passthru
+		// and sets both inlen and outlen to non-zero. ( see
+		// ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() )
+
+	  	cp->Request.Type.Direction = XFER_RSVD;
+		// This is technically wrong, and cciss controllers should
+		// reject it with CMD_INVALID, which is the most correct 
+		// response, but non-fibre backends appear to let it 
+		// slide by, and give the same results as if this field
+		// were set correctly.  Either way is acceptable for
+		// our purposes here.
+
+		break;
+
+	  default: 
+		printk("cciss: unknown data direction: %d\n", 
+			cmd->sc_data_direction);
+		BUG();
+		break;
+	}
+
+	cciss_scatter_gather((*c)->pdev, cp, cmd); // Fill the SG list
+
+	/* Put the request on the tail of the request queue */
+
+	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
+	addQ(&(*c)->reqQ, cp);
+	(*c)->Qdepth++;
+	start_io(*c);
+	spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+
+	/* the cmd'll come back via intr handler in complete_scsi_command()  */
+	return 0;
+}
+
+static void 
+cciss_unregister_scsi(int ctlr)
+{
+	struct cciss_scsi_adapter_data_t *sa;
+	struct cciss_scsi_cmd_stack_t *stk;
+	unsigned long flags;
+
+	/* we are being forcibly unloaded, and may not refuse. */
+
+	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
+	sa = (struct cciss_scsi_adapter_data_t *) hba[ctlr]->scsi_ctlr;
+	stk = &sa->cmd_stack; 
+
+	/* if we weren't ever actually registered, don't unregister */ 
+	if (sa->registered) {
+		spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+		scsi_remove_host(sa->scsi_host);
+		scsi_host_put(sa->scsi_host);
+		spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
+	}
+
+	/* set scsi_host to NULL so our detect routine will 
+	   find us on register */
+	sa->scsi_host = NULL;
+	scsi_cmd_stack_free(ctlr);
+	kfree(sa);
+	spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+}
+
+static int 
+cciss_register_scsi(int ctlr)
+{
+	unsigned long flags;
+
+	CPQ_TAPE_LOCK(ctlr, flags);
+
+	/* Since this is really a block driver, the SCSI core may not be 
+	   initialized at init time, in which case, calling scsi_register_host
+	   would hang.  Instead, we do it later, via /proc filesystem
+	   and rc scripts, when we know SCSI core is good to go. */
+
+	/* Only register if SCSI devices are detected. */
+	if (ccissscsi[ctlr].ndevices != 0) {
+		((struct cciss_scsi_adapter_data_t *) 
+			hba[ctlr]->scsi_ctlr)->registered = 1;
+		CPQ_TAPE_UNLOCK(ctlr, flags);
+		return cciss_scsi_detect(ctlr);
+	}
+	CPQ_TAPE_UNLOCK(ctlr, flags);
+	printk(KERN_INFO 
+		"cciss%d: No appropriate SCSI device detected, "
+		"SCSI subsystem not engaged.\n", ctlr);
+	return 0;
+}
+
+static int 
+cciss_engage_scsi(int ctlr)
+{
+	struct cciss_scsi_adapter_data_t *sa;
+	struct cciss_scsi_cmd_stack_t *stk;
+	unsigned long flags;
+
+	spin_lock_irqsave(CCISS_LOCK(ctlr), flags);
+	sa = (struct cciss_scsi_adapter_data_t *) hba[ctlr]->scsi_ctlr;
+	stk = &sa->cmd_stack; 
+
+	if (((struct cciss_scsi_adapter_data_t *) 
+		hba[ctlr]->scsi_ctlr)->registered) {
+		printk("cciss%d: SCSI subsystem already engaged.\n", ctlr);
+		spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+		return -ENXIO;
+	}
+	spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags);
+	cciss_update_non_disk_devices(ctlr, -1);
+	cciss_register_scsi(ctlr);
+	return 0;
+}
+
+static void
+cciss_proc_tape_report(int ctlr, unsigned char *buffer, off_t *pos, off_t *len)
+{
+	unsigned long flags;
+	int size;
+
+	*pos = *pos - 1; *len = *len - 1; // cut off the last trailing newline
+
+	CPQ_TAPE_LOCK(ctlr, flags);
+	size = sprintf(buffer + *len, 
+		"       Sequential access devices: %d\n\n",
+			ccissscsi[ctlr].ndevices);
+	CPQ_TAPE_UNLOCK(ctlr, flags);
+	*pos += size; *len += size;
+}
+
+#else /* no CONFIG_CISS_SCSI_TAPE */
+
+/* If no tape support, then these become defined out of existence */
+
+#define cciss_scsi_setup(cntl_num)
+#define cciss_unregister_scsi(ctlr)
+#define cciss_register_scsi(ctlr)
+#define cciss_proc_tape_report(ctlr, buffer, pos, len)
+
+#endif /* CONFIG_CISS_SCSI_TAPE */
diff --git a/drivers/block/cciss_scsi.h b/drivers/block/cciss_scsi.h
new file mode 100644
index 0000000..5e7e06c
--- /dev/null
+++ b/drivers/block/cciss_scsi.h
@@ -0,0 +1,79 @@
+/*
+ *    Disk Array driver for Compaq SA53xx Controllers, SCSI Tape module
+ *    Copyright 2001 Compaq Computer Corporation
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2 of the License, or
+ *    (at your option) any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *    NON INFRINGEMENT.  See the GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
+ *
+ */
+#ifdef CONFIG_CISS_SCSI_TAPE
+#ifndef _CCISS_SCSI_H_
+#define _CCISS_SCSI_H_
+
+#include <scsi/scsicam.h> /* possibly irrelevant, since we don't show disks */
+
+		// the scsi id of the adapter...
+#define SELF_SCSI_ID 15
+		// 15 is somewhat arbitrary, since the scsi-2 bus
+		// that's presented by the driver to the OS is
+		// fabricated.  The "real" scsi-3 bus the 
+		// hardware presents is fabricated too.
+		// The actual, honest-to-goodness physical
+		// bus that the devices are attached to is not 
+		// addressable natively, and may in fact turn
+		// out to be not scsi at all.
+
+#define SCSI_CCISS_CAN_QUEUE 2
+
+/* 
+
+Note, cmd_per_lun could give us some trouble, so I'm setting it very low.
+Likewise, SCSI_CCISS_CAN_QUEUE is set very conservatively.
+
+If the upper scsi layer tries to track how many commands we have 
+outstanding, it will be operating under the misapprehension that it is
+the only one sending us requests.  We also have the block interface,
+which is where most requests must surely come from, so the upper layer's
+notion of how many requests we have outstanding will be wrong most or
+all of the time. 
+
+Note, the normal SCSI mid-layer error handling doesn't work well
+for this driver because 1) it takes the io_request_lock before
+calling error handlers and uses a local variable to store flags,
+so the io_request_lock cannot be released and interrupts enabled
+inside the error handlers, and 2) the error handlers cannot poll
+for command completion because they might get completions from the
+block half of the driver and not know what to do with them.
+That's what we get for making a hybrid scsi/block driver, I
+suppose.
+
+*/
+
+struct cciss_scsi_dev_t {
+	int devtype;
+	int bus, target, lun;		/* as presented to the OS */
+	unsigned char scsi3addr[8];	/* as presented to the HW */
+};
+
+struct cciss_scsi_hba_t {
+	char *name;
+	int ndevices;
+#define CCISS_MAX_SCSI_DEVS_PER_HBA 16
+	struct cciss_scsi_dev_t dev[CCISS_MAX_SCSI_DEVS_PER_HBA];
+};
+
+#endif /* _CCISS_SCSI_H_ */
+#endif /* CONFIG_CISS_SCSI_TAPE */
diff --git a/drivers/block/cfq-iosched.c b/drivers/block/cfq-iosched.c
new file mode 100644
index 0000000..0ef7a00
--- /dev/null
+++ b/drivers/block/cfq-iosched.c
@@ -0,0 +1,1856 @@
+/*
+ *  linux/drivers/block/cfq-iosched.c
+ *
+ *  CFQ, or complete fairness queueing, disk scheduler.
+ *
+ *  Based on ideas from a previously unfinished io scheduler
+ *  (round robin per-process disk scheduling) and from Andrea Arcangeli.
+ *
+ *  Copyright (C) 2003 Jens Axboe <axboe@suse.de>
+ */
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <linux/elevator.h>
+#include <linux/bio.h>
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/compiler.h>
+#include <linux/hash.h>
+#include <linux/rbtree.h>
+#include <linux/mempool.h>
+
+static unsigned long max_elapsed_crq;
+static unsigned long max_elapsed_dispatch;
+
+/*
+ * tunables
+ */
+static int cfq_quantum = 4;		/* max queue in one round of service */
+static int cfq_queued = 8;		/* minimum rq allocate limit per-queue*/
+static int cfq_service = HZ;		/* period over which service is avg */
+static int cfq_fifo_expire_r = HZ / 2;	/* fifo timeout for sync requests */
+static int cfq_fifo_expire_w = 5 * HZ;	/* fifo timeout for async requests */
+static int cfq_fifo_rate = HZ / 8;	/* fifo expiry rate */
+static int cfq_back_max = 16 * 1024;	/* maximum backwards seek, in KiB */
+static int cfq_back_penalty = 2;	/* penalty of a backwards seek */
+
+/*
+ * for the hash of cfqq inside the cfqd
+ */
+#define CFQ_QHASH_SHIFT		6
+#define CFQ_QHASH_ENTRIES	(1 << CFQ_QHASH_SHIFT)
+#define list_entry_qhash(entry)	hlist_entry((entry), struct cfq_queue, cfq_hash)
+
+/*
+ * for the hash of crq inside the cfqq
+ */
+#define CFQ_MHASH_SHIFT		6
+#define CFQ_MHASH_BLOCK(sec)	((sec) >> 3)
+#define CFQ_MHASH_ENTRIES	(1 << CFQ_MHASH_SHIFT)
+#define CFQ_MHASH_FN(sec)	hash_long(CFQ_MHASH_BLOCK(sec), CFQ_MHASH_SHIFT)
+#define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
+#define list_entry_hash(ptr)	hlist_entry((ptr), struct cfq_rq, hash)
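+
+/*
+ * note: rq_hash_key() is the request's end sector, so a bio whose start
+ * sector equals an existing request's end hashes directly to a
+ * back-merge candidate (see cfq_merge())
+ */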
+
+#define list_entry_cfqq(ptr)	list_entry((ptr), struct cfq_queue, cfq_list)
+
+#define RQ_DATA(rq)		(rq)->elevator_private
+
+/*
+ * rb-tree defines
+ */
+#define RB_NONE			(2)
+#define RB_EMPTY(node)		((node)->rb_node == NULL)
+#define RB_CLEAR_COLOR(node)	(node)->rb_color = RB_NONE
+#define RB_CLEAR(node)		do {	\
+	(node)->rb_parent = NULL;	\
+	RB_CLEAR_COLOR((node));		\
+	(node)->rb_right = NULL;	\
+	(node)->rb_left = NULL;		\
+} while (0)
+#define RB_CLEAR_ROOT(root)	((root)->rb_node = NULL)
+#define ON_RB(node)		((node)->rb_color != RB_NONE)
+#define rb_entry_crq(node)	rb_entry((node), struct cfq_rq, rb_node)
+#define rq_rb_key(rq)		(rq)->sector
+
+/*
+ * threshold for switching off non-tag accounting
+ */
+#define CFQ_MAX_TAG		(4)
+
+/*
+ * sort key types and names
+ */
+enum {
+	CFQ_KEY_PGID,
+	CFQ_KEY_TGID,
+	CFQ_KEY_UID,
+	CFQ_KEY_GID,
+	CFQ_KEY_LAST,
+};
+
+static char *cfq_key_types[] = { "pgid", "tgid", "uid", "gid", NULL };
+
+static kmem_cache_t *crq_pool;
+static kmem_cache_t *cfq_pool;
+static kmem_cache_t *cfq_ioc_pool;
+
+struct cfq_data {
+	struct list_head rr_list;
+	struct list_head empty_list;
+
+	struct hlist_head *cfq_hash;
+	struct hlist_head *crq_hash;
+
+	/* queues on rr_list (ie they have pending requests) */
+	unsigned int busy_queues;
+
+	unsigned int max_queued;
+
+	atomic_t ref;
+
+	int key_type;
+
+	mempool_t *crq_pool;
+
+	request_queue_t *queue;
+
+	sector_t last_sector;
+
+	int rq_in_driver;
+
+	/*
+	 * tunables, see top of file
+	 */
+	unsigned int cfq_quantum;
+	unsigned int cfq_queued;
+	unsigned int cfq_fifo_expire_r;
+	unsigned int cfq_fifo_expire_w;
+	unsigned int cfq_fifo_batch_expire;
+	unsigned int cfq_back_penalty;
+	unsigned int cfq_back_max;
+	unsigned int find_best_crq;
+
+	unsigned int cfq_tagged;
+};
+
+struct cfq_queue {
+	/* reference count */
+	atomic_t ref;
+	/* parent cfq_data */
+	struct cfq_data *cfqd;
+	/* hash of mergeable requests */
+	struct hlist_node cfq_hash;
+	/* hash key */
+	unsigned long key;
+	/* whether queue is on rr (or empty) list */
+	int on_rr;
+	/* on either rr or empty list of cfqd */
+	struct list_head cfq_list;
+	/* sorted list of pending requests */
+	struct rb_root sort_list;
+	/* if fifo isn't expired, next request to serve */
+	struct cfq_rq *next_crq;
+	/* requests queued in sort_list */
+	int queued[2];
+	/* currently allocated requests */
+	int allocated[2];
+	/* fifo list of requests in sort_list */
+	struct list_head fifo[2];
+	/* last time fifo expired */
+	unsigned long last_fifo_expire;
+
+	int key_type;
+
+	unsigned long service_start;
+	unsigned long service_used;
+
+	unsigned int max_rate;
+
+	/* number of requests that have been handed to the driver */
+	int in_flight;
+	/* number of currently allocated requests */
+	int alloc_limit[2];
+};
+
+struct cfq_rq {
+	struct rb_node rb_node;
+	sector_t rb_key;
+	struct request *request;
+	struct hlist_node hash;
+
+	struct cfq_queue *cfq_queue;
+	struct cfq_io_context *io_context;
+
+	unsigned long service_start;
+	unsigned long queue_start;
+
+	unsigned int in_flight : 1;
+	unsigned int accounted : 1;
+	unsigned int is_sync   : 1;
+	unsigned int is_write  : 1;
+};
+
+static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned long);
+static void cfq_dispatch_sort(request_queue_t *, struct cfq_rq *);
+static void cfq_update_next_crq(struct cfq_rq *);
+static void cfq_put_cfqd(struct cfq_data *cfqd);
+
+/*
+ * what the fairness is based on (ie how processes are grouped and
+ * differentiated)
+ */
+static inline unsigned long
+cfq_hash_key(struct cfq_data *cfqd, struct task_struct *tsk)
+{
+	/*
+	 * optimize this so that ->key_type is the offset into the struct
+	 */
+	switch (cfqd->key_type) {
+		case CFQ_KEY_PGID:
+			return process_group(tsk);
+		default:
+		case CFQ_KEY_TGID:
+			return tsk->tgid;
+		case CFQ_KEY_UID:
+			return tsk->uid;
+		case CFQ_KEY_GID:
+			return tsk->gid;
+	}
+}
+
+/*
+ * lots of deadline iosched dupes, can be abstracted later...
+ */
+static inline void cfq_del_crq_hash(struct cfq_rq *crq)
+{
+	hlist_del_init(&crq->hash);
+}
+
+static void cfq_remove_merge_hints(request_queue_t *q, struct cfq_rq *crq)
+{
+	cfq_del_crq_hash(crq);
+
+	if (q->last_merge == crq->request)
+		q->last_merge = NULL;
+
+	cfq_update_next_crq(crq);
+}
+
+static inline void cfq_add_crq_hash(struct cfq_data *cfqd, struct cfq_rq *crq)
+{
+	const int hash_idx = CFQ_MHASH_FN(rq_hash_key(crq->request));
+
+	BUG_ON(!hlist_unhashed(&crq->hash));
+
+	hlist_add_head(&crq->hash, &cfqd->crq_hash[hash_idx]);
+}
+
+static struct request *cfq_find_rq_hash(struct cfq_data *cfqd, sector_t offset)
+{
+	struct hlist_head *hash_list = &cfqd->crq_hash[CFQ_MHASH_FN(offset)];
+	struct hlist_node *entry, *next;
+
+	hlist_for_each_safe(entry, next, hash_list) {
+		struct cfq_rq *crq = list_entry_hash(entry);
+		struct request *__rq = crq->request;
+
+		BUG_ON(hlist_unhashed(&crq->hash));
+
+		if (!rq_mergeable(__rq)) {
+			cfq_del_crq_hash(crq);
+			continue;
+		}
+
+		if (rq_hash_key(__rq) == offset)
+			return __rq;
+	}
+
+	return NULL;
+}
+
+/*
+ * Lifted from AS - choose which of crq1 and crq2 is best served now.
+ * We choose the request that is closest to the head right now. Distances
+ * behind the head are penalized and only allowed to a certain extent.
+ */
+static struct cfq_rq *
+cfq_choose_req(struct cfq_data *cfqd, struct cfq_rq *crq1, struct cfq_rq *crq2)
+{
+	sector_t last, s1, s2, d1 = 0, d2 = 0;
+	int r1_wrap = 0, r2_wrap = 0;	/* requests are behind the disk head */
+	unsigned long back_max;
+
+	if (crq1 == NULL || crq1 == crq2)
+		return crq2;
+	if (crq2 == NULL)
+		return crq1;
+
+	s1 = crq1->request->sector;
+	s2 = crq2->request->sector;
+
+	last = cfqd->last_sector;
+
+#if 0
+	if (!list_empty(&cfqd->queue->queue_head)) {
+		struct list_head *entry = &cfqd->queue->queue_head;
+		unsigned long distance = ~0UL;
+		struct request *rq;
+
+		while ((entry = entry->prev) != &cfqd->queue->queue_head) {
+			rq = list_entry_rq(entry);
+
+			if (blk_barrier_rq(rq))
+				break;
+
+			if (distance < abs(s1 - rq->sector + rq->nr_sectors)) {
+				distance = abs(s1 - rq->sector +rq->nr_sectors);
+				last = rq->sector + rq->nr_sectors;
+			}
+			if (distance < abs(s2 - rq->sector + rq->nr_sectors)) {
+				distance = abs(s2 - rq->sector +rq->nr_sectors);
+				last = rq->sector + rq->nr_sectors;
+			}
+		}
+	}
+#endif
+
+	/*
+	 * by definition, 1KiB is 2 sectors
+	 */
+	back_max = cfqd->cfq_back_max * 2;
+
+	/*
+	 * Strict one way elevator _except_ in the case where we allow
+	 * short backward seeks which are biased as twice the cost of a
+	 * similar forward seek.
+	 */
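+	/*
+	 * Worked example (illustrative numbers, not from the original
+	 * source): with last = 1000 and cfq_back_penalty = 2, a request
+	 * at s1 = 1100 gets d1 = 100, while one at s2 = 990 sits behind
+	 * the head and, being within back_max, gets d2 = (1000 - 990) * 2
+	 * = 20, so the short backward seek is preferred.
+	 */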
+	if (s1 >= last)
+		d1 = s1 - last;
+	else if (s1 + back_max >= last)
+		d1 = (last - s1) * cfqd->cfq_back_penalty;
+	else
+		r1_wrap = 1;
+
+	if (s2 >= last)
+		d2 = s2 - last;
+	else if (s2 + back_max >= last)
+		d2 = (last - s2) * cfqd->cfq_back_penalty;
+	else
+		r2_wrap = 1;
+
+	/* Found required data */
+	if (!r1_wrap && r2_wrap)
+		return crq1;
+	else if (!r2_wrap && r1_wrap)
+		return crq2;
+	else if (r1_wrap && r2_wrap) {
+		/* both behind the head */
+		if (s1 <= s2)
+			return crq1;
+		else
+			return crq2;
+	}
+
+	/* Both requests in front of the head */
+	if (d1 < d2)
+		return crq1;
+	else if (d2 < d1)
+		return crq2;
+	else {
+		if (s1 >= s2)
+			return crq1;
+		else
+			return crq2;
+	}
+}
+
+/*
+ * would be nice to take fifo expire time into account as well
+ */
+static struct cfq_rq *
+cfq_find_next_crq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+		  struct cfq_rq *last)
+{
+	struct cfq_rq *crq_next = NULL, *crq_prev = NULL;
+	struct rb_node *rbnext, *rbprev;
+
+	if (!ON_RB(&last->rb_node))
+		return NULL;
+
+	if ((rbnext = rb_next(&last->rb_node)) == NULL)
+		rbnext = rb_first(&cfqq->sort_list);
+
+	rbprev = rb_prev(&last->rb_node);
+
+	if (rbprev)
+		crq_prev = rb_entry_crq(rbprev);
+	if (rbnext)
+		crq_next = rb_entry_crq(rbnext);
+
+	return cfq_choose_req(cfqd, crq_next, crq_prev);
+}
+
+static void cfq_update_next_crq(struct cfq_rq *crq)
+{
+	struct cfq_queue *cfqq = crq->cfq_queue;
+
+	if (cfqq->next_crq == crq)
+		cfqq->next_crq = cfq_find_next_crq(cfqq->cfqd, cfqq, crq);
+}
+
+static int cfq_check_sort_rr_list(struct cfq_queue *cfqq)
+{
+	struct list_head *head = &cfqq->cfqd->rr_list;
+	struct list_head *next, *prev;
+
+	/*
+	 * list might still be ordered; only re-sort if cfqq is now out
+	 * of order with respect to a neighbour
+	 */
+	next = cfqq->cfq_list.next;
+	if (next != head) {
+		struct cfq_queue *cnext = list_entry_cfqq(next);
+
+		if (cfqq->service_used > cnext->service_used)
+			return 1;
+	}
+
+	prev = cfqq->cfq_list.prev;
+	if (prev != head) {
+		struct cfq_queue *cprev = list_entry_cfqq(prev);
+
+		if (cfqq->service_used < cprev->service_used)
+			return 1;
+	}
+
+	return 0;
+}
+
+static void cfq_sort_rr_list(struct cfq_queue *cfqq, int new_queue)
+{
+	struct list_head *entry = &cfqq->cfqd->rr_list;
+
+	if (!cfqq->on_rr)
+		return;
+	if (!new_queue && !cfq_check_sort_rr_list(cfqq))
+		return;
+
+	list_del(&cfqq->cfq_list);
+
+	/*
+	 * sort by our mean service_used, sub-sort by in-flight requests
+	 */
+	while ((entry = entry->prev) != &cfqq->cfqd->rr_list) {
+		struct cfq_queue *__cfqq = list_entry_cfqq(entry);
+
+		if (cfqq->service_used > __cfqq->service_used)
+			break;
+		else if (cfqq->service_used == __cfqq->service_used) {
+			struct list_head *prv;
+
+			while ((prv = entry->prev) != &cfqq->cfqd->rr_list) {
+				__cfqq = list_entry_cfqq(prv);
+
+				WARN_ON(__cfqq->service_used > cfqq->service_used);
+				if (cfqq->service_used != __cfqq->service_used)
+					break;
+				if (cfqq->in_flight > __cfqq->in_flight)
+					break;
+
+				entry = prv;
+			}
+		}
+	}
+
+	list_add(&cfqq->cfq_list, entry);
+}
+
+/*
+ * add to busy list of queues for service, trying to be fair in ordering
+ * the pending list according to requests serviced
+ */
+static inline void
+cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+	/*
+	 * it's currently on the empty list
+	 */
+	cfqq->on_rr = 1;
+	cfqd->busy_queues++;
+
+	if (time_after(jiffies, cfqq->service_start + cfq_service))
+		cfqq->service_used >>= 3;
+
+	cfq_sort_rr_list(cfqq, 1);
+}
+
+static inline void
+cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+	list_move(&cfqq->cfq_list, &cfqd->empty_list);
+	cfqq->on_rr = 0;
+
+	BUG_ON(!cfqd->busy_queues);
+	cfqd->busy_queues--;
+}
+
+/*
+ * rb tree support functions
+ */
+static inline void cfq_del_crq_rb(struct cfq_rq *crq)
+{
+	struct cfq_queue *cfqq = crq->cfq_queue;
+
+	if (ON_RB(&crq->rb_node)) {
+		struct cfq_data *cfqd = cfqq->cfqd;
+
+		BUG_ON(!cfqq->queued[crq->is_sync]);
+
+		cfq_update_next_crq(crq);
+
+		cfqq->queued[crq->is_sync]--;
+		rb_erase(&crq->rb_node, &cfqq->sort_list);
+		RB_CLEAR_COLOR(&crq->rb_node);
+
+		if (RB_EMPTY(&cfqq->sort_list) && cfqq->on_rr)
+			cfq_del_cfqq_rr(cfqd, cfqq);
+	}
+}
+
+static struct cfq_rq *
+__cfq_add_crq_rb(struct cfq_rq *crq)
+{
+	struct rb_node **p = &crq->cfq_queue->sort_list.rb_node;
+	struct rb_node *parent = NULL;
+	struct cfq_rq *__crq;
+
+	while (*p) {
+		parent = *p;
+		__crq = rb_entry_crq(parent);
+
+		if (crq->rb_key < __crq->rb_key)
+			p = &(*p)->rb_left;
+		else if (crq->rb_key > __crq->rb_key)
+			p = &(*p)->rb_right;
+		else
+			return __crq;
+	}
+
+	rb_link_node(&crq->rb_node, parent, p);
+	return NULL;
+}
+
+static void cfq_add_crq_rb(struct cfq_rq *crq)
+{
+	struct cfq_queue *cfqq = crq->cfq_queue;
+	struct cfq_data *cfqd = cfqq->cfqd;
+	struct request *rq = crq->request;
+	struct cfq_rq *__alias;
+
+	crq->rb_key = rq_rb_key(rq);
+	cfqq->queued[crq->is_sync]++;
+
+	/*
+	 * looks a little odd, but the first insert might return an alias.
+	 * if that happens, put the alias on the dispatch list
+	 */
+	while ((__alias = __cfq_add_crq_rb(crq)) != NULL)
+		cfq_dispatch_sort(cfqd->queue, __alias);
+
+	rb_insert_color(&crq->rb_node, &cfqq->sort_list);
+
+	if (!cfqq->on_rr)
+		cfq_add_cfqq_rr(cfqd, cfqq);
+
+	/*
+	 * check if this request is a better next-serve candidate
+	 */
+	cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq);
+}
+
+static inline void
+cfq_reposition_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq)
+{
+	if (ON_RB(&crq->rb_node)) {
+		rb_erase(&crq->rb_node, &cfqq->sort_list);
+		cfqq->queued[crq->is_sync]--;
+	}
+
+	cfq_add_crq_rb(crq);
+}
+
+static struct request *
+cfq_find_rq_rb(struct cfq_data *cfqd, sector_t sector)
+{
+	const unsigned long key = cfq_hash_key(cfqd, current);
+	struct cfq_queue *cfqq = cfq_find_cfq_hash(cfqd, key);
+	struct rb_node *n;
+
+	if (!cfqq)
+		goto out;
+
+	n = cfqq->sort_list.rb_node;
+	while (n) {
+		struct cfq_rq *crq = rb_entry_crq(n);
+
+		if (sector < crq->rb_key)
+			n = n->rb_left;
+		else if (sector > crq->rb_key)
+			n = n->rb_right;
+		else
+			return crq->request;
+	}
+
+out:
+	return NULL;
+}
+
+static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
+{
+	struct cfq_rq *crq = RQ_DATA(rq);
+
+	if (crq) {
+		struct cfq_queue *cfqq = crq->cfq_queue;
+
+		if (cfqq->cfqd->cfq_tagged) {
+			cfqq->service_used--;
+			cfq_sort_rr_list(cfqq, 0);
+		}
+
+		if (crq->accounted) {
+			crq->accounted = 0;
+			cfqq->cfqd->rq_in_driver--;
+		}
+	}
+}
+
+/*
+ * make sure the service time gets corrected on reissue of this request
+ */
+static void cfq_requeue_request(request_queue_t *q, struct request *rq)
+{
+	cfq_deactivate_request(q, rq);
+	list_add(&rq->queuelist, &q->queue_head);
+}
+
+static void cfq_remove_request(request_queue_t *q, struct request *rq)
+{
+	struct cfq_rq *crq = RQ_DATA(rq);
+
+	if (crq) {
+		cfq_remove_merge_hints(q, crq);
+		list_del_init(&rq->queuelist);
+
+		if (crq->cfq_queue)
+			cfq_del_crq_rb(crq);
+	}
+}
+
+static int
+cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
+{
+	struct cfq_data *cfqd = q->elevator->elevator_data;
+	struct request *__rq;
+	int ret;
+
+	ret = elv_try_last_merge(q, bio);
+	if (ret != ELEVATOR_NO_MERGE) {
+		__rq = q->last_merge;
+		goto out_insert;
+	}
+
+	__rq = cfq_find_rq_hash(cfqd, bio->bi_sector);
+	if (__rq) {
+		BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector);
+
+		if (elv_rq_merge_ok(__rq, bio)) {
+			ret = ELEVATOR_BACK_MERGE;
+			goto out;
+		}
+	}
+
+	__rq = cfq_find_rq_rb(cfqd, bio->bi_sector + bio_sectors(bio));
+	if (__rq) {
+		if (elv_rq_merge_ok(__rq, bio)) {
+			ret = ELEVATOR_FRONT_MERGE;
+			goto out;
+		}
+	}
+
+	return ELEVATOR_NO_MERGE;
+out:
+	q->last_merge = __rq;
+out_insert:
+	*req = __rq;
+	return ret;
+}
+
+static void cfq_merged_request(request_queue_t *q, struct request *req)
+{
+	struct cfq_data *cfqd = q->elevator->elevator_data;
+	struct cfq_rq *crq = RQ_DATA(req);
+
+	cfq_del_crq_hash(crq);
+	cfq_add_crq_hash(cfqd, crq);
+
+	if (ON_RB(&crq->rb_node) && (rq_rb_key(req) != crq->rb_key)) {
+		struct cfq_queue *cfqq = crq->cfq_queue;
+
+		cfq_update_next_crq(crq);
+		cfq_reposition_crq_rb(cfqq, crq);
+	}
+
+	q->last_merge = req;
+}
+
+static void
+cfq_merged_requests(request_queue_t *q, struct request *rq,
+		    struct request *next)
+{
+	struct cfq_rq *crq = RQ_DATA(rq);
+	struct cfq_rq *cnext = RQ_DATA(next);
+
+	cfq_merged_request(q, rq);
+
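+	/*
+	 * if 'next' entered the fifo earlier, let the merged request
+	 * inherit its position and queue_start so its expiry time is
+	 * not pushed back by the merge
+	 */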
+	if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist)) {
+		if (time_before(cnext->queue_start, crq->queue_start)) {
+			list_move(&rq->queuelist, &next->queuelist);
+			crq->queue_start = cnext->queue_start;
+		}
+	}
+
+	cfq_update_next_crq(cnext);
+	cfq_remove_request(q, next);
+}
+
+/*
+ * we dispatch cfqd->cfq_quantum requests in total from the rr_list queues;
+ * this function sector-sorts the selected request to minimize seeks. we
+ * start at cfqd->last_sector, not 0.
+ */
+static void cfq_dispatch_sort(request_queue_t *q, struct cfq_rq *crq)
+{
+	struct cfq_data *cfqd = q->elevator->elevator_data;
+	struct cfq_queue *cfqq = crq->cfq_queue;
+	struct list_head *head = &q->queue_head, *entry = head;
+	struct request *__rq;
+	sector_t last;
+
+	cfq_del_crq_rb(crq);
+	cfq_remove_merge_hints(q, crq);
+	list_del(&crq->request->queuelist);
+
+	last = cfqd->last_sector;
+	while ((entry = entry->prev) != head) {
+		__rq = list_entry_rq(entry);
+
+		if (blk_barrier_rq(crq->request))
+			break;
+		if (!blk_fs_request(crq->request))
+			break;
+
+		if (crq->request->sector > __rq->sector)
+			break;
+		if (__rq->sector > last && crq->request->sector < last) {
+			last = crq->request->sector;
+			break;
+		}
+	}
+
+	cfqd->last_sector = last;
+	crq->in_flight = 1;
+	cfqq->in_flight++;
+	list_add(&crq->request->queuelist, entry);
+}
+
+/*
+ * return expired entry, or NULL to just start from scratch in rbtree
+ */
+static inline struct cfq_rq *cfq_check_fifo(struct cfq_queue *cfqq)
+{
+	struct cfq_data *cfqd = cfqq->cfqd;
+	const int reads = !list_empty(&cfqq->fifo[0]);
+	const int writes = !list_empty(&cfqq->fifo[1]);
+	unsigned long now = jiffies;
+	struct cfq_rq *crq;
+
+	if (time_before(now, cfqq->last_fifo_expire + cfqd->cfq_fifo_batch_expire))
+		return NULL;
+
+	crq = RQ_DATA(list_entry(cfqq->fifo[0].next, struct request, queuelist));
+	if (reads && time_after(now, crq->queue_start + cfqd->cfq_fifo_expire_r)) {
+		cfqq->last_fifo_expire = now;
+		return crq;
+	}
+
+	crq = RQ_DATA(list_entry(cfqq->fifo[1].next, struct request, queuelist));
+	if (writes && time_after(now, crq->queue_start + cfqd->cfq_fifo_expire_w)) {
+		cfqq->last_fifo_expire = now;
+		return crq;
+	}
+
+	return NULL;
+}
+
+/*
+ * dispatch a single request from given queue
+ */
+static inline void
+cfq_dispatch_request(request_queue_t *q, struct cfq_data *cfqd,
+		     struct cfq_queue *cfqq)
+{
+	struct cfq_rq *crq;
+
+	/*
+	 * follow expired path, else get first next available
+	 */
+	if ((crq = cfq_check_fifo(cfqq)) == NULL) {
+		if (cfqd->find_best_crq)
+			crq = cfqq->next_crq;
+		else
+			crq = rb_entry_crq(rb_first(&cfqq->sort_list));
+	}
+
+	cfqd->last_sector = crq->request->sector + crq->request->nr_sectors;
+
+	/*
+	 * finally, insert request into driver list
+	 */
+	cfq_dispatch_sort(q, crq);
+}
+
+static int cfq_dispatch_requests(request_queue_t *q, int max_dispatch)
+{
+	struct cfq_data *cfqd = q->elevator->elevator_data;
+	struct cfq_queue *cfqq;
+	struct list_head *entry, *tmp;
+	int queued, busy_queues, first_round;
+
+	if (list_empty(&cfqd->rr_list))
+		return 0;
+
+	queued = 0;
+	first_round = 1;
+restart:
+	busy_queues = 0;
+	list_for_each_safe(entry, tmp, &cfqd->rr_list) {
+		cfqq = list_entry_cfqq(entry);
+
+		BUG_ON(RB_EMPTY(&cfqq->sort_list));
+
+		/*
+		 * first round of queueing, only select from queues that
+		 * don't already have io in-flight
+		 */
+		if (first_round && cfqq->in_flight)
+			continue;
+
+		cfq_dispatch_request(q, cfqd, cfqq);
+
+		if (!RB_EMPTY(&cfqq->sort_list))
+			busy_queues++;
+
+		queued++;
+	}
+
+	if ((queued < max_dispatch) && (busy_queues || first_round)) {
+		first_round = 0;
+		goto restart;
+	}
+
+	return queued;
+}
+
+static inline void cfq_account_dispatch(struct cfq_rq *crq)
+{
+	struct cfq_queue *cfqq = crq->cfq_queue;
+	struct cfq_data *cfqd = cfqq->cfqd;
+	unsigned long now, elapsed;
+
+	if (!blk_fs_request(crq->request))
+		return;
+
+	/*
+	 * accounted bit is necessary since some drivers will call
+	 * elv_next_request() many times for the same request (eg ide)
+	 */
+	if (crq->accounted)
+		return;
+
+	now = jiffies;
+	if (cfqq->service_start == ~0UL)
+		cfqq->service_start = now;
+
+	/*
+	 * on drives with tagged command queueing, command turn-around time
+	 * doesn't necessarily reflect the time spent processing this very
+	 * command inside the drive. so do the accounting differently there,
+	 * by just sorting on the number of requests
+	 */
+	if (cfqd->cfq_tagged) {
+		if (time_after(now, cfqq->service_start + cfq_service)) {
+			cfqq->service_start = now;
+			cfqq->service_used /= 10;
+		}
+
+		cfqq->service_used++;
+		cfq_sort_rr_list(cfqq, 0);
+	}
+
+	elapsed = now - crq->queue_start;
+	if (elapsed > max_elapsed_dispatch)
+		max_elapsed_dispatch = elapsed;
+
+	crq->accounted = 1;
+	crq->service_start = now;
+
+	if (++cfqd->rq_in_driver >= CFQ_MAX_TAG && !cfqd->cfq_tagged) {
+		cfqq->cfqd->cfq_tagged = 1;
+		printk("cfq: depth %d reached, tagging now on\n", CFQ_MAX_TAG);
+	}
+}
+
+static inline void
+cfq_account_completion(struct cfq_queue *cfqq, struct cfq_rq *crq)
+{
+	struct cfq_data *cfqd = cfqq->cfqd;
+
+	if (!crq->accounted)
+		return;
+
+	WARN_ON(!cfqd->rq_in_driver);
+	cfqd->rq_in_driver--;
+
+	if (!cfqd->cfq_tagged) {
+		unsigned long now = jiffies;
+		unsigned long duration = now - crq->service_start;
+
+		if (time_after(now, cfqq->service_start + cfq_service)) {
+			cfqq->service_start = now;
+			cfqq->service_used >>= 3;
+		}
+
+		cfqq->service_used += duration;
+		cfq_sort_rr_list(cfqq, 0);
+
+		if (duration > max_elapsed_crq)
+			max_elapsed_crq = duration;
+	}
+}
+
+static struct request *cfq_next_request(request_queue_t *q)
+{
+	struct cfq_data *cfqd = q->elevator->elevator_data;
+	struct request *rq;
+
+	if (!list_empty(&q->queue_head)) {
+		struct cfq_rq *crq;
+dispatch:
+		rq = list_entry_rq(q->queue_head.next);
+
+		if ((crq = RQ_DATA(rq)) != NULL) {
+			cfq_remove_merge_hints(q, crq);
+			cfq_account_dispatch(crq);
+		}
+
+		return rq;
+	}
+
+	if (cfq_dispatch_requests(q, cfqd->cfq_quantum))
+		goto dispatch;
+
+	return NULL;
+}
+
+/*
+ * task holds one reference to the queue, dropped when task exits. each crq
+ * in-flight on this queue also holds a reference, dropped when crq is freed.
+ *
+ * queue lock must be held here.
+ */
+static void cfq_put_queue(struct cfq_queue *cfqq)
+{
+	BUG_ON(!atomic_read(&cfqq->ref));
+
+	if (!atomic_dec_and_test(&cfqq->ref))
+		return;
+
+	BUG_ON(rb_first(&cfqq->sort_list));
+	BUG_ON(cfqq->on_rr);
+
+	cfq_put_cfqd(cfqq->cfqd);
+
+	/*
+	 * it's on the empty list and still hashed
+	 */
+	list_del(&cfqq->cfq_list);
+	hlist_del(&cfqq->cfq_hash);
+	kmem_cache_free(cfq_pool, cfqq);
+}
+
+static inline struct cfq_queue *
+__cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned long key, const int hashval)
+{
+	struct hlist_head *hash_list = &cfqd->cfq_hash[hashval];
+	struct hlist_node *entry, *next;
+
+	hlist_for_each_safe(entry, next, hash_list) {
+		struct cfq_queue *__cfqq = list_entry_qhash(entry);
+
+		if (__cfqq->key == key)
+			return __cfqq;
+	}
+
+	return NULL;
+}
+
+static struct cfq_queue *
+cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned long key)
+{
+	return __cfq_find_cfq_hash(cfqd, key, hash_long(key, CFQ_QHASH_SHIFT));
+}
+
+static inline void
+cfq_rehash_cfqq(struct cfq_data *cfqd, struct cfq_queue **cfqq,
+		struct cfq_io_context *cic)
+{
+	unsigned long hashkey = cfq_hash_key(cfqd, current);
+	unsigned long hashval = hash_long(hashkey, CFQ_QHASH_SHIFT);
+	struct cfq_queue *__cfqq;
+	unsigned long flags;
+
+	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
+
+	hlist_del(&(*cfqq)->cfq_hash);
+
+	__cfqq = __cfq_find_cfq_hash(cfqd, hashkey, hashval);
+	if (!__cfqq || __cfqq == *cfqq) {
+		__cfqq = *cfqq;
+		hlist_add_head(&__cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
+		__cfqq->key_type = cfqd->key_type;
+	} else {
+		atomic_inc(&__cfqq->ref);
+		cic->cfqq = __cfqq;
+		cfq_put_queue(*cfqq);
+		*cfqq = __cfqq;
+	}
+
+	cic->cfqq = __cfqq;
+	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
+}
+
+static void cfq_free_io_context(struct cfq_io_context *cic)
+{
+	kmem_cache_free(cfq_ioc_pool, cic);
+}
+
+/*
+ * locking hierarchy is: io_context lock -> queue locks
+ */
+static void cfq_exit_io_context(struct cfq_io_context *cic)
+{
+	struct cfq_queue *cfqq = cic->cfqq;
+	struct list_head *entry = &cic->list;
+	request_queue_t *q;
+	unsigned long flags;
+
+	/*
+	 * put the reference this task is holding to the various queues
+	 */
+	spin_lock_irqsave(&cic->ioc->lock, flags);
+	while ((entry = cic->list.next) != &cic->list) {
+		struct cfq_io_context *__cic;
+
+		__cic = list_entry(entry, struct cfq_io_context, list);
+		list_del(entry);
+
+		q = __cic->cfqq->cfqd->queue;
+		spin_lock(q->queue_lock);
+		cfq_put_queue(__cic->cfqq);
+		spin_unlock(q->queue_lock);
+	}
+
+	q = cfqq->cfqd->queue;
+	spin_lock(q->queue_lock);
+	cfq_put_queue(cfqq);
+	spin_unlock(q->queue_lock);
+
+	cic->cfqq = NULL;
+	spin_unlock_irqrestore(&cic->ioc->lock, flags);
+}
+
+static struct cfq_io_context *cfq_alloc_io_context(int gfp_flags)
+{
+	struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_flags);
+
+	if (cic) {
+		cic->dtor = cfq_free_io_context;
+		cic->exit = cfq_exit_io_context;
+		INIT_LIST_HEAD(&cic->list);
+		cic->cfqq = NULL;
+	}
+
+	return cic;
+}
+
+/*
+ * Setup general io context and cfq io context. There can be several cfq
+ * io contexts per general io context, if this process is doing io to more
+ * than one device managed by cfq. Note that caller is holding a reference to
+ * cfqq, so we don't need to worry about it disappearing
+ */
+static struct cfq_io_context *
+cfq_get_io_context(struct cfq_queue **cfqq, int gfp_flags)
+{
+	struct cfq_data *cfqd = (*cfqq)->cfqd;
+	struct cfq_queue *__cfqq = *cfqq;
+	struct cfq_io_context *cic;
+	struct io_context *ioc;
+
+	might_sleep_if(gfp_flags & __GFP_WAIT);
+
+	ioc = get_io_context(gfp_flags);
+	if (!ioc)
+		return NULL;
+
+	if ((cic = ioc->cic) == NULL) {
+		cic = cfq_alloc_io_context(gfp_flags);
+
+		if (cic == NULL)
+			goto err;
+
+		ioc->cic = cic;
+		cic->ioc = ioc;
+		cic->cfqq = __cfqq;
+		atomic_inc(&__cfqq->ref);
+	} else {
+		struct cfq_io_context *__cic;
+		unsigned long flags;
+
+		/*
+		 * since the first cic on the list is actually the head
+		 * itself, need to check this here or we'll duplicate a
+		 * cic per ioc for no reason
+		 */
+		if (cic->cfqq == __cfqq)
+			goto out;
+
+		/*
+		 * cic exists, check if we are already there. linear search
+		 * should be ok here, the list will usually not be more than
+		 * 1 or a few entries long
+		 */
+		spin_lock_irqsave(&ioc->lock, flags);
+		list_for_each_entry(__cic, &cic->list, list) {
+			/*
+			 * this process is already holding a reference to
+			 * this queue, so no need to get one more
+			 */
+			if (__cic->cfqq == __cfqq) {
+				cic = __cic;
+				spin_unlock_irqrestore(&ioc->lock, flags);
+				goto out;
+			}
+		}
+		spin_unlock_irqrestore(&ioc->lock, flags);
+
+		/*
+		 * nope, process doesn't have a cic associated with this
+		 * cfqq yet. get a new one and add to list
+		 */
+		__cic = cfq_alloc_io_context(gfp_flags);
+		if (__cic == NULL)
+			goto err;
+
+		__cic->ioc = ioc;
+		__cic->cfqq = __cfqq;
+		atomic_inc(&__cfqq->ref);
+		spin_lock_irqsave(&ioc->lock, flags);
+		list_add(&__cic->list, &cic->list);
+		spin_unlock_irqrestore(&ioc->lock, flags);
+
+		cic = __cic;
+		*cfqq = __cfqq;
+	}
+
+out:
+	/*
+	 * if key_type has been changed on the fly, we lazily rehash
+	 * each queue at lookup time
+	 */
+	if ((*cfqq)->key_type != cfqd->key_type)
+		cfq_rehash_cfqq(cfqd, cfqq, cic);
+
+	return cic;
+err:
+	put_io_context(ioc);
+	return NULL;
+}
+
+static struct cfq_queue *
+__cfq_get_queue(struct cfq_data *cfqd, unsigned long key, int gfp_mask)
+{
+	const int hashval = hash_long(key, CFQ_QHASH_SHIFT);
+	struct cfq_queue *cfqq, *new_cfqq = NULL;
+
+retry:
+	cfqq = __cfq_find_cfq_hash(cfqd, key, hashval);
+
+	if (!cfqq) {
+		if (new_cfqq) {
+			cfqq = new_cfqq;
+			new_cfqq = NULL;
+		} else if (gfp_mask & __GFP_WAIT) {
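+			/*
+			 * can't block in the allocator while holding the
+			 * queue lock: drop it, allocate, and retry the
+			 * lookup since the queue may have been created by
+			 * someone else in the meantime
+			 */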
+			spin_unlock_irq(cfqd->queue->queue_lock);
+			new_cfqq = kmem_cache_alloc(cfq_pool, gfp_mask);
+			spin_lock_irq(cfqd->queue->queue_lock);
+			goto retry;
+		} else
+			goto out;
+
+		memset(cfqq, 0, sizeof(*cfqq));
+
+		INIT_HLIST_NODE(&cfqq->cfq_hash);
+		INIT_LIST_HEAD(&cfqq->cfq_list);
+		RB_CLEAR_ROOT(&cfqq->sort_list);
+		INIT_LIST_HEAD(&cfqq->fifo[0]);
+		INIT_LIST_HEAD(&cfqq->fifo[1]);
+
+		cfqq->key = key;
+		hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
+		atomic_set(&cfqq->ref, 0);
+		cfqq->cfqd = cfqd;
+		atomic_inc(&cfqd->ref);
+		cfqq->key_type = cfqd->key_type;
+		cfqq->service_start = ~0UL;
+	}
+
+	if (new_cfqq)
+		kmem_cache_free(cfq_pool, new_cfqq);
+
+	atomic_inc(&cfqq->ref);
+out:
+	WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
+	return cfqq;
+}
+
+static void cfq_enqueue(struct cfq_data *cfqd, struct cfq_rq *crq)
+{
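+	/* reads, and writes from tasks marked PF_SYNCWRITE, count as sync */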
+	crq->is_sync = 0;
+	if (rq_data_dir(crq->request) == READ || current->flags & PF_SYNCWRITE)
+		crq->is_sync = 1;
+
+	cfq_add_crq_rb(crq);
+	crq->queue_start = jiffies;
+
+	list_add_tail(&crq->request->queuelist, &crq->cfq_queue->fifo[crq->is_sync]);
+}
+
+static void
+cfq_insert_request(request_queue_t *q, struct request *rq, int where)
+{
+	struct cfq_data *cfqd = q->elevator->elevator_data;
+	struct cfq_rq *crq = RQ_DATA(rq);
+
+	switch (where) {
+		case ELEVATOR_INSERT_BACK:
+			while (cfq_dispatch_requests(q, cfqd->cfq_quantum))
+				;
+			list_add_tail(&rq->queuelist, &q->queue_head);
+			break;
+		case ELEVATOR_INSERT_FRONT:
+			list_add(&rq->queuelist, &q->queue_head);
+			break;
+		case ELEVATOR_INSERT_SORT:
+			BUG_ON(!blk_fs_request(rq));
+			cfq_enqueue(cfqd, crq);
+			break;
+		default:
+			printk("%s: bad insert point %d\n", __FUNCTION__, where);
+			return;
+	}
+
+	if (rq_mergeable(rq)) {
+		cfq_add_crq_hash(cfqd, crq);
+
+		if (!q->last_merge)
+			q->last_merge = rq;
+	}
+}
+
+static int cfq_queue_empty(request_queue_t *q)
+{
+	struct cfq_data *cfqd = q->elevator->elevator_data;
+
+	return list_empty(&q->queue_head) && list_empty(&cfqd->rr_list);
+}
+
+static void cfq_completed_request(request_queue_t *q, struct request *rq)
+{
+	struct cfq_rq *crq = RQ_DATA(rq);
+	struct cfq_queue *cfqq;
+
+	if (unlikely(!blk_fs_request(rq)))
+		return;
+
+	cfqq = crq->cfq_queue;
+
+	if (crq->in_flight) {
+		WARN_ON(!cfqq->in_flight);
+		cfqq->in_flight--;
+	}
+
+	cfq_account_completion(cfqq, crq);
+}
+
+static struct request *
+cfq_former_request(request_queue_t *q, struct request *rq)
+{
+	struct cfq_rq *crq = RQ_DATA(rq);
+	struct rb_node *rbprev = rb_prev(&crq->rb_node);
+
+	if (rbprev)
+		return rb_entry_crq(rbprev)->request;
+
+	return NULL;
+}
+
+static struct request *
+cfq_latter_request(request_queue_t *q, struct request *rq)
+{
+	struct cfq_rq *crq = RQ_DATA(rq);
+	struct rb_node *rbnext = rb_next(&crq->rb_node);
+
+	if (rbnext)
+		return rb_entry_crq(rbnext)->request;
+
+	return NULL;
+}
+
+static int cfq_may_queue(request_queue_t *q, int rw)
+{
+	struct cfq_data *cfqd = q->elevator->elevator_data;
+	struct cfq_queue *cfqq;
+	int ret = ELV_MQUEUE_MAY;
+
+	if (current->flags & PF_MEMALLOC)
+		return ELV_MQUEUE_MAY;
+
+	cfqq = cfq_find_cfq_hash(cfqd, cfq_hash_key(cfqd, current));
+	if (cfqq) {
+		int limit = cfqd->max_queued;
+
+		if (cfqq->allocated[rw] < cfqd->cfq_queued)
+			return ELV_MQUEUE_MUST;
+
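+		/*
+		 * give each busy queue a fair share of the total request
+		 * pool, clamped between cfq_queued and max_queued
+		 */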
+		if (cfqd->busy_queues)
+			limit = q->nr_requests / cfqd->busy_queues;
+
+		if (limit < cfqd->cfq_queued)
+			limit = cfqd->cfq_queued;
+		else if (limit > cfqd->max_queued)
+			limit = cfqd->max_queued;
+
+		if (cfqq->allocated[rw] >= limit) {
+			if (limit > cfqq->alloc_limit[rw])
+				cfqq->alloc_limit[rw] = limit;
+
+			ret = ELV_MQUEUE_NO;
+		}
+	}
+
+	return ret;
+}
+
+static void cfq_check_waiters(request_queue_t *q, struct cfq_queue *cfqq)
+{
+	struct request_list *rl = &q->rq;
+	const int write = waitqueue_active(&rl->wait[WRITE]);
+	const int read = waitqueue_active(&rl->wait[READ]);
+
+	if (read && cfqq->allocated[READ] < cfqq->alloc_limit[READ])
+		wake_up(&rl->wait[READ]);
+	if (write && cfqq->allocated[WRITE] < cfqq->alloc_limit[WRITE])
+		wake_up(&rl->wait[WRITE]);
+}
+
+/*
+ * queue lock held here
+ */
+static void cfq_put_request(request_queue_t *q, struct request *rq)
+{
+	struct cfq_data *cfqd = q->elevator->elevator_data;
+	struct cfq_rq *crq = RQ_DATA(rq);
+
+	if (crq) {
+		struct cfq_queue *cfqq = crq->cfq_queue;
+
+		BUG_ON(q->last_merge == rq);
+		BUG_ON(!hlist_unhashed(&crq->hash));
+
+		if (crq->io_context)
+			put_io_context(crq->io_context->ioc);
+
+		BUG_ON(!cfqq->allocated[crq->is_write]);
+		cfqq->allocated[crq->is_write]--;
+
+		mempool_free(crq, cfqd->crq_pool);
+		rq->elevator_private = NULL;
+
+		smp_mb();
+		cfq_check_waiters(q, cfqq);
+		cfq_put_queue(cfqq);
+	}
+}
+
+/*
+ * Allocate cfq data structures associated with this request: the
+ * cfq_queue for the current task and a cfq_rq to track the request.
+ */
+static int cfq_set_request(request_queue_t *q, struct request *rq, int gfp_mask)
+{
+	struct cfq_data *cfqd = q->elevator->elevator_data;
+	struct cfq_io_context *cic;
+	const int rw = rq_data_dir(rq);
+	struct cfq_queue *cfqq, *saved_cfqq;
+	struct cfq_rq *crq;
+	unsigned long flags;
+
+	might_sleep_if(gfp_mask & __GFP_WAIT);
+
+	spin_lock_irqsave(q->queue_lock, flags);
+
+	cfqq = __cfq_get_queue(cfqd, cfq_hash_key(cfqd, current), gfp_mask);
+	if (!cfqq)
+		goto out_lock;
+
+repeat:
+	if (cfqq->allocated[rw] >= cfqd->max_queued)
+		goto out_lock;
+
+	cfqq->allocated[rw]++;
+	spin_unlock_irqrestore(q->queue_lock, flags);
+
+	/*
+	 * if hashing type has changed, the cfq_queue might change here.
+	 */
+	saved_cfqq = cfqq;
+	cic = cfq_get_io_context(&cfqq, gfp_mask);
+	if (!cic)
+		goto err;
+
+	/*
+	 * repeat allocation checks on queue change
+	 */
+	if (unlikely(saved_cfqq != cfqq)) {
+		spin_lock_irqsave(q->queue_lock, flags);
+		saved_cfqq->allocated[rw]--;
+		goto repeat;
+	}
+
+	crq = mempool_alloc(cfqd->crq_pool, gfp_mask);
+	if (crq) {
+		RB_CLEAR(&crq->rb_node);
+		crq->rb_key = 0;
+		crq->request = rq;
+		INIT_HLIST_NODE(&crq->hash);
+		crq->cfq_queue = cfqq;
+		crq->io_context = cic;
+		crq->service_start = crq->queue_start = 0;
+		crq->in_flight = crq->accounted = crq->is_sync = 0;
+		crq->is_write = rw;
+		rq->elevator_private = crq;
+		cfqq->alloc_limit[rw] = 0;
+		return 0;
+	}
+
+	put_io_context(cic->ioc);
+err:
+	spin_lock_irqsave(q->queue_lock, flags);
+	cfqq->allocated[rw]--;
+	cfq_put_queue(cfqq);
+out_lock:
+	spin_unlock_irqrestore(q->queue_lock, flags);
+	return 1;
+}
+
+static void cfq_put_cfqd(struct cfq_data *cfqd)
+{
+	request_queue_t *q = cfqd->queue;
+
+	if (!atomic_dec_and_test(&cfqd->ref))
+		return;
+
+	blk_put_queue(q);
+
+	mempool_destroy(cfqd->crq_pool);
+	kfree(cfqd->crq_hash);
+	kfree(cfqd->cfq_hash);
+	kfree(cfqd);
+}
+
+static void cfq_exit_queue(elevator_t *e)
+{
+	cfq_put_cfqd(e->elevator_data);
+}
+
+static int cfq_init_queue(request_queue_t *q, elevator_t *e)
+{
+	struct cfq_data *cfqd;
+	int i;
+
+	cfqd = kmalloc(sizeof(*cfqd), GFP_KERNEL);
+	if (!cfqd)
+		return -ENOMEM;
+
+	memset(cfqd, 0, sizeof(*cfqd));
+	INIT_LIST_HEAD(&cfqd->rr_list);
+	INIT_LIST_HEAD(&cfqd->empty_list);
+
+	cfqd->crq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_MHASH_ENTRIES, GFP_KERNEL);
+	if (!cfqd->crq_hash)
+		goto out_crqhash;
+
+	cfqd->cfq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL);
+	if (!cfqd->cfq_hash)
+		goto out_cfqhash;
+
+	cfqd->crq_pool = mempool_create(BLKDEV_MIN_RQ, mempool_alloc_slab, mempool_free_slab, crq_pool);
+	if (!cfqd->crq_pool)
+		goto out_crqpool;
+
+	for (i = 0; i < CFQ_MHASH_ENTRIES; i++)
+		INIT_HLIST_HEAD(&cfqd->crq_hash[i]);
+	for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
+		INIT_HLIST_HEAD(&cfqd->cfq_hash[i]);
+
+	e->elevator_data = cfqd;
+
+	cfqd->queue = q;
+	atomic_inc(&q->refcnt);
+
+	/*
+	 * just set it to some high value, we want anyone to be able to queue
+	 * some requests. fairness is handled differently
+	 */
+	q->nr_requests = 1024;
+	cfqd->max_queued = q->nr_requests / 16;
+	q->nr_batching = cfq_queued;
+	cfqd->key_type = CFQ_KEY_TGID;
+	cfqd->find_best_crq = 1;
+	atomic_set(&cfqd->ref, 1);
+
+	cfqd->cfq_queued = cfq_queued;
+	cfqd->cfq_quantum = cfq_quantum;
+	cfqd->cfq_fifo_expire_r = cfq_fifo_expire_r;
+	cfqd->cfq_fifo_expire_w = cfq_fifo_expire_w;
+	cfqd->cfq_fifo_batch_expire = cfq_fifo_rate;
+	cfqd->cfq_back_max = cfq_back_max;
+	cfqd->cfq_back_penalty = cfq_back_penalty;
+
+	return 0;
+out_crqpool:
+	kfree(cfqd->cfq_hash);
+out_cfqhash:
+	kfree(cfqd->crq_hash);
+out_crqhash:
+	kfree(cfqd);
+	return -ENOMEM;
+}
+
+static void cfq_slab_kill(void)
+{
+	if (crq_pool)
+		kmem_cache_destroy(crq_pool);
+	if (cfq_pool)
+		kmem_cache_destroy(cfq_pool);
+	if (cfq_ioc_pool)
+		kmem_cache_destroy(cfq_ioc_pool);
+}
+
+static int __init cfq_slab_setup(void)
+{
+	crq_pool = kmem_cache_create("crq_pool", sizeof(struct cfq_rq), 0, 0,
+					NULL, NULL);
+	if (!crq_pool)
+		goto fail;
+
+	cfq_pool = kmem_cache_create("cfq_pool", sizeof(struct cfq_queue), 0, 0,
+					NULL, NULL);
+	if (!cfq_pool)
+		goto fail;
+
+	cfq_ioc_pool = kmem_cache_create("cfq_ioc_pool",
+			sizeof(struct cfq_io_context), 0, 0, NULL, NULL);
+	if (!cfq_ioc_pool)
+		goto fail;
+
+	return 0;
+fail:
+	cfq_slab_kill();
+	return -ENOMEM;
+}
+
+
+/*
+ * sysfs parts below -->
+ */
+struct cfq_fs_entry {
+	struct attribute attr;
+	ssize_t (*show)(struct cfq_data *, char *);
+	ssize_t (*store)(struct cfq_data *, const char *, size_t);
+};
+
+static ssize_t
+cfq_var_show(unsigned int var, char *page)
+{
+	return sprintf(page, "%d\n", var);
+}
+
+static ssize_t
+cfq_var_store(unsigned int *var, const char *page, size_t count)
+{
+	char *p = (char *) page;
+
+	*var = simple_strtoul(p, &p, 10);
+	return count;
+}
+
+static ssize_t
+cfq_clear_elapsed(struct cfq_data *cfqd, const char *page, size_t count)
+{
+	max_elapsed_dispatch = max_elapsed_crq = 0;
+	return count;
+}
+
+static ssize_t
+cfq_set_key_type(struct cfq_data *cfqd, const char *page, size_t count)
+{
+	spin_lock_irq(cfqd->queue->queue_lock);
+	if (!strncmp(page, "pgid", 4))
+		cfqd->key_type = CFQ_KEY_PGID;
+	else if (!strncmp(page, "tgid", 4))
+		cfqd->key_type = CFQ_KEY_TGID;
+	else if (!strncmp(page, "uid", 3))
+		cfqd->key_type = CFQ_KEY_UID;
+	else if (!strncmp(page, "gid", 3))
+		cfqd->key_type = CFQ_KEY_GID;
+	spin_unlock_irq(cfqd->queue->queue_lock);
+	return count;
+}
+
+static ssize_t
+cfq_read_key_type(struct cfq_data *cfqd, char *page)
+{
+	ssize_t len = 0;
+	int i;
+
+	for (i = CFQ_KEY_PGID; i < CFQ_KEY_LAST; i++) {
+		if (cfqd->key_type == i)
+			len += sprintf(page+len, "[%s] ", cfq_key_types[i]);
+		else
+			len += sprintf(page+len, "%s ", cfq_key_types[i]);
+	}
+	len += sprintf(page+len, "\n");
+	return len;
+}
+
+#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
+static ssize_t __FUNC(struct cfq_data *cfqd, char *page)		\
+{									\
+	unsigned int __data = __VAR;					\
+	if (__CONV)							\
+		__data = jiffies_to_msecs(__data);			\
+	return cfq_var_show(__data, (page));				\
+}
+SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
+SHOW_FUNCTION(cfq_queued_show, cfqd->cfq_queued, 0);
+SHOW_FUNCTION(cfq_fifo_expire_r_show, cfqd->cfq_fifo_expire_r, 1);
+SHOW_FUNCTION(cfq_fifo_expire_w_show, cfqd->cfq_fifo_expire_w, 1);
+SHOW_FUNCTION(cfq_fifo_batch_expire_show, cfqd->cfq_fifo_batch_expire, 1);
+SHOW_FUNCTION(cfq_find_best_show, cfqd->find_best_crq, 0);
+SHOW_FUNCTION(cfq_back_max_show, cfqd->cfq_back_max, 0);
+SHOW_FUNCTION(cfq_back_penalty_show, cfqd->cfq_back_penalty, 0);
+#undef SHOW_FUNCTION
+
+#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
+static ssize_t __FUNC(struct cfq_data *cfqd, const char *page, size_t count)	\
+{									\
+	unsigned int __data;						\
+	int ret = cfq_var_store(&__data, (page), count);		\
+	if (__data < (MIN))						\
+		__data = (MIN);						\
+	else if (__data > (MAX))					\
+		__data = (MAX);						\
+	if (__CONV)							\
+		*(__PTR) = msecs_to_jiffies(__data);			\
+	else								\
+		*(__PTR) = __data;					\
+	return ret;							\
+}
+STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
+STORE_FUNCTION(cfq_queued_store, &cfqd->cfq_queued, 1, UINT_MAX, 0);
+STORE_FUNCTION(cfq_fifo_expire_r_store, &cfqd->cfq_fifo_expire_r, 1, UINT_MAX, 1);
+STORE_FUNCTION(cfq_fifo_expire_w_store, &cfqd->cfq_fifo_expire_w, 1, UINT_MAX, 1);
+STORE_FUNCTION(cfq_fifo_batch_expire_store, &cfqd->cfq_fifo_batch_expire, 0, UINT_MAX, 1);
+STORE_FUNCTION(cfq_find_best_store, &cfqd->find_best_crq, 0, 1, 0);
+STORE_FUNCTION(cfq_back_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
+STORE_FUNCTION(cfq_back_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0);
+#undef STORE_FUNCTION
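+
+/*
+ * The entries below expose the tunables through sysfs. With cfq active,
+ * they typically appear under /sys/block/<disk>/queue/iosched/ (device
+ * name below is assumed for illustration):
+ *
+ *	echo 8 > /sys/block/hda/queue/iosched/quantum
+ */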
+
+static struct cfq_fs_entry cfq_quantum_entry = {
+	.attr = {.name = "quantum", .mode = S_IRUGO | S_IWUSR },
+	.show = cfq_quantum_show,
+	.store = cfq_quantum_store,
+};
+static struct cfq_fs_entry cfq_queued_entry = {
+	.attr = {.name = "queued", .mode = S_IRUGO | S_IWUSR },
+	.show = cfq_queued_show,
+	.store = cfq_queued_store,
+};
+static struct cfq_fs_entry cfq_fifo_expire_r_entry = {
+	.attr = {.name = "fifo_expire_sync", .mode = S_IRUGO | S_IWUSR },
+	.show = cfq_fifo_expire_r_show,
+	.store = cfq_fifo_expire_r_store,
+};
+static struct cfq_fs_entry cfq_fifo_expire_w_entry = {
+	.attr = {.name = "fifo_expire_async", .mode = S_IRUGO | S_IWUSR },
+	.show = cfq_fifo_expire_w_show,
+	.store = cfq_fifo_expire_w_store,
+};
+static struct cfq_fs_entry cfq_fifo_batch_expire_entry = {
+	.attr = {.name = "fifo_batch_expire", .mode = S_IRUGO | S_IWUSR },
+	.show = cfq_fifo_batch_expire_show,
+	.store = cfq_fifo_batch_expire_store,
+};
+static struct cfq_fs_entry cfq_find_best_entry = {
+	.attr = {.name = "find_best_crq", .mode = S_IRUGO | S_IWUSR },
+	.show = cfq_find_best_show,
+	.store = cfq_find_best_store,
+};
+static struct cfq_fs_entry cfq_back_max_entry = {
+	.attr = {.name = "back_seek_max", .mode = S_IRUGO | S_IWUSR },
+	.show = cfq_back_max_show,
+	.store = cfq_back_max_store,
+};
+static struct cfq_fs_entry cfq_back_penalty_entry = {
+	.attr = {.name = "back_seek_penalty", .mode = S_IRUGO | S_IWUSR },
+	.show = cfq_back_penalty_show,
+	.store = cfq_back_penalty_store,
+};
+static struct cfq_fs_entry cfq_clear_elapsed_entry = {
+	.attr = {.name = "clear_elapsed", .mode = S_IWUSR },
+	.store = cfq_clear_elapsed,
+};
+static struct cfq_fs_entry cfq_key_type_entry = {
+	.attr = {.name = "key_type", .mode = S_IRUGO | S_IWUSR },
+	.show = cfq_read_key_type,
+	.store = cfq_set_key_type,
+};
+
+static struct attribute *default_attrs[] = {
+	&cfq_quantum_entry.attr,
+	&cfq_queued_entry.attr,
+	&cfq_fifo_expire_r_entry.attr,
+	&cfq_fifo_expire_w_entry.attr,
+	&cfq_fifo_batch_expire_entry.attr,
+	&cfq_key_type_entry.attr,
+	&cfq_find_best_entry.attr,
+	&cfq_back_max_entry.attr,
+	&cfq_back_penalty_entry.attr,
+	&cfq_clear_elapsed_entry.attr,
+	NULL,
+};
+
+#define to_cfq(atr) container_of((atr), struct cfq_fs_entry, attr)
+
+static ssize_t
+cfq_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
+{
+	elevator_t *e = container_of(kobj, elevator_t, kobj);
+	struct cfq_fs_entry *entry = to_cfq(attr);
+
+	if (!entry->show)
+		return 0;
+
+	return entry->show(e->elevator_data, page);
+}
+
+static ssize_t
+cfq_attr_store(struct kobject *kobj, struct attribute *attr,
+	       const char *page, size_t length)
+{
+	elevator_t *e = container_of(kobj, elevator_t, kobj);
+	struct cfq_fs_entry *entry = to_cfq(attr);
+
+	if (!entry->store)
+		return -EINVAL;
+
+	return entry->store(e->elevator_data, page, length);
+}
+
+static struct sysfs_ops cfq_sysfs_ops = {
+	.show	= cfq_attr_show,
+	.store	= cfq_attr_store,
+};
+
+static struct kobj_type cfq_ktype = {
+	.sysfs_ops	= &cfq_sysfs_ops,
+	.default_attrs	= default_attrs,
+};
+
+static struct elevator_type iosched_cfq = {
+	.ops = {
+		.elevator_merge_fn = 		cfq_merge,
+		.elevator_merged_fn =		cfq_merged_request,
+		.elevator_merge_req_fn =	cfq_merged_requests,
+		.elevator_next_req_fn =		cfq_next_request,
+		.elevator_add_req_fn =		cfq_insert_request,
+		.elevator_remove_req_fn =	cfq_remove_request,
+		.elevator_requeue_req_fn =	cfq_requeue_request,
+		.elevator_deactivate_req_fn =	cfq_deactivate_request,
+		.elevator_queue_empty_fn =	cfq_queue_empty,
+		.elevator_completed_req_fn =	cfq_completed_request,
+		.elevator_former_req_fn =	cfq_former_request,
+		.elevator_latter_req_fn =	cfq_latter_request,
+		.elevator_set_req_fn =		cfq_set_request,
+		.elevator_put_req_fn =		cfq_put_request,
+		.elevator_may_queue_fn =	cfq_may_queue,
+		.elevator_init_fn =		cfq_init_queue,
+		.elevator_exit_fn =		cfq_exit_queue,
+	},
+	.elevator_ktype =	&cfq_ktype,
+	.elevator_name =	"cfq",
+	.elevator_owner =	THIS_MODULE,
+};
+
+static int __init cfq_init(void)
+{
+	int ret;
+
+	if (cfq_slab_setup())
+		return -ENOMEM;
+
+	ret = elv_register(&iosched_cfq);
+	if (!ret) {
+		__module_get(THIS_MODULE);
+		return 0;
+	}
+
+	cfq_slab_kill();
+	return ret;
+}
+
+static void __exit cfq_exit(void)
+{
+	cfq_slab_kill();
+	elv_unregister(&iosched_cfq);
+}
+
+module_init(cfq_init);
+module_exit(cfq_exit);
+
+MODULE_AUTHOR("Jens Axboe");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
new file mode 100644
index 0000000..cf1822a
--- /dev/null
+++ b/drivers/block/cpqarray.c
@@ -0,0 +1,1850 @@
+/*
+ *    Disk Array driver for Compaq SMART2 Controllers
+ *    Copyright 1998 Compaq Computer Corporation
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2 of the License, or
+ *    (at your option) any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *    NON INFRINGEMENT.  See the GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
+ *
+ */
+#include <linux/config.h>	/* CONFIG_PROC_FS */
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/bio.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/major.h>
+#include <linux/fs.h>
+#include <linux/blkpg.h>
+#include <linux/timer.h>
+#include <linux/proc_fs.h>
+#include <linux/devfs_fs_kernel.h>
+#include <linux/init.h>
+#include <linux/hdreg.h>
+#include <linux/spinlock.h>
+#include <linux/blkdev.h>
+#include <linux/genhd.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+
+
+#define SMART2_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
+
+#define DRIVER_NAME "Compaq SMART2 Driver (v 2.6.0)"
+#define DRIVER_VERSION SMART2_DRIVER_VERSION(2,6,0)
+
+/* Embedded module documentation macros - see modules.h */
+/* Original author Chris Frantz - Compaq Computer Corporation */
+MODULE_AUTHOR("Compaq Computer Corporation");
+MODULE_DESCRIPTION("Driver for Compaq Smart2 Array Controllers version 2.6.0");
+MODULE_LICENSE("GPL");
+
+#include "cpqarray.h"
+#include "ida_cmd.h"
+#include "smart1,2.h"
+#include "ida_ioctl.h"
+
+#define READ_AHEAD	128
+#define NR_CMDS		128 /* This could probably go as high as ~400 */
+
+#define MAX_CTLR	8
+#define CTLR_SHIFT	8
+
+#define CPQARRAY_DMA_MASK	0xFFFFFFFF	/* 32 bit DMA */
+
+static int nr_ctlr;
+static ctlr_info_t *hba[MAX_CTLR];
+
+static int eisa[8];
+
+#define NR_PRODUCTS (sizeof(products)/sizeof(struct board_type))
+
+/*  board_id = Subsystem Device ID & Vendor ID
+ *  product = Marketing Name for the board
+ *  access = Address of the struct of function pointers 
+ */
+static struct board_type products[] = {
+	{ 0x0040110E, "IDA",			&smart1_access },
+	{ 0x0140110E, "IDA-2",			&smart1_access },
+	{ 0x1040110E, "IAES",			&smart1_access },
+	{ 0x2040110E, "SMART",			&smart1_access },
+	{ 0x3040110E, "SMART-2/E",		&smart2e_access },
+	{ 0x40300E11, "SMART-2/P",		&smart2_access },
+	{ 0x40310E11, "SMART-2SL",		&smart2_access },
+	{ 0x40320E11, "Smart Array 3200",	&smart2_access },
+	{ 0x40330E11, "Smart Array 3100ES",	&smart2_access },
+	{ 0x40340E11, "Smart Array 221",	&smart2_access },
+	{ 0x40400E11, "Integrated Array",	&smart4_access },
+	{ 0x40480E11, "Compaq Raid LC2",        &smart4_access },
+	{ 0x40500E11, "Smart Array 4200",	&smart4_access },
+	{ 0x40510E11, "Smart Array 4250ES",	&smart4_access },
+	{ 0x40580E11, "Smart Array 431",	&smart4_access },
+};
+
+/* define the PCI info for the PCI cards this driver can control */
+static const struct pci_device_id cpqarray_pci_device_id[] =
+{
+	{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
+		0x0E11, 0x4058, 0, 0, 0},       /* SA431 */
+	{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
+		0x0E11, 0x4051, 0, 0, 0},      /* SA4250ES */
+	{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
+		0x0E11, 0x4050, 0, 0, 0},      /* SA4200 */
+	{ PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C1510,
+		0x0E11, 0x4048, 0, 0, 0},       /* LC2 */
+	{ PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C1510,
+		0x0E11, 0x4040, 0, 0, 0},      /* Integrated Array */
+	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
+		0x0E11, 0x4034, 0, 0, 0},       /* SA 221 */
+	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
+		0x0E11, 0x4033, 0, 0, 0},       /* SA 3100ES*/
+	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
+		0x0E11, 0x4032, 0, 0, 0},       /* SA 3200*/
+	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
+		0x0E11, 0x4031, 0, 0, 0},       /* SA 2SL*/
+	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
+		0x0E11, 0x4030, 0, 0, 0},       /* SA 2P */
+	{ 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, cpqarray_pci_device_id);
+
+static struct gendisk *ida_gendisk[MAX_CTLR][NWD];
+
+/* Debug... */
+#define DBG(s)	do { s } while(0)
+/* Debug (general info)... */
+#define DBGINFO(s) do { } while(0)
+/* Debug Paranoid... */
+#define DBGP(s)  do { } while(0)
+/* Debug Extra Paranoid... */
+#define DBGPX(s) do { } while(0)
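+/*
+ * Of the four macros above only DBG() actually compiles its argument
+ * in; DBGINFO/DBGP/DBGPX discard theirs.  To enable a debug level,
+ * change its body to "do { s } while(0)" as in DBG().
+ */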
+
+static int cpqarray_pci_init(ctlr_info_t *c, struct pci_dev *pdev);
+static void __iomem *remap_pci_mem(ulong base, ulong size);
+static int cpqarray_eisa_detect(void);
+static int pollcomplete(int ctlr);
+static void getgeometry(int ctlr);
+static void start_fwbk(int ctlr);
+
+static cmdlist_t * cmd_alloc(ctlr_info_t *h, int get_from_pool);
+static void cmd_free(ctlr_info_t *h, cmdlist_t *c, int got_from_pool);
+
+static void free_hba(int i);
+static int alloc_cpqarray_hba(void);
+
+static int sendcmd(
+	__u8	cmd,
+	int	ctlr,
+	void	*buff,
+	size_t	size,
+	unsigned int blk,
+	unsigned int blkcnt,
+	unsigned int log_unit );
+
+static int ida_open(struct inode *inode, struct file *filep);
+static int ida_release(struct inode *inode, struct file *filep);
+static int ida_ioctl(struct inode *inode, struct file *filep, unsigned int cmd, unsigned long arg);
+static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io);
+
+static void do_ida_request(request_queue_t *q);
+static void start_io(ctlr_info_t *h);
+
+static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c);
+static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c);
+static inline void complete_buffers(struct bio *bio, int ok);
+static inline void complete_command(cmdlist_t *cmd, int timeout);
+
+static irqreturn_t do_ida_intr(int irq, void *dev_id, struct pt_regs * regs);
+static void ida_timer(unsigned long tdata);
+static int ida_revalidate(struct gendisk *disk);
+static int revalidate_allvol(ctlr_info_t *host);
+static int cpqarray_register_ctlr(int ctlr, struct pci_dev *pdev);
+
+#ifdef CONFIG_PROC_FS
+static void ida_procinit(int i);
+static int ida_proc_get_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data);
+#else
+static void ida_procinit(int i) {}
+#endif
+
+static inline drv_info_t *get_drv(struct gendisk *disk)
+{
+	return disk->private_data;
+}
+
+static inline ctlr_info_t *get_host(struct gendisk *disk)
+{
+	return disk->queue->queuedata;
+}
+
+
+static struct block_device_operations ida_fops  = {
+	.owner		= THIS_MODULE,
+	.open		= ida_open,
+	.release	= ida_release,
+	.ioctl		= ida_ioctl,
+	.revalidate_disk= ida_revalidate,
+};
+
+
+#ifdef CONFIG_PROC_FS
+
+static struct proc_dir_entry *proc_array;
+
+/*
+ * Get us a file in /proc/array that says something about each controller.
+ * Create /proc/array if it doesn't exist yet.
+ */
+static void __init ida_procinit(int i)
+{
+	if (proc_array == NULL) {
+		proc_array = proc_mkdir("cpqarray", proc_root_driver);
+		if (!proc_array) return;
+	}
+
+	create_proc_read_entry(hba[i]->devname, 0, proc_array,
+			       ida_proc_get_info, hba[i]);
+}
+
+/*
+ * Report information about this controller.
+ */
+static int ida_proc_get_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
+{
+	off_t pos = 0;
+	off_t len = 0;
+	int size, i, ctlr;
+	ctlr_info_t *h = (ctlr_info_t*)data;
+	drv_info_t *drv;
+#ifdef CPQ_PROC_PRINT_QUEUES
+	cmdlist_t *c;
+	unsigned long flags;
+#endif
+
+	ctlr = h->ctlr;
+	size = sprintf(buffer, "%s:  Compaq %s Controller\n"
+		"       Board ID: 0x%08lx\n"
+		"       Firmware Revision: %c%c%c%c\n"
+		"       Controller Sig: 0x%08lx\n"
+		"       Memory Address: 0x%08lx\n"
+		"       I/O Port: 0x%04x\n"
+		"       IRQ: %d\n"
+		"       Logical drives: %d\n"
+		"       Physical drives: %d\n\n"
+		"       Current Q depth: %d\n"
+		"       Max Q depth since init: %d\n\n",
+		h->devname, 
+		h->product_name,
+		(unsigned long)h->board_id,
+		h->firm_rev[0], h->firm_rev[1], h->firm_rev[2], h->firm_rev[3],
+		(unsigned long)h->ctlr_sig, (unsigned long)h->vaddr,
+		(unsigned int) h->io_mem_addr, (unsigned int)h->intr,
+		h->log_drives, h->phys_drives,
+		h->Qdepth, h->maxQsinceinit);
+
+	pos += size; len += size;
+	
+	size = sprintf(buffer+len, "Logical Drive Info:\n");
+	pos += size; len += size;
+
+	for(i=0; i<h->log_drives; i++) {
+		drv = &h->drv[i];
+		size = sprintf(buffer+len, "ida/c%dd%d: blksz=%d nr_blks=%d\n",
+				ctlr, i, drv->blk_size, drv->nr_blks);
+		pos += size; len += size;
+	}
+
+#ifdef CPQ_PROC_PRINT_QUEUES
+	spin_lock_irqsave(IDA_LOCK(h->ctlr), flags); 
+	size = sprintf(buffer+len, "\nCurrent Queues:\n");
+	pos += size; len += size;
+
+	c = h->reqQ;
+	size = sprintf(buffer+len, "reqQ = %p", c); pos += size; len += size;
+	if (c) c=c->next;
+	while(c && c != h->reqQ) {
+		size = sprintf(buffer+len, "->%p", c);
+		pos += size; len += size;
+		c=c->next;
+	}
+
+	c = h->cmpQ;
+	size = sprintf(buffer+len, "\ncmpQ = %p", c); pos += size; len += size;
+	if (c) c=c->next;
+	while(c && c != h->cmpQ) {
+		size = sprintf(buffer+len, "->%p", c);
+		pos += size; len += size;
+		c=c->next;
+	}
+
+	size = sprintf(buffer+len, "\n"); pos += size; len += size;
+	spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags); 
+#endif
+	size = sprintf(buffer+len, "nr_allocs = %d\nnr_frees = %d\n",
+			h->nr_allocs, h->nr_frees);
+	pos += size; len += size;
+
+	*eof = 1;
+	*start = buffer+offset;
+	len -= offset;
+	if (len>length)
+		len = length;
+	return len;
+}
+#endif /* CONFIG_PROC_FS */
+
+module_param_array(eisa, int, NULL, 0);
+
+static void release_io_mem(ctlr_info_t *c)
+{
+	/* if IO mem was not protected do nothing */
+	if( c->io_mem_addr == 0)
+		return;
+	release_region(c->io_mem_addr, c->io_mem_length);
+	c->io_mem_addr = 0;
+	c->io_mem_length = 0;
+}
+
+static void __devexit cpqarray_remove_one(int i)
+{
+	int j;
+	char buff[4];
+
+	/* sendcmd will turn off interrupts and send the flush, writing
+	 * all data in the battery-backed cache out to the disks.  No
+	 * data is returned, but we must not pass NULL to sendcmd. */
+	if( sendcmd(FLUSH_CACHE, i, buff, 4, 0, 0, 0))
+	{
+		printk(KERN_WARNING "Unable to flush cache on controller %d\n",
+				i);
+	}
+	free_irq(hba[i]->intr, hba[i]);
+	iounmap(hba[i]->vaddr);
+	unregister_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname);
+	del_timer(&hba[i]->timer);
+	remove_proc_entry(hba[i]->devname, proc_array);
+	pci_free_consistent(hba[i]->pci_dev,
+			NR_CMDS * sizeof(cmdlist_t), (hba[i]->cmd_pool),
+			hba[i]->cmd_pool_dhandle);
+	kfree(hba[i]->cmd_pool_bits);
+	for(j = 0; j < NWD; j++) {
+		if (ida_gendisk[i][j]->flags & GENHD_FL_UP)
+			del_gendisk(ida_gendisk[i][j]);
+		devfs_remove("ida/c%dd%d",i,j);
+		put_disk(ida_gendisk[i][j]);
+	}
+	blk_cleanup_queue(hba[i]->queue);
+	release_io_mem(hba[i]);
+	free_hba(i);
+}
+
+static void __devexit cpqarray_remove_one_pci (struct pci_dev *pdev)
+{
+	int i;
+	ctlr_info_t *tmp_ptr;
+
+	if (pci_get_drvdata(pdev) == NULL) {
+		printk(KERN_ERR "cpqarray: Unable to remove device\n");
+		return;
+	}
+
+	tmp_ptr = pci_get_drvdata(pdev);
+	i = tmp_ptr->ctlr;
+	if (hba[i] == NULL) {
+		printk(KERN_ERR "cpqarray: controller %d appears to have "
+			"already been removed\n", i);
+		return;
+        }
+	pci_set_drvdata(pdev, NULL);
+
+	cpqarray_remove_one(i);
+}
+
+/* Remove an instance that was not removed automatically;
+ * it must be an EISA card.
+ */
+static void __devexit cpqarray_remove_one_eisa (int i)
+{
+	if (hba[i] == NULL) {
+		printk(KERN_ERR "cpqarray: controller %d appears to have "
+			"already been removed\n", i);
+		return;
+        }
+	cpqarray_remove_one(i);
+}
+
+/* pdev is NULL for eisa */
+static int cpqarray_register_ctlr( int i, struct pci_dev *pdev)
+{
+	request_queue_t *q;
+	int j;
+
+	/* 
+	 * register block devices
+	 * Find disks and fill in structs
+	 * Get an interrupt, set the Q depth and get into /proc
+	 */
+
+	/* If this is successful it should ensure that we are the only */
+	/* instance of the driver */
+	if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
+		goto Enomem4;
+	}
+	hba[i]->access.set_intr_mask(hba[i], 0);
+	if (request_irq(hba[i]->intr, do_ida_intr,
+		SA_INTERRUPT|SA_SHIRQ|SA_SAMPLE_RANDOM,
+		hba[i]->devname, hba[i]))
+	{
+		printk(KERN_ERR "cpqarray: Unable to get irq %d for %s\n",
+				hba[i]->intr, hba[i]->devname);
+		goto Enomem3;
+	}
+		
+	for (j=0; j<NWD; j++) {
+		ida_gendisk[i][j] = alloc_disk(1 << NWD_SHIFT);
+		if (!ida_gendisk[i][j])
+			goto Enomem2;
+	}
+
+	hba[i]->cmd_pool = (cmdlist_t *)pci_alloc_consistent(
+		hba[i]->pci_dev, NR_CMDS * sizeof(cmdlist_t),
+		&(hba[i]->cmd_pool_dhandle));
+	hba[i]->cmd_pool_bits = kmalloc(
+		((NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG)*sizeof(unsigned long),
+		GFP_KERNEL);
+
+	if (!hba[i]->cmd_pool_bits || !hba[i]->cmd_pool)
+			goto Enomem1;
+
+	memset(hba[i]->cmd_pool, 0, NR_CMDS * sizeof(cmdlist_t));
+	memset(hba[i]->cmd_pool_bits, 0, ((NR_CMDS+BITS_PER_LONG-1)/BITS_PER_LONG)*sizeof(unsigned long));
+	printk(KERN_INFO "cpqarray: Finding drives on %s",
+		hba[i]->devname);
+
+	spin_lock_init(&hba[i]->lock);
+	q = blk_init_queue(do_ida_request, &hba[i]->lock);
+	if (!q)
+		goto Enomem1;
+
+	hba[i]->queue = q;
+	q->queuedata = hba[i];
+
+	getgeometry(i);
+	start_fwbk(i);
+
+	ida_procinit(i);
+
+	if (pdev)
+		blk_queue_bounce_limit(q, hba[i]->pci_dev->dma_mask);
+
+	/* This is a hardware imposed limit. */
+	blk_queue_max_hw_segments(q, SG_MAX);
+
+	/* This is a driver limit and could be eliminated. */
+	blk_queue_max_phys_segments(q, SG_MAX);
+	
+	init_timer(&hba[i]->timer);
+	hba[i]->timer.expires = jiffies + IDA_TIMER;
+	hba[i]->timer.data = (unsigned long)hba[i];
+	hba[i]->timer.function = ida_timer;
+	add_timer(&hba[i]->timer);
+
+	/* Enable IRQ now that spinlock and rate limit timer are set up */
+	hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
+
+	for(j=0; j<NWD; j++) {
+		struct gendisk *disk = ida_gendisk[i][j];
+		drv_info_t *drv = &hba[i]->drv[j];
+		sprintf(disk->disk_name, "ida/c%dd%d", i, j);
+		disk->major = COMPAQ_SMART2_MAJOR + i;
+		disk->first_minor = j<<NWD_SHIFT;
+		disk->fops = &ida_fops;
+		if (j && !drv->nr_blks)
+			continue;
+		blk_queue_hardsect_size(hba[i]->queue, drv->blk_size);
+		set_capacity(disk, drv->nr_blks);
+		disk->queue = hba[i]->queue;
+		disk->private_data = drv;
+		add_disk(disk);
+	}
+
+	/* done ! */
+	return(i);
+
+Enomem1:
+	nr_ctlr = i; 
+	kfree(hba[i]->cmd_pool_bits);
+	if (hba[i]->cmd_pool)
+		pci_free_consistent(hba[i]->pci_dev, NR_CMDS*sizeof(cmdlist_t), 
+				    hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
+Enomem2:
+	while (j--) {
+		put_disk(ida_gendisk[i][j]);
+		ida_gendisk[i][j] = NULL;
+	}
+	free_irq(hba[i]->intr, hba[i]);
+Enomem3:
+	unregister_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname);
+Enomem4:
+	if (pdev)
+		pci_set_drvdata(pdev, NULL);
+	release_io_mem(hba[i]);
+	free_hba(i);
+
+	printk(KERN_ERR "cpqarray: out of memory\n");
+
+	return -1;
+}
+
+static int __init cpqarray_init_one( struct pci_dev *pdev,
+	const struct pci_device_id *ent)
+{
+	int i;
+
+	printk(KERN_DEBUG "cpqarray: Device 0x%x has been found at"
+			" bus %d dev %d func %d\n",
+			pdev->device, pdev->bus->number, PCI_SLOT(pdev->devfn),
+			PCI_FUNC(pdev->devfn));
+	i = alloc_cpqarray_hba();
+	if( i < 0 )
+		return (-1);
+	memset(hba[i], 0, sizeof(ctlr_info_t));
+	sprintf(hba[i]->devname, "ida%d", i);
+	hba[i]->ctlr = i;
+	/* Initialize the pdev driver private data */
+	pci_set_drvdata(pdev, hba[i]);
+
+	if (cpqarray_pci_init(hba[i], pdev) != 0) {
+		pci_set_drvdata(pdev, NULL);
+		release_io_mem(hba[i]);
+		free_hba(i);
+		return -1;
+	}
+
+	return (cpqarray_register_ctlr(i, pdev));
+}
+
+static struct pci_driver cpqarray_pci_driver = {
+	.name = "cpqarray",
+	.probe = cpqarray_init_one,
+	.remove = __devexit_p(cpqarray_remove_one_pci),
+	.id_table = cpqarray_pci_device_id,
+};
+
+/*
+ *  This is it.  Find all the controllers and register them.
+ *  returns the number of block devices registered.
+ */
+static int __init cpqarray_init(void)
+{
+	int num_cntlrs_reg = 0;
+	int i;
+	int rc = 0;
+
+	/* detect controllers */
+	printk(DRIVER_NAME "\n");
+
+	rc = pci_register_driver(&cpqarray_pci_driver);
+	if (rc)
+		return rc;
+	cpqarray_eisa_detect();
+	
+	for (i=0; i < MAX_CTLR; i++) {
+		if (hba[i] != NULL)
+			num_cntlrs_reg++;
+	}
+
+	return(num_cntlrs_reg);
+}
+
+/* Function to find the first free pointer into our hba[] array */
+/* Returns -1 if no free entries are left.  */
+static int alloc_cpqarray_hba(void)
+{
+	int i;
+
+	for(i=0; i< MAX_CTLR; i++) {
+		if (hba[i] == NULL) {
+			hba[i] = kmalloc(sizeof(ctlr_info_t), GFP_KERNEL);
+			if(hba[i]==NULL) {
+				printk(KERN_ERR "cpqarray: out of memory.\n");
+				return (-1);
+			}
+			return (i);
+		}
+	}
+	printk(KERN_WARNING "cpqarray: This driver supports a maximum"
+		" of 8 controllers.\n");
+	return(-1);
+}
+
+static void free_hba(int i)
+{
+	kfree(hba[i]);
+	hba[i]=NULL;
+}
+
+/*
+ * Find the IO address of the controller, its IRQ and so forth.  Fill
+ * in some basic stuff into the ctlr_info_t structure.
+ */
+static int cpqarray_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
+{
+	ushort vendor_id, device_id, command;
+	unchar cache_line_size, latency_timer;
+	unchar irq, revision;
+	unsigned long addr[6];
+	__u32 board_id;
+
+	int i;
+
+	c->pci_dev = pdev;
+	if (pci_enable_device(pdev)) {
+		printk(KERN_ERR "cpqarray: Unable to Enable PCI device\n");
+		return -1;
+	}
+	vendor_id = pdev->vendor;
+	device_id = pdev->device;
+	irq = pdev->irq;
+
+	for(i=0; i<6; i++)
+		addr[i] = pci_resource_start(pdev, i);
+
+	if (pci_set_dma_mask(pdev, CPQARRAY_DMA_MASK) != 0)
+	{
+		printk(KERN_ERR "cpqarray: Unable to set DMA mask\n");
+		return -1;
+	}
+
+	pci_read_config_word(pdev, PCI_COMMAND, &command);
+	pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
+	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line_size);
+	pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &latency_timer);
+
+	pci_read_config_dword(pdev, 0x2c, &board_id);
+
+	/* check to see if controller has been disabled */
+	if(!(command & 0x02)) {
+		printk(KERN_WARNING
+			"cpqarray: controller appears to be disabled\n");
+		return(-1);
+	}
+
+DBGINFO(
+	printk("vendor_id = %x\n", vendor_id);
+	printk("device_id = %x\n", device_id);
+	printk("command = %x\n", command);
+	for(i=0; i<6; i++)
+		printk("addr[%d] = %lx\n", i, addr[i]);
+	printk("revision = %x\n", revision);
+	printk("irq = %x\n", irq);
+	printk("cache_line_size = %x\n", cache_line_size);
+	printk("latency_timer = %x\n", latency_timer);
+	printk("board_id = %x\n", board_id);
+);
+
+	c->intr = irq;
+
+	for(i=0; i<6; i++) {
+		if (pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO)
+		{ /* IO space */
+			c->io_mem_addr = addr[i];
+			c->io_mem_length = pci_resource_end(pdev, i)
+				- pci_resource_start(pdev, i) + 1;
+			if(!request_region( c->io_mem_addr, c->io_mem_length,
+				"cpqarray"))
+			{
+				printk(KERN_WARNING "cpqarray: I/O memory "
+					"range already in use, addr %lx "
+					"length = %ld\n", c->io_mem_addr,
+					c->io_mem_length);
+				c->io_mem_addr = 0;
+				c->io_mem_length = 0;
+			}
+			break;
+		}
+	}
+
+	c->paddr = 0;
+	for(i=0; i<6; i++)
+		if (!(pci_resource_flags(pdev, i) &
+				PCI_BASE_ADDRESS_SPACE_IO)) {
+			c->paddr = pci_resource_start (pdev, i);
+			break;
+		}
+	if (!c->paddr)
+		return -1;
+	c->vaddr = remap_pci_mem(c->paddr, 128);
+	if (!c->vaddr)
+		return -1;
+	c->board_id = board_id;
+
+	for(i=0; i<NR_PRODUCTS; i++) {
+		if (board_id == products[i].board_id) {
+			c->product_name = products[i].product_name;
+			c->access = *(products[i].access);
+			break;
+		}
+	}
+	if (i == NR_PRODUCTS) {
+		printk(KERN_WARNING "cpqarray: Sorry, I don't know how"
+			" to access the SMART Array controller %08lx\n", 
+				(unsigned long)board_id);
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Map (physical) PCI mem into (virtual) kernel space
+ */
+static void __iomem *remap_pci_mem(ulong base, ulong size)
+{
+	ulong page_base = ((ulong) base) & PAGE_MASK;
+	ulong page_offs = ((ulong) base) - page_base;
+	void __iomem *page_remapped = ioremap(page_base, page_offs + size);
+
+	return (page_remapped ? (page_remapped + page_offs) : NULL);
+}
+
+#ifndef MODULE
+/*
+ * Config string is a comma separated set of i/o addresses of EISA cards.
+ */
+static int cpqarray_setup(char *str)
+{
+	int i, ints[9];
+
+	(void)get_options(str, ARRAY_SIZE(ints), ints);
+
+	for(i=0; i<ints[0] && i<8; i++)
+		eisa[i] = ints[i+1];
+	return 1;
+}
+
+__setup("smart2=", cpqarray_setup);
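+
+/*
+ * For example, booting with "smart2=0x5000,0x6000" (addresses purely
+ * illustrative) stores those two I/O addresses in eisa[] for
+ * cpqarray_eisa_detect() to probe.
+ */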
+
+#endif
+
+/*
+ * Find an EISA controller's signature.  Set up an hba if we find it.
+ */
+static int cpqarray_eisa_detect(void)
+{
+	int i=0, j;
+	__u32 board_id;
+	int intr;
+	int ctlr;
+	int num_ctlr = 0;
+
+	while(i<8 && eisa[i]) {
+		ctlr = alloc_cpqarray_hba();
+		if(ctlr == -1)
+			break;
+		board_id = inl(eisa[i]+0xC80);
+		for(j=0; j < NR_PRODUCTS; j++)
+			if (board_id == products[j].board_id) 
+				break;
+
+		if (j == NR_PRODUCTS) {
+			printk(KERN_WARNING "cpqarray: Sorry, I don't know how"
+				" to access the SMART Array controller %08lx\n",
+				(unsigned long)board_id);
+			/* free the hba slot and advance, or this loop
+			 * would leak it and retry the same address */
+			free_hba(ctlr);
+			i++;
+			continue;
+		}
+
+		memset(hba[ctlr], 0, sizeof(ctlr_info_t));
+		hba[ctlr]->io_mem_addr = eisa[i];
+		hba[ctlr]->io_mem_length = 0x7FF;
+		if(!request_region(hba[ctlr]->io_mem_addr,
+				hba[ctlr]->io_mem_length,
+				"cpqarray"))
+		{
+			printk(KERN_WARNING "cpqarray: I/O range already in "
+					"use addr = %lx length = %ld\n",
+					hba[ctlr]->io_mem_addr,
+					hba[ctlr]->io_mem_length);
+			free_hba(ctlr);
+			i++;
+			continue;
+		}
+
+		/*
+		 * Read the config register to find our interrupt
+		 */
+		intr = inb(eisa[i]+0xCC0) >> 4;
+		if (intr & 1) intr = 11;
+		else if (intr & 2) intr = 10;
+		else if (intr & 4) intr = 14;
+		else if (intr & 8) intr = 15;
+		
+		hba[ctlr]->intr = intr;
+		sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
+		hba[ctlr]->product_name = products[j].product_name;
+		hba[ctlr]->access = *(products[j].access);
+		hba[ctlr]->ctlr = ctlr;
+		hba[ctlr]->board_id = board_id;
+		hba[ctlr]->pci_dev = NULL; /* not PCI */
+
+DBGINFO(
+	printk("i = %d, j = %d\n", i, j);
+	printk("irq = %x\n", intr);
+	printk("product name = %s\n", products[j].product_name);
+	printk("board_id = %x\n", board_id);
+);
+
+		num_ctlr++;
+		i++;
+
+		if (cpqarray_register_ctlr(ctlr, NULL) == -1)
+			printk(KERN_WARNING
+				"cpqarray: Can't register EISA controller %d\n",
+				ctlr);
+
+	}
+
+	return num_ctlr;
+}
+
+/*
+ * Open.  Make sure the device is really there.
+ */
+static int ida_open(struct inode *inode, struct file *filep)
+{
+	drv_info_t *drv = get_drv(inode->i_bdev->bd_disk);
+	ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
+
+	DBGINFO(printk("ida_open %s\n", inode->i_bdev->bd_disk->disk_name));
+	/*
+	 * Root is allowed to open raw volume zero even if it's not configured
+	 * so array config can still work.  I don't think I really like this,
+	 * but I'm already using way too many device nodes to claim another one
+	 * for "raw controller".
+	 */
+	if (!drv->nr_blks) {
+		if (!capable(CAP_SYS_RAWIO))
+			return -ENXIO;
+		if (!capable(CAP_SYS_ADMIN) && drv != host->drv)
+			return -ENXIO;
+	}
+	host->usage_count++;
+	return 0;
+}
+
+/*
+ * Close.  Sync first.
+ */
+static int ida_release(struct inode *inode, struct file *filep)
+{
+	ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
+	host->usage_count--;
+	return 0;
+}
+
+/*
+ * Enqueuing and dequeuing functions for cmdlists.
+ */
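+/*
+ * Each queue is a circular doubly-linked list threaded through the
+ * cmdlists themselves: an empty queue is a NULL head pointer, and a
+ * single-element queue is a command whose next and prev point back at
+ * itself, which is the invariant removeQ() uses to detect the last
+ * element.
+ */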
+static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c)
+{
+	if (*Qptr == NULL) {
+		*Qptr = c;
+		c->next = c->prev = c;
+	} else {
+		c->prev = (*Qptr)->prev;
+		c->next = (*Qptr);
+		(*Qptr)->prev->next = c;
+		(*Qptr)->prev = c;
+	}
+}
+
+static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c)
+{
+	if (c && c->next != c) {
+		if (*Qptr == c) *Qptr = c->next;
+		c->prev->next = c->next;
+		c->next->prev = c->prev;
+	} else {
+		*Qptr = NULL;
+	}
+	return c;
+}
+
+/*
+ * Get a request and submit it to the controller.
+ * This routine needs to grab all the requests it possibly can from the
+ * req Q and submit them.  Interrupts are off (and need to be off) when
+ * you are in here (either via the dummy do_ida_request functions or by
+ * being called from the interrupt handler).
+ */
+static void do_ida_request(request_queue_t *q)
+{
+	ctlr_info_t *h = q->queuedata;
+	cmdlist_t *c;
+	struct request *creq;
+	struct scatterlist tmp_sg[SG_MAX];
+	int i, dir, seg;
+
+	if (blk_queue_plugged(q))
+		goto startio;
+
+queue_next:
+	creq = elv_next_request(q);
+	if (!creq)
+		goto startio;
+
+	if (creq->nr_phys_segments > SG_MAX)
+		BUG();
+
+	if ((c = cmd_alloc(h,1)) == NULL)
+		goto startio;
+
+	blkdev_dequeue_request(creq);
+
+	c->ctlr = h->ctlr;
+	c->hdr.unit = (drv_info_t *)(creq->rq_disk->private_data) - h->drv;
+	c->hdr.size = sizeof(rblk_t) >> 2;
+	c->size += sizeof(rblk_t);
+
+	c->req.hdr.blk = creq->sector;
+	c->rq = creq;
+DBGPX(
+	printk("sector=%d, nr_sectors=%d\n", creq->sector, creq->nr_sectors);
+);
+	seg = blk_rq_map_sg(q, creq, tmp_sg);
+
+	/* Now do all the DMA Mappings */
+	if (rq_data_dir(creq) == READ)
+		dir = PCI_DMA_FROMDEVICE;
+	else
+		dir = PCI_DMA_TODEVICE;
+	for( i=0; i < seg; i++)
+	{
+		c->req.sg[i].size = tmp_sg[i].length;
+		c->req.sg[i].addr = (__u32) pci_map_page(h->pci_dev,
+						 tmp_sg[i].page,
+						 tmp_sg[i].offset,
+						 tmp_sg[i].length, dir);
+	}
+DBGPX(	printk("Submitting %d sectors in %d segments\n", creq->nr_sectors, seg); );
+	c->req.hdr.sg_cnt = seg;
+	c->req.hdr.blk_cnt = creq->nr_sectors;
+	c->req.hdr.cmd = (rq_data_dir(creq) == READ) ? IDA_READ : IDA_WRITE;
+	c->type = CMD_RWREQ;
+
+	/* Put the request on the tail of the request queue */
+	addQ(&h->reqQ, c);
+	h->Qdepth++;
+	if (h->Qdepth > h->maxQsinceinit) 
+		h->maxQsinceinit = h->Qdepth;
+
+	goto queue_next;
+
+startio:
+	start_io(h);
+}
+
+/* 
+ * start_io submits everything on a controller's request queue
+ * and moves it to the completion queue.
+ *
+ * Interrupts had better be off if you're in here
+ */
+static void start_io(ctlr_info_t *h)
+{
+	cmdlist_t *c;
+
+	while((c = h->reqQ) != NULL) {
+		/* Can't do anything if we're busy */
+		if (h->access.fifo_full(h) == 0)
+			return;
+
+		/* Get the first entry from the request Q */
+		removeQ(&h->reqQ, c);
+		h->Qdepth--;
+	
+		/* Tell the controller to do our bidding */
+		h->access.submit_command(h, c);
+
+		/* Get onto the completion Q */
+		addQ(&h->cmpQ, c);
+	}
+}
+
+static inline void complete_buffers(struct bio *bio, int ok)
+{
+	struct bio *xbh;
+	while(bio) {
+		int nr_sectors = bio_sectors(bio);
+
+		xbh = bio->bi_next;
+		bio->bi_next = NULL;
+		
+		blk_finished_io(nr_sectors);
+		bio_endio(bio, nr_sectors << 9, ok ? 0 : -EIO);
+
+		bio = xbh;
+	}
+}
+/*
+ * Mark all buffers that cmd was responsible for
+ */
+static inline void complete_command(cmdlist_t *cmd, int timeout)
+{
+	int ok=1;
+	int i, ddir;
+
+	if (cmd->req.hdr.rcode & RCODE_NONFATAL &&
+	   (hba[cmd->ctlr]->misc_tflags & MISC_NONFATAL_WARN) == 0) {
+		printk(KERN_NOTICE "Non Fatal error on ida/c%dd%d\n",
+				cmd->ctlr, cmd->hdr.unit);
+		hba[cmd->ctlr]->misc_tflags |= MISC_NONFATAL_WARN;
+	}
+	if (cmd->req.hdr.rcode & RCODE_FATAL) {
+		printk(KERN_WARNING "Fatal error on ida/c%dd%d\n",
+				cmd->ctlr, cmd->hdr.unit);
+		ok = 0;
+	}
+	if (cmd->req.hdr.rcode & RCODE_INVREQ) {
+		printk(KERN_WARNING "Invalid request on ida/c%dd%d = (cmd=%x sect=%d cnt=%d sg=%d ret=%x)\n",
+				cmd->ctlr, cmd->hdr.unit, cmd->req.hdr.cmd,
+				cmd->req.hdr.blk, cmd->req.hdr.blk_cnt,
+				cmd->req.hdr.sg_cnt, cmd->req.hdr.rcode);
+		ok = 0;
+	}
+	if (timeout) ok = 0;
+	/* unmap the DMA mapping for all the scatter gather elements */
+	if (cmd->req.hdr.cmd == IDA_READ)
+		ddir = PCI_DMA_FROMDEVICE;
+	else
+		ddir = PCI_DMA_TODEVICE;
+	for(i=0; i<cmd->req.hdr.sg_cnt; i++)
+		pci_unmap_page(hba[cmd->ctlr]->pci_dev, cmd->req.sg[i].addr,
+				cmd->req.sg[i].size, ddir);
+
+	complete_buffers(cmd->rq->bio, ok);
+
+	DBGPX(printk("Done with %p\n", cmd->rq););
+	end_that_request_last(cmd->rq);
+}
+
+/*
+ *  The controller will interrupt us upon completion of commands.
+ *  Find the command on the completion queue, remove it, tell the OS and
+ *  try to queue up more IO
+ */
+static irqreturn_t do_ida_intr(int irq, void *dev_id, struct pt_regs *regs)
+{
+	ctlr_info_t *h = dev_id;
+	cmdlist_t *c;
+	unsigned long istat;
+	unsigned long flags;
+	__u32 a,a1;
+
+	istat = h->access.intr_pending(h);
+	/* Is this interrupt for us? */
+	if (istat == 0)
+		return IRQ_NONE;
+
+	/*
+	 * If there are completed commands in the completion queue,
+	 * we had better do something about it.
+	 */
+	spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
+	if (istat & FIFO_NOT_EMPTY) {
+		while((a = h->access.command_completed(h))) {
+			a1 = a; a &= ~3;
+			if ((c = h->cmpQ) == NULL)
+			{  
+				printk(KERN_WARNING "cpqarray: Completion of %08lx ignored\n", (unsigned long)a1);
+				continue;	
+			} 
+			while(c->busaddr != a) {
+				c = c->next;
+				if (c == h->cmpQ) 
+					break;
+			}
+			/*
+			 * If we've found the command, take it off the
+			 * completion Q and free it
+			 */
+			if (c->busaddr == a) {
+				removeQ(&h->cmpQ, c);
+				/* Check for an invalid command: the
+				 * controller flags a command error in the
+				 * low address bits, but rcode stays 0. */
+				if ((a1 & 0x03) && (c->req.hdr.rcode == 0))
+					c->req.hdr.rcode = RCODE_INVREQ;
+				if (c->type == CMD_RWREQ) {
+					complete_command(c, 0);
+					cmd_free(h, c, 1);
+				} else if (c->type == CMD_IOCTL_PEND) {
+					c->type = CMD_IOCTL_DONE;
+				}
+				continue;
+			}
+		}
+	}
+
+	/*
+	 * See if we can queue up some more IO
+	 */
+	do_ida_request(h->queue);
+	spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags); 
+	return IRQ_HANDLED;
+}
+
+/*
+ * This timer was for timing out requests that haven't happened after
+ * IDA_TIMEOUT.  That wasn't such a good idea.  This timer is used to
+ * reset a flags structure so we don't flood the user with
+ * "Non-Fatal error" messages.
+ */
+static void ida_timer(unsigned long tdata)
+{
+	ctlr_info_t *h = (ctlr_info_t*)tdata;
+
+	h->timer.expires = jiffies + IDA_TIMER;
+	add_timer(&h->timer);
+	h->misc_tflags = 0;
+}
+
+/*
+ *  ida_ioctl does some miscellaneous stuff like reporting drive geometry,
+ *  setting readahead and submitting commands from userspace to the controller.
+ */
+static int ida_ioctl(struct inode *inode, struct file *filep, unsigned int cmd, unsigned long arg)
+{
+	drv_info_t *drv = get_drv(inode->i_bdev->bd_disk);
+	ctlr_info_t *host = get_host(inode->i_bdev->bd_disk);
+	int error;
+	int diskinfo[4];
+	struct hd_geometry __user *geo = (struct hd_geometry __user *)arg;
+	ida_ioctl_t __user *io = (ida_ioctl_t __user *)arg;
+	ida_ioctl_t *my_io;
+
+	switch(cmd) {
+	case HDIO_GETGEO:
+		if (drv->cylinders) {
+			diskinfo[0] = drv->heads;
+			diskinfo[1] = drv->sectors;
+			diskinfo[2] = drv->cylinders;
+		} else {
+			diskinfo[0] = 0xff;
+			diskinfo[1] = 0x3f;
+			diskinfo[2] = drv->nr_blks / (0xff*0x3f);
+		}
+		put_user(diskinfo[0], &geo->heads);
+		put_user(diskinfo[1], &geo->sectors);
+		put_user(diskinfo[2], &geo->cylinders);
+		put_user(get_start_sect(inode->i_bdev), &geo->start);
+		return 0;
+	case IDAGETDRVINFO:
+		if (copy_to_user(&io->c.drv, drv, sizeof(drv_info_t)))
+			return -EFAULT;
+		return 0;
+	case IDAPASSTHRU:
+		if (!capable(CAP_SYS_RAWIO))
+			return -EPERM;
+		my_io = kmalloc(sizeof(ida_ioctl_t), GFP_KERNEL);
+		if (!my_io)
+			return -ENOMEM;
+		error = -EFAULT;
+		if (copy_from_user(my_io, io, sizeof(*my_io)))
+			goto out_passthru;
+		error = ida_ctlr_ioctl(host, drv - host->drv, my_io);
+		if (error)
+			goto out_passthru;
+		error = -EFAULT;
+		if (copy_to_user(io, my_io, sizeof(*my_io)))
+			goto out_passthru;
+		error = 0;
+out_passthru:
+		kfree(my_io);
+		return error;
+	case IDAGETCTLRSIG:
+		if (!arg) return -EINVAL;
+		put_user(host->ctlr_sig, (int __user *)arg);
+		return 0;
+	case IDAREVALIDATEVOLS:
+		if (iminor(inode) != 0)
+			return -ENXIO;
+		return revalidate_allvol(host);
+	case IDADRIVERVERSION:
+		if (!arg) return -EINVAL;
+		put_user(DRIVER_VERSION, (unsigned long __user *)arg);
+		return 0;
+	case IDAGETPCIINFO:
+	{
+		ida_pci_info_struct pciinfo;
+
+		if (!arg) return -EINVAL;
+		pciinfo.bus = host->pci_dev->bus->number;
+		pciinfo.dev_fn = host->pci_dev->devfn;
+		pciinfo.board_id = host->board_id;
+		if(copy_to_user((void __user *) arg, &pciinfo,  
+			sizeof( ida_pci_info_struct)))
+				return -EFAULT;
+		return(0);
+	}	
+
+	default:
+		return -EINVAL;
+	}
+		
+}
+/*
+ * ida_ctlr_ioctl is for passing commands to the controller from userspace.
+ * The command block (io) has already been copied to kernel space for us,
+ * however, any elements in the sglist need to be copied to kernel space
+ * or copied back to userspace.
+ *
+ * Only root may perform a controller passthru command, however I'm not doing
+ * any serious sanity checking on the arguments.  Doing an IDA_WRITE_MEDIA and
+ * putting a 64M buffer in the sglist is probably a *bad* idea.
+ */
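+/*
+ * Completion of a passthru command works by marking it CMD_IOCTL_PEND
+ * before queueing; do_ida_intr() flips the type to CMD_IOCTL_DONE when
+ * the controller finishes it, and the wait loop below simply yields
+ * the CPU until that happens.
+ */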
+static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io)
+{
+	int ctlr = h->ctlr;
+	cmdlist_t *c;
+	void *p = NULL;
+	unsigned long flags;
+	int error;
+
+	if ((c = cmd_alloc(h, 0)) == NULL)
+		return -ENOMEM;
+	c->ctlr = ctlr;
+	c->hdr.unit = (io->unit & UNITVALID) ? (io->unit & ~UNITVALID) : dsk;
+	c->hdr.size = sizeof(rblk_t) >> 2;
+	c->size += sizeof(rblk_t);
+
+	c->req.hdr.cmd = io->cmd;
+	c->req.hdr.blk = io->blk;
+	c->req.hdr.blk_cnt = io->blk_cnt;
+	c->type = CMD_IOCTL_PEND;
+
+	/* Pre submit processing */
+	switch(io->cmd) {
+	case PASSTHRU_A:
+		p = kmalloc(io->sg[0].size, GFP_KERNEL);
+		if (!p) {
+			error = -ENOMEM;
+			cmd_free(h, c, 0);
+			return error;
+		}
+		if (copy_from_user(p, io->sg[0].addr, io->sg[0].size)) {
+			kfree(p);
+			cmd_free(h, c, 0); 
+			return -EFAULT;
+		}
+		c->req.hdr.blk = pci_map_single(h->pci_dev, &(io->c), 
+				sizeof(ida_ioctl_t), 
+				PCI_DMA_BIDIRECTIONAL);
+		c->req.sg[0].size = io->sg[0].size;
+		c->req.sg[0].addr = pci_map_single(h->pci_dev, p, 
+			c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
+		c->req.hdr.sg_cnt = 1;
+		break;
+	case IDA_READ:
+	case READ_FLASH_ROM:
+	case SENSE_CONTROLLER_PERFORMANCE:
+		p = kmalloc(io->sg[0].size, GFP_KERNEL);
+		if (!p) {
+			error = -ENOMEM;
+			cmd_free(h, c, 0);
+			return error;
+		}
+
+		c->req.sg[0].size = io->sg[0].size;
+		c->req.sg[0].addr = pci_map_single(h->pci_dev, p, 
+			c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL); 
+		c->req.hdr.sg_cnt = 1;
+		break;
+	case IDA_WRITE:
+	case IDA_WRITE_MEDIA:
+	case DIAG_PASS_THRU:
+	case COLLECT_BUFFER:
+	case WRITE_FLASH_ROM:
+		p = kmalloc(io->sg[0].size, GFP_KERNEL);
+		if (!p) {
+			error = -ENOMEM;
+			cmd_free(h, c, 0);
+			return error;
+		}
+		if (copy_from_user(p, io->sg[0].addr, io->sg[0].size)) {
+			kfree(p);
+			cmd_free(h, c, 0);
+			return -EFAULT;
+		}
+		c->req.sg[0].size = io->sg[0].size;
+		c->req.sg[0].addr = pci_map_single(h->pci_dev, p, 
+			c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL); 
+		c->req.hdr.sg_cnt = 1;
+		break;
+	default:
+		c->req.sg[0].size = sizeof(io->c);
+		c->req.sg[0].addr = pci_map_single(h->pci_dev,&io->c, 
+			c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
+		c->req.hdr.sg_cnt = 1;
+	}
+	
+	/* Put the request on the tail of the request queue */
+	spin_lock_irqsave(IDA_LOCK(ctlr), flags);
+	addQ(&h->reqQ, c);
+	h->Qdepth++;
+	start_io(h);
+	spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
+
+	/* Wait for completion */
+	while(c->type != CMD_IOCTL_DONE)
+		schedule();
+
+	/* Unmap the DMA  */
+	pci_unmap_single(h->pci_dev, c->req.sg[0].addr, c->req.sg[0].size, 
+		PCI_DMA_BIDIRECTIONAL);
+	/* Post submit processing */
+	switch(io->cmd) {
+	case PASSTHRU_A:
+		pci_unmap_single(h->pci_dev, c->req.hdr.blk,
+				sizeof(ida_ioctl_t),
+				PCI_DMA_BIDIRECTIONAL);
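+		/* fall through: PASSTHRU_A also copies the data back */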
+	case IDA_READ:
+	case DIAG_PASS_THRU:
+	case SENSE_CONTROLLER_PERFORMANCE:
+	case READ_FLASH_ROM:
+		if (copy_to_user(io->sg[0].addr, p, io->sg[0].size)) {
+			kfree(p);
+			/* don't leak the command on the error path */
+			cmd_free(h, c, 0);
+			return -EFAULT;
+		}
+		/* fall through and free p */
+	case IDA_WRITE:
+	case IDA_WRITE_MEDIA:
+	case COLLECT_BUFFER:
+	case WRITE_FLASH_ROM:
+		kfree(p);
+		break;
+	default:
+		/* Nothing to do */
+		break;
+	}
+
+	io->rcode = c->req.hdr.rcode;
+	cmd_free(h, c, 0);
+	return(0);
+}
+
+/*
+ * Commands are pre-allocated in a large block.  Here we use a simple bitmap
+ * scheme to suballocate them to the driver.  Operations that are not time
+ * critical (and can wait for the allocation and possibly sleep) can pass
+ * get_from_pool == 0 to have a fresh command allocated with
+ * pci_alloc_consistent() instead.
+ */
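+/*
+ * Note that the pool path below claims a slot with test_and_set_bit()
+ * after scanning for a zero bit, so two allocators racing for the same
+ * slot are resolved by having the loser rescan the bitmap.
+ */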
+static cmdlist_t * cmd_alloc(ctlr_info_t *h, int get_from_pool)
+{
+	cmdlist_t * c;
+	int i;
+	dma_addr_t cmd_dhandle;
+
+	if (!get_from_pool) {
+		c = (cmdlist_t*)pci_alloc_consistent(h->pci_dev, 
+			sizeof(cmdlist_t), &cmd_dhandle);
+		if(c==NULL)
+			return NULL;
+	} else {
+		do {
+			i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS);
+			if (i == NR_CMDS)
+				return NULL;
+		} while(test_and_set_bit(i&(BITS_PER_LONG-1), h->cmd_pool_bits+(i/BITS_PER_LONG)) != 0);
+		c = h->cmd_pool + i;
+		cmd_dhandle = h->cmd_pool_dhandle + i*sizeof(cmdlist_t);
+		h->nr_allocs++;
+	}
+
+	memset(c, 0, sizeof(cmdlist_t));
+	c->busaddr = cmd_dhandle; 
+	return c;
+}
+
+static void cmd_free(ctlr_info_t *h, cmdlist_t *c, int got_from_pool)
+{
+	int i;
+
+	if (!got_from_pool) {
+		pci_free_consistent(h->pci_dev, sizeof(cmdlist_t), c,
+			c->busaddr);
+	} else {
+		i = c - h->cmd_pool;
+		clear_bit(i&(BITS_PER_LONG-1), h->cmd_pool_bits+(i/BITS_PER_LONG));
+		h->nr_frees++;
+	}
+}
+
+/***********************************************************************
+    name:        sendcmd
+    Send a command to an IDA using the memory mapped FIFO interface
+    and wait for it to complete.  
+    This routine should only be called at init time.
+***********************************************************************/
+static int sendcmd(
+	__u8	cmd,
+	int	ctlr,
+	void	*buff,
+	size_t	size,
+	unsigned int blk,
+	unsigned int blkcnt,
+	unsigned int log_unit )
+{
+	cmdlist_t *c;
+	int complete;
+	unsigned long temp;
+	unsigned long i;
+	ctlr_info_t *info_p = hba[ctlr];
+
+	c = cmd_alloc(info_p, 1);
+	if(!c)
+		return IO_ERROR;
+	c->ctlr = ctlr;
+	c->hdr.unit = log_unit;
+	c->hdr.prio = 0;
+	c->hdr.size = sizeof(rblk_t) >> 2;
+	c->size += sizeof(rblk_t);
+
+	/* The request information. */
+	c->req.hdr.next = 0;
+	c->req.hdr.rcode = 0;
+	c->req.bp = 0;
+	c->req.hdr.sg_cnt = 1;
+	c->req.hdr.reserved = 0;
+	
+	if (size == 0)
+		c->req.sg[0].size = 512;
+	else
+		c->req.sg[0].size = size;
+
+	c->req.hdr.blk = blk;
+	c->req.hdr.blk_cnt = blkcnt;
+	c->req.hdr.cmd = (unsigned char) cmd;
+	c->req.sg[0].addr = (__u32) pci_map_single(info_p->pci_dev, 
+		buff, c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
+	/*
+	 * Disable interrupt
+	 */
+	info_p->access.set_intr_mask(info_p, 0);
+	/* Make sure there is room in the command FIFO */
+	/* Actually it should be completely empty at this time. */
+	for (i = 200000; i > 0; i--) {
+		temp = info_p->access.fifo_full(info_p);
+		if (temp != 0) {
+			break;
+		}
+		udelay(10);
+DBG(
+		printk(KERN_WARNING "cpqarray ida%d: idaSendPciCmd FIFO full,"
+			" waiting!\n", ctlr);
+);
+	} 
+	/*
+	 * Send the cmd
+	 */
+	info_p->access.submit_command(info_p, c);
+	complete = pollcomplete(ctlr);
+	
+	pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr, 
+		c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
+	if (complete != 1) {
+		if (complete != c->busaddr) {
+			printk( KERN_WARNING
+			"cpqarray ida%d: idaSendPciCmd "
+		      "Invalid command list address returned! (%08lx)\n",
+				ctlr, (unsigned long)complete);
+			cmd_free(info_p, c, 1);
+			return (IO_ERROR);
+		}
+	} else {
+		printk(KERN_WARNING
+			"cpqarray ida%d: idaSendPciCmd timed out, "
+			"no command list address returned!\n",
+			ctlr);
+		cmd_free(info_p, c, 1);
+		return (IO_ERROR);
+	}
+
+	if (c->req.hdr.rcode & 0x00FE) {
+		if (!(c->req.hdr.rcode & BIG_PROBLEM)) {
+			printk( KERN_WARNING
+			"cpqarray ida%d: idaSendPciCmd, error: "
+				"Controller failed at init time "
+				"cmd: 0x%x, return code = 0x%x\n",
+				ctlr, c->req.hdr.cmd, c->req.hdr.rcode);
+
+			cmd_free(info_p, c, 1);
+			return (IO_ERROR);
+		}
+	}
+	cmd_free(info_p, c, 1);
+	return (IO_OK);
+}
+
+/*
+ * revalidate_allvol is for online array config utilities.  After a
+ * utility reconfigures the drives in the array, it can use this function
+ * (through an ioctl) to make the driver zap any previous disk structs for
+ * that controller and get new ones.
+ *
+ * Right now I'm using the getgeometry() function to do this, but this
+ * function should probably be finer grained and allow you to revalidate one
+ * particular logical volume (instead of all of them on a particular
+ * controller).
+ */
+static int revalidate_allvol(ctlr_info_t *host)
+{
+	int ctlr = host->ctlr;
+	int i;
+	unsigned long flags;
+
+	spin_lock_irqsave(IDA_LOCK(ctlr), flags);
+	if (host->usage_count > 1) {
+		spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
+		printk(KERN_WARNING "cpqarray: Device busy for volume"
+			" revalidation (usage=%d)\n", host->usage_count);
+		return -EBUSY;
+	}
+	host->usage_count++;
+	spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
+
+	/*
+	 * Set the partition and block size structures for all volumes
+	 * on this controller to zero.  We will reread all of this data
+	 */
+	set_capacity(ida_gendisk[ctlr][0], 0);
+	for (i = 1; i < NWD; i++) {
+		struct gendisk *disk = ida_gendisk[ctlr][i];
+		if (disk->flags & GENHD_FL_UP)
+			del_gendisk(disk);
+	}
+	memset(host->drv, 0, sizeof(drv_info_t)*NWD);
+
+	/*
+	 * Tell the array controller not to give us any interrupts while
+	 * we check the new geometry.  Then turn interrupts back on when
+	 * we're done.
+	 */
+	host->access.set_intr_mask(host, 0);
+	getgeometry(ctlr);
+	host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
+
+	for(i=0; i<NWD; i++) {
+		struct gendisk *disk = ida_gendisk[ctlr][i];
+		drv_info_t *drv = &host->drv[i];
+		if (i && !drv->nr_blks)
+			continue;
+		blk_queue_hardsect_size(host->queue, drv->blk_size);
+		set_capacity(disk, drv->nr_blks);
+		disk->queue = host->queue;
+		disk->private_data = drv;
+		if (i)
+			add_disk(disk);
+	}
+
+	host->usage_count--;
+	return 0;
+}
+
+static int ida_revalidate(struct gendisk *disk)
+{
+	drv_info_t *drv = disk->private_data;
+	set_capacity(disk, drv->nr_blks);
+	return 0;
+}
+
+/********************************************************************
+    name: pollcomplete
+    Wait polling for a command to complete.
+    The memory mapped FIFO is polled for the completion.
+    Used only at init time, interrupts disabled.
+ ********************************************************************/
+static int pollcomplete(int ctlr)
+{
+	int done;
+	int i;
+
+	/* Wait (up to 2 seconds) for a command to complete */
+
+	for (i = 200000; i > 0; i--) {
+		done = hba[ctlr]->access.command_completed(hba[ctlr]);
+		if (done == 0) {
+			udelay(10);	/* a short fixed delay */
+		} else
+			return (done);
+	}
+	/* Invalid address to tell caller we ran out of time */
+	return 1;
+}
+/*****************************************************************
+    start_fwbk
+    Starts the controller firmware's background processing.
+    Currently only the Integrated Raid controller needs this done.
+    If the PCI mem address registers are written to after this,
+    data corruption may occur.
+*****************************************************************/
+static void start_fwbk(int ctlr)
+{
+	id_ctlr_t *id_ctlr_buf;
+	int ret_code;
+
+	/* Not an Integrated Raid controller, so there is nothing
+	 * for us to do */
+	if ((hba[ctlr]->board_id != 0x40400E11) &&
+	    (hba[ctlr]->board_id != 0x40480E11))
+		return;
+	printk(KERN_DEBUG "cpqarray: Starting firmware's background"
+		" processing\n");
+	/* The command does not return anything, but sendcmd still needs
+	 * a buffer */
+	id_ctlr_buf = (id_ctlr_t *)kmalloc(sizeof(id_ctlr_t), GFP_KERNEL);
+	if(id_ctlr_buf==NULL)
+	{
+		printk(KERN_WARNING "cpqarray: Out of memory. "
+			"Unable to start background processing.\n");
+		return;
+	}		
+	ret_code = sendcmd(RESUME_BACKGROUND_ACTIVITY, ctlr, 
+		id_ctlr_buf, 0, 0, 0, 0);
+	if(ret_code != IO_OK)
+		printk(KERN_WARNING "cpqarray: Unable to start"
+			" background processing\n");
+
+	kfree(id_ctlr_buf);
+}
+/*****************************************************************
+    getgeometry
+    Get ida logical volume geometry from the controller 
+    This is a large bit of code which once existed in two flavors.
+    It is used only at init time.
+*****************************************************************/
+static void getgeometry(int ctlr)
+{				
+	id_log_drv_t *id_ldrive;
+	id_ctlr_t *id_ctlr_buf;
+	sense_log_drv_stat_t *id_lstatus_buf;
+	config_t *sense_config_buf;
+	unsigned int log_unit, log_index;
+	int ret_code, size;
+	drv_info_t *drv;
+	ctlr_info_t *info_p = hba[ctlr];
+	int i;
+
+	info_p->log_drv_map = 0;	
+	
+	id_ldrive = (id_log_drv_t *)kmalloc(sizeof(id_log_drv_t), GFP_KERNEL);
+	if(id_ldrive == NULL)
+	{
+		printk( KERN_ERR "cpqarray:  out of memory.\n");
+		return;
+	}
+
+	id_ctlr_buf = (id_ctlr_t *)kmalloc(sizeof(id_ctlr_t), GFP_KERNEL);
+	if(id_ctlr_buf == NULL)
+	{
+		kfree(id_ldrive);
+		printk( KERN_ERR "cpqarray:  out of memory.\n");
+		return;
+	}
+
+	id_lstatus_buf = (sense_log_drv_stat_t *)kmalloc(sizeof(sense_log_drv_stat_t), GFP_KERNEL);
+	if(id_lstatus_buf == NULL)
+	{
+		kfree(id_ctlr_buf);
+		kfree(id_ldrive);
+		printk( KERN_ERR "cpqarray:  out of memory.\n");
+		return;
+	}
+
+	sense_config_buf = (config_t *)kmalloc(sizeof(config_t), GFP_KERNEL);
+	if(sense_config_buf == NULL)
+	{
+		kfree(id_lstatus_buf);
+		kfree(id_ctlr_buf);
+		kfree(id_ldrive);
+		printk( KERN_ERR "cpqarray:  out of memory.\n");
+		return;
+	}
+
+	memset(id_ldrive, 0, sizeof(id_log_drv_t));
+	memset(id_ctlr_buf, 0, sizeof(id_ctlr_t));
+	memset(id_lstatus_buf, 0, sizeof(sense_log_drv_stat_t));
+	memset(sense_config_buf, 0, sizeof(config_t));
+
+	info_p->phys_drives = 0;
+	info_p->log_drv_map = 0;
+	info_p->drv_assign_map = 0;
+	info_p->drv_spare_map = 0;
+	info_p->mp_failed_drv_map = 0;	/* only initialized here */
+	/* Get controllers info for this logical drive */
+	ret_code = sendcmd(ID_CTLR, ctlr, id_ctlr_buf, 0, 0, 0, 0);
+	if (ret_code == IO_ERROR) {
+		/*
+		 * If can't get controller info, set the logical drive map to 0,
+		 * so the idastubopen will fail on all logical drives
+		 * on the controller.
+		 */
+		/* Free all the buffers and return */
+		printk(KERN_ERR "cpqarray: error sending ID controller\n");
+		kfree(sense_config_buf);
+		kfree(id_lstatus_buf);
+		kfree(id_ctlr_buf);
+		kfree(id_ldrive);
+		return;
+	}
+
+	info_p->log_drives = id_ctlr_buf->nr_drvs;
+	for(i=0;i<4;i++)
+		info_p->firm_rev[i] = id_ctlr_buf->firm_rev[i];
+	info_p->ctlr_sig = id_ctlr_buf->cfg_sig;
+
+	printk(" (%s)\n", info_p->product_name);
+	/*
+	 * Initialize the logical drive index
+	 */
+	log_index = 0;
+	/*
+	 * Get drive geometry for all logical drives
+	 */
+	if (id_ctlr_buf->nr_drvs > 16)
+		printk(KERN_WARNING "cpqarray ida%d: This driver supports "
+			"16 logical drives per controller.  "
+			"Additional drives will not be "
+			"detected.\n", ctlr);
+
+	for (log_unit = 0;
+	     (log_index < id_ctlr_buf->nr_drvs)
+	     && (log_unit < NWD);
+	     log_unit++) {
+		struct gendisk *disk = ida_gendisk[ctlr][log_unit];
+
+		size = sizeof(sense_log_drv_stat_t);
+
+		/*
+		   Send "Identify logical drive status" cmd
+		 */
+		ret_code = sendcmd(SENSE_LOG_DRV_STAT,
+			     ctlr, id_lstatus_buf, size, 0, 0, log_unit);
+		if (ret_code == IO_ERROR) {
+			/*
+			   If can't get logical drive status, set
+			   the logical drive map to 0, so the
+			   idastubopen will fail for all logical drives
+			   on the controller. 
+			 */
+			info_p->log_drv_map = 0;	
+			printk(KERN_WARNING
+				"cpqarray ida%d: idaGetGeometry - Controller"
+				" failed to report status of logical drive %d\n"
+				"Access to this controller has been disabled\n",
+				ctlr, log_unit);
+			/* Free all the buffers and return */
+			kfree(sense_config_buf);
+			kfree(id_lstatus_buf);
+			kfree(id_ctlr_buf);
+			kfree(id_ldrive);
+			return;
+		}
+		/*
+		   Make sure the logical drive is configured
+		 */
+		if (id_lstatus_buf->status != LOG_NOT_CONF) {
+			ret_code = sendcmd(ID_LOG_DRV, ctlr, id_ldrive,
+			       sizeof(id_log_drv_t), 0, 0, log_unit);
+			/*
+			   If error, the bit for this
+			   logical drive won't be set and
+			   idastubopen will return error. 
+			 */
+			if (ret_code != IO_ERROR) {
+				drv = &info_p->drv[log_unit];
+				drv->blk_size = id_ldrive->blk_size;
+				drv->nr_blks = id_ldrive->nr_blks;
+				drv->cylinders = id_ldrive->drv.cyl;
+				drv->heads = id_ldrive->drv.heads;
+				drv->sectors = id_ldrive->drv.sect_per_track;
+				info_p->log_drv_map |=	(1 << log_unit);
+
+	printk(KERN_INFO "cpqarray ida/c%dd%d: blksz=%d nr_blks=%d\n",
+		ctlr, log_unit, drv->blk_size, drv->nr_blks);
+				ret_code = sendcmd(SENSE_CONFIG,
+						  ctlr, sense_config_buf,
+				 sizeof(config_t), 0, 0, log_unit);
+				if (ret_code == IO_ERROR) {
+					info_p->log_drv_map = 0;
+					/* Free all the buffers and return */
+					printk(KERN_ERR "cpqarray: error sending sense config\n");
+					kfree(sense_config_buf);
+					kfree(id_lstatus_buf);
+					kfree(id_ctlr_buf);
+					kfree(id_ldrive);
+					return;
+				}
+
+				sprintf(disk->devfs_name, "ida/c%dd%d", ctlr, log_unit);
+
+				info_p->phys_drives =
+				    sense_config_buf->ctlr_phys_drv;
+				info_p->drv_assign_map
+				    |= sense_config_buf->drv_asgn_map;
+				info_p->drv_assign_map
+				    |= sense_config_buf->spare_asgn_map;
+				info_p->drv_spare_map
+				    |= sense_config_buf->spare_asgn_map;
+			}	/* end of if no error on id_ldrive */
+			log_index = log_index + 1;
+		}		/* end of if logical drive configured */
+	}			/* end of for log_unit */
+	kfree(sense_config_buf);
+	kfree(id_ldrive);
+	kfree(id_lstatus_buf);
+	kfree(id_ctlr_buf);
+}
+
+static void __exit cpqarray_exit(void)
+{
+	int i;
+
+	pci_unregister_driver(&cpqarray_pci_driver);
+
+	/* Double check that all controller entries have been removed */
+	for(i=0; i<MAX_CTLR; i++) {
+		if (hba[i] != NULL) {
+			printk(KERN_WARNING "cpqarray: Removing EISA "
+					"controller %d\n", i);
+			cpqarray_remove_one_eisa(i);
+		}
+	}
+
+	devfs_remove("ida");
+	remove_proc_entry("cpqarray", proc_root_driver);
+}
+
+module_init(cpqarray_init)
+module_exit(cpqarray_exit)
diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
new file mode 100644
index 0000000..be73e9d
--- /dev/null
+++ b/drivers/block/cpqarray.h
@@ -0,0 +1,126 @@
+/*
+ *    Disk Array driver for Compaq SMART2 Controllers
+ *    Copyright 1998 Compaq Computer Corporation
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2 of the License, or
+ *    (at your option) any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *    NON INFRINGEMENT.  See the GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
+ *
+ *    If you want to make changes, improve or add functionality to this
+ *    driver, you'll probably need the Compaq Array Controller Interface
+ *    Specification (Document number ECG086/1198)
+ */
+#ifndef CPQARRAY_H
+#define CPQARRAY_H
+
+#ifdef __KERNEL__
+#include <linux/blkdev.h>
+#include <linux/slab.h>
+#include <linux/proc_fs.h>
+#include <linux/timer.h>
+#endif
+
+#include "ida_cmd.h"
+
+#define IO_OK		0
+#define IO_ERROR	1
+#define NWD		16
+#define NWD_SHIFT	4
+
+#define IDA_TIMER	(5*HZ)
+#define IDA_TIMEOUT	(10*HZ)
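+/* IDA_TIMER is the period of the misc_tflags reset timer (see
+ * ida_timer() in cpqarray.c); IDA_TIMEOUT appears to be left over from
+ * the abandoned per-request timeout scheme described there. */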
+
+#define MISC_NONFATAL_WARN	0x01
+
+typedef struct {
+	unsigned blk_size;
+	unsigned nr_blks;
+	unsigned cylinders;
+	unsigned heads;
+	unsigned sectors;
+	int usage_count;
+} drv_info_t;
+
+#ifdef __KERNEL__
+
+struct ctlr_info;
+typedef struct ctlr_info ctlr_info_t;
+
+struct access_method {
+	void (*submit_command)(ctlr_info_t *h, cmdlist_t *c);
+	void (*set_intr_mask)(ctlr_info_t *h, unsigned long val);
+	unsigned long (*fifo_full)(ctlr_info_t *h);
+	unsigned long (*intr_pending)(ctlr_info_t *h);
+	unsigned long (*command_completed)(ctlr_info_t *h);
+};
+
+struct board_type {
+	__u32	board_id;
+	char	*product_name;
+	struct access_method *access;
+};
+
+struct ctlr_info {
+	int	ctlr;
+	char	devname[8];
+	__u32	log_drv_map;
+	__u32	drv_assign_map;
+	__u32	drv_spare_map;
+	__u32	mp_failed_drv_map;
+
+	char	firm_rev[4];
+	int	ctlr_sig;
+
+	int	log_drives;
+	int	phys_drives;
+
+	struct pci_dev *pci_dev;    /* NULL if EISA */
+	__u32	board_id;
+	char	*product_name;	
+
+	void __iomem *vaddr;
+	unsigned long paddr;
+	unsigned long io_mem_addr;
+	unsigned long io_mem_length;
+	int	intr;
+	int	usage_count;
+	drv_info_t	drv[NWD];
+	struct proc_dir_entry *proc;
+
+	struct access_method access;
+
+	cmdlist_t *reqQ;
+	cmdlist_t *cmpQ;
+	cmdlist_t *cmd_pool;
+	dma_addr_t cmd_pool_dhandle;
+	unsigned long *cmd_pool_bits;
+	struct request_queue *queue;
+	spinlock_t lock;
+
+	unsigned int Qdepth;
+	unsigned int maxQsinceinit;
+
+	unsigned int nr_requests;
+	unsigned int nr_allocs;
+	unsigned int nr_frees;
+	struct timer_list timer;
+	unsigned int misc_tflags;
+};
+
+#define IDA_LOCK(i)	(&hba[i]->lock)
+
+#endif
+
+#endif /* CPQARRAY_H */
diff --git a/drivers/block/cryptoloop.c b/drivers/block/cryptoloop.c
new file mode 100644
index 0000000..5be6f99
--- /dev/null
+++ b/drivers/block/cryptoloop.c
@@ -0,0 +1,268 @@
+/*
+   Linux loop encryption enabling module
+
+   Copyright (C)  2002 Herbert Valerio Riedel <hvr@gnu.org>
+   Copyright (C)  2003 Fruhwirth Clemens <clemens@endorphin.org>
+
+   This module is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 2 of the License, or
+   (at your option) any later version.
+
+   This module is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this module; if not, write to the Free Software
+   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/module.h>
+
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/crypto.h>
+#include <linux/blkdev.h>
+#include <linux/loop.h>
+#include <asm/semaphore.h>
+#include <asm/uaccess.h>
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("loop blockdevice transferfunction adaptor / CryptoAPI");
+MODULE_AUTHOR("Herbert Valerio Riedel <hvr@gnu.org>");
+
+#define LOOP_IV_SECTOR_BITS 9
+#define LOOP_IV_SECTOR_SIZE (1 << LOOP_IV_SECTOR_BITS)
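+
+/*
+ * Encryption is done in units of one 512-byte sector: the transfer
+ * loops below walk the buffer LOOP_IV_SECTOR_SIZE bytes at a time and
+ * (in CBC mode) advance the IV once per sector, which is why
+ * cryptoloop_init() rejects offsets that are not sector aligned.
+ */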
+
+static int
+cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
+{
+	int err = -EINVAL;
+	char cms[LO_NAME_SIZE];			/* cipher-mode string */
+	char *cipher;
+	char *mode;
+	char *cmsp = cms;			/* c-m string pointer */
+	struct crypto_tfm *tfm = NULL;
+
+	/* encryption breaks for non-sector-aligned offsets */
+
+	if (info->lo_offset % LOOP_IV_SECTOR_SIZE)
+		goto out;
+
+	strncpy(cms, info->lo_crypt_name, LO_NAME_SIZE);
+	cms[LO_NAME_SIZE - 1] = 0;
+	cipher = strsep(&cmsp, "-");
+	mode = strsep(&cmsp, "-");
+
+	if (mode == NULL || strcmp(mode, "cbc") == 0)
+		tfm = crypto_alloc_tfm(cipher, CRYPTO_TFM_MODE_CBC);
+	else if (strcmp(mode, "ecb") == 0)
+		tfm = crypto_alloc_tfm(cipher, CRYPTO_TFM_MODE_ECB);
+	if (tfm == NULL)
+		return -EINVAL;
+
+	err = tfm->crt_u.cipher.cit_setkey(tfm, info->lo_encrypt_key,
+					   info->lo_encrypt_key_size);
+	
+	if (err != 0)
+		goto out_free_tfm;
+
+	lo->key_data = tfm;
+	return 0;
+
+ out_free_tfm:
+	crypto_free_tfm(tfm);
+
+ out:
+	return err;
+}
+
+
+typedef int (*encdec_ecb_t)(struct crypto_tfm *tfm,
+			struct scatterlist *sg_out,
+			struct scatterlist *sg_in,
+			unsigned int nbytes);
+
+
+static int
+cryptoloop_transfer_ecb(struct loop_device *lo, int cmd,
+			struct page *raw_page, unsigned raw_off,
+			struct page *loop_page, unsigned loop_off,
+			int size, sector_t IV)
+{
+	struct crypto_tfm *tfm = (struct crypto_tfm *) lo->key_data;
+	struct scatterlist sg_out = { NULL, };
+	struct scatterlist sg_in = { NULL, };
+
+	encdec_ecb_t encdecfunc;
+	struct page *in_page, *out_page;
+	unsigned in_offs, out_offs;
+
+	if (cmd == READ) {
+		in_page = raw_page;
+		in_offs = raw_off;
+		out_page = loop_page;
+		out_offs = loop_off;
+		encdecfunc = tfm->crt_u.cipher.cit_decrypt;
+	} else {
+		in_page = loop_page;
+		in_offs = loop_off;
+		out_page = raw_page;
+		out_offs = raw_off;
+		encdecfunc = tfm->crt_u.cipher.cit_encrypt;
+	}
+
+	while (size > 0) {
+		const int sz = min(size, LOOP_IV_SECTOR_SIZE);
+
+		sg_in.page = in_page;
+		sg_in.offset = in_offs;
+		sg_in.length = sz;
+
+		sg_out.page = out_page;
+		sg_out.offset = out_offs;
+		sg_out.length = sz;
+
+		encdecfunc(tfm, &sg_out, &sg_in, sz);
+
+		size -= sz;
+		in_offs += sz;
+		out_offs += sz;
+	}
+
+	return 0;
+}
+
+typedef int (*encdec_cbc_t)(struct crypto_tfm *tfm,
+			struct scatterlist *sg_out,
+			struct scatterlist *sg_in,
+			unsigned int nbytes, u8 *iv);
+
+static int
+cryptoloop_transfer_cbc(struct loop_device *lo, int cmd,
+			struct page *raw_page, unsigned raw_off,
+			struct page *loop_page, unsigned loop_off,
+			int size, sector_t IV)
+{
+	struct crypto_tfm *tfm = (struct crypto_tfm *) lo->key_data;
+	struct scatterlist sg_out = { NULL, };
+	struct scatterlist sg_in = { NULL, };
+
+	encdec_cbc_t encdecfunc;
+	struct page *in_page, *out_page;
+	unsigned in_offs, out_offs;
+
+	if (cmd == READ) {
+		in_page = raw_page;
+		in_offs = raw_off;
+		out_page = loop_page;
+		out_offs = loop_off;
+		encdecfunc = tfm->crt_u.cipher.cit_decrypt_iv;
+	} else {
+		in_page = loop_page;
+		in_offs = loop_off;
+		out_page = raw_page;
+		out_offs = raw_off;
+		encdecfunc = tfm->crt_u.cipher.cit_encrypt_iv;
+	}
+
+	while (size > 0) {
+		const int sz = min(size, LOOP_IV_SECTOR_SIZE);
+		u32 iv[4] = { 0, };
+		iv[0] = cpu_to_le32(IV & 0xffffffff);
+
+		sg_in.page = in_page;
+		sg_in.offset = in_offs;
+		sg_in.length = sz;
+
+		sg_out.page = out_page;
+		sg_out.offset = out_offs;
+		sg_out.length = sz;
+
+		encdecfunc(tfm, &sg_out, &sg_in, sz, (u8 *)iv);
+
+		IV++;
+		size -= sz;
+		in_offs += sz;
+		out_offs += sz;
+	}
+
+	return 0;
+}
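+
+/*
+ * Illustrative note (not driver code): CBC runs per 512-byte sector with
+ * the IV seeded from the sector number, so data at byte offset off on
+ * the loop device is effectively processed as:
+ *
+ *	sector_t sec = off >> LOOP_IV_SECTOR_BITS;
+ *	u32 iv[4] = { cpu_to_le32(sec & 0xffffffff), 0, 0, 0 };
+ *
+ * Only the low 32 bits of the sector number enter the IV; the remainder
+ * of the IV block stays zero.
+ */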
+
+static int
+cryptoloop_transfer(struct loop_device *lo, int cmd,
+		    struct page *raw_page, unsigned raw_off,
+		    struct page *loop_page, unsigned loop_off,
+		    int size, sector_t IV)
+{
+	struct crypto_tfm *tfm = (struct crypto_tfm *) lo->key_data;
+	if (tfm->crt_cipher.cit_mode == CRYPTO_TFM_MODE_ECB) {
+		lo->transfer = cryptoloop_transfer_ecb;
+		return cryptoloop_transfer_ecb(lo, cmd, raw_page, raw_off,
+					       loop_page, loop_off, size, IV);
+	}
+	if (tfm->crt_cipher.cit_mode == CRYPTO_TFM_MODE_CBC) {
+		lo->transfer = cryptoloop_transfer_cbc;
+		return cryptoloop_transfer_cbc(lo, cmd, raw_page, raw_off,
+					       loop_page, loop_off, size, IV);
+	}
+
+	/* not supposed to happen: cryptoloop_init only accepts ECB and CBC */
+	printk(KERN_ERR "cryptoloop: unsupported cipher mode in cryptoloop_transfer!\n");
+	return -EINVAL;
+}
+
+static int
+cryptoloop_ioctl(struct loop_device *lo, int cmd, unsigned long arg)
+{
+	return -EINVAL;
+}
+
+static int
+cryptoloop_release(struct loop_device *lo)
+{
+	struct crypto_tfm *tfm = (struct crypto_tfm *) lo->key_data;
+	if (tfm != NULL) {
+		crypto_free_tfm(tfm);
+		lo->key_data = NULL;
+		return 0;
+	}
+	printk(KERN_ERR "cryptoloop_release(): tfm == NULL?\n");
+	return -EINVAL;
+}
+
+static struct loop_func_table cryptoloop_funcs = {
+	.number = LO_CRYPT_CRYPTOAPI,
+	.init = cryptoloop_init,
+	.ioctl = cryptoloop_ioctl,
+	.transfer = cryptoloop_transfer,
+	.release = cryptoloop_release,
+	.owner = THIS_MODULE
+};
+
+static int __init
+init_cryptoloop(void)
+{
+	int rc = loop_register_transfer(&cryptoloop_funcs);
+
+	if (rc)
+		printk(KERN_ERR "cryptoloop: loop_register_transfer failed\n");
+	return rc;
+}
+
+static void __exit
+cleanup_cryptoloop(void)
+{
+	if (loop_unregister_transfer(LO_CRYPT_CRYPTOAPI))
+		printk(KERN_ERR
+			"cryptoloop: loop_unregister_transfer failed\n");
+}
+
+module_init(init_cryptoloop);
+module_exit(cleanup_cryptoloop);
diff --git a/drivers/block/deadline-iosched.c b/drivers/block/deadline-iosched.c
new file mode 100644
index 0000000..d63d34c
--- /dev/null
+++ b/drivers/block/deadline-iosched.c
@@ -0,0 +1,967 @@
+/*
+ *  linux/drivers/block/deadline-iosched.c
+ *
+ *  Deadline i/o scheduler.
+ *
+ *  Copyright (C) 2002 Jens Axboe <axboe@suse.de>
+ */
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <linux/elevator.h>
+#include <linux/bio.h>
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/compiler.h>
+#include <linux/hash.h>
+#include <linux/rbtree.h>
+
+/*
+ * See Documentation/block/deadline-iosched.txt
+ */
+static int read_expire = HZ / 2;  /* max time before a read is submitted. */
+static int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
+static int writes_starved = 2;    /* max times reads can starve a write */
+static int fifo_batch = 16;       /* # of sequential requests treated as one
+				     by the above parameters. For throughput. */
+
+static const int deadline_hash_shift = 5;
+#define DL_HASH_BLOCK(sec)	((sec) >> 3)
+#define DL_HASH_FN(sec)		(hash_long(DL_HASH_BLOCK((sec)), deadline_hash_shift))
+#define DL_HASH_ENTRIES		(1 << deadline_hash_shift)
+#define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
+#define list_entry_hash(ptr)	list_entry((ptr), struct deadline_rq, hash)
+#define ON_HASH(drq)		(drq)->on_hash
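+
+/*
+ * Worked example (informal): with deadline_hash_shift == 5 the hash has
+ * 32 buckets, and requests are keyed on their end sector in 8-sector
+ * (4KB) blocks.  A request ending at sector 12345 therefore lands in
+ *
+ *	DL_HASH_FN(12345) == hash_long(12345 >> 3, 5)
+ *
+ * which is what lets deadline_find_drq_hash() below look up back merge
+ * candidates for an incoming bio cheaply.
+ */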
+
+struct deadline_data {
+	/*
+	 * run time data
+	 */
+
+	/*
+	 * requests (as struct deadline_rq) are present on both sort_list
+	 * and fifo_list
+	 */
+	struct rb_root sort_list[2];
+	struct list_head fifo_list[2];
+
+	/*
+	 * next request in sort order, per data direction; either or both
+	 * may be NULL
+	 */
+	struct deadline_rq *next_drq[2];
+	struct list_head *dispatch;	/* driver dispatch queue */
+	struct list_head *hash;		/* request hash */
+	unsigned int batching;		/* number of sequential requests made */
+	sector_t last_sector;		/* head position */
+	unsigned int starved;		/* times reads have starved writes */
+
+	/*
+	 * settings that change how the i/o scheduler behaves
+	 */
+	int fifo_expire[2];
+	int fifo_batch;
+	int writes_starved;
+	int front_merges;
+
+	mempool_t *drq_pool;
+};
+
+/*
+ * per-request data.
+ */
+struct deadline_rq {
+	/*
+	 * rbtree index, key is the starting offset
+	 */
+	struct rb_node rb_node;
+	sector_t rb_key;
+
+	struct request *request;
+
+	/*
+	 * request hash, key is the ending offset (for back merge lookup)
+	 */
+	struct list_head hash;
+	char on_hash;
+
+	/*
+	 * expire fifo
+	 */
+	struct list_head fifo;
+	unsigned long expires;
+};
+
+static void deadline_move_request(struct deadline_data *dd, struct deadline_rq *drq);
+
+static kmem_cache_t *drq_pool;
+
+#define RQ_DATA(rq)	((struct deadline_rq *) (rq)->elevator_private)
+
+/*
+ * the back merge hash support functions
+ */
+static inline void __deadline_del_drq_hash(struct deadline_rq *drq)
+{
+	drq->on_hash = 0;
+	list_del_init(&drq->hash);
+}
+
+static inline void deadline_del_drq_hash(struct deadline_rq *drq)
+{
+	if (ON_HASH(drq))
+		__deadline_del_drq_hash(drq);
+}
+
+static void
+deadline_remove_merge_hints(request_queue_t *q, struct deadline_rq *drq)
+{
+	deadline_del_drq_hash(drq);
+
+	if (q->last_merge == drq->request)
+		q->last_merge = NULL;
+}
+
+static inline void
+deadline_add_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
+{
+	struct request *rq = drq->request;
+
+	BUG_ON(ON_HASH(drq));
+
+	drq->on_hash = 1;
+	list_add(&drq->hash, &dd->hash[DL_HASH_FN(rq_hash_key(rq))]);
+}
+
+/*
+ * move hot entry to front of chain
+ */
+static inline void
+deadline_hot_drq_hash(struct deadline_data *dd, struct deadline_rq *drq)
+{
+	struct request *rq = drq->request;
+	struct list_head *head = &dd->hash[DL_HASH_FN(rq_hash_key(rq))];
+
+	if (ON_HASH(drq) && drq->hash.prev != head) {
+		list_del(&drq->hash);
+		list_add(&drq->hash, head);
+	}
+}
+
+static struct request *
+deadline_find_drq_hash(struct deadline_data *dd, sector_t offset)
+{
+	struct list_head *hash_list = &dd->hash[DL_HASH_FN(offset)];
+	struct list_head *entry, *next = hash_list->next;
+
+	while ((entry = next) != hash_list) {
+		struct deadline_rq *drq = list_entry_hash(entry);
+		struct request *__rq = drq->request;
+
+		next = entry->next;
+
+		BUG_ON(!ON_HASH(drq));
+
+		if (!rq_mergeable(__rq)) {
+			__deadline_del_drq_hash(drq);
+			continue;
+		}
+
+		if (rq_hash_key(__rq) == offset)
+			return __rq;
+	}
+
+	return NULL;
+}
+
+/*
+ * rb tree support functions
+ */
+#define RB_NONE		(2)
+#define RB_EMPTY(root)	((root)->rb_node == NULL)
+#define ON_RB(node)	((node)->rb_color != RB_NONE)
+#define RB_CLEAR(node)	((node)->rb_color = RB_NONE)
+#define rb_entry_drq(node)	rb_entry((node), struct deadline_rq, rb_node)
+#define DRQ_RB_ROOT(dd, drq)	(&(dd)->sort_list[rq_data_dir((drq)->request)])
+#define rq_rb_key(rq)		(rq)->sector
+
+static struct deadline_rq *
+__deadline_add_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
+{
+	struct rb_node **p = &DRQ_RB_ROOT(dd, drq)->rb_node;
+	struct rb_node *parent = NULL;
+	struct deadline_rq *__drq;
+
+	while (*p) {
+		parent = *p;
+		__drq = rb_entry_drq(parent);
+
+		if (drq->rb_key < __drq->rb_key)
+			p = &(*p)->rb_left;
+		else if (drq->rb_key > __drq->rb_key)
+			p = &(*p)->rb_right;
+		else
+			return __drq;
+	}
+
+	rb_link_node(&drq->rb_node, parent, p);
+	return NULL;
+}
+
+static void
+deadline_add_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
+{
+	struct deadline_rq *__alias;
+
+	drq->rb_key = rq_rb_key(drq->request);
+
+retry:
+	__alias = __deadline_add_drq_rb(dd, drq);
+	if (!__alias) {
+		rb_insert_color(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
+		return;
+	}
+
+	deadline_move_request(dd, __alias);
+	goto retry;
+}
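+
+/*
+ * Note (informal): rq_rb_key() is the request's start sector, so two
+ * requests can collide on the same key.  __deadline_add_drq_rb() returns
+ * the existing "alias" in that case; the retry loop above moves the
+ * alias to the dispatch queue first and then reinserts, so every request
+ * ends up in the tree without duplicate keys.
+ */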
+
+static inline void
+deadline_del_drq_rb(struct deadline_data *dd, struct deadline_rq *drq)
+{
+	const int data_dir = rq_data_dir(drq->request);
+
+	if (dd->next_drq[data_dir] == drq) {
+		struct rb_node *rbnext = rb_next(&drq->rb_node);
+
+		dd->next_drq[data_dir] = NULL;
+		if (rbnext)
+			dd->next_drq[data_dir] = rb_entry_drq(rbnext);
+	}
+
+	if (ON_RB(&drq->rb_node)) {
+		rb_erase(&drq->rb_node, DRQ_RB_ROOT(dd, drq));
+		RB_CLEAR(&drq->rb_node);
+	}
+}
+
+static struct request *
+deadline_find_drq_rb(struct deadline_data *dd, sector_t sector, int data_dir)
+{
+	struct rb_node *n = dd->sort_list[data_dir].rb_node;
+	struct deadline_rq *drq;
+
+	while (n) {
+		drq = rb_entry_drq(n);
+
+		if (sector < drq->rb_key)
+			n = n->rb_left;
+		else if (sector > drq->rb_key)
+			n = n->rb_right;
+		else
+			return drq->request;
+	}
+
+	return NULL;
+}
+
+/*
+ * deadline_find_first_drq finds the first (lowest sector numbered) request
+ * for the specified data_dir. Used to sweep back to the start of the disk
+ * (1-way elevator) after we process the last (highest sector) request.
+ */
+static struct deadline_rq *
+deadline_find_first_drq(struct deadline_data *dd, int data_dir)
+{
+	struct rb_node *n = dd->sort_list[data_dir].rb_node;
+
+	for (;;) {
+		if (n->rb_left == NULL)
+			return rb_entry_drq(n);
+
+		n = n->rb_left;
+	}
+}
+
+/*
+ * add drq to rbtree and fifo
+ */
+static inline void
+deadline_add_request(struct request_queue *q, struct request *rq)
+{
+	struct deadline_data *dd = q->elevator->elevator_data;
+	struct deadline_rq *drq = RQ_DATA(rq);
+
+	const int data_dir = rq_data_dir(drq->request);
+
+	deadline_add_drq_rb(dd, drq);
+	/*
+	 * set expire time and add to fifo list
+	 */
+	drq->expires = jiffies + dd->fifo_expire[data_dir];
+	list_add_tail(&drq->fifo, &dd->fifo_list[data_dir]);
+
+	if (rq_mergeable(rq)) {
+		deadline_add_drq_hash(dd, drq);
+
+		if (!q->last_merge)
+			q->last_merge = rq;
+	}
+}
+
+/*
+ * remove rq from rbtree, fifo, and hash
+ */
+static void deadline_remove_request(request_queue_t *q, struct request *rq)
+{
+	struct deadline_rq *drq = RQ_DATA(rq);
+
+	if (drq) {
+		struct deadline_data *dd = q->elevator->elevator_data;
+
+		list_del_init(&drq->fifo);
+		deadline_remove_merge_hints(q, drq);
+		deadline_del_drq_rb(dd, drq);
+	}
+}
+
+static int
+deadline_merge(request_queue_t *q, struct request **req, struct bio *bio)
+{
+	struct deadline_data *dd = q->elevator->elevator_data;
+	struct request *__rq;
+	int ret;
+
+	/*
+	 * try last_merge to avoid going to hash
+	 */
+	ret = elv_try_last_merge(q, bio);
+	if (ret != ELEVATOR_NO_MERGE) {
+		__rq = q->last_merge;
+		goto out_insert;
+	}
+
+	/*
+	 * see if the merge hash can satisfy a back merge
+	 */
+	__rq = deadline_find_drq_hash(dd, bio->bi_sector);
+	if (__rq) {
+		BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector);
+
+		if (elv_rq_merge_ok(__rq, bio)) {
+			ret = ELEVATOR_BACK_MERGE;
+			goto out;
+		}
+	}
+
+	/*
+	 * check for front merge
+	 */
+	if (dd->front_merges) {
+		sector_t rb_key = bio->bi_sector + bio_sectors(bio);
+
+		__rq = deadline_find_drq_rb(dd, rb_key, bio_data_dir(bio));
+		if (__rq) {
+			BUG_ON(rb_key != rq_rb_key(__rq));
+
+			if (elv_rq_merge_ok(__rq, bio)) {
+				ret = ELEVATOR_FRONT_MERGE;
+				goto out;
+			}
+		}
+	}
+
+	return ELEVATOR_NO_MERGE;
+out:
+	q->last_merge = __rq;
+out_insert:
+	if (ret)
+		deadline_hot_drq_hash(dd, RQ_DATA(__rq));
+	*req = __rq;
+	return ret;
+}
+
+static void deadline_merged_request(request_queue_t *q, struct request *req)
+{
+	struct deadline_data *dd = q->elevator->elevator_data;
+	struct deadline_rq *drq = RQ_DATA(req);
+
+	/*
+	 * hash always needs to be repositioned, key is end sector
+	 */
+	deadline_del_drq_hash(drq);
+	deadline_add_drq_hash(dd, drq);
+
+	/*
+	 * if the merge was a front merge, we need to reposition request
+	 */
+	if (rq_rb_key(req) != drq->rb_key) {
+		deadline_del_drq_rb(dd, drq);
+		deadline_add_drq_rb(dd, drq);
+	}
+
+	q->last_merge = req;
+}
+
+static void
+deadline_merged_requests(request_queue_t *q, struct request *req,
+			 struct request *next)
+{
+	struct deadline_data *dd = q->elevator->elevator_data;
+	struct deadline_rq *drq = RQ_DATA(req);
+	struct deadline_rq *dnext = RQ_DATA(next);
+
+	BUG_ON(!drq);
+	BUG_ON(!dnext);
+
+	/*
+	 * reposition drq (this is the merged request) in hash, and in rbtree
+	 * in case of a front merge
+	 */
+	deadline_del_drq_hash(drq);
+	deadline_add_drq_hash(dd, drq);
+
+	if (rq_rb_key(req) != drq->rb_key) {
+		deadline_del_drq_rb(dd, drq);
+		deadline_add_drq_rb(dd, drq);
+	}
+
+	/*
+	 * if dnext expires before drq, assign its expire time to drq
+	 * and move into dnext position (dnext will be deleted) in fifo
+	 */
+	if (!list_empty(&drq->fifo) && !list_empty(&dnext->fifo)) {
+		if (time_before(dnext->expires, drq->expires)) {
+			list_move(&drq->fifo, &dnext->fifo);
+			drq->expires = dnext->expires;
+		}
+	}
+
+	/*
+	 * kill knowledge of next, this one is a goner
+	 */
+	deadline_remove_request(q, next);
+}
+
+/*
+ * move request from sort list to dispatch queue.
+ */
+static inline void
+deadline_move_to_dispatch(struct deadline_data *dd, struct deadline_rq *drq)
+{
+	request_queue_t *q = drq->request->q;
+
+	deadline_remove_request(q, drq->request);
+	list_add_tail(&drq->request->queuelist, dd->dispatch);
+}
+
+/*
+ * move an entry to dispatch queue
+ */
+static void
+deadline_move_request(struct deadline_data *dd, struct deadline_rq *drq)
+{
+	const int data_dir = rq_data_dir(drq->request);
+	struct rb_node *rbnext = rb_next(&drq->rb_node);
+
+	dd->next_drq[READ] = NULL;
+	dd->next_drq[WRITE] = NULL;
+
+	if (rbnext)
+		dd->next_drq[data_dir] = rb_entry_drq(rbnext);
+
+	dd->last_sector = drq->request->sector + drq->request->nr_sectors;
+
+	/*
+	 * take it off the sort and fifo list, move
+	 * to dispatch queue
+	 */
+	deadline_move_to_dispatch(dd, drq);
+}
+
+#define list_entry_fifo(ptr)	list_entry((ptr), struct deadline_rq, fifo)
+
+/*
+ * deadline_check_fifo returns 0 if there are no expired requests on the fifo,
+ * 1 otherwise. Requires !list_empty(&dd->fifo_list[data_dir])
+ */
+static inline int deadline_check_fifo(struct deadline_data *dd, int ddir)
+{
+	struct deadline_rq *drq = list_entry_fifo(dd->fifo_list[ddir].next);
+
+	/*
+	 * drq is expired!
+	 */
+	if (time_after(jiffies, drq->expires))
+		return 1;
+
+	return 0;
+}
+
+/*
+ * deadline_dispatch_requests selects the best request according to
+ * read/write expire, fifo_batch, etc
+ */
+static int deadline_dispatch_requests(struct deadline_data *dd)
+{
+	const int reads = !list_empty(&dd->fifo_list[READ]);
+	const int writes = !list_empty(&dd->fifo_list[WRITE]);
+	struct deadline_rq *drq;
+	int data_dir, other_dir;
+
+	/*
+	 * batches are currently reads XOR writes
+	 */
+	drq = NULL;
+
+	if (dd->next_drq[READ])
+		drq = dd->next_drq[READ];
+
+	if (dd->next_drq[WRITE])
+		drq = dd->next_drq[WRITE];
+
+	if (drq) {
+		/* we have a "next request" */
+
+		if (dd->last_sector != drq->request->sector)
+			/* end the batch on a non-sequential request */
+			dd->batching += dd->fifo_batch;
+
+		if (dd->batching < dd->fifo_batch)
+			/* we are still entitled to batch */
+			goto dispatch_request;
+	}
+
+	/*
+	 * at this point we are not running a batch. select the appropriate
+	 * data direction (read / write)
+	 */
+
+	if (reads) {
+		BUG_ON(RB_EMPTY(&dd->sort_list[READ]));
+
+		if (writes && (dd->starved++ >= dd->writes_starved))
+			goto dispatch_writes;
+
+		data_dir = READ;
+		other_dir = WRITE;
+
+		goto dispatch_find_request;
+	}
+
+	/*
+	 * either there are no reads, or writes have been starved for long
+	 * enough
+	 */
+
+	if (writes) {
+dispatch_writes:
+		BUG_ON(RB_EMPTY(&dd->sort_list[WRITE]));
+
+		dd->starved = 0;
+
+		data_dir = WRITE;
+		other_dir = READ;
+
+		goto dispatch_find_request;
+	}
+
+	return 0;
+
+dispatch_find_request:
+	/*
+	 * we are not running a batch, find best request for selected data_dir
+	 */
+	if (deadline_check_fifo(dd, data_dir)) {
+		/* An expired request exists - satisfy it */
+		dd->batching = 0;
+		drq = list_entry_fifo(dd->fifo_list[data_dir].next);
+
+	} else if (dd->next_drq[data_dir]) {
+		/*
+		 * The last req was the same dir and we have a next request in
+		 * sort order. No expired requests so continue on from here.
+		 */
+		drq = dd->next_drq[data_dir];
+	} else {
+		/*
+		 * The last req was the other direction or we have run out of
+		 * higher-sectored requests. Go back to the lowest sectored
+		 * request (1 way elevator) and start a new batch.
+		 */
+		dd->batching = 0;
+		drq = deadline_find_first_drq(dd, data_dir);
+	}
+
+dispatch_request:
+	/*
+	 * drq is the selected appropriate request.
+	 */
+	dd->batching++;
+	deadline_move_request(dd, drq);
+
+	return 1;
+}
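+
+/*
+ * Informal summary of the selection policy above:
+ *
+ *	if a batch is in progress and the next request is sequential,
+ *		keep batching in the current direction;
+ *	else if reads are pending and writes have not yet been starved
+ *		writes_starved times, pick reads, else pick writes;
+ *	within the chosen direction, serve the oldest request if its
+ *		deadline has expired, continue in sector order if we can,
+ *		otherwise wrap to the lowest sector (one-way elevator).
+ */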
+
+static struct request *deadline_next_request(request_queue_t *q)
+{
+	struct deadline_data *dd = q->elevator->elevator_data;
+	struct request *rq;
+
+	/*
+	 * if there are still requests on the dispatch queue, grab the first one
+	 */
+	if (!list_empty(dd->dispatch)) {
+dispatch:
+		rq = list_entry_rq(dd->dispatch->next);
+		return rq;
+	}
+
+	if (deadline_dispatch_requests(dd))
+		goto dispatch;
+
+	return NULL;
+}
+
+static void
+deadline_insert_request(request_queue_t *q, struct request *rq, int where)
+{
+	struct deadline_data *dd = q->elevator->elevator_data;
+
+	/* barriers must flush the reorder queue */
+	if (unlikely(rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)
+			&& where == ELEVATOR_INSERT_SORT))
+		where = ELEVATOR_INSERT_BACK;
+
+	switch (where) {
+		case ELEVATOR_INSERT_BACK:
+			while (deadline_dispatch_requests(dd))
+				;
+			list_add_tail(&rq->queuelist, dd->dispatch);
+			break;
+		case ELEVATOR_INSERT_FRONT:
+			list_add(&rq->queuelist, dd->dispatch);
+			break;
+		case ELEVATOR_INSERT_SORT:
+			BUG_ON(!blk_fs_request(rq));
+			deadline_add_request(q, rq);
+			break;
+		default:
+			printk(KERN_ERR "%s: bad insert point %d\n",
+			       __FUNCTION__, where);
+			return;
+	}
+}
+
+static int deadline_queue_empty(request_queue_t *q)
+{
+	struct deadline_data *dd = q->elevator->elevator_data;
+
+	if (!list_empty(&dd->fifo_list[WRITE])
+	    || !list_empty(&dd->fifo_list[READ])
+	    || !list_empty(dd->dispatch))
+		return 0;
+
+	return 1;
+}
+
+static struct request *
+deadline_former_request(request_queue_t *q, struct request *rq)
+{
+	struct deadline_rq *drq = RQ_DATA(rq);
+	struct rb_node *rbprev = rb_prev(&drq->rb_node);
+
+	if (rbprev)
+		return rb_entry_drq(rbprev)->request;
+
+	return NULL;
+}
+
+static struct request *
+deadline_latter_request(request_queue_t *q, struct request *rq)
+{
+	struct deadline_rq *drq = RQ_DATA(rq);
+	struct rb_node *rbnext = rb_next(&drq->rb_node);
+
+	if (rbnext)
+		return rb_entry_drq(rbnext)->request;
+
+	return NULL;
+}
+
+static void deadline_exit_queue(elevator_t *e)
+{
+	struct deadline_data *dd = e->elevator_data;
+
+	BUG_ON(!list_empty(&dd->fifo_list[READ]));
+	BUG_ON(!list_empty(&dd->fifo_list[WRITE]));
+
+	mempool_destroy(dd->drq_pool);
+	kfree(dd->hash);
+	kfree(dd);
+}
+
+/*
+ * initialize elevator private data (deadline_data), and set up the
+ * mempool that per-request drq structures are allocated from
+ */
+static int deadline_init_queue(request_queue_t *q, elevator_t *e)
+{
+	struct deadline_data *dd;
+	int i;
+
+	if (!drq_pool)
+		return -ENOMEM;
+
+	dd = kmalloc(sizeof(*dd), GFP_KERNEL);
+	if (!dd)
+		return -ENOMEM;
+	memset(dd, 0, sizeof(*dd));
+
+	dd->hash = kmalloc(sizeof(struct list_head) * DL_HASH_ENTRIES, GFP_KERNEL);
+	if (!dd->hash) {
+		kfree(dd);
+		return -ENOMEM;
+	}
+
+	dd->drq_pool = mempool_create(BLKDEV_MIN_RQ, mempool_alloc_slab,
+				      mempool_free_slab, drq_pool);
+	if (!dd->drq_pool) {
+		kfree(dd->hash);
+		kfree(dd);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < DL_HASH_ENTRIES; i++)
+		INIT_LIST_HEAD(&dd->hash[i]);
+
+	INIT_LIST_HEAD(&dd->fifo_list[READ]);
+	INIT_LIST_HEAD(&dd->fifo_list[WRITE]);
+	dd->sort_list[READ] = RB_ROOT;
+	dd->sort_list[WRITE] = RB_ROOT;
+	dd->dispatch = &q->queue_head;
+	dd->fifo_expire[READ] = read_expire;
+	dd->fifo_expire[WRITE] = write_expire;
+	dd->writes_starved = writes_starved;
+	dd->front_merges = 1;
+	dd->fifo_batch = fifo_batch;
+	e->elevator_data = dd;
+	return 0;
+}
+
+static void deadline_put_request(request_queue_t *q, struct request *rq)
+{
+	struct deadline_data *dd = q->elevator->elevator_data;
+	struct deadline_rq *drq = RQ_DATA(rq);
+
+	if (drq) {
+		mempool_free(drq, dd->drq_pool);
+		rq->elevator_private = NULL;
+	}
+}
+
+static int
+deadline_set_request(request_queue_t *q, struct request *rq, int gfp_mask)
+{
+	struct deadline_data *dd = q->elevator->elevator_data;
+	struct deadline_rq *drq;
+
+	drq = mempool_alloc(dd->drq_pool, gfp_mask);
+	if (drq) {
+		memset(drq, 0, sizeof(*drq));
+		RB_CLEAR(&drq->rb_node);
+		drq->request = rq;
+
+		INIT_LIST_HEAD(&drq->hash);
+		drq->on_hash = 0;
+
+		INIT_LIST_HEAD(&drq->fifo);
+
+		rq->elevator_private = drq;
+		return 0;
+	}
+
+	return 1;
+}
+
+/*
+ * sysfs parts below
+ */
+struct deadline_fs_entry {
+	struct attribute attr;
+	ssize_t (*show)(struct deadline_data *, char *);
+	ssize_t (*store)(struct deadline_data *, const char *, size_t);
+};
+
+static ssize_t
+deadline_var_show(int var, char *page)
+{
+	return sprintf(page, "%d\n", var);
+}
+
+static ssize_t
+deadline_var_store(int *var, const char *page, size_t count)
+{
+	char *p = (char *) page;
+
+	*var = simple_strtol(p, &p, 10);
+	return count;
+}
+
+#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
+static ssize_t __FUNC(struct deadline_data *dd, char *page)		\
+{									\
+	int __data = __VAR;					\
+	if (__CONV)							\
+		__data = jiffies_to_msecs(__data);			\
+	return deadline_var_show(__data, (page));			\
+}
+SHOW_FUNCTION(deadline_readexpire_show, dd->fifo_expire[READ], 1);
+SHOW_FUNCTION(deadline_writeexpire_show, dd->fifo_expire[WRITE], 1);
+SHOW_FUNCTION(deadline_writesstarved_show, dd->writes_starved, 0);
+SHOW_FUNCTION(deadline_frontmerges_show, dd->front_merges, 0);
+SHOW_FUNCTION(deadline_fifobatch_show, dd->fifo_batch, 0);
+#undef SHOW_FUNCTION
+
+#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
+static ssize_t __FUNC(struct deadline_data *dd, const char *page, size_t count)	\
+{									\
+	int __data;							\
+	int ret = deadline_var_store(&__data, (page), count);		\
+	if (__data < (MIN))						\
+		__data = (MIN);						\
+	else if (__data > (MAX))					\
+		__data = (MAX);						\
+	if (__CONV)							\
+		*(__PTR) = msecs_to_jiffies(__data);			\
+	else								\
+		*(__PTR) = __data;					\
+	return ret;							\
+}
+STORE_FUNCTION(deadline_readexpire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
+STORE_FUNCTION(deadline_writeexpire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
+STORE_FUNCTION(deadline_writesstarved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
+STORE_FUNCTION(deadline_frontmerges_store, &dd->front_merges, 0, 1, 0);
+STORE_FUNCTION(deadline_fifobatch_store, &dd->fifo_batch, 0, INT_MAX, 0);
+#undef STORE_FUNCTION
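+
+/*
+ * The attribute glue below exposes these knobs through sysfs.
+ * Illustrative usage (device name is only an example):
+ *
+ *	# cat /sys/block/hda/queue/iosched/read_expire
+ *	500
+ *	# echo 250 > /sys/block/hda/queue/iosched/read_expire
+ *
+ * read_expire and write_expire are presented in milliseconds; __CONV
+ * converts to and from jiffies on show/store.
+ */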
+
+static struct deadline_fs_entry deadline_readexpire_entry = {
+	.attr = {.name = "read_expire", .mode = S_IRUGO | S_IWUSR },
+	.show = deadline_readexpire_show,
+	.store = deadline_readexpire_store,
+};
+static struct deadline_fs_entry deadline_writeexpire_entry = {
+	.attr = {.name = "write_expire", .mode = S_IRUGO | S_IWUSR },
+	.show = deadline_writeexpire_show,
+	.store = deadline_writeexpire_store,
+};
+static struct deadline_fs_entry deadline_writesstarved_entry = {
+	.attr = {.name = "writes_starved", .mode = S_IRUGO | S_IWUSR },
+	.show = deadline_writesstarved_show,
+	.store = deadline_writesstarved_store,
+};
+static struct deadline_fs_entry deadline_frontmerges_entry = {
+	.attr = {.name = "front_merges", .mode = S_IRUGO | S_IWUSR },
+	.show = deadline_frontmerges_show,
+	.store = deadline_frontmerges_store,
+};
+static struct deadline_fs_entry deadline_fifobatch_entry = {
+	.attr = {.name = "fifo_batch", .mode = S_IRUGO | S_IWUSR },
+	.show = deadline_fifobatch_show,
+	.store = deadline_fifobatch_store,
+};
+
+static struct attribute *default_attrs[] = {
+	&deadline_readexpire_entry.attr,
+	&deadline_writeexpire_entry.attr,
+	&deadline_writesstarved_entry.attr,
+	&deadline_frontmerges_entry.attr,
+	&deadline_fifobatch_entry.attr,
+	NULL,
+};
+
+#define to_deadline(atr) container_of((atr), struct deadline_fs_entry, attr)
+
+static ssize_t
+deadline_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
+{
+	elevator_t *e = container_of(kobj, elevator_t, kobj);
+	struct deadline_fs_entry *entry = to_deadline(attr);
+
+	if (!entry->show)
+		return 0;
+
+	return entry->show(e->elevator_data, page);
+}
+
+static ssize_t
+deadline_attr_store(struct kobject *kobj, struct attribute *attr,
+		    const char *page, size_t length)
+{
+	elevator_t *e = container_of(kobj, elevator_t, kobj);
+	struct deadline_fs_entry *entry = to_deadline(attr);
+
+	if (!entry->store)
+		return -EINVAL;
+
+	return entry->store(e->elevator_data, page, length);
+}
+
+static struct sysfs_ops deadline_sysfs_ops = {
+	.show	= deadline_attr_show,
+	.store	= deadline_attr_store,
+};
+
+static struct kobj_type deadline_ktype = {
+	.sysfs_ops	= &deadline_sysfs_ops,
+	.default_attrs	= default_attrs,
+};
+
+static struct elevator_type iosched_deadline = {
+	.ops = {
+		.elevator_merge_fn = 		deadline_merge,
+		.elevator_merged_fn =		deadline_merged_request,
+		.elevator_merge_req_fn =	deadline_merged_requests,
+		.elevator_next_req_fn =		deadline_next_request,
+		.elevator_add_req_fn =		deadline_insert_request,
+		.elevator_remove_req_fn =	deadline_remove_request,
+		.elevator_queue_empty_fn =	deadline_queue_empty,
+		.elevator_former_req_fn =	deadline_former_request,
+		.elevator_latter_req_fn =	deadline_latter_request,
+		.elevator_set_req_fn =		deadline_set_request,
+		.elevator_put_req_fn = 		deadline_put_request,
+		.elevator_init_fn =		deadline_init_queue,
+		.elevator_exit_fn =		deadline_exit_queue,
+	},
+
+	.elevator_ktype = &deadline_ktype,
+	.elevator_name = "deadline",
+	.elevator_owner = THIS_MODULE,
+};
+
+static int __init deadline_init(void)
+{
+	int ret;
+
+	drq_pool = kmem_cache_create("deadline_drq", sizeof(struct deadline_rq),
+				     0, 0, NULL, NULL);
+
+	if (!drq_pool)
+		return -ENOMEM;
+
+	ret = elv_register(&iosched_deadline);
+	if (ret)
+		kmem_cache_destroy(drq_pool);
+
+	return ret;
+}
+
+static void __exit deadline_exit(void)
+{
+	kmem_cache_destroy(drq_pool);
+	elv_unregister(&iosched_deadline);
+}
+
+module_init(deadline_init);
+module_exit(deadline_exit);
+
+MODULE_AUTHOR("Jens Axboe");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("deadline IO scheduler");
diff --git a/drivers/block/elevator.c b/drivers/block/elevator.c
new file mode 100644
index 0000000..6b79b43
--- /dev/null
+++ b/drivers/block/elevator.c
@@ -0,0 +1,705 @@
+/*
+ *  linux/drivers/block/elevator.c
+ *
+ *  Block device elevator/IO-scheduler.
+ *
+ *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
+ *
+ * 30042000 Jens Axboe <axboe@suse.de> :
+ *
+ * Split the elevator a bit so that it is possible to choose a different
+ * one or even write a new "plug in". There are three pieces:
+ * - elevator_fn, inserts a new request in the queue list
+ * - elevator_merge_fn, decides whether a new buffer can be merged with
+ *   an existing request
+ * - elevator_dequeue_fn, called when a request is taken off the active list
+ *
+ * 20082000 Dave Jones <davej@suse.de> :
+ * Removed tests for max-bomb-segments, which was breaking elvtune
+ *  when run without -bN
+ *
+ * Jens:
+ * - Rework again to work with bio instead of buffer_heads
+ * - lose the bi_dev comparisons; partition handling is correct now
+ * - completely modularize elevator setup and teardown
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <linux/elevator.h>
+#include <linux/bio.h>
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/compiler.h>
+
+#include <asm/uaccess.h>
+
+static DEFINE_SPINLOCK(elv_list_lock);
+static LIST_HEAD(elv_list);
+
+/*
+ * can we safely merge with this request?
+ */
+inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
+{
+	if (!rq_mergeable(rq))
+		return 0;
+
+	/*
+	 * different data direction or already started, don't merge
+	 */
+	if (bio_data_dir(bio) != rq_data_dir(rq))
+		return 0;
+
+	/*
+	 * same device and no special stuff set, merge is ok
+	 */
+	if (rq->rq_disk == bio->bi_bdev->bd_disk &&
+	    !rq->waiting && !rq->special)
+		return 1;
+
+	return 0;
+}
+EXPORT_SYMBOL(elv_rq_merge_ok);
+
+inline int elv_try_merge(struct request *__rq, struct bio *bio)
+{
+	int ret = ELEVATOR_NO_MERGE;
+
+	/*
+	 * we can merge and sequence is ok, check if it's possible
+	 */
+	if (elv_rq_merge_ok(__rq, bio)) {
+		if (__rq->sector + __rq->nr_sectors == bio->bi_sector)
+			ret = ELEVATOR_BACK_MERGE;
+		else if (__rq->sector - bio_sectors(bio) == bio->bi_sector)
+			ret = ELEVATOR_FRONT_MERGE;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(elv_try_merge);
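+
+/*
+ * Worked example (informal): for a request spanning sectors [100, 108)
+ * (sector == 100, nr_sectors == 8) and a 4-sector bio:
+ *
+ *	bio->bi_sector == 108  ->  ELEVATOR_BACK_MERGE  (bio appended)
+ *	bio->bi_sector ==  96  ->  ELEVATOR_FRONT_MERGE (bio prepended)
+ *	anything else          ->  ELEVATOR_NO_MERGE
+ */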
+
+inline int elv_try_last_merge(request_queue_t *q, struct bio *bio)
+{
+	if (q->last_merge)
+		return elv_try_merge(q->last_merge, bio);
+
+	return ELEVATOR_NO_MERGE;
+}
+EXPORT_SYMBOL(elv_try_last_merge);
+
+static struct elevator_type *elevator_find(const char *name)
+{
+	struct elevator_type *e = NULL;
+	struct list_head *entry;
+
+	spin_lock_irq(&elv_list_lock);
+	list_for_each(entry, &elv_list) {
+		struct elevator_type *__e;
+
+		__e = list_entry(entry, struct elevator_type, list);
+
+		if (!strcmp(__e->elevator_name, name)) {
+			e = __e;
+			break;
+		}
+	}
+	spin_unlock_irq(&elv_list_lock);
+
+	return e;
+}
+
+static void elevator_put(struct elevator_type *e)
+{
+	module_put(e->elevator_owner);
+}
+
+static struct elevator_type *elevator_get(const char *name)
+{
+	struct elevator_type *e = elevator_find(name);
+
+	if (!e)
+		return NULL;
+	if (!try_module_get(e->elevator_owner))
+		return NULL;
+
+	return e;
+}
+
+static int elevator_attach(request_queue_t *q, struct elevator_type *e,
+			   struct elevator_queue *eq)
+{
+	int ret = 0;
+
+	memset(eq, 0, sizeof(*eq));
+	eq->ops = &e->ops;
+	eq->elevator_type = e;
+
+	INIT_LIST_HEAD(&q->queue_head);
+	q->last_merge = NULL;
+	q->elevator = eq;
+
+	if (eq->ops->elevator_init_fn)
+		ret = eq->ops->elevator_init_fn(q, eq);
+
+	return ret;
+}
+
+static char chosen_elevator[16];
+
+static void elevator_setup_default(void)
+{
+	/*
+	 * check if default is set and exists
+	 */
+	if (chosen_elevator[0] && elevator_find(chosen_elevator))
+		return;
+
+#if defined(CONFIG_IOSCHED_AS)
+	strcpy(chosen_elevator, "anticipatory");
+#elif defined(CONFIG_IOSCHED_DEADLINE)
+	strcpy(chosen_elevator, "deadline");
+#elif defined(CONFIG_IOSCHED_CFQ)
+	strcpy(chosen_elevator, "cfq");
+#elif defined(CONFIG_IOSCHED_NOOP)
+	strcpy(chosen_elevator, "noop");
+#else
+#error "You must build at least 1 IO scheduler into the kernel"
+#endif
+}
+
+static int __init elevator_setup(char *str)
+{
+	strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
+	return 0;
+}
+
+__setup("elevator=", elevator_setup);
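+
+/*
+ * Example (illustrative): the boot-time default can be chosen on the
+ * kernel command line, e.g.
+ *
+ *	elevator=deadline
+ *
+ * Names that are unknown or not built in fall back to the compile-time
+ * default picked in elevator_setup_default() above.
+ */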
+
+int elevator_init(request_queue_t *q, char *name)
+{
+	struct elevator_type *e = NULL;
+	struct elevator_queue *eq;
+	int ret = 0;
+
+	elevator_setup_default();
+
+	if (!name)
+		name = chosen_elevator;
+
+	e = elevator_get(name);
+	if (!e)
+		return -EINVAL;
+
+	eq = kmalloc(sizeof(struct elevator_queue), GFP_KERNEL);
+	if (!eq) {
+		elevator_put(e);
+		return -ENOMEM;
+	}
+
+	ret = elevator_attach(q, e, eq);
+	if (ret) {
+		kfree(eq);
+		elevator_put(e);
+	}
+
+	return ret;
+}
+
+void elevator_exit(elevator_t *e)
+{
+	if (e->ops->elevator_exit_fn)
+		e->ops->elevator_exit_fn(e);
+
+	elevator_put(e->elevator_type);
+	e->elevator_type = NULL;
+	kfree(e);
+}
+
+static int elevator_global_init(void)
+{
+	return 0;
+}
+
+int elv_merge(request_queue_t *q, struct request **req, struct bio *bio)
+{
+	elevator_t *e = q->elevator;
+
+	if (e->ops->elevator_merge_fn)
+		return e->ops->elevator_merge_fn(q, req, bio);
+
+	return ELEVATOR_NO_MERGE;
+}
+
+void elv_merged_request(request_queue_t *q, struct request *rq)
+{
+	elevator_t *e = q->elevator;
+
+	if (e->ops->elevator_merged_fn)
+		e->ops->elevator_merged_fn(q, rq);
+}
+
+void elv_merge_requests(request_queue_t *q, struct request *rq,
+			     struct request *next)
+{
+	elevator_t *e = q->elevator;
+
+	if (q->last_merge == next)
+		q->last_merge = NULL;
+
+	if (e->ops->elevator_merge_req_fn)
+		e->ops->elevator_merge_req_fn(q, rq, next);
+}
+
+/*
+ * For careful internal use by the block layer. Essentially the same as
+ * a requeue in that it tells the io scheduler that this request is not
+ * active in the driver or hardware anymore, but we don't want the request
+ * added back to the scheduler. Function is not exported.
+ */
+void elv_deactivate_request(request_queue_t *q, struct request *rq)
+{
+	elevator_t *e = q->elevator;
+
+	/*
+	 * it already went through dequeue, we need to decrement the
+	 * in_flight count again
+	 */
+	if (blk_account_rq(rq))
+		q->in_flight--;
+
+	rq->flags &= ~REQ_STARTED;
+
+	if (e->ops->elevator_deactivate_req_fn)
+		e->ops->elevator_deactivate_req_fn(q, rq);
+}
+
+void elv_requeue_request(request_queue_t *q, struct request *rq)
+{
+	elv_deactivate_request(q, rq);
+
+	/*
+	 * if this is the flush, requeue the original instead and drop the flush
+	 */
+	if (rq->flags & REQ_BAR_FLUSH) {
+		clear_bit(QUEUE_FLAG_FLUSH, &q->queue_flags);
+		rq = rq->end_io_data;
+	}
+
+	/*
+	 * if iosched has an explicit requeue hook, then use that. otherwise
+	 * just put the request at the front of the queue
+	 */
+	if (q->elevator->ops->elevator_requeue_req_fn)
+		q->elevator->ops->elevator_requeue_req_fn(q, rq);
+	else
+		__elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0);
+}
+
+void __elv_add_request(request_queue_t *q, struct request *rq, int where,
+		       int plug)
+{
+	/*
+	 * barriers implicitly indicate back insertion
+	 */
+	if (rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER) &&
+	    where == ELEVATOR_INSERT_SORT)
+		where = ELEVATOR_INSERT_BACK;
+
+	if (plug)
+		blk_plug_device(q);
+
+	rq->q = q;
+
+	if (!test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags)) {
+		q->elevator->ops->elevator_add_req_fn(q, rq, where);
+
+		if (blk_queue_plugged(q)) {
+			int nrq = q->rq.count[READ] + q->rq.count[WRITE]
+				  - q->in_flight;
+
+			if (nrq == q->unplug_thresh)
+				__generic_unplug_device(q);
+		}
+	} else
+		/*
+		 * if drain is set, store the request "locally". when the drain
+		 * is finished, the requests will be handed ordered to the io
+		 * scheduler
+		 */
+		list_add_tail(&rq->queuelist, &q->drain_list);
+}
+
+void elv_add_request(request_queue_t *q, struct request *rq, int where,
+		     int plug)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	__elv_add_request(q, rq, where, plug);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+static inline struct request *__elv_next_request(request_queue_t *q)
+{
+	struct request *rq = q->elevator->ops->elevator_next_req_fn(q);
+
+	/*
+	 * if this is a barrier write and the device has to issue a
+	 * flush sequence to support it, check how far we are
+	 */
+	if (rq && blk_fs_request(rq) && blk_barrier_rq(rq)) {
+		BUG_ON(q->ordered == QUEUE_ORDERED_NONE);
+
+		if (q->ordered == QUEUE_ORDERED_FLUSH &&
+		    !blk_barrier_preflush(rq))
+			rq = blk_start_pre_flush(q, rq);
+	}
+
+	return rq;
+}
+
+struct request *elv_next_request(request_queue_t *q)
+{
+	struct request *rq;
+	int ret;
+
+	while ((rq = __elv_next_request(q)) != NULL) {
+		/*
+		 * just mark it as started even if we don't start it, since a
+		 * request that has been delayed should not be passed by new
+		 * incoming requests
+		 */
+		rq->flags |= REQ_STARTED;
+
+		if (rq == q->last_merge)
+			q->last_merge = NULL;
+
+		if ((rq->flags & REQ_DONTPREP) || !q->prep_rq_fn)
+			break;
+
+		ret = q->prep_rq_fn(q, rq);
+		if (ret == BLKPREP_OK) {
+			break;
+		} else if (ret == BLKPREP_DEFER) {
+			rq = NULL;
+			break;
+		} else if (ret == BLKPREP_KILL) {
+			int nr_bytes = rq->hard_nr_sectors << 9;
+
+			if (!nr_bytes)
+				nr_bytes = rq->data_len;
+
+			blkdev_dequeue_request(rq);
+			rq->flags |= REQ_QUIET;
+			end_that_request_chunk(rq, 0, nr_bytes);
+			end_that_request_last(rq);
+		} else {
+			printk(KERN_ERR "%s: bad return=%d\n", __FUNCTION__,
+								ret);
+			break;
+		}
+	}
+
+	return rq;
+}
+
+void elv_remove_request(request_queue_t *q, struct request *rq)
+{
+	elevator_t *e = q->elevator;
+
+	/*
+	 * the time frame between a request being removed from the lists
+	 * and when it is freed is accounted as io that is in progress at
+	 * the driver side. note that we only account requests that the
+	 * driver has seen (REQ_STARTED set), to avoid false accounting
+	 * for request-request merges
+	 */
+	if (blk_account_rq(rq))
+		q->in_flight++;
+
+	/*
+	 * the main clearing point for q->last_merge is on retrieval of
+	 * request by driver (it calls elv_next_request()), but it _can_
+	 * also happen here if a request is added to the queue but later
+	 * deleted without ever being given to driver (merged with another
+	 * request).
+	 */
+	if (rq == q->last_merge)
+		q->last_merge = NULL;
+
+	if (e->ops->elevator_remove_req_fn)
+		e->ops->elevator_remove_req_fn(q, rq);
+}
+
+int elv_queue_empty(request_queue_t *q)
+{
+	elevator_t *e = q->elevator;
+
+	if (e->ops->elevator_queue_empty_fn)
+		return e->ops->elevator_queue_empty_fn(q);
+
+	return list_empty(&q->queue_head);
+}
+
+struct request *elv_latter_request(request_queue_t *q, struct request *rq)
+{
+	struct list_head *next;
+
+	elevator_t *e = q->elevator;
+
+	if (e->ops->elevator_latter_req_fn)
+		return e->ops->elevator_latter_req_fn(q, rq);
+
+	next = rq->queuelist.next;
+	if (next != &q->queue_head && next != &rq->queuelist)
+		return list_entry_rq(next);
+
+	return NULL;
+}
+
+struct request *elv_former_request(request_queue_t *q, struct request *rq)
+{
+	struct list_head *prev;
+
+	elevator_t *e = q->elevator;
+
+	if (e->ops->elevator_former_req_fn)
+		return e->ops->elevator_former_req_fn(q, rq);
+
+	prev = rq->queuelist.prev;
+	if (prev != &q->queue_head && prev != &rq->queuelist)
+		return list_entry_rq(prev);
+
+	return NULL;
+}
+
+int elv_set_request(request_queue_t *q, struct request *rq, int gfp_mask)
+{
+	elevator_t *e = q->elevator;
+
+	if (e->ops->elevator_set_req_fn)
+		return e->ops->elevator_set_req_fn(q, rq, gfp_mask);
+
+	rq->elevator_private = NULL;
+	return 0;
+}
+
+void elv_put_request(request_queue_t *q, struct request *rq)
+{
+	elevator_t *e = q->elevator;
+
+	if (e->ops->elevator_put_req_fn)
+		e->ops->elevator_put_req_fn(q, rq);
+}
+
+int elv_may_queue(request_queue_t *q, int rw)
+{
+	elevator_t *e = q->elevator;
+
+	if (e->ops->elevator_may_queue_fn)
+		return e->ops->elevator_may_queue_fn(q, rw);
+
+	return ELV_MQUEUE_MAY;
+}
+
+void elv_completed_request(request_queue_t *q, struct request *rq)
+{
+	elevator_t *e = q->elevator;
+
+	/*
+	 * request is released from the driver, io must be done
+	 */
+	if (blk_account_rq(rq))
+		q->in_flight--;
+
+	if (e->ops->elevator_completed_req_fn)
+		e->ops->elevator_completed_req_fn(q, rq);
+}
+
+int elv_register_queue(struct request_queue *q)
+{
+	elevator_t *e = q->elevator;
+
+	e->kobj.parent = kobject_get(&q->kobj);
+	if (!e->kobj.parent)
+		return -EBUSY;
+
+	snprintf(e->kobj.name, KOBJ_NAME_LEN, "%s", "iosched");
+	e->kobj.ktype = e->elevator_type->elevator_ktype;
+
+	return kobject_register(&e->kobj);
+}
+
+void elv_unregister_queue(struct request_queue *q)
+{
+	if (q) {
+		elevator_t *e = q->elevator;
+		kobject_unregister(&e->kobj);
+		kobject_put(&q->kobj);
+	}
+}
+
+int elv_register(struct elevator_type *e)
+{
+	if (elevator_find(e->elevator_name))
+		BUG();
+
+	spin_lock_irq(&elv_list_lock);
+	list_add_tail(&e->list, &elv_list);
+	spin_unlock_irq(&elv_list_lock);
+
+	printk(KERN_INFO "io scheduler %s registered", e->elevator_name);
+	if (!strcmp(e->elevator_name, chosen_elevator))
+		printk(" (default)");
+	printk("\n");
+	return 0;
+}
+EXPORT_SYMBOL_GPL(elv_register);
+
+void elv_unregister(struct elevator_type *e)
+{
+	spin_lock_irq(&elv_list_lock);
+	list_del_init(&e->list);
+	spin_unlock_irq(&elv_list_lock);
+}
+EXPORT_SYMBOL_GPL(elv_unregister);
+
+/*
+ * switch to new_e io scheduler. be careful not to introduce deadlocks -
+ * we don't free the old io scheduler, before we have allocated what we
+ * need for the new one. this way we have a chance of going back to the old
+ * one, if the new one fails init for some reason. we also do an intermediate
+ * switch to noop to ensure safety with stack-allocated requests, since they
+ * don't originate from the block layer allocator. noop is safe here, because
+ * it never needs to touch the elevator itself for completion events. DRAIN
+ * flags will make sure we don't touch it for additions either.
+ */
+static void elevator_switch(request_queue_t *q, struct elevator_type *new_e)
+{
+	elevator_t *e = kmalloc(sizeof(elevator_t), GFP_KERNEL);
+	struct elevator_type *noop_elevator = NULL;
+	elevator_t *old_elevator;
+
+	if (!e)
+		goto error;
+
+	/*
+	 * first step, drain requests from the block freelist
+	 */
+	blk_wait_queue_drained(q, 0);
+
+	/*
+	 * unregister old elevator data
+	 */
+	elv_unregister_queue(q);
+	old_elevator = q->elevator;
+
+	/*
+	 * next step, switch to noop since it uses no private rq structures
+	 * and doesn't allocate any memory for anything. then wait for any
+	 * non-fs requests in-flight
+	 */
+	noop_elevator = elevator_get("noop");
+	spin_lock_irq(q->queue_lock);
+	elevator_attach(q, noop_elevator, e);
+	spin_unlock_irq(q->queue_lock);
+
+	blk_wait_queue_drained(q, 1);
+
+	/*
+	 * attach and start new elevator
+	 */
+	if (elevator_attach(q, new_e, e))
+		goto fail;
+
+	if (elv_register_queue(q))
+		goto fail_register;
+
+	/*
+	 * finally exit old elevator and start queue again
+	 */
+	elevator_exit(old_elevator);
+	blk_finish_queue_drain(q);
+	elevator_put(noop_elevator);
+	return;
+
+fail_register:
+	/*
+	 * switch failed, exit the new io scheduler and reattach the old
+	 * one again (along with re-adding the sysfs dir)
+	 */
+	elevator_exit(e);
+fail:
+	q->elevator = old_elevator;
+	elv_register_queue(q);
+	blk_finish_queue_drain(q);
+error:
+	if (noop_elevator)
+		elevator_put(noop_elevator);
+	elevator_put(new_e);
+	printk(KERN_ERR "elevator: switch to %s failed\n", new_e->elevator_name);
+}
+
+ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count)
+{
+	char elevator_name[ELV_NAME_MAX];
+	struct elevator_type *e;
+
+	memset(elevator_name, 0, sizeof(elevator_name));
+	strncpy(elevator_name, name, sizeof(elevator_name) - 1);
+
+	if (elevator_name[strlen(elevator_name) - 1] == '\n')
+		elevator_name[strlen(elevator_name) - 1] = '\0';
+
+	e = elevator_get(elevator_name);
+	if (!e) {
+		printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
+		return -EINVAL;
+	}
+
+	if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
+		elevator_put(e);
+		return count;
+	}
+
+	elevator_switch(q, e);
+	return count;
+}
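+
+/*
+ * Illustrative usage (assuming this store method is wired up to the
+ * queue's "scheduler" sysfs attribute; the device name is an example):
+ *
+ *	# echo deadline > /sys/block/hda/queue/scheduler
+ *
+ * drains the queue and switches io schedulers on the fly through
+ * elevator_switch() above; writing the currently active name is a no-op.
+ */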
+
+ssize_t elv_iosched_show(request_queue_t *q, char *name)
+{
+	elevator_t *e = q->elevator;
+	struct elevator_type *elv = e->elevator_type;
+	struct list_head *entry;
+	int len = 0;
+
+	spin_lock_irq(q->queue_lock);
+	list_for_each(entry, &elv_list) {
+		struct elevator_type *__e;
+
+		__e = list_entry(entry, struct elevator_type, list);
+		if (!strcmp(elv->elevator_name, __e->elevator_name))
+			len += sprintf(name+len, "[%s] ", elv->elevator_name);
+		else
+			len += sprintf(name+len, "%s ", __e->elevator_name);
+	}
+	spin_unlock_irq(q->queue_lock);
+
+	len += sprintf(name + len, "\n");
+	return len;
+}
+
+module_init(elevator_global_init);
+
+EXPORT_SYMBOL(elv_add_request);
+EXPORT_SYMBOL(__elv_add_request);
+EXPORT_SYMBOL(elv_requeue_request);
+EXPORT_SYMBOL(elv_next_request);
+EXPORT_SYMBOL(elv_remove_request);
+EXPORT_SYMBOL(elv_queue_empty);
+EXPORT_SYMBOL(elv_completed_request);
+EXPORT_SYMBOL(elevator_exit);
+EXPORT_SYMBOL(elevator_init);
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
new file mode 100644
index 0000000..42dfa28
--- /dev/null
+++ b/drivers/block/floppy.c
@@ -0,0 +1,4638 @@
+/*
+ *  linux/drivers/block/floppy.c
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *  Copyright (C) 1993, 1994  Alain Knaff
+ *  Copyright (C) 1998 Alan Cox
+ */
+/*
+ * 02.12.91 - Changed to static variables to indicate need for reset
+ * and recalibrate. This makes some things easier (output_byte reset
+ * checking etc), and means less interrupt jumping in case of errors,
+ * so the code is hopefully easier to understand.
+ */
+
+/*
+ * This file is certainly a mess. I've tried my best to get it working,
+ * but I don't like programming floppies, and I have only one anyway.
+ * Urgel. I should check for more errors, and do more graceful error
+ * recovery. Seems there are problems with several drives. I've tried to
+ * correct them. No promises.
+ */
+
+/*
+ * As with hd.c, all routines within this file can (and will) be called
+ * by interrupts, so extreme caution is needed. A hardware interrupt
+ * handler may not sleep, or a kernel panic will happen. Thus I cannot
+ * call "floppy-on" directly, but have to set a special timer interrupt
+ * etc.
+ */
+
+/*
+ * 28.02.92 - made track-buffering routines, based on the routines written
+ * by entropy@wintermute.wpi.edu (Lawrence Foard). Linus.
+ */
+
+/*
+ * Automatic floppy-detection and formatting written by Werner Almesberger
+ * (almesber@nessie.cs.id.ethz.ch), who also corrected some problems with
+ * the floppy-change signal detection.
+ */
+
+/*
+ * 1992/7/22 -- Hennus Bergman: Added better error reporting, fixed
+ * FDC data overrun bug, added some preliminary stuff for vertical
+ * recording support.
+ *
+ * 1992/9/17: Added DMA allocation & DMA functions. -- hhb.
+ *
+ * TODO: Errors are still not counted properly.
+ */
+
+/* 1992/9/20
+ * Modifications for ``Sector Shifting'' by Rob Hooft (hooft@chem.ruu.nl)
+ * modeled after the freeware MS-DOS program fdformat/88 V1.8 by
+ * Christoph H. Hochst\"atter.
+ * I have fixed the shift values to the ones I always use. Maybe a new
+ * ioctl() should be created to be able to modify them.
+ * There is a bug in the driver that makes it impossible to format a
+ * floppy as the first thing after bootup.
+ */
+
+/*
+ * 1993/4/29 -- Linus -- cleaned up the timer handling in the kernel, and
+ * this helped the floppy driver as well. Much cleaner, and still seems to
+ * work.
+ */
+
+/* 1994/6/24 --bbroad-- added the floppy table entries and made
+ * minor modifications to allow 2.88 floppies to be run.
+ */
+
+/* 1994/7/13 -- Paul Vojta -- modified the probing code to allow three or more
+ * disk types.
+ */
+
+/*
+ * 1994/8/8 -- Alain Knaff -- Switched to fdpatch driver: Support for bigger
+ * format bug fixes, but unfortunately some new bugs too...
+ */
+
+/* 1994/9/17 -- Koen Holtman -- added logging of physical floppy write
+ * errors to allow safe writing by specialized programs.
+ */
+
+/* 1995/4/24 -- Dan Fandrich -- added support for Commodore 1581 3.5" disks
+ * by defining bit 1 of the "stretch" parameter to mean put sectors on the
+ * opposite side of the disk, leaving the sector IDs alone (i.e. Commodore's
+ * drives are "upside-down").
+ */
+
+/*
+ * 1995/8/26 -- Andreas Busse -- added Mips support.
+ */
+
+/*
+ * 1995/10/18 -- Ralf Baechle -- Portability cleanup; move machine dependent
+ * features to asm/floppy.h.
+ */
+
+/*
+ * 1998/05/07 -- Russell King -- More portability cleanups; moved definition of
+ * interrupt and dma channel to asm/floppy.h. Cleaned up some formatting &
+ * use of '0' for NULL.
+ */
+
+/*
+ * 1998/06/07 -- Alan Cox -- Merged the 2.0.34 fixes for resource allocation
+ * failures.
+ */
+
+/*
+ * 1998/09/20 -- David Weinehall -- Added slow-down code for buggy PS/2-drives.
+ */
+
+/*
+ * 1999/08/13 -- Paul Slootman -- floppy stopped working on Alpha after 24
+ * days, 6 hours, 32 minutes and 32 seconds (i.e. MAXINT jiffies; ints were
+ * being used to store jiffies, which are unsigned longs).
+ */
+
+/*
+ * 2000/08/28 -- Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ * - get rid of check_region
+ * - s/suser/capable/
+ */
+
+/*
+ * 2001/08/26 -- Paul Gortmaker - fix insmod oops on machines with no
+ * floppy controller (lingering task on list after module is gone... boom.)
+ */
+
+/*
+ * 2002/02/07 -- Anton Altaparmakov - Fix io ports reservation to correct range
+ * (0x3f2-0x3f5, 0x3f7). This fix is a bit of a hack but the proper fix
+ * requires many non-obvious changes in arch dependent code.
+ */
+
+/* 2003/07/28 -- Daniele Bellucci <bellucda@tiscali.it>.
+ * Better audit of register_blkdev.
+ */
+
+#define FLOPPY_SANITY_CHECK
+#undef  FLOPPY_SILENT_DCL_CLEAR
+
+#define REALLY_SLOW_IO
+
+#define DEBUGT 2
+#define DCL_DEBUG		/* debug disk change line */
+
+/* do print messages for unexpected interrupts */
+static int print_unex = 1;
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#define FDPATCHES
+#include <linux/fdreg.h>
+
+/*
+ * 1998/1/21 -- Richard Gooch <rgooch@atnf.csiro.au> -- devfs support
+ */
+
+#include <linux/fd.h>
+#include <linux/hdreg.h>
+
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/bio.h>
+#include <linux/string.h>
+#include <linux/fcntl.h>
+#include <linux/delay.h>
+#include <linux/mc146818rtc.h>	/* CMOS defines */
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/devfs_fs_kernel.h>
+#include <linux/device.h>
+#include <linux/buffer_head.h>	/* for invalidate_buffers() */
+
+/*
+ * PS/2 floppies have much slower step rates than regular floppies.
+ * It's been recommended that they take about 1/4 of the default speed
+ * in some more extreme cases.
+ */
+static int slow_floppy;
+
+#include <asm/dma.h>
+#include <asm/irq.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+
+static int FLOPPY_IRQ = 6;
+static int FLOPPY_DMA = 2;
+static int can_use_virtual_dma = 2;
+/* =======
+ * can use virtual DMA:
+ * 0 = use of virtual DMA disallowed by config
+ * 1 = use of virtual DMA prescribed by config
+ * 2 = no virtual DMA preference configured.  By default try hard DMA,
+ * but fall back on virtual DMA when not enough memory available
+ */
+
+static int use_virtual_dma;
+/* =======
+ * use virtual DMA
+ * 0 using hard DMA
+ * 1 using virtual DMA
+ * This variable is set to virtual when a DMA mem problem arises, and
+ * reset back in floppy_grab_irq_and_dma.
+ * It is not safe to reset it in other circumstances, because the floppy
+ * driver may have several buffers in use at once, and we currently do
+ * not record each buffer's capabilities
+ */
+
+static DEFINE_SPINLOCK(floppy_lock);
+static struct completion device_release;
+
+static unsigned short virtual_dma_port = 0x3f0;
+irqreturn_t floppy_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+static int set_dor(int fdc, char mask, char data);
+static void register_devfs_entries(int drive) __init;
+
+#define K_64	0x10000		/* 64KB */
+
+/* the following is the mask of allowed drives. By default units 2 and
+ * 3 of both floppy controllers are disabled, because switching on the
+ * motor of these drives causes system hangs on some PCI computers. drive
+ * 0 is the low bit (0x1), and drive 7 is the high bit (0x80). Bits are on if
+ * a drive is allowed.
+ *
+ * NOTE: This must come before we include the arch floppy header because
+ *       some ports reference this variable from there. -DaveM
+ */
+
+static int allowed_drive_mask = 0x33;
+
+#include <asm/floppy.h>
+
+static int irqdma_allocated;
+
+#define LOCAL_END_REQUEST
+#define DEVICE_NAME "floppy"
+
+#include <linux/blkdev.h>
+#include <linux/blkpg.h>
+#include <linux/cdrom.h>	/* for the compatibility eject ioctl */
+#include <linux/completion.h>
+
+static struct request *current_req;
+static struct request_queue *floppy_queue;
+static void do_fd_request(request_queue_t * q);
+
+#ifndef fd_get_dma_residue
+#define fd_get_dma_residue() get_dma_residue(FLOPPY_DMA)
+#endif
+
+/* DMA memory related stuff */
+
+#ifndef fd_dma_mem_free
+#define fd_dma_mem_free(addr, size) free_pages(addr, get_order(size))
+#endif
+
+#ifndef fd_dma_mem_alloc
+#define fd_dma_mem_alloc(size) __get_dma_pages(GFP_KERNEL,get_order(size))
+#endif
+
+static inline void fallback_on_nodma_alloc(char **addr, size_t l)
+{
+#ifdef FLOPPY_CAN_FALLBACK_ON_NODMA
+	if (*addr)
+		return;		/* we have the memory */
+	if (can_use_virtual_dma != 2)
+		return;		/* no fallback allowed */
+	printk("DMA memory shortage. Temporarily falling back on virtual DMA\n");
+	*addr = (char *)nodma_mem_alloc(l);
+#else
+	return;
+#endif
+}
+
+/* End DMA memory related stuff */
+
+static unsigned long fake_change;
+static int initialising = 1;
+
+#define ITYPE(x) (((x)>>2) & 0x1f)
+#define TOMINOR(x) ((x & 3) | ((x & 4) << 5))
+#define UNIT(x) ((x) & 0x03)	/* drive on fdc */
+#define FDC(x) (((x) & 0x04) >> 2)	/* fdc of drive */
+#define REVDRIVE(fdc, unit) ((unit) + ((fdc) << 2))
+				/* reverse mapping from unit and fdc to drive */
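+
+/*
+ * Worked example (informal): for drive 5, i.e. unit 1 on fdc 1:
+ *
+ *	UNIT(5)        == 5 & 3                     == 1
+ *	FDC(5)         == (5 & 4) >> 2              == 1
+ *	REVDRIVE(1, 1) == 1 + (1 << 2)              == 5
+ *	TOMINOR(5)     == (5 & 3) | ((5 & 4) << 5)  == 129
+ */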
+#define DP (&drive_params[current_drive])
+#define DRS (&drive_state[current_drive])
+#define DRWE (&write_errors[current_drive])
+#define FDCS (&fdc_state[fdc])
+#define CLEARF(x) (clear_bit(x##_BIT, &DRS->flags))
+#define SETF(x) (set_bit(x##_BIT, &DRS->flags))
+#define TESTF(x) (test_bit(x##_BIT, &DRS->flags))
+
+#define UDP (&drive_params[drive])
+#define UDRS (&drive_state[drive])
+#define UDRWE (&write_errors[drive])
+#define UFDCS (&fdc_state[FDC(drive)])
+#define UCLEARF(x) (clear_bit(x##_BIT, &UDRS->flags))
+#define USETF(x) (set_bit(x##_BIT, &UDRS->flags))
+#define UTESTF(x) (test_bit(x##_BIT, &UDRS->flags))
+
+#define DPRINT(format, args...) printk(DEVICE_NAME "%d: " format, current_drive , ## args)
+
+#define PH_HEAD(floppy,head) (((((floppy)->stretch & 2) >>1) ^ head) << 2)
+#define STRETCH(floppy) ((floppy)->stretch & FD_STRETCH)
+
+#define CLEARSTRUCT(x) memset((x), 0, sizeof(*(x)))
+
+/* read/write */
+#define COMMAND raw_cmd->cmd[0]
+#define DR_SELECT raw_cmd->cmd[1]
+#define TRACK raw_cmd->cmd[2]
+#define HEAD raw_cmd->cmd[3]
+#define SECTOR raw_cmd->cmd[4]
+#define SIZECODE raw_cmd->cmd[5]
+#define SECT_PER_TRACK raw_cmd->cmd[6]
+#define GAP raw_cmd->cmd[7]
+#define SIZECODE2 raw_cmd->cmd[8]
+#define NR_RW 9
+
+/* format */
+#define F_SIZECODE raw_cmd->cmd[2]
+#define F_SECT_PER_TRACK raw_cmd->cmd[3]
+#define F_GAP raw_cmd->cmd[4]
+#define F_FILL raw_cmd->cmd[5]
+#define NR_F 6
+
+/*
+ * Maximum disk size (in kilobytes). This default is used whenever the
+ * current disk size is unknown.
+ * [Now it is rather a minimum]
+ */
+#define MAX_DISK_SIZE 4		/* 3984 */
+
+/*
+ * globals used by 'result()'
+ */
+#define MAX_REPLIES 16
+static unsigned char reply_buffer[MAX_REPLIES];
+static int inr;			/* size of reply buffer, when called from interrupt */
+#define ST0 (reply_buffer[0])
+#define ST1 (reply_buffer[1])
+#define ST2 (reply_buffer[2])
+#define ST3 (reply_buffer[0])	/* result of GETSTATUS */
+#define R_TRACK (reply_buffer[3])
+#define R_HEAD (reply_buffer[4])
+#define R_SECTOR (reply_buffer[5])
+#define R_SIZECODE (reply_buffer[6])
+
+#define SEL_DLY (2*HZ/100)
+
+/*
+ * this struct defines the different floppy drive types.
+ */
+static struct {
+	struct floppy_drive_params params;
+	const char *name;	/* name printed while booting */
+} default_drive_params[] = {
+/* NOTE: the time values in jiffies should be in msec!
+ CMOS drive type
+  |     Maximum data rate supported by drive type
+  |     |   Head load time, msec
+  |     |   |   Head unload time, msec (not used)
+  |     |   |   |     Step rate interval, usec
+  |     |   |   |     |       Time needed for spinup time (jiffies)
+  |     |   |   |     |       |      Timeout for spinning down (jiffies)
+  |     |   |   |     |       |      |   Spindown offset (where disk stops)
+  |     |   |   |     |       |      |   |     Select delay
+  |     |   |   |     |       |      |   |     |     RPS
+  |     |   |   |     |       |      |   |     |     |    Max number of tracks
+  |     |   |   |     |       |      |   |     |     |    |     Interrupt timeout
+  |     |   |   |     |       |      |   |     |     |    |     |   Max nonintlv. sectors
+  |     |   |   |     |       |      |   |     |     |    |     |   | -Max Errors- flags */
+{{0,  500, 16, 16, 8000,    1*HZ, 3*HZ,  0, SEL_DLY, 5,  80, 3*HZ, 20, {3,1,2,0,2}, 0,
+      0, { 7, 4, 8, 2, 1, 5, 3,10}, 3*HZ/2, 0 }, "unknown" },
+
+{{1,  300, 16, 16, 8000,    1*HZ, 3*HZ,  0, SEL_DLY, 5,  40, 3*HZ, 17, {3,1,2,0,2}, 0,
+      0, { 1, 0, 0, 0, 0, 0, 0, 0}, 3*HZ/2, 1 }, "360K PC" }, /*5 1/4 360 KB PC*/
+
+{{2,  500, 16, 16, 6000, 4*HZ/10, 3*HZ, 14, SEL_DLY, 6,  83, 3*HZ, 17, {3,1,2,0,2}, 0,
+      0, { 2, 5, 6,23,10,20,12, 0}, 3*HZ/2, 2 }, "1.2M" }, /*5 1/4 HD AT*/
+
+{{3,  250, 16, 16, 3000,    1*HZ, 3*HZ,  0, SEL_DLY, 5,  83, 3*HZ, 20, {3,1,2,0,2}, 0,
+      0, { 4,22,21,30, 3, 0, 0, 0}, 3*HZ/2, 4 }, "720k" }, /*3 1/2 DD*/
+
+{{4,  500, 16, 16, 4000, 4*HZ/10, 3*HZ, 10, SEL_DLY, 5,  83, 3*HZ, 20, {3,1,2,0,2}, 0,
+      0, { 7, 4,25,22,31,21,29,11}, 3*HZ/2, 7 }, "1.44M" }, /*3 1/2 HD*/
+
+{{5, 1000, 15,  8, 3000, 4*HZ/10, 3*HZ, 10, SEL_DLY, 5,  83, 3*HZ, 40, {3,1,2,0,2}, 0,
+      0, { 7, 8, 4,25,28,22,31,21}, 3*HZ/2, 8 }, "2.88M AMI BIOS" }, /*3 1/2 ED*/
+
+{{6, 1000, 15,  8, 3000, 4*HZ/10, 3*HZ, 10, SEL_DLY, 5,  83, 3*HZ, 40, {3,1,2,0,2}, 0,
+      0, { 7, 8, 4,25,28,22,31,21}, 3*HZ/2, 8 }, "2.88M" } /*3 1/2 ED*/
+/*    |  --autodetected formats---    |      |      |
+ *    read_track                      |      |    Name printed when booting
+ *				      |     Native format
+ *	            Frequency of disk change checks */
+};
+
+static struct floppy_drive_params drive_params[N_DRIVE];
+static struct floppy_drive_struct drive_state[N_DRIVE];
+static struct floppy_write_errors write_errors[N_DRIVE];
+static struct timer_list motor_off_timer[N_DRIVE];
+static struct gendisk *disks[N_DRIVE];
+static struct block_device *opened_bdev[N_DRIVE];
+static DECLARE_MUTEX(open_lock);
+static struct floppy_raw_cmd *raw_cmd, default_raw_cmd;
+
+/*
+ * This struct defines the different floppy types.
+ *
+ * Bit 0 of 'stretch' tells if the tracks need to be doubled for some
+ * types (e.g. 360kB diskette in 1.2MB drive, etc.).  Bit 1 of 'stretch'
+ * tells if the disk is in Commodore 1581 format, which means side 0 sectors
+ * are located on side 1 of the disk but with a side 0 ID, and vice-versa.
+ * This is the same as the Sharp MZ-80 5.25" CP/M disk format, except that the
+ * 1581's logical side 0 is on physical side 1, whereas the Sharp's logical
+ * side 0 is on physical side 0 (but with the misnamed sector IDs).
+ * 'stretch' should probably be renamed to something more general, like
+ * 'options'.  Other parameters should be self-explanatory (see also
+ * setfdprm(8)).
+ */
+/*
+	    Size
+	     |  Sectors per track
+	     |  | Head
+	     |  | |  Tracks
+	     |  | |  | Stretch
+	     |  | |  | |  Gap 1 size
+	     |  | |  | |    |  Data rate, | 0x40 for perp
+	     |  | |  | |    |    |  Spec1 (stepping rate, head unload
+	     |  | |  | |    |    |    |    /fmt gap (gap2) */
+static struct floppy_struct floppy_type[32] = {
+	{    0, 0,0, 0,0,0x00,0x00,0x00,0x00,NULL    },	/*  0 no testing    */
+	{  720, 9,2,40,0,0x2A,0x02,0xDF,0x50,"d360"  }, /*  1 360KB PC      */
+	{ 2400,15,2,80,0,0x1B,0x00,0xDF,0x54,"h1200" },	/*  2 1.2MB AT      */
+	{  720, 9,1,80,0,0x2A,0x02,0xDF,0x50,"D360"  },	/*  3 360KB SS 3.5" */
+	{ 1440, 9,2,80,0,0x2A,0x02,0xDF,0x50,"D720"  },	/*  4 720KB 3.5"    */
+	{  720, 9,2,40,1,0x23,0x01,0xDF,0x50,"h360"  },	/*  5 360KB AT      */
+	{ 1440, 9,2,80,0,0x23,0x01,0xDF,0x50,"h720"  },	/*  6 720KB AT      */
+	{ 2880,18,2,80,0,0x1B,0x00,0xCF,0x6C,"H1440" },	/*  7 1.44MB 3.5"   */
+	{ 5760,36,2,80,0,0x1B,0x43,0xAF,0x54,"E2880" },	/*  8 2.88MB 3.5"   */
+	{ 6240,39,2,80,0,0x1B,0x43,0xAF,0x28,"E3120" },	/*  9 3.12MB 3.5"   */
+
+	{ 2880,18,2,80,0,0x25,0x00,0xDF,0x02,"h1440" }, /* 10 1.44MB 5.25"  */
+	{ 3360,21,2,80,0,0x1C,0x00,0xCF,0x0C,"H1680" }, /* 11 1.68MB 3.5"   */
+	{  820,10,2,41,1,0x25,0x01,0xDF,0x2E,"h410"  },	/* 12 410KB 5.25"   */
+	{ 1640,10,2,82,0,0x25,0x02,0xDF,0x2E,"H820"  },	/* 13 820KB 3.5"    */
+	{ 2952,18,2,82,0,0x25,0x00,0xDF,0x02,"h1476" },	/* 14 1.48MB 5.25"  */
+	{ 3444,21,2,82,0,0x25,0x00,0xDF,0x0C,"H1722" },	/* 15 1.72MB 3.5"   */
+	{  840,10,2,42,1,0x25,0x01,0xDF,0x2E,"h420"  },	/* 16 420KB 5.25"   */
+	{ 1660,10,2,83,0,0x25,0x02,0xDF,0x2E,"H830"  },	/* 17 830KB 3.5"    */
+	{ 2988,18,2,83,0,0x25,0x00,0xDF,0x02,"h1494" },	/* 18 1.49MB 5.25"  */
+	{ 3486,21,2,83,0,0x25,0x00,0xDF,0x0C,"H1743" }, /* 19 1.74MB 3.5"   */
+
+	{ 1760,11,2,80,0,0x1C,0x09,0xCF,0x00,"h880"  }, /* 20 880KB 5.25"   */
+	{ 2080,13,2,80,0,0x1C,0x01,0xCF,0x00,"D1040" }, /* 21 1.04MB 3.5"   */
+	{ 2240,14,2,80,0,0x1C,0x19,0xCF,0x00,"D1120" }, /* 22 1.12MB 3.5"   */
+	{ 3200,20,2,80,0,0x1C,0x20,0xCF,0x2C,"h1600" }, /* 23 1.6MB 5.25"   */
+	{ 3520,22,2,80,0,0x1C,0x08,0xCF,0x2e,"H1760" }, /* 24 1.76MB 3.5"   */
+	{ 3840,24,2,80,0,0x1C,0x20,0xCF,0x00,"H1920" }, /* 25 1.92MB 3.5"   */
+	{ 6400,40,2,80,0,0x25,0x5B,0xCF,0x00,"E3200" }, /* 26 3.20MB 3.5"   */
+	{ 7040,44,2,80,0,0x25,0x5B,0xCF,0x00,"E3520" }, /* 27 3.52MB 3.5"   */
+	{ 7680,48,2,80,0,0x25,0x63,0xCF,0x00,"E3840" }, /* 28 3.84MB 3.5"   */
+
+	{ 3680,23,2,80,0,0x1C,0x10,0xCF,0x00,"H1840" }, /* 29 1.84MB 3.5"   */
+	{ 1600,10,2,80,0,0x25,0x02,0xDF,0x2E,"D800"  },	/* 30 800KB 3.5"    */
+	{ 3200,20,2,80,0,0x1C,0x00,0xCF,0x2C,"H1600" }, /* 31 1.6MB 3.5"    */
+};
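+
+/*
+ * Worked example for the table above (illustrative): entry 7 ("H1440")
+ * holds 18 sectors * 2 heads * 80 tracks == 2880 512-byte sectors, i.e.
+ * 1440 kB.  Entry 5 ("h360") has stretch bit 0 set, so its 40 logical
+ * tracks are written on every other physical track of an 80-track drive.
+ */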
+
+#define	NUMBER(x)	(sizeof(x) / sizeof(*(x)))
+#define SECTSIZE (_FD_SECTSIZE(*floppy))
+
+/* Auto-detection: Disk type used until the next media change occurs. */
+static struct floppy_struct *current_type[N_DRIVE];
+
+/*
+ * User-provided type information. current_type points to
+ * the respective entry of this array.
+ */
+static struct floppy_struct user_params[N_DRIVE];
+
+static sector_t floppy_sizes[256];
+
+/*
+ * The driver is trying to determine the correct media format
+ * while probing is set. rw_interrupt() clears it after a
+ * successful access.
+ */
+static int probing;
+
+/* Synchronization of FDC access. */
+#define FD_COMMAND_NONE -1
+#define FD_COMMAND_ERROR 2
+#define FD_COMMAND_OKAY 3
+
+static volatile int command_status = FD_COMMAND_NONE;
+static unsigned long fdc_busy;
+static DECLARE_WAIT_QUEUE_HEAD(fdc_wait);
+static DECLARE_WAIT_QUEUE_HEAD(command_done);
+
+#define NO_SIGNAL (!interruptible || !signal_pending(current))
+#define CALL(x) if ((x) == -EINTR) return -EINTR
+#define ECALL(x) if ((ret = (x))) return ret;
+#define _WAIT(x,i) CALL(ret=wait_til_done((x),i))
+#define WAIT(x) _WAIT((x),interruptible)
+#define IWAIT(x) _WAIT((x),1)
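+
+/*
+ * Usage example: IWAIT(redo_format) below schedules redo_format() in the
+ * bottom half and sleeps interruptibly on command_done until the current
+ * continuation reports completion; a signal propagates out as -EINTR.
+ */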
+
+/* Errors during formatting are counted here. */
+static int format_errors;
+
+/* Format request descriptor. */
+static struct format_descr format_req;
+
+/*
+ * Rate is 0 for 500 kbps, 1 for 300 kbps, 2 for 250 kbps
+ * Spec1 is 0xSH, where S is stepping rate (F=1ms, E=2ms, D=3ms etc),
+ * H is head unload time (1=16ms, 2=32ms, etc)
+ */
+
+/*
+ * Track buffer
+ * Because these are written to by the DMA controller, they must not
+ * cross a 64k boundary, or data will be corrupted/lost.
+ */
+static char *floppy_track_buffer;
+static int max_buffer_sectors;
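+
+/*
+ * Illustrative check (not used by the driver): a buffer of 'len' bytes at
+ * bus address 'addr' crosses a 64k DMA page iff
+ *
+ *	((addr & 0xffff) + len) > 0x10000
+ *
+ * fd_dma_mem_alloc() avoids this because a power-of-two allocation from
+ * __get_dma_pages() is aligned to its own size, so an allocation of 64k
+ * or less can never straddle such a boundary.
+ */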
+
+static int *errors;
+typedef void (*done_f) (int);
+static struct cont_t {
+	void (*interrupt) (void);	/* this is called after the interrupt of the
+					 * main command */
+	void (*redo) (void);	/* this is called to retry the operation */
+	void (*error) (void);	/* this is called to tally an error */
+	done_f done;		/* this is called to say if the operation has
+				 * succeeded/failed */
+} *cont;
+
+static void floppy_ready(void);
+static void floppy_start(void);
+static void process_fd_request(void);
+static void recalibrate_floppy(void);
+static void floppy_shutdown(unsigned long);
+
+static int floppy_grab_irq_and_dma(void);
+static void floppy_release_irq_and_dma(void);
+
+/*
+ * The "reset" variable should be tested whenever an interrupt is scheduled,
+ * after the commands have been sent. This is to ensure that the driver doesn't
+ * get wedged when the interrupt doesn't come because of a failed command.
+ * reset doesn't need to be tested before sending commands, because
+ * output_byte is automatically disabled when reset is set.
+ */
+#define CHECK_RESET { if (FDCS->reset){ reset_fdc(); return; } }
+static void reset_fdc(void);
+
+/*
+ * These are global variables, as that's the easiest way to give
+ * information to interrupts. They are the data used for the current
+ * request.
+ */
+#define NO_TRACK -1
+#define NEED_1_RECAL -2
+#define NEED_2_RECAL -3
+
+static int usage_count;
+
+/* buffer related variables */
+static int buffer_track = -1;
+static int buffer_drive = -1;
+static int buffer_min = -1;
+static int buffer_max = -1;
+
+/* fdc related variables, should end up in a struct */
+static struct floppy_fdc_state fdc_state[N_FDC];
+static int fdc;			/* current fdc */
+
+static struct floppy_struct *_floppy = floppy_type;
+static unsigned char current_drive;
+static long current_count_sectors;
+static unsigned char fsector_t;	/* sector in track */
+static unsigned char in_sector_offset;	/* offset within physical sector,
+					 * expressed in units of 512 bytes */
+
+#ifndef fd_eject
+static inline int fd_eject(int drive)
+{
+	return -EINVAL;
+}
+#endif
+
+/*
+ * Debugging
+ * =========
+ */
+#ifdef DEBUGT
+static unsigned long debugtimer;
+
+static inline void set_debugt(void)
+{
+	debugtimer = jiffies;
+}
+
+static inline void debugt(const char *message)
+{
+	if (DP->flags & DEBUGT)
+		printk("%s dtime=%lu\n", message, jiffies - debugtimer);
+}
+#else
+static inline void set_debugt(void) { }
+static inline void debugt(const char *message) { }
+#endif /* DEBUGT */
+
+typedef void (*timeout_fn) (unsigned long);
+static struct timer_list fd_timeout = TIMER_INITIALIZER(floppy_shutdown, 0, 0);
+
+static const char *timeout_message;
+
+#ifdef FLOPPY_SANITY_CHECK
+static void is_alive(const char *message)
+{
+	/* this routine checks whether the floppy driver is "alive" */
+	if (test_bit(0, &fdc_busy) && command_status < 2
+	    && !timer_pending(&fd_timeout)) {
+		DPRINT("timeout handler died: %s\n", message);
+	}
+}
+#endif
+
+static void (*do_floppy) (void) = NULL;
+
+#ifdef FLOPPY_SANITY_CHECK
+
+#define OLOGSIZE 20
+
+static void (*lasthandler) (void);
+static unsigned long interruptjiffies;
+static unsigned long resultjiffies;
+static int resultsize;
+static unsigned long lastredo;
+
+static struct output_log {
+	unsigned char data;
+	unsigned char status;
+	unsigned long jiffies;
+} output_log[OLOGSIZE];
+
+static int output_log_pos;
+#endif
+
+#define current_reqD -1
+#define MAXTIMEOUT -2
+
+static void __reschedule_timeout(int drive, const char *message, int marg)
+{
+	if (drive == current_reqD)
+		drive = current_drive;
+	del_timer(&fd_timeout);
+	if (drive < 0 || drive >= N_DRIVE) {
+		fd_timeout.expires = jiffies + 20UL * HZ;
+		drive = 0;
+	} else
+		fd_timeout.expires = jiffies + UDP->timeout;
+	add_timer(&fd_timeout);
+	if (UDP->flags & FD_DEBUG) {
+		DPRINT("reschedule timeout ");
+		printk(message, marg);
+		printk("\n");
+	}
+	timeout_message = message;
+}
+
+static void reschedule_timeout(int drive, const char *message, int marg)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&floppy_lock, flags);
+	__reschedule_timeout(drive, message, marg);
+	spin_unlock_irqrestore(&floppy_lock, flags);
+}
+
+#define INFBOUND(a,b) (a)=max_t(int, a, b)	/* enforce lower bound b on a */
+
+#define SUPBOUND(a,b) (a)=min_t(int, a, b)	/* enforce upper bound b on a */
+
+/*
+ * Bottom half floppy driver.
+ * ==========================
+ *
+ * This part of the file contains the code talking directly to the hardware,
+ * and also the main service loop (seek-configure-spinup-command)
+ */
+
+/*
+ * disk change.
+ * This routine is responsible for maintaining the FD_DISK_CHANGE flag,
+ * and the last_checked date.
+ *
+ * last_checked is the date of the last check which showed 'no disk change'
+ * FD_DISK_CHANGE is set under two conditions:
+ * 1. The floppy has been changed after some i/o to that floppy already
+ *    took place.
+ * 2. No floppy disk is in the drive. This is done in order to ensure that
+ *    requests are quickly flushed in case there is no disk in the drive. It
+ *    follows that FD_DISK_CHANGE can only be cleared if there is a disk in
+ *    the drive.
+ *
+ * For 1., maxblock is observed. Maxblock is 0 if no i/o has taken place yet.
+ * For 2., FD_DISK_NEWCHANGE is watched. FD_DISK_NEWCHANGE is cleared on
+ *  each seek. If a disk is present, the disk change line should also be
+ *  cleared on each seek. Thus, if FD_DISK_NEWCHANGE is clear, but the disk
+ *  change line is set, this means either that no disk is in the drive, or
+ *  that it has been removed since the last seek.
+ *
+ * This means that we really have a third possibility too:
+ *  The floppy has been changed after the last seek.
+ */
+
+static int disk_change(int drive)
+{
+	int fdc = FDC(drive);
+#ifdef FLOPPY_SANITY_CHECK
+	if (jiffies - UDRS->select_date < UDP->select_delay)
+		DPRINT("WARNING disk change called early\n");
+	if (!(FDCS->dor & (0x10 << UNIT(drive))) ||
+	    (FDCS->dor & 3) != UNIT(drive) || fdc != FDC(drive)) {
+		DPRINT("probing disk change on unselected drive\n");
+		DPRINT("drive=%d fdc=%d dor=%x\n", drive, FDC(drive),
+		       (unsigned int)FDCS->dor);
+	}
+#endif
+
+#ifdef DCL_DEBUG
+	if (UDP->flags & FD_DEBUG) {
+		DPRINT("checking disk change line for drive %d\n", drive);
+		DPRINT("jiffies=%lu\n", jiffies);
+		DPRINT("disk change line=%x\n", fd_inb(FD_DIR) & 0x80);
+		DPRINT("flags=%lx\n", UDRS->flags);
+	}
+#endif
+	if (UDP->flags & FD_BROKEN_DCL)
+		return UTESTF(FD_DISK_CHANGED);
+	if ((fd_inb(FD_DIR) ^ UDP->flags) & 0x80) {
+		USETF(FD_VERIFY);	/* verify write protection */
+		if (UDRS->maxblock) {
+			/* mark it changed */
+			USETF(FD_DISK_CHANGED);
+		}
+
+		/* invalidate its geometry */
+		if (UDRS->keep_data >= 0) {
+			if ((UDP->flags & FTD_MSG) &&
+			    current_type[drive] != NULL)
+				DPRINT("Disk type is undefined after "
+				       "disk change\n");
+			current_type[drive] = NULL;
+			floppy_sizes[TOMINOR(drive)] = MAX_DISK_SIZE << 1;
+		}
+
+		/*USETF(FD_DISK_NEWCHANGE); */
+		return 1;
+	} else {
+		UDRS->last_checked = jiffies;
+		UCLEARF(FD_DISK_NEWCHANGE);
+	}
+	return 0;
+}
+
+static inline int is_selected(int dor, int unit)
+{
+	return ((dor & (0x10 << unit)) && (dor & 3) == unit);
+}
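+
+/*
+ * For reference (82077AA datasheet): DOR bits 0-1 select the unit, bit 2
+ * is the active-low reset, bit 3 gates DMA and interrupts, and bits 4-7
+ * (0x10 << unit) are the per-drive motor enables -- hence the masks used
+ * by set_dor() below and is_selected() above.
+ */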
+
+static int set_dor(int fdc, char mask, char data)
+{
+	register unsigned char drive, unit, newdor, olddor;
+
+	if (FDCS->address == -1)
+		return -1;
+
+	olddor = FDCS->dor;
+	newdor = (olddor & mask) | data;
+	if (newdor != olddor) {
+		unit = olddor & 0x3;
+		if (is_selected(olddor, unit) && !is_selected(newdor, unit)) {
+			drive = REVDRIVE(fdc, unit);
+#ifdef DCL_DEBUG
+			if (UDP->flags & FD_DEBUG) {
+				DPRINT("calling disk change from set_dor\n");
+			}
+#endif
+			disk_change(drive);
+		}
+		FDCS->dor = newdor;
+		fd_outb(newdor, FD_DOR);
+
+		unit = newdor & 0x3;
+		if (!is_selected(olddor, unit) && is_selected(newdor, unit)) {
+			drive = REVDRIVE(fdc, unit);
+			UDRS->select_date = jiffies;
+		}
+	}
+	/*
+	 *      We should propagate failures to grab the resources back
+	 *      nicely from here. Actually we ought to rewrite the fd
+	 *      driver some day too.
+	 */
+	if (newdor & FLOPPY_MOTOR_MASK)
+		floppy_grab_irq_and_dma();
+	if (olddor & FLOPPY_MOTOR_MASK)
+		floppy_release_irq_and_dma();
+	return olddor;
+}
+
+static void twaddle(void)
+{
+	if (DP->select_delay)
+		return;
+	fd_outb(FDCS->dor & ~(0x10 << UNIT(current_drive)), FD_DOR);
+	fd_outb(FDCS->dor, FD_DOR);
+	DRS->select_date = jiffies;
+}
+
+/* reset all driver information about the current fdc. This is needed after
+ * a reset, and after a raw command. */
+static void reset_fdc_info(int mode)
+{
+	int drive;
+
+	FDCS->spec1 = FDCS->spec2 = -1;
+	FDCS->need_configure = 1;
+	FDCS->perp_mode = 1;
+	FDCS->rawcmd = 0;
+	for (drive = 0; drive < N_DRIVE; drive++)
+		if (FDC(drive) == fdc && (mode || UDRS->track != NEED_1_RECAL))
+			UDRS->track = NEED_2_RECAL;
+}
+
+/* selects the fdc and drive, and enables the fdc's input/dma. */
+static void set_fdc(int drive)
+{
+	if (drive >= 0 && drive < N_DRIVE) {
+		fdc = FDC(drive);
+		current_drive = drive;
+	}
+	if (fdc != 1 && fdc != 0) {
+		printk("bad fdc value\n");
+		return;
+	}
+	set_dor(fdc, ~0, 8);
+#if N_FDC > 1
+	set_dor(1 - fdc, ~8, 0);
+#endif
+	if (FDCS->rawcmd == 2)
+		reset_fdc_info(1);
+	if (fd_inb(FD_STATUS) != STATUS_READY)
+		FDCS->reset = 1;
+}
+
+/* locks the driver */
+static int _lock_fdc(int drive, int interruptible, int line)
+{
+	if (!usage_count) {
+		printk(KERN_ERR
+		       "Trying to lock fdc while usage count=0 at line %d\n",
+		       line);
+		return -1;
+	}
+	if (floppy_grab_irq_and_dma() == -1)
+		return -EBUSY;
+
+	if (test_and_set_bit(0, &fdc_busy)) {
+		DECLARE_WAITQUEUE(wait, current);
+		add_wait_queue(&fdc_wait, &wait);
+
+		for (;;) {
+			set_current_state(TASK_INTERRUPTIBLE);
+
+			if (!test_and_set_bit(0, &fdc_busy))
+				break;
+
+			schedule();
+
+			if (!NO_SIGNAL) {
+				remove_wait_queue(&fdc_wait, &wait);
+				return -EINTR;
+			}
+		}
+
+		set_current_state(TASK_RUNNING);
+		remove_wait_queue(&fdc_wait, &wait);
+	}
+	command_status = FD_COMMAND_NONE;
+
+	__reschedule_timeout(drive, "lock fdc", 0);
+	set_fdc(drive);
+	return 0;
+}
+
+#define lock_fdc(drive,interruptible) _lock_fdc(drive,interruptible, __LINE__)
+
+#define LOCK_FDC(drive,interruptible) \
+if (lock_fdc(drive,interruptible)) return -EINTR;
+
+/* unlocks the driver */
+static inline void unlock_fdc(void)
+{
+	unsigned long flags;
+
+	raw_cmd = NULL;
+	if (!test_bit(0, &fdc_busy))
+		DPRINT("FDC access conflict!\n");
+
+	if (do_floppy)
+		DPRINT("device interrupt still active at FDC release: %p!\n",
+		       do_floppy);
+	command_status = FD_COMMAND_NONE;
+	spin_lock_irqsave(&floppy_lock, flags);
+	del_timer(&fd_timeout);
+	cont = NULL;
+	clear_bit(0, &fdc_busy);
+	if (elv_next_request(floppy_queue))
+		do_fd_request(floppy_queue);
+	spin_unlock_irqrestore(&floppy_lock, flags);
+	floppy_release_irq_and_dma();
+	wake_up(&fdc_wait);
+}
+
+/* switches the motor off after a given timeout */
+static void motor_off_callback(unsigned long nr)
+{
+	unsigned char mask = ~(0x10 << UNIT(nr));
+
+	set_dor(FDC(nr), mask, 0);
+}
+
+/* schedules motor off */
+static void floppy_off(unsigned int drive)
+{
+	unsigned long volatile delta;
+	register int fdc = FDC(drive);
+
+	if (!(FDCS->dor & (0x10 << UNIT(drive))))
+		return;
+
+	del_timer(motor_off_timer + drive);
+
+	/* make spindle stop in a position which minimizes spinup time
+	 * next time */
+	if (UDP->rps) {
+		delta = jiffies - UDRS->first_read_date + HZ -
+		    UDP->spindown_offset;
+		delta = ((delta * UDP->rps) % HZ) / UDP->rps;
+		motor_off_timer[drive].expires =
+		    jiffies + UDP->spindown - delta;
+	}
+	add_timer(motor_off_timer + drive);
+}
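+
+/*
+ * Note on the arithmetic above: with UDP->rps revolutions per second one
+ * revolution takes HZ/rps jiffies, and ((delta * rps) % HZ) / rps is
+ * (roughly) delta modulo one revolution.  Shortening the spindown timeout
+ * by that remainder makes the spindle tend to stop at the same angular
+ * position every time, so the next spinup starts from a known offset.
+ */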
+
+/*
+ * cycle through all N_DRIVE floppy drives (for disk change testing),
+ * stopping at the current drive.  This is done before any long operation,
+ * to be sure to have up-to-date disk change information.
+ */
+static void scandrives(void)
+{
+	int i, drive, saved_drive;
+
+	if (DP->select_delay)
+		return;
+
+	saved_drive = current_drive;
+	for (i = 0; i < N_DRIVE; i++) {
+		drive = (saved_drive + i + 1) % N_DRIVE;
+		if (UDRS->fd_ref == 0 || UDP->select_delay != 0)
+			continue;	/* skip closed drives */
+		set_fdc(drive);
+		if (!(set_dor(fdc, ~3, UNIT(drive) | (0x10 << UNIT(drive))) &
+		      (0x10 << UNIT(drive))))
+			/* switch the motor off again, if it was off to
+			 * begin with */
+			set_dor(fdc, ~(0x10 << UNIT(drive)), 0);
+	}
+	set_fdc(saved_drive);
+}
+
+static void empty(void)
+{
+}
+
+static DECLARE_WORK(floppy_work, NULL, NULL);
+
+static void schedule_bh(void (*handler) (void))
+{
+	PREPARE_WORK(&floppy_work, (void (*)(void *))handler, NULL);
+	schedule_work(&floppy_work);
+}
+
+static struct timer_list fd_timer = TIMER_INITIALIZER(NULL, 0, 0);
+
+static void cancel_activity(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&floppy_lock, flags);
+	do_floppy = NULL;
+	PREPARE_WORK(&floppy_work, (void *)empty, NULL);
+	del_timer(&fd_timer);
+	spin_unlock_irqrestore(&floppy_lock, flags);
+}
+
+/* this function makes sure that the disk stays in the drive during the
+ * transfer */
+static void fd_watchdog(void)
+{
+#ifdef DCL_DEBUG
+	if (DP->flags & FD_DEBUG) {
+		DPRINT("calling disk change from watchdog\n");
+	}
+#endif
+
+	if (disk_change(current_drive)) {
+		DPRINT("disk removed during i/o\n");
+		cancel_activity();
+		cont->done(0);
+		reset_fdc();
+	} else {
+		del_timer(&fd_timer);
+		fd_timer.function = (timeout_fn) fd_watchdog;
+		fd_timer.expires = jiffies + HZ / 10;
+		add_timer(&fd_timer);
+	}
+}
+
+static void main_command_interrupt(void)
+{
+	del_timer(&fd_timer);
+	cont->interrupt();
+}
+
+/* waits until a delay deadline (spinup or select, absolute jiffies) has passed */
+static int fd_wait_for_completion(unsigned long delay, timeout_fn function)
+{
+	if (FDCS->reset) {
+		reset_fdc();	/* do the reset during sleep to save time;
+				 * if we don't need to sleep, it's a good
+				 * occasion anyway */
+		return 1;
+	}
+
+	if ((signed)(jiffies - delay) < 0) {
+		del_timer(&fd_timer);
+		fd_timer.function = function;
+		fd_timer.expires = delay;
+		add_timer(&fd_timer);
+		return 1;
+	}
+	return 0;
+}
+
+static DEFINE_SPINLOCK(floppy_hlt_lock);
+static int hlt_disabled;
+static void floppy_disable_hlt(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&floppy_hlt_lock, flags);
+	if (!hlt_disabled) {
+		hlt_disabled = 1;
+#ifdef HAVE_DISABLE_HLT
+		disable_hlt();
+#endif
+	}
+	spin_unlock_irqrestore(&floppy_hlt_lock, flags);
+}
+
+static void floppy_enable_hlt(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&floppy_hlt_lock, flags);
+	if (hlt_disabled) {
+		hlt_disabled = 0;
+#ifdef HAVE_DISABLE_HLT
+		enable_hlt();
+#endif
+	}
+	spin_unlock_irqrestore(&floppy_hlt_lock, flags);
+}
+
+static void setup_DMA(void)
+{
+	unsigned long f;
+
+#ifdef FLOPPY_SANITY_CHECK
+	if (raw_cmd->length == 0) {
+		int i;
+
+		printk("zero dma transfer size:");
+		for (i = 0; i < raw_cmd->cmd_count; i++)
+			printk("%x,", raw_cmd->cmd[i]);
+		printk("\n");
+		cont->done(0);
+		FDCS->reset = 1;
+		return;
+	}
+	if (((unsigned long)raw_cmd->kernel_data) % 512) {
+		printk("non aligned address: %p\n", raw_cmd->kernel_data);
+		cont->done(0);
+		FDCS->reset = 1;
+		return;
+	}
+#endif
+	f = claim_dma_lock();
+	fd_disable_dma();
+#ifdef fd_dma_setup
+	if (fd_dma_setup(raw_cmd->kernel_data, raw_cmd->length,
+			 (raw_cmd->flags & FD_RAW_READ) ?
+			 DMA_MODE_READ : DMA_MODE_WRITE, FDCS->address) < 0) {
+		release_dma_lock(f);
+		cont->done(0);
+		FDCS->reset = 1;
+		return;
+	}
+	release_dma_lock(f);
+#else
+	fd_clear_dma_ff();
+	fd_cacheflush(raw_cmd->kernel_data, raw_cmd->length);
+	fd_set_dma_mode((raw_cmd->flags & FD_RAW_READ) ?
+			DMA_MODE_READ : DMA_MODE_WRITE);
+	fd_set_dma_addr(raw_cmd->kernel_data);
+	fd_set_dma_count(raw_cmd->length);
+	virtual_dma_port = FDCS->address;
+	fd_enable_dma();
+	release_dma_lock(f);
+#endif
+	floppy_disable_hlt();
+}
+
+static void show_floppy(void);
+
+/* waits until the fdc becomes ready */
+static int wait_til_ready(void)
+{
+	int counter, status;
+	if (FDCS->reset)
+		return -1;
+	for (counter = 0; counter < 10000; counter++) {
+		status = fd_inb(FD_STATUS);
+		if (status & STATUS_READY)
+			return status;
+	}
+	if (!initialising) {
+		DPRINT("Getstatus times out (%x) on fdc %d\n", status, fdc);
+		show_floppy();
+	}
+	FDCS->reset = 1;
+	return -1;
+}
+
+/* sends a command byte to the fdc */
+static int output_byte(char byte)
+{
+	int status;
+
+	if ((status = wait_til_ready()) < 0)
+		return -1;
+	if ((status & (STATUS_READY | STATUS_DIR | STATUS_DMA)) == STATUS_READY) {
+		fd_outb(byte, FD_DATA);
+#ifdef FLOPPY_SANITY_CHECK
+		output_log[output_log_pos].data = byte;
+		output_log[output_log_pos].status = status;
+		output_log[output_log_pos].jiffies = jiffies;
+		output_log_pos = (output_log_pos + 1) % OLOGSIZE;
+#endif
+		return 0;
+	}
+	FDCS->reset = 1;
+	if (!initialising) {
+		DPRINT("Unable to send byte %x to FDC. Fdc=%x Status=%x\n",
+		       byte, fdc, status);
+		show_floppy();
+	}
+	return -1;
+}
+
+#define LAST_OUT(x) if (output_byte(x)<0){ reset_fdc();return;}
+
+/* gets the response from the fdc */
+static int result(void)
+{
+	int i, status = 0;
+
+	for (i = 0; i < MAX_REPLIES; i++) {
+		if ((status = wait_til_ready()) < 0)
+			break;
+		status &= STATUS_DIR | STATUS_READY | STATUS_BUSY | STATUS_DMA;
+		if ((status & ~STATUS_BUSY) == STATUS_READY) {
+#ifdef FLOPPY_SANITY_CHECK
+			resultjiffies = jiffies;
+			resultsize = i;
+#endif
+			return i;
+		}
+		if (status == (STATUS_DIR | STATUS_READY | STATUS_BUSY))
+			reply_buffer[i] = fd_inb(FD_DATA);
+		else
+			break;
+	}
+	if (!initialising) {
+		DPRINT("get result error. Fdc=%d Last status=%x Read bytes=%d\n",
+		       fdc, status, i);
+		show_floppy();
+	}
+	FDCS->reset = 1;
+	return -1;
+}
+
+#define MORE_OUTPUT -2
+/* does the fdc need more output? */
+static int need_more_output(void)
+{
+	int status;
+	if ((status = wait_til_ready()) < 0)
+		return -1;
+	if ((status & (STATUS_READY | STATUS_DIR | STATUS_DMA)) == STATUS_READY)
+		return MORE_OUTPUT;
+	return result();
+}
+
+/* Set perpendicular mode as required, based on data rate, if supported.
+ * 82077 Now tested. 1Mbps data rate only possible with 82077-1.
+ */
+static inline void perpendicular_mode(void)
+{
+	unsigned char perp_mode;
+
+	if (raw_cmd->rate & 0x40) {
+		switch (raw_cmd->rate & 3) {
+		case 0:
+			perp_mode = 2;
+			break;
+		case 3:
+			perp_mode = 3;
+			break;
+		default:
+			DPRINT("Invalid data rate for perpendicular mode!\n");
+			cont->done(0);
+			FDCS->reset = 1;	/* convenient way to return to
+						 * redo without too much hassle
+						 * (deep stack et al.) */
+			return;
+		}
+	} else
+		perp_mode = 0;
+
+	if (FDCS->perp_mode == perp_mode)
+		return;
+	if (FDCS->version >= FDC_82077_ORIG) {
+		output_byte(FD_PERPENDICULAR);
+		output_byte(perp_mode);
+		FDCS->perp_mode = perp_mode;
+	} else if (perp_mode) {
+		DPRINT("perpendicular mode not supported by this FDC.\n");
+	}
+}				/* perpendicular_mode */
+
+static int fifo_depth = 0xa;
+static int no_fifo;
+
+static int fdc_configure(void)
+{
+	/* Turn on FIFO */
+	output_byte(FD_CONFIGURE);
+	if (need_more_output() != MORE_OUTPUT)
+		return 0;
+	output_byte(0);
+	/* bit 4: no polling; bit 5 (no_fifo): FIFO disable; bits 0-3: threshold */
+	output_byte(0x10 | (no_fifo & 0x20) | (fifo_depth & 0xf));
+	output_byte(0);		/* pre-compensation from track
+				   0 upwards */
+	return 1;
+}
+
+#define NOMINAL_DTR 500
+
+/* Issue a "SPECIFY" command to set the step rate time, head unload time,
+ * head load time, and DMA disable flag to values needed by floppy.
+ *
+ * The value "dtr" is the data transfer rate in Kbps.  It is needed
+ * to account for the data rate-based scaling done by the 82072 and 82077
+ * FDC types.  This parameter is ignored for other types of FDCs (i.e.
+ * 8272a).
+ *
+ * Note that changing the data transfer rate has a (probably deleterious)
+ * effect on the parameters subject to scaling for 82072/82077 FDCs, so
+ * fdc_specify is called again after each data transfer rate
+ * change.
+ *
+ * srt: 1000 to 16000 in microseconds
+ * hut: 16 to 240 milliseconds
+ * hlt: 2 to 254 milliseconds
+ *
+ * These values are rounded up to the next highest available delay time.
+ */
+static void fdc_specify(void)
+{
+	unsigned char spec1, spec2;
+	unsigned long srt, hlt, hut;
+	unsigned long dtr = NOMINAL_DTR;
+	unsigned long scale_dtr = NOMINAL_DTR;
+	int hlt_max_code = 0x7f;
+	int hut_max_code = 0xf;
+
+	if (FDCS->need_configure && FDCS->version >= FDC_82072A) {
+		fdc_configure();
+		FDCS->need_configure = 0;
+		/*DPRINT("FIFO enabled\n"); */
+	}
+
+	switch (raw_cmd->rate & 0x03) {
+	case 3:
+		dtr = 1000;
+		break;
+	case 1:
+		dtr = 300;
+		if (FDCS->version >= FDC_82078) {
+			/* choose the default rate table, not the one
+			 * where 1 = 2 Mbps */
+			output_byte(FD_DRIVESPEC);
+			if (need_more_output() == MORE_OUTPUT) {
+				output_byte(UNIT(current_drive));
+				output_byte(0xc0);
+			}
+		}
+		break;
+	case 2:
+		dtr = 250;
+		break;
+	}
+
+	if (FDCS->version >= FDC_82072) {
+		scale_dtr = dtr;
+		hlt_max_code = 0x00;	/* 0==256msec*dtr0/dtr (not linear!) */
+		hut_max_code = 0x0;	/* 0==256msec*dtr0/dtr (not linear!) */
+	}
+
+	/* Convert step rate from microseconds to milliseconds and 4 bits */
+	srt = 16 - (DP->srt * scale_dtr / 1000 + NOMINAL_DTR - 1) / NOMINAL_DTR;
+	if (slow_floppy) {
+		srt = srt / 4;
+	}
+	SUPBOUND(srt, 0xf);
+	INFBOUND(srt, 0);
+
+	hlt = (DP->hlt * scale_dtr / 2 + NOMINAL_DTR - 1) / NOMINAL_DTR;
+	if (hlt < 0x01)
+		hlt = 0x01;
+	else if (hlt > 0x7f)
+		hlt = hlt_max_code;
+
+	hut = (DP->hut * scale_dtr / 16 + NOMINAL_DTR - 1) / NOMINAL_DTR;
+	if (hut < 0x1)
+		hut = 0x1;
+	else if (hut > 0xf)
+		hut = hut_max_code;
+
+	spec1 = (srt << 4) | hut;
+	spec2 = (hlt << 1) | (use_virtual_dma & 1);
+
+	/* If these parameters did not change, just return with success */
+	if (FDCS->spec1 != spec1 || FDCS->spec2 != spec2) {
+		/* Go ahead and set spec1 and spec2 */
+		output_byte(FD_SPECIFY);
+		output_byte(FDCS->spec1 = spec1);
+		output_byte(FDCS->spec2 = spec2);
+	}
+}				/* fdc_specify */
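+
+/*
+ * Worked example of the computation above (illustrative): the default
+ * 3.5" HD drive has srt=4000us, hlt=16ms, hut=16ms and runs at 500 kbps,
+ * so scale_dtr == NOMINAL_DTR and
+ *
+ *	srt = 16 - (4000*500/1000 + 499)/500 = 16 - 4 = 12  (4 ms steps)
+ *	hut = (16*500/16 + 499)/500 = 1                     (16 ms)
+ *	hlt = (16*500/2 + 499)/500 = 8                      (16 ms)
+ *
+ * giving spec1 = (12 << 4) | 1 = 0xc1 and, without virtual DMA,
+ * spec2 = (8 << 1) | 0 = 0x10.
+ */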
+
+/* Set the FDC's data transfer rate on behalf of the specified drive.
+ * NOTE: with 82072/82077 FDCs, changing the data rate requires a reissue
+ * of the specify command (i.e. using the fdc_specify function).
+ */
+static int fdc_dtr(void)
+{
+	/* If data rate not already set to desired value, set it. */
+	if ((raw_cmd->rate & 3) == FDCS->dtr)
+		return 0;
+
+	/* Set dtr */
+	fd_outb(raw_cmd->rate & 3, FD_DCR);
+
+	/* TODO: some FDC/drive combinations (C&T 82C711 with TEAC 1.2MB)
+	 * need a stabilization period of several milliseconds to be
+	 * enforced after data rate changes before R/W operations.
+	 * Pause 5 msec to avoid trouble. (Needs to be 2 jiffies)
+	 */
+	FDCS->dtr = raw_cmd->rate & 3;
+	return (fd_wait_for_completion(jiffies + 2UL * HZ / 100,
+				       (timeout_fn) floppy_ready));
+}				/* fdc_dtr */
+
+static void tell_sector(void)
+{
+	printk(": track %d, head %d, sector %d, size %d",
+	       R_TRACK, R_HEAD, R_SECTOR, R_SIZECODE);
+}				/* tell_sector */
+
+/*
+ * OK, this error interpreting routine is called after a
+ * DMA read/write has succeeded
+ * or failed, so we check the results, and copy any buffers.
+ * hhb: Added better error reporting.
+ * ak: Made this into a separate routine.
+ */
+static int interpret_errors(void)
+{
+	char bad;
+
+	if (inr != 7) {
+		DPRINT("-- FDC reply error");
+		FDCS->reset = 1;
+		return 1;
+	}
+
+	/* check IC to find cause of interrupt */
+	switch (ST0 & ST0_INTR) {
+	case 0x40:		/* error occurred during command execution */
+		if (ST1 & ST1_EOC)
+			return 0;	/* occurs with pseudo-DMA */
+		bad = 1;
+		if (ST1 & ST1_WP) {
+			DPRINT("Drive is write protected\n");
+			CLEARF(FD_DISK_WRITABLE);
+			cont->done(0);
+			bad = 2;
+		} else if (ST1 & ST1_ND) {
+			SETF(FD_NEED_TWADDLE);
+		} else if (ST1 & ST1_OR) {
+			if (DP->flags & FTD_MSG)
+				DPRINT("Over/Underrun - retrying\n");
+			bad = 0;
+		} else if (*errors >= DP->max_errors.reporting) {
+			DPRINT("");
+			if (ST0 & ST0_ECE) {
+				printk("Recalibrate failed!");
+			} else if (ST2 & ST2_CRC) {
+				printk("data CRC error");
+				tell_sector();
+			} else if (ST1 & ST1_CRC) {
+				printk("CRC error");
+				tell_sector();
+			} else if ((ST1 & (ST1_MAM | ST1_ND))
+				   || (ST2 & ST2_MAM)) {
+				if (!probing) {
+					printk("sector not found");
+					tell_sector();
+				} else
+					printk("probe failed...");
+			} else if (ST2 & ST2_WC) {	/* seek error */
+				printk("wrong cylinder");
+			} else if (ST2 & ST2_BC) {	/* cylinder marked as bad */
+				printk("bad cylinder");
+			} else {
+				printk("unknown error. ST[0..2] are: 0x%x 0x%x 0x%x",
+				       ST0, ST1, ST2);
+				tell_sector();
+			}
+			printk("\n");
+
+		}
+		if (ST2 & ST2_WC || ST2 & ST2_BC)
+			/* wrong cylinder => recal */
+			DRS->track = NEED_2_RECAL;
+		return bad;
+	case 0x80:		/* invalid command given */
+		DPRINT("Invalid FDC command given!\n");
+		cont->done(0);
+		return 2;
+	case 0xc0:
+		DPRINT("Abnormal termination caused by polling\n");
+		cont->error();
+		return 2;
+	default:		/* (0) Normal command termination */
+		return 0;
+	}
+}
+
+/*
+ * This routine is called when everything should be correctly set up
+ * for the transfer (i.e. floppy motor is on, the correct floppy is
+ * selected, and the head is sitting on the right track).
+ */
+static void setup_rw_floppy(void)
+{
+	int i, r, flags, dflags;
+	unsigned long ready_date;
+	timeout_fn function;
+
+	flags = raw_cmd->flags;
+	if (flags & (FD_RAW_READ | FD_RAW_WRITE))
+		flags |= FD_RAW_INTR;
+
+	if ((flags & FD_RAW_SPIN) && !(flags & FD_RAW_NO_MOTOR)) {
+		ready_date = DRS->spinup_date + DP->spinup;
+		/* If spinup will take a long time, rerun scandrives
+		 * again just before spinup completion. Beware that
+		 * after scandrives, we must again wait for selection.
+		 */
+		if ((signed)(ready_date - jiffies) > DP->select_delay) {
+			ready_date -= DP->select_delay;
+			function = (timeout_fn) floppy_start;
+		} else
+			function = (timeout_fn) setup_rw_floppy;
+
+		/* wait until the floppy is spinning fast enough */
+		if (fd_wait_for_completion(ready_date, function))
+			return;
+	}
+	dflags = DRS->flags;
+
+	if ((flags & FD_RAW_READ) || (flags & FD_RAW_WRITE))
+		setup_DMA();
+
+	if (flags & FD_RAW_INTR)
+		do_floppy = main_command_interrupt;
+
+	r = 0;
+	for (i = 0; i < raw_cmd->cmd_count; i++)
+		r |= output_byte(raw_cmd->cmd[i]);
+
+	debugt("rw_command: ");
+
+	if (r) {
+		cont->error();
+		reset_fdc();
+		return;
+	}
+
+	if (!(flags & FD_RAW_INTR)) {
+		inr = result();
+		cont->interrupt();
+	} else if (flags & FD_RAW_NEED_DISK)
+		fd_watchdog();
+}
+
+static int blind_seek;
+
+/*
+ * This is the routine called after every seek (or recalibrate) interrupt
+ * from the floppy controller.
+ */
+static void seek_interrupt(void)
+{
+	debugt("seek interrupt:");
+	if (inr != 2 || (ST0 & 0xF8) != 0x20) {
+		DPRINT("seek failed\n");
+		DRS->track = NEED_2_RECAL;
+		cont->error();
+		cont->redo();
+		return;
+	}
+	if (DRS->track >= 0 && DRS->track != ST1 && !blind_seek) {
+#ifdef DCL_DEBUG
+		if (DP->flags & FD_DEBUG) {
+			DPRINT("clearing NEWCHANGE flag because of effective seek\n");
+			DPRINT("jiffies=%lu\n", jiffies);
+		}
+#endif
+		CLEARF(FD_DISK_NEWCHANGE);	/* effective seek */
+		DRS->select_date = jiffies;
+	}
+	DRS->track = ST1;	/* reply_buffer[1] is the PCN after a SENSEI */
+	floppy_ready();
+}
+
+static void check_wp(void)
+{
+	if (TESTF(FD_VERIFY)) {
+		/* check write protection */
+		output_byte(FD_GETSTATUS);
+		output_byte(UNIT(current_drive));
+		if (result() != 1) {
+			FDCS->reset = 1;
+			return;
+		}
+		CLEARF(FD_VERIFY);
+		CLEARF(FD_NEED_TWADDLE);
+#ifdef DCL_DEBUG
+		if (DP->flags & FD_DEBUG) {
+			DPRINT("checking whether disk is write protected\n");
+			DPRINT("wp=%x\n", ST3 & 0x40);
+		}
+#endif
+		if (!(ST3 & 0x40))
+			SETF(FD_DISK_WRITABLE);
+		else
+			CLEARF(FD_DISK_WRITABLE);
+	}
+}
+
+static void seek_floppy(void)
+{
+	int track;
+
+	blind_seek = 0;
+
+#ifdef DCL_DEBUG
+	if (DP->flags & FD_DEBUG) {
+		DPRINT("calling disk change from seek\n");
+	}
+#endif
+
+	if (!TESTF(FD_DISK_NEWCHANGE) &&
+	    disk_change(current_drive) && (raw_cmd->flags & FD_RAW_NEED_DISK)) {
+		/* the media changed flag should be cleared after the seek.
+		 * If it isn't, this means that there is really no disk in
+		 * the drive.
+		 */
+		SETF(FD_DISK_CHANGED);
+		cont->done(0);
+		cont->redo();
+		return;
+	}
+	if (DRS->track <= NEED_1_RECAL) {
+		recalibrate_floppy();
+		return;
+	} else if (TESTF(FD_DISK_NEWCHANGE) &&
+		   (raw_cmd->flags & FD_RAW_NEED_DISK) &&
+		   (DRS->track <= NO_TRACK || DRS->track == raw_cmd->track)) {
+		/* we seek to clear the media-changed condition. Does anybody
+		 * know a more elegant way, which works on all drives? */
+		if (raw_cmd->track)
+			track = raw_cmd->track - 1;
+		else {
+			if (DP->flags & FD_SILENT_DCL_CLEAR) {
+				set_dor(fdc, ~(0x10 << UNIT(current_drive)), 0);
+				blind_seek = 1;
+				raw_cmd->flags |= FD_RAW_NEED_SEEK;
+			}
+			track = 1;
+		}
+	} else {
+		check_wp();
+		if (raw_cmd->track != DRS->track &&
+		    (raw_cmd->flags & FD_RAW_NEED_SEEK))
+			track = raw_cmd->track;
+		else {
+			setup_rw_floppy();
+			return;
+		}
+	}
+
+	do_floppy = seek_interrupt;
+	output_byte(FD_SEEK);
+	output_byte(UNIT(current_drive));
+	LAST_OUT(track);
+	debugt("seek command:");
+}
+
+static void recal_interrupt(void)
+{
+	debugt("recal interrupt:");
+	if (inr != 2)
+		FDCS->reset = 1;
+	else if (ST0 & ST0_ECE) {
+		switch (DRS->track) {
+		case NEED_1_RECAL:
+			debugt("recal interrupt need 1 recal:");
+			/* after a second recalibrate, we still haven't
+			 * reached track 0. Probably no drive. Raise an
+			 * error, as failing immediately might upset
+			 * computers possessed by the Devil :-) */
+			cont->error();
+			cont->redo();
+			return;
+		case NEED_2_RECAL:
+			debugt("recal interrupt need 2 recal:");
+			/* If we already did a recalibrate,
+			 * and we are not at track 0, this
+			 * means we have moved. (The only way
+			 * not to move at recalibration is to
+			 * be already at track 0.) Clear the
+			 * new change flag */
+#ifdef DCL_DEBUG
+			if (DP->flags & FD_DEBUG) {
+				DPRINT("clearing NEWCHANGE flag because of second recalibrate\n");
+			}
+#endif
+
+			CLEARF(FD_DISK_NEWCHANGE);
+			DRS->select_date = jiffies;
+			/* fall through */
+		default:
+			debugt("recal interrupt default:");
+			/* Recalibrate moves the head by at
+			 * most 80 steps. If after one
+			 * recalibrate we haven't reached
+			 * track 0, this might mean that we
+			 * started beyond track 80.  Try
+			 * again.  */
+			DRS->track = NEED_1_RECAL;
+			break;
+		}
+	} else
+		DRS->track = ST1;
+	floppy_ready();
+}
+
+static void print_result(char *message, int inr)
+{
+	int i;
+
+	DPRINT("%s ", message);
+	if (inr >= 0)
+		for (i = 0; i < inr; i++)
+			printk("repl[%d]=%x ", i, reply_buffer[i]);
+	printk("\n");
+}
+
+/* interrupt handler. Note that this can be called externally on the Sparc */
+irqreturn_t floppy_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+	void (*handler) (void) = do_floppy;
+	int do_print;
+	unsigned long f;
+
+	lasthandler = handler;
+	interruptjiffies = jiffies;
+
+	f = claim_dma_lock();
+	fd_disable_dma();
+	release_dma_lock(f);
+
+	floppy_enable_hlt();
+	do_floppy = NULL;
+	if (fdc >= N_FDC || FDCS->address == -1) {
+		/* we don't even know which FDC is the culprit */
+		printk("DOR0=%x\n", fdc_state[0].dor);
+		printk("floppy interrupt on bizarre fdc %d\n", fdc);
+		printk("handler=%p\n", handler);
+		is_alive("bizarre fdc");
+		return IRQ_NONE;
+	}
+
+	FDCS->reset = 0;
+	/* We have to clear the reset flag here, because apparently on boxes
+	 * with level triggered interrupts (PS/2, Sparc, ...), it is needed to
+	 * emit SENSEI's to clear the interrupt line. And FDCS->reset blocks the
+	 * emission of the SENSEI's.
+	 * It is OK to emit floppy commands because we are in an interrupt
+	 * handler here, and thus we have to fear no interference of other
+	 * activity.
+	 */
+
+	do_print = !handler && print_unex && !initialising;
+
+	inr = result();
+	if (do_print)
+		print_result("unexpected interrupt", inr);
+	if (inr == 0) {
+		int max_sensei = 4;
+		do {
+			output_byte(FD_SENSEI);
+			inr = result();
+			if (do_print)
+				print_result("sensei", inr);
+			max_sensei--;
+		} while ((ST0 & 0x83) != UNIT(current_drive) && inr == 2
+			 && max_sensei);
+	}
+	if (!handler) {
+		FDCS->reset = 1;
+		return IRQ_NONE;
+	}
+	schedule_bh(handler);
+	is_alive("normal interrupt end");
+
+	/* FIXME! Was it really for us? */
+	return IRQ_HANDLED;
+}
+
+static void recalibrate_floppy(void)
+{
+	debugt("recalibrate floppy:");
+	do_floppy = recal_interrupt;
+	output_byte(FD_RECALIBRATE);
+	LAST_OUT(UNIT(current_drive));
+}
+
+/*
+ * Must do 4 FD_SENSEIs after reset because of ``drive polling''.
+ */
+static void reset_interrupt(void)
+{
+	debugt("reset interrupt:");
+	result();		/* get the status ready for set_fdc */
+	if (FDCS->reset) {
+		printk("reset set in interrupt, calling %p\n", cont->error);
+		cont->error();	/* a reset just after a reset. BAD! */
+	}
+	cont->redo();
+}
+
+/*
+ * reset is done by pulling bit 2 of DOR low for a while (old FDCs),
+ * or by setting the self clearing bit 7 of STATUS (newer FDCs)
+ */
+static void reset_fdc(void)
+{
+	unsigned long flags;
+
+	do_floppy = reset_interrupt;
+	FDCS->reset = 0;
+	reset_fdc_info(0);
+
+	/* Pseudo-DMA may intercept 'reset finished' interrupt.  */
+	/* Irrelevant for systems with true DMA (i386).          */
+
+	flags = claim_dma_lock();
+	fd_disable_dma();
+	release_dma_lock(flags);
+
+	if (FDCS->version >= FDC_82072A)
+		fd_outb(0x80 | (FDCS->dtr & 3), FD_STATUS);
+	else {
+		fd_outb(FDCS->dor & ~0x04, FD_DOR);
+		udelay(FD_RESET_DELAY);
+		fd_outb(FDCS->dor, FD_DOR);
+	}
+}
+
+static void show_floppy(void)
+{
+	int i;
+
+	printk("\n");
+	printk("floppy driver state\n");
+	printk("-------------------\n");
+	printk("now=%lu last interrupt=%lu diff=%lu last called handler=%p\n",
+	       jiffies, interruptjiffies, jiffies - interruptjiffies,
+	       lasthandler);
+
+#ifdef FLOPPY_SANITY_CHECK
+	printk("timeout_message=%s\n", timeout_message);
+	printk("last output bytes:\n");
+	for (i = 0; i < OLOGSIZE; i++)
+		printk("%2x %2x %lu\n",
+		       output_log[(i + output_log_pos) % OLOGSIZE].data,
+		       output_log[(i + output_log_pos) % OLOGSIZE].status,
+		       output_log[(i + output_log_pos) % OLOGSIZE].jiffies);
+	printk("last result at %lu\n", resultjiffies);
+	printk("last redo_fd_request at %lu\n", lastredo);
+	for (i = 0; i < resultsize; i++) {
+		printk("%2x ", reply_buffer[i]);
+	}
+	printk("\n");
+#endif
+
+	printk("status=%x\n", fd_inb(FD_STATUS));
+	printk("fdc_busy=%lu\n", fdc_busy);
+	if (do_floppy)
+		printk("do_floppy=%p\n", do_floppy);
+	if (floppy_work.pending)
+		printk("floppy_work.func=%p\n", floppy_work.func);
+	if (timer_pending(&fd_timer))
+		printk("fd_timer.function=%p\n", fd_timer.function);
+	if (timer_pending(&fd_timeout)) {
+		printk("timer_function=%p\n", fd_timeout.function);
+		printk("expires=%lu\n", fd_timeout.expires - jiffies);
+		printk("now=%lu\n", jiffies);
+	}
+	printk("cont=%p\n", cont);
+	printk("current_req=%p\n", current_req);
+	printk("command_status=%d\n", command_status);
+	printk("\n");
+}
+
+static void floppy_shutdown(unsigned long data)
+{
+	unsigned long flags;
+
+	if (!initialising)
+		show_floppy();
+	cancel_activity();
+
+	floppy_enable_hlt();
+
+	flags = claim_dma_lock();
+	fd_disable_dma();
+	release_dma_lock(flags);
+
+	/* avoid dma going to a random drive after shutdown */
+
+	if (!initialising)
+		DPRINT("floppy timeout called\n");
+	FDCS->reset = 1;
+	if (cont) {
+		cont->done(0);
+		cont->redo();	/* this will recall reset when needed */
+	} else {
+		printk("no cont in shutdown!\n");
+		process_fd_request();
+	}
+	is_alive("floppy shutdown");
+}
+
+/* start motor, check media-changed condition and write protection */
+static int start_motor(void (*function) (void))
+{
+	int mask, data;
+
+	mask = 0xfc;
+	data = UNIT(current_drive);
+	if (!(raw_cmd->flags & FD_RAW_NO_MOTOR)) {
+		if (!(FDCS->dor & (0x10 << UNIT(current_drive)))) {
+			set_debugt();
+			/* no read since this drive is running */
+			DRS->first_read_date = 0;
+			/* note motor start time if motor is not yet running */
+			DRS->spinup_date = jiffies;
+			data |= (0x10 << UNIT(current_drive));
+		}
+	} else if (FDCS->dor & (0x10 << UNIT(current_drive)))
+		mask &= ~(0x10 << UNIT(current_drive));
+
+	/* starts motor and selects floppy */
+	del_timer(motor_off_timer + current_drive);
+	set_dor(fdc, mask, data);
+
+	/* fd_wait_for_completion also schedules reset if needed. */
+	return (fd_wait_for_completion(DRS->select_date + DP->select_delay,
+				       (timeout_fn) function));
+}
+
+static void floppy_ready(void)
+{
+	CHECK_RESET;
+	if (start_motor(floppy_ready))
+		return;
+	if (fdc_dtr())
+		return;
+
+#ifdef DCL_DEBUG
+	if (DP->flags & FD_DEBUG) {
+		DPRINT("calling disk change from floppy_ready\n");
+	}
+#endif
+	if (!(raw_cmd->flags & FD_RAW_NO_MOTOR) &&
+	    disk_change(current_drive) && !DP->select_delay)
+		twaddle();	/* this clears the dcl on certain drive/controller
+				 * combinations */
+
+#ifdef fd_chose_dma_mode
+	if ((raw_cmd->flags & FD_RAW_READ) || (raw_cmd->flags & FD_RAW_WRITE)) {
+		unsigned long flags = claim_dma_lock();
+		fd_chose_dma_mode(raw_cmd->kernel_data, raw_cmd->length);
+		release_dma_lock(flags);
+	}
+#endif
+
+	if (raw_cmd->flags & (FD_RAW_NEED_SEEK | FD_RAW_NEED_DISK)) {
+		perpendicular_mode();
+		fdc_specify();	/* must be done here because of hut, hlt ... */
+		seek_floppy();
+	} else {
+		if ((raw_cmd->flags & FD_RAW_READ) ||
+		    (raw_cmd->flags & FD_RAW_WRITE))
+			fdc_specify();
+		setup_rw_floppy();
+	}
+}
+
+static void floppy_start(void)
+{
+	reschedule_timeout(current_reqD, "floppy start", 0);
+
+	scandrives();
+#ifdef DCL_DEBUG
+	if (DP->flags & FD_DEBUG) {
+		DPRINT("setting NEWCHANGE in floppy_start\n");
+	}
+#endif
+	SETF(FD_DISK_NEWCHANGE);
+	floppy_ready();
+}
+
+/*
+ * ========================================================================
+ * here ends the bottom half. Exported routines are:
+ * floppy_start, floppy_off, floppy_ready, lock_fdc, unlock_fdc, set_fdc,
+ * start_motor, reset_fdc, reset_fdc_info, interpret_errors.
+ * Initialization also uses output_byte, result, set_dor and
+ * floppy_interrupt.
+ * ========================================================================
+ */
+/*
+ * General purpose continuations.
+ * ==============================
+ */
+
+static void do_wakeup(void)
+{
+	reschedule_timeout(MAXTIMEOUT, "do wakeup", 0);
+	cont = NULL;
+	command_status += 2;
+	wake_up(&command_done);
+}
+
+static struct cont_t wakeup_cont = {
+	.interrupt	= empty,
+	.redo		= do_wakeup,
+	.error		= empty,
+	.done		= (done_f) empty
+};
+
+static struct cont_t intr_cont = {
+	.interrupt	= empty,
+	.redo		= process_fd_request,
+	.error		= empty,
+	.done		= (done_f) empty
+};
+
+static int wait_til_done(void (*handler) (void), int interruptible)
+{
+	int ret;
+
+	schedule_bh(handler);
+
+	if (command_status < 2 && NO_SIGNAL) {
+		DECLARE_WAITQUEUE(wait, current);
+
+		add_wait_queue(&command_done, &wait);
+		for (;;) {
+			set_current_state(interruptible ?
+					  TASK_INTERRUPTIBLE :
+					  TASK_UNINTERRUPTIBLE);
+
+			if (command_status >= 2 || !NO_SIGNAL)
+				break;
+
+			is_alive("wait_til_done");
+
+			schedule();
+		}
+
+		set_current_state(TASK_RUNNING);
+		remove_wait_queue(&command_done, &wait);
+	}
+
+	if (command_status < 2) {
+		cancel_activity();
+		cont = &intr_cont;
+		reset_fdc();
+		return -EINTR;
+	}
+
+	if (FDCS->reset)
+		command_status = FD_COMMAND_ERROR;
+	if (command_status == FD_COMMAND_OKAY)
+		ret = 0;
+	else
+		ret = -EIO;
+	command_status = FD_COMMAND_NONE;
+	return ret;
+}
+
+static void generic_done(int result)
+{
+	command_status = result;
+	cont = &wakeup_cont;
+}
+
+static void generic_success(void)
+{
+	cont->done(1);
+}
+
+static void generic_failure(void)
+{
+	cont->done(0);
+}
+
+static void success_and_wakeup(void)
+{
+	generic_success();
+	cont->redo();
+}
+
+/*
+ * formatting and rw support.
+ * ==========================
+ */
+
+static int next_valid_format(void)
+{
+	int probed_format;
+
+	probed_format = DRS->probed_format;
+	while (1) {
+		if (probed_format >= 8 || !DP->autodetect[probed_format]) {
+			DRS->probed_format = 0;
+			return 1;
+		}
+		if (floppy_type[DP->autodetect[probed_format]].sect) {
+			DRS->probed_format = probed_format;
+			return 0;
+		}
+		probed_format++;
+	}
+}
+
+static void bad_flp_intr(void)
+{
+	int err_count;
+
+	if (probing) {
+		DRS->probed_format++;
+		if (!next_valid_format())
+			return;
+	}
+	err_count = ++(*errors);
+	INFBOUND(DRWE->badness, err_count);
+	if (err_count > DP->max_errors.abort)
+		cont->done(0);
+	if (err_count > DP->max_errors.reset)
+		FDCS->reset = 1;
+	else if (err_count > DP->max_errors.recal)
+		DRS->track = NEED_2_RECAL;
+}
+
+static void set_floppy(int drive)
+{
+	int type = ITYPE(UDRS->fd_device);
+	if (type)
+		_floppy = floppy_type + type;
+	else
+		_floppy = current_type[drive];
+}
+
+/*
+ * formatting support.
+ * ===================
+ */
+static void format_interrupt(void)
+{
+	switch (interpret_errors()) {
+	case 1:
+		cont->error();
+		/* fall through */
+	case 2:
+		break;
+	case 0:
+		cont->done(1);
+	}
+	cont->redo();
+}
+
+#define CODE2SIZE (ssize = ((1 << SIZECODE) + 3) >> 2)	/* 512-byte units */
+/* drop the MFM bit (0x40) from command (y) if the format is FM-only (rate bit 0x80) */
+#define FM_MODE(x,y) ((y) & ~(((x)->rate & 0x80) >>1))
+#define CT(x) ((x) | 0xc0)	/* compare commands ignoring the MT/MFM bits */
+static void setup_format_params(int track)
+{
+	struct fparm {
+		unsigned char track, head, sect, size;
+	} *here = (struct fparm *)floppy_track_buffer;
+	int il, n;
+	int count, head_shift, track_shift;
+
+	raw_cmd = &default_raw_cmd;
+	raw_cmd->track = track;
+
+	raw_cmd->flags = FD_RAW_WRITE | FD_RAW_INTR | FD_RAW_SPIN |
+	    FD_RAW_NEED_DISK | FD_RAW_NEED_SEEK;
+	raw_cmd->rate = _floppy->rate & 0x43;
+	raw_cmd->cmd_count = NR_F;
+	COMMAND = FM_MODE(_floppy, FD_FORMAT);
+	DR_SELECT = UNIT(current_drive) + PH_HEAD(_floppy, format_req.head);
+	F_SIZECODE = FD_SIZECODE(_floppy);
+	F_SECT_PER_TRACK = _floppy->sect << 2 >> F_SIZECODE;
+	F_GAP = _floppy->fmt_gap;
+	F_FILL = FD_FILL_BYTE;
+
+	raw_cmd->kernel_data = floppy_track_buffer;
+	raw_cmd->length = 4 * F_SECT_PER_TRACK;
+
+	/* allow for about 30ms for data transport per track */
+	head_shift = (F_SECT_PER_TRACK + 5) / 6;
+
+	/* a ``cylinder'' is two tracks plus a little stepping time */
+	track_shift = 2 * head_shift + 3;
+
+	/* position of logical sector 1 on this track */
+	n = (track_shift * format_req.track + head_shift * format_req.head)
+	    % F_SECT_PER_TRACK;
+
+	/* determine interleave */
+	il = 1;
+	if (_floppy->fmt_gap < 0x22)
+		il++;
+
+	/* initialize field */
+	for (count = 0; count < F_SECT_PER_TRACK; ++count) {
+		here[count].track = format_req.track;
+		here[count].head = format_req.head;
+		here[count].sect = 0;
+		here[count].size = F_SIZECODE;
+	}
+	/* place logical sectors */
+	for (count = 1; count <= F_SECT_PER_TRACK; ++count) {
+		here[n].sect = count;
+		n = (n + il) % F_SECT_PER_TRACK;
+		if (here[n].sect) {	/* sector busy, find next free sector */
+			++n;
+			if (n >= F_SECT_PER_TRACK) {
+				n -= F_SECT_PER_TRACK;
+				while (here[n].sect)
+					++n;
+			}
+		}
+	}
+	if (_floppy->stretch & FD_ZEROBASED) {
+		for (count = 0; count < F_SECT_PER_TRACK; count++)
+			here[count].sect--;
+	}
+}
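+
+/*
+ * Illustrative example of the skew computed above: a 1.44M track has 18
+ * sectors and fmt_gap 0x6c >= 0x22, so il == 1, head_shift == (18+5)/6
+ * == 3 and track_shift == 9.  When formatting track 1, head 0, logical
+ * sector 1 is placed in physical slot (9*1 + 3*0) % 18 == 9, giving the
+ * head time to step without losing a whole revolution.
+ */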
+
+static void redo_format(void)
+{
+	buffer_track = -1;
+	setup_format_params(format_req.track << STRETCH(_floppy));
+	floppy_start();
+	debugt("queue format request");
+}
+
+static struct cont_t format_cont = {
+	.interrupt	= format_interrupt,
+	.redo		= redo_format,
+	.error		= bad_flp_intr,
+	.done		= generic_done
+};
+
+static int do_format(int drive, struct format_descr *tmp_format_req)
+{
+	int ret;
+
+	LOCK_FDC(drive, 1);
+	set_floppy(drive);
+	if (!_floppy ||
+	    _floppy->track > DP->tracks ||
+	    tmp_format_req->track >= _floppy->track ||
+	    tmp_format_req->head >= _floppy->head ||
+	    (_floppy->sect << 2) % (1 << FD_SIZECODE(_floppy)) ||
+	    !_floppy->fmt_gap) {
+		process_fd_request();
+		return -EINVAL;
+	}
+	format_req = *tmp_format_req;
+	format_errors = 0;
+	cont = &format_cont;
+	errors = &format_errors;
+	IWAIT(redo_format);
+	process_fd_request();
+	return ret;
+}
+
+/*
+ * Buffer read/write and support
+ * =============================
+ */
+
+static void floppy_end_request(struct request *req, int uptodate)
+{
+	unsigned int nr_sectors = current_count_sectors;
+
+	/* current_count_sectors can be zero if transfer failed */
+	if (!uptodate)
+		nr_sectors = req->current_nr_sectors;
+	if (end_that_request_first(req, uptodate, nr_sectors))
+		return;
+	add_disk_randomness(req->rq_disk);
+	floppy_off((long)req->rq_disk->private_data);
+	blkdev_dequeue_request(req);
+	end_that_request_last(req);
+
+	/* We're done with the request */
+	current_req = NULL;
+}
+
+/* new request_done. Can handle physical sectors which are smaller than a
+ * logical buffer */
+static void request_done(int uptodate)
+{
+	struct request_queue *q = floppy_queue;
+	struct request *req = current_req;
+	unsigned long flags;
+	int block;
+
+	probing = 0;
+	reschedule_timeout(MAXTIMEOUT, "request done %d", uptodate);
+
+	if (!req) {
+		printk("floppy.c: no request in request_done\n");
+		return;
+	}
+
+	if (uptodate) {
+		/* maintain values for invalidation on geometry
+		 * change */
+		block = current_count_sectors + req->sector;
+		INFBOUND(DRS->maxblock, block);
+		if (block > _floppy->sect)
+			DRS->maxtrack = 1;
+
+		/* unlock chained buffers */
+		spin_lock_irqsave(q->queue_lock, flags);
+		floppy_end_request(req, 1);
+		spin_unlock_irqrestore(q->queue_lock, flags);
+	} else {
+		if (rq_data_dir(req) == WRITE) {
+			/* record write error information */
+			DRWE->write_errors++;
+			if (DRWE->write_errors == 1) {
+				DRWE->first_error_sector = req->sector;
+				DRWE->first_error_generation = DRS->generation;
+			}
+			DRWE->last_error_sector = req->sector;
+			DRWE->last_error_generation = DRS->generation;
+		}
+		spin_lock_irqsave(q->queue_lock, flags);
+		floppy_end_request(req, 0);
+		spin_unlock_irqrestore(q->queue_lock, flags);
+	}
+}
+
+/* Interrupt handler evaluating the result of the r/w operation */
+static void rw_interrupt(void)
+{
+	int nr_sectors, ssize, eoc, heads;
+
+	if (R_HEAD >= 2) {
+		/* some Toshiba floppy controllers occasionally seem to
+		 * return bogus interrupts after read/write operations, which
+		 * can be recognized by a bad head number (>= 2) */
+		return;
+	}
+
+	if (!DRS->first_read_date)
+		DRS->first_read_date = jiffies;
+
+	nr_sectors = 0;
+	CODE2SIZE;
+
+	if (ST1 & ST1_EOC)
+		eoc = 1;
+	else
+		eoc = 0;
+
+	if (COMMAND & 0x80)
+		heads = 2;
+	else
+		heads = 1;
+
+	nr_sectors = (((R_TRACK - TRACK) * heads +
+		       R_HEAD - HEAD) * SECT_PER_TRACK +
+		      R_SECTOR - SECTOR + eoc) << SIZECODE >> 2;
+
+#ifdef FLOPPY_SANITY_CHECK
+	if (nr_sectors / ssize >
+	    (in_sector_offset + current_count_sectors + ssize - 1) / ssize) {
+		DPRINT("long rw: %x instead of %lx\n",
+		       nr_sectors, current_count_sectors);
+		printk("rs=%d s=%d\n", R_SECTOR, SECTOR);
+		printk("rh=%d h=%d\n", R_HEAD, HEAD);
+		printk("rt=%d t=%d\n", R_TRACK, TRACK);
+		printk("heads=%d eoc=%d\n", heads, eoc);
+		printk("spt=%d st=%d ss=%d\n", SECT_PER_TRACK,
+		       fsector_t, ssize);
+		printk("in_sector_offset=%d\n", in_sector_offset);
+	}
+#endif
+
+	nr_sectors -= in_sector_offset;
+	INFBOUND(nr_sectors, 0);
+	SUPBOUND(current_count_sectors, nr_sectors);
+
+	switch (interpret_errors()) {
+	case 2:
+		cont->redo();
+		return;
+	case 1:
+		if (!current_count_sectors) {
+			cont->error();
+			cont->redo();
+			return;
+		}
+		break;
+	case 0:
+		if (!current_count_sectors) {
+			cont->redo();
+			return;
+		}
+		current_type[current_drive] = _floppy;
+		floppy_sizes[TOMINOR(current_drive)] = _floppy->size;
+		break;
+	}
+
+	if (probing) {
+		if (DP->flags & FTD_MSG)
+			DPRINT("Auto-detected floppy type %s in fd%d\n",
+			       _floppy->name, current_drive);
+		current_type[current_drive] = _floppy;
+		floppy_sizes[TOMINOR(current_drive)] = _floppy->size;
+		probing = 0;
+	}
+
+	if (CT(COMMAND) != FD_READ ||
+	    raw_cmd->kernel_data == current_req->buffer) {
+		/* transfer directly from buffer */
+		cont->done(1);
+	} else if (CT(COMMAND) == FD_READ) {
+		buffer_track = raw_cmd->track;
+		buffer_drive = current_drive;
+		INFBOUND(buffer_max, nr_sectors + fsector_t);
+	}
+	cont->redo();
+}
+
+/* Compute the maximal physically contiguous buffer size at the start of the
+ * request, in 512-byte sectors. */
+static int buffer_chain_size(void)
+{
+	struct bio *bio;
+	struct bio_vec *bv;
+	int size, i;
+	char *base;
+
+	base = bio_data(current_req->bio);
+	size = 0;
+
+	rq_for_each_bio(bio, current_req) {
+		bio_for_each_segment(bv, bio, i) {
+			if (page_address(bv->bv_page) + bv->bv_offset !=
+			    base + size)
+				break;
+
+			size += bv->bv_len;
+		}
+	}
+
+	return size >> 9;
+}
+
+/* Compute the maximal transfer size */
+static int transfer_size(int ssize, int max_sector, int max_size)
+{
+	SUPBOUND(max_sector, fsector_t + max_size);
+
+	/* alignment */
+	max_sector -= (max_sector % _floppy->sect) % ssize;
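+	/* e.g. with 1 KB hardware sectors (ssize == 2) this rounds an odd
+	 * within-track end position down to an even 512-byte boundary */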
+
+	/* transfer size, beginning not aligned */
+	current_count_sectors = max_sector - fsector_t;
+
+	return max_sector;
+}
+
+/*
+ * Move data between the track buffer and the buffer cache, in whichever
+ * direction the command requires.
+ */
+static void copy_buffer(int ssize, int max_sector, int max_sector_2)
+{
+	int remaining;		/* number of bytes remaining to transfer */
+	struct bio_vec *bv;
+	struct bio *bio;
+	char *buffer, *dma_buffer;
+	int size, i;
+
+	max_sector = transfer_size(ssize,
+				   min(max_sector, max_sector_2),
+				   current_req->nr_sectors);
+
+	if (current_count_sectors <= 0 && CT(COMMAND) == FD_WRITE &&
+	    buffer_max > fsector_t + current_req->nr_sectors)
+		current_count_sectors = min_t(int, buffer_max - fsector_t,
+					      current_req->nr_sectors);
+
+	remaining = current_count_sectors << 9;
+#ifdef FLOPPY_SANITY_CHECK
+	if ((remaining >> 9) > current_req->nr_sectors &&
+	    CT(COMMAND) == FD_WRITE) {
+		DPRINT("in copy buffer\n");
+		printk("current_count_sectors=%ld\n", current_count_sectors);
+		printk("remaining=%d\n", remaining >> 9);
+		printk("current_req->nr_sectors=%ld\n",
+		       current_req->nr_sectors);
+		printk("current_req->current_nr_sectors=%u\n",
+		       current_req->current_nr_sectors);
+		printk("max_sector=%d\n", max_sector);
+		printk("ssize=%d\n", ssize);
+	}
+#endif
+
+	buffer_max = max(max_sector, buffer_max);
+
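+	/* the track buffer caches sectors buffer_min..buffer_max of the
+	 * current track; index into it in bytes, relative to buffer_min */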
+	dma_buffer = floppy_track_buffer + ((fsector_t - buffer_min) << 9);
+
+	size = current_req->current_nr_sectors << 9;
+
+	rq_for_each_bio(bio, current_req) {
+		bio_for_each_segment(bv, bio, i) {
+			if (!remaining)
+				break;
+
+			size = bv->bv_len;
+			SUPBOUND(size, remaining);
+
+			buffer = page_address(bv->bv_page) + bv->bv_offset;
+#ifdef FLOPPY_SANITY_CHECK
+			if (dma_buffer + size >
+			    floppy_track_buffer + (max_buffer_sectors << 10) ||
+			    dma_buffer < floppy_track_buffer) {
+				DPRINT("buffer overrun in copy buffer %d\n",
+				       (int)((floppy_track_buffer -
+					      dma_buffer) >> 9));
+				printk("fsector_t=%d buffer_min=%d\n",
+				       fsector_t, buffer_min);
+				printk("current_count_sectors=%ld\n",
+				       current_count_sectors);
+				if (CT(COMMAND) == FD_READ)
+					printk("read\n");
+				if (CT(COMMAND) == FD_WRITE)
+					printk("write\n");
+				break;
+			}
+			if (((unsigned long)buffer) % 512)
+				DPRINT("%p buffer not aligned\n", buffer);
+#endif
+			if (CT(COMMAND) == FD_READ)
+				memcpy(buffer, dma_buffer, size);
+			else
+				memcpy(dma_buffer, buffer, size);
+
+			remaining -= size;
+			dma_buffer += size;
+		}
+	}
+#ifdef FLOPPY_SANITY_CHECK
+	if (remaining) {
+		if (remaining > 0)
+			max_sector -= remaining >> 9;
+		DPRINT("weirdness: remaining %d\n", remaining >> 9);
+	}
+#endif
+}
+
+#if 0
+static inline int check_dma_crossing(char *start,
+				     unsigned long length, char *message)
+{
+	if (CROSS_64KB(start, length)) {
+		printk("DMA xfer crosses 64KB boundary in %s %p-%p\n",
+		       message, start, start + length);
+		return 1;
+	} else
+		return 0;
+}
+#endif
+
+/* Work around a bug in pseudo DMA: on some FDCs, pseudo DMA does not stop
+ * when the CPU stops sending data.  Hence we need a different way to signal
+ * the transfer length: we use SECT_PER_TRACK.  Unfortunately, this does not
+ * work with multiple-track (MT) mode, hence we can only transfer one head
+ * at a time.
+ */
+static void virtualdmabug_workaround(void)
+{
+	int hard_sectors, end_sector;
+
+	if (CT(COMMAND) == FD_WRITE) {
+		COMMAND &= ~0x80;	/* switch off multiple track mode */
+
+		hard_sectors = raw_cmd->length >> (7 + SIZECODE);
+		end_sector = SECTOR + hard_sectors - 1;
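+		/* e.g. SIZECODE == 2 means 512-byte sectors, so the byte
+		 * count in raw_cmd->length is shifted down by 7 + 2 = 9 */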
+#ifdef FLOPPY_SANITY_CHECK
+		if (end_sector > SECT_PER_TRACK) {
+			printk("too many sectors %d > %d\n",
+			       end_sector, SECT_PER_TRACK);
+			return;
+		}
+#endif
+		SECT_PER_TRACK = end_sector;	/* make sure SECT_PER_TRACK points
+						 * to end of transfer */
+	}
+}
+
+/*
+ * Formulate a read/write request.
+ * This routine decides where to load the data (directly into the buffer, or
+ * into the temporary floppy area), and how much data to load (the size of
+ * the buffer, the whole track, or a single sector).
+ * All floppy_track_buffer handling goes in here.  If we ever add track
+ * buffer allocation on the fly, it should be done here.  No other part
+ * should need modification.
+ */
+
+static int make_raw_rw_request(void)
+{
+	int aligned_sector_t;
+	int max_sector, max_size, tracksize, ssize;
+
+	if (max_buffer_sectors == 0) {
+		printk("VFS: Block I/O scheduled on unopened device\n");
+		return 0;
+	}
+
+	set_fdc((long)current_req->rq_disk->private_data);
+
+	raw_cmd = &default_raw_cmd;
+	raw_cmd->flags = FD_RAW_SPIN | FD_RAW_NEED_DISK | FD_RAW_NEED_SEEK;
+	raw_cmd->cmd_count = NR_RW;
+	if (rq_data_dir(current_req) == READ) {
+		raw_cmd->flags |= FD_RAW_READ;
+		COMMAND = FM_MODE(_floppy, FD_READ);
+	} else if (rq_data_dir(current_req) == WRITE) {
+		raw_cmd->flags |= FD_RAW_WRITE;
+		COMMAND = FM_MODE(_floppy, FD_WRITE);
+	} else {
+		DPRINT("make_raw_rw_request: unknown command\n");
+		return 0;
+	}
+
+	max_sector = _floppy->sect * _floppy->head;
+
+	TRACK = (int)current_req->sector / max_sector;
+	fsector_t = (int)current_req->sector % max_sector;
+	if (_floppy->track && TRACK >= _floppy->track) {
+		if (current_req->current_nr_sectors & 1) {
+			current_count_sectors = 1;
+			return 1;
+		} else
+			return 0;
+	}
+	HEAD = fsector_t / _floppy->sect;
+
+	if (((_floppy->stretch & (FD_SWAPSIDES | FD_ZEROBASED)) ||
+	     TESTF(FD_NEED_TWADDLE)) && fsector_t < _floppy->sect)
+		max_sector = _floppy->sect;
+
+	/* 2M disks have phantom sectors on the first track */
+	if ((_floppy->rate & FD_2M) && (!TRACK) && (!HEAD)) {
+		max_sector = 2 * _floppy->sect / 3;
+		if (fsector_t >= max_sector) {
+			current_count_sectors =
+			    min_t(int, _floppy->sect - fsector_t,
+				  current_req->nr_sectors);
+			return 1;
+		}
+		SIZECODE = 2;
+	} else
+		SIZECODE = FD_SIZECODE(_floppy);
+	raw_cmd->rate = _floppy->rate & 0x43;
+	if ((_floppy->rate & FD_2M) && (TRACK || HEAD) && raw_cmd->rate == 2)
+		raw_cmd->rate = 1;
+
+	if (SIZECODE)
+		SIZECODE2 = 0xff;
+	else
+		SIZECODE2 = 0x80;
+	raw_cmd->track = TRACK << STRETCH(_floppy);
+	DR_SELECT = UNIT(current_drive) + PH_HEAD(_floppy, HEAD);
+	GAP = _floppy->gap;
+	CODE2SIZE;
+	SECT_PER_TRACK = _floppy->sect << 2 >> SIZECODE;
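+	/* _floppy->sect counts 512-byte units; << 2 >> SIZECODE converts it
+	 * to hardware sectors, e.g. SIZECODE == 3 (1 KB sectors) halves it */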
+	SECTOR = ((fsector_t % _floppy->sect) << 2 >> SIZECODE) +
+	    ((_floppy->stretch & FD_ZEROBASED) ? 0 : 1);
+
+	/* tracksize describes the size which can be filled up with sectors
+	 * of size ssize.
+	 */
+	tracksize = _floppy->sect - _floppy->sect % ssize;
+	if (tracksize < _floppy->sect) {
+		SECT_PER_TRACK++;
+		if (tracksize <= fsector_t % _floppy->sect)
+			SECTOR--;
+
+		/* if we are beyond tracksize, fill up using smaller sectors */
+		while (tracksize <= fsector_t % _floppy->sect) {
+			while (tracksize + ssize > _floppy->sect) {
+				SIZECODE--;
+				ssize >>= 1;
+			}
+			SECTOR++;
+			SECT_PER_TRACK++;
+			tracksize += ssize;
+		}
+		max_sector = HEAD * _floppy->sect + tracksize;
+	} else if (!TRACK && !HEAD && !(_floppy->rate & FD_2M) && probing) {
+		max_sector = _floppy->sect;
+	} else if (!HEAD && CT(COMMAND) == FD_WRITE) {
+		/* for virtual DMA bug workaround */
+		max_sector = _floppy->sect;
+	}
+
+	in_sector_offset = (fsector_t % _floppy->sect) % ssize;
+	aligned_sector_t = fsector_t - in_sector_offset;
+	max_size = current_req->nr_sectors;
+	if ((raw_cmd->track == buffer_track) &&
+	    (current_drive == buffer_drive) &&
+	    (fsector_t >= buffer_min) && (fsector_t < buffer_max)) {
+		/* data already in track buffer */
+		if (CT(COMMAND) == FD_READ) {
+			copy_buffer(1, max_sector, buffer_max);
+			return 1;
+		}
+	} else if (in_sector_offset || current_req->nr_sectors < ssize) {
+		if (CT(COMMAND) == FD_WRITE) {
+			if (fsector_t + current_req->nr_sectors > ssize &&
+			    fsector_t + current_req->nr_sectors < ssize + ssize)
+				max_size = ssize + ssize;
+			else
+				max_size = ssize;
+		}
+		raw_cmd->flags &= ~FD_RAW_WRITE;
+		raw_cmd->flags |= FD_RAW_READ;
+		COMMAND = FM_MODE(_floppy, FD_READ);
+	} else if ((unsigned long)current_req->buffer < MAX_DMA_ADDRESS) {
+		unsigned long dma_limit;
+		int direct, indirect;
+
+		indirect =
+		    transfer_size(ssize, max_sector,
+				  max_buffer_sectors * 2) - fsector_t;
+
+		/*
+		 * Do NOT use minimum() here---MAX_DMA_ADDRESS is 64 bits wide
+		 * on a 64 bit machine!
+		 */
+		max_size = buffer_chain_size();
+		dma_limit =
+		    (MAX_DMA_ADDRESS -
+		     ((unsigned long)current_req->buffer)) >> 9;
+		if ((unsigned long)max_size > dma_limit) {
+			max_size = dma_limit;
+		}
+		/* 64 kb boundaries */
+		if (CROSS_64KB(current_req->buffer, max_size << 9))
+			max_size = (K_64 -
+				    ((unsigned long)current_req->buffer) %
+				    K_64) >> 9;
+		direct = transfer_size(ssize, max_sector, max_size) - fsector_t;
+		/*
+		 * We try to read tracks, but if we get too many errors, we
+		 * go back to reading just one sector at a time.
+		 *
+		 * This means we should be able to read a sector even if there
+		 * are other bad sectors on this track.
+		 */
+		if (!direct ||
+		    (indirect * 2 > direct * 3 &&
+		     *errors < DP->max_errors.read_track &&
+		     /*!TESTF(FD_NEED_TWADDLE) && */
+		     ((!probing
+		       || (DP->read_track & (1 << DRS->probed_format)))))) {
+			max_size = current_req->nr_sectors;
+		} else {
+			raw_cmd->kernel_data = current_req->buffer;
+			raw_cmd->length = current_count_sectors << 9;
+			if (raw_cmd->length == 0) {
+				DPRINT("zero dma transfer attempted from make_raw_request\n");
+				DPRINT("indirect=%d direct=%d fsector_t=%d\n",
+				       indirect, direct, fsector_t);
+				return 0;
+			}
+/*			check_dma_crossing(raw_cmd->kernel_data, 
+					   raw_cmd->length, 
+					   "end of make_raw_request [1]");*/
+
+			virtualdmabug_workaround();
+			return 2;
+		}
+	}
+
+	if (CT(COMMAND) == FD_READ)
+		max_size = max_sector;	/* unbounded */
+
+	/* claim buffer track if needed */
+	if (buffer_track != raw_cmd->track ||	/* bad track */
+	    buffer_drive != current_drive ||	/* bad drive */
+	    fsector_t > buffer_max ||
+	    fsector_t < buffer_min ||
+	    ((CT(COMMAND) == FD_READ ||
+	      (!in_sector_offset && current_req->nr_sectors >= ssize)) &&
+	     max_sector > 2 * max_buffer_sectors + buffer_min &&
+	     max_size + fsector_t > 2 * max_buffer_sectors + buffer_min)
+	    /* not enough space */
+	    ) {
+		buffer_track = -1;
+		buffer_drive = current_drive;
+		buffer_max = buffer_min = aligned_sector_t;
+	}
+	raw_cmd->kernel_data = floppy_track_buffer +
+	    ((aligned_sector_t - buffer_min) << 9);
+
+	if (CT(COMMAND) == FD_WRITE) {
+		/* copy write buffer to track buffer.
+		 * if we get here, we know that the write
+		 * is either aligned or the data already in the buffer
+		 * (buffer will be overwritten) */
+#ifdef FLOPPY_SANITY_CHECK
+		if (in_sector_offset && buffer_track == -1)
+			DPRINT("internal error offset !=0 on write\n");
+#endif
+		buffer_track = raw_cmd->track;
+		buffer_drive = current_drive;
+		copy_buffer(ssize, max_sector,
+			    2 * max_buffer_sectors + buffer_min);
+	} else
+		transfer_size(ssize, max_sector,
+			      2 * max_buffer_sectors + buffer_min -
+			      aligned_sector_t);
+
+	/* round up current_count_sectors to get dma xfer size */
+	raw_cmd->length = in_sector_offset + current_count_sectors;
+	raw_cmd->length = ((raw_cmd->length - 1) | (ssize - 1)) + 1;
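+	/* e.g. ssize == 2 (1 KB sectors): a span of 3 512-byte units becomes
+	 * ((3 - 1) | 1) + 1 = 4, i.e. two whole hardware sectors */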
+	raw_cmd->length <<= 9;
+#ifdef FLOPPY_SANITY_CHECK
+	/*check_dma_crossing(raw_cmd->kernel_data, raw_cmd->length, 
+	   "end of make_raw_request"); */
+	if ((raw_cmd->length < current_count_sectors << 9) ||
+	    (raw_cmd->kernel_data != current_req->buffer &&
+	     CT(COMMAND) == FD_WRITE &&
+	     (aligned_sector_t + (raw_cmd->length >> 9) > buffer_max ||
+	      aligned_sector_t < buffer_min)) ||
+	    raw_cmd->length % (128 << SIZECODE) ||
+	    raw_cmd->length <= 0 || current_count_sectors <= 0) {
+		DPRINT("fractionary current count b=%lx s=%lx\n",
+		       raw_cmd->length, current_count_sectors);
+		if (raw_cmd->kernel_data != current_req->buffer)
+			printk("addr=%d, length=%ld\n",
+			       (int)((raw_cmd->kernel_data -
+				      floppy_track_buffer) >> 9),
+			       current_count_sectors);
+		printk("st=%d ast=%d mse=%d msi=%d\n",
+		       fsector_t, aligned_sector_t, max_sector, max_size);
+		printk("ssize=%x SIZECODE=%d\n", ssize, SIZECODE);
+		printk("command=%x SECTOR=%d HEAD=%d, TRACK=%d\n",
+		       COMMAND, SECTOR, HEAD, TRACK);
+		printk("buffer drive=%d\n", buffer_drive);
+		printk("buffer track=%d\n", buffer_track);
+		printk("buffer_min=%d\n", buffer_min);
+		printk("buffer_max=%d\n", buffer_max);
+		return 0;
+	}
+
+	if (raw_cmd->kernel_data != current_req->buffer) {
+		if (raw_cmd->kernel_data < floppy_track_buffer ||
+		    current_count_sectors < 0 ||
+		    raw_cmd->length < 0 ||
+		    raw_cmd->kernel_data + raw_cmd->length >
+		    floppy_track_buffer + (max_buffer_sectors << 10)) {
+			DPRINT("buffer overrun in schedule dma\n");
+			printk("fsector_t=%d buffer_min=%d current_count=%ld\n",
+			       fsector_t, buffer_min, raw_cmd->length >> 9);
+			printk("current_count_sectors=%ld\n",
+			       current_count_sectors);
+			if (CT(COMMAND) == FD_READ)
+				printk("read\n");
+			if (CT(COMMAND) == FD_WRITE)
+				printk("write\n");
+			return 0;
+		}
+	} else if (raw_cmd->length > current_req->nr_sectors << 9 ||
+		   current_count_sectors > current_req->nr_sectors) {
+		DPRINT("buffer overrun in direct transfer\n");
+		return 0;
+	} else if (raw_cmd->length < current_count_sectors << 9) {
+		DPRINT("more sectors than bytes\n");
+		printk("bytes=%ld\n", raw_cmd->length >> 9);
+		printk("sectors=%ld\n", current_count_sectors);
+	}
+	if (raw_cmd->length == 0) {
+		DPRINT("zero dma transfer attempted from make_raw_request\n");
+		return 0;
+	}
+#endif
+
+	virtualdmabug_workaround();
+	return 2;
+}
+
+static void redo_fd_request(void)
+{
+#define REPEAT {request_done(0); continue; }
+	int drive;
+	int tmp;
+
+	lastredo = jiffies;
+	if (current_drive < N_DRIVE)
+		floppy_off(current_drive);
+
+	for (;;) {
+		if (!current_req) {
+			struct request *req;
+
+			spin_lock_irq(floppy_queue->queue_lock);
+			req = elv_next_request(floppy_queue);
+			spin_unlock_irq(floppy_queue->queue_lock);
+			if (!req) {
+				do_floppy = NULL;
+				unlock_fdc();
+				return;
+			}
+			current_req = req;
+		}
+		drive = (long)current_req->rq_disk->private_data;
+		set_fdc(drive);
+		reschedule_timeout(current_reqD, "redo fd request", 0);
+
+		set_floppy(drive);
+		raw_cmd = &default_raw_cmd;
+		raw_cmd->flags = 0;
+		if (start_motor(redo_fd_request))
+			return;
+		disk_change(current_drive);
+		if (test_bit(current_drive, &fake_change) ||
+		    TESTF(FD_DISK_CHANGED)) {
+			DPRINT("disk absent or changed during operation\n");
+			REPEAT;
+		}
+		if (!_floppy) {	/* Autodetection */
+			if (!probing) {
+				DRS->probed_format = 0;
+				if (next_valid_format()) {
+					DPRINT("no autodetectable formats\n");
+					_floppy = NULL;
+					REPEAT;
+				}
+			}
+			probing = 1;
+			_floppy =
+			    floppy_type + DP->autodetect[DRS->probed_format];
+		} else
+			probing = 0;
+		errors = &(current_req->errors);
+		tmp = make_raw_rw_request();
+		if (tmp < 2) {
+			request_done(tmp);
+			continue;
+		}
+
+		if (TESTF(FD_NEED_TWADDLE))
+			twaddle();
+		schedule_bh(floppy_start);
+		debugt("queue fd request");
+		return;
+	}
+#undef REPEAT
+}
+
+static struct cont_t rw_cont = {
+	.interrupt	= rw_interrupt,
+	.redo		= redo_fd_request,
+	.error		= bad_flp_intr,
+	.done		= request_done
+};
+
+static void process_fd_request(void)
+{
+	cont = &rw_cont;
+	schedule_bh(redo_fd_request);
+}
+
+static void do_fd_request(request_queue_t * q)
+{
+	if (max_buffer_sectors == 0) {
+		printk("VFS: do_fd_request called on non-open device\n");
+		return;
+	}
+
+	if (usage_count == 0) {
+		printk("warning: usage count=0, current_req=%p exiting\n",
+		       current_req);
+		if (current_req)
+			printk("sect=%ld flags=%lx\n",
+			       (long)current_req->sector,
+			       current_req->flags);
+		return;
+	}
+	if (test_bit(0, &fdc_busy)) {
+		/* fdc busy, this new request will be handled when the
+		   current one is done */
+		is_alive("do fd request, old request running");
+		return;
+	}
+	lock_fdc(MAXTIMEOUT, 0);
+	process_fd_request();
+	is_alive("do fd request");
+}
+
+static struct cont_t poll_cont = {
+	.interrupt	= success_and_wakeup,
+	.redo		= floppy_ready,
+	.error		= generic_failure,
+	.done		= generic_done
+};
+
+static int poll_drive(int interruptible, int flag)
+{
+	int ret;
+	/* no auto-sense, just clear dcl */
+	raw_cmd = &default_raw_cmd;
+	raw_cmd->flags = flag;
+	raw_cmd->track = 0;
+	raw_cmd->cmd_count = 0;
+	cont = &poll_cont;
+#ifdef DCL_DEBUG
+	if (DP->flags & FD_DEBUG) {
+		DPRINT("setting NEWCHANGE in poll_drive\n");
+	}
+#endif
+	SETF(FD_DISK_NEWCHANGE);
+	WAIT(floppy_ready);
+	return ret;
+}
+
+/*
+ * User triggered reset
+ * ====================
+ */
+
+static void reset_intr(void)
+{
+	printk("weird, reset interrupt called\n");
+}
+
+static struct cont_t reset_cont = {
+	.interrupt	= reset_intr,
+	.redo		= success_and_wakeup,
+	.error		= generic_failure,
+	.done		= generic_done
+};
+
+static int user_reset_fdc(int drive, int arg, int interruptible)
+{
+	int ret;
+
+	ret = 0;
+	LOCK_FDC(drive, interruptible);
+	if (arg == FD_RESET_ALWAYS)
+		FDCS->reset = 1;
+	if (FDCS->reset) {
+		cont = &reset_cont;
+		WAIT(reset_fdc);
+	}
+	process_fd_request();
+	return ret;
+}
+
+/*
+ * Misc Ioctl's and support
+ * ========================
+ */
+static inline int fd_copyout(void __user *param, const void *address,
+			     unsigned long size)
+{
+	return copy_to_user(param, address, size) ? -EFAULT : 0;
+}
+
+static inline int fd_copyin(void __user *param, void *address, unsigned long size)
+{
+	return copy_from_user(address, param, size) ? -EFAULT : 0;
+}
+
+#define _COPYOUT(x) (copy_to_user((void __user *)param, &(x), sizeof(x)) ? -EFAULT : 0)
+#define _COPYIN(x) (copy_from_user(&(x), (void __user *)param, sizeof(x)) ? -EFAULT : 0)
+
+#define COPYOUT(x) ECALL(_COPYOUT(x))
+#define COPYIN(x) ECALL(_COPYIN(x))
+
+static inline const char *drive_name(int type, int drive)
+{
+	struct floppy_struct *floppy;
+
+	if (type)
+		floppy = floppy_type + type;
+	else {
+		if (UDP->native_format)
+			floppy = floppy_type + UDP->native_format;
+		else
+			return "(null)";
+	}
+	if (floppy->name)
+		return floppy->name;
+	else
+		return "(null)";
+}
+
+/* raw commands */
+static void raw_cmd_done(int flag)
+{
+	int i;
+
+	if (!flag) {
+		raw_cmd->flags |= FD_RAW_FAILURE;
+		raw_cmd->flags |= FD_RAW_HARDFAILURE;
+	} else {
+		raw_cmd->reply_count = inr;
+		if (raw_cmd->reply_count > MAX_REPLIES)
+			raw_cmd->reply_count = 0;
+		for (i = 0; i < raw_cmd->reply_count; i++)
+			raw_cmd->reply[i] = reply_buffer[i];
+
+		if (raw_cmd->flags & (FD_RAW_READ | FD_RAW_WRITE)) {
+			unsigned long flags;
+			flags = claim_dma_lock();
+			raw_cmd->length = fd_get_dma_residue();
+			release_dma_lock(flags);
+		}
+
+		if ((raw_cmd->flags & FD_RAW_SOFTFAILURE) &&
+		    (!raw_cmd->reply_count || (raw_cmd->reply[0] & 0xc0)))
+			raw_cmd->flags |= FD_RAW_FAILURE;
+
+		if (disk_change(current_drive))
+			raw_cmd->flags |= FD_RAW_DISK_CHANGE;
+		else
+			raw_cmd->flags &= ~FD_RAW_DISK_CHANGE;
+		if (raw_cmd->flags & FD_RAW_NO_MOTOR_AFTER)
+			motor_off_callback(current_drive);
+
+		if (raw_cmd->next &&
+		    (!(raw_cmd->flags & FD_RAW_FAILURE) ||
+		     !(raw_cmd->flags & FD_RAW_STOP_IF_FAILURE)) &&
+		    ((raw_cmd->flags & FD_RAW_FAILURE) ||
+		     !(raw_cmd->flags & FD_RAW_STOP_IF_SUCCESS))) {
+			raw_cmd = raw_cmd->next;
+			return;
+		}
+	}
+	generic_done(flag);
+}
+
+static struct cont_t raw_cmd_cont = {
+	.interrupt	= success_and_wakeup,
+	.redo		= floppy_start,
+	.error		= generic_failure,
+	.done		= raw_cmd_done
+};
+
+static inline int raw_cmd_copyout(int cmd, char __user *param,
+				  struct floppy_raw_cmd *ptr)
+{
+	int ret;
+
+	while (ptr) {
+		COPYOUT(*ptr);
+		param += sizeof(struct floppy_raw_cmd);
+		if ((ptr->flags & FD_RAW_READ) && ptr->buffer_length) {
+			if (ptr->length >= 0
+			    && ptr->length <= ptr->buffer_length)
+				ECALL(fd_copyout
+				      (ptr->data, ptr->kernel_data,
+				       ptr->buffer_length - ptr->length));
+		}
+		ptr = ptr->next;
+	}
+	return 0;
+}
+
+static void raw_cmd_free(struct floppy_raw_cmd **ptr)
+{
+	struct floppy_raw_cmd *next, *this;
+
+	this = *ptr;
+	*ptr = NULL;
+	while (this) {
+		if (this->buffer_length) {
+			fd_dma_mem_free((unsigned long)this->kernel_data,
+					this->buffer_length);
+			this->buffer_length = 0;
+		}
+		next = this->next;
+		kfree(this);
+		this = next;
+	}
+}
+
+static inline int raw_cmd_copyin(int cmd, char __user *param,
+				 struct floppy_raw_cmd **rcmd)
+{
+	struct floppy_raw_cmd *ptr;
+	int ret;
+	int i;
+
+	*rcmd = NULL;
+	while (1) {
+		ptr = (struct floppy_raw_cmd *)
+		    kmalloc(sizeof(struct floppy_raw_cmd), GFP_USER);
+		if (!ptr)
+			return -ENOMEM;
+		*rcmd = ptr;
+		COPYIN(*ptr);
+		ptr->next = NULL;
+		ptr->buffer_length = 0;
+		param += sizeof(struct floppy_raw_cmd);
+		if (ptr->cmd_count > 33)
+			/* the command may now also take up the space
+			 * initially intended for the reply & the
+			 * reply count. Needed for long 82078 commands
+			 * such as RESTORE, which takes ... 17 command
+			 * bytes. Murphy's law #137: When you reserve
+			 * 16 bytes for a structure, you'll one day
+			 * discover that you really need 17...
+			 */
+			return -EINVAL;
+
+		for (i = 0; i < 16; i++)
+			ptr->reply[i] = 0;
+		ptr->resultcode = 0;
+		ptr->kernel_data = NULL;
+
+		if (ptr->flags & (FD_RAW_READ | FD_RAW_WRITE)) {
+			if (ptr->length <= 0)
+				return -EINVAL;
+			ptr->kernel_data =
+			    (char *)fd_dma_mem_alloc(ptr->length);
+			fallback_on_nodma_alloc(&ptr->kernel_data, ptr->length);
+			if (!ptr->kernel_data)
+				return -ENOMEM;
+			ptr->buffer_length = ptr->length;
+		}
+		if (ptr->flags & FD_RAW_WRITE)
+			ECALL(fd_copyin(ptr->data, ptr->kernel_data,
+					ptr->length));
+		rcmd = &(ptr->next);
+		if (!(ptr->flags & FD_RAW_MORE))
+			return 0;
+		ptr->rate &= 0x43;
+	}
+}
+
+static int raw_cmd_ioctl(int cmd, void __user *param)
+{
+	int drive, ret, ret2;
+	struct floppy_raw_cmd *my_raw_cmd;
+
+	if (FDCS->rawcmd <= 1)
+		FDCS->rawcmd = 1;
+	for (drive = 0; drive < N_DRIVE; drive++) {
+		if (FDC(drive) != fdc)
+			continue;
+		if (drive == current_drive) {
+			if (UDRS->fd_ref > 1) {
+				FDCS->rawcmd = 2;
+				break;
+			}
+		} else if (UDRS->fd_ref) {
+			FDCS->rawcmd = 2;
+			break;
+		}
+	}
+
+	if (FDCS->reset)
+		return -EIO;
+
+	ret = raw_cmd_copyin(cmd, param, &my_raw_cmd);
+	if (ret) {
+		raw_cmd_free(&my_raw_cmd);
+		return ret;
+	}
+
+	raw_cmd = my_raw_cmd;
+	cont = &raw_cmd_cont;
+	ret = wait_til_done(floppy_start, 1);
+#ifdef DCL_DEBUG
+	if (DP->flags & FD_DEBUG) {
+		DPRINT("calling disk change from raw_cmd ioctl\n");
+	}
+#endif
+
+	if (ret != -EINTR && FDCS->reset)
+		ret = -EIO;
+
+	DRS->track = NO_TRACK;
+
+	ret2 = raw_cmd_copyout(cmd, param, my_raw_cmd);
+	if (!ret)
+		ret = ret2;
+	raw_cmd_free(&my_raw_cmd);
+	return ret;
+}
+
+static int invalidate_drive(struct block_device *bdev)
+{
+	/* invalidate the buffer track to force a reread */
+	set_bit((long)bdev->bd_disk->private_data, &fake_change);
+	process_fd_request();
+	check_disk_change(bdev);
+	return 0;
+}
+
+static inline int set_geometry(unsigned int cmd, struct floppy_struct *g,
+			       int drive, int type, struct block_device *bdev)
+{
+	int cnt;
+
+	/* sanity checking for parameters. */
+	if (g->sect <= 0 ||
+	    g->head <= 0 ||
+	    g->track <= 0 || g->track > UDP->tracks >> STRETCH(g) ||
+	    /* check if reserved bits are set */
+	    (g->stretch & ~(FD_STRETCH | FD_SWAPSIDES | FD_ZEROBASED)) != 0)
+		return -EINVAL;
+	if (type) {
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+		down(&open_lock);
+		LOCK_FDC(drive, 1);
+		floppy_type[type] = *g;
+		floppy_type[type].name = "user format";
+		for (cnt = type << 2; cnt < (type << 2) + 4; cnt++)
+			floppy_sizes[cnt] = floppy_sizes[cnt + 0x80] =
+			    floppy_type[type].size + 1;
+		process_fd_request();
+		for (cnt = 0; cnt < N_DRIVE; cnt++) {
+			struct block_device *bdev = opened_bdev[cnt];
+			if (!bdev || ITYPE(drive_state[cnt].fd_device) != type)
+				continue;
+			__invalidate_device(bdev, 0);
+		}
+		up(&open_lock);
+	} else {
+		int oldStretch;
+		LOCK_FDC(drive, 1);
+		if (cmd != FDDEFPRM)
+			/* notice a disk change immediately, else
+			 * we lose our settings as soon as the change
+			 * is noticed */
+			CALL(poll_drive(1, FD_RAW_NEED_DISK));
+		oldStretch = g->stretch;
+		user_params[drive] = *g;
+		if (buffer_drive == drive)
+			SUPBOUND(buffer_max, user_params[drive].sect);
+		current_type[drive] = &user_params[drive];
+		floppy_sizes[drive] = user_params[drive].size;
+		if (cmd == FDDEFPRM)
+			DRS->keep_data = -1;
+		else
+			DRS->keep_data = 1;
+		/* invalidation. Invalidate only when needed, i.e.
+		 * when there are already sectors in the buffer cache
+		 * whose number will change. This is useful, because
+		 * mtools often changes the geometry of the disk after
+		 * looking at the boot block */
+		if (DRS->maxblock > user_params[drive].sect ||
+		    DRS->maxtrack ||
+		    ((user_params[drive].sect ^ oldStretch) &
+		     (FD_SWAPSIDES | FD_ZEROBASED)))
+			invalidate_drive(bdev);
+		else
+			process_fd_request();
+	}
+	return 0;
+}
+
+/* handle obsolete ioctl's */
+static int ioctl_table[] = {
+	FDCLRPRM,
+	FDSETPRM,
+	FDDEFPRM,
+	FDGETPRM,
+	FDMSGON,
+	FDMSGOFF,
+	FDFMTBEG,
+	FDFMTTRK,
+	FDFMTEND,
+	FDSETEMSGTRESH,
+	FDFLUSH,
+	FDSETMAXERRS,
+	FDGETMAXERRS,
+	FDGETDRVTYP,
+	FDSETDRVPRM,
+	FDGETDRVPRM,
+	FDGETDRVSTAT,
+	FDPOLLDRVSTAT,
+	FDRESET,
+	FDGETFDCSTAT,
+	FDWERRORCLR,
+	FDWERRORGET,
+	FDRAWCMD,
+	FDEJECT,
+	FDTWADDLE
+};
+
+static inline int normalize_ioctl(int *cmd, int *size)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(ioctl_table); i++) {
+		if ((*cmd & 0xffff) == (ioctl_table[i] & 0xffff)) {
+			*size = _IOC_SIZE(*cmd);
+			*cmd = ioctl_table[i];
+			if (*size > _IOC_SIZE(*cmd)) {
+				printk("ioctl not yet supported\n");
+				return -EFAULT;
+			}
+			return 0;
+		}
+	}
+	return -EINVAL;
+}
+
+static int get_floppy_geometry(int drive, int type, struct floppy_struct **g)
+{
+	if (type)
+		*g = &floppy_type[type];
+	else {
+		LOCK_FDC(drive, 0);
+		CALL(poll_drive(0, 0));
+		process_fd_request();
+		*g = current_type[drive];
+	}
+	if (!*g)
+		return -ENODEV;
+	return 0;
+}
+
+static int fd_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
+		    unsigned long param)
+{
+#define FD_IOCTL_ALLOWED ((filp) && (filp)->private_data)
+#define OUT(c,x) case c: outparam = (const char *) (x); break
+#define IN(c,x,tag) case c: *(x) = inparam. tag ; return 0
+
+	int drive = (long)inode->i_bdev->bd_disk->private_data;
+	int i, type = ITYPE(UDRS->fd_device);
+	int ret;
+	int size;
+	union inparam {
+		struct floppy_struct g;	/* geometry */
+		struct format_descr f;
+		struct floppy_max_errors max_errors;
+		struct floppy_drive_params dp;
+	} inparam;		/* parameters coming from user space */
+	const char *outparam;	/* parameters passed back to user space */
+
+	/* convert compatibility eject ioctls into floppy eject ioctl.
+	 * We do this in order to provide a means to eject floppy disks before
+	 * installing the new fdutils package */
+	if (cmd == CDROMEJECT ||	/* CD-ROM eject */
+	    cmd == 0x6470 /* SunOS floppy eject */ ) {
+		DPRINT("obsolete eject ioctl\n");
+		DPRINT("please use floppycontrol --eject\n");
+		cmd = FDEJECT;
+	}
+
+	/* generic block device ioctls */
+	switch (cmd) {
+		/* the following have been inspired by the corresponding
+		 * code for other block devices. */
+		struct floppy_struct *g;
+	case HDIO_GETGEO:
+		{
+			struct hd_geometry loc;
+			ECALL(get_floppy_geometry(drive, type, &g));
+			loc.heads = g->head;
+			loc.sectors = g->sect;
+			loc.cylinders = g->track;
+			loc.start = 0;
+			return _COPYOUT(loc);
+		}
+	}
+
+	/* convert the old style command into a new style command */
+	if ((cmd & 0xff00) == 0x0200) {
+		ECALL(normalize_ioctl(&cmd, &size));
+	} else
+		return -EINVAL;
+
+	/* permission checks */
+	if (((cmd & 0x40) && !FD_IOCTL_ALLOWED) ||
+	    ((cmd & 0x80) && !capable(CAP_SYS_ADMIN)))
+		return -EPERM;
+
+	/* copyin */
+	CLEARSTRUCT(&inparam);
+	if (_IOC_DIR(cmd) & _IOC_WRITE)
+		ECALL(fd_copyin((void __user *)param, &inparam, size));
+
+	switch (cmd) {
+		case FDEJECT:
+			if (UDRS->fd_ref != 1)
+				/* somebody else has this drive open */
+				return -EBUSY;
+			LOCK_FDC(drive, 1);
+
+			/* do the actual eject. Fails on
+			 * non-Sparc architectures */
+			ret = fd_eject(UNIT(drive));
+
+			USETF(FD_DISK_CHANGED);
+			USETF(FD_VERIFY);
+			process_fd_request();
+			return ret;
+		case FDCLRPRM:
+			LOCK_FDC(drive, 1);
+			current_type[drive] = NULL;
+			floppy_sizes[drive] = MAX_DISK_SIZE << 1;
+			UDRS->keep_data = 0;
+			return invalidate_drive(inode->i_bdev);
+		case FDSETPRM:
+		case FDDEFPRM:
+			return set_geometry(cmd, &inparam.g,
+					    drive, type, inode->i_bdev);
+		case FDGETPRM:
+			ECALL(get_floppy_geometry(drive, type,
+						  (struct floppy_struct **)
+						  &outparam));
+			break;
+
+		case FDMSGON:
+			UDP->flags |= FTD_MSG;
+			return 0;
+		case FDMSGOFF:
+			UDP->flags &= ~FTD_MSG;
+			return 0;
+
+		case FDFMTBEG:
+			LOCK_FDC(drive, 1);
+			CALL(poll_drive(1, FD_RAW_NEED_DISK));
+			ret = UDRS->flags;
+			process_fd_request();
+			if (ret & FD_VERIFY)
+				return -ENODEV;
+			if (!(ret & FD_DISK_WRITABLE))
+				return -EROFS;
+			return 0;
+		case FDFMTTRK:
+			if (UDRS->fd_ref != 1)
+				return -EBUSY;
+			return do_format(drive, &inparam.f);
+		case FDFMTEND:
+		case FDFLUSH:
+			LOCK_FDC(drive, 1);
+			return invalidate_drive(inode->i_bdev);
+
+		case FDSETEMSGTRESH:
+			UDP->max_errors.reporting =
+			    (unsigned short)(param & 0x0f);
+			return 0;
+			OUT(FDGETMAXERRS, &UDP->max_errors);
+			IN(FDSETMAXERRS, &UDP->max_errors, max_errors);
+
+		case FDGETDRVTYP:
+			outparam = drive_name(type, drive);
+			SUPBOUND(size, strlen(outparam) + 1);
+			break;
+
+			IN(FDSETDRVPRM, UDP, dp);
+			OUT(FDGETDRVPRM, UDP);
+
+		case FDPOLLDRVSTAT:
+			LOCK_FDC(drive, 1);
+			CALL(poll_drive(1, FD_RAW_NEED_DISK));
+			process_fd_request();
+			/* fall through */
+			OUT(FDGETDRVSTAT, UDRS);
+
+		case FDRESET:
+			return user_reset_fdc(drive, (int)param, 1);
+
+			OUT(FDGETFDCSTAT, UFDCS);
+
+		case FDWERRORCLR:
+			CLEARSTRUCT(UDRWE);
+			return 0;
+			OUT(FDWERRORGET, UDRWE);
+
+		case FDRAWCMD:
+			if (type)
+				return -EINVAL;
+			LOCK_FDC(drive, 1);
+			set_floppy(drive);
+			CALL(i = raw_cmd_ioctl(cmd, (void __user *)param));
+			process_fd_request();
+			return i;
+
+		case FDTWADDLE:
+			LOCK_FDC(drive, 1);
+			twaddle();
+			process_fd_request();
+			return 0;
+
+		default:
+			return -EINVAL;
+	}
+
+	if (_IOC_DIR(cmd) & _IOC_READ)
+		return fd_copyout((void __user *)param, outparam, size);
+	else
+		return 0;
+#undef OUT
+#undef IN
+}
+
+static void __init config_types(void)
+{
+	int first = 1;
+	int drive;
+
+	/* read drive info out of physical CMOS */
+	drive = 0;
+	if (!UDP->cmos)
+		UDP->cmos = FLOPPY0_TYPE;
+	drive = 1;
+	if (!UDP->cmos && FLOPPY1_TYPE)
+		UDP->cmos = FLOPPY1_TYPE;
+
+	/* XXX */
+	/* additional physical CMOS drive detection should go here */
+
+	for (drive = 0; drive < N_DRIVE; drive++) {
+		unsigned int type = UDP->cmos;
+		struct floppy_drive_params *params;
+		const char *name = NULL;
+		static char temparea[32];
+
+		if (type < NUMBER(default_drive_params)) {
+			params = &default_drive_params[type].params;
+			if (type) {
+				name = default_drive_params[type].name;
+				allowed_drive_mask |= 1 << drive;
+			} else
+				allowed_drive_mask &= ~(1 << drive);
+		} else {
+			params = &default_drive_params[0].params;
+			sprintf(temparea, "unknown type %d (usb?)", type);
+			name = temparea;
+		}
+		if (name) {
+			const char *prepend = ",";
+			if (first) {
+				prepend = KERN_INFO "Floppy drive(s):";
+				first = 0;
+			}
+			printk("%s fd%d is %s", prepend, drive, name);
+			register_devfs_entries(drive);
+		}
+		*UDP = *params;
+	}
+	if (!first)
+		printk("\n");
+}
+
+static int floppy_release(struct inode *inode, struct file *filp)
+{
+	int drive = (long)inode->i_bdev->bd_disk->private_data;
+
+	down(&open_lock);
+	if (UDRS->fd_ref < 0)
+		UDRS->fd_ref = 0;
+	else if (!UDRS->fd_ref--) {
+		DPRINT("floppy_release with fd_ref == 0");
+		UDRS->fd_ref = 0;
+	}
+	if (!UDRS->fd_ref)
+		opened_bdev[drive] = NULL;
+	floppy_release_irq_and_dma();
+	up(&open_lock);
+	return 0;
+}
+
+/*
+ * floppy_open checks for aliasing (/dev/fd0 can be the same as
+ * /dev/PS0 etc), and disallows simultaneous access to the same
+ * drive with different device numbers.
+ */
+static int floppy_open(struct inode *inode, struct file *filp)
+{
+	int drive = (long)inode->i_bdev->bd_disk->private_data;
+	int old_dev;
+	int try;
+	int res = -EBUSY;
+	char *tmp;
+
+	filp->private_data = (void *)0;
+	down(&open_lock);
+	old_dev = UDRS->fd_device;
+	if (opened_bdev[drive] && opened_bdev[drive] != inode->i_bdev)
+		goto out2;
+
+	if (!UDRS->fd_ref && (UDP->flags & FD_BROKEN_DCL)) {
+		USETF(FD_DISK_CHANGED);
+		USETF(FD_VERIFY);
+	}
+
+	if (UDRS->fd_ref == -1 || (UDRS->fd_ref && (filp->f_flags & O_EXCL)))
+		goto out2;
+
+	if (floppy_grab_irq_and_dma())
+		goto out2;
+
+	if (filp->f_flags & O_EXCL)
+		UDRS->fd_ref = -1;
+	else
+		UDRS->fd_ref++;
+
+	opened_bdev[drive] = inode->i_bdev;
+
+	res = -ENXIO;
+
+	if (!floppy_track_buffer) {
+		/* if opening an ED drive, reserve a big buffer,
+		 * else reserve a small one */
+		if ((UDP->cmos == 6) || (UDP->cmos == 5))
+			try = 64;	/* Only 48 actually useful */
+		else
+			try = 32;	/* Only 24 actually useful */
+
+		tmp = (char *)fd_dma_mem_alloc(1024 * try);
+		if (!tmp && !floppy_track_buffer) {
+			try >>= 1;	/* buffer only one side */
+			INFBOUND(try, 16);
+			tmp = (char *)fd_dma_mem_alloc(1024 * try);
+		}
+		if (!tmp && !floppy_track_buffer) {
+			fallback_on_nodma_alloc(&tmp, 2048 * try);
+		}
+		if (!tmp && !floppy_track_buffer) {
+			DPRINT("Unable to allocate DMA memory\n");
+			goto out;
+		}
+		if (floppy_track_buffer) {
+			if (tmp)
+				fd_dma_mem_free((unsigned long)tmp, try * 1024);
+		} else {
+			buffer_min = buffer_max = -1;
+			floppy_track_buffer = tmp;
+			max_buffer_sectors = try;
+		}
+	}
+
+	UDRS->fd_device = iminor(inode);
+	set_capacity(disks[drive], floppy_sizes[iminor(inode)]);
+	if (old_dev != -1 && old_dev != iminor(inode)) {
+		if (buffer_drive == drive)
+			buffer_track = -1;
+	}
+
+	/* Allow ioctls if we have write permission even on a read-only open.
+	 * Needed so that programs such as fdrawcmd can still work on
+	 * write-protected disks */
+	if (filp->f_mode & 2
+	    || permission(filp->f_dentry->d_inode, 2, NULL) == 0)
+		filp->private_data = (void *)8;
+
+	if (UFDCS->rawcmd == 1)
+		UFDCS->rawcmd = 2;
+
+	if (!(filp->f_flags & O_NDELAY)) {
+		if (filp->f_mode & 3) {
+			UDRS->last_checked = 0;
+			check_disk_change(inode->i_bdev);
+			if (UTESTF(FD_DISK_CHANGED))
+				goto out;
+		}
+		res = -EROFS;
+		if ((filp->f_mode & 2) && !(UTESTF(FD_DISK_WRITABLE)))
+			goto out;
+	}
+	up(&open_lock);
+	return 0;
+out:
+	if (UDRS->fd_ref < 0)
+		UDRS->fd_ref = 0;
+	else
+		UDRS->fd_ref--;
+	if (!UDRS->fd_ref)
+		opened_bdev[drive] = NULL;
+	floppy_release_irq_and_dma();
+out2:
+	up(&open_lock);
+	return res;
+}
+
+/*
+ * Check if the disk has been changed or if a change has been faked.
+ */
+static int check_floppy_change(struct gendisk *disk)
+{
+	int drive = (long)disk->private_data;
+
+	if (UTESTF(FD_DISK_CHANGED) || UTESTF(FD_VERIFY))
+		return 1;
+
+	if (UDP->checkfreq < (int)(jiffies - UDRS->last_checked)) {
+		if (floppy_grab_irq_and_dma()) {
+			return 1;
+		}
+
+		lock_fdc(drive, 0);
+		poll_drive(0, 0);
+		process_fd_request();
+		floppy_release_irq_and_dma();
+	}
+
+	if (UTESTF(FD_DISK_CHANGED) ||
+	    UTESTF(FD_VERIFY) ||
+	    test_bit(drive, &fake_change) ||
+	    (!ITYPE(UDRS->fd_device) && !current_type[drive]))
+		return 1;
+	return 0;
+}
+
+/*
+ * This implements "read block 0" for floppy_revalidate().
+ * Needed for format autodetection, checking whether there is
+ * a disk in the drive, and whether that disk is writable.
+ */
+
+static int floppy_rb0_complete(struct bio *bio, unsigned int bytes_done,
+			       int err)
+{
+	if (bio->bi_size)
+		return 1;
+
+	complete((struct completion *)bio->bi_private);
+	return 0;
+}
+
+static int __floppy_read_block_0(struct block_device *bdev)
+{
+	struct bio bio;
+	struct bio_vec bio_vec;
+	struct completion complete;
+	struct page *page;
+	size_t size;
+
+	page = alloc_page(GFP_NOIO);
+	if (!page) {
+		process_fd_request();
+		return -ENOMEM;
+	}
+
+	size = bdev->bd_block_size;
+	if (!size)
+		size = 1024;
+
+	bio_init(&bio);
+	bio.bi_io_vec = &bio_vec;
+	bio_vec.bv_page = page;
+	bio_vec.bv_len = size;
+	bio_vec.bv_offset = 0;
+	bio.bi_vcnt = 1;
+	bio.bi_idx = 0;
+	bio.bi_size = size;
+	bio.bi_bdev = bdev;
+	bio.bi_sector = 0;
+	init_completion(&complete);
+	bio.bi_private = &complete;
+	bio.bi_end_io = floppy_rb0_complete;
+
+	submit_bio(READ, &bio);
+	generic_unplug_device(bdev_get_queue(bdev));
+	process_fd_request();
+	wait_for_completion(&complete);
+
+	__free_page(page);
+
+	return 0;
+}
+
+/* revalidate the floppy disk, i.e. trigger format autodetection by reading
+ * the bootblock (block 0). "Autodetection" is also needed to check whether
+ * there is a disk in the drive at all... Thus we also do it for fixed
+ * geometry formats */
+static int floppy_revalidate(struct gendisk *disk)
+{
+	int drive = (long)disk->private_data;
+#define NO_GEOM (!current_type[drive] && !ITYPE(UDRS->fd_device))
+	int cf;
+	int res = 0;
+
+	if (UTESTF(FD_DISK_CHANGED) ||
+	    UTESTF(FD_VERIFY) || test_bit(drive, &fake_change) || NO_GEOM) {
+		if (usage_count == 0) {
+			printk("VFS: revalidate called on non-open device.\n");
+			return -EFAULT;
+		}
+		lock_fdc(drive, 0);
+		cf = UTESTF(FD_DISK_CHANGED) || UTESTF(FD_VERIFY);
+		if (!(cf || test_bit(drive, &fake_change) || NO_GEOM)) {
+			process_fd_request();	/*already done by another thread */
+			return 0;
+		}
+		UDRS->maxblock = 0;
+		UDRS->maxtrack = 0;
+		if (buffer_drive == drive)
+			buffer_track = -1;
+		clear_bit(drive, &fake_change);
+		UCLEARF(FD_DISK_CHANGED);
+		if (cf)
+			UDRS->generation++;
+		if (NO_GEOM) {
+			/* auto-sensing */
+			res = __floppy_read_block_0(opened_bdev[drive]);
+		} else {
+			if (cf)
+				poll_drive(0, FD_RAW_NEED_DISK);
+			process_fd_request();
+		}
+	}
+	set_capacity(disk, floppy_sizes[UDRS->fd_device]);
+	return res;
+}
+
+static struct block_device_operations floppy_fops = {
+	.owner		= THIS_MODULE,
+	.open		= floppy_open,
+	.release	= floppy_release,
+	.ioctl		= fd_ioctl,
+	.media_changed	= check_floppy_change,
+	.revalidate_disk = floppy_revalidate,
+};
+static char *table[] = {
+	"", "d360", "h1200", "u360", "u720", "h360", "h720",
+	"u1440", "u2880", "CompaQ", "h1440", "u1680", "h410",
+	"u820", "h1476", "u1722", "h420", "u830", "h1494", "u1743",
+	"h880", "u1040", "u1120", "h1600", "u1760", "u1920",
+	"u3200", "u3520", "u3840", "u1840", "u800", "u1600",
+	NULL
+};
+static int t360[] = { 1, 0 },
+	t1200[] = { 2, 5, 6, 10, 12, 14, 16, 18, 20, 23, 0 },
+	t3in[] = { 8, 9, 26, 27, 28, 7, 11, 15, 19, 24, 25, 29, 31, 3, 4, 13,
+			17, 21, 22, 30, 0 };
+static int *table_sup[] =
+    { NULL, t360, t1200, t3in + 5 + 8, t3in + 5, t3in, t3in };
+
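+/* Create devfs nodes for each format the drive supports.  Minors encode
+ * drive and format: minor = base_minor + (format << 2), with drives 4-7
+ * based at minor 128.  Illustrative example: for a 1.44M drive (CMOS type
+ * 4) the first supported format is 7 ("u1440"), so fd0 gets minor 28 as
+ * "floppy/0u1440". */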
+static void __init register_devfs_entries(int drive)
+{
+	int base_minor = (drive < 4) ? drive : (124 + drive);
+
+	if (UDP->cmos < NUMBER(default_drive_params)) {
+		int i = 0;
+		do {
+			int minor = base_minor + (table_sup[UDP->cmos][i] << 2);
+
+			devfs_mk_bdev(MKDEV(FLOPPY_MAJOR, minor),
+				      S_IFBLK | S_IRUSR | S_IWUSR | S_IRGRP |
+				      S_IWGRP, "floppy/%d%s", drive,
+				      table[table_sup[UDP->cmos][i]]);
+		} while (table_sup[UDP->cmos][i++]);
+	}
+}
+
+/*
+ * Floppy Driver initialization
+ * =============================
+ */
+
+/* Determine the floppy disk controller type */
+/* This routine was written by David C. Niemi */
+static char __init get_fdc_version(void)
+{
+	int r;
+
+	output_byte(FD_DUMPREGS);	/* 82072 and better know DUMPREGS */
+	if (FDCS->reset)
+		return FDC_NONE;
+	if ((r = result()) <= 0x00)
+		return FDC_NONE;	/* No FDC present ??? */
+	if ((r == 1) && (reply_buffer[0] == 0x80)) {
+		printk(KERN_INFO "FDC %d is an 8272A\n", fdc);
+		return FDC_8272A;	/* 8272a/765 don't know DUMPREGS */
+	}
+	if (r != 10) {
+		printk("FDC %d init: DUMPREGS: unexpected return of %d bytes.\n",
+		       fdc, r);
+		return FDC_UNKNOWN;
+	}
+
+	if (!fdc_configure()) {
+		printk(KERN_INFO "FDC %d is an 82072\n", fdc);
+		return FDC_82072;	/* 82072 doesn't know CONFIGURE */
+	}
+
+	output_byte(FD_PERPENDICULAR);
+	if (need_more_output() == MORE_OUTPUT) {
+		output_byte(0);
+	} else {
+		printk(KERN_INFO "FDC %d is an 82072A\n", fdc);
+		return FDC_82072A;	/* 82072A as found on Sparcs. */
+	}
+
+	output_byte(FD_UNLOCK);
+	r = result();
+	if ((r == 1) && (reply_buffer[0] == 0x80)) {
+		printk(KERN_INFO "FDC %d is a pre-1991 82077\n", fdc);
+		return FDC_82077_ORIG;	/* Pre-1991 82077, doesn't know 
+					 * LOCK/UNLOCK */
+	}
+	if ((r != 1) || (reply_buffer[0] != 0x00)) {
+		printk("FDC %d init: UNLOCK: unexpected return of %d bytes.\n",
+		       fdc, r);
+		return FDC_UNKNOWN;
+	}
+	output_byte(FD_PARTID);
+	r = result();
+	if (r != 1) {
+		printk("FDC %d init: PARTID: unexpected return of %d bytes.\n",
+		       fdc, r);
+		return FDC_UNKNOWN;
+	}
+	if (reply_buffer[0] == 0x80) {
+		printk(KERN_INFO "FDC %d is a post-1991 82077\n", fdc);
+		return FDC_82077;	/* Revised 82077AA passes all the tests */
+	}
+	switch (reply_buffer[0] >> 5) {
+	case 0x0:
+		/* Either an 82078-1 or an 82078SL running at 5 volts */
+		printk(KERN_INFO "FDC %d is an 82078.\n", fdc);
+		return FDC_82078;
+	case 0x1:
+		printk(KERN_INFO "FDC %d is a 44pin 82078\n", fdc);
+		return FDC_82078;
+	case 0x2:
+		printk(KERN_INFO "FDC %d is a S82078B\n", fdc);
+		return FDC_S82078B;
+	case 0x3:
+		printk(KERN_INFO "FDC %d is a National Semiconductor PC87306\n",
+		       fdc);
+		return FDC_87306;
+	default:
+		printk(KERN_INFO
+		       "FDC %d init: 82078 variant with unknown PARTID=%d.\n",
+		       fdc, reply_buffer[0] >> 5);
+		return FDC_82078_UNKN;
+	}
+}				/* get_fdc_version */
+
+/* lilo configuration */
+
+static void __init floppy_set_flags(int *ints, int param, int param2)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(default_drive_params); i++) {
+		if (param)
+			default_drive_params[i].params.flags |= param2;
+		else
+			default_drive_params[i].params.flags &= ~param2;
+	}
+	DPRINT("%s flag 0x%x\n", param ? "Setting" : "Clearing", param2);
+}
+
+static void __init daring(int *ints, int param, int param2)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(default_drive_params); i++) {
+		if (param) {
+			default_drive_params[i].params.select_delay = 0;
+			default_drive_params[i].params.flags |=
+			    FD_SILENT_DCL_CLEAR;
+		} else {
+			default_drive_params[i].params.select_delay =
+			    2 * HZ / 100;
+			default_drive_params[i].params.flags &=
+			    ~FD_SILENT_DCL_CLEAR;
+		}
+	}
+	DPRINT("Assuming %s floppy hardware\n", param ? "standard" : "broken");
+}
+
+static void __init set_cmos(int *ints, int dummy, int dummy2)
+{
+	int current_drive = 0;
+
+	if (ints[0] != 2) {
+		DPRINT("wrong number of parameters for CMOS\n");
+		return;
+	}
+	current_drive = ints[1];
+	if (current_drive < 0 || current_drive >= 8) {
+		DPRINT("bad drive for set_cmos\n");
+		return;
+	}
+#if N_FDC > 1
+	if (current_drive >= 4 && !FDC2)
+		FDC2 = 0x370;
+#endif
+	DP->cmos = ints[2];
+	DPRINT("setting CMOS code to %d\n", ints[2]);
+}
+
+static struct param_table {
+	const char *name;
+	void (*fn) (int *ints, int param, int param2);
+	int *var;
+	int def_param;
+	int param2;
+} config_params[] __initdata = {
+	{"allowed_drive_mask", NULL, &allowed_drive_mask, 0xff, 0}, /* obsolete */
+	{"all_drives", NULL, &allowed_drive_mask, 0xff, 0},	/* obsolete */
+	{"asus_pci", NULL, &allowed_drive_mask, 0x33, 0},
+	{"irq", NULL, &FLOPPY_IRQ, 6, 0},
+	{"dma", NULL, &FLOPPY_DMA, 2, 0},
+	{"daring", daring, NULL, 1, 0},
+#if N_FDC > 1
+	{"two_fdc", NULL, &FDC2, 0x370, 0},
+	{"one_fdc", NULL, &FDC2, 0, 0},
+#endif
+	{"thinkpad", floppy_set_flags, NULL, 1, FD_INVERTED_DCL},
+	{"broken_dcl", floppy_set_flags, NULL, 1, FD_BROKEN_DCL},
+	{"messages", floppy_set_flags, NULL, 1, FTD_MSG},
+	{"silent_dcl_clear", floppy_set_flags, NULL, 1, FD_SILENT_DCL_CLEAR},
+	{"debug", floppy_set_flags, NULL, 1, FD_DEBUG},
+	{"nodma", NULL, &can_use_virtual_dma, 1, 0},
+	{"omnibook", NULL, &can_use_virtual_dma, 1, 0},
+	{"yesdma", NULL, &can_use_virtual_dma, 0, 0},
+	{"fifo_depth", NULL, &fifo_depth, 0xa, 0},
+	{"nofifo", NULL, &no_fifo, 0x20, 0},
+	{"usefifo", NULL, &no_fifo, 0, 0},
+	{"cmos", set_cmos, NULL, 0, 0},
+	{"slow", NULL, &slow_floppy, 1, 0},
+	{"unexpected_interrupts", NULL, &print_unex, 1, 0},
+	{"no_unexpected_interrupts", NULL, &print_unex, 0, 0},
+	{"L40SX", NULL, &print_unex, 0, 0}
+
+	EXTRA_FLOPPY_PARAMS
+};
+
+static int __init floppy_setup(char *str)
+{
+	int i;
+	int param;
+	int ints[11];
+
+	str = get_options(str, ARRAY_SIZE(ints), ints);
+	if (str) {
+		for (i = 0; i < ARRAY_SIZE(config_params); i++) {
+			if (strcmp(str, config_params[i].name) == 0) {
+				if (ints[0])
+					param = ints[1];
+				else
+					param = config_params[i].def_param;
+				if (config_params[i].fn)
+					config_params[i].
+					    fn(ints, param,
+					       config_params[i].param2);
+				if (config_params[i].var) {
+					DPRINT("%s=%d\n", str, param);
+					*config_params[i].var = param;
+				}
+				return 1;
+			}
+		}
+	}
+	if (str) {
+		DPRINT("unknown floppy option [%s]\n", str);
+
+		DPRINT("allowed options are:");
+		for (i = 0; i < ARRAY_SIZE(config_params); i++)
+			printk(" %s", config_params[i].name);
+		printk("\n");
+	} else
+		DPRINT("botched floppy option\n");
+	DPRINT("Read Documentation/floppy.txt\n");
+	return 0;
+}
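+
+/* Illustrative boot parameter usage (Documentation/floppy.txt has the
+ * authoritative list): "floppy=thinkpad" sets FD_INVERTED_DCL on all
+ * drives, and "floppy=two_fdc" makes the driver probe a second controller
+ * at I/O port 0x370. */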
+
+static int have_no_fdc = -ENODEV;
+
+static void floppy_device_release(struct device *dev)
+{
+	complete(&device_release);
+}
+
+static struct platform_device floppy_device = {
+	.name		= "floppy",
+	.id		= 0,
+	.dev		= {
+			.release = floppy_device_release,
+			}
+};
+
+static struct kobject *floppy_find(dev_t dev, int *part, void *data)
+{
+	int drive = (*part & 3) | ((*part & 0x80) >> 5);
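+	/* minor layout: bits 0-1 select the drive within an FDC and bit 7
+	 * selects drives 4-7, so e.g. *part == 0x81 decodes to drive 5 */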
+	if (drive >= N_DRIVE ||
+	    !(allowed_drive_mask & (1 << drive)) ||
+	    fdc_state[FDC(drive)].version == FDC_NONE)
+		return NULL;
+	if (((*part >> 2) & 0x1f) >= NUMBER(floppy_type))
+		return NULL;
+	*part = 0;
+	return get_disk(disks[drive]);
+}
+
+static int __init floppy_init(void)
+{
+	int i, unit, drive;
+	int err, dr;
+
+	raw_cmd = NULL;
+
+	for (dr = 0; dr < N_DRIVE; dr++) {
+		disks[dr] = alloc_disk(1);
+		if (!disks[dr]) {
+			err = -ENOMEM;
+			goto out_put_disk;
+		}
+
+		disks[dr]->major = FLOPPY_MAJOR;
+		disks[dr]->first_minor = TOMINOR(dr);
+		disks[dr]->fops = &floppy_fops;
+		sprintf(disks[dr]->disk_name, "fd%d", dr);
+
+		init_timer(&motor_off_timer[dr]);
+		motor_off_timer[dr].data = dr;
+		motor_off_timer[dr].function = motor_off_callback;
+	}
+
+	devfs_mk_dir("floppy");
+
+	err = register_blkdev(FLOPPY_MAJOR, "fd");
+	if (err)
+		goto out_devfs_remove;
+
+	floppy_queue = blk_init_queue(do_fd_request, &floppy_lock);
+	if (!floppy_queue) {
+		err = -ENOMEM;
+		goto out_unreg_blkdev;
+	}
+	blk_queue_max_sectors(floppy_queue, 64);
+
+	blk_register_region(MKDEV(FLOPPY_MAJOR, 0), 256, THIS_MODULE,
+			    floppy_find, NULL, NULL);
+
+	for (i = 0; i < 256; i++)
+		if (ITYPE(i))
+			floppy_sizes[i] = floppy_type[ITYPE(i)].size;
+		else
+			floppy_sizes[i] = MAX_DISK_SIZE << 1;
+
+	reschedule_timeout(MAXTIMEOUT, "floppy init", MAXTIMEOUT);
+	config_types();
+
+	for (i = 0; i < N_FDC; i++) {
+		fdc = i;
+		CLEARSTRUCT(FDCS);
+		FDCS->dtr = -1;
+		FDCS->dor = 0x4;
+#if defined(__sparc__) || defined(__mc68000__)
+		/* sparcs/sun3x don't have a DOR reset which we can fall back on */
+#ifdef __mc68000__
+		if (MACH_IS_SUN3X)
+#endif
+			FDCS->version = FDC_82072A;
+#endif
+	}
+
+	use_virtual_dma = can_use_virtual_dma & 1;
+#if defined(CONFIG_PPC64)
+	if (check_legacy_ioport(FDC1)) {
+		del_timer(&fd_timeout);
+		err = -ENODEV;
+		goto out_unreg_region;
+	}
+#endif
+	fdc_state[0].address = FDC1;
+	if (fdc_state[0].address == -1) {
+		del_timer(&fd_timeout);
+		err = -ENODEV;
+		goto out_unreg_region;
+	}
+#if N_FDC > 1
+	fdc_state[1].address = FDC2;
+#endif
+
+	fdc = 0;		/* reset fdc in case of unexpected interrupt */
+	err = floppy_grab_irq_and_dma();
+	if (err) {
+		del_timer(&fd_timeout);
+		err = -EBUSY;
+		goto out_unreg_region;
+	}
+
+	/* initialise drive state */
+	for (drive = 0; drive < N_DRIVE; drive++) {
+		CLEARSTRUCT(UDRS);
+		CLEARSTRUCT(UDRWE);
+		USETF(FD_DISK_NEWCHANGE);
+		USETF(FD_DISK_CHANGED);
+		USETF(FD_VERIFY);
+		UDRS->fd_device = -1;
+		floppy_track_buffer = NULL;
+		max_buffer_sectors = 0;
+	}
+	/*
+	 * Small 10 msec delay to let through any interrupt that
+	 * initialization might have triggered, so as not to
+	 * confuse detection:
+	 */
+	msleep(10);
+
+	for (i = 0; i < N_FDC; i++) {
+		fdc = i;
+		FDCS->driver_version = FD_DRIVER_VERSION;
+		for (unit = 0; unit < 4; unit++)
+			FDCS->track[unit] = 0;
+		if (FDCS->address == -1)
+			continue;
+		FDCS->rawcmd = 2;
+		if (user_reset_fdc(-1, FD_RESET_ALWAYS, 0)) {
+			/* free ioports reserved by floppy_grab_irq_and_dma() */
+			release_region(FDCS->address + 2, 4);
+			release_region(FDCS->address + 7, 1);
+			FDCS->address = -1;
+			FDCS->version = FDC_NONE;
+			continue;
+		}
+		/* Try to determine the floppy controller type */
+		FDCS->version = get_fdc_version();
+		if (FDCS->version == FDC_NONE) {
+			/* free ioports reserved by floppy_grab_irq_and_dma() */
+			release_region(FDCS->address + 2, 4);
+			release_region(FDCS->address + 7, 1);
+			FDCS->address = -1;
+			continue;
+		}
+		if (can_use_virtual_dma == 2 && FDCS->version < FDC_82072A)
+			can_use_virtual_dma = 0;
+
+		have_no_fdc = 0;
+		/* Not all FDCs seem to be able to handle the version command
+		 * properly, so force a reset for the standard FDC clones,
+		 * to avoid interrupt garbage.
+		 */
+		user_reset_fdc(-1, FD_RESET_ALWAYS, 0);
+	}
+	fdc = 0;
+	del_timer(&fd_timeout);
+	current_drive = 0;
+	floppy_release_irq_and_dma();
+	initialising = 0;
+	if (have_no_fdc) {
+		DPRINT("no floppy controllers found\n");
+		err = have_no_fdc;
+		goto out_flush_work;
+	}
+
+	err = platform_device_register(&floppy_device);
+	if (err)
+		goto out_flush_work;
+
+	for (drive = 0; drive < N_DRIVE; drive++) {
+		if (!(allowed_drive_mask & (1 << drive)))
+			continue;
+		if (fdc_state[FDC(drive)].version == FDC_NONE)
+			continue;
+		/* to be cleaned up... */
+		disks[drive]->private_data = (void *)(long)drive;
+		disks[drive]->queue = floppy_queue;
+		disks[drive]->flags |= GENHD_FL_REMOVABLE;
+		disks[drive]->driverfs_dev = &floppy_device.dev;
+		add_disk(disks[drive]);
+	}
+
+	return 0;
+
+out_flush_work:
+	flush_scheduled_work();
+	if (usage_count)
+		floppy_release_irq_and_dma();
+out_unreg_region:
+	blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
+	blk_cleanup_queue(floppy_queue);
+out_unreg_blkdev:
+	unregister_blkdev(FLOPPY_MAJOR, "fd");
+out_devfs_remove:
+	devfs_remove("floppy");
+out_put_disk:
+	while (dr--) {
+		del_timer(&motor_off_timer[dr]);
+		put_disk(disks[dr]);
+	}
+	return err;
+}
+
+static DEFINE_SPINLOCK(floppy_usage_lock);
+
+static int floppy_grab_irq_and_dma(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&floppy_usage_lock, flags);
+	if (usage_count++) {
+		spin_unlock_irqrestore(&floppy_usage_lock, flags);
+		return 0;
+	}
+	spin_unlock_irqrestore(&floppy_usage_lock, flags);
+	if (fd_request_irq()) {
+		DPRINT("Unable to grab IRQ%d for the floppy driver\n",
+		       FLOPPY_IRQ);
+		spin_lock_irqsave(&floppy_usage_lock, flags);
+		usage_count--;
+		spin_unlock_irqrestore(&floppy_usage_lock, flags);
+		return -1;
+	}
+	if (fd_request_dma()) {
+		DPRINT("Unable to grab DMA%d for the floppy driver\n",
+		       FLOPPY_DMA);
+		fd_free_irq();
+		spin_lock_irqsave(&floppy_usage_lock, flags);
+		usage_count--;
+		spin_unlock_irqrestore(&floppy_usage_lock, flags);
+		return -1;
+	}
+
+	for (fdc = 0; fdc < N_FDC; fdc++) {
+		if (FDCS->address != -1) {
+			if (!request_region(FDCS->address + 2, 4, "floppy")) {
+				DPRINT("Floppy io-port 0x%04lx in use\n",
+				       FDCS->address + 2);
+				goto cleanup1;
+			}
+			if (!request_region(FDCS->address + 7, 1, "floppy DIR")) {
+				DPRINT("Floppy io-port 0x%04lx in use\n",
+				       FDCS->address + 7);
+				goto cleanup2;
+			}
+			/* address + 6 is reserved, and may be taken by IDE.
+			 * Unfortunately, Adaptec doesn't know this :-( */
+		}
+	}
+	for (fdc = 0; fdc < N_FDC; fdc++) {
+		if (FDCS->address != -1) {
+			reset_fdc_info(1);
+			fd_outb(FDCS->dor, FD_DOR);
+		}
+	}
+	fdc = 0;
+	set_dor(0, ~0, 8);	/* avoid immediate interrupt */
+
+	for (fdc = 0; fdc < N_FDC; fdc++)
+		if (FDCS->address != -1)
+			fd_outb(FDCS->dor, FD_DOR);
+	/*
+	 *      The driver will try to free resources and relies on us
+	 *      to know if they were allocated or not.
+	 */
+	fdc = 0;
+	irqdma_allocated = 1;
+	return 0;
+cleanup2:
+	release_region(FDCS->address + 2, 4);
+cleanup1:
+	fd_free_irq();
+	fd_free_dma();
+	while (--fdc >= 0) {
+		release_region(FDCS->address + 2, 4);
+		release_region(FDCS->address + 7, 1);
+	}
+	spin_lock_irqsave(&floppy_usage_lock, flags);
+	usage_count--;
+	spin_unlock_irqrestore(&floppy_usage_lock, flags);
+	return -1;
+}
+
+static void floppy_release_irq_and_dma(void)
+{
+	int old_fdc;
+#ifdef FLOPPY_SANITY_CHECK
+#ifndef __sparc__
+	int drive;
+#endif
+#endif
+	long tmpsize;
+	unsigned long tmpaddr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&floppy_usage_lock, flags);
+	if (--usage_count) {
+		spin_unlock_irqrestore(&floppy_usage_lock, flags);
+		return;
+	}
+	spin_unlock_irqrestore(&floppy_usage_lock, flags);
+	if (irqdma_allocated) {
+		fd_disable_dma();
+		fd_free_dma();
+		fd_free_irq();
+		irqdma_allocated = 0;
+	}
+	set_dor(0, ~0, 8);
+#if N_FDC > 1
+	set_dor(1, ~8, 0);
+#endif
+	floppy_enable_hlt();
+
+	if (floppy_track_buffer && max_buffer_sectors) {
+		tmpsize = max_buffer_sectors * 1024;
+		tmpaddr = (unsigned long)floppy_track_buffer;
+		floppy_track_buffer = NULL;
+		max_buffer_sectors = 0;
+		buffer_min = buffer_max = -1;
+		fd_dma_mem_free(tmpaddr, tmpsize);
+	}
+#ifdef FLOPPY_SANITY_CHECK
+#ifndef __sparc__
+	for (drive = 0; drive < N_FDC * 4; drive++)
+		if (timer_pending(motor_off_timer + drive))
+			printk("motor off timer %d still active\n", drive);
+#endif
+
+	if (timer_pending(&fd_timeout))
+		printk("floppy timer still active:%s\n", timeout_message);
+	if (timer_pending(&fd_timer))
+		printk("auxiliary floppy timer still active\n");
+	if (floppy_work.pending)
+		printk("work still pending\n");
+#endif
+	old_fdc = fdc;
+	for (fdc = 0; fdc < N_FDC; fdc++)
+		if (FDCS->address != -1) {
+			release_region(FDCS->address + 2, 4);
+			release_region(FDCS->address + 7, 1);
+		}
+	fdc = old_fdc;
+}
+
+#ifdef MODULE
+
+static char *floppy;
+
+static void unregister_devfs_entries(int drive)
+{
+	int i;
+
+	if (UDP->cmos < NUMBER(default_drive_params)) {
+		i = 0;
+		do {
+			devfs_remove("floppy/%d%s", drive,
+				     table[table_sup[UDP->cmos][i]]);
+		} while (table_sup[UDP->cmos][i++]);
+	}
+}
+
+static void __init parse_floppy_cfg_string(char *cfg)
+{
+	char *ptr;
+
+	while (*cfg) {
+		for (ptr = cfg; *cfg && *cfg != ' ' && *cfg != '\t'; cfg++) ;
+		if (*cfg) {
+			*cfg = '\0';
+			cfg++;
+		}
+		if (*ptr)
+			floppy_setup(ptr);
+	}
+}
+
+int init_module(void)
+{
+	if (floppy)
+		parse_floppy_cfg_string(floppy);
+	return floppy_init();
+}
+
+void cleanup_module(void)
+{
+	int drive;
+
+	init_completion(&device_release);
+	blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
+	unregister_blkdev(FLOPPY_MAJOR, "fd");
+
+	for (drive = 0; drive < N_DRIVE; drive++) {
+		del_timer_sync(&motor_off_timer[drive]);
+
+		if ((allowed_drive_mask & (1 << drive)) &&
+		    fdc_state[FDC(drive)].version != FDC_NONE) {
+			del_gendisk(disks[drive]);
+			unregister_devfs_entries(drive);
+		}
+		put_disk(disks[drive]);
+	}
+	platform_device_unregister(&floppy_device);
+	devfs_remove("floppy");
+
+	del_timer_sync(&fd_timeout);
+	del_timer_sync(&fd_timer);
+	blk_cleanup_queue(floppy_queue);
+
+	if (usage_count)
+		floppy_release_irq_and_dma();
+
+	/* eject disk, if any */
+	fd_eject(0);
+
+	wait_for_completion(&device_release);
+}
+
+module_param(floppy, charp, 0);
+module_param(FLOPPY_IRQ, int, 0);
+module_param(FLOPPY_DMA, int, 0);
+MODULE_AUTHOR("Alain L. Knaff");
+MODULE_SUPPORTED_DEVICE("fd");
+MODULE_LICENSE("GPL");
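+
+/*
+ * Load-time usage sketch (editor's note, not part of the original
+ * source): with the parameters above the module can be loaded as e.g.
+ *
+ *	modprobe floppy floppy="two_fdc" FLOPPY_IRQ=6 FLOPPY_DMA=2
+ *
+ * the option string being handed to parse_floppy_cfg_string() by
+ * init_module() above.
+ */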
+
+#else
+
+__setup("floppy=", floppy_setup);
+module_init(floppy_init)
+#endif
+
+MODULE_ALIAS_BLOCKDEV_MAJOR(FLOPPY_MAJOR);
diff --git a/drivers/block/genhd.c b/drivers/block/genhd.c
new file mode 100644
index 0000000..ab4db71
--- /dev/null
+++ b/drivers/block/genhd.c
@@ -0,0 +1,685 @@
+/*
+ *  gendisk handling
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/genhd.h>
+#include <linux/kernel.h>
+#include <linux/blkdev.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/kmod.h>
+#include <linux/kobj_map.h>
+
+#define MAX_PROBE_HASH 255	/* random */
+
+static struct subsystem block_subsys;
+
+static DECLARE_MUTEX(block_subsys_sem);
+
+/*
+ * Can be deleted altogether. Later.
+ */
+static struct blk_major_name {
+	struct blk_major_name *next;
+	int major;
+	char name[16];
+} *major_names[MAX_PROBE_HASH];
+
+/* index in the above - for now: assume no multimajor ranges */
+static inline int major_to_index(int major)
+{
+	return major % MAX_PROBE_HASH;
+}
+
+#ifdef CONFIG_PROC_FS
+/* get block device names in somewhat random order */
+int get_blkdev_list(char *p)
+{
+	struct blk_major_name *n;
+	int i, len;
+
+	len = sprintf(p, "\nBlock devices:\n");
+
+	down(&block_subsys_sem);
+	for (i = 0; i < ARRAY_SIZE(major_names); i++) {
+		for (n = major_names[i]; n; n = n->next)
+			len += sprintf(p+len, "%3d %s\n",
+				       n->major, n->name);
+	}
+	up(&block_subsys_sem);
+
+	return len;
+}
+#endif
+
+int register_blkdev(unsigned int major, const char *name)
+{
+	struct blk_major_name **n, *p;
+	int index, ret = 0;
+
+	down(&block_subsys_sem);
+
+	/* temporary */
+	if (major == 0) {
+		for (index = ARRAY_SIZE(major_names)-1; index > 0; index--) {
+			if (major_names[index] == NULL)
+				break;
+		}
+
+		if (index == 0) {
+			printk("register_blkdev: failed to get major for %s\n",
+			       name);
+			ret = -EBUSY;
+			goto out;
+		}
+		major = index;
+		ret = major;
+	}
+
+	p = kmalloc(sizeof(struct blk_major_name), GFP_KERNEL);
+	if (p == NULL) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	p->major = major;
+	strlcpy(p->name, name, sizeof(p->name));
+	p->next = NULL;
+	index = major_to_index(major);
+
+	for (n = &major_names[index]; *n; n = &(*n)->next) {
+		if ((*n)->major == major)
+			break;
+	}
+	if (!*n)
+		*n = p;
+	else
+		ret = -EBUSY;
+
+	if (ret < 0) {
+		printk("register_blkdev: cannot get major %d for %s\n",
+		       major, name);
+		kfree(p);
+	}
+out:
+	up(&block_subsys_sem);
+	return ret;
+}
+
+EXPORT_SYMBOL(register_blkdev);
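+
+/*
+ * Usage sketch (editor's example; "mydev" and the helper are
+ * hypothetical): passing major == 0 requests a dynamically allocated
+ * major number, which is returned on success.
+ */
+#if 0
+static int mydev_major;
+
+static int __init mydev_register(void)
+{
+	mydev_major = register_blkdev(0, "mydev");	/* dynamic major */
+	if (mydev_major < 0)
+		return mydev_major;
+	return 0;
+}
+#endif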
+
+/* todo: make void - error printk here */
+int unregister_blkdev(unsigned int major, const char *name)
+{
+	struct blk_major_name **n;
+	struct blk_major_name *p = NULL;
+	int index = major_to_index(major);
+	int ret = 0;
+
+	down(&block_subsys_sem);
+	for (n = &major_names[index]; *n; n = &(*n)->next)
+		if ((*n)->major == major)
+			break;
+	if (!*n || strcmp((*n)->name, name))
+		ret = -EINVAL;
+	else {
+		p = *n;
+		*n = p->next;
+	}
+	up(&block_subsys_sem);
+	kfree(p);
+
+	return ret;
+}
+
+EXPORT_SYMBOL(unregister_blkdev);
+
+static struct kobj_map *bdev_map;
+
+/*
+ * Register device numbers dev..(dev+range-1)
+ * range must be nonzero
+ * The hash chain is sorted on range, so that subranges can override.
+ */
+void blk_register_region(dev_t dev, unsigned long range, struct module *module,
+			 struct kobject *(*probe)(dev_t, int *, void *),
+			 int (*lock)(dev_t, void *), void *data)
+{
+	kobj_map(bdev_map, dev, range, module, probe, lock, data);
+}
+
+EXPORT_SYMBOL(blk_register_region);
+
+void blk_unregister_region(dev_t dev, unsigned long range)
+{
+	kobj_unmap(bdev_map, dev, range);
+}
+
+EXPORT_SYMBOL(blk_unregister_region);
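+
+/*
+ * Usage sketch (editor's example; my_probe and MY_MAJOR are
+ * hypothetical): a driver that creates disks lazily can cover a whole
+ * minor range with a probe callback instead of registering every disk
+ * up front.
+ */
+#if 0
+static struct kobject *my_probe(dev_t dev, int *part, void *data)
+{
+	/* look up (or create) the gendisk for MINOR(dev) and return
+	 * its kobject; NULL means the device does not exist */
+	return NULL;
+}
+
+static void __init my_register_region(void)
+{
+	blk_register_region(MKDEV(MY_MAJOR, 0), 256, THIS_MODULE,
+			    my_probe, NULL, NULL);
+}
+#endif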
+
+static struct kobject *exact_match(dev_t dev, int *part, void *data)
+{
+	struct gendisk *p = data;
+	return &p->kobj;
+}
+
+static int exact_lock(dev_t dev, void *data)
+{
+	struct gendisk *p = data;
+
+	if (!get_disk(p))
+		return -1;
+	return 0;
+}
+
+/**
+ * add_disk - add partitioning information to kernel list
+ * @disk: per-device partitioning information
+ *
+ * This function registers the partitioning information in @disk
+ * with the kernel.
+ */
+void add_disk(struct gendisk *disk)
+{
+	disk->flags |= GENHD_FL_UP;
+	blk_register_region(MKDEV(disk->major, disk->first_minor),
+			    disk->minors, NULL, exact_match, exact_lock, disk);
+	register_disk(disk);
+	blk_register_queue(disk);
+}
+
+EXPORT_SYMBOL(add_disk);
+EXPORT_SYMBOL(del_gendisk);	/* in partitions/check.c */
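+
+/*
+ * Typical caller sequence (editor's sketch; MY_MAJOR, my_fops, my_queue
+ * and nr_sectors are hypothetical):
+ */
+#if 0
+static int my_attach_disk(void)
+{
+	struct gendisk *gd = alloc_disk(16);	/* disk + 15 partitions */
+
+	if (!gd)
+		return -ENOMEM;
+	gd->major = MY_MAJOR;
+	gd->first_minor = 0;
+	gd->fops = &my_fops;
+	gd->queue = my_queue;
+	sprintf(gd->disk_name, "myd");
+	set_capacity(gd, nr_sectors);
+	add_disk(gd);
+	return 0;
+}
+#endif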
+
+void unlink_gendisk(struct gendisk *disk)
+{
+	blk_unregister_queue(disk);
+	blk_unregister_region(MKDEV(disk->major, disk->first_minor),
+			      disk->minors);
+}
+
+#define to_disk(obj) container_of(obj,struct gendisk,kobj)
+
+/**
+ * get_gendisk - get partitioning information for a given device
+ * @dev: device to get partitioning information for
+ *
+ * This function gets the structure containing partitioning
+ * information for the given device @dev.
+ */
+struct gendisk *get_gendisk(dev_t dev, int *part)
+{
+	struct kobject *kobj = kobj_lookup(bdev_map, dev, part);
+	return kobj ? to_disk(kobj) : NULL;
+}
+
+#ifdef CONFIG_PROC_FS
+/* iterator */
+static void *part_start(struct seq_file *part, loff_t *pos)
+{
+	struct list_head *p;
+	loff_t l = *pos;
+
+	down(&block_subsys_sem);
+	list_for_each(p, &block_subsys.kset.list)
+		if (!l--)
+			return list_entry(p, struct gendisk, kobj.entry);
+	return NULL;
+}
+
+static void *part_next(struct seq_file *part, void *v, loff_t *pos)
+{
+	struct list_head *p = ((struct gendisk *)v)->kobj.entry.next;
+	++*pos;
+	return p==&block_subsys.kset.list ? NULL : 
+		list_entry(p, struct gendisk, kobj.entry);
+}
+
+static void part_stop(struct seq_file *part, void *v)
+{
+	up(&block_subsys_sem);
+}
+
+static int show_partition(struct seq_file *part, void *v)
+{
+	struct gendisk *sgp = v;
+	int n;
+	char buf[BDEVNAME_SIZE];
+
+	if (&sgp->kobj.entry == block_subsys.kset.list.next)
+		seq_puts(part, "major minor  #blocks  name\n\n");
+
+	/* Don't show non-partitionable removable devices or empty devices */
+	if (!get_capacity(sgp) ||
+			(sgp->minors == 1 && (sgp->flags & GENHD_FL_REMOVABLE)))
+		return 0;
+	if (sgp->flags & GENHD_FL_SUPPRESS_PARTITION_INFO)
+		return 0;
+
+	/* show the full disk and all non-0 size partitions of it */
+	seq_printf(part, "%4d  %4d %10llu %s\n",
+		sgp->major, sgp->first_minor,
+		(unsigned long long)get_capacity(sgp) >> 1,
+		disk_name(sgp, 0, buf));
+	for (n = 0; n < sgp->minors - 1; n++) {
+		if (!sgp->part[n])
+			continue;
+		if (sgp->part[n]->nr_sects == 0)
+			continue;
+		seq_printf(part, "%4d  %4d %10llu %s\n",
+			sgp->major, n + 1 + sgp->first_minor,
+			(unsigned long long)sgp->part[n]->nr_sects >> 1,
+			disk_name(sgp, n + 1, buf));
+	}
+
+	return 0;
+}
+
+struct seq_operations partitions_op = {
+	.start	= part_start,
+	.next	= part_next,
+	.stop	= part_stop,
+	.show	= show_partition
+};
+#endif
+
+
+extern int blk_dev_init(void);
+
+static struct kobject *base_probe(dev_t dev, int *part, void *data)
+{
+	if (request_module("block-major-%d-%d", MAJOR(dev), MINOR(dev)) > 0)
+		/* Make old-style 2.4 aliases work */
+		request_module("block-major-%d", MAJOR(dev));
+	return NULL;
+}
+
+static int __init genhd_device_init(void)
+{
+	bdev_map = kobj_map_init(base_probe, &block_subsys_sem);
+	blk_dev_init();
+	subsystem_register(&block_subsys);
+	return 0;
+}
+
+subsys_initcall(genhd_device_init);
+
+
+
+/*
+ * kobject & sysfs bindings for block devices
+ */
+static ssize_t disk_attr_show(struct kobject *kobj, struct attribute *attr,
+			      char *page)
+{
+	struct gendisk *disk = to_disk(kobj);
+	struct disk_attribute *disk_attr =
+		container_of(attr,struct disk_attribute,attr);
+	ssize_t ret = 0;
+
+	if (disk_attr->show)
+		ret = disk_attr->show(disk,page);
+	return ret;
+}
+
+static struct sysfs_ops disk_sysfs_ops = {
+	.show	= &disk_attr_show,
+};
+
+static ssize_t disk_dev_read(struct gendisk * disk, char *page)
+{
+	dev_t base = MKDEV(disk->major, disk->first_minor); 
+	return print_dev_t(page, base);
+}
+static ssize_t disk_range_read(struct gendisk * disk, char *page)
+{
+	return sprintf(page, "%d\n", disk->minors);
+}
+static ssize_t disk_removable_read(struct gendisk * disk, char *page)
+{
+	return sprintf(page, "%d\n",
+		       (disk->flags & GENHD_FL_REMOVABLE ? 1 : 0));
+}
+static ssize_t disk_size_read(struct gendisk * disk, char *page)
+{
+	return sprintf(page, "%llu\n", (unsigned long long)get_capacity(disk));
+}
+
+static ssize_t disk_stats_read(struct gendisk * disk, char *page)
+{
+	preempt_disable();
+	disk_round_stats(disk);
+	preempt_enable();
+	return sprintf(page,
+		"%8u %8u %8llu %8u "
+		"%8u %8u %8llu %8u "
+		"%8u %8u %8u"
+		"\n",
+		disk_stat_read(disk, reads), disk_stat_read(disk, read_merges),
+		(unsigned long long)disk_stat_read(disk, read_sectors),
+		jiffies_to_msecs(disk_stat_read(disk, read_ticks)),
+		disk_stat_read(disk, writes), 
+		disk_stat_read(disk, write_merges),
+		(unsigned long long)disk_stat_read(disk, write_sectors),
+		jiffies_to_msecs(disk_stat_read(disk, write_ticks)),
+		disk->in_flight,
+		jiffies_to_msecs(disk_stat_read(disk, io_ticks)),
+		jiffies_to_msecs(disk_stat_read(disk, time_in_queue)));
+}
+static struct disk_attribute disk_attr_dev = {
+	.attr = {.name = "dev", .mode = S_IRUGO },
+	.show	= disk_dev_read
+};
+static struct disk_attribute disk_attr_range = {
+	.attr = {.name = "range", .mode = S_IRUGO },
+	.show	= disk_range_read
+};
+static struct disk_attribute disk_attr_removable = {
+	.attr = {.name = "removable", .mode = S_IRUGO },
+	.show	= disk_removable_read
+};
+static struct disk_attribute disk_attr_size = {
+	.attr = {.name = "size", .mode = S_IRUGO },
+	.show	= disk_size_read
+};
+static struct disk_attribute disk_attr_stat = {
+	.attr = {.name = "stat", .mode = S_IRUGO },
+	.show	= disk_stats_read
+};
+
+static struct attribute * default_attrs[] = {
+	&disk_attr_dev.attr,
+	&disk_attr_range.attr,
+	&disk_attr_removable.attr,
+	&disk_attr_size.attr,
+	&disk_attr_stat.attr,
+	NULL,
+};
+
+static void disk_release(struct kobject * kobj)
+{
+	struct gendisk *disk = to_disk(kobj);
+	kfree(disk->random);
+	kfree(disk->part);
+	free_disk_stats(disk);
+	kfree(disk);
+}
+
+static struct kobj_type ktype_block = {
+	.release	= disk_release,
+	.sysfs_ops	= &disk_sysfs_ops,
+	.default_attrs	= default_attrs,
+};
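+
+/*
+ * Resulting sysfs layout (editor's note): every disk registered via
+ * add_disk() shows up as /sys/block/<name>/ with the read-only files
+ * dev, range, removable, size and stat defined above, e.g.
+ *
+ *	$ cat /sys/block/hda/dev
+ *	3:0
+ */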
+
+extern struct kobj_type ktype_part;
+
+static int block_hotplug_filter(struct kset *kset, struct kobject *kobj)
+{
+	struct kobj_type *ktype = get_ktype(kobj);
+
+	return ((ktype == &ktype_block) || (ktype == &ktype_part));
+}
+
+static int block_hotplug(struct kset *kset, struct kobject *kobj, char **envp,
+			 int num_envp, char *buffer, int buffer_size)
+{
+	struct kobj_type *ktype = get_ktype(kobj);
+	struct device *physdev;
+	struct gendisk *disk;
+	struct hd_struct *part;
+	int length = 0;
+	int i = 0;
+
+	if (ktype == &ktype_block) {
+		disk = container_of(kobj, struct gendisk, kobj);
+		add_hotplug_env_var(envp, num_envp, &i, buffer, buffer_size,
+				    &length, "MINOR=%u", disk->first_minor);
+	} else if (ktype == &ktype_part) {
+		disk = container_of(kobj->parent, struct gendisk, kobj);
+		part = container_of(kobj, struct hd_struct, kobj);
+		add_hotplug_env_var(envp, num_envp, &i, buffer, buffer_size,
+				    &length, "MINOR=%u",
+				    disk->first_minor + part->partno);
+	} else
+		return 0;
+
+	add_hotplug_env_var(envp, num_envp, &i, buffer, buffer_size, &length,
+			    "MAJOR=%u", disk->major);
+
+	/* add physical device, backing this device  */
+	physdev = disk->driverfs_dev;
+	if (physdev) {
+		char *path = kobject_get_path(&physdev->kobj, GFP_KERNEL);
+
+		add_hotplug_env_var(envp, num_envp, &i, buffer, buffer_size,
+				    &length, "PHYSDEVPATH=%s", path);
+		kfree(path);
+
+		if (physdev->bus)
+			add_hotplug_env_var(envp, num_envp, &i,
+					    buffer, buffer_size, &length,
+					    "PHYSDEVBUS=%s",
+					    physdev->bus->name);
+
+		if (physdev->driver)
+			add_hotplug_env_var(envp, num_envp, &i,
+					    buffer, buffer_size, &length,
+					    "PHYSDEVDRIVER=%s",
+					    physdev->driver->name);
+	}
+
+	/* terminate, set to next free slot, shrink available space */
+	envp[i] = NULL;
+	envp = &envp[i];
+	num_envp -= i;
+	buffer = &buffer[length];
+	buffer_size -= length;
+
+	return 0;
+}
+
+static struct kset_hotplug_ops block_hotplug_ops = {
+	.filter		= block_hotplug_filter,
+	.hotplug	= block_hotplug,
+};
+
+/* declare block_subsys. */
+static decl_subsys(block, &ktype_block, &block_hotplug_ops);
+
+
+/*
+ * aggregate disk stat collector.  Uses the same stats that the sysfs
+ * entries do, above, but makes them available through one seq_file.
+ * Watching a few disks may be efficient through sysfs, but watching
+ * all of them will be more efficient through this interface.
+ *
+ * The output looks suspiciously like /proc/partitions with a bunch of
+ * extra fields.
+ */
+
+/* iterator */
+static void *diskstats_start(struct seq_file *part, loff_t *pos)
+{
+	loff_t k = *pos;
+	struct list_head *p;
+
+	down(&block_subsys_sem);
+	list_for_each(p, &block_subsys.kset.list)
+		if (!k--)
+			return list_entry(p, struct gendisk, kobj.entry);
+	return NULL;
+}
+
+static void *diskstats_next(struct seq_file *part, void *v, loff_t *pos)
+{
+	struct list_head *p = ((struct gendisk *)v)->kobj.entry.next;
+	++*pos;
+	return p==&block_subsys.kset.list ? NULL :
+		list_entry(p, struct gendisk, kobj.entry);
+}
+
+static void diskstats_stop(struct seq_file *part, void *v)
+{
+	up(&block_subsys_sem);
+}
+
+static int diskstats_show(struct seq_file *s, void *v)
+{
+	struct gendisk *gp = v;
+	char buf[BDEVNAME_SIZE];
+	int n = 0;
+
+	/*
+	if (&sgp->kobj.entry == block_subsys.kset.list.next)
+		seq_puts(s,	"major minor name"
+				"     rio rmerge rsect ruse wio wmerge "
+				"wsect wuse running use aveq"
+				"\n\n");
+	*/
+ 
+	preempt_disable();
+	disk_round_stats(gp);
+	preempt_enable();
+	seq_printf(s, "%4d %4d %s %u %u %llu %u %u %u %llu %u %u %u %u\n",
+		gp->major, n + gp->first_minor, disk_name(gp, n, buf),
+		disk_stat_read(gp, reads), disk_stat_read(gp, read_merges),
+		(unsigned long long)disk_stat_read(gp, read_sectors),
+		jiffies_to_msecs(disk_stat_read(gp, read_ticks)),
+		disk_stat_read(gp, writes), disk_stat_read(gp, write_merges),
+		(unsigned long long)disk_stat_read(gp, write_sectors),
+		jiffies_to_msecs(disk_stat_read(gp, write_ticks)),
+		gp->in_flight,
+		jiffies_to_msecs(disk_stat_read(gp, io_ticks)),
+		jiffies_to_msecs(disk_stat_read(gp, time_in_queue)));
+
+	/* now show all non-0 size partitions of it */
+	for (n = 0; n < gp->minors - 1; n++) {
+		struct hd_struct *hd = gp->part[n];
+
+		if (hd && hd->nr_sects)
+			seq_printf(s, "%4d %4d %s %u %u %u %u\n",
+				gp->major, n + gp->first_minor + 1,
+				disk_name(gp, n + 1, buf),
+				hd->reads, hd->read_sectors,
+				hd->writes, hd->write_sectors);
+	}
+ 
+	return 0;
+}
+
+struct seq_operations diskstats_op = {
+	.start	= diskstats_start,
+	.next	= diskstats_next,
+	.stop	= diskstats_stop,
+	.show	= diskstats_show
+};
+
+
+struct gendisk *alloc_disk(int minors)
+{
+	struct gendisk *disk = kmalloc(sizeof(struct gendisk), GFP_KERNEL);
+	if (disk) {
+		memset(disk, 0, sizeof(struct gendisk));
+		if (!init_disk_stats(disk)) {
+			kfree(disk);
+			return NULL;
+		}
+		if (minors > 1) {
+			int size = (minors - 1) * sizeof(struct hd_struct *);
+			disk->part = kmalloc(size, GFP_KERNEL);
+			if (!disk->part) {
+				kfree(disk);
+				return NULL;
+			}
+			memset(disk->part, 0, size);
+		}
+		disk->minors = minors;
+		kobj_set_kset_s(disk,block_subsys);
+		kobject_init(&disk->kobj);
+		rand_initialize_disk(disk);
+	}
+	return disk;
+}
+
+EXPORT_SYMBOL(alloc_disk);
+
+struct kobject *get_disk(struct gendisk *disk)
+{
+	struct module *owner;
+	struct kobject *kobj;
+
+	if (!disk->fops)
+		return NULL;
+	owner = disk->fops->owner;
+	if (owner && !try_module_get(owner))
+		return NULL;
+	kobj = kobject_get(&disk->kobj);
+	if (kobj == NULL) {
+		module_put(owner);
+		return NULL;
+	}
+	return kobj;
+}
+
+EXPORT_SYMBOL(get_disk);
+
+void put_disk(struct gendisk *disk)
+{
+	if (disk)
+		kobject_put(&disk->kobj);
+}
+
+EXPORT_SYMBOL(put_disk);
+
+void set_device_ro(struct block_device *bdev, int flag)
+{
+	if (bdev->bd_contains != bdev)
+		bdev->bd_part->policy = flag;
+	else
+		bdev->bd_disk->policy = flag;
+}
+
+EXPORT_SYMBOL(set_device_ro);
+
+void set_disk_ro(struct gendisk *disk, int flag)
+{
+	int i;
+	disk->policy = flag;
+	for (i = 0; i < disk->minors - 1; i++)
+		if (disk->part[i])
+			disk->part[i]->policy = flag;
+}
+
+EXPORT_SYMBOL(set_disk_ro);
+
+int bdev_read_only(struct block_device *bdev)
+{
+	if (!bdev)
+		return 0;
+	else if (bdev->bd_contains != bdev)
+		return bdev->bd_part->policy;
+	else
+		return bdev->bd_disk->policy;
+}
+
+EXPORT_SYMBOL(bdev_read_only);
+
+int invalidate_partition(struct gendisk *disk, int index)
+{
+	int res = 0;
+	struct block_device *bdev = bdget_disk(disk, index);
+	if (bdev) {
+		res = __invalidate_device(bdev, 1);
+		bdput(bdev);
+	}
+	return res;
+}
+
+EXPORT_SYMBOL(invalidate_partition);
diff --git a/drivers/block/ida_cmd.h b/drivers/block/ida_cmd.h
new file mode 100644
index 0000000..98b5746
--- /dev/null
+++ b/drivers/block/ida_cmd.h
@@ -0,0 +1,349 @@
+/*
+ *    Disk Array driver for Compaq SMART2 Controllers
+ *    Copyright 1998 Compaq Computer Corporation
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2 of the License, or
+ *    (at your option) any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *    NON INFRINGEMENT.  See the GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
+ *
+ */
+#ifndef ARRAYCMD_H
+#define ARRAYCMD_H
+
+#include <asm/types.h>
+#if 0
+#include <linux/blkdev.h>
+#endif
+
+/* for the Smart Array 42XX cards */
+#define S42XX_REQUEST_PORT_OFFSET	0x40
+#define S42XX_REPLY_INTR_MASK_OFFSET	0x34
+#define S42XX_REPLY_PORT_OFFSET		0x44
+#define S42XX_INTR_STATUS		0x30
+
+#define S42XX_INTR_OFF		0x08
+#define S42XX_INTR_PENDING	0x08
+
+#define COMMAND_FIFO		0x04
+#define COMMAND_COMPLETE_FIFO	0x08
+#define INTR_MASK		0x0C
+#define INTR_STATUS		0x10
+#define INTR_PENDING		0x14
+
+#define FIFO_NOT_EMPTY		0x01
+#define FIFO_NOT_FULL		0x02
+
+#define BIG_PROBLEM		0x40
+#define LOG_NOT_CONF		2
+
+#pragma pack(1)
+typedef struct {
+	__u32	size;
+	__u32	addr;
+} sg_t;
+
+#define RCODE_NONFATAL	0x02
+#define RCODE_FATAL	0x04
+#define RCODE_INVREQ	0x10
+typedef struct {
+	__u16	next;
+	__u8	cmd;
+	__u8	rcode;
+	__u32	blk;
+	__u16	blk_cnt;
+	__u8	sg_cnt;
+	__u8	reserved;
+} rhdr_t;
+
+#define SG_MAX			32
+typedef struct {
+	rhdr_t	hdr;
+	sg_t	sg[SG_MAX];
+	__u32	bp;
+} rblk_t;
+
+typedef struct {
+	__u8	unit;
+	__u8	prio;
+	__u16	size;
+} chdr_t;
+
+#define CMD_RWREQ	0x00
+#define CMD_IOCTL_PEND	0x01
+#define CMD_IOCTL_DONE	0x02
+
+typedef struct cmdlist {
+	chdr_t	hdr;
+	rblk_t	req;
+	__u32	size;
+	int	retry_cnt;
+	__u32	busaddr;
+	int	ctlr;
+	struct cmdlist *prev;
+	struct cmdlist *next;
+	struct request *rq;
+	int type;
+} cmdlist_t;
+	
+#define ID_CTLR		0x11
+typedef struct {
+	__u8	nr_drvs;
+	__u32	cfg_sig;
+	__u8	firm_rev[4];
+	__u8	rom_rev[4];
+	__u8	hw_rev;
+	__u32	bb_rev;
+	__u32	drv_present_map;
+	__u32	ext_drv_map;
+	__u32	board_id;
+	__u8	cfg_error;
+	__u32	non_disk_bits;
+	__u8	bad_ram_addr;
+	__u8	cpu_rev;
+	__u8	pdpi_rev;
+	__u8	epic_rev;
+	__u8	wcxc_rev;
+	__u8	marketing_rev;
+	__u8	ctlr_flags;
+	__u8	host_flags;
+	__u8	expand_dis;
+	__u8	scsi_chips;
+	__u32	max_req_blocks;
+	__u32	ctlr_clock;
+	__u8	drvs_per_bus;
+	__u16	big_drv_present_map[8];
+	__u16	big_ext_drv_map[8];
+	__u16	big_non_disk_map[8];
+	__u16	task_flags;
+	__u8	icl_bus;
+	__u8	red_modes;
+	__u8	cur_red_mode;
+	__u8	red_ctlr_stat;
+	__u8	red_fail_reason;
+	__u8	reserved[403];
+} id_ctlr_t;
+
+typedef struct {
+	__u16	cyl;
+	__u8	heads;
+	__u8	xsig;
+	__u8	psectors;
+	__u16	wpre;
+	__u8	maxecc;
+	__u8	drv_ctrl;
+	__u16	pcyls;
+	__u8	pheads;
+	__u16	landz;
+	__u8	sect_per_track;
+	__u8	cksum;
+} drv_param_t;
+
+#define ID_LOG_DRV	0x10
+typedef struct {
+	__u16	blk_size;
+	__u32	nr_blks;
+	drv_param_t drv;
+	__u8	fault_tol;
+	__u8	reserved;
+	__u8	bios_disable;
+} id_log_drv_t;
+
+#define ID_LOG_DRV_EXT	0x18
+typedef struct {
+	__u32	log_drv_id;
+	__u8	log_drv_label[64];
+	__u8	reserved[418];
+} id_log_drv_ext_t;
+
+#define SENSE_LOG_DRV_STAT	0x12
+typedef struct {
+	__u8	status;
+	__u32	fail_map;
+	__u16	read_err[32];
+	__u16	write_err[32];
+	__u8	drv_err_data[256];
+	__u8	drq_timeout[32];
+	__u32	blks_to_recover;
+	__u8	drv_recovering;
+	__u16	remap_cnt[32];
+	__u32	replace_drv_map;
+	__u32	act_spare_map;
+	__u8	spare_stat;
+	__u8	spare_repl_map[32];
+	__u32	repl_ok_map;
+	__u8	media_exch;
+	__u8	cache_fail;
+	__u8	expn_fail;
+	__u8	unit_flags;
+	__u16	big_fail_map[8];
+	__u16	big_remap_map[128];
+	__u16	big_repl_map[8];
+	__u16	big_act_spare_map[8];
+	__u8	big_spar_repl_map[128];
+	__u16	big_repl_ok_map[8];
+	__u8	big_drv_rebuild;
+	__u8	reserved[36];
+} sense_log_drv_stat_t;
+
+#define START_RECOVER		0x13
+
+#define ID_PHYS_DRV		0x15
+typedef struct {
+	__u8	scsi_bus;
+	__u8	scsi_id;
+	__u16	blk_size;
+	__u32	nr_blks;
+	__u32	rsvd_blks;
+	__u8	drv_model[40];
+	__u8	drv_sn[40];
+	__u8	drv_fw[8];
+	__u8	scsi_iq_bits;
+	__u8	compaq_drv_stmp;
+	__u8	last_fail;
+	__u8	phys_drv_flags;
+	__u8	phys_drv_flags1;
+	__u8	scsi_lun;
+	__u8	phys_drv_flags2;
+	__u8	reserved;
+	__u32	spi_speed_rules;
+	__u8	phys_connector[2];
+	__u8	phys_box_on_bus;
+	__u8	phys_bay_in_box;
+} id_phys_drv_t;
+
+#define BLINK_DRV_LEDS		0x16
+typedef struct {
+	__u32	blink_duration;
+	__u32	reserved;
+	__u8	blink[256];
+	__u8	reserved1[248];
+} blink_drv_leds_t;
+
+#define SENSE_BLINK_LEDS	0x17
+typedef struct {
+	__u32	blink_duration;
+	__u32	btime_elap;
+	__u8	blink[256];
+	__u8	reserved1[248];
+} sense_blink_leds_t;
+
+#define IDA_READ		0x20
+#define IDA_WRITE		0x30
+#define IDA_WRITE_MEDIA		0x31
+#define RESET_TO_DIAG		0x40
+#define DIAG_PASS_THRU		0x41
+
+#define SENSE_CONFIG		0x50
+#define SET_CONFIG		0x51
+typedef struct {
+	__u32	cfg_sig;
+	__u16	compat_port;
+	__u8	data_dist_mode;
+	__u8	surf_an_ctrl;
+	__u16	ctlr_phys_drv;
+	__u16	log_unit_phys_drv;
+	__u16	fault_tol_mode;
+	__u8	phys_drv_param[16];
+	drv_param_t drv;
+	__u32	drv_asgn_map;
+	__u16	dist_factor;
+	__u32	spare_asgn_map;
+	__u8	reserved[6];
+	__u16	os;
+	__u8	ctlr_order;
+	__u8	extra_info;
+	__u32	data_offs;
+	__u8	parity_backedout_write_drvs;
+	__u8	parity_dist_mode;
+	__u8	parity_shift_fact;
+	__u8	bios_disable_flag;
+	__u32	blks_on_vol;
+	__u32	blks_per_drv;
+	__u8	scratch[16];
+	__u16	big_drv_map[8];
+	__u16	big_spare_map[8];
+	__u8	ss_source_vol;
+	__u8	mix_drv_cap_range;
+	struct {
+		__u16	big_drv_map[8];
+		__u32	blks_per_drv;
+		__u16	fault_tol_mode;
+		__u16	dist_factor;
+	} MDC_range[4];
+	__u8	reserved1[248];
+} config_t;
+
+#define BYPASS_VOL_STATE	0x52
+#define SS_CREATE_VOL		0x53
+#define CHANGE_CONFIG		0x54
+#define SENSE_ORIG_CONF		0x55
+#define REORDER_LOG_DRV		0x56
+typedef struct {
+	__u8	old_units[32];
+} reorder_log_drv_t;
+
+#define LABEL_LOG_DRV		0x57
+typedef struct {
+	__u8	log_drv_label[64];
+} label_log_drv_t;
+
+#define SS_TO_VOL		0x58
+	
+#define SET_SURF_DELAY		0x60
+typedef struct {
+	__u16	delay;
+	__u8	reserved[510];
+} surf_delay_t;
+
+#define SET_OVERHEAT_DELAY	0x61
+typedef struct {
+	__u16	delay;
+} overhead_delay_t;
+ 
+#define SET_MP_DELAY		0x62
+typedef struct {
+	__u16	delay;
+	__u8	reserved[510];
+} mp_delay_t;
+
+#define PASSTHRU_A	0x91
+typedef struct {
+	__u8	target;
+	__u8	bus;
+	__u8	lun;
+	__u32	timeout;
+	__u32	flags;
+	__u8	status;
+	__u8	error;
+	__u8	cdb_len;
+	__u8	sense_error;
+	__u8	sense_key;
+	__u32	sense_info;
+	__u8	sense_code;
+	__u8	sense_qual;
+	__u32	residual;
+	__u8	reserved[4];
+	__u8	cdb[12];	
+} scsi_param_t;
+
+#define RESUME_BACKGROUND_ACTIVITY	0x99
+#define SENSE_CONTROLLER_PERFORMANCE	0xa8
+#define FLUSH_CACHE			0xc2
+#define COLLECT_BUFFER			0xd2
+#define READ_FLASH_ROM			0xf6
+#define WRITE_FLASH_ROM			0xf7
+#pragma pack()	
+
+#endif /* ARRAYCMD_H */
diff --git a/drivers/block/ida_ioctl.h b/drivers/block/ida_ioctl.h
new file mode 100644
index 0000000..888fff9
--- /dev/null
+++ b/drivers/block/ida_ioctl.h
@@ -0,0 +1,87 @@
+/*
+ *    Disk Array driver for Compaq SMART2 Controllers
+ *    Copyright 1998 Compaq Computer Corporation
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2 of the License, or
+ *    (at your option) any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *    NON INFRINGEMENT.  See the GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
+ *
+ */
+#ifndef IDA_IOCTL_H
+#define IDA_IOCTL_H
+
+#include "ida_cmd.h"
+#include "cpqarray.h"
+
+#define IDAGETDRVINFO		0x27272828
+#define IDAPASSTHRU		0x28282929
+#define IDAGETCTLRSIG		0x29293030
+#define IDAREVALIDATEVOLS	0x30303131
+#define IDADRIVERVERSION	0x31313232
+#define IDAGETPCIINFO		0x32323333
+
+typedef struct _ida_pci_info_struct
+{
+	unsigned char 	bus;
+	unsigned char 	dev_fn;
+	__u32 		board_id;
+} ida_pci_info_struct;
+/*
+ * Normally, the ioctl determines the logical unit for this command by
+ * the major/minor number of the fd passed to ioctl.  If you need to send
+ * a command to a different/nonexistent unit (such as during config), you
+ * can override the normal behavior by setting the unit valid bit. (Normally,
+ * it should be zero) The controller the command is sent to is still
+ * determined by the major number of the open device.
+ */
+
+#define UNITVALID	0x80
+typedef struct {
+	__u8	cmd;
+	__u8	rcode;
+	__u8	unit;
+	__u32	blk;
+	__u16	blk_cnt;
+
+/* currently, sg_cnt is assumed to be 1: only the 0th element of sg is used */
+	struct {
+		void	__user *addr;
+		size_t	size;
+	} sg[SG_MAX];
+	int	sg_cnt;
+
+	union ctlr_cmds {
+		drv_info_t		drv;
+		unsigned char		buf[1024];
+
+		id_ctlr_t		id_ctlr;
+		drv_param_t		drv_param;
+		id_log_drv_t		id_log_drv;
+		id_log_drv_ext_t	id_log_drv_ext;
+		sense_log_drv_stat_t	sense_log_drv_stat;
+		id_phys_drv_t		id_phys_drv;
+		blink_drv_leds_t	blink_drv_leds;
+		sense_blink_leds_t	sense_blink_leds;
+		config_t		config;
+		reorder_log_drv_t	reorder_log_drv;
+		label_log_drv_t		label_log_drv;
+		surf_delay_t		surf_delay;
+		overhead_delay_t	overhead_delay;
+		mp_delay_t		mp_delay;
+		scsi_param_t		scsi_param;
+	} c;
+} ida_ioctl_t;
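+
+/*
+ * Illustrative userspace sketch (editor's example; fd is an open ida
+ * device): issuing an ID_LOG_DRV command to logical unit 2 during
+ * configuration by setting the unit valid bit described above:
+ *
+ *	ida_ioctl_t io;
+ *
+ *	memset(&io, 0, sizeof(io));
+ *	io.cmd = ID_LOG_DRV;
+ *	io.unit = 2 | UNITVALID;
+ *	io.sg[0].addr = &io.c.id_log_drv;
+ *	io.sg[0].size = sizeof(io.c.id_log_drv);
+ *	io.sg_cnt = 1;
+ *	ioctl(fd, IDAPASSTHRU, &io);
+ */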
+
+#endif /* IDA_IOCTL_H */
diff --git a/drivers/block/ioctl.c b/drivers/block/ioctl.c
new file mode 100644
index 0000000..5e03f51
--- /dev/null
+++ b/drivers/block/ioctl.c
@@ -0,0 +1,239 @@
+#include <linux/sched.h>		/* for capable() */
+#include <linux/blkdev.h>
+#include <linux/blkpg.h>
+#include <linux/backing-dev.h>
+#include <linux/buffer_head.h>
+#include <linux/smp_lock.h>
+#include <asm/uaccess.h>
+
+static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user *arg)
+{
+	struct block_device *bdevp;
+	struct gendisk *disk;
+	struct blkpg_ioctl_arg a;
+	struct blkpg_partition p;
+	long long start, length;
+	int part;
+	int i;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+	if (copy_from_user(&a, arg, sizeof(struct blkpg_ioctl_arg)))
+		return -EFAULT;
+	if (copy_from_user(&p, a.data, sizeof(struct blkpg_partition)))
+		return -EFAULT;
+	disk = bdev->bd_disk;
+	if (bdev != bdev->bd_contains)
+		return -EINVAL;
+	part = p.pno;
+	if (part <= 0 || part >= disk->minors)
+		return -EINVAL;
+	switch (a.op) {
+		case BLKPG_ADD_PARTITION:
+			start = p.start >> 9;
+			length = p.length >> 9;
+			/* check for fit in a hd_struct */ 
+			if (sizeof(sector_t) == sizeof(long) && 
+			    sizeof(long long) > sizeof(long)) {
+				long pstart = start, plength = length;
+				if (pstart != start || plength != length
+				    || pstart < 0 || plength < 0)
+					return -EINVAL;
+			}
+			/* partition number in use? */
+			down(&bdev->bd_sem);
+			if (disk->part[part - 1]) {
+				up(&bdev->bd_sem);
+				return -EBUSY;
+			}
+			/* overlap? */
+			for (i = 0; i < disk->minors - 1; i++) {
+				struct hd_struct *s = disk->part[i];
+
+				if (!s)
+					continue;
+				if (!(start+length <= s->start_sect ||
+				      start >= s->start_sect + s->nr_sects)) {
+					up(&bdev->bd_sem);
+					return -EBUSY;
+				}
+			}
+			/* all seems OK */
+			add_partition(disk, part, start, length);
+			up(&bdev->bd_sem);
+			return 0;
+		case BLKPG_DEL_PARTITION:
+			if (!disk->part[part-1])
+				return -ENXIO;
+			if (disk->part[part - 1]->nr_sects == 0)
+				return -ENXIO;
+			bdevp = bdget_disk(disk, part);
+			if (!bdevp)
+				return -ENOMEM;
+			down(&bdevp->bd_sem);
+			if (bdevp->bd_openers) {
+				up(&bdevp->bd_sem);
+				bdput(bdevp);
+				return -EBUSY;
+			}
+			/* all seems OK */
+			fsync_bdev(bdevp);
+			invalidate_bdev(bdevp, 0);
+
+			down(&bdev->bd_sem);
+			delete_partition(disk, part);
+			up(&bdev->bd_sem);
+			up(&bdevp->bd_sem);
+			bdput(bdevp);
+
+			return 0;
+		default:
+			return -EINVAL;
+	}
+}
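+
+/*
+ * Userspace sketch (editor's example; fd is an open whole-disk device):
+ * adding partition 1 covering the first megabyte through the BLKPG
+ * ioctl handled above.  start and length are given in bytes and
+ * shifted down to sectors by the handler.
+ *
+ *	struct blkpg_partition p = { .pno = 1, .start = 0,
+ *				     .length = 1024 * 1024 };
+ *	struct blkpg_ioctl_arg a = { .op = BLKPG_ADD_PARTITION,
+ *				     .datalen = sizeof(p), .data = &p };
+ *
+ *	ioctl(fd, BLKPG, &a);
+ */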
+
+static int blkdev_reread_part(struct block_device *bdev)
+{
+	struct gendisk *disk = bdev->bd_disk;
+	int res;
+
+	if (disk->minors == 1 || bdev != bdev->bd_contains)
+		return -EINVAL;
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+	if (down_trylock(&bdev->bd_sem))
+		return -EBUSY;
+	res = rescan_partitions(disk, bdev);
+	up(&bdev->bd_sem);
+	return res;
+}
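+
+/*
+ * This is the BLKRRPART path (editor's note): partitioning tools such
+ * as fdisk issue ioctl(fd, BLKRRPART, 0) on the whole-disk device after
+ * rewriting the partition table; -EBUSY is returned if bd_sem cannot be
+ * taken immediately.
+ */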
+
+static int put_ushort(unsigned long arg, unsigned short val)
+{
+	return put_user(val, (unsigned short __user *)arg);
+}
+
+static int put_int(unsigned long arg, int val)
+{
+	return put_user(val, (int __user *)arg);
+}
+
+static int put_long(unsigned long arg, long val)
+{
+	return put_user(val, (long __user *)arg);
+}
+
+static int put_ulong(unsigned long arg, unsigned long val)
+{
+	return put_user(val, (unsigned long __user *)arg);
+}
+
+static int put_u64(unsigned long arg, u64 val)
+{
+	return put_user(val, (u64 __user *)arg);
+}
+
+int blkdev_ioctl(struct inode *inode, struct file *file, unsigned cmd,
+			unsigned long arg)
+{
+	struct block_device *bdev = inode->i_bdev;
+	struct gendisk *disk = bdev->bd_disk;
+	struct backing_dev_info *bdi;
+	int ret, n;
+
+	switch (cmd) {
+	case BLKRAGET:
+	case BLKFRAGET:
+		if (!arg)
+			return -EINVAL;
+		bdi = blk_get_backing_dev_info(bdev);
+		if (bdi == NULL)
+			return -ENOTTY;
+		return put_long(arg, (bdi->ra_pages * PAGE_CACHE_SIZE) / 512);
+	case BLKROGET:
+		return put_int(arg, bdev_read_only(bdev) != 0);
+	case BLKBSZGET: /* get the logical block size (cf. BLKSSZGET) */
+		return put_int(arg, block_size(bdev));
+	case BLKSSZGET: /* get block device hardware sector size */
+		return put_int(arg, bdev_hardsect_size(bdev));
+	case BLKSECTGET:
+		return put_ushort(arg, bdev_get_queue(bdev)->max_sectors);
+	case BLKRASET:
+	case BLKFRASET:
+		if(!capable(CAP_SYS_ADMIN))
+			return -EACCES;
+		bdi = blk_get_backing_dev_info(bdev);
+		if (bdi == NULL)
+			return -ENOTTY;
+		bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
+		return 0;
+	case BLKBSZSET:
+		/* set the logical block size */
+		if (!capable(CAP_SYS_ADMIN))
+			return -EACCES;
+		if (!arg)
+			return -EINVAL;
+		if (get_user(n, (int __user *) arg))
+			return -EFAULT;
+		if (bd_claim(bdev, file) < 0)
+			return -EBUSY;
+		ret = set_blocksize(bdev, n);
+		bd_release(bdev);
+		return ret;
+	case BLKPG:
+		return blkpg_ioctl(bdev, (struct blkpg_ioctl_arg __user *) arg);
+	case BLKRRPART:
+		return blkdev_reread_part(bdev);
+	case BLKGETSIZE:
+		if ((bdev->bd_inode->i_size >> 9) > ~0UL)
+			return -EFBIG;
+		return put_ulong(arg, bdev->bd_inode->i_size >> 9);
+	case BLKGETSIZE64:
+		return put_u64(arg, bdev->bd_inode->i_size);
+	case BLKFLSBUF:
+		if (!capable(CAP_SYS_ADMIN))
+			return -EACCES;
+		if (disk->fops->ioctl) {
+			ret = disk->fops->ioctl(inode, file, cmd, arg);
+			/* -EINVAL to handle old uncorrected drivers */
+			if (ret != -EINVAL && ret != -ENOTTY)
+				return ret;
+		}
+		fsync_bdev(bdev);
+		invalidate_bdev(bdev, 0);
+		return 0;
+	case BLKROSET:
+		if (disk->fops->ioctl) {
+			ret = disk->fops->ioctl(inode, file, cmd, arg);
+			/* -EINVAL to handle old uncorrected drivers */
+			if (ret != -EINVAL && ret != -ENOTTY)
+				return ret;
+		}
+		if (!capable(CAP_SYS_ADMIN))
+			return -EACCES;
+		if (get_user(n, (int __user *)(arg)))
+			return -EFAULT;
+		set_device_ro(bdev, n);
+		return 0;
+	default:
+		if (disk->fops->ioctl)
+			return disk->fops->ioctl(inode, file, cmd, arg);
+	}
+	return -ENOTTY;
+}
+
+/* Most of the generic ioctls are handled in the normal fallback path.
+   This assumes the blkdev's low level compat_ioctl always returns
+   ENOIOCTLCMD for unknown ioctls. */
+long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
+{
+	struct block_device *bdev = file->f_dentry->d_inode->i_bdev;
+	struct gendisk *disk = bdev->bd_disk;
+	int ret = -ENOIOCTLCMD;
+	if (disk->fops->compat_ioctl) {
+		lock_kernel();
+		ret = disk->fops->compat_ioctl(file, cmd, arg);
+		unlock_kernel();
+	}
+	return ret;
+}
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
new file mode 100644
index 0000000..02242e8
--- /dev/null
+++ b/drivers/block/ll_rw_blk.c
@@ -0,0 +1,3642 @@
+/*
+ *  linux/drivers/block/ll_rw_blk.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 1994,      Karl Keyte: Added support for disk statistics
+ * Elevator latency, (C) 2000  Andrea Arcangeli <andrea@suse.de> SuSE
+ * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de>
+ * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au> -  July2000
+ * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001
+ */
+
+/*
+ * This handles all read/write requests to block devices
+ */
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/backing-dev.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/highmem.h>
+#include <linux/mm.h>
+#include <linux/kernel_stat.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
+#include <linux/completion.h>
+#include <linux/slab.h>
+#include <linux/swap.h>
+#include <linux/writeback.h>
+
+/*
+ * for max sense size
+ */
+#include <scsi/scsi_cmnd.h>
+
+static void blk_unplug_work(void *data);
+static void blk_unplug_timeout(unsigned long data);
+
+/*
+ * For the allocated request tables
+ */
+static kmem_cache_t *request_cachep;
+
+/*
+ * For queue allocation
+ */
+static kmem_cache_t *requestq_cachep;
+
+/*
+ * For io context allocations
+ */
+static kmem_cache_t *iocontext_cachep;
+
+static wait_queue_head_t congestion_wqh[2] = {
+		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
+		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
+	};
+
+/*
+ * Controlling structure to kblockd
+ */
+static struct workqueue_struct *kblockd_workqueue; 
+
+unsigned long blk_max_low_pfn, blk_max_pfn;
+
+EXPORT_SYMBOL(blk_max_low_pfn);
+EXPORT_SYMBOL(blk_max_pfn);
+
+/* Amount of time in which a process may batch requests */
+#define BLK_BATCH_TIME	(HZ/50UL)
+
+/* Number of requests a "batching" process may submit */
+#define BLK_BATCH_REQ	32
+
+/*
+ * Return the threshold (number of used requests) at which the queue is
+ * considered to be congested.  It include a little hysteresis to keep the
+ * context switch rate down.
+ */
+static inline int queue_congestion_on_threshold(struct request_queue *q)
+{
+	return q->nr_congestion_on;
+}
+
+/*
+ * The threshold at which a queue is considered to be uncongested
+ */
+static inline int queue_congestion_off_threshold(struct request_queue *q)
+{
+	return q->nr_congestion_off;
+}
+
+static void blk_queue_congestion_threshold(struct request_queue *q)
+{
+	int nr;
+
+	nr = q->nr_requests - (q->nr_requests / 8) + 1;
+	if (nr > q->nr_requests)
+		nr = q->nr_requests;
+	q->nr_congestion_on = nr;
+
+	nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1;
+	if (nr < 1)
+		nr = 1;
+	q->nr_congestion_off = nr;
+}
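+
+/*
+ * Worked example (editor's note): with the default of 128 requests,
+ * the queue is flagged congested once 128 - 16 + 1 = 113 requests are
+ * in use and uncongested again below 128 - 16 - 8 - 1 = 103, which
+ * provides the hysteresis described above.
+ */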
+
+/*
+ * A queue has just exited congestion.  Note this in the global counter of
+ * congested queues, and wake up anyone who was waiting for requests to be
+ * put back.
+ */
+static void clear_queue_congested(request_queue_t *q, int rw)
+{
+	enum bdi_state bit;
+	wait_queue_head_t *wqh = &congestion_wqh[rw];
+
+	bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
+	clear_bit(bit, &q->backing_dev_info.state);
+	smp_mb__after_clear_bit();
+	if (waitqueue_active(wqh))
+		wake_up(wqh);
+}
+
+/*
+ * A queue has just entered congestion.  Flag that in the queue's VM-visible
+ * state flags and increment the global counter of congested queues.
+ */
+static void set_queue_congested(request_queue_t *q, int rw)
+{
+	enum bdi_state bit;
+
+	bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
+	set_bit(bit, &q->backing_dev_info.state);
+}
+
+/**
+ * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
+ * @bdev:	device
+ *
+ * Locates the passed device's request queue and returns the address of its
+ * backing_dev_info
+ *
+ * Will return NULL if the request queue cannot be located.
+ */
+struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
+{
+	struct backing_dev_info *ret = NULL;
+	request_queue_t *q = bdev_get_queue(bdev);
+
+	if (q)
+		ret = &q->backing_dev_info;
+	return ret;
+}
+
+EXPORT_SYMBOL(blk_get_backing_dev_info);
+
+void blk_queue_activity_fn(request_queue_t *q, activity_fn *fn, void *data)
+{
+	q->activity_fn = fn;
+	q->activity_data = data;
+}
+
+EXPORT_SYMBOL(blk_queue_activity_fn);
+
+/**
+ * blk_queue_prep_rq - set a prepare_request function for queue
+ * @q:		queue
+ * @pfn:	prepare_request function
+ *
+ * It's possible for a queue to register a prepare_request callback which
+ * is invoked before the request is handed to the request_fn. The goal of
+ * the function is to prepare a request for I/O, it can be used to build a
+ * cdb from the request data for instance.
+ *
+ */
+void blk_queue_prep_rq(request_queue_t *q, prep_rq_fn *pfn)
+{
+	q->prep_rq_fn = pfn;
+}
+
+EXPORT_SYMBOL(blk_queue_prep_rq);
+
+/**
+ * blk_queue_merge_bvec - set a merge_bvec function for queue
+ * @q:		queue
+ * @mbfn:	merge_bvec_fn
+ *
+ * Usually queues have static limitations on the max sectors or segments that
+ * we can put in a request. Stacking drivers may have some settings that
+ * are dynamic, and thus we have to query the queue whether it is ok to
+ * add a new bio_vec to a bio at a given offset or not. If the block device
+ * has such limitations, it needs to register a merge_bvec_fn to control
+ * the size of bio's sent to it. Note that a block device *must* allow a
+ * single page to be added to an empty bio. The block device driver may want
+ * to use the bio_split() function to deal with these bio's. By default
+ * no merge_bvec_fn is defined for a queue, and only the fixed limits are
+ * honored.
+ */
+void blk_queue_merge_bvec(request_queue_t *q, merge_bvec_fn *mbfn)
+{
+	q->merge_bvec_fn = mbfn;
+}
+
+EXPORT_SYMBOL(blk_queue_merge_bvec);
+
+/**
+ * blk_queue_make_request - define an alternate make_request function for a device
+ * @q:  the request queue for the device to be affected
+ * @mfn: the alternate make_request function
+ *
+ * Description:
+ *    The normal way for &struct bios to be passed to a device
+ *    driver is for them to be collected into requests on a request
+ *    queue, and then to allow the device driver to select requests
+ *    off that queue when it is ready.  This works well for many block
+ *    devices. However some block devices (typically virtual devices
+ *    such as md or lvm) do not benefit from the processing on the
+ *    request queue, and are served best by having the requests passed
+ *    directly to them.  This can be achieved by providing a function
+ *    to blk_queue_make_request().
+ *
+ * Caveat:
+ *    The driver that does this *must* be able to deal appropriately
+ *    with buffers in "highmemory". This can be accomplished by either calling
+ *    __bio_kmap_atomic() to get a temporary kernel mapping, or by calling
+ *    blk_queue_bounce() to create a buffer in normal memory.
+ **/
+void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
+{
+	/*
+	 * set defaults
+	 */
+	q->nr_requests = BLKDEV_MAX_RQ;
+	q->max_phys_segments = MAX_PHYS_SEGMENTS;
+	q->max_hw_segments = MAX_HW_SEGMENTS;
+	q->make_request_fn = mfn;
+	q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
+	q->backing_dev_info.state = 0;
+	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
+	blk_queue_max_sectors(q, MAX_SECTORS);
+	blk_queue_hardsect_size(q, 512);
+	blk_queue_dma_alignment(q, 511);
+	blk_queue_congestion_threshold(q);
+	q->nr_batching = BLK_BATCH_REQ;
+
+	q->unplug_thresh = 4;		/* hmm */
+	q->unplug_delay = (3 * HZ) / 1000;	/* 3 milliseconds */
+	if (q->unplug_delay == 0)
+		q->unplug_delay = 1;
+
+	INIT_WORK(&q->unplug_work, blk_unplug_work, q);
+
+	q->unplug_timer.function = blk_unplug_timeout;
+	q->unplug_timer.data = (unsigned long)q;
+
+	/*
+	 * by default assume old behaviour and bounce for any highmem page
+	 */
+	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
+
+	blk_queue_activity_fn(q, NULL, NULL);
+
+	INIT_LIST_HEAD(&q->drain_list);
+}
+
+EXPORT_SYMBOL(blk_queue_make_request);
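+
+/*
+ * Usage sketch (editor's example; my_make_request is hypothetical): a
+ * bio-based virtual driver takes over request handling completely.
+ */
+#if 0
+static int my_make_request(request_queue_t *q, struct bio *bio)
+{
+	/* remap bio->bi_bdev/bi_sector and resubmit with
+	 * generic_make_request(), or complete it directly: */
+	bio_endio(bio, bio->bi_size, 0);
+	return 0;
+}
+
+static void my_queue_setup(request_queue_t *q)
+{
+	blk_queue_make_request(q, my_make_request);
+}
+#endif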
+
+static inline void rq_init(request_queue_t *q, struct request *rq)
+{
+	INIT_LIST_HEAD(&rq->queuelist);
+
+	rq->errors = 0;
+	rq->rq_status = RQ_ACTIVE;
+	rq->bio = rq->biotail = NULL;
+	rq->buffer = NULL;
+	rq->ref_count = 1;
+	rq->q = q;
+	rq->waiting = NULL;
+	rq->special = NULL;
+	rq->data_len = 0;
+	rq->data = NULL;
+	rq->sense = NULL;
+	rq->end_io = NULL;
+	rq->end_io_data = NULL;
+}
+
+/**
+ * blk_queue_ordered - does this queue support ordered writes
+ * @q:     the request queue
+ * @flag:  see below
+ *
+ * Description:
+ *   For journalled file systems, doing ordered writes on a commit
+ *   block instead of explicitly doing wait_on_buffer (which is bad
+ *   for performance) can be a big win. Block drivers supporting this
+ *   feature should call this function and indicate so.
+ *
+ **/
+void blk_queue_ordered(request_queue_t *q, int flag)
+{
+	switch (flag) {
+		case QUEUE_ORDERED_NONE:
+			if (q->flush_rq)
+				kmem_cache_free(request_cachep, q->flush_rq);
+			q->flush_rq = NULL;
+			q->ordered = flag;
+			break;
+		case QUEUE_ORDERED_TAG:
+			q->ordered = flag;
+			break;
+		case QUEUE_ORDERED_FLUSH:
+			q->ordered = flag;
+			if (!q->flush_rq)
+				q->flush_rq = kmem_cache_alloc(request_cachep,
+								GFP_KERNEL);
+			break;
+		default:
+			printk("blk_queue_ordered: bad value %d\n", flag);
+			break;
+	}
+}
+
+EXPORT_SYMBOL(blk_queue_ordered);
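+
+/*
+ * Usage sketch (editor's example): hardware with a real cache flush
+ * command would declare
+ *
+ *	blk_queue_ordered(q, QUEUE_ORDERED_FLUSH);
+ *
+ * and also set q->prepare_flush_fn (used by the flush machinery below);
+ * hardware with ordered tag support may use QUEUE_ORDERED_TAG instead.
+ */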
+
+/**
+ * blk_queue_issue_flush_fn - set function for issuing a flush
+ * @q:     the request queue
+ * @iff:   the function to be called issuing the flush
+ *
+ * Description:
+ *   If a driver supports issuing a flush command, the support is notified
+ *   to the block layer by defining it through this call.
+ *
+ **/
+void blk_queue_issue_flush_fn(request_queue_t *q, issue_flush_fn *iff)
+{
+	q->issue_flush_fn = iff;
+}
+
+EXPORT_SYMBOL(blk_queue_issue_flush_fn);
+
+/*
+ * Cache flushing for ordered writes handling
+ */
+static void blk_pre_flush_end_io(struct request *flush_rq)
+{
+	struct request *rq = flush_rq->end_io_data;
+	request_queue_t *q = rq->q;
+
+	rq->flags |= REQ_BAR_PREFLUSH;
+
+	if (!flush_rq->errors)
+		elv_requeue_request(q, rq);
+	else {
+		q->end_flush_fn(q, flush_rq);
+		clear_bit(QUEUE_FLAG_FLUSH, &q->queue_flags);
+		q->request_fn(q);
+	}
+}
+
+static void blk_post_flush_end_io(struct request *flush_rq)
+{
+	struct request *rq = flush_rq->end_io_data;
+	request_queue_t *q = rq->q;
+
+	rq->flags |= REQ_BAR_POSTFLUSH;
+
+	q->end_flush_fn(q, flush_rq);
+	clear_bit(QUEUE_FLAG_FLUSH, &q->queue_flags);
+	q->request_fn(q);
+}
+
+struct request *blk_start_pre_flush(request_queue_t *q, struct request *rq)
+{
+	struct request *flush_rq = q->flush_rq;
+
+	BUG_ON(!blk_barrier_rq(rq));
+
+	if (test_and_set_bit(QUEUE_FLAG_FLUSH, &q->queue_flags))
+		return NULL;
+
+	rq_init(q, flush_rq);
+	flush_rq->elevator_private = NULL;
+	flush_rq->flags = REQ_BAR_FLUSH;
+	flush_rq->rq_disk = rq->rq_disk;
+	flush_rq->rl = NULL;
+
+	/*
+	 * prepare_flush returns 0 if no flush is needed, just mark both
+	 * pre and post flush as done in that case
+	 */
+	if (!q->prepare_flush_fn(q, flush_rq)) {
+		rq->flags |= REQ_BAR_PREFLUSH | REQ_BAR_POSTFLUSH;
+		clear_bit(QUEUE_FLAG_FLUSH, &q->queue_flags);
+		return rq;
+	}
+
+	/*
+	 * some drivers dequeue requests right away, some only after io
+	 * completion. make sure the request is dequeued.
+	 */
+	if (!list_empty(&rq->queuelist))
+		blkdev_dequeue_request(rq);
+
+	elv_deactivate_request(q, rq);
+
+	flush_rq->end_io_data = rq;
+	flush_rq->end_io = blk_pre_flush_end_io;
+
+	__elv_add_request(q, flush_rq, ELEVATOR_INSERT_FRONT, 0);
+	return flush_rq;
+}
+
+static void blk_start_post_flush(request_queue_t *q, struct request *rq)
+{
+	struct request *flush_rq = q->flush_rq;
+
+	BUG_ON(!blk_barrier_rq(rq));
+
+	rq_init(q, flush_rq);
+	flush_rq->elevator_private = NULL;
+	flush_rq->flags = REQ_BAR_FLUSH;
+	flush_rq->rq_disk = rq->rq_disk;
+	flush_rq->rl = NULL;
+
+	if (q->prepare_flush_fn(q, flush_rq)) {
+		flush_rq->end_io_data = rq;
+		flush_rq->end_io = blk_post_flush_end_io;
+
+		__elv_add_request(q, flush_rq, ELEVATOR_INSERT_FRONT, 0);
+		q->request_fn(q);
+	}
+}
+
+static inline int blk_check_end_barrier(request_queue_t *q, struct request *rq,
+					int sectors)
+{
+	if (sectors > rq->nr_sectors)
+		sectors = rq->nr_sectors;
+
+	rq->nr_sectors -= sectors;
+	return rq->nr_sectors;
+}
+
+static int __blk_complete_barrier_rq(request_queue_t *q, struct request *rq,
+				     int sectors, int queue_locked)
+{
+	if (q->ordered != QUEUE_ORDERED_FLUSH)
+		return 0;
+	if (!blk_fs_request(rq) || !blk_barrier_rq(rq))
+		return 0;
+	if (blk_barrier_postflush(rq))
+		return 0;
+
+	if (!blk_check_end_barrier(q, rq, sectors)) {
+		unsigned long flags = 0;
+
+		if (!queue_locked)
+			spin_lock_irqsave(q->queue_lock, flags);
+
+		blk_start_post_flush(q, rq);
+
+		if (!queue_locked)
+			spin_unlock_irqrestore(q->queue_lock, flags);
+	}
+
+	return 1;
+}
+
+/**
+ * blk_complete_barrier_rq - complete possible barrier request
+ * @q:  the request queue for the device
+ * @rq:  the request
+ * @sectors:  number of sectors to complete
+ *
+ * Description:
+ *   Used in driver end_io handling to determine whether to postpone
+ *   completion of a barrier request until a post flush has been done. This
+ *   is the unlocked variant, used if the caller doesn't already hold the
+ *   queue lock.
+ **/
+int blk_complete_barrier_rq(request_queue_t *q, struct request *rq, int sectors)
+{
+	return __blk_complete_barrier_rq(q, rq, sectors, 0);
+}
+EXPORT_SYMBOL(blk_complete_barrier_rq);
+
+/**
+ * blk_complete_barrier_rq_locked - complete possible barrier request
+ * @q:  the request queue for the device
+ * @rq:  the request
+ * @sectors:  number of sectors to complete
+ *
+ * Description:
+ *   See blk_complete_barrier_rq(). This variant must be used if the caller
+ *   holds the queue lock.
+ **/
+int blk_complete_barrier_rq_locked(request_queue_t *q, struct request *rq,
+				   int sectors)
+{
+	return __blk_complete_barrier_rq(q, rq, sectors, 1);
+}
+EXPORT_SYMBOL(blk_complete_barrier_rq_locked);
+
+/**
+ * blk_queue_bounce_limit - set bounce buffer limit for queue
+ * @q:  the request queue for the device
+ * @dma_addr:   bus address limit
+ *
+ * Description:
+ *    Different hardware can have different requirements as to what pages
+ *    it can do I/O directly to. A low level driver can call
+ *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
+ *    buffers for doing I/O to pages residing above @page. By default
+ *    the block layer sets this to the highest numbered "low" memory page.
+ **/
+void blk_queue_bounce_limit(request_queue_t *q, u64 dma_addr)
+{
+	unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT;
+
+	/*
+	 * set appropriate bounce gfp mask -- unfortunately we don't have a
+	 * full 4GB zone, so we have to resort to low memory for any bounces.
+	 * ISA has its own < 16MB zone.
+	 */
+	if (bounce_pfn < blk_max_low_pfn) {
+		BUG_ON(dma_addr < BLK_BOUNCE_ISA);
+		init_emergency_isa_pool();
+		q->bounce_gfp = GFP_NOIO | GFP_DMA;
+	} else
+		q->bounce_gfp = GFP_NOIO;
+
+	q->bounce_pfn = bounce_pfn;
+}
+
+EXPORT_SYMBOL(blk_queue_bounce_limit);
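+
+/*
+ * Usage sketch (editor's example): an ISA-style controller limited to
+ * 24-bit DMA asks for bouncing of everything above 16MB with
+ *
+ *	blk_queue_bounce_limit(q, BLK_BOUNCE_ISA);
+ *
+ * while a device that can DMA anywhere passes BLK_BOUNCE_ANY.
+ */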
+
+/**
+ * blk_queue_max_sectors - set max sectors for a request for this queue
+ * @q:  the request queue for the device
+ * @max_sectors:  max sectors in the usual 512b unit
+ *
+ * Description:
+ *    Enables a low level driver to set an upper limit on the size of
+ *    received requests.
+ **/
+void blk_queue_max_sectors(request_queue_t *q, unsigned short max_sectors)
+{
+	if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
+		max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
+		printk("%s: set to minimum %d\n", __FUNCTION__, max_sectors);
+	}
+
+	q->max_sectors = q->max_hw_sectors = max_sectors;
+}
+
+EXPORT_SYMBOL(blk_queue_max_sectors);
+
+/**
+ * blk_queue_max_phys_segments - set max phys segments for a request for this queue
+ * @q:  the request queue for the device
+ * @max_segments:  max number of segments
+ *
+ * Description:
+ *    Enables a low level driver to set an upper limit on the number of
+ *    physical data segments in a request.  This would be the largest sized
+ *    scatter list the driver could handle.
+ **/
+void blk_queue_max_phys_segments(request_queue_t *q, unsigned short max_segments)
+{
+	if (!max_segments) {
+		max_segments = 1;
+		printk("%s: set to minimum %d\n", __FUNCTION__, max_segments);
+	}
+
+	q->max_phys_segments = max_segments;
+}
+
+EXPORT_SYMBOL(blk_queue_max_phys_segments);
+
+/**
+ * blk_queue_max_hw_segments - set max hw segments for a request for this queue
+ * @q:  the request queue for the device
+ * @max_segments:  max number of segments
+ *
+ * Description:
+ *    Enables a low level driver to set an upper limit on the number of
+ *    hw data segments in a request.  This would be the largest number of
+ *    hw data segments in a request.  This would be the largest number of
+ *    address/length pairs the host adapter can actually give at once
+ **/
+void blk_queue_max_hw_segments(request_queue_t *q, unsigned short max_segments)
+{
+	if (!max_segments) {
+		max_segments = 1;
+		printk("%s: set to minimum %d\n", __FUNCTION__, max_segments);
+	}
+
+	q->max_hw_segments = max_segments;
+}
+
+EXPORT_SYMBOL(blk_queue_max_hw_segments);
+
+/**
+ * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
+ * @q:  the request queue for the device
+ * @max_size:  max size of segment in bytes
+ *
+ * Description:
+ *    Enables a low level driver to set an upper limit on the size of a
+ *    coalesced segment
+ **/
+void blk_queue_max_segment_size(request_queue_t *q, unsigned int max_size)
+{
+	if (max_size < PAGE_CACHE_SIZE) {
+		max_size = PAGE_CACHE_SIZE;
+		printk("%s: set to minimum %d\n", __FUNCTION__, max_size);
+	}
+
+	q->max_segment_size = max_size;
+}
+
+EXPORT_SYMBOL(blk_queue_max_segment_size);
+
+/**
+ * blk_queue_hardsect_size - set hardware sector size for the queue
+ * @q:  the request queue for the device
+ * @size:  the hardware sector size, in bytes
+ *
+ * Description:
+ *   This should typically be set to the lowest possible sector size
+ *   that the hardware can operate on without resorting to internal
+ *   read-modify-write operations. Usually the default of 512 covers
+ *   most hardware.
+ **/
+void blk_queue_hardsect_size(request_queue_t *q, unsigned short size)
+{
+	q->hardsect_size = size;
+}
+
+EXPORT_SYMBOL(blk_queue_hardsect_size);
+
+/*
+ * Returns the minimum that is _not_ zero, unless both are zero.
+ */
+#define min_not_zero(l, r) ((l) == 0 ? (r) : ((r) == 0 ? (l) : min(l, r)))
+
+/**
+ * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
+ * @t:	the stacking driver (top)
+ * @b:  the underlying device (bottom)
+ **/
+void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b)
+{
+	/* zero is "infinity" */
+	t->max_sectors = t->max_hw_sectors =
+		min_not_zero(t->max_sectors,b->max_sectors);
+
+	t->max_phys_segments = min(t->max_phys_segments,b->max_phys_segments);
+	t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments);
+	t->max_segment_size = min(t->max_segment_size,b->max_segment_size);
+	t->hardsect_size = max(t->hardsect_size,b->hardsect_size);
+}
+
+EXPORT_SYMBOL(blk_queue_stack_limits);
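+
+/*
+ * Usage sketch (editor's example; member_bdev is hypothetical): an
+ * md/dm style driver calls this once per underlying device, so the
+ * combined queue never exceeds what its weakest member accepts:
+ *
+ *	blk_queue_stack_limits(t, bdev_get_queue(member_bdev));
+ */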
+
+/**
+ * blk_queue_segment_boundary - set boundary rules for segment merging
+ * @q:  the request queue for the device
+ * @mask:  the memory boundary mask
+ **/
+void blk_queue_segment_boundary(request_queue_t *q, unsigned long mask)
+{
+	if (mask < PAGE_CACHE_SIZE - 1) {
+		mask = PAGE_CACHE_SIZE - 1;
+		printk("%s: set to minimum %lx\n", __FUNCTION__, mask);
+	}
+
+	q->seg_boundary_mask = mask;
+}
+
+EXPORT_SYMBOL(blk_queue_segment_boundary);
+
+/**
+ * blk_queue_dma_alignment - set dma length and memory alignment
+ * @q:     the request queue for the device
+ * @mask:  alignment mask
+ *
+ * Description:
+ *    Set required memory and length alignment for direct dma transactions.
+ *    This is used when building direct io requests for the queue.
+ *
+ **/
+void blk_queue_dma_alignment(request_queue_t *q, int mask)
+{
+	q->dma_alignment = mask;
+}
+
+EXPORT_SYMBOL(blk_queue_dma_alignment);
+
+/**
+ * blk_queue_find_tag - find a request by its tag and queue
+ *
+ * @q:	 The request queue for the device
+ * @tag: The tag of the request
+ *
+ * Notes:
+ *    Should be used when a device returns a tag and you want to match
+ *    it with a request.
+ *
+ *    no locks need be held.
+ **/
+struct request *blk_queue_find_tag(request_queue_t *q, int tag)
+{
+	struct blk_queue_tag *bqt = q->queue_tags;
+
+	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
+		return NULL;
+
+	return bqt->tag_index[tag];
+}
+
+EXPORT_SYMBOL(blk_queue_find_tag);
+
+/**
+ * __blk_queue_free_tags - release tag maintenance info
+ * @q:  the request queue for the device
+ *
+ *  Notes:
+ *    blk_cleanup_queue() will take care of calling this function, if tagging
+ *    has been used. So there's no need to call this directly.
+ **/
+static void __blk_queue_free_tags(request_queue_t *q)
+{
+	struct blk_queue_tag *bqt = q->queue_tags;
+
+	if (!bqt)
+		return;
+
+	if (atomic_dec_and_test(&bqt->refcnt)) {
+		BUG_ON(bqt->busy);
+		BUG_ON(!list_empty(&bqt->busy_list));
+
+		kfree(bqt->tag_index);
+		bqt->tag_index = NULL;
+
+		kfree(bqt->tag_map);
+		bqt->tag_map = NULL;
+
+		kfree(bqt);
+	}
+
+	q->queue_tags = NULL;
+	q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED);
+}
+
+/**
+ * blk_queue_free_tags - release tag maintenance info
+ * @q:  the request queue for the device
+ *
+ *  Notes:
+ *	This is used to disable tagged queuing on a device, yet leave the
+ *	queue in function.
+ **/
+void blk_queue_free_tags(request_queue_t *q)
+{
+	clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
+}
+
+EXPORT_SYMBOL(blk_queue_free_tags);
+
+static int
+init_tag_map(request_queue_t *q, struct blk_queue_tag *tags, int depth)
+{
+	int bits, i;
+	struct request **tag_index;
+	unsigned long *tag_map;
+
+	if (depth > q->nr_requests * 2) {
+		depth = q->nr_requests * 2;
+		printk(KERN_ERR "%s: adjusted depth to %d\n",
+				__FUNCTION__, depth);
+	}
+
+	tag_index = kmalloc(depth * sizeof(struct request *), GFP_ATOMIC);
+	if (!tag_index)
+		goto fail;
+
+	bits = (depth / BLK_TAGS_PER_LONG) + 1;
+	tag_map = kmalloc(bits * sizeof(unsigned long), GFP_ATOMIC);
+	if (!tag_map)
+		goto fail;
+
+	memset(tag_index, 0, depth * sizeof(struct request *));
+	memset(tag_map, 0, bits * sizeof(unsigned long));
+	tags->max_depth = depth;
+	tags->real_max_depth = bits * BITS_PER_LONG;
+	tags->tag_index = tag_index;
+	tags->tag_map = tag_map;
+
+	/*
+	 * set the upper bits if the depth isn't a multiple of the word size
+	 */
+	for (i = depth; i < bits * BLK_TAGS_PER_LONG; i++)
+		__set_bit(i, tag_map);
+
+	return 0;
+fail:
+	kfree(tag_index);
+	return -ENOMEM;
+}
+
+/**
+ * blk_queue_init_tags - initialize the queue tag info
+ * @q:  the request queue for the device
+ * @depth:  the maximum queue depth supported
+ * @tags: the existing tag structure to share, or %NULL to allocate a new one
+ **/
+int blk_queue_init_tags(request_queue_t *q, int depth,
+			struct blk_queue_tag *tags)
+{
+	int rc;
+
+	BUG_ON(tags && q->queue_tags && tags != q->queue_tags);
+
+	if (!tags && !q->queue_tags) {
+		tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
+		if (!tags)
+			goto fail;
+
+		if (init_tag_map(q, tags, depth))
+			goto fail;
+
+		INIT_LIST_HEAD(&tags->busy_list);
+		tags->busy = 0;
+		atomic_set(&tags->refcnt, 1);
+	} else if (q->queue_tags) {
+		if ((rc = blk_queue_resize_tags(q, depth)))
+			return rc;
+		set_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
+		return 0;
+	} else
+		atomic_inc(&tags->refcnt);
+
+	/*
+	 * assign it, all done
+	 */
+	q->queue_tags = tags;
+	q->queue_flags |= (1 << QUEUE_FLAG_QUEUED);
+	return 0;
+fail:
+	kfree(tags);
+	return -ENOMEM;
+}
+
+EXPORT_SYMBOL(blk_queue_init_tags);
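+
+/*
+ * Illustrative sketch (hypothetical driver): enabling tagged queueing
+ * for a device supporting 64 outstanding commands, falling back to
+ * untagged operation on allocation failure:
+ *
+ *	if (blk_queue_init_tags(q, 64, NULL))
+ *		printk(KERN_WARNING "mydrv: running untagged\n");
+ */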
+
+/**
+ * blk_queue_resize_tags - change the queueing depth
+ * @q:  the request queue for the device
+ * @new_depth: the new max command queueing depth
+ *
+ *  Notes:
+ *    Must be called with the queue lock held.
+ **/
+int blk_queue_resize_tags(request_queue_t *q, int new_depth)
+{
+	struct blk_queue_tag *bqt = q->queue_tags;
+	struct request **tag_index;
+	unsigned long *tag_map;
+	int bits, max_depth;
+
+	if (!bqt)
+		return -ENXIO;
+
+	/*
+	 * don't bother sizing down
+	 */
+	if (new_depth <= bqt->real_max_depth) {
+		bqt->max_depth = new_depth;
+		return 0;
+	}
+
+	/*
+	 * save the old state info, so we can copy it back
+	 */
+	tag_index = bqt->tag_index;
+	tag_map = bqt->tag_map;
+	max_depth = bqt->real_max_depth;
+
+	if (init_tag_map(q, bqt, new_depth))
+		return -ENOMEM;
+
+	memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
+	bits = max_depth / BLK_TAGS_PER_LONG;
+	memcpy(bqt->tag_map, tag_map, bits * sizeof(unsigned long));
+
+	kfree(tag_index);
+	kfree(tag_map);
+	return 0;
+}
+
+EXPORT_SYMBOL(blk_queue_resize_tags);
+
+/**
+ * blk_queue_end_tag - end tag operations for a request
+ * @q:  the request queue for the device
+ * @rq: the request that has completed
+ *
+ *  Description:
+ *    Typically called when end_that_request_first() returns 0, meaning
+ *    all transfers have been done for a request. It's important to call
+ *    this function before end_that_request_last(), as that will put the
+ *    request back on the free list thus corrupting the internal tag list.
+ *
+ *  Notes:
+ *   queue lock must be held.
+ **/
+void blk_queue_end_tag(request_queue_t *q, struct request *rq)
+{
+	struct blk_queue_tag *bqt = q->queue_tags;
+	int tag = rq->tag;
+
+	BUG_ON(tag == -1);
+
+	if (unlikely(tag >= bqt->real_max_depth))
+		return;
+
+	if (unlikely(!__test_and_clear_bit(tag, bqt->tag_map))) {
+		printk("attempt to clear non-busy tag (%d)\n", tag);
+		return;
+	}
+
+	list_del_init(&rq->queuelist);
+	rq->flags &= ~REQ_QUEUED;
+	rq->tag = -1;
+
+	if (unlikely(bqt->tag_index[tag] == NULL))
+		printk("tag %d is missing\n", tag);
+
+	bqt->tag_index[tag] = NULL;
+	bqt->busy--;
+}
+
+EXPORT_SYMBOL(blk_queue_end_tag);
+
+/**
+ * blk_queue_start_tag - find a free tag and assign it
+ * @q:  the request queue for the device
+ * @rq:  the block request that needs tagging
+ *
+ *  Description:
+ *    This can either be used as a stand-alone helper, or possibly be
+ *    assigned as the queue &prep_rq_fn (in which case &struct request
+ *    automagically gets a tag assigned). Note that this function
+ *    assumes that any type of request can be queued! If this is not
+ *    true for your device, you must check the request type before
+ *    calling this function.  The request will also be removed from
+ *    the request queue, so it is the driver's responsibility to re-add
+ *    it if it should need to be restarted for some reason.
+ *
+ *  Notes:
+ *   queue lock must be held.
+ **/
+int blk_queue_start_tag(request_queue_t *q, struct request *rq)
+{
+	struct blk_queue_tag *bqt = q->queue_tags;
+	unsigned long *map;
+	int tag = 0;
+
+	if (unlikely((rq->flags & REQ_QUEUED))) {
+		printk(KERN_ERR
+		       "request %p for device [%s] already tagged %d\n",
+		       rq, rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
+		BUG();
+	}
+
+	for (map = bqt->tag_map; *map == -1UL; map++) {
+		tag += BLK_TAGS_PER_LONG;
+
+		if (tag >= bqt->max_depth)
+			return 1;
+	}
+
+	tag += ffz(*map);
+	__set_bit(tag, bqt->tag_map);
+
+	rq->flags |= REQ_QUEUED;
+	rq->tag = tag;
+	bqt->tag_index[tag] = rq;
+	blkdev_dequeue_request(rq);
+	list_add(&rq->queuelist, &bqt->busy_list);
+	bqt->busy++;
+	return 0;
+}
+
+EXPORT_SYMBOL(blk_queue_start_tag);
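+
+/*
+ * Illustrative sketch of a tagged request_fn; mydrv_issue() is a
+ * hypothetical helper and the queue lock is already held here:
+ *
+ *	while ((rq = elv_next_request(q)) != NULL) {
+ *		if (blk_queue_start_tag(q, rq))
+ *			break;
+ *		mydrv_issue(rq);
+ *	}
+ *
+ * A non-zero return means the tag map is exhausted; the request stays
+ * on the queue, so the driver can simply retry later, typically from
+ * its completion handler.
+ */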
+
+/**
+ * blk_queue_invalidate_tags - invalidate all pending tags
+ * @q:  the request queue for the device
+ *
+ *  Description:
+ *   Hardware conditions may dictate a need to stop all pending requests.
+ *   In this case, we will safely clear the block side of the tag queue and
+ *   re-add all requests to the request queue in the right order.
+ *
+ *  Notes:
+ *   queue lock must be held.
+ **/
+void blk_queue_invalidate_tags(request_queue_t *q)
+{
+	struct blk_queue_tag *bqt = q->queue_tags;
+	struct list_head *tmp, *n;
+	struct request *rq;
+
+	list_for_each_safe(tmp, n, &bqt->busy_list) {
+		rq = list_entry_rq(tmp);
+
+		if (rq->tag == -1) {
+			printk("bad tag found on list\n");
+			list_del_init(&rq->queuelist);
+			rq->flags &= ~REQ_QUEUED;
+		} else
+			blk_queue_end_tag(q, rq);
+
+		rq->flags &= ~REQ_STARTED;
+		__elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);
+	}
+}
+
+EXPORT_SYMBOL(blk_queue_invalidate_tags);
+
+static char *rq_flags[] = {
+	"REQ_RW",
+	"REQ_FAILFAST",
+	"REQ_SOFTBARRIER",
+	"REQ_HARDBARRIER",
+	"REQ_CMD",
+	"REQ_NOMERGE",
+	"REQ_STARTED",
+	"REQ_DONTPREP",
+	"REQ_QUEUED",
+	"REQ_PC",
+	"REQ_BLOCK_PC",
+	"REQ_SENSE",
+	"REQ_FAILED",
+	"REQ_QUIET",
+	"REQ_SPECIAL",
+	"REQ_DRIVE_CMD",
+	"REQ_DRIVE_TASK",
+	"REQ_DRIVE_TASKFILE",
+	"REQ_PREEMPT",
+	"REQ_PM_SUSPEND",
+	"REQ_PM_RESUME",
+	"REQ_PM_SHUTDOWN",
+};
+
+void blk_dump_rq_flags(struct request *rq, char *msg)
+{
+	int bit;
+
+	printk("%s: dev %s: flags = ", msg,
+		rq->rq_disk ? rq->rq_disk->disk_name : "?");
+	bit = 0;
+	do {
+		if (rq->flags & (1 << bit))
+			printk("%s ", rq_flags[bit]);
+		bit++;
+	} while (bit < __REQ_NR_BITS);
+
+	printk("\nsector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector,
+						       rq->nr_sectors,
+						       rq->current_nr_sectors);
+	printk("bio %p, biotail %p, buffer %p, data %p, len %u\n", rq->bio, rq->biotail, rq->buffer, rq->data, rq->data_len);
+
+	if (rq->flags & (REQ_BLOCK_PC | REQ_PC)) {
+		printk("cdb: ");
+		for (bit = 0; bit < sizeof(rq->cmd); bit++)
+			printk("%02x ", rq->cmd[bit]);
+		printk("\n");
+	}
+}
+
+EXPORT_SYMBOL(blk_dump_rq_flags);
+
+void blk_recount_segments(request_queue_t *q, struct bio *bio)
+{
+	struct bio_vec *bv, *bvprv = NULL;
+	int i, nr_phys_segs, nr_hw_segs, seg_size, hw_seg_size, cluster;
+	int high, highprv = 1;
+
+	if (unlikely(!bio->bi_io_vec))
+		return;
+
+	cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
+	hw_seg_size = seg_size = nr_phys_segs = nr_hw_segs = 0;
+	bio_for_each_segment(bv, bio, i) {
+		/*
+		 * the trick here is making sure that a high page is never
+		 * considered part of another segment, since that might
+		 * change with the bounce page.
+		 */
+		high = page_to_pfn(bv->bv_page) >= q->bounce_pfn;
+		if (high || highprv)
+			goto new_hw_segment;
+		if (cluster) {
+			if (seg_size + bv->bv_len > q->max_segment_size)
+				goto new_segment;
+			if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv))
+				goto new_segment;
+			if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv))
+				goto new_segment;
+			if (BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len))
+				goto new_hw_segment;
+
+			seg_size += bv->bv_len;
+			hw_seg_size += bv->bv_len;
+			bvprv = bv;
+			continue;
+		}
+new_segment:
+		if (BIOVEC_VIRT_MERGEABLE(bvprv, bv) &&
+		    !BIOVEC_VIRT_OVERSIZE(hw_seg_size + bv->bv_len)) {
+			hw_seg_size += bv->bv_len;
+		} else {
+new_hw_segment:
+			if (hw_seg_size > bio->bi_hw_front_size)
+				bio->bi_hw_front_size = hw_seg_size;
+			hw_seg_size = BIOVEC_VIRT_START_SIZE(bv) + bv->bv_len;
+			nr_hw_segs++;
+		}
+
+		nr_phys_segs++;
+		bvprv = bv;
+		seg_size = bv->bv_len;
+		highprv = high;
+	}
+	if (hw_seg_size > bio->bi_hw_back_size)
+		bio->bi_hw_back_size = hw_seg_size;
+	if (nr_hw_segs == 1 && hw_seg_size > bio->bi_hw_front_size)
+		bio->bi_hw_front_size = hw_seg_size;
+	bio->bi_phys_segments = nr_phys_segs;
+	bio->bi_hw_segments = nr_hw_segs;
+	bio->bi_flags |= (1 << BIO_SEG_VALID);
+}
+
+
+int blk_phys_contig_segment(request_queue_t *q, struct bio *bio,
+				   struct bio *nxt)
+{
+	if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
+		return 0;
+
+	if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)))
+		return 0;
+	if (bio->bi_size + nxt->bi_size > q->max_segment_size)
+		return 0;
+
+	/*
+	 * bio and nxt are contiguous in memory, check if the queue allows
+	 * these two to be merged into one
+	 */
+	if (BIO_SEG_BOUNDARY(q, bio, nxt))
+		return 1;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(blk_phys_contig_segment);
+
+int blk_hw_contig_segment(request_queue_t *q, struct bio *bio,
+				 struct bio *nxt)
+{
+	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
+		blk_recount_segments(q, bio);
+	if (unlikely(!bio_flagged(nxt, BIO_SEG_VALID)))
+		blk_recount_segments(q, nxt);
+	if (!BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt)) ||
+	    BIOVEC_VIRT_OVERSIZE(bio->bi_hw_front_size + bio->bi_hw_back_size))
+		return 0;
+	if (bio->bi_size + nxt->bi_size > q->max_segment_size)
+		return 0;
+
+	return 1;
+}
+
+EXPORT_SYMBOL(blk_hw_contig_segment);
+
+/*
+ * map a request to scatterlist, return number of sg entries setup. Caller
+ * must make sure sg can hold rq->nr_phys_segments entries
+ */
+int blk_rq_map_sg(request_queue_t *q, struct request *rq, struct scatterlist *sg)
+{
+	struct bio_vec *bvec, *bvprv;
+	struct bio *bio;
+	int nsegs, i, cluster;
+
+	nsegs = 0;
+	cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER);
+
+	/*
+	 * for each bio in rq
+	 */
+	bvprv = NULL;
+	rq_for_each_bio(bio, rq) {
+		/*
+		 * for each segment in bio
+		 */
+		bio_for_each_segment(bvec, bio, i) {
+			int nbytes = bvec->bv_len;
+
+			if (bvprv && cluster) {
+				if (sg[nsegs - 1].length + nbytes > q->max_segment_size)
+					goto new_segment;
+
+				if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
+					goto new_segment;
+				if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
+					goto new_segment;
+
+				sg[nsegs - 1].length += nbytes;
+			} else {
+new_segment:
+				memset(&sg[nsegs], 0, sizeof(struct scatterlist));
+				sg[nsegs].page = bvec->bv_page;
+				sg[nsegs].length = nbytes;
+				sg[nsegs].offset = bvec->bv_offset;
+
+				nsegs++;
+			}
+			bvprv = bvec;
+		} /* segments in bio */
+	} /* bios in rq */
+
+	return nsegs;
+}
+
+EXPORT_SYMBOL(blk_rq_map_sg);
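+
+/*
+ * Illustrative sketch: DMA-mapping a request, assuming 'sg' was sized
+ * for q->max_phys_segments entries and 'pdev' is the driver's PCI
+ * device (both hypothetical names):
+ *
+ *	nents = blk_rq_map_sg(q, rq, sg);
+ *	nents = pci_map_sg(pdev, sg, nents, rq_data_dir(rq) == READ ?
+ *			   PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
+ */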
+
+/*
+ * the standard queue merge functions, can be overridden with device
+ * specific ones if so desired
+ */
+
+static inline int ll_new_mergeable(request_queue_t *q,
+				   struct request *req,
+				   struct bio *bio)
+{
+	int nr_phys_segs = bio_phys_segments(q, bio);
+
+	if (req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
+		req->flags |= REQ_NOMERGE;
+		if (req == q->last_merge)
+			q->last_merge = NULL;
+		return 0;
+	}
+
+	/*
+	 * A hw segment is just getting larger, bump just the phys
+	 * counter.
+	 */
+	req->nr_phys_segments += nr_phys_segs;
+	return 1;
+}
+
+static inline int ll_new_hw_segment(request_queue_t *q,
+				    struct request *req,
+				    struct bio *bio)
+{
+	int nr_hw_segs = bio_hw_segments(q, bio);
+	int nr_phys_segs = bio_phys_segments(q, bio);
+
+	if (req->nr_hw_segments + nr_hw_segs > q->max_hw_segments
+	    || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) {
+		req->flags |= REQ_NOMERGE;
+		if (req == q->last_merge)
+			q->last_merge = NULL;
+		return 0;
+	}
+
+	/*
+	 * This will form the start of a new hw segment.  Bump both
+	 * counters.
+	 */
+	req->nr_hw_segments += nr_hw_segs;
+	req->nr_phys_segments += nr_phys_segs;
+	return 1;
+}
+
+static int ll_back_merge_fn(request_queue_t *q, struct request *req, 
+			    struct bio *bio)
+{
+	int len;
+
+	if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
+		req->flags |= REQ_NOMERGE;
+		if (req == q->last_merge)
+			q->last_merge = NULL;
+		return 0;
+	}
+	if (unlikely(!bio_flagged(req->biotail, BIO_SEG_VALID)))
+		blk_recount_segments(q, req->biotail);
+	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
+		blk_recount_segments(q, bio);
+	len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size;
+	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio)) &&
+	    !BIOVEC_VIRT_OVERSIZE(len)) {
+		int mergeable =  ll_new_mergeable(q, req, bio);
+
+		if (mergeable) {
+			if (req->nr_hw_segments == 1)
+				req->bio->bi_hw_front_size = len;
+			if (bio->bi_hw_segments == 1)
+				bio->bi_hw_back_size = len;
+		}
+		return mergeable;
+	}
+
+	return ll_new_hw_segment(q, req, bio);
+}
+
+static int ll_front_merge_fn(request_queue_t *q, struct request *req, 
+			     struct bio *bio)
+{
+	int len;
+
+	if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
+		req->flags |= REQ_NOMERGE;
+		if (req == q->last_merge)
+			q->last_merge = NULL;
+		return 0;
+	}
+	len = bio->bi_hw_back_size + req->bio->bi_hw_front_size;
+	if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
+		blk_recount_segments(q, bio);
+	if (unlikely(!bio_flagged(req->bio, BIO_SEG_VALID)))
+		blk_recount_segments(q, req->bio);
+	if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(bio), __BVEC_START(req->bio)) &&
+	    !BIOVEC_VIRT_OVERSIZE(len)) {
+		int mergeable =  ll_new_mergeable(q, req, bio);
+
+		if (mergeable) {
+			if (bio->bi_hw_segments == 1)
+				bio->bi_hw_front_size = len;
+			if (req->nr_hw_segments == 1)
+				req->biotail->bi_hw_back_size = len;
+		}
+		return mergeable;
+	}
+
+	return ll_new_hw_segment(q, req, bio);
+}
+
+static int ll_merge_requests_fn(request_queue_t *q, struct request *req,
+				struct request *next)
+{
+	int total_phys_segments;
+	int total_hw_segments;
+
+	/*
+	 * First check if either of the requests is a re-queued
+	 * request.  We can't merge them if so.
+	 */
+	if (req->special || next->special)
+		return 0;
+
+	/*
+	 * Will it become too large?
+	 */
+	if ((req->nr_sectors + next->nr_sectors) > q->max_sectors)
+		return 0;
+
+	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
+	if (blk_phys_contig_segment(q, req->biotail, next->bio))
+		total_phys_segments--;
+
+	if (total_phys_segments > q->max_phys_segments)
+		return 0;
+
+	total_hw_segments = req->nr_hw_segments + next->nr_hw_segments;
+	if (blk_hw_contig_segment(q, req->biotail, next->bio)) {
+		int len = req->biotail->bi_hw_back_size + next->bio->bi_hw_front_size;
+		/*
+		 * propagate the combined length to the end of the requests
+		 */
+		if (req->nr_hw_segments == 1)
+			req->bio->bi_hw_front_size = len;
+		if (next->nr_hw_segments == 1)
+			next->biotail->bi_hw_back_size = len;
+		total_hw_segments--;
+	}
+
+	if (total_hw_segments > q->max_hw_segments)
+		return 0;
+
+	/* Merge is OK... */
+	req->nr_phys_segments = total_phys_segments;
+	req->nr_hw_segments = total_hw_segments;
+	return 1;
+}
+
+/*
+ * "plug" the device if there are no outstanding requests: this will
+ * force the transfer to start only after we have put all the requests
+ * on the list.
+ *
+ * This is called with interrupts off and no requests on the queue and
+ * with the queue lock held.
+ */
+void blk_plug_device(request_queue_t *q)
+{
+	WARN_ON(!irqs_disabled());
+
+	/*
+	 * don't plug a stopped queue, it must be paired with blk_start_queue()
+	 * which will restart the queueing
+	 */
+	if (test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags))
+		return;
+
+	if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
+		mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
+}
+
+EXPORT_SYMBOL(blk_plug_device);
+
+/*
+ * remove the queue from the plugged list, if present. called with
+ * queue lock held and interrupts disabled.
+ */
+int blk_remove_plug(request_queue_t *q)
+{
+	WARN_ON(!irqs_disabled());
+
+	if (!test_and_clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
+		return 0;
+
+	del_timer(&q->unplug_timer);
+	return 1;
+}
+
+EXPORT_SYMBOL(blk_remove_plug);
+
+/*
+ * remove the plug and let it rip..
+ */
+void __generic_unplug_device(request_queue_t *q)
+{
+	if (test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags))
+		return;
+
+	if (!blk_remove_plug(q))
+		return;
+
+	/*
+	 * was plugged, fire request_fn if queue has stuff to do
+	 */
+	if (elv_next_request(q))
+		q->request_fn(q);
+}
+EXPORT_SYMBOL(__generic_unplug_device);
+
+/**
+ * generic_unplug_device - fire a request queue
+ * @q:    The &request_queue_t in question
+ *
+ * Description:
+ *   Linux uses plugging to build up bigger requests before letting
+ *   the device have at them. If a queue is plugged, the I/O scheduler
+ *   is still adding and merging requests on the queue. Once the queue
+ *   gets unplugged, the request_fn defined for the queue is invoked and
+ *   transfers started.
+ **/
+void generic_unplug_device(request_queue_t *q)
+{
+	spin_lock_irq(q->queue_lock);
+	__generic_unplug_device(q);
+	spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL(generic_unplug_device);
+
+static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
+				   struct page *page)
+{
+	request_queue_t *q = bdi->unplug_io_data;
+
+	/*
+	 * devices don't necessarily have an ->unplug_fn defined
+	 */
+	if (q->unplug_fn)
+		q->unplug_fn(q);
+}
+
+static void blk_unplug_work(void *data)
+{
+	request_queue_t *q = data;
+
+	q->unplug_fn(q);
+}
+
+static void blk_unplug_timeout(unsigned long data)
+{
+	request_queue_t *q = (request_queue_t *)data;
+
+	kblockd_schedule_work(&q->unplug_work);
+}
+
+/**
+ * blk_start_queue - restart a previously stopped queue
+ * @q:    The &request_queue_t in question
+ *
+ * Description:
+ *   blk_start_queue() will clear the stop flag on the queue, and call
+ *   the request_fn for the queue if it was in a stopped state when
+ *   entered. Also see blk_stop_queue(). Queue lock must be held.
+ **/
+void blk_start_queue(request_queue_t *q)
+{
+	clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
+
+	/*
+	 * one level of recursion is ok and is much faster than kicking
+	 * the unplug handling
+	 */
+	if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) {
+		q->request_fn(q);
+		clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags);
+	} else {
+		blk_plug_device(q);
+		kblockd_schedule_work(&q->unplug_work);
+	}
+}
+
+EXPORT_SYMBOL(blk_start_queue);
+
+/**
+ * blk_stop_queue - stop a queue
+ * @q:    The &request_queue_t in question
+ *
+ * Description:
+ *   The Linux block layer assumes that a block driver will consume all
+ *   entries on the request queue when the request_fn strategy is called.
+ *   Often this will not happen, because of hardware limitations (queue
+ *   depth settings). If a device driver gets a 'queue full' response,
+ *   or if it simply chooses not to queue more I/O at one point, it can
+ *   call this function to prevent the request_fn from being called until
+ *   the driver has signalled it's ready to go again. This happens by calling
+ *   blk_start_queue() to restart queue operations. Queue lock must be held.
+ **/
+void blk_stop_queue(request_queue_t *q)
+{
+	blk_remove_plug(q);
+	set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);
+}
+EXPORT_SYMBOL(blk_stop_queue);
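+
+/*
+ * Illustrative sketch: a request_fn backing off when the hardware
+ * reports a full queue (mydrv_hw_full() is hypothetical):
+ *
+ *	if (mydrv_hw_full(hw)) {
+ *		blk_stop_queue(q);
+ *		return;
+ *	}
+ *
+ * The completion interrupt would then call blk_start_queue(), with the
+ * queue lock held, once the hardware can accept commands again.
+ */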
+
+/**
+ * blk_sync_queue - cancel any pending callbacks on a queue
+ * @q: the queue
+ *
+ * Description:
+ *     The block layer may perform asynchronous callback activity
+ *     on a queue, such as calling the unplug function after a timeout.
+ *     A block device may call blk_sync_queue to ensure that any
+ *     such activity is cancelled, thus allowing it to release resources
+ *     the callbacks might use. The caller must already have made sure
+ *     that its ->make_request_fn will not re-add plugging prior to calling
+ *     this function.
+ *
+ */
+void blk_sync_queue(struct request_queue *q)
+{
+	del_timer_sync(&q->unplug_timer);
+	kblockd_flush();
+}
+EXPORT_SYMBOL(blk_sync_queue);
+
+/**
+ * blk_run_queue - run a single device queue
+ * @q:	The queue to run
+ */
+void blk_run_queue(struct request_queue *q)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	blk_remove_plug(q);
+	q->request_fn(q);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+}
+EXPORT_SYMBOL(blk_run_queue);
+
+/**
+ * blk_cleanup_queue - release a &request_queue_t when it is no longer needed
+ * @q:    the request queue to be released
+ *
+ * Description:
+ *     blk_cleanup_queue is the pair to blk_init_queue() or
+ *     blk_queue_make_request().  It should be called when a request queue is
+ *     being released; typically when a block device is being de-registered.
+ *     Currently, its primary task is to free all the &struct request
+ *     structures that were allocated to the queue and the queue itself.
+ *
+ * Caveat:
+ *     Hopefully the low level driver will have finished any
+ *     outstanding requests first...
+ **/
+void blk_cleanup_queue(request_queue_t * q)
+{
+	struct request_list *rl = &q->rq;
+
+	if (!atomic_dec_and_test(&q->refcnt))
+		return;
+
+	if (q->elevator)
+		elevator_exit(q->elevator);
+
+	blk_sync_queue(q);
+
+	if (rl->rq_pool)
+		mempool_destroy(rl->rq_pool);
+
+	if (q->queue_tags)
+		__blk_queue_free_tags(q);
+
+	blk_queue_ordered(q, QUEUE_ORDERED_NONE);
+
+	kmem_cache_free(requestq_cachep, q);
+}
+
+EXPORT_SYMBOL(blk_cleanup_queue);
+
+static int blk_init_free_list(request_queue_t *q)
+{
+	struct request_list *rl = &q->rq;
+
+	rl->count[READ] = rl->count[WRITE] = 0;
+	rl->starved[READ] = rl->starved[WRITE] = 0;
+	init_waitqueue_head(&rl->wait[READ]);
+	init_waitqueue_head(&rl->wait[WRITE]);
+	init_waitqueue_head(&rl->drain);
+
+	rl->rq_pool = mempool_create(BLKDEV_MIN_RQ, mempool_alloc_slab, mempool_free_slab, request_cachep);
+
+	if (!rl->rq_pool)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static int __make_request(request_queue_t *, struct bio *);
+
+request_queue_t *blk_alloc_queue(int gfp_mask)
+{
+	request_queue_t *q = kmem_cache_alloc(requestq_cachep, gfp_mask);
+
+	if (!q)
+		return NULL;
+
+	memset(q, 0, sizeof(*q));
+	init_timer(&q->unplug_timer);
+	atomic_set(&q->refcnt, 1);
+
+	q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
+	q->backing_dev_info.unplug_io_data = q;
+
+	return q;
+}
+
+EXPORT_SYMBOL(blk_alloc_queue);
+
+/**
+ * blk_init_queue  - prepare a request queue for use with a block device
+ * @rfn:  The function to be called to process requests that have been
+ *        placed on the queue.
+ * @lock: Request queue spin lock
+ *
+ * Description:
+ *    If a block device wishes to use the standard request handling procedures,
+ *    which sorts requests and coalesces adjacent requests, then it must
+ *    call blk_init_queue().  The function @rfn will be called when there
+ *    are requests on the queue that need to be processed.  If the device
+ *    supports plugging, then @rfn may not be called immediately when requests
+ *    are available on the queue, but may be called at some time later instead.
+ *    Plugged queues are generally unplugged when a buffer belonging to one
+ *    of the requests on the queue is needed, or due to memory pressure.
+ *
+ *    @rfn is not required, or even expected, to remove all requests off the
+ *    queue, but only as many as it can handle at a time.  If it does leave
+ *    requests on the queue, it is responsible for arranging that the requests
+ *    get dealt with eventually.
+ *
+ *    The queue spin lock must be held while manipulating the requests on the
+ *    request queue.
+ *
+ *    Function returns a pointer to the initialized request queue, or NULL if
+ *    it didn't succeed.
+ *
+ * Note:
+ *    blk_init_queue() must be paired with a blk_cleanup_queue() call
+ *    when the block device is deactivated (such as at module unload).
+ **/
+request_queue_t *blk_init_queue(request_fn_proc *rfn, spinlock_t *lock)
+{
+	request_queue_t *q = blk_alloc_queue(GFP_KERNEL);
+
+	if (!q)
+		return NULL;
+
+	if (blk_init_free_list(q))
+		goto out_init;
+
+	q->request_fn		= rfn;
+	q->back_merge_fn       	= ll_back_merge_fn;
+	q->front_merge_fn      	= ll_front_merge_fn;
+	q->merge_requests_fn	= ll_merge_requests_fn;
+	q->prep_rq_fn		= NULL;
+	q->unplug_fn		= generic_unplug_device;
+	q->queue_flags		= (1 << QUEUE_FLAG_CLUSTER);
+	q->queue_lock		= lock;
+
+	blk_queue_segment_boundary(q, 0xffffffff);
+
+	blk_queue_make_request(q, __make_request);
+	blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
+
+	blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
+	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
+
+	/*
+	 * all done
+	 */
+	if (!elevator_init(q, NULL)) {
+		blk_queue_congestion_threshold(q);
+		return q;
+	}
+
+	blk_cleanup_queue(q);
+out_init:
+	kmem_cache_free(requestq_cachep, q);
+	return NULL;
+}
+
+EXPORT_SYMBOL(blk_init_queue);
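+
+/*
+ * Illustrative sketch of typical initialization in a driver using the
+ * standard request handling (mydrv_* names are hypothetical):
+ *
+ *	static spinlock_t mydrv_lock = SPIN_LOCK_UNLOCKED;
+ *
+ *	q = blk_init_queue(mydrv_request_fn, &mydrv_lock);
+ *	if (!q)
+ *		return -ENOMEM;
+ *	blk_queue_max_sectors(q, 128);
+ *	disk->queue = q;
+ */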
+
+int blk_get_queue(request_queue_t *q)
+{
+	if (!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
+		atomic_inc(&q->refcnt);
+		return 0;
+	}
+
+	return 1;
+}
+
+EXPORT_SYMBOL(blk_get_queue);
+
+static inline void blk_free_request(request_queue_t *q, struct request *rq)
+{
+	elv_put_request(q, rq);
+	mempool_free(rq, q->rq.rq_pool);
+}
+
+static inline struct request *blk_alloc_request(request_queue_t *q, int rw,
+						int gfp_mask)
+{
+	struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
+
+	if (!rq)
+		return NULL;
+
+	/*
+	 * first three bits are identical in rq->flags and bio->bi_rw,
+	 * see bio.h and blkdev.h
+	 */
+	rq->flags = rw;
+
+	if (!elv_set_request(q, rq, gfp_mask))
+		return rq;
+
+	mempool_free(rq, q->rq.rq_pool);
+	return NULL;
+}
+
+/*
+ * ioc_batching returns true if the ioc is a valid batching request and
+ * should be given priority access to a request.
+ */
+static inline int ioc_batching(request_queue_t *q, struct io_context *ioc)
+{
+	if (!ioc)
+		return 0;
+
+	/*
+	 * Make sure the process is able to allocate at least 1 request
+	 * even if the batch times out, otherwise we could theoretically
+	 * lose wakeups.
+	 */
+	return ioc->nr_batch_requests == q->nr_batching ||
+		(ioc->nr_batch_requests > 0
+		&& time_before(jiffies, ioc->last_waited + BLK_BATCH_TIME));
+}
+
+/*
+ * ioc_set_batching sets ioc to be a new "batcher" if it is not one. This
+ * will cause the process to be a "batcher" on all queues in the system. This
+ * is the behaviour we want though - once it gets a wakeup it should be given
+ * a nice run.
+ */
+void ioc_set_batching(request_queue_t *q, struct io_context *ioc)
+{
+	if (!ioc || ioc_batching(q, ioc))
+		return;
+
+	ioc->nr_batch_requests = q->nr_batching;
+	ioc->last_waited = jiffies;
+}
+
+static void __freed_request(request_queue_t *q, int rw)
+{
+	struct request_list *rl = &q->rq;
+
+	if (rl->count[rw] < queue_congestion_off_threshold(q))
+		clear_queue_congested(q, rw);
+
+	if (rl->count[rw] + 1 <= q->nr_requests) {
+		smp_mb();
+		if (waitqueue_active(&rl->wait[rw]))
+			wake_up(&rl->wait[rw]);
+
+		blk_clear_queue_full(q, rw);
+	}
+}
+
+/*
+ * A request has just been released.  Account for it, update the full and
+ * congestion status, wake up any waiters.   Called under q->queue_lock.
+ */
+static void freed_request(request_queue_t *q, int rw)
+{
+	struct request_list *rl = &q->rq;
+
+	rl->count[rw]--;
+
+	__freed_request(q, rw);
+
+	if (unlikely(rl->starved[rw ^ 1]))
+		__freed_request(q, rw ^ 1);
+
+	if (!rl->count[READ] && !rl->count[WRITE]) {
+		smp_mb();
+		if (unlikely(waitqueue_active(&rl->drain)))
+			wake_up(&rl->drain);
+	}
+}
+
+#define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist)
+/*
+ * Get a free request, queue_lock must not be held
+ */
+static struct request *get_request(request_queue_t *q, int rw, int gfp_mask)
+{
+	struct request *rq = NULL;
+	struct request_list *rl = &q->rq;
+	struct io_context *ioc = get_io_context(gfp_mask);
+
+	if (unlikely(test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags)))
+		goto out;
+
+	spin_lock_irq(q->queue_lock);
+	if (rl->count[rw]+1 >= q->nr_requests) {
+		/*
+		 * The queue will fill after this allocation, so set it as
+		 * full, and mark this process as "batching". This process
+		 * will be allowed to complete a batch of requests, others
+		 * will be blocked.
+		 */
+		if (!blk_queue_full(q, rw)) {
+			ioc_set_batching(q, ioc);
+			blk_set_queue_full(q, rw);
+		}
+	}
+
+	switch (elv_may_queue(q, rw)) {
+		case ELV_MQUEUE_NO:
+			goto rq_starved;
+		case ELV_MQUEUE_MAY:
+			break;
+		case ELV_MQUEUE_MUST:
+			goto get_rq;
+	}
+
+	if (blk_queue_full(q, rw) && !ioc_batching(q, ioc)) {
+		/*
+		 * The queue is full and the allocating process is not a
+		 * "batcher", and not exempted by the IO scheduler
+		 */
+		spin_unlock_irq(q->queue_lock);
+		goto out;
+	}
+
+get_rq:
+	rl->count[rw]++;
+	rl->starved[rw] = 0;
+	if (rl->count[rw] >= queue_congestion_on_threshold(q))
+		set_queue_congested(q, rw);
+	spin_unlock_irq(q->queue_lock);
+
+	rq = blk_alloc_request(q, rw, gfp_mask);
+	if (!rq) {
+		/*
+		 * Allocation failed presumably due to memory. Undo anything
+		 * we might have messed up.
+		 *
+		 * Allocating task should really be put onto the front of the
+		 * wait queue, but this is pretty rare.
+		 */
+		spin_lock_irq(q->queue_lock);
+		freed_request(q, rw);
+
+		/*
+		 * in the very unlikely event that allocation failed and no
+		 * requests for this direction were pending, mark us starved
+		 * so that freeing of a request in the other direction will
+		 * notice us. Another possible fix would be to split the
+		 * rq mempool into READ and WRITE
+		 */
+rq_starved:
+		if (unlikely(rl->count[rw] == 0))
+			rl->starved[rw] = 1;
+
+		spin_unlock_irq(q->queue_lock);
+		goto out;
+	}
+
+	if (ioc_batching(q, ioc))
+		ioc->nr_batch_requests--;
+
+	rq_init(q, rq);
+	rq->rl = rl;
+out:
+	put_io_context(ioc);
+	return rq;
+}
+
+/*
+ * No available requests for this queue, unplug the device and wait for some
+ * requests to become available.
+ */
+static struct request *get_request_wait(request_queue_t *q, int rw)
+{
+	DEFINE_WAIT(wait);
+	struct request *rq;
+
+	generic_unplug_device(q);
+	do {
+		struct request_list *rl = &q->rq;
+
+		prepare_to_wait_exclusive(&rl->wait[rw], &wait,
+				TASK_UNINTERRUPTIBLE);
+
+		rq = get_request(q, rw, GFP_NOIO);
+
+		if (!rq) {
+			struct io_context *ioc;
+
+			io_schedule();
+
+			/*
+			 * After sleeping, we become a "batching" process and
+			 * will be able to allocate at least one request, and
+			 * up to a big batch of them for a small period of time.
+			 * See ioc_batching, ioc_set_batching
+			 */
+			ioc = get_io_context(GFP_NOIO);
+			ioc_set_batching(q, ioc);
+			put_io_context(ioc);
+		}
+		finish_wait(&rl->wait[rw], &wait);
+	} while (!rq);
+
+	return rq;
+}
+
+struct request *blk_get_request(request_queue_t *q, int rw, int gfp_mask)
+{
+	struct request *rq;
+
+	BUG_ON(rw != READ && rw != WRITE);
+
+	if (gfp_mask & __GFP_WAIT)
+		rq = get_request_wait(q, rw);
+	else
+		rq = get_request(q, rw, gfp_mask);
+
+	return rq;
+}
+
+EXPORT_SYMBOL(blk_get_request);
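+
+/*
+ * Illustrative sketch: from process context, where sleeping is allowed,
+ * __GFP_WAIT guarantees a request is eventually returned:
+ *
+ *	rq = blk_get_request(q, WRITE, __GFP_WAIT);
+ *
+ * Atomic callers should pass GFP_ATOMIC instead and must check for a
+ * NULL return.
+ */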
+
+/**
+ * blk_requeue_request - put a request back on queue
+ * @q:		request queue where request should be inserted
+ * @rq:		request to be inserted
+ *
+ * Description:
+ *    Drivers often keep queueing requests until the hardware cannot accept
+ *    more, when that condition happens we need to put the request back
+ *    on the queue. Must be called with queue lock held.
+ */
+void blk_requeue_request(request_queue_t *q, struct request *rq)
+{
+	if (blk_rq_tagged(rq))
+		blk_queue_end_tag(q, rq);
+
+	elv_requeue_request(q, rq);
+}
+
+EXPORT_SYMBOL(blk_requeue_request);
+
+/**
+ * blk_insert_request - insert a special request in to a request queue
+ * @q:		request queue where request should be inserted
+ * @rq:		request to be inserted
+ * @at_head:	insert request at head or tail of queue
+ * @data:	private data
+ * @reinsert:	true if the request is a reinsertion of a previously processed one
+ *
+ * Description:
+ *    Many block devices need to execute commands asynchronously, so they don't
+ *    block the whole kernel from preemption during request execution.  This is
+ *    accomplished normally by inserting artificial requests tagged as
+ *    REQ_SPECIAL in to the corresponding request queue, and letting them be
+ *    scheduled for actual execution by the request queue.
+ *
+ *    We have the option of inserting at the head or the tail of the queue.
+ *    Typically we use the tail for new ioctls and so forth.  We use the head
+ *    of the queue for things like a QUEUE_FULL message from a device, or a
+ *    host that is unable to accept a particular command.
+ */
+void blk_insert_request(request_queue_t *q, struct request *rq,
+			int at_head, void *data, int reinsert)
+{
+	unsigned long flags;
+
+	/*
+	 * tell I/O scheduler that this isn't a regular read/write (ie it
+	 * must not attempt merges on this) and that it acts as a soft
+	 * barrier
+	 */
+	rq->flags |= REQ_SPECIAL | REQ_SOFTBARRIER;
+
+	rq->special = data;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+
+	/*
+	 * If command is tagged, release the tag
+	 */
+	if (reinsert)
+		blk_requeue_request(q, rq);
+	else {
+		int where = ELEVATOR_INSERT_BACK;
+
+		if (at_head)
+			where = ELEVATOR_INSERT_FRONT;
+
+		if (blk_rq_tagged(rq))
+			blk_queue_end_tag(q, rq);
+
+		drive_stat_acct(rq, rq->nr_sectors, 1);
+		__elv_add_request(q, rq, where, 0);
+	}
+	if (blk_queue_plugged(q))
+		__generic_unplug_device(q);
+	else
+		q->request_fn(q);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+EXPORT_SYMBOL(blk_insert_request);
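+
+/*
+ * Illustrative sketch: queueing a driver-private command at the tail of
+ * the queue, as an ioctl handler might (mydrv_cmd is a hypothetical
+ * driver-private payload):
+ *
+ *	rq = blk_get_request(q, WRITE, __GFP_WAIT);
+ *	blk_insert_request(q, rq, 0, mydrv_cmd, 0);
+ */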
+
+/**
+ * blk_rq_map_user - map user data to a request, for REQ_BLOCK_PC usage
+ * @q:		request queue where request should be inserted
+ * @rw:		READ or WRITE data
+ * @ubuf:	the user buffer
+ * @len:	length of user data
+ *
+ * Description:
+ *    Data will be mapped directly for zero copy io, if possible. Otherwise
+ *    a kernel bounce buffer is used.
+ *
+ *    A matching blk_rq_unmap_user() must be issued at the end of io, while
+ *    still in process context.
+ *
+ *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
+ *    before being submitted to the device, as pages mapped may be out of
+ *    reach. It's the caller's responsibility to make sure this happens. The
+ *    original bio must be passed back in to blk_rq_unmap_user() for proper
+ *    unmapping.
+ */
+struct request *blk_rq_map_user(request_queue_t *q, int rw, void __user *ubuf,
+				unsigned int len)
+{
+	unsigned long uaddr;
+	struct request *rq;
+	struct bio *bio;
+
+	if (len > (q->max_sectors << 9))
+		return ERR_PTR(-EINVAL);
+	if ((!len && ubuf) || (len && !ubuf))
+		return ERR_PTR(-EINVAL);
+
+	rq = blk_get_request(q, rw, __GFP_WAIT);
+	if (!rq)
+		return ERR_PTR(-ENOMEM);
+
+	/*
+	 * if alignment requirement is satisfied, map in user pages for
+	 * direct dma. else, set up kernel bounce buffers
+	 */
+	uaddr = (unsigned long) ubuf;
+	if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q)))
+		bio = bio_map_user(q, NULL, uaddr, len, rw == READ);
+	else
+		bio = bio_copy_user(q, uaddr, len, rw == READ);
+
+	if (!IS_ERR(bio)) {
+		rq->bio = rq->biotail = bio;
+		blk_rq_bio_prep(q, rq, bio);
+
+		rq->buffer = rq->data = NULL;
+		rq->data_len = len;
+		return rq;
+	}
+
+	/*
+	 * bio is the err-ptr
+	 */
+	blk_put_request(rq);
+	return (struct request *) bio;
+}
+
+EXPORT_SYMBOL(blk_rq_map_user);
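+
+/*
+ * Illustrative sketch of a pass-through ioctl path pairing map, execute
+ * and unmap. Note that the original bio is saved before execution, as
+ * required for blk_rq_unmap_user():
+ *
+ *	rq = blk_rq_map_user(q, rw, uaddr, len);
+ *	if (IS_ERR(rq))
+ *		return PTR_ERR(rq);
+ *	bio = rq->bio;
+ *	err = blk_execute_rq(q, disk, rq);
+ *	if (blk_rq_unmap_user(rq, bio, len))
+ *		err = -EFAULT;
+ */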
+
+/**
+ * blk_rq_unmap_user - unmap a request with user data
+ * @rq:		request to be unmapped
+ * @bio:	bio for the request
+ * @ulen:	length of user buffer
+ *
+ * Description:
+ *    Unmap a request previously mapped by blk_rq_map_user().
+ */
+int blk_rq_unmap_user(struct request *rq, struct bio *bio, unsigned int ulen)
+{
+	int ret = 0;
+
+	if (bio) {
+		if (bio_flagged(bio, BIO_USER_MAPPED))
+			bio_unmap_user(bio);
+		else
+			ret = bio_uncopy_user(bio);
+	}
+
+	blk_put_request(rq);
+	return ret;
+}
+
+EXPORT_SYMBOL(blk_rq_unmap_user);
+
+/**
+ * blk_execute_rq - insert a request into queue for execution
+ * @q:		queue to insert the request in
+ * @bd_disk:	matching gendisk
+ * @rq:		request to insert
+ *
+ * Description:
+ *    Insert a fully prepared request at the back of the io scheduler queue
+ *    for execution.
+ */
+int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk,
+		   struct request *rq)
+{
+	DECLARE_COMPLETION(wait);
+	char sense[SCSI_SENSE_BUFFERSIZE];
+	int err = 0;
+
+	rq->rq_disk = bd_disk;
+
+	/*
+	 * we need an extra reference to the request, so we can look at
+	 * it after io completion
+	 */
+	rq->ref_count++;
+
+	if (!rq->sense) {
+		memset(sense, 0, sizeof(sense));
+		rq->sense = sense;
+		rq->sense_len = 0;
+	}
+
+	rq->flags |= REQ_NOMERGE;
+	rq->waiting = &wait;
+	rq->end_io = blk_end_sync_rq;
+	elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1);
+	generic_unplug_device(q);
+	wait_for_completion(&wait);
+	rq->waiting = NULL;
+
+	if (rq->errors)
+		err = -EIO;
+
+	return err;
+}
+
+EXPORT_SYMBOL(blk_execute_rq);
+
+/**
+ * blkdev_issue_flush - queue a flush
+ * @bdev:	blockdev to issue flush for
+ * @error_sector:	error sector
+ *
+ * Description:
+ *    Issue a flush for the block device in question. Caller can supply
+ *    room for storing the error offset in case of a flush error, if they
+ *    wish to.  Caller must run wait_for_completion() on its own.
+ */
+int blkdev_issue_flush(struct block_device *bdev, sector_t *error_sector)
+{
+	request_queue_t *q;
+
+	if (bdev->bd_disk == NULL)
+		return -ENXIO;
+
+	q = bdev_get_queue(bdev);
+	if (!q)
+		return -ENXIO;
+	if (!q->issue_flush_fn)
+		return -EOPNOTSUPP;
+
+	return q->issue_flush_fn(q, bdev->bd_disk, error_sector);
+}
+
+EXPORT_SYMBOL(blkdev_issue_flush);
+
+/**
+ * blkdev_scsi_issue_flush_fn - issue flush for SCSI devices
+ * @q:		device queue
+ * @disk:	gendisk
+ * @error_sector:	error offset
+ *
+ * Description:
+ *    Devices understanding the SCSI command set can use this function as
+ *    a helper for issuing a cache flush. Note: driver is required to store
+ *    the error offset (in case of error flushing) in ->sector of struct
+ *    request.
+ */
+int blkdev_scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
+			       sector_t *error_sector)
+{
+	struct request *rq = blk_get_request(q, WRITE, __GFP_WAIT);
+	int ret;
+
+	rq->flags |= REQ_BLOCK_PC | REQ_SOFTBARRIER;
+	rq->sector = 0;
+	memset(rq->cmd, 0, sizeof(rq->cmd));
+	rq->cmd[0] = 0x35;
+	rq->cmd_len = 12;
+	rq->data = NULL;
+	rq->data_len = 0;
+	rq->timeout = 60 * HZ;
+
+	ret = blk_execute_rq(q, disk, rq);
+
+	if (ret && error_sector)
+		*error_sector = rq->sector;
+
+	blk_put_request(rq);
+	return ret;
+}
+
+EXPORT_SYMBOL(blkdev_scsi_issue_flush_fn);
+
+void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
+{
+	int rw = rq_data_dir(rq);
+
+	if (!blk_fs_request(rq) || !rq->rq_disk)
+		return;
+
+	if (rw == READ) {
+		__disk_stat_add(rq->rq_disk, read_sectors, nr_sectors);
+		if (!new_io)
+			__disk_stat_inc(rq->rq_disk, read_merges);
+	} else if (rw == WRITE) {
+		__disk_stat_add(rq->rq_disk, write_sectors, nr_sectors);
+		if (!new_io)
+			__disk_stat_inc(rq->rq_disk, write_merges);
+	}
+	if (new_io) {
+		disk_round_stats(rq->rq_disk);
+		rq->rq_disk->in_flight++;
+	}
+}
+
+/*
+ * add-request adds a request to the linked list.
+ * queue lock is held and interrupts disabled, as we muck with the
+ * request queue list.
+ */
+static inline void add_request(request_queue_t * q, struct request * req)
+{
+	drive_stat_acct(req, req->nr_sectors, 1);
+
+	if (q->activity_fn)
+		q->activity_fn(q->activity_data, rq_data_dir(req));
+
+	/*
+	 * elevator indicated where it wants this request to be
+	 * inserted at elevator_merge time
+	 */
+	__elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0);
+}
+
+/*
+ * disk_round_stats()	- Round off the performance stats on a struct
+ * disk_stats.
+ *
+ * The average IO queue length and utilisation statistics are maintained
+ * by observing the current state of the queue length and the amount of
+ * time for which it has been in this state.
+ *
+ * Normally, that accounting is done on IO completion, but that can result
+ * in more than a second's worth of IO being accounted for within any one
+ * second, leading to >100% utilisation.  To deal with that, we call this
+ * function to do a round-off before returning the results when reading
+ * /proc/diskstats.  This accounts immediately for all queue usage up to
+ * the current jiffies and restarts the counters again.
+ */
+void disk_round_stats(struct gendisk *disk)
+{
+	unsigned long now = jiffies;
+
+	__disk_stat_add(disk, time_in_queue,
+			disk->in_flight * (now - disk->stamp));
+	disk->stamp = now;
+
+	if (disk->in_flight)
+		__disk_stat_add(disk, io_ticks, (now - disk->stamp_idle));
+	disk->stamp_idle = now;
+}
+
+/*
+ * queue lock must be held
+ */
+static void __blk_put_request(request_queue_t *q, struct request *req)
+{
+	struct request_list *rl = req->rl;
+
+	if (unlikely(!q))
+		return;
+	if (unlikely(--req->ref_count))
+		return;
+
+	req->rq_status = RQ_INACTIVE;
+	req->q = NULL;
+	req->rl = NULL;
+
+	/*
+	 * Request may not have originated from ll_rw_blk. if not,
+	 * it didn't come out of our reserved rq pools
+	 */
+	if (rl) {
+		int rw = rq_data_dir(req);
+
+		elv_completed_request(q, req);
+
+		BUG_ON(!list_empty(&req->queuelist));
+
+		blk_free_request(q, req);
+		freed_request(q, rw);
+	}
+}
+
+void blk_put_request(struct request *req)
+{
+	/*
+	 * if req->rl isn't set, this request didn't originate from the
+	 * block layer, so it's safe to just disregard it
+	 */
+	if (req->rl) {
+		unsigned long flags;
+		request_queue_t *q = req->q;
+
+		spin_lock_irqsave(q->queue_lock, flags);
+		__blk_put_request(q, req);
+		spin_unlock_irqrestore(q->queue_lock, flags);
+	}
+}
+
+EXPORT_SYMBOL(blk_put_request);
+
+/**
+ * blk_end_sync_rq - executes a completion event on a request
+ * @rq: request to complete
+ */
+void blk_end_sync_rq(struct request *rq)
+{
+	struct completion *waiting = rq->waiting;
+
+	rq->waiting = NULL;
+	__blk_put_request(rq->q, rq);
+
+	/*
+	 * complete last, if this is a stack request the process (and thus
+	 * the rq pointer) could be invalid right after this complete()
+	 */
+	complete(waiting);
+}
+EXPORT_SYMBOL(blk_end_sync_rq);
+
+/**
+ * blk_congestion_wait - wait for a queue to become uncongested
+ * @rw: READ or WRITE
+ * @timeout: timeout in jiffies
+ *
+ * Waits for up to @timeout jiffies for a queue (any queue) to exit congestion.
+ * If no queues are congested then just wait for the next request to be
+ * returned.
+ */
+long blk_congestion_wait(int rw, long timeout)
+{
+	long ret;
+	DEFINE_WAIT(wait);
+	wait_queue_head_t *wqh = &congestion_wqh[rw];
+
+	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
+	ret = io_schedule_timeout(timeout);
+	finish_wait(wqh, &wait);
+	return ret;
+}
+
+EXPORT_SYMBOL(blk_congestion_wait);
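+
+/*
+ * Illustrative sketch, mirroring the VM writeback usage: a loop that
+ * throttles itself when the queues cannot take more writes
+ * (mydrv_push() is hypothetical and returns -EAGAIN when blocked):
+ *
+ *	while (mydrv_push(ctx) == -EAGAIN)
+ *		blk_congestion_wait(WRITE, HZ/10);
+ */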
+
+/*
+ * Has to be called with the request spinlock acquired
+ */
+static int attempt_merge(request_queue_t *q, struct request *req,
+			  struct request *next)
+{
+	if (!rq_mergeable(req) || !rq_mergeable(next))
+		return 0;
+
+	/*
+	 * not contiguous
+	 */
+	if (req->sector + req->nr_sectors != next->sector)
+		return 0;
+
+	if (rq_data_dir(req) != rq_data_dir(next)
+	    || req->rq_disk != next->rq_disk
+	    || next->waiting || next->special)
+		return 0;
+
+	/*
+	 * If we are allowed to merge, then append bio list
+	 * from next to rq and release next. merge_requests_fn
+	 * will have updated segment counts, update sector
+	 * counts here.
+	 */
+	if (!q->merge_requests_fn(q, req, next))
+		return 0;
+
+	/*
+	 * At this point we have either done a back merge
+	 * or front merge. We need the smaller start_time of
+	 * the merged requests to be the current request
+	 * for accounting purposes.
+	 */
+	if (time_after(req->start_time, next->start_time))
+		req->start_time = next->start_time;
+
+	req->biotail->bi_next = next->bio;
+	req->biotail = next->biotail;
+
+	req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors;
+
+	elv_merge_requests(q, req, next);
+
+	if (req->rq_disk) {
+		disk_round_stats(req->rq_disk);
+		req->rq_disk->in_flight--;
+	}
+
+	__blk_put_request(q, next);
+	return 1;
+}
+
+static inline int attempt_back_merge(request_queue_t *q, struct request *rq)
+{
+	struct request *next = elv_latter_request(q, rq);
+
+	if (next)
+		return attempt_merge(q, rq, next);
+
+	return 0;
+}
+
+static inline int attempt_front_merge(request_queue_t *q, struct request *rq)
+{
+	struct request *prev = elv_former_request(q, rq);
+
+	if (prev)
+		return attempt_merge(q, prev, rq);
+
+	return 0;
+}
+
+/**
+ * blk_attempt_remerge  - attempt to remerge active head with next request
+ * @q:    The &request_queue_t belonging to the device
+ * @rq:   The head request (usually)
+ *
+ * Description:
+ *    For head-active devices, the queue can easily be unplugged so quickly
+ *    that proper merging is not done on the front request. This may hurt
+ *    performance greatly for some devices. The block layer cannot safely
+ *    do merging on that first request for these queues, but the driver can
+ *    call this function and make it happen anyway. Only the driver knows
+ *    when it is safe to do so.
+ **/
+void blk_attempt_remerge(request_queue_t *q, struct request *rq)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	attempt_back_merge(q, rq);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+EXPORT_SYMBOL(blk_attempt_remerge);
+
+/*
+ * Non-locking blk_attempt_remerge variant.
+ */
+void __blk_attempt_remerge(request_queue_t *q, struct request *rq)
+{
+	attempt_back_merge(q, rq);
+}
+
+EXPORT_SYMBOL(__blk_attempt_remerge);
+
+static int __make_request(request_queue_t *q, struct bio *bio)
+{
+	struct request *req, *freereq = NULL;
+	int el_ret, rw, nr_sectors, cur_nr_sectors, barrier, err;
+	sector_t sector;
+
+	sector = bio->bi_sector;
+	nr_sectors = bio_sectors(bio);
+	cur_nr_sectors = bio_cur_sectors(bio);
+
+	rw = bio_data_dir(bio);
+
+	/*
+	 * low level driver can indicate that it wants pages above a
+	 * certain limit bounced to low memory (ie for highmem, or even
+	 * ISA dma in theory)
+	 */
+	blk_queue_bounce(q, &bio);
+
+	spin_lock_prefetch(q->queue_lock);
+
+	barrier = bio_barrier(bio);
+	if (barrier && (q->ordered == QUEUE_ORDERED_NONE)) {
+		err = -EOPNOTSUPP;
+		goto end_io;
+	}
+
+again:
+	spin_lock_irq(q->queue_lock);
+
+	if (elv_queue_empty(q)) {
+		blk_plug_device(q);
+		goto get_rq;
+	}
+	if (barrier)
+		goto get_rq;
+
+	el_ret = elv_merge(q, &req, bio);
+	switch (el_ret) {
+		case ELEVATOR_BACK_MERGE:
+			BUG_ON(!rq_mergeable(req));
+
+			if (!q->back_merge_fn(q, req, bio))
+				break;
+
+			req->biotail->bi_next = bio;
+			req->biotail = bio;
+			req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+			drive_stat_acct(req, nr_sectors, 0);
+			if (!attempt_back_merge(q, req))
+				elv_merged_request(q, req);
+			goto out;
+
+		case ELEVATOR_FRONT_MERGE:
+			BUG_ON(!rq_mergeable(req));
+
+			if (!q->front_merge_fn(q, req, bio))
+				break;
+
+			bio->bi_next = req->bio;
+			req->bio = bio;
+
+			/*
+			 * may not be valid. if the low level driver said
+			 * it didn't need a bounce buffer then it better
+			 * not touch req->buffer either...
+			 */
+			req->buffer = bio_data(bio);
+			req->current_nr_sectors = cur_nr_sectors;
+			req->hard_cur_sectors = cur_nr_sectors;
+			req->sector = req->hard_sector = sector;
+			req->nr_sectors = req->hard_nr_sectors += nr_sectors;
+			drive_stat_acct(req, nr_sectors, 0);
+			if (!attempt_front_merge(q, req))
+				elv_merged_request(q, req);
+			goto out;
+
+		/*
+		 * elevator says don't/can't merge. get new request
+		 */
+		case ELEVATOR_NO_MERGE:
+			break;
+
+		default:
+			printk("elevator returned crap (%d)\n", el_ret);
+			BUG();
+	}
+
+	/*
+	 * Grab a free request from the freelist - if that is empty, check
+	 * if we are doing read ahead and abort instead of blocking for
+	 * a free slot.
+	 */
+get_rq:
+	if (freereq) {
+		req = freereq;
+		freereq = NULL;
+	} else {
+		spin_unlock_irq(q->queue_lock);
+		if ((freereq = get_request(q, rw, GFP_ATOMIC)) == NULL) {
+			/*
+			 * READA bit set
+			 */
+			err = -EWOULDBLOCK;
+			if (bio_rw_ahead(bio))
+				goto end_io;
+
+			freereq = get_request_wait(q, rw);
+		}
+		goto again;
+	}
+
+	req->flags |= REQ_CMD;
+
+	/*
+	 * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
+	 */
+	if (bio_rw_ahead(bio) || bio_failfast(bio))
+		req->flags |= REQ_FAILFAST;
+
+	/*
+	 * REQ_BARRIER implies no merging, but lets make it explicit
+	 */
+	if (barrier)
+		req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);
+
+	req->errors = 0;
+	req->hard_sector = req->sector = sector;
+	req->hard_nr_sectors = req->nr_sectors = nr_sectors;
+	req->current_nr_sectors = req->hard_cur_sectors = cur_nr_sectors;
+	req->nr_phys_segments = bio_phys_segments(q, bio);
+	req->nr_hw_segments = bio_hw_segments(q, bio);
+	req->buffer = bio_data(bio);	/* see ->buffer comment above */
+	req->waiting = NULL;
+	req->bio = req->biotail = bio;
+	req->rq_disk = bio->bi_bdev->bd_disk;
+	req->start_time = jiffies;
+
+	add_request(q, req);
+out:
+	if (freereq)
+		__blk_put_request(q, freereq);
+	if (bio_sync(bio))
+		__generic_unplug_device(q);
+
+	spin_unlock_irq(q->queue_lock);
+	return 0;
+
+end_io:
+	bio_endio(bio, nr_sectors << 9, err);
+	return 0;
+}
+
+/*
+ * If bio->bi_bdev is a partition, remap the location
+ */
+static inline void blk_partition_remap(struct bio *bio)
+{
+	struct block_device *bdev = bio->bi_bdev;
+
+	if (bdev != bdev->bd_contains) {
+		struct hd_struct *p = bdev->bd_part;
+
+		switch (bio->bi_rw) {
+		case READ:
+			p->read_sectors += bio_sectors(bio);
+			p->reads++;
+			break;
+		case WRITE:
+			p->write_sectors += bio_sectors(bio);
+			p->writes++;
+			break;
+		}
+		bio->bi_sector += p->start_sect;
+		bio->bi_bdev = bdev->bd_contains;
+	}
+}
+
+void blk_finish_queue_drain(request_queue_t *q)
+{
+	struct request_list *rl = &q->rq;
+	struct request *rq;
+
+	spin_lock_irq(q->queue_lock);
+	clear_bit(QUEUE_FLAG_DRAIN, &q->queue_flags);
+
+	while (!list_empty(&q->drain_list)) {
+		rq = list_entry_rq(q->drain_list.next);
+
+		list_del_init(&rq->queuelist);
+		__elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1);
+	}
+
+	spin_unlock_irq(q->queue_lock);
+
+	wake_up(&rl->wait[0]);
+	wake_up(&rl->wait[1]);
+	wake_up(&rl->drain);
+}
+
+static int wait_drain(request_queue_t *q, struct request_list *rl, int dispatch)
+{
+	int wait = rl->count[READ] + rl->count[WRITE];
+
+	if (dispatch)
+		wait += !list_empty(&q->queue_head);
+
+	return wait;
+}
+
+/*
+ * We rely on the fact that only requests allocated through blk_alloc_request()
+ * have io scheduler private data structures associated with them. Any other
+ * type of request (allocated on stack or through kmalloc()) should not go
+ * to the io scheduler core, but be attached to the queue head instead.
+ */
+void blk_wait_queue_drained(request_queue_t *q, int wait_dispatch)
+{
+	struct request_list *rl = &q->rq;
+	DEFINE_WAIT(wait);
+
+	spin_lock_irq(q->queue_lock);
+	set_bit(QUEUE_FLAG_DRAIN, &q->queue_flags);
+
+	while (wait_drain(q, rl, wait_dispatch)) {
+		prepare_to_wait(&rl->drain, &wait, TASK_UNINTERRUPTIBLE);
+
+		if (wait_drain(q, rl, wait_dispatch)) {
+			__generic_unplug_device(q);
+			spin_unlock_irq(q->queue_lock);
+			io_schedule();
+			spin_lock_irq(q->queue_lock);
+		}
+
+		finish_wait(&rl->drain, &wait);
+	}
+
+	spin_unlock_irq(q->queue_lock);
+}
+
+/*
+ * block waiting for the io scheduler being started again.
+ */
+static inline void block_wait_queue_running(request_queue_t *q)
+{
+	DEFINE_WAIT(wait);
+
+	while (test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags)) {
+		struct request_list *rl = &q->rq;
+
+		prepare_to_wait_exclusive(&rl->drain, &wait,
+				TASK_UNINTERRUPTIBLE);
+
+		/*
+		 * re-check the condition. avoids using prepare_to_wait()
+		 * in the fast path (queue is running)
+		 */
+		if (test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags))
+			io_schedule();
+
+		finish_wait(&rl->drain, &wait);
+	}
+}
+
+static void handle_bad_sector(struct bio *bio)
+{
+	char b[BDEVNAME_SIZE];
+
+	printk(KERN_INFO "attempt to access beyond end of device\n");
+	printk(KERN_INFO "%s: rw=%ld, want=%Lu, limit=%Lu\n",
+			bdevname(bio->bi_bdev, b),
+			bio->bi_rw,
+			(unsigned long long)bio->bi_sector + bio_sectors(bio),
+			(long long)(bio->bi_bdev->bd_inode->i_size >> 9));
+
+	set_bit(BIO_EOF, &bio->bi_flags);
+}
+
+/**
+ * generic_make_request: hand a buffer to its device driver for I/O
+ * @bio:  The bio describing the location in memory and on the device.
+ *
+ * generic_make_request() is used to make I/O requests of block
+ * devices. It is passed a &struct bio, which describes the I/O that needs
+ * to be done.
+ *
+ * generic_make_request() does not return any status.  The
+ * success/failure status of the request, along with notification of
+ * completion, is delivered asynchronously through the bio->bi_end_io
+ * function described (one day) elsewhere.
+ *
+ * The caller of generic_make_request must make sure that bi_io_vec
+ * is set to describe the memory buffer, and that bi_bdev and bi_sector
+ * are set to describe the device address, and that
+ * bi_end_io and optionally bi_private are set to describe how
+ * completion notification should be signaled.
+ *
+ * generic_make_request and the drivers it calls may use bi_next if this
+ * bio happens to be merged with someone else, and may change bi_bdev and
+ * bi_sector for remaps as it sees fit.  So the values of these fields
+ * should NOT be depended on after the call to generic_make_request.
+ */
+void generic_make_request(struct bio *bio)
+{
+	request_queue_t *q;
+	sector_t maxsector;
+	int ret, nr_sectors = bio_sectors(bio);
+
+	might_sleep();
+	/* Test device or partition size, when known. */
+	maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
+	if (maxsector) {
+		sector_t sector = bio->bi_sector;
+
+		if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
+			/*
+			 * This may well happen - the kernel calls bread()
+			 * without checking the size of the device, e.g., when
+			 * mounting a device.
+			 */
+			handle_bad_sector(bio);
+			goto end_io;
+		}
+	}
+
+	/*
+	 * Resolve the mapping until finished. (drivers are
+	 * still free to implement/resolve their own stacking
+	 * by explicitly returning 0)
+	 *
+	 * NOTE: we don't repeat the blk_size check for each new device.
+	 * Stacking drivers are expected to know what they are doing.
+	 */
+	do {
+		char b[BDEVNAME_SIZE];
+
+		q = bdev_get_queue(bio->bi_bdev);
+		if (!q) {
+			printk(KERN_ERR
+			       "generic_make_request: Trying to access "
+				"nonexistent block-device %s (%Lu)\n",
+				bdevname(bio->bi_bdev, b),
+				(long long) bio->bi_sector);
+end_io:
+			bio_endio(bio, bio->bi_size, -EIO);
+			break;
+		}
+
+		if (unlikely(bio_sectors(bio) > q->max_hw_sectors)) {
+			printk("bio too big device %s (%u > %u)\n", 
+				bdevname(bio->bi_bdev, b),
+				bio_sectors(bio),
+				q->max_hw_sectors);
+			goto end_io;
+		}
+
+		if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))
+			goto end_io;
+
+		block_wait_queue_running(q);
+
+		/*
+		 * If this device has partitions, remap block n
+		 * of partition p to block n+start(p) of the disk.
+		 */
+		blk_partition_remap(bio);
+
+		ret = q->make_request_fn(q, bio);
+	} while (ret);
+}
+
+EXPORT_SYMBOL(generic_make_request);
+
+/**
+ * submit_bio: submit a bio to the block device layer for I/O
+ * @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
+ * @bio: The &struct bio which describes the I/O
+ *
+ * submit_bio() is very similar in purpose to generic_make_request(), and
+ * uses that function to do most of the work. Both are fairly rough
+ * interfaces; @bio must be set up in advance and ready for I/O.
+ *
+ */
+void submit_bio(int rw, struct bio *bio)
+{
+	int count = bio_sectors(bio);
+
+	BIO_BUG_ON(!bio->bi_size);
+	BIO_BUG_ON(!bio->bi_io_vec);
+	bio->bi_rw = rw;
+	if (rw & WRITE)
+		mod_page_state(pgpgout, count);
+	else
+		mod_page_state(pgpgin, count);
+
+	if (unlikely(block_dump)) {
+		char b[BDEVNAME_SIZE];
+		printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n",
+			current->comm, current->pid,
+			(rw & WRITE) ? "WRITE" : "READ",
+			(unsigned long long)bio->bi_sector,
+			bdevname(bio->bi_bdev,b));
+	}
+
+	generic_make_request(bio);
+}
+
+EXPORT_SYMBOL(submit_bio);
+
+void blk_recalc_rq_segments(struct request *rq)
+{
+	struct bio *bio, *prevbio = NULL;
+	int nr_phys_segs, nr_hw_segs;
+	unsigned int phys_size, hw_size;
+	request_queue_t *q = rq->q;
+
+	if (!rq->bio)
+		return;
+
+	phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0;
+	rq_for_each_bio(bio, rq) {
+		/* Force bio hw/phys segs to be recalculated. */
+		bio->bi_flags &= ~(1 << BIO_SEG_VALID);
+
+		nr_phys_segs += bio_phys_segments(q, bio);
+		nr_hw_segs += bio_hw_segments(q, bio);
+		if (prevbio) {
+			int pseg = phys_size + prevbio->bi_size + bio->bi_size;
+			int hseg = hw_size + prevbio->bi_size + bio->bi_size;
+
+			if (blk_phys_contig_segment(q, prevbio, bio) &&
+			    pseg <= q->max_segment_size) {
+				nr_phys_segs--;
+				phys_size += prevbio->bi_size + bio->bi_size;
+			} else
+				phys_size = 0;
+
+			if (blk_hw_contig_segment(q, prevbio, bio) &&
+			    hseg <= q->max_segment_size) {
+				nr_hw_segs--;
+				hw_size += prevbio->bi_size + bio->bi_size;
+			} else
+				hw_size = 0;
+		}
+		prevbio = bio;
+	}
+
+	rq->nr_phys_segments = nr_phys_segs;
+	rq->nr_hw_segments = nr_hw_segs;
+}
+
+void blk_recalc_rq_sectors(struct request *rq, int nsect)
+{
+	if (blk_fs_request(rq)) {
+		rq->hard_sector += nsect;
+		rq->hard_nr_sectors -= nsect;
+
+		/*
+		 * Move the I/O submission pointers ahead if required.
+		 */
+		if ((rq->nr_sectors >= rq->hard_nr_sectors) &&
+		    (rq->sector <= rq->hard_sector)) {
+			rq->sector = rq->hard_sector;
+			rq->nr_sectors = rq->hard_nr_sectors;
+			rq->hard_cur_sectors = bio_cur_sectors(rq->bio);
+			rq->current_nr_sectors = rq->hard_cur_sectors;
+			rq->buffer = bio_data(rq->bio);
+		}
+
+		/*
+		 * if total number of sectors is less than the first segment
+		 * size, something has gone terribly wrong
+		 */
+		if (rq->nr_sectors < rq->current_nr_sectors) {
+			printk("blk: request botched\n");
+			rq->nr_sectors = rq->current_nr_sectors;
+		}
+	}
+}
+
+static int __end_that_request_first(struct request *req, int uptodate,
+				    int nr_bytes)
+{
+	int total_bytes, bio_nbytes, error, next_idx = 0;
+	struct bio *bio;
+
+	/*
+	 * extend the uptodate flag: a value < 0 carries an explicit
+	 * error code, as used by direct I/O
+	 */
+	error = 0;
+	if (end_io_error(uptodate))
+		error = !uptodate ? -EIO : uptodate;
+
+	/*
+	 * for a REQ_BLOCK_PC request, we want to carry any eventual
+	 * sense key with us all the way through
+	 */
+	if (!blk_pc_request(req))
+		req->errors = 0;
+
+	if (!uptodate) {
+		if (blk_fs_request(req) && !(req->flags & REQ_QUIET))
+			printk("end_request: I/O error, dev %s, sector %llu\n",
+				req->rq_disk ? req->rq_disk->disk_name : "?",
+				(unsigned long long)req->sector);
+	}
+
+	total_bytes = bio_nbytes = 0;
+	while ((bio = req->bio) != NULL) {
+		int nbytes;
+
+		if (nr_bytes >= bio->bi_size) {
+			req->bio = bio->bi_next;
+			nbytes = bio->bi_size;
+			bio_endio(bio, nbytes, error);
+			next_idx = 0;
+			bio_nbytes = 0;
+		} else {
+			int idx = bio->bi_idx + next_idx;
+
+			if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
+				blk_dump_rq_flags(req, "__end_that");
+				printk("%s: bio idx %d >= vcnt %d\n",
+						__FUNCTION__,
+						bio->bi_idx, bio->bi_vcnt);
+				break;
+			}
+
+			nbytes = bio_iovec_idx(bio, idx)->bv_len;
+			BIO_BUG_ON(nbytes > bio->bi_size);
+
+			/*
+			 * not a complete bvec done
+			 */
+			if (unlikely(nbytes > nr_bytes)) {
+				bio_nbytes += nr_bytes;
+				total_bytes += nr_bytes;
+				break;
+			}
+
+			/*
+			 * advance to the next vector
+			 */
+			next_idx++;
+			bio_nbytes += nbytes;
+		}
+
+		total_bytes += nbytes;
+		nr_bytes -= nbytes;
+
+		if ((bio = req->bio)) {
+			/*
+			 * end more in this run, or just return 'not-done'
+			 */
+			if (unlikely(nr_bytes <= 0))
+				break;
+		}
+	}
+
+	/*
+	 * completely done
+	 */
+	if (!req->bio)
+		return 0;
+
+	/*
+	 * if the request wasn't completed, update state
+	 */
+	if (bio_nbytes) {
+		bio_endio(bio, bio_nbytes, error);
+		bio->bi_idx += next_idx;
+		bio_iovec(bio)->bv_offset += nr_bytes;
+		bio_iovec(bio)->bv_len -= nr_bytes;
+	}
+
+	blk_recalc_rq_sectors(req, total_bytes >> 9);
+	blk_recalc_rq_segments(req);
+	return 1;
+}
+
+/**
+ * end_that_request_first - end I/O on a request
+ * @req:      the request being processed
+ * @uptodate: 1 for success, 0 for I/O error, < 0 for specific error
+ * @nr_sectors: number of sectors to end I/O on
+ *
+ * Description:
+ *     Ends I/O on a number of sectors attached to @req, and sets it up
+ *     for the next range of segments (if any) in the cluster.
+ *
+ * Return:
+ *     0 - we are done with this request, call end_that_request_last()
+ *     1 - still buffers pending for this request
+ **/
+int end_that_request_first(struct request *req, int uptodate, int nr_sectors)
+{
+	return __end_that_request_first(req, uptodate, nr_sectors << 9);
+}
+
+EXPORT_SYMBOL(end_that_request_first);
+
+/**
+ * end_that_request_chunk - end I/O on a request
+ * @req:      the request being processed
+ * @uptodate: 1 for success, 0 for I/O error, < 0 for specific error
+ * @nr_bytes: number of bytes to complete
+ *
+ * Description:
+ *     Ends I/O on a number of bytes attached to @req, and sets it up
+ *     for the next range of segments (if any). Like end_that_request_first(),
+ *     but deals with bytes instead of sectors.
+ *
+ * Return:
+ *     0 - we are done with this request, call end_that_request_last()
+ *     1 - still buffers pending for this request
+ **/
+int end_that_request_chunk(struct request *req, int uptodate, int nr_bytes)
+{
+	return __end_that_request_first(req, uptodate, nr_bytes);
+}
+
+EXPORT_SYMBOL(end_that_request_chunk);
+
+/*
+ * queue lock must be held
+ */
+void end_that_request_last(struct request *req)
+{
+	struct gendisk *disk = req->rq_disk;
+
+	if (unlikely(laptop_mode) && blk_fs_request(req))
+		laptop_io_completion();
+
+	if (disk && blk_fs_request(req)) {
+		unsigned long duration = jiffies - req->start_time;
+		switch (rq_data_dir(req)) {
+		    case WRITE:
+			__disk_stat_inc(disk, writes);
+			__disk_stat_add(disk, write_ticks, duration);
+			break;
+		    case READ:
+			__disk_stat_inc(disk, reads);
+			__disk_stat_add(disk, read_ticks, duration);
+			break;
+		}
+		disk_round_stats(disk);
+		disk->in_flight--;
+	}
+	if (req->end_io)
+		req->end_io(req);
+	else
+		__blk_put_request(req->q, req);
+}
+
+EXPORT_SYMBOL(end_that_request_last);
+
+void end_request(struct request *req, int uptodate)
+{
+	if (!end_that_request_first(req, uptodate, req->hard_cur_sectors)) {
+		add_disk_randomness(req->rq_disk);
+		blkdev_dequeue_request(req);
+		end_that_request_last(req);
+	}
+}
+
+EXPORT_SYMBOL(end_request);
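+
+/*
+ * Illustrative sketch, not part of this file: the typical shape of a simple
+ * driver's request_fn, completing work with end_request() as defined above.
+ * example_transfer() is a hypothetical stand-in for the actual data copy.
+ *
+ *	static void example_request_fn(request_queue_t *q)
+ *	{
+ *		struct request *req;
+ *
+ *		while ((req = elv_next_request(q)) != NULL) {
+ *			int ok = example_transfer(req->buffer, req->sector,
+ *						  req->current_nr_sectors,
+ *						  rq_data_dir(req));
+ *			// dequeues and finishes the request once all
+ *			// of its sectors have been ended
+ *			end_request(req, ok);
+ *		}
+ *	}
+ */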
+
+void blk_rq_bio_prep(request_queue_t *q, struct request *rq, struct bio *bio)
+{
+	/* first three bits are identical in rq->flags and bio->bi_rw */
+	rq->flags |= (bio->bi_rw & 7);
+
+	rq->nr_phys_segments = bio_phys_segments(q, bio);
+	rq->nr_hw_segments = bio_hw_segments(q, bio);
+	rq->current_nr_sectors = bio_cur_sectors(bio);
+	rq->hard_cur_sectors = rq->current_nr_sectors;
+	rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio);
+	rq->buffer = bio_data(bio);
+
+	rq->bio = rq->biotail = bio;
+}
+
+EXPORT_SYMBOL(blk_rq_bio_prep);
+
+int kblockd_schedule_work(struct work_struct *work)
+{
+	return queue_work(kblockd_workqueue, work);
+}
+
+EXPORT_SYMBOL(kblockd_schedule_work);
+
+void kblockd_flush(void)
+{
+	flush_workqueue(kblockd_workqueue);
+}
+EXPORT_SYMBOL(kblockd_flush);
+
+int __init blk_dev_init(void)
+{
+	kblockd_workqueue = create_workqueue("kblockd");
+	if (!kblockd_workqueue)
+		panic("Failed to create kblockd\n");
+
+	request_cachep = kmem_cache_create("blkdev_requests",
+			sizeof(struct request), 0, SLAB_PANIC, NULL, NULL);
+
+	requestq_cachep = kmem_cache_create("blkdev_queue",
+			sizeof(request_queue_t), 0, SLAB_PANIC, NULL, NULL);
+
+	iocontext_cachep = kmem_cache_create("blkdev_ioc",
+			sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL);
+
+	blk_max_low_pfn = max_low_pfn;
+	blk_max_pfn = max_pfn;
+
+	return 0;
+}
+
+/*
+ * IO Context helper functions
+ */
+void put_io_context(struct io_context *ioc)
+{
+	if (ioc == NULL)
+		return;
+
+	BUG_ON(atomic_read(&ioc->refcount) == 0);
+
+	if (atomic_dec_and_test(&ioc->refcount)) {
+		if (ioc->aic && ioc->aic->dtor)
+			ioc->aic->dtor(ioc->aic);
+		if (ioc->cic && ioc->cic->dtor)
+			ioc->cic->dtor(ioc->cic);
+
+		kmem_cache_free(iocontext_cachep, ioc);
+	}
+}
+EXPORT_SYMBOL(put_io_context);
+
+/* Called by the exiting task */
+void exit_io_context(void)
+{
+	unsigned long flags;
+	struct io_context *ioc;
+
+	local_irq_save(flags);
+	ioc = current->io_context;
+	current->io_context = NULL;
+	local_irq_restore(flags);
+
+	if (ioc->aic && ioc->aic->exit)
+		ioc->aic->exit(ioc->aic);
+	if (ioc->cic && ioc->cic->exit)
+		ioc->cic->exit(ioc->cic);
+
+	put_io_context(ioc);
+}
+
+/*
+ * If the current task has no IO context then create one and initialise it.
+ * If it does have a context, take a ref on it.
+ *
+ * This is always called in the context of the task which submitted the I/O.
+ * But weird things happen, so we disable local interrupts to ensure exclusive
+ * access to *current.
+ */
+struct io_context *get_io_context(int gfp_flags)
+{
+	struct task_struct *tsk = current;
+	unsigned long flags;
+	struct io_context *ret;
+
+	local_irq_save(flags);
+	ret = tsk->io_context;
+	if (ret)
+		goto out;
+
+	local_irq_restore(flags);
+
+	ret = kmem_cache_alloc(iocontext_cachep, gfp_flags);
+	if (ret) {
+		atomic_set(&ret->refcount, 1);
+		ret->pid = tsk->pid;
+		ret->last_waited = jiffies; /* doesn't matter... */
+		ret->nr_batch_requests = 0; /* because this is 0 */
+		ret->aic = NULL;
+		ret->cic = NULL;
+		spin_lock_init(&ret->lock);
+
+		local_irq_save(flags);
+
+		/*
+		 * very unlikely, someone raced with us in setting up the task
+		 * io context. free new context and just grab a reference.
+		 */
+		if (!tsk->io_context)
+			tsk->io_context = ret;
+		else {
+			kmem_cache_free(iocontext_cachep, ret);
+			ret = tsk->io_context;
+		}
+
+out:
+		atomic_inc(&ret->refcount);
+		local_irq_restore(flags);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(get_io_context);
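+
+/*
+ * Illustrative sketch, not part of this file: callers pair get_io_context()
+ * with put_io_context(), e.g. an I/O scheduler pinning the submitting
+ * task's context for the lifetime of a request.
+ *
+ *	struct io_context *ioc = get_io_context(GFP_ATOMIC);
+ *	if (ioc) {
+ *		// ... stash ioc in per-request state ...
+ *		put_io_context(ioc);	// drop the reference when done
+ *	}
+ */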
+
+void copy_io_context(struct io_context **pdst, struct io_context **psrc)
+{
+	struct io_context *src = *psrc;
+	struct io_context *dst = *pdst;
+
+	if (src) {
+		BUG_ON(atomic_read(&src->refcount) == 0);
+		atomic_inc(&src->refcount);
+		put_io_context(dst);
+		*pdst = src;
+	}
+}
+EXPORT_SYMBOL(copy_io_context);
+
+void swap_io_context(struct io_context **ioc1, struct io_context **ioc2)
+{
+	struct io_context *temp;
+	temp = *ioc1;
+	*ioc1 = *ioc2;
+	*ioc2 = temp;
+}
+EXPORT_SYMBOL(swap_io_context);
+
+/*
+ * sysfs parts below
+ */
+struct queue_sysfs_entry {
+	struct attribute attr;
+	ssize_t (*show)(struct request_queue *, char *);
+	ssize_t (*store)(struct request_queue *, const char *, size_t);
+};
+
+static ssize_t
+queue_var_show(unsigned int var, char *page)
+{
+	return sprintf(page, "%d\n", var);
+}
+
+static ssize_t
+queue_var_store(unsigned long *var, const char *page, size_t count)
+{
+	char *p = (char *) page;
+
+	*var = simple_strtoul(p, &p, 10);
+	return count;
+}
+
+static ssize_t queue_requests_show(struct request_queue *q, char *page)
+{
+	return queue_var_show(q->nr_requests, page);
+}
+
+static ssize_t
+queue_requests_store(struct request_queue *q, const char *page, size_t count)
+{
+	struct request_list *rl = &q->rq;
+
+	int ret = queue_var_store(&q->nr_requests, page, count);
+	if (q->nr_requests < BLKDEV_MIN_RQ)
+		q->nr_requests = BLKDEV_MIN_RQ;
+	blk_queue_congestion_threshold(q);
+
+	if (rl->count[READ] >= queue_congestion_on_threshold(q))
+		set_queue_congested(q, READ);
+	else if (rl->count[READ] < queue_congestion_off_threshold(q))
+		clear_queue_congested(q, READ);
+
+	if (rl->count[WRITE] >= queue_congestion_on_threshold(q))
+		set_queue_congested(q, WRITE);
+	else if (rl->count[WRITE] < queue_congestion_off_threshold(q))
+		clear_queue_congested(q, WRITE);
+
+	if (rl->count[READ] >= q->nr_requests) {
+		blk_set_queue_full(q, READ);
+	} else if (rl->count[READ]+1 <= q->nr_requests) {
+		blk_clear_queue_full(q, READ);
+		wake_up(&rl->wait[READ]);
+	}
+
+	if (rl->count[WRITE] >= q->nr_requests) {
+		blk_set_queue_full(q, WRITE);
+	} else if (rl->count[WRITE]+1 <= q->nr_requests) {
+		blk_clear_queue_full(q, WRITE);
+		wake_up(&rl->wait[WRITE]);
+	}
+	return ret;
+}
+
+static ssize_t queue_ra_show(struct request_queue *q, char *page)
+{
+	int ra_kb = q->backing_dev_info.ra_pages << (PAGE_CACHE_SHIFT - 10);
+
+	return queue_var_show(ra_kb, page);
+}
+
+static ssize_t
+queue_ra_store(struct request_queue *q, const char *page, size_t count)
+{
+	unsigned long ra_kb;
+	ssize_t ret = queue_var_store(&ra_kb, page, count);
+
+	spin_lock_irq(q->queue_lock);
+	if (ra_kb > (q->max_sectors >> 1))
+		ra_kb = (q->max_sectors >> 1);
+
+	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
+	spin_unlock_irq(q->queue_lock);
+
+	return ret;
+}
+
+static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
+{
+	int max_sectors_kb = q->max_sectors >> 1;
+
+	return queue_var_show(max_sectors_kb, page);
+}
+
+static ssize_t
+queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
+{
+	unsigned long max_sectors_kb,
+			max_hw_sectors_kb = q->max_hw_sectors >> 1,
+			page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
+	ssize_t ret = queue_var_store(&max_sectors_kb, page, count);
+	int ra_kb;
+
+	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
+		return -EINVAL;
+	/*
+	 * Take the queue lock to update the readahead and max_sectors
+	 * values synchronously:
+	 */
+	spin_lock_irq(q->queue_lock);
+	/*
+	 * Trim readahead window as well, if necessary:
+	 */
+	ra_kb = q->backing_dev_info.ra_pages << (PAGE_CACHE_SHIFT - 10);
+	if (ra_kb > max_sectors_kb)
+		q->backing_dev_info.ra_pages =
+				max_sectors_kb >> (PAGE_CACHE_SHIFT - 10);
+
+	q->max_sectors = max_sectors_kb << 1;
+	spin_unlock_irq(q->queue_lock);
+
+	return ret;
+}
+
+static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
+{
+	int max_hw_sectors_kb = q->max_hw_sectors >> 1;
+
+	return queue_var_show(max_hw_sectors_kb, page);
+}
+
+
+static struct queue_sysfs_entry queue_requests_entry = {
+	.attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
+	.show = queue_requests_show,
+	.store = queue_requests_store,
+};
+
+static struct queue_sysfs_entry queue_ra_entry = {
+	.attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
+	.show = queue_ra_show,
+	.store = queue_ra_store,
+};
+
+static struct queue_sysfs_entry queue_max_sectors_entry = {
+	.attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
+	.show = queue_max_sectors_show,
+	.store = queue_max_sectors_store,
+};
+
+static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
+	.attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
+	.show = queue_max_hw_sectors_show,
+};
+
+static struct queue_sysfs_entry queue_iosched_entry = {
+	.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
+	.show = elv_iosched_show,
+	.store = elv_iosched_store,
+};
+
+static struct attribute *default_attrs[] = {
+	&queue_requests_entry.attr,
+	&queue_ra_entry.attr,
+	&queue_max_hw_sectors_entry.attr,
+	&queue_max_sectors_entry.attr,
+	&queue_iosched_entry.attr,
+	NULL,
+};
+
+#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)
+
+static ssize_t
+queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
+{
+	struct queue_sysfs_entry *entry = to_queue(attr);
+	struct request_queue *q;
+
+	q = container_of(kobj, struct request_queue, kobj);
+	if (!entry->show)
+		return 0;
+
+	return entry->show(q, page);
+}
+
+static ssize_t
+queue_attr_store(struct kobject *kobj, struct attribute *attr,
+		    const char *page, size_t length)
+{
+	struct queue_sysfs_entry *entry = to_queue(attr);
+	struct request_queue *q;
+
+	q = container_of(kobj, struct request_queue, kobj);
+	if (!entry->store)
+		return -EINVAL;
+
+	return entry->store(q, page, length);
+}
+
+static struct sysfs_ops queue_sysfs_ops = {
+	.show	= queue_attr_show,
+	.store	= queue_attr_store,
+};
+
+struct kobj_type queue_ktype = {
+	.sysfs_ops	= &queue_sysfs_ops,
+	.default_attrs	= default_attrs,
+};
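+
+/*
+ * Illustrative sketch, not part of this file: once blk_register_queue()
+ * below has run, the attributes above appear under
+ * /sys/block/<disk>/queue/.  A hypothetical userspace helper tuning
+ * nr_requests could look like:
+ *
+ *	#include <stdio.h>
+ *
+ *	int set_nr_requests(const char *disk, unsigned long nr)
+ *	{
+ *		char path[128];
+ *		FILE *f;
+ *
+ *		snprintf(path, sizeof(path),
+ *			 "/sys/block/%s/queue/nr_requests", disk);
+ *		f = fopen(path, "w");
+ *		if (!f)
+ *			return -1;
+ *		fprintf(f, "%lu\n", nr);	// parsed by queue_var_store()
+ *		return fclose(f);
+ *	}
+ */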
+
+int blk_register_queue(struct gendisk *disk)
+{
+	int ret;
+
+	request_queue_t *q = disk->queue;
+
+	if (!q || !q->request_fn)
+		return -ENXIO;
+
+	q->kobj.parent = kobject_get(&disk->kobj);
+	if (!q->kobj.parent)
+		return -EBUSY;
+
+	snprintf(q->kobj.name, KOBJ_NAME_LEN, "%s", "queue");
+	q->kobj.ktype = &queue_ktype;
+
+	ret = kobject_register(&q->kobj);
+	if (ret < 0)
+		return ret;
+
+	ret = elv_register_queue(q);
+	if (ret) {
+		kobject_unregister(&q->kobj);
+		return ret;
+	}
+
+	return 0;
+}
+
+void blk_unregister_queue(struct gendisk *disk)
+{
+	request_queue_t *q = disk->queue;
+
+	if (q && q->request_fn) {
+		elv_unregister_queue(q);
+
+		kobject_unregister(&q->kobj);
+		kobject_put(&disk->kobj);
+	}
+}
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
new file mode 100644
index 0000000..6f011d0
--- /dev/null
+++ b/drivers/block/loop.c
@@ -0,0 +1,1348 @@
+/*
+ *  linux/drivers/block/loop.c
+ *
+ *  Written by Theodore Ts'o, 3/29/93
+ *
+ * Copyright 1993 by Theodore Ts'o.  Redistribution of this file is
+ * permitted under the GNU General Public License.
+ *
+ * DES encryption plus some minor changes by Werner Almesberger, 30-MAY-1993
+ * more DES encryption plus IDEA encryption by Nicholas J. Leon, June 20, 1996
+ *
+ * Modularized and updated for 1.1.16 kernel - Mitch Dsouza 28th May 1994
+ * Adapted for 1.3.59 kernel - Andries Brouwer, 1 Feb 1996
+ *
+ * Fixed do_loop_request() re-entrancy - Vincent.Renardias@waw.com Mar 20, 1997
+ *
+ * Added devfs support - Richard Gooch <rgooch@atnf.csiro.au> 16-Jan-1998
+ *
+ * Handle sparse backing files correctly - Kenn Humborg, Jun 28, 1998
+ *
+ * Loadable modules and other fixes by AK, 1998
+ *
+ * Make real block number available to downstream transfer functions, enables
+ * CBC (and relatives) mode encryption requiring unique IVs per data block.
+ * Reed H. Petty, rhp@draper.net
+ *
+ * Maximum number of loop devices now dynamic via max_loop module parameter.
+ * Russell Kroll <rkroll@exploits.org> 19990701
+ *
+ * Maximum number of loop devices when compiled-in now selectable by passing
+ * max_loop=<1-255> to the kernel on boot.
+ * Erik I. Bolsø, <eriki@himolde.no>, Oct 31, 1999
+ *
+ * Completely rewrite request handling to be make_request_fn style and
+ * non blocking, pushing work to a helper thread. Lots of fixes from
+ * Al Viro too.
+ * Jens Axboe <axboe@suse.de>, Nov 2000
+ *
+ * Support up to 256 loop devices
+ * Heinz Mauelshagen <mge@sistina.com>, Feb 2002
+ *
+ * Support for falling back on the write file operation when the address space
+ * operations prepare_write and/or commit_write are not available on the
+ * backing filesystem.
+ * Anton Altaparmakov, 16 Feb 2005
+ *
+ * Still To Fix:
+ * - Advisory locking is ignored here.
+ * - Should use its own CAP_* category instead of CAP_SYS_ADMIN
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/stat.h>
+#include <linux/errno.h>
+#include <linux/major.h>
+#include <linux/wait.h>
+#include <linux/blkdev.h>
+#include <linux/blkpg.h>
+#include <linux/init.h>
+#include <linux/devfs_fs_kernel.h>
+#include <linux/smp_lock.h>
+#include <linux/swap.h>
+#include <linux/slab.h>
+#include <linux/loop.h>
+#include <linux/suspend.h>
+#include <linux/writeback.h>
+#include <linux/buffer_head.h>		/* for invalidate_bdev() */
+#include <linux/completion.h>
+#include <linux/highmem.h>
+#include <linux/gfp.h>
+
+#include <asm/uaccess.h>
+
+static int max_loop = 8;
+static struct loop_device *loop_dev;
+static struct gendisk **disks;
+
+/*
+ * Transfer functions
+ */
+static int transfer_none(struct loop_device *lo, int cmd,
+			 struct page *raw_page, unsigned raw_off,
+			 struct page *loop_page, unsigned loop_off,
+			 int size, sector_t real_block)
+{
+	char *raw_buf = kmap_atomic(raw_page, KM_USER0) + raw_off;
+	char *loop_buf = kmap_atomic(loop_page, KM_USER1) + loop_off;
+
+	if (cmd == READ)
+		memcpy(loop_buf, raw_buf, size);
+	else
+		memcpy(raw_buf, loop_buf, size);
+
+	kunmap_atomic(raw_buf, KM_USER0);
+	kunmap_atomic(loop_buf, KM_USER1);
+	cond_resched();
+	return 0;
+}
+
+static int transfer_xor(struct loop_device *lo, int cmd,
+			struct page *raw_page, unsigned raw_off,
+			struct page *loop_page, unsigned loop_off,
+			int size, sector_t real_block)
+{
+	char *raw_buf = kmap_atomic(raw_page, KM_USER0) + raw_off;
+	char *loop_buf = kmap_atomic(loop_page, KM_USER1) + loop_off;
+	char *in, *out, *key;
+	int i, keysize;
+
+	if (cmd == READ) {
+		in = raw_buf;
+		out = loop_buf;
+	} else {
+		in = loop_buf;
+		out = raw_buf;
+	}
+
+	key = lo->lo_encrypt_key;
+	keysize = lo->lo_encrypt_key_size;
+	for (i = 0; i < size; i++)
+		*out++ = *in++ ^ key[(i & 511) % keysize];
+
+	kunmap_atomic(raw_buf, KM_USER0);
+	kunmap_atomic(loop_buf, KM_USER1);
+	cond_resched();
+	return 0;
+}
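+
+/*
+ * Illustrative note, not part of this file: the XOR transfer is its own
+ * inverse, so the WRITE transform followed by the READ transform with the
+ * same key restores the data byte-for-byte:
+ *
+ *	out[i] = in[i]  ^ key[(i & 511) % keysize];	// on the way out
+ *	orig   = out[i] ^ key[(i & 511) % keysize];	// == in[i] again
+ */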
+
+static int xor_init(struct loop_device *lo, const struct loop_info64 *info)
+{
+	if (unlikely(info->lo_encrypt_key_size <= 0))
+		return -EINVAL;
+	return 0;
+}
+
+static struct loop_func_table none_funcs = {
+	.number = LO_CRYPT_NONE,
+	.transfer = transfer_none,
+};
+
+static struct loop_func_table xor_funcs = {
+	.number = LO_CRYPT_XOR,
+	.transfer = transfer_xor,
+	.init = xor_init
+};
+
+/* xfer_funcs[0] is special - its release function is never called */
+static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = {
+	&none_funcs,
+	&xor_funcs
+};
+
+static loff_t get_loop_size(struct loop_device *lo, struct file *file)
+{
+	loff_t size, offset, loopsize;
+
+	/* Compute loopsize in bytes */
+	size = i_size_read(file->f_mapping->host);
+	offset = lo->lo_offset;
+	loopsize = size - offset;
+	if (lo->lo_sizelimit > 0 && lo->lo_sizelimit < loopsize)
+		loopsize = lo->lo_sizelimit;
+
+	/*
+	 * Unfortunately, if we want to do I/O on the device,
+	 * the number of 512-byte sectors has to fit into a sector_t.
+	 */
+	return loopsize >> 9;
+}
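+
+/*
+ * Worked example, not part of this file: a 1 GiB backing file with
+ * lo_offset = 4096 and no lo_sizelimit yields
+ * (1073741824 - 4096) >> 9 = 2097144 sectors of 512 bytes.
+ */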
+
+static int
+figure_loop_size(struct loop_device *lo)
+{
+	loff_t size = get_loop_size(lo, lo->lo_backing_file);
+	sector_t x = (sector_t)size;
+
+	if (unlikely((loff_t)x != size))
+		return -EFBIG;
+
+	set_capacity(disks[lo->lo_number], x);
+	return 0;
+}
+
+static inline int
+lo_do_transfer(struct loop_device *lo, int cmd,
+	       struct page *rpage, unsigned roffs,
+	       struct page *lpage, unsigned loffs,
+	       int size, sector_t rblock)
+{
+	if (unlikely(!lo->transfer))
+		return 0;
+
+	return lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock);
+}
+
+/**
+ * do_lo_send_aops - helper for writing data to a loop device
+ *
+ * This is the fast version for backing filesystems which implement the address
+ * space operations prepare_write and commit_write.
+ */
+static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec,
+		int bsize, loff_t pos, struct page *page)
+{
+	struct file *file = lo->lo_backing_file; /* kudos to NFsckingS */
+	struct address_space *mapping = file->f_mapping;
+	struct address_space_operations *aops = mapping->a_ops;
+	pgoff_t index;
+	unsigned offset, bv_offs;
+	int len, ret = 0;
+
+	down(&mapping->host->i_sem);
+	index = pos >> PAGE_CACHE_SHIFT;
+	offset = pos & ((pgoff_t)PAGE_CACHE_SIZE - 1);
+	bv_offs = bvec->bv_offset;
+	len = bvec->bv_len;
+	while (len > 0) {
+		sector_t IV;
+		unsigned size;
+		int transfer_result;
+
+		IV = ((sector_t)index << (PAGE_CACHE_SHIFT - 9))+(offset >> 9);
+		size = PAGE_CACHE_SIZE - offset;
+		if (size > len)
+			size = len;
+		page = grab_cache_page(mapping, index);
+		if (unlikely(!page))
+			goto fail;
+		if (unlikely(aops->prepare_write(file, page, offset,
+				offset + size)))
+			goto unlock;
+		transfer_result = lo_do_transfer(lo, WRITE, page, offset,
+				bvec->bv_page, bv_offs, size, IV);
+		if (unlikely(transfer_result)) {
+			char *kaddr;
+
+			/*
+			 * The transfer failed, but we still write the data to
+			 * keep prepare/commit calls balanced.
+			 */
+			printk(KERN_ERR "loop: transfer error block %llu\n",
+			       (unsigned long long)index);
+			kaddr = kmap_atomic(page, KM_USER0);
+			memset(kaddr + offset, 0, size);
+			kunmap_atomic(kaddr, KM_USER0);
+		}
+		flush_dcache_page(page);
+		if (unlikely(aops->commit_write(file, page, offset,
+				offset + size)))
+			goto unlock;
+		if (unlikely(transfer_result))
+			goto unlock;
+		bv_offs += size;
+		len -= size;
+		offset = 0;
+		index++;
+		pos += size;
+		unlock_page(page);
+		page_cache_release(page);
+	}
+out:
+	up(&mapping->host->i_sem);
+	return ret;
+unlock:
+	unlock_page(page);
+	page_cache_release(page);
+fail:
+	ret = -1;
+	goto out;
+}
+
+/**
+ * __do_lo_send_write - helper for writing data to a loop device
+ *
+ * This helper just factors out common code between do_lo_send_direct_write()
+ * and do_lo_send_write().
+ */
+static inline int __do_lo_send_write(struct file *file,
+		u8 __user *buf, const int len, loff_t pos)
+{
+	ssize_t bw;
+	mm_segment_t old_fs = get_fs();
+
+	set_fs(get_ds());
+	bw = file->f_op->write(file, buf, len, &pos);
+	set_fs(old_fs);
+	if (likely(bw == len))
+		return 0;
+	printk(KERN_ERR "loop: Write error at byte offset %llu, length %i.\n",
+			(unsigned long long)pos, len);
+	if (bw >= 0)
+		bw = -EIO;
+	return bw;
+}
+
+/**
+ * do_lo_send_direct_write - helper for writing data to a loop device
+ *
+ * This is the fast, non-transforming version for backing filesystems which do
+ * not implement the address space operations prepare_write and commit_write.
+ * It uses the write file operation which should be present on all writeable
+ * filesystems.
+ */
+static int do_lo_send_direct_write(struct loop_device *lo,
+		struct bio_vec *bvec, int bsize, loff_t pos, struct page *page)
+{
+	ssize_t bw = __do_lo_send_write(lo->lo_backing_file,
+			(u8 __user *)kmap(bvec->bv_page) + bvec->bv_offset,
+			bvec->bv_len, pos);
+	kunmap(bvec->bv_page);
+	cond_resched();
+	return bw;
+}
+
+/**
+ * do_lo_send_write - helper for writing data to a loop device
+ *
+ * This is the slow, transforming version for filesystems which do not
+ * implement the address space operations prepare_write and commit_write.  It
+ * uses the write file operation which should be present on all writeable
+ * filesystems.
+ *
+ * Using fops->write is slower than using aops->{prepare,commit}_write in the
+ * transforming case because we need to double buffer the data as we cannot do
+ * the transformations in place as we do not have direct access to the
+ * destination pages of the backing file.
+ */
+static int do_lo_send_write(struct loop_device *lo, struct bio_vec *bvec,
+		int bsize, loff_t pos, struct page *page)
+{
+	int ret = lo_do_transfer(lo, WRITE, page, 0, bvec->bv_page,
+			bvec->bv_offset, bvec->bv_len, pos >> 9);
+	if (likely(!ret))
+		return __do_lo_send_write(lo->lo_backing_file,
+				(u8 __user *)page_address(page), bvec->bv_len,
+				pos);
+	printk(KERN_ERR "loop: Transfer error at byte offset %llu, "
+			"length %i.\n", (unsigned long long)pos, bvec->bv_len);
+	if (ret > 0)
+		ret = -EIO;
+	return ret;
+}
+
+static int lo_send(struct loop_device *lo, struct bio *bio, int bsize,
+		loff_t pos)
+{
+	int (*do_lo_send)(struct loop_device *, struct bio_vec *, int, loff_t,
+			struct page *page);
+	struct bio_vec *bvec;
+	struct page *page = NULL;
+	int i, ret = 0;
+
+	do_lo_send = do_lo_send_aops;
+	if (!(lo->lo_flags & LO_FLAGS_USE_AOPS)) {
+		do_lo_send = do_lo_send_direct_write;
+		if (lo->transfer != transfer_none) {
+			page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
+			if (unlikely(!page))
+				goto fail;
+			kmap(page);
+			do_lo_send = do_lo_send_write;
+		}
+	}
+	bio_for_each_segment(bvec, bio, i) {
+		ret = do_lo_send(lo, bvec, bsize, pos, page);
+		if (ret < 0)
+			break;
+		pos += bvec->bv_len;
+	}
+	if (page) {
+		kunmap(page);
+		__free_page(page);
+	}
+out:
+	return ret;
+fail:
+	printk(KERN_ERR "loop: Failed to allocate temporary page for write.\n");
+	ret = -ENOMEM;
+	goto out;
+}
+
+struct lo_read_data {
+	struct loop_device *lo;
+	struct page *page;
+	unsigned offset;
+	int bsize;
+};
+
+static int
+lo_read_actor(read_descriptor_t *desc, struct page *page,
+	      unsigned long offset, unsigned long size)
+{
+	unsigned long count = desc->count;
+	struct lo_read_data *p = desc->arg.data;
+	struct loop_device *lo = p->lo;
+	sector_t IV;
+
+	IV = ((sector_t) page->index << (PAGE_CACHE_SHIFT - 9))+(offset >> 9);
+
+	if (size > count)
+		size = count;
+
+	if (lo_do_transfer(lo, READ, page, offset, p->page, p->offset, size, IV)) {
+		size = 0;
+		printk(KERN_ERR "loop: transfer error block %ld\n",
+		       page->index);
+		desc->error = -EINVAL;
+	}
+
+	flush_dcache_page(p->page);
+
+	desc->count = count - size;
+	desc->written += size;
+	p->offset += size;
+	return size;
+}
+
+static int
+do_lo_receive(struct loop_device *lo,
+	      struct bio_vec *bvec, int bsize, loff_t pos)
+{
+	struct lo_read_data cookie;
+	struct file *file;
+	int retval;
+
+	cookie.lo = lo;
+	cookie.page = bvec->bv_page;
+	cookie.offset = bvec->bv_offset;
+	cookie.bsize = bsize;
+	file = lo->lo_backing_file;
+	retval = file->f_op->sendfile(file, &pos, bvec->bv_len,
+			lo_read_actor, &cookie);
+	return (retval < 0)? retval: 0;
+}
+
+static int
+lo_receive(struct loop_device *lo, struct bio *bio, int bsize, loff_t pos)
+{
+	struct bio_vec *bvec;
+	int i, ret = 0;
+
+	bio_for_each_segment(bvec, bio, i) {
+		ret = do_lo_receive(lo, bvec, bsize, pos);
+		if (ret < 0)
+			break;
+		pos += bvec->bv_len;
+	}
+	return ret;
+}
+
+static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
+{
+	loff_t pos;
+	int ret;
+
+	pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
+	if (bio_rw(bio) == WRITE)
+		ret = lo_send(lo, bio, lo->lo_blocksize, pos);
+	else
+		ret = lo_receive(lo, bio, lo->lo_blocksize, pos);
+	return ret;
+}
+
+/*
+ * Add bio to back of pending list
+ */
+static void loop_add_bio(struct loop_device *lo, struct bio *bio)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&lo->lo_lock, flags);
+	if (lo->lo_biotail) {
+		lo->lo_biotail->bi_next = bio;
+		lo->lo_biotail = bio;
+	} else
+		lo->lo_bio = lo->lo_biotail = bio;
+	spin_unlock_irqrestore(&lo->lo_lock, flags);
+
+	up(&lo->lo_bh_mutex);
+}
+
+/*
+ * Grab first pending buffer
+ */
+static struct bio *loop_get_bio(struct loop_device *lo)
+{
+	struct bio *bio;
+
+	spin_lock_irq(&lo->lo_lock);
+	if ((bio = lo->lo_bio)) {
+		if (bio == lo->lo_biotail)
+			lo->lo_biotail = NULL;
+		lo->lo_bio = bio->bi_next;
+		bio->bi_next = NULL;
+	}
+	spin_unlock_irq(&lo->lo_lock);
+
+	return bio;
+}
+
+static int loop_make_request(request_queue_t *q, struct bio *old_bio)
+{
+	struct loop_device *lo = q->queuedata;
+	int rw = bio_rw(old_bio);
+
+	if (!lo)
+		goto out;
+
+	spin_lock_irq(&lo->lo_lock);
+	if (lo->lo_state != Lo_bound)
+		goto inactive;
+	atomic_inc(&lo->lo_pending);
+	spin_unlock_irq(&lo->lo_lock);
+
+	if (rw == WRITE) {
+		if (lo->lo_flags & LO_FLAGS_READ_ONLY)
+			goto err;
+	} else if (rw == READA) {
+		rw = READ;
+	} else if (rw != READ) {
+		printk(KERN_ERR "loop: unknown command (%x)\n", rw);
+		goto err;
+	}
+	loop_add_bio(lo, old_bio);
+	return 0;
+err:
+	if (atomic_dec_and_test(&lo->lo_pending))
+		up(&lo->lo_bh_mutex);
+out:
+	bio_io_error(old_bio, old_bio->bi_size);
+	return 0;
+inactive:
+	spin_unlock_irq(&lo->lo_lock);
+	goto out;
+}
+
+/*
+ * kick off io on the underlying address space
+ */
+static void loop_unplug(request_queue_t *q)
+{
+	struct loop_device *lo = q->queuedata;
+
+	clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags);
+	blk_run_address_space(lo->lo_backing_file->f_mapping);
+}
+
+struct switch_request {
+	struct file *file;
+	struct completion wait;
+};
+
+static void do_loop_switch(struct loop_device *, struct switch_request *);
+
+static inline void loop_handle_bio(struct loop_device *lo, struct bio *bio)
+{
+	int ret;
+
+	if (unlikely(!bio->bi_bdev)) {
+		do_loop_switch(lo, bio->bi_private);
+		bio_put(bio);
+	} else {
+		ret = do_bio_filebacked(lo, bio);
+		bio_endio(bio, bio->bi_size, ret);
+	}
+}
+
+/*
+ * Worker thread that handles reads/writes to file backed loop devices,
+ * to avoid blocking in our make_request_fn. It also does loop decrypting
+ * on reads for block backed loop, as that is too heavy to do from
+ * b_end_io context where irqs may be disabled.
+ */
+static int loop_thread(void *data)
+{
+	struct loop_device *lo = data;
+	struct bio *bio;
+
+	daemonize("loop%d", lo->lo_number);
+
+	/*
+	 * loop can back an encrypted device, so this thread must never
+	 * be frozen: it could be needed indirectly during suspension.
+	 */
+	current->flags |= PF_NOFREEZE;
+
+	set_user_nice(current, -20);
+
+	lo->lo_state = Lo_bound;
+	atomic_inc(&lo->lo_pending);
+
+	/*
+	 * up sem, we are running
+	 */
+	up(&lo->lo_sem);
+
+	for (;;) {
+		down_interruptible(&lo->lo_bh_mutex);
+		/*
+		 * could be upped because of tear-down, not because of
+		 * pending work
+		 */
+		if (!atomic_read(&lo->lo_pending))
+			break;
+
+		bio = loop_get_bio(lo);
+		if (!bio) {
+			printk("loop: missing bio\n");
+			continue;
+		}
+		loop_handle_bio(lo, bio);
+
+		/*
+		 * upped both for pending work and tear-down, lo_pending
+		 * will hit zero then
+		 */
+		if (atomic_dec_and_test(&lo->lo_pending))
+			break;
+	}
+
+	up(&lo->lo_sem);
+	return 0;
+}
+
+/*
+ * loop_switch performs the hard work of switching a backing store.
+ * First it needs to flush existing IO, it does this by sending a magic
+ * BIO down the pipe. The completion of this BIO does the actual switch.
+ */
+static int loop_switch(struct loop_device *lo, struct file *file)
+{
+	struct switch_request w;
+	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
+	if (!bio)
+		return -ENOMEM;
+	init_completion(&w.wait);
+	w.file = file;
+	bio->bi_private = &w;
+	bio->bi_bdev = NULL;
+	loop_make_request(lo->lo_queue, bio);
+	wait_for_completion(&w.wait);
+	return 0;
+}
+
+/*
+ * Do the actual switch; called from the BIO completion routine
+ */
+static void do_loop_switch(struct loop_device *lo, struct switch_request *p)
+{
+	struct file *file = p->file;
+	struct file *old_file = lo->lo_backing_file;
+	struct address_space *mapping = file->f_mapping;
+
+	mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
+	lo->lo_backing_file = file;
+	lo->lo_blocksize = mapping->host->i_blksize;
+	lo->old_gfp_mask = mapping_gfp_mask(mapping);
+	mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
+	complete(&p->wait);
+}
+
+
+/*
+ * loop_change_fd switches the backing store of a loopback device to
+ * a new file. This is useful for operating system installers to free up
+ * the original file and in High Availability environments to switch to
+ * an alternative location for the content in case of server meltdown.
+ * This can only work if the loop device is used read-only, and if the
+ * new backing store is the same size and type as the old backing store.
+ */
+static int loop_change_fd(struct loop_device *lo, struct file *lo_file,
+		       struct block_device *bdev, unsigned int arg)
+{
+	struct file	*file, *old_file;
+	struct inode	*inode;
+	int		error;
+
+	error = -ENXIO;
+	if (lo->lo_state != Lo_bound)
+		goto out;
+
+	/* the loop device has to be read-only */
+	error = -EINVAL;
+	if (!(lo->lo_flags & LO_FLAGS_READ_ONLY))
+		goto out;
+
+	error = -EBADF;
+	file = fget(arg);
+	if (!file)
+		goto out;
+
+	inode = file->f_mapping->host;
+	old_file = lo->lo_backing_file;
+
+	error = -EINVAL;
+
+	if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
+		goto out_putf;
+
+	/* new backing store needs to support loop (eg sendfile) */
+	if (!inode->i_fop->sendfile)
+		goto out_putf;
+
+	/* size of the new backing store needs to be the same */
+	if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
+		goto out_putf;
+
+	/* and ... switch */
+	error = loop_switch(lo, file);
+	if (error)
+		goto out_putf;
+
+	fput(old_file);
+	return 0;
+
+ out_putf:
+	fput(file);
+ out:
+	return error;
+}
+
+static inline int is_loop_device(struct file *file)
+{
+	struct inode *i = file->f_mapping->host;
+
+	return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR;
+}
+
+static int loop_set_fd(struct loop_device *lo, struct file *lo_file,
+		       struct block_device *bdev, unsigned int arg)
+{
+	struct file	*file, *f;
+	struct inode	*inode;
+	struct address_space *mapping;
+	unsigned lo_blocksize;
+	int		lo_flags = 0;
+	int		error;
+	loff_t		size;
+
+	/* This is safe, since we have a reference from open(). */
+	__module_get(THIS_MODULE);
+
+	error = -EBADF;
+	file = fget(arg);
+	if (!file)
+		goto out;
+
+	error = -EBUSY;
+	if (lo->lo_state != Lo_unbound)
+		goto out_putf;
+
+	/* Avoid recursion */
+	f = file;
+	while (is_loop_device(f)) {
+		struct loop_device *l;
+
+		if (f->f_mapping->host->i_rdev == lo_file->f_mapping->host->i_rdev)
+			goto out_putf;
+
+		l = f->f_mapping->host->i_bdev->bd_disk->private_data;
+		if (l->lo_state == Lo_unbound) {
+			error = -EINVAL;
+			goto out_putf;
+		}
+		f = l->lo_backing_file;
+	}
+
+	mapping = file->f_mapping;
+	inode = mapping->host;
+
+	if (!(file->f_mode & FMODE_WRITE))
+		lo_flags |= LO_FLAGS_READ_ONLY;
+
+	error = -EINVAL;
+	if (S_ISREG(inode->i_mode) || S_ISBLK(inode->i_mode)) {
+		struct address_space_operations *aops = mapping->a_ops;
+		/*
+		 * If we can't read - sorry. If we only can't write - well,
+		 * it's going to be read-only.
+		 */
+		if (!file->f_op->sendfile)
+			goto out_putf;
+		if (aops->prepare_write && aops->commit_write)
+			lo_flags |= LO_FLAGS_USE_AOPS;
+		if (!(lo_flags & LO_FLAGS_USE_AOPS) && !file->f_op->write)
+			lo_flags |= LO_FLAGS_READ_ONLY;
+
+		lo_blocksize = inode->i_blksize;
+		error = 0;
+	} else {
+		goto out_putf;
+	}
+
+	size = get_loop_size(lo, file);
+
+	if ((loff_t)(sector_t)size != size) {
+		error = -EFBIG;
+		goto out_putf;
+	}
+
+	if (!(lo_file->f_mode & FMODE_WRITE))
+		lo_flags |= LO_FLAGS_READ_ONLY;
+
+	set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0);
+
+	lo->lo_blocksize = lo_blocksize;
+	lo->lo_device = bdev;
+	lo->lo_flags = lo_flags;
+	lo->lo_backing_file = file;
+	lo->transfer = NULL;
+	lo->ioctl = NULL;
+	lo->lo_sizelimit = 0;
+	lo->old_gfp_mask = mapping_gfp_mask(mapping);
+	mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
+
+	lo->lo_bio = lo->lo_biotail = NULL;
+
+	/*
+	 * set queue make_request_fn, and add limits based on lower level
+	 * device
+	 */
+	blk_queue_make_request(lo->lo_queue, loop_make_request);
+	lo->lo_queue->queuedata = lo;
+	lo->lo_queue->unplug_fn = loop_unplug;
+
+	set_capacity(disks[lo->lo_number], size);
+	bd_set_size(bdev, size << 9);
+
+	set_blocksize(bdev, lo_blocksize);
+
+	kernel_thread(loop_thread, lo, CLONE_KERNEL);
+	down(&lo->lo_sem);
+	return 0;
+
+ out_putf:
+	fput(file);
+ out:
+	/* This is safe: open() is still holding a reference. */
+	module_put(THIS_MODULE);
+	return error;
+}
+
+static int
+loop_release_xfer(struct loop_device *lo)
+{
+	int err = 0;
+	struct loop_func_table *xfer = lo->lo_encryption;
+
+	if (xfer) {
+		if (xfer->release)
+			err = xfer->release(lo);
+		lo->transfer = NULL;
+		lo->lo_encryption = NULL;
+		module_put(xfer->owner);
+	}
+	return err;
+}
+
+static int
+loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer,
+	       const struct loop_info64 *i)
+{
+	int err = 0;
+
+	if (xfer) {
+		struct module *owner = xfer->owner;
+
+		if (!try_module_get(owner))
+			return -EINVAL;
+		if (xfer->init)
+			err = xfer->init(lo, i);
+		if (err)
+			module_put(owner);
+		else
+			lo->lo_encryption = xfer;
+	}
+	return err;
+}
+
+static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
+{
+	struct file *filp = lo->lo_backing_file;
+	int gfp = lo->old_gfp_mask;
+
+	if (lo->lo_state != Lo_bound)
+		return -ENXIO;
+
+	if (lo->lo_refcnt > 1)	/* we needed one fd for the ioctl */
+		return -EBUSY;
+
+	if (filp == NULL)
+		return -EINVAL;
+
+	spin_lock_irq(&lo->lo_lock);
+	lo->lo_state = Lo_rundown;
+	if (atomic_dec_and_test(&lo->lo_pending))
+		up(&lo->lo_bh_mutex);
+	spin_unlock_irq(&lo->lo_lock);
+
+	down(&lo->lo_sem);
+
+	lo->lo_backing_file = NULL;
+
+	loop_release_xfer(lo);
+	lo->transfer = NULL;
+	lo->ioctl = NULL;
+	lo->lo_device = NULL;
+	lo->lo_encryption = NULL;
+	lo->lo_offset = 0;
+	lo->lo_sizelimit = 0;
+	lo->lo_encrypt_key_size = 0;
+	lo->lo_flags = 0;
+	memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
+	memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
+	memset(lo->lo_file_name, 0, LO_NAME_SIZE);
+	invalidate_bdev(bdev, 0);
+	set_capacity(disks[lo->lo_number], 0);
+	bd_set_size(bdev, 0);
+	mapping_set_gfp_mask(filp->f_mapping, gfp);
+	lo->lo_state = Lo_unbound;
+	fput(filp);
+	/* This is safe: open() is still holding a reference. */
+	module_put(THIS_MODULE);
+	return 0;
+}
+
+static int
+loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
+{
+	int err;
+	struct loop_func_table *xfer;
+
+	if (lo->lo_encrypt_key_size && lo->lo_key_owner != current->uid &&
+	    !capable(CAP_SYS_ADMIN))
+		return -EPERM;
+	if (lo->lo_state != Lo_bound)
+		return -ENXIO;
+	if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE)
+		return -EINVAL;
+
+	err = loop_release_xfer(lo);
+	if (err)
+		return err;
+
+	if (info->lo_encrypt_type) {
+		unsigned int type = info->lo_encrypt_type;
+
+		if (type >= MAX_LO_CRYPT)
+			return -EINVAL;
+		xfer = xfer_funcs[type];
+		if (xfer == NULL)
+			return -EINVAL;
+	} else
+		xfer = NULL;
+
+	err = loop_init_xfer(lo, xfer, info);
+	if (err)
+		return err;
+
+	if (lo->lo_offset != info->lo_offset ||
+	    lo->lo_sizelimit != info->lo_sizelimit) {
+		lo->lo_offset = info->lo_offset;
+		lo->lo_sizelimit = info->lo_sizelimit;
+		if (figure_loop_size(lo))
+			return -EFBIG;
+	}
+
+	memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
+	memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE);
+	lo->lo_file_name[LO_NAME_SIZE-1] = 0;
+	lo->lo_crypt_name[LO_NAME_SIZE-1] = 0;
+
+	if (!xfer)
+		xfer = &none_funcs;
+	lo->transfer = xfer->transfer;
+	lo->ioctl = xfer->ioctl;
+
+	lo->lo_encrypt_key_size = info->lo_encrypt_key_size;
+	lo->lo_init[0] = info->lo_init[0];
+	lo->lo_init[1] = info->lo_init[1];
+	if (info->lo_encrypt_key_size) {
+		memcpy(lo->lo_encrypt_key, info->lo_encrypt_key,
+		       info->lo_encrypt_key_size);
+		lo->lo_key_owner = current->uid;
+	}
+
+	return 0;
+}
+
+static int
+loop_get_status(struct loop_device *lo, struct loop_info64 *info)
+{
+	struct file *file = lo->lo_backing_file;
+	struct kstat stat;
+	int error;
+
+	if (lo->lo_state != Lo_bound)
+		return -ENXIO;
+	error = vfs_getattr(file->f_vfsmnt, file->f_dentry, &stat);
+	if (error)
+		return error;
+	memset(info, 0, sizeof(*info));
+	info->lo_number = lo->lo_number;
+	info->lo_device = huge_encode_dev(stat.dev);
+	info->lo_inode = stat.ino;
+	info->lo_rdevice = huge_encode_dev(lo->lo_device ? stat.rdev : stat.dev);
+	info->lo_offset = lo->lo_offset;
+	info->lo_sizelimit = lo->lo_sizelimit;
+	info->lo_flags = lo->lo_flags;
+	memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE);
+	memcpy(info->lo_crypt_name, lo->lo_crypt_name, LO_NAME_SIZE);
+	info->lo_encrypt_type =
+		lo->lo_encryption ? lo->lo_encryption->number : 0;
+	if (lo->lo_encrypt_key_size && capable(CAP_SYS_ADMIN)) {
+		info->lo_encrypt_key_size = lo->lo_encrypt_key_size;
+		memcpy(info->lo_encrypt_key, lo->lo_encrypt_key,
+		       lo->lo_encrypt_key_size);
+	}
+	return 0;
+}
+
+static void
+loop_info64_from_old(const struct loop_info *info, struct loop_info64 *info64)
+{
+	memset(info64, 0, sizeof(*info64));
+	info64->lo_number = info->lo_number;
+	info64->lo_device = info->lo_device;
+	info64->lo_inode = info->lo_inode;
+	info64->lo_rdevice = info->lo_rdevice;
+	info64->lo_offset = info->lo_offset;
+	info64->lo_sizelimit = 0;
+	info64->lo_encrypt_type = info->lo_encrypt_type;
+	info64->lo_encrypt_key_size = info->lo_encrypt_key_size;
+	info64->lo_flags = info->lo_flags;
+	info64->lo_init[0] = info->lo_init[0];
+	info64->lo_init[1] = info->lo_init[1];
+	if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
+		memcpy(info64->lo_crypt_name, info->lo_name, LO_NAME_SIZE);
+	else
+		memcpy(info64->lo_file_name, info->lo_name, LO_NAME_SIZE);
+	memcpy(info64->lo_encrypt_key, info->lo_encrypt_key, LO_KEY_SIZE);
+}
+
+static int
+loop_info64_to_old(const struct loop_info64 *info64, struct loop_info *info)
+{
+	memset(info, 0, sizeof(*info));
+	info->lo_number = info64->lo_number;
+	info->lo_device = info64->lo_device;
+	info->lo_inode = info64->lo_inode;
+	info->lo_rdevice = info64->lo_rdevice;
+	info->lo_offset = info64->lo_offset;
+	info->lo_encrypt_type = info64->lo_encrypt_type;
+	info->lo_encrypt_key_size = info64->lo_encrypt_key_size;
+	info->lo_flags = info64->lo_flags;
+	info->lo_init[0] = info64->lo_init[0];
+	info->lo_init[1] = info64->lo_init[1];
+	if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
+		memcpy(info->lo_name, info64->lo_crypt_name, LO_NAME_SIZE);
+	else
+		memcpy(info->lo_name, info64->lo_file_name, LO_NAME_SIZE);
+	memcpy(info->lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE);
+
+	/* error in case values were truncated */
+	if (info->lo_device != info64->lo_device ||
+	    info->lo_rdevice != info64->lo_rdevice ||
+	    info->lo_inode != info64->lo_inode ||
+	    info->lo_offset != info64->lo_offset)
+		return -EOVERFLOW;
+
+	return 0;
+}
+
+static int
+loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg)
+{
+	struct loop_info info;
+	struct loop_info64 info64;
+
+	if (copy_from_user(&info, arg, sizeof (struct loop_info)))
+		return -EFAULT;
+	loop_info64_from_old(&info, &info64);
+	return loop_set_status(lo, &info64);
+}
+
+static int
+loop_set_status64(struct loop_device *lo, const struct loop_info64 __user *arg)
+{
+	struct loop_info64 info64;
+
+	if (copy_from_user(&info64, arg, sizeof (struct loop_info64)))
+		return -EFAULT;
+	return loop_set_status(lo, &info64);
+}
+
+static int
+loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg)
+{
+	struct loop_info info;
+	struct loop_info64 info64;
+	int err = 0;
+
+	if (!arg)
+		err = -EINVAL;
+	if (!err)
+		err = loop_get_status(lo, &info64);
+	if (!err)
+		err = loop_info64_to_old(&info64, &info);
+	if (!err && copy_to_user(arg, &info, sizeof(info)))
+		err = -EFAULT;
+
+	return err;
+}
+
+static int
+loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg)
+{
+	struct loop_info64 info64;
+	int err = 0;
+
+	if (!arg)
+		err = -EINVAL;
+	if (!err)
+		err = loop_get_status(lo, &info64);
+	if (!err && copy_to_user(arg, &info64, sizeof(info64)))
+		err = -EFAULT;
+
+	return err;
+}
+
+static int lo_ioctl(struct inode * inode, struct file * file,
+	unsigned int cmd, unsigned long arg)
+{
+	struct loop_device *lo = inode->i_bdev->bd_disk->private_data;
+	int err;
+
+	down(&lo->lo_ctl_mutex);
+	switch (cmd) {
+	case LOOP_SET_FD:
+		err = loop_set_fd(lo, file, inode->i_bdev, arg);
+		break;
+	case LOOP_CHANGE_FD:
+		err = loop_change_fd(lo, file, inode->i_bdev, arg);
+		break;
+	case LOOP_CLR_FD:
+		err = loop_clr_fd(lo, inode->i_bdev);
+		break;
+	case LOOP_SET_STATUS:
+		err = loop_set_status_old(lo, (struct loop_info __user *) arg);
+		break;
+	case LOOP_GET_STATUS:
+		err = loop_get_status_old(lo, (struct loop_info __user *) arg);
+		break;
+	case LOOP_SET_STATUS64:
+		err = loop_set_status64(lo, (struct loop_info64 __user *) arg);
+		break;
+	case LOOP_GET_STATUS64:
+		err = loop_get_status64(lo, (struct loop_info64 __user *) arg);
+		break;
+	default:
+		err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
+	}
+	up(&lo->lo_ctl_mutex);
+	return err;
+}
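+
+/*
+ * Illustrative sketch, not part of this file: the usual userspace sequence
+ * for binding a backing file to a loop device via the ioctls handled above.
+ * Error handling is omitted for brevity.
+ *
+ *	#include <fcntl.h>
+ *	#include <sys/ioctl.h>
+ *	#include <linux/loop.h>
+ *
+ *	int bind_loop(const char *loopdev, const char *backing)
+ *	{
+ *		int lfd = open(loopdev, O_RDWR);
+ *		int bfd = open(backing, O_RDWR);
+ *
+ *		if (lfd < 0 || bfd < 0)
+ *			return -1;
+ *		return ioctl(lfd, LOOP_SET_FD, bfd);	// loop_set_fd() above
+ *	}
+ */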
+
+static int lo_open(struct inode *inode, struct file *file)
+{
+	struct loop_device *lo = inode->i_bdev->bd_disk->private_data;
+
+	down(&lo->lo_ctl_mutex);
+	lo->lo_refcnt++;
+	up(&lo->lo_ctl_mutex);
+
+	return 0;
+}
+
+static int lo_release(struct inode *inode, struct file *file)
+{
+	struct loop_device *lo = inode->i_bdev->bd_disk->private_data;
+
+	down(&lo->lo_ctl_mutex);
+	--lo->lo_refcnt;
+	up(&lo->lo_ctl_mutex);
+
+	return 0;
+}
+
+static struct block_device_operations lo_fops = {
+	.owner =	THIS_MODULE,
+	.open =		lo_open,
+	.release =	lo_release,
+	.ioctl =	lo_ioctl,
+};
+
+/*
+ * And now the modules code and kernel interface.
+ */
+module_param(max_loop, int, 0);
+MODULE_PARM_DESC(max_loop, "Maximum number of loop devices (1-256)");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR);
+
+int loop_register_transfer(struct loop_func_table *funcs)
+{
+	unsigned int n = funcs->number;
+
+	if (n >= MAX_LO_CRYPT || xfer_funcs[n])
+		return -EINVAL;
+	xfer_funcs[n] = funcs;
+	return 0;
+}
+
+int loop_unregister_transfer(int number)
+{
+	unsigned int n = number;
+	struct loop_device *lo;
+	struct loop_func_table *xfer;
+
+	if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL)
+		return -EINVAL;
+
+	xfer_funcs[n] = NULL;
+
+	for (lo = &loop_dev[0]; lo < &loop_dev[max_loop]; lo++) {
+		down(&lo->lo_ctl_mutex);
+
+		if (lo->lo_encryption == xfer)
+			loop_release_xfer(lo);
+
+		up(&lo->lo_ctl_mutex);
+	}
+
+	return 0;
+}
+
+EXPORT_SYMBOL(loop_register_transfer);
+EXPORT_SYMBOL(loop_unregister_transfer);
+
+static int __init loop_init(void)
+{
+	int	i;
+
+	if (max_loop < 1 || max_loop > 256) {
+		printk(KERN_WARNING "loop: invalid max_loop (must be between"
+				    " 1 and 256), using default (8)\n");
+		max_loop = 8;
+	}
+
+	if (register_blkdev(LOOP_MAJOR, "loop"))
+		return -EIO;
+
+	loop_dev = kmalloc(max_loop * sizeof(struct loop_device), GFP_KERNEL);
+	if (!loop_dev)
+		goto out_mem1;
+	memset(loop_dev, 0, max_loop * sizeof(struct loop_device));
+
+	disks = kmalloc(max_loop * sizeof(struct gendisk *), GFP_KERNEL);
+	if (!disks)
+		goto out_mem2;
+
+	for (i = 0; i < max_loop; i++) {
+		disks[i] = alloc_disk(1);
+		if (!disks[i])
+			goto out_mem3;
+	}
+
+	devfs_mk_dir("loop");
+
+	for (i = 0; i < max_loop; i++) {
+		struct loop_device *lo = &loop_dev[i];
+		struct gendisk *disk = disks[i];
+
+		memset(lo, 0, sizeof(*lo));
+		lo->lo_queue = blk_alloc_queue(GFP_KERNEL);
+		if (!lo->lo_queue)
+			goto out_mem4;
+		init_MUTEX(&lo->lo_ctl_mutex);
+		init_MUTEX_LOCKED(&lo->lo_sem);
+		init_MUTEX_LOCKED(&lo->lo_bh_mutex);
+		lo->lo_number = i;
+		spin_lock_init(&lo->lo_lock);
+		disk->major = LOOP_MAJOR;
+		disk->first_minor = i;
+		disk->fops = &lo_fops;
+		sprintf(disk->disk_name, "loop%d", i);
+		sprintf(disk->devfs_name, "loop/%d", i);
+		disk->private_data = lo;
+		disk->queue = lo->lo_queue;
+	}
+
+	/* We cannot fail after we call this, so another loop! */
+	for (i = 0; i < max_loop; i++)
+		add_disk(disks[i]);
+	printk(KERN_INFO "loop: loaded (max %d devices)\n", max_loop);
+	return 0;
+
+out_mem4:
+	while (i--)
+		blk_put_queue(loop_dev[i].lo_queue);
+	devfs_remove("loop");
+	i = max_loop;
+out_mem3:
+	while (i--)
+		put_disk(disks[i]);
+	kfree(disks);
+out_mem2:
+	kfree(loop_dev);
+out_mem1:
+	unregister_blkdev(LOOP_MAJOR, "loop");
+	printk(KERN_ERR "loop: ran out of memory\n");
+	return -ENOMEM;
+}
+
+static void loop_exit(void)
+{
+	int i;
+
+	for (i = 0; i < max_loop; i++) {
+		del_gendisk(disks[i]);
+		blk_put_queue(loop_dev[i].lo_queue);
+		put_disk(disks[i]);
+	}
+	devfs_remove("loop");
+	if (unregister_blkdev(LOOP_MAJOR, "loop"))
+		printk(KERN_WARNING "loop: cannot unregister blkdev\n");
+
+	kfree(disks);
+	kfree(loop_dev);
+}
+
+module_init(loop_init);
+module_exit(loop_exit);
+
+#ifndef MODULE
+static int __init max_loop_setup(char *str)
+{
+	max_loop = simple_strtol(str, NULL, 0);
+	return 1;
+}
+
+__setup("max_loop=", max_loop_setup);
+#endif
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
new file mode 100644
index 0000000..efdf044
--- /dev/null
+++ b/drivers/block/nbd.c
@@ -0,0 +1,731 @@
+/*
+ * Network block device - make block devices work over TCP
+ *
+ * Note that you can not swap over this thing, yet. Seems to work but
+ * deadlocks sometimes - you can not swap over TCP in general.
+ * 
+ * Copyright 1997-2000 Pavel Machek <pavel@ucw.cz>
+ * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com>
+ *
+ * (part of code stolen from loop.c)
+ *
+ * 97-3-25 compiled 0-th version, not yet tested it 
+ *   (it did not work, BTW) (later that day) HEY! it works!
+ *   (bit later) hmm, not that much... 2:00am next day:
+ *   yes, it works, but it gives something like 50kB/sec
+ * 97-4-01 complete rewrite to make it possible for many requests at 
+ *   once to be processed
+ * 97-4-11 Making protocol independent of endianness etc.
+ * 97-9-13 Cosmetic changes
+ * 98-5-13 Attempt to make 64-bit-clean on 64-bit machines
+ * 99-1-11 Attempt to make 64-bit-clean on 32-bit machines <ankry@mif.pg.gda.pl>
+ * 01-2-27 Fix to store proper blockcount for kernel (calculated using
+ *   BLOCK_SIZE_BITS, not device blocksize) <aga@permonline.ru>
+ * 01-3-11 Make nbd work with new Linux block layer code. It now supports
+ *   plugging like all the other block devices. Also added in MSG_MORE to
+ *   reduce number of partial TCP segments sent. <steve@chygwyn.com>
+ * 01-12-6 Fix deadlock condition by making queue locks independent of
+ *   the transmit lock. <steve@chygwyn.com>
+ * 02-10-11 Allow hung xmit to be aborted via SIGKILL & various fixes.
+ *   <Paul.Clements@SteelEye.com> <James.Bottomley@SteelEye.com>
+ * 03-06-22 Make nbd work with new linux 2.5 block layer design. This fixes
+ *   memory corruption from module removal and possible memory corruption
+ *   from sending/receiving disk data. <ldl@aros.net>
+ * 03-06-23 Cosmetic changes. <ldl@aros.net>
+ * 03-06-23 Enhance diagnostics support. <ldl@aros.net>
+ * 03-06-24 Remove unneeded blksize_bits field from nbd_device struct.
+ *   <ldl@aros.net>
+ * 03-06-24 Cleanup PARANOIA usage & code. <ldl@aros.net>
+ * 04-02-19 Remove PARANOIA, plus various cleanups (Paul Clements)
+ * possible FIXME: make set_sock / set_blksize / set_size / do_it one syscall
+ * why not: would need access_ok and friends, would share yet another
+ *          structure with userland
+ */
+
+#include <linux/major.h>
+
+#include <linux/blkdev.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/bio.h>
+#include <linux/stat.h>
+#include <linux/errno.h>
+#include <linux/file.h>
+#include <linux/ioctl.h>
+#include <net/sock.h>
+
+#include <linux/devfs_fs_kernel.h>
+
+#include <asm/uaccess.h>
+#include <asm/types.h>
+
+#include <linux/nbd.h>
+
+#define LO_MAGIC 0x68797548
+
+#ifdef NDEBUG
+#define dprintk(flags, fmt...)
+#else /* NDEBUG */
+#define dprintk(flags, fmt...) do { \
+	if (debugflags & (flags)) printk(KERN_DEBUG fmt); \
+} while (0)
+#define DBG_IOCTL       0x0004
+#define DBG_INIT        0x0010
+#define DBG_EXIT        0x0020
+#define DBG_BLKDEV      0x0100
+#define DBG_RX          0x0200
+#define DBG_TX          0x0400
+static unsigned int debugflags;
+#endif /* NDEBUG */
+
+static struct nbd_device nbd_dev[MAX_NBD];
+
+/*
+ * Use just one lock (or at most 1 per NIC). Two arguments for this:
+ * 1. Each NIC is essentially a synchronization point for all servers
+ *    accessed through that NIC so there's no need to have more locks
+ *    than NICs anyway.
+ * 2. More locks lead to more "Dirty cache line bouncing" which will slow
+ *    down each lock to the point where they're actually slower than just
+ *    a single lock.
+ * Thanks go to Jens Axboe and Al Viro for their LKML emails explaining this!
+ */
+static DEFINE_SPINLOCK(nbd_lock);
+
+#ifndef NDEBUG
+static const char *ioctl_cmd_to_ascii(int cmd)
+{
+	switch (cmd) {
+	case NBD_SET_SOCK: return "set-sock";
+	case NBD_SET_BLKSIZE: return "set-blksize";
+	case NBD_SET_SIZE: return "set-size";
+	case NBD_DO_IT: return "do-it";
+	case NBD_CLEAR_SOCK: return "clear-sock";
+	case NBD_CLEAR_QUE: return "clear-que";
+	case NBD_PRINT_DEBUG: return "print-debug";
+	case NBD_SET_SIZE_BLOCKS: return "set-size-blocks";
+	case NBD_DISCONNECT: return "disconnect";
+	case BLKROSET: return "set-read-only";
+	case BLKFLSBUF: return "flush-buffer-cache";
+	}
+	return "unknown";
+}
+
+static const char *nbdcmd_to_ascii(int cmd)
+{
+	switch (cmd) {
+	case  NBD_CMD_READ: return "read";
+	case NBD_CMD_WRITE: return "write";
+	case  NBD_CMD_DISC: return "disconnect";
+	}
+	return "invalid";
+}
+#endif /* NDEBUG */
+
+static void nbd_end_request(struct request *req)
+{
+	int uptodate = (req->errors == 0) ? 1 : 0;
+	request_queue_t *q = req->q;
+	unsigned long flags;
+
+	dprintk(DBG_BLKDEV, "%s: request %p: %s\n", req->rq_disk->disk_name,
+			req, uptodate? "done": "failed");
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	if (!end_that_request_first(req, uptodate, req->nr_sectors)) {
+		end_that_request_last(req);
+	}
+	spin_unlock_irqrestore(q->queue_lock, flags);
+}
+
+/*
+ *  Send or receive packet.
+ */
+static int sock_xmit(struct socket *sock, int send, void *buf, int size,
+		int msg_flags)
+{
+	int result;
+	struct msghdr msg;
+	struct kvec iov;
+	unsigned long flags;
+	sigset_t oldset;
+
+	/* Allow interception of SIGKILL only
+	 * Don't allow other signals to interrupt the transmission */
+	spin_lock_irqsave(&current->sighand->siglock, flags);
+	oldset = current->blocked;
+	sigfillset(&current->blocked);
+	sigdelsetmask(&current->blocked, sigmask(SIGKILL));
+	recalc_sigpending();
+	spin_unlock_irqrestore(&current->sighand->siglock, flags);
+
+	do {
+		sock->sk->sk_allocation = GFP_NOIO;
+		iov.iov_base = buf;
+		iov.iov_len = size;
+		msg.msg_name = NULL;
+		msg.msg_namelen = 0;
+		msg.msg_control = NULL;
+		msg.msg_controllen = 0;
+		msg.msg_flags = msg_flags | MSG_NOSIGNAL;
+
+		if (send)
+			result = kernel_sendmsg(sock, &msg, &iov, 1, size);
+		else
+			result = kernel_recvmsg(sock, &msg, &iov, 1, size, 0);
+
+		if (signal_pending(current)) {
+			siginfo_t info;
+			spin_lock_irqsave(&current->sighand->siglock, flags);
+			printk(KERN_WARNING "nbd (pid %d: %s) got signal %d\n",
+				current->pid, current->comm, 
+				dequeue_signal(current, &current->blocked, &info));
+			spin_unlock_irqrestore(&current->sighand->siglock, flags);
+			result = -EINTR;
+			break;
+		}
+
+		if (result <= 0) {
+			if (result == 0)
+				result = -EPIPE; /* unexpected EOF: peer closed connection */
+			break;
+		}
+		size -= result;
+		buf += result;
+	} while (size > 0);
+
+	spin_lock_irqsave(&current->sighand->siglock, flags);
+	current->blocked = oldset;
+	recalc_sigpending();
+	spin_unlock_irqrestore(&current->sighand->siglock, flags);
+
+	return result;
+}
+
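+/* kmap the page so bio segments that live in highmem can be fed to
+   sock_xmit() */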
+static inline int sock_send_bvec(struct socket *sock, struct bio_vec *bvec,
+		int flags)
+{
+	int result;
+	void *kaddr = kmap(bvec->bv_page);
+	result = sock_xmit(sock, 1, kaddr + bvec->bv_offset, bvec->bv_len,
+			flags);
+	kunmap(bvec->bv_page);
+	return result;
+}
+
+static int nbd_send_req(struct nbd_device *lo, struct request *req)
+{
+	int result, i, flags;
+	struct nbd_request request;
+	unsigned long size = req->nr_sectors << 9;
+	struct socket *sock = lo->sock;
+
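+	/* Build the on-the-wire header: multi-byte fields are sent in
+	 * network byte order, and the opaque handle carries the request
+	 * pointer so nbd_find_request() can match the server's reply to
+	 * this request. */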
+	request.magic = htonl(NBD_REQUEST_MAGIC);
+	request.type = htonl(nbd_cmd(req));
+	request.from = cpu_to_be64((u64) req->sector << 9);
+	request.len = htonl(size);
+	memcpy(request.handle, &req, sizeof(req));
+
+	down(&lo->tx_lock);
+
+	if (!sock || !lo->sock) {
+		printk(KERN_ERR "%s: Attempted send on closed socket\n",
+				lo->disk->disk_name);
+		goto error_out;
+	}
+
+	dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%luB)\n",
+			lo->disk->disk_name, req,
+			nbdcmd_to_ascii(nbd_cmd(req)),
+			(unsigned long long)req->sector << 9,
+			req->nr_sectors << 9);
+	result = sock_xmit(sock, 1, &request, sizeof(request),
+			(nbd_cmd(req) == NBD_CMD_WRITE)? MSG_MORE: 0);
+	if (result <= 0) {
+		printk(KERN_ERR "%s: Send control failed (result %d)\n",
+				lo->disk->disk_name, result);
+		goto error_out;
+	}
+
+	if (nbd_cmd(req) == NBD_CMD_WRITE) {
+		struct bio *bio;
+		/*
+		 * we are really probing at internals to determine
+		 * whether to set MSG_MORE or not...
+		 */
+		rq_for_each_bio(bio, req) {
+			struct bio_vec *bvec;
+			bio_for_each_segment(bvec, bio, i) {
+				flags = 0;
+				if ((i < (bio->bi_vcnt - 1)) || bio->bi_next)
+					flags = MSG_MORE;
+				dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n",
+						lo->disk->disk_name, req,
+						bvec->bv_len);
+				result = sock_send_bvec(sock, bvec, flags);
+				if (result <= 0) {
+					printk(KERN_ERR "%s: Send data failed (result %d)\n",
+							lo->disk->disk_name,
+							result);
+					goto error_out;
+				}
+			}
+		}
+	}
+	up(&lo->tx_lock);
+	return 0;
+
+error_out:
+	up(&lo->tx_lock);
+	return 1;
+}
+
+static struct request *nbd_find_request(struct nbd_device *lo, char *handle)
+{
+	struct request *req;
+	struct list_head *tmp;
+	struct request *xreq;
+
+	memcpy(&xreq, handle, sizeof(xreq));
+
+	spin_lock(&lo->queue_lock);
+	list_for_each(tmp, &lo->queue_head) {
+		req = list_entry(tmp, struct request, queuelist);
+		if (req != xreq)
+			continue;
+		list_del_init(&req->queuelist);
+		spin_unlock(&lo->queue_lock);
+		return req;
+	}
+	spin_unlock(&lo->queue_lock);
+	return NULL;
+}
+
+static inline int sock_recv_bvec(struct socket *sock, struct bio_vec *bvec)
+{
+	int result;
+	void *kaddr = kmap(bvec->bv_page);
+	result = sock_xmit(sock, 0, kaddr + bvec->bv_offset, bvec->bv_len,
+			MSG_WAITALL);
+	kunmap(bvec->bv_page);
+	return result;
+}
+
+/* NULL returned = something went wrong, inform userspace */
+static struct request *nbd_read_stat(struct nbd_device *lo)
+{
+	int result;
+	struct nbd_reply reply;
+	struct request *req;
+	struct socket *sock = lo->sock;
+
+	reply.magic = 0;
+	result = sock_xmit(sock, 0, &reply, sizeof(reply), MSG_WAITALL);
+	if (result <= 0) {
+		printk(KERN_ERR "%s: Receive control failed (result %d)\n",
+				lo->disk->disk_name, result);
+		goto harderror;
+	}
+	req = nbd_find_request(lo, reply.handle);
+	if (req == NULL) {
+		printk(KERN_ERR "%s: Unexpected reply (%p)\n",
+				lo->disk->disk_name, reply.handle);
+		result = -EBADR;
+		goto harderror;
+	}
+
+	if (ntohl(reply.magic) != NBD_REPLY_MAGIC) {
+		printk(KERN_ERR "%s: Wrong magic (0x%lx)\n",
+				lo->disk->disk_name,
+				(unsigned long)ntohl(reply.magic));
+		result = -EPROTO;
+		goto harderror;
+	}
+	if (ntohl(reply.error)) {
+		printk(KERN_ERR "%s: Other side returned error (%d)\n",
+				lo->disk->disk_name, ntohl(reply.error));
+		req->errors++;
+		return req;
+	}
+
+	dprintk(DBG_RX, "%s: request %p: got reply\n",
+			lo->disk->disk_name, req);
+	if (nbd_cmd(req) == NBD_CMD_READ) {
+		int i;
+		struct bio *bio;
+		rq_for_each_bio(bio, req) {
+			struct bio_vec *bvec;
+			bio_for_each_segment(bvec, bio, i) {
+				result = sock_recv_bvec(sock, bvec);
+				if (result <= 0) {
+					printk(KERN_ERR "%s: Receive data failed (result %d)\n",
+							lo->disk->disk_name,
+							result);
+					goto harderror;
+				}
+				dprintk(DBG_RX, "%s: request %p: got %d bytes data\n",
+					lo->disk->disk_name, req, bvec->bv_len);
+			}
+		}
+	}
+	return req;
+harderror:
+	lo->harderror = result;
+	return NULL;
+}
+
+static void nbd_do_it(struct nbd_device *lo)
+{
+	struct request *req;
+
+	BUG_ON(lo->magic != LO_MAGIC);
+
+	while ((req = nbd_read_stat(lo)) != NULL)
+		nbd_end_request(req);
+	return;
+}
+
+static void nbd_clear_que(struct nbd_device *lo)
+{
+	struct request *req;
+
+	BUG_ON(lo->magic != LO_MAGIC);
+
+	do {
+		req = NULL;
+		spin_lock(&lo->queue_lock);
+		if (!list_empty(&lo->queue_head)) {
+			req = list_entry(lo->queue_head.next, struct request, queuelist);
+			list_del_init(&req->queuelist);
+		}
+		spin_unlock(&lo->queue_lock);
+		if (req) {
+			req->errors++;
+			nbd_end_request(req);
+		}
+	} while (req);
+}
+
+/*
+ * We always wait for the result of a write, for now.  It would be nice to
+ * make it optional in the future:
+ * if ((req->cmd == WRITE) && (lo->flags & NBD_WRITE_NOCHK))
+ *   { printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); }
+ */
+
+static void do_nbd_request(request_queue_t * q)
+{
+	struct request *req;
+	
+	while ((req = elv_next_request(q)) != NULL) {
+		struct nbd_device *lo;
+
+		blkdev_dequeue_request(req);
+		dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%lx)\n",
+				req->rq_disk->disk_name, req, req->flags);
+
+		if (!(req->flags & REQ_CMD))
+			goto error_out;
+
+		lo = req->rq_disk->private_data;
+
+		BUG_ON(lo->magic != LO_MAGIC);
+
+		if (!lo->file) {
+			printk(KERN_ERR "%s: Request when not-ready\n",
+					lo->disk->disk_name);
+			goto error_out;
+		}
+		nbd_cmd(req) = NBD_CMD_READ;
+		if (rq_data_dir(req) == WRITE) {
+			nbd_cmd(req) = NBD_CMD_WRITE;
+			if (lo->flags & NBD_READ_ONLY) {
+				printk(KERN_ERR "%s: Write on read-only\n",
+						lo->disk->disk_name);
+				goto error_out;
+			}
+		}
+
+		req->errors = 0;
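+		/* The socket send may sleep, so drop the queue lock (taken
+		 * by the block layer with interrupts off) around the network
+		 * I/O and re-acquire it before touching the queue again. */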
+		spin_unlock_irq(q->queue_lock);
+
+		spin_lock(&lo->queue_lock);
+
+		if (!lo->file) {
+			spin_unlock(&lo->queue_lock);
+			printk(KERN_ERR "%s: failed between accept and semaphore, file lost\n",
+					lo->disk->disk_name);
+			req->errors++;
+			nbd_end_request(req);
+			spin_lock_irq(q->queue_lock);
+			continue;
+		}
+
+		list_add(&req->queuelist, &lo->queue_head);
+		spin_unlock(&lo->queue_lock);
+
+		if (nbd_send_req(lo, req) != 0) {
+			printk(KERN_ERR "%s: Request send failed\n",
+					lo->disk->disk_name);
+			if (nbd_find_request(lo, (char *)&req) != NULL) {
+				/* we still own req */
+				req->errors++;
+				nbd_end_request(req);
+			} else /* we're racing with nbd_clear_que */
+				printk(KERN_DEBUG "nbd: can't find req\n");
+		}
+
+		spin_lock_irq(q->queue_lock);
+		continue;
+
+error_out:
+		req->errors++;
+		spin_unlock(q->queue_lock);
+		nbd_end_request(req);
+		spin_lock(q->queue_lock);
+	}
+	return;
+}
+
+static int nbd_ioctl(struct inode *inode, struct file *file,
+		     unsigned int cmd, unsigned long arg)
+{
+	struct nbd_device *lo = inode->i_bdev->bd_disk->private_data;
+	int error;
+	struct request sreq;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	BUG_ON(lo->magic != LO_MAGIC);
+
+	/* Anyone capable of this syscall can do *real bad* things */
+	dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n",
+			lo->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg);
+
+	switch (cmd) {
+	case NBD_DISCONNECT:
+		printk(KERN_INFO "%s: NBD_DISCONNECT\n", lo->disk->disk_name);
+		sreq.flags = REQ_SPECIAL;
+		nbd_cmd(&sreq) = NBD_CMD_DISC;
+		/*
+		 * Set these to sane values in case server implementation
+		 * fails to check the request type first and also to keep
+		 * debugging output cleaner.
+		 */
+		sreq.sector = 0;
+		sreq.nr_sectors = 0;
+		if (!lo->sock)
+			return -EINVAL;
+		nbd_send_req(lo, &sreq);
+		return 0;
+
+	case NBD_CLEAR_SOCK:
+		error = 0;
+		down(&lo->tx_lock);
+		lo->sock = NULL;
+		up(&lo->tx_lock);
+		spin_lock(&lo->queue_lock);
+		file = lo->file;
+		lo->file = NULL;
+		spin_unlock(&lo->queue_lock);
+		nbd_clear_que(lo);
+		spin_lock(&lo->queue_lock);
+		if (!list_empty(&lo->queue_head)) {
+			printk(KERN_ERR "nbd: disconnect: some requests are in progress -> please try again.\n");
+			error = -EBUSY;
+		}
+		spin_unlock(&lo->queue_lock);
+		if (file)
+			fput(file);
+		return error;
+	case NBD_SET_SOCK:
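+		/* arg is a userspace socket fd: pin the struct file and
+		 * recover the struct socket behind it */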
+		if (lo->file)
+			return -EBUSY;
+		error = -EINVAL;
+		file = fget(arg);
+		if (file) {
+			inode = file->f_dentry->d_inode;
+			if (S_ISSOCK(inode->i_mode)) {
+				lo->file = file;
+				lo->sock = SOCKET_I(inode);
+				error = 0;
+			} else {
+				fput(file);
+			}
+		}
+		return error;
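+	/* The three size ioctls below keep bytesize a whole multiple of
+	 * blksize (e.g. blksize 1024 rounds a requested 1000000 bytes down
+	 * to 999424) and propagate the result to the bdev inode and the
+	 * gendisk capacity. */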
+	case NBD_SET_BLKSIZE:
+		lo->blksize = arg;
+		lo->bytesize &= ~(lo->blksize-1);
+		inode->i_bdev->bd_inode->i_size = lo->bytesize;
+		set_blocksize(inode->i_bdev, lo->blksize);
+		set_capacity(lo->disk, lo->bytesize >> 9);
+		return 0;
+	case NBD_SET_SIZE:
+		lo->bytesize = arg & ~(lo->blksize-1);
+		inode->i_bdev->bd_inode->i_size = lo->bytesize;
+		set_blocksize(inode->i_bdev, lo->blksize);
+		set_capacity(lo->disk, lo->bytesize >> 9);
+		return 0;
+	case NBD_SET_SIZE_BLOCKS:
+		lo->bytesize = ((u64) arg) * lo->blksize;
+		inode->i_bdev->bd_inode->i_size = lo->bytesize;
+		set_blocksize(inode->i_bdev, lo->blksize);
+		set_capacity(lo->disk, lo->bytesize >> 9);
+		return 0;
+	case NBD_DO_IT:
+		if (!lo->file)
+			return -EINVAL;
+		nbd_do_it(lo);
+		/* on return tidy up in case we have a signal */
+		/* Forcibly shutdown the socket causing all listeners
+		 * to error
+		 *
+		 * FIXME: This code is duplicated from sys_shutdown, but
+		 * there should be a more generic interface rather than
+		 * calling socket ops directly here */
+		down(&lo->tx_lock);
+		if (lo->sock) {
+			printk(KERN_WARNING "%s: shutting down socket\n",
+				lo->disk->disk_name);
+			lo->sock->ops->shutdown(lo->sock,
+				SEND_SHUTDOWN|RCV_SHUTDOWN);
+			lo->sock = NULL;
+		}
+		up(&lo->tx_lock);
+		spin_lock(&lo->queue_lock);
+		file = lo->file;
+		lo->file = NULL;
+		spin_unlock(&lo->queue_lock);
+		nbd_clear_que(lo);
+		printk(KERN_WARNING "%s: queue cleared\n", lo->disk->disk_name);
+		if (file)
+			fput(file);
+		return lo->harderror;
+	case NBD_CLEAR_QUE:
+		down(&lo->tx_lock);
+		if (lo->sock) {
+			up(&lo->tx_lock);
+			return 0; /* probably should be error, but that would
+				   * break "nbd-client -d", so just return 0 */
+		}
+		up(&lo->tx_lock);
+		nbd_clear_que(lo);
+		return 0;
+	case NBD_PRINT_DEBUG:
+		printk(KERN_INFO "%s: next = %p, prev = %p, head = %p\n",
+			inode->i_bdev->bd_disk->disk_name,
+			lo->queue_head.next, lo->queue_head.prev,
+			&lo->queue_head);
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static struct block_device_operations nbd_fops =
+{
+	.owner =	THIS_MODULE,
+	.ioctl =	nbd_ioctl,
+};
+
+/*
+ * And here should be modules and kernel interface 
+ *  (Just smiley confuses emacs :-)
+ */
+
+static int __init nbd_init(void)
+{
+	int err = -ENOMEM;
+	int i;
+
+	if (sizeof(struct nbd_request) != 28) {
+		printk(KERN_CRIT "nbd: sizeof nbd_request needs to be 28 in order to work!\n" );
+		return -EIO;
+	}
+
+	for (i = 0; i < MAX_NBD; i++) {
+		struct gendisk *disk = alloc_disk(1);
+		if (!disk)
+			goto out;
+		nbd_dev[i].disk = disk;
+		/*
+		 * The new linux 2.5 block layer implementation requires
+		 * every gendisk to have its very own request_queue struct.
+		 * These structs are big so we dynamically allocate them.
+		 */
+		disk->queue = blk_init_queue(do_nbd_request, &nbd_lock);
+		if (!disk->queue) {
+			put_disk(disk);
+			goto out;
+		}
+	}
+
+	if (register_blkdev(NBD_MAJOR, "nbd")) {
+		err = -EIO;
+		goto out;
+	}
+
+	printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR);
+	dprintk(DBG_INIT, "nbd: debugflags=0x%x\n", debugflags);
+
+	devfs_mk_dir("nbd");
+	for (i = 0; i < MAX_NBD; i++) {
+		struct gendisk *disk = nbd_dev[i].disk;
+		nbd_dev[i].file = NULL;
+		nbd_dev[i].magic = LO_MAGIC;
+		nbd_dev[i].flags = 0;
+		spin_lock_init(&nbd_dev[i].queue_lock);
+		INIT_LIST_HEAD(&nbd_dev[i].queue_head);
+		init_MUTEX(&nbd_dev[i].tx_lock);
+		nbd_dev[i].blksize = 1024;
+		nbd_dev[i].bytesize = 0x7ffffc00ULL << 10; /* 2TB */
+		disk->major = NBD_MAJOR;
+		disk->first_minor = i;
+		disk->fops = &nbd_fops;
+		disk->private_data = &nbd_dev[i];
+		disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
+		sprintf(disk->disk_name, "nbd%d", i);
+		sprintf(disk->devfs_name, "nbd/%d", i);
+		set_capacity(disk, 0x7ffffc00ULL << 1); /* 2 TB */
+		add_disk(disk);
+	}
+
+	return 0;
+out:
+	while (i--) {
+		blk_cleanup_queue(nbd_dev[i].disk->queue);
+		put_disk(nbd_dev[i].disk);
+	}
+	return err;
+}
+
+static void __exit nbd_cleanup(void)
+{
+	int i;
+	for (i = 0; i < MAX_NBD; i++) {
+		struct gendisk *disk = nbd_dev[i].disk;
+		if (disk) {
+			del_gendisk(disk);
+			blk_cleanup_queue(disk->queue);
+			put_disk(disk);
+		}
+	}
+	devfs_remove("nbd");
+	unregister_blkdev(NBD_MAJOR, "nbd");
+	printk(KERN_INFO "nbd: unregistered device at major %d\n", NBD_MAJOR);
+}
+
+module_init(nbd_init);
+module_exit(nbd_cleanup);
+
+MODULE_DESCRIPTION("Network Block Device");
+MODULE_LICENSE("GPL");
+
+#ifndef NDEBUG
+module_param(debugflags, int, 0644);
+MODULE_PARM_DESC(debugflags, "flags for controlling debug output");
+#endif
diff --git a/drivers/block/noop-iosched.c b/drivers/block/noop-iosched.c
new file mode 100644
index 0000000..888c477
--- /dev/null
+++ b/drivers/block/noop-iosched.c
@@ -0,0 +1,104 @@
+/*
+ * elevator noop
+ */
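+/*
+ * The no-op elevator is a merge-only FIFO: requests are added at the tail
+ * of the queue (or at the head for front inserts), bios are merged into
+ * queued requests where possible, and dispatch order is arrival order.
+ */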
+#include <linux/blkdev.h>
+#include <linux/elevator.h>
+#include <linux/bio.h>
+#include <linux/module.h>
+#include <linux/init.h>
+
+/*
+ * See if we can find a request that this buffer can be coalesced with.
+ */
+static int elevator_noop_merge(request_queue_t *q, struct request **req,
+			       struct bio *bio)
+{
+	struct list_head *entry = &q->queue_head;
+	struct request *__rq;
+	int ret;
+
+	if ((ret = elv_try_last_merge(q, bio))) {
+		*req = q->last_merge;
+		return ret;
+	}
+
+	while ((entry = entry->prev) != &q->queue_head) {
+		__rq = list_entry_rq(entry);
+
+		if (__rq->flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER))
+			break;
+		else if (__rq->flags & REQ_STARTED)
+			break;
+
+		if (!blk_fs_request(__rq))
+			continue;
+
+		if ((ret = elv_try_merge(__rq, bio))) {
+			*req = __rq;
+			q->last_merge = __rq;
+			return ret;
+		}
+	}
+
+	return ELEVATOR_NO_MERGE;
+}
+
+static void elevator_noop_merge_requests(request_queue_t *q, struct request *req,
+					 struct request *next)
+{
+	list_del_init(&next->queuelist);
+}
+
+static void elevator_noop_add_request(request_queue_t *q, struct request *rq,
+				      int where)
+{
+	if (where == ELEVATOR_INSERT_FRONT)
+		list_add(&rq->queuelist, &q->queue_head);
+	else
+		list_add_tail(&rq->queuelist, &q->queue_head);
+
+	/*
+	 * new merges must not precede this barrier
+	 */
+	if (rq->flags & REQ_HARDBARRIER)
+		q->last_merge = NULL;
+	else if (!q->last_merge)
+		q->last_merge = rq;
+}
+
+static struct request *elevator_noop_next_request(request_queue_t *q)
+{
+	if (!list_empty(&q->queue_head))
+		return list_entry_rq(q->queue_head.next);
+
+	return NULL;
+}
+
+static struct elevator_type elevator_noop = {
+	.ops = {
+		.elevator_merge_fn		= elevator_noop_merge,
+		.elevator_merge_req_fn		= elevator_noop_merge_requests,
+		.elevator_next_req_fn		= elevator_noop_next_request,
+		.elevator_add_req_fn		= elevator_noop_add_request,
+	},
+	.elevator_name = "noop",
+	.elevator_owner = THIS_MODULE,
+};
+
+static int __init noop_init(void)
+{
+	return elv_register(&elevator_noop);
+}
+
+static void __exit noop_exit(void)
+{
+	elv_unregister(&elevator_noop);
+}
+
+module_init(noop_init);
+module_exit(noop_exit);
+
+MODULE_AUTHOR("Jens Axboe");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("No-op IO scheduler");
diff --git a/drivers/block/paride/Kconfig b/drivers/block/paride/Kconfig
new file mode 100644
index 0000000..17ff405
--- /dev/null
+++ b/drivers/block/paride/Kconfig
@@ -0,0 +1,305 @@
+#
+# PARIDE configuration
+#
+# PARIDE doesn't need PARPORT, but if PARPORT is configured as a module,
+# PARIDE must also be a module.  The bogus CONFIG_PARIDE_PARPORT option
+# controls the choices given to the user ...
+config PARIDE_PARPORT
+	tristate
+	depends on PARIDE!=n
+	default m if PARPORT=m
+	default y if PARPORT!=m
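+# (e.g. with PARPORT=m the defaults above resolve to m, enforcing the rule
+# that PARIDE can then only be built as a module)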
+
+comment "Parallel IDE high-level drivers"
+	depends on PARIDE
+
+config PARIDE_PD
+	tristate "Parallel port IDE disks"
+	depends on PARIDE
+	help
+	  This option enables the high-level driver for IDE-type disk devices
+	  connected through a parallel port. If you chose to build PARIDE
+	  support into your kernel, you may answer Y here to build in the
+	  parallel port IDE driver, otherwise you should answer M to build
+	  it as a loadable module. The module will be called pd. You
+	  must also have at least one parallel port protocol driver in your
+	  system. Among the devices supported by this driver are the SyQuest
+	  EZ-135, EZ-230 and SparQ drives, the Avatar Shark and the backpack
+	  hard drives from MicroSolutions.
+
+config PARIDE_PCD
+	tristate "Parallel port ATAPI CD-ROMs"
+	depends on PARIDE
+	---help---
+	  This option enables the high-level driver for ATAPI CD-ROM devices
+	  connected through a parallel port. If you chose to build PARIDE
+	  support into your kernel, you may answer Y here to build in the
+	  parallel port ATAPI CD-ROM driver, otherwise you should answer M to
+	  build it as a loadable module. The module will be called pcd. You
+	  must also have at least one parallel port protocol driver in your
+	  system. Among the devices supported by this driver are the
+	  MicroSolutions backpack CD-ROM drives and the Freecom Power CD. If
+	  you have such a CD-ROM drive, you should also say Y or M to "ISO
+	  9660 CD-ROM file system support" below, because that's the file
+	  system used on CD-ROMs.
+
+config PARIDE_PF
+	tristate "Parallel port ATAPI disks"
+	depends on PARIDE
+	help
+	  This option enables the high-level driver for ATAPI disk devices
+	  connected through a parallel port. If you chose to build PARIDE
+	  support into your kernel, you may answer Y here to build in the
+	  parallel port ATAPI disk driver, otherwise you should answer M
+	  to build it as a loadable module. The module will be called pf.
+	  You must also have at least one parallel port protocol driver in
+	  your system. Among the devices supported by this driver are the
+	  MicroSolutions backpack PD/CD drive and the Imation Superdisk
+	  LS-120 drive.
+
+config PARIDE_PT
+	tristate "Parallel port ATAPI tapes"
+	depends on PARIDE
+	help
+	  This option enables the high-level driver for ATAPI tape devices
+	  connected through a parallel port. If you chose to build PARIDE
+	  support into your kernel, you may answer Y here to build in the
+	  parallel port ATAPI disk driver, otherwise you should answer M
+	  to build it as a loadable module. The module will be called pt.
+	  You must also have at least one parallel port protocol driver in
+	  your system. Among the devices supported by this driver is the
+	  parallel port version of the HP 5GB drive.
+
+config PARIDE_PG
+	tristate "Parallel port generic ATAPI devices"
+	depends on PARIDE
+	---help---
+	  This option enables a special high-level driver for generic ATAPI
+	  devices connected through a parallel port. The driver allows user
+	  programs, such as cdrtools, to send ATAPI commands directly to a
+	  device.
+
+	  If you chose to build PARIDE support into your kernel, you may
+	  answer Y here to build in the parallel port generic ATAPI driver,
+	  otherwise you should answer M to build it as a loadable module. The
+	  module will be called pg.
+
+	  You must also have at least one parallel port protocol driver in
+	  your system.
+
+	  This driver implements an API loosely related to the generic SCSI
+	  driver. See <file:include/linux/pg.h> for details.
+
+	  You can obtain the most recent version of cdrtools from
+	  <ftp://ftp.berlios.de/pub/cdrecord/>. Versions 1.6.1a3 and
+	  later fully support this driver.
+
+comment "Parallel IDE protocol modules"
+	depends on PARIDE
+
+config PARIDE_ATEN
+	tristate "ATEN EH-100 protocol"
+	depends on PARIDE
+	help
+	  This option enables support for the ATEN EH-100 parallel port IDE
+	  protocol. This protocol is used in some inexpensive low performance
+	  parallel port kits made in Hong Kong. If you chose to build PARIDE
+	  support into your kernel, you may answer Y here to build in the
+	  protocol driver, otherwise you should answer M to build it as a
+	  loadable module. The module will be called aten. You must also
+	  have a high-level driver for the type of device that you want to
+	  support.
+
+config PARIDE_BPCK
+	tristate "MicroSolutions backpack (Series 5) protocol"
+	depends on PARIDE
+	---help---
+	  This option enables support for the Micro Solutions BACKPACK
+	  parallel port Series 5 IDE protocol.  (Most BACKPACK drives made
+	  before 1999 were Series 5.)  Series 5 drives will NOT always have
+	  the series noted on the bottom of the drive; Series 6 drives will.
+
+	  In other words, if your BACKPACK drive doesn't say "Series 6" on the
+	  bottom, enable this option.
+
+	  If you chose to build PARIDE support into your kernel, you may
+	  answer Y here to build in the protocol driver, otherwise you should
+	  answer M to build it as a loadable module.  The module will be
+	  called bpck.  You must also have a high-level driver for the type
+	  of device that you want to support.
+
+config PARIDE_BPCK6
+	tristate "MicroSolutions backpack (Series 6) protocol"
+	depends on PARIDE && !64BIT
+	---help---
+	  This option enables support for the Micro Solutions BACKPACK
+	  parallel port Series 6 IDE protocol.  (Most BACKPACK drives made
+	  after 1999 were Series 6.)  Series 6 drives will have the series
+	  noted on the bottom of the drive; Series 5 drives don't always
+	  have it noted.
+
+	  In other words, if your BACKPACK drive says "Series 6" on the
+	  bottom, enable this option.
+
+	  If you chose to build PARIDE support into your kernel, you may
+	  answer Y here to build in the protocol driver, otherwise you should
+	  answer M to build it as a loadable module.  The module will be
+	  called bpck6.  You must also have a high-level driver for the type
+	  of device that you want to support.
+
+config PARIDE_COMM
+	tristate "DataStor Commuter protocol"
+	depends on PARIDE
+	help
+	  This option enables support for the Commuter parallel port IDE
+	  protocol from DataStor. If you chose to build PARIDE support
+	  into your kernel, you may answer Y here to build in the protocol
+	  driver, otherwise you should answer M to build it as a loadable
+	  module. The module will be called comm. You must also have
+	  a high-level driver for the type of device that you want to support.
+
+config PARIDE_DSTR
+	tristate "DataStor EP-2000 protocol"
+	depends on PARIDE
+	help
+	  This option enables support for the EP-2000 parallel port IDE
+	  protocol from DataStor. If you chose to build PARIDE support
+	  into your kernel, you may answer Y here to build in the protocol
+	  driver, otherwise you should answer M to build it as a loadable
+	  module. The module will be called dstr. You must also have
+	  a high-level driver for the type of device that you want to support.
+
+config PARIDE_FIT2
+	tristate "FIT TD-2000 protocol"
+	depends on PARIDE
+	help
+	  This option enables support for the TD-2000 parallel port IDE
+	  protocol from Fidelity International Technology. This is a simple
+	  (low speed) adapter that is used in some portable hard drives. If
+	  you chose to build PARIDE support into your kernel, you may answer Y
+	  here to build in the protocol driver, otherwise you should answer M
+	  to build it as a loadable module. The module will be called fit2.
+	  You must also have a high-level driver for the type of device that
+	  you want to support.
+
+config PARIDE_FIT3
+	tristate "FIT TD-3000 protocol"
+	depends on PARIDE
+	help
+	  This option enables support for the TD-3000 parallel port IDE
+	  protocol from Fidelity International Technology. This protocol is
+	  used in newer models of their portable disk, CD-ROM and PD/CD
+	  devices. If you chose to build PARIDE support into your kernel, you
+	  may answer Y here to build in the protocol driver, otherwise you
+	  should answer M to build it as a loadable module. The module will be
+	  called fit3. You must also have a high-level driver for the type
+	  of device that you want to support.
+
+config PARIDE_EPAT
+	tristate "Shuttle EPAT/EPEZ protocol"
+	depends on PARIDE
+	help
+	  This option enables support for the EPAT parallel port IDE protocol.
+	  EPAT is a parallel port IDE adapter manufactured by Shuttle
+	  Technology and widely used in devices from major vendors such as
+	  Hewlett-Packard, SyQuest, Imation and Avatar. If you chose to build
+	  PARIDE support into your kernel, you may answer Y here to build in
+	  the protocol driver, otherwise you should answer M to build it as a
+	  loadable module. The module will be called epat. You must also
+	  have a high-level driver for the type of device that you want to
+	  support.
+
+config PARIDE_EPATC8
+	bool "Support c7/c8 chips (EXPERIMENTAL)"
+	depends on PARIDE_EPAT && EXPERIMENTAL
+	help
+	  This option enables support for the newer Shuttle EP1284 (aka c7 and
+	  c8) chip. You need this if you are using any recent Imation SuperDisk
+	  (LS-120) drive.
+
+config PARIDE_EPIA
+	tristate "Shuttle EPIA protocol"
+	depends on PARIDE
+	help
+	  This option enables support for the (obsolete) EPIA parallel port
+	  IDE protocol from Shuttle Technology. This adapter can still be
+	  found in some no-name kits. If you chose to build PARIDE support
+	  into your kernel, you may answer Y here to build in the protocol
+	  driver, otherwise you should answer M to build it as a loadable
+	  module. The module will be called epia. You must also have a
+	  high-level driver for the type of device that you want to support.
+
+config PARIDE_FRIQ
+	tristate "Freecom IQ ASIC-2 protocol"
+	depends on PARIDE
+	help
+	  This option enables support for version 2 of the Freecom IQ parallel
+	  port IDE adapter.  This adapter is used by the Maxell Superdisk
+	  drive.  If you chose to build PARIDE support into your kernel, you
+	  may answer Y here to build in the protocol driver, otherwise you
+	  should answer M to build it as a loadable module. The module will be
+	  called friq. You must also have a high-level driver for the type
+	  of device that you want to support.
+
+config PARIDE_FRPW
+	tristate "FreeCom power protocol"
+	depends on PARIDE
+	help
+	  This option enables support for the Freecom power parallel port IDE
+	  protocol. If you chose to build PARIDE support into your kernel, you
+	  may answer Y here to build in the protocol driver, otherwise you
+	  should answer M to build it as a loadable module. The module will be
+	  called frpw. You must also have a high-level driver for the type
+	  of device that you want to support.
+
+config PARIDE_KBIC
+	tristate "KingByte KBIC-951A/971A protocols"
+	depends on PARIDE
+	help
+	  This option enables support for the KBIC-951A and KBIC-971A parallel
+	  port IDE protocols from KingByte Information Corp. KingByte's
+	  adapters appear in many no-name portable disk and CD-ROM products,
+	  especially in Europe. If you chose to build PARIDE support into your
+	  kernel, you may answer Y here to build in the protocol driver,
+	  otherwise you should answer M to build it as a loadable module. The
+	  module will be called kbic. You must also have a high-level driver
+	  for the type of device that you want to support.
+
+config PARIDE_KTTI
+	tristate "KT PHd protocol"
+	depends on PARIDE
+	help
+	  This option enables support for the "PHd" parallel port IDE protocol
+	  from KT Technology. This is a simple (low speed) adapter that is
+	  used in some 2.5" portable hard drives. If you chose to build PARIDE
+	  support into your kernel, you may answer Y here to build in the
+	  protocol driver, otherwise you should answer M to build it as a
+	  loadable module. The module will be called ktti. You must also
+	  have a high-level driver for the type of device that you want to
+	  support.
+
+config PARIDE_ON20
+	tristate "OnSpec 90c20 protocol"
+	depends on PARIDE
+	help
+	  This option enables support for the (obsolete) 90c20 parallel port
+	  IDE protocol from OnSpec (often marketed under the ValuStore brand
+	  name). If you chose to build PARIDE support into your kernel, you
+	  may answer Y here to build in the protocol driver, otherwise you
+	  should answer M to build it as a loadable module. The module will
+	  be called on20. You must also have a high-level driver for the
+	  type of device that you want to support.
+
+config PARIDE_ON26
+	tristate "OnSpec 90c26 protocol"
+	depends on PARIDE
+	help
+	  This option enables support for the 90c26 parallel port IDE protocol
+	  from OnSpec Electronics (often marketed under the ValuStore brand
+	  name). If you chose to build PARIDE support into your kernel, you
+	  may answer Y here to build in the protocol driver, otherwise you
+	  should answer M to build it as a loadable module. The module will be
+	  called on26. You must also have a high-level driver for the type
+	  of device that you want to support.
+
+#
diff --git a/drivers/block/paride/Makefile b/drivers/block/paride/Makefile
new file mode 100644
index 0000000..a539e00
--- /dev/null
+++ b/drivers/block/paride/Makefile
@@ -0,0 +1,28 @@
+#
+# Makefile for Parallel port IDE device drivers.
+#
+# 7 October 2000, Bartlomiej Zolnierkiewicz <bkz@linux-ide.org>
+# Rewritten to use lists instead of if-statements.
+#
+
+obj-$(CONFIG_PARIDE)		+= paride.o
+obj-$(CONFIG_PARIDE_ATEN)	+= aten.o
+obj-$(CONFIG_PARIDE_BPCK)	+= bpck.o
+obj-$(CONFIG_PARIDE_COMM)	+= comm.o
+obj-$(CONFIG_PARIDE_DSTR)	+= dstr.o
+obj-$(CONFIG_PARIDE_KBIC)	+= kbic.o
+obj-$(CONFIG_PARIDE_EPAT)	+= epat.o
+obj-$(CONFIG_PARIDE_EPIA)	+= epia.o
+obj-$(CONFIG_PARIDE_FRPW)	+= frpw.o
+obj-$(CONFIG_PARIDE_FRIQ)	+= friq.o
+obj-$(CONFIG_PARIDE_FIT2)	+= fit2.o
+obj-$(CONFIG_PARIDE_FIT3)	+= fit3.o
+obj-$(CONFIG_PARIDE_ON20)	+= on20.o
+obj-$(CONFIG_PARIDE_ON26)	+= on26.o
+obj-$(CONFIG_PARIDE_KTTI)	+= ktti.o
+obj-$(CONFIG_PARIDE_BPCK6)	+= bpck6.o
+obj-$(CONFIG_PARIDE_PD)		+= pd.o
+obj-$(CONFIG_PARIDE_PCD)	+= pcd.o
+obj-$(CONFIG_PARIDE_PF)		+= pf.o
+obj-$(CONFIG_PARIDE_PT)		+= pt.o
+obj-$(CONFIG_PARIDE_PG)		+= pg.o
diff --git a/drivers/block/paride/Transition-notes b/drivers/block/paride/Transition-notes
new file mode 100644
index 0000000..7037490
--- /dev/null
+++ b/drivers/block/paride/Transition-notes
@@ -0,0 +1,128 @@
+Lemma 1:
+	If ps_tq is scheduled, ps_tq_active is 1.  ps_tq_int() can be called
+	only when ps_tq_active is 1.
+Proof:	All assignments to ps_tq_active and all scheduling of ps_tq happen
+	under ps_spinlock.  There are three places where that can happen:
+	one in ps_set_intr() (A) and two in ps_tq_int() (B and C).
+	Consider the sequence of these events.  A cannot be preceded by
+	anything except B, since it is guarded by if (!ps_tq_active) under
+	ps_spinlock.  C is always preceded by B, since we can't reach it
+	other than through B and we don't drop ps_spinlock between them.
+	IOW, the sequence matches A?(BA|BC|B)*.  OTOH, the number of Bs
+	cannot exceed the combined number of As and Cs, since each call of
+	ps_tq_int() is the result of a ps_tq execution.  Therefore, the
+	sequence starts with A and each B is preceded by either A or C.
+	Moments when we enter ps_tq_int() are sandwiched between {A,C} and B
+	in that sequence, since at any time the number of Bs cannot exceed
+	the number of these moments which, in turn, cannot exceed the number
+	of As and Cs.
+	In other words, the sequence of events is (A or C set ps_tq_active to
+	1 and schedule ps_tq, ps_tq is executed, ps_tq_int() is entered,
+	B resets ps_tq_active)*.
+
+
+Consider the following area:
+	* in do_pd_request1(): to calls of pi_do_claimed() and return in
+	  case when pd_req is NULL.
+	* in next_request(): to call of do_pd_request1()
+	* in do_pd_read(): to call of ps_set_intr()
+	* in do_pd_read_start(): to calls of pi_do_claimed(), next_request()
+and ps_set_intr()
+	* in do_pd_read_drq(): to calls of pi_do_claimed() and next_request()
+	* in do_pd_write(): to call of ps_set_intr()
+	* in do_pd_write_start(): to calls of pi_do_claimed(), next_request()
+and ps_set_intr()
+	* in do_pd_write_done(): to calls of pi_do_claimed() and next_request()
+	* in ps_set_intr(): to check for ps_tq_active and to scheduling
+	  ps_tq if ps_tq_active was 0.
+	* in ps_tq_int(): from the moment when we get ps_spinlock() to the
+	  return, call of con() or scheduling ps_tq.
+	* in pi_schedule_claimed() when called from pi_do_claimed() called from
+	  pd.c, everything until returning 1 or setting ->claim_cont on the
+	  path that returns 0
+	* in pi_do_claimed() when called from pd.c, everything until the call
+	  of pi_schedule_claimed(), plus everything until the call of cont()
+	  if pi_schedule_claimed() has returned 1.
+	* in pi_wake_up() called for a PIA that belongs to pd.c, everything from
+	  the moment when pi_spinlock has been acquired.
+
+Lemma 2:
+	1) at any time at most one thread of execution can be in that area or
+	be preempted there.
+	2) When there is such a thread, pd_busy is set or pd_lock is held by
+	that thread.
+	3) When there is such a thread, ps_tq_active is 0 or ps_spinlock is
+	held by that thread.
+	4) When there is such a thread, all PIA belonging to pd.c have NULL
+	->claim_cont or pi_spinlock is held by thread in question.
+
+Proof:	consider the first moment when the above is not true.
+
+(1) can become not true if some thread enters that area while another is there.
+	a) do_pd_request1() can be called from next_request() or do_pd_request()
+	   In the first case the thread was already in the area.  In the second,
+	   the thread was holding pd_lock and found pd_busy not set, which would
+	   mean that (2) was already not true.
+	b) ps_set_intr() and pi_schedule_claimed() can be called only from the
+	   area.
+	c) pi_do_claimed() is called by pd.c only from the area.
+	d) ps_tq_int() can enter the area only when the thread is holding
+	   ps_spinlock and ps_tq_active is 1 (due to Lemma 1).  It means that
+	   (3) was already not true.
+	e) do_pd_{read,write}* could be called only from the area.  The only
+	   case that needs consideration is a call from pi_wake_up() and there
+	   we would have to be called for the PIA that got ->claim_cont
+	   from pd.c.  That could happen only if pi_do_claimed() had been
+	   called from pd.c for that PIA, which happens only for PIA belonging
+	   to pd.c.
+	f) pi_wake_up() can enter the area only when the thread is holding
+	   pi_spinlock and ->claim_cont is non-NULL for a PIA belonging to
+	   pd.c.  It means that (4) was already not true.
+
+(2) can become not true only when pd_lock is released by the thread in question.
+	Indeed, pd_busy is reset only in the area and the thread that resets
+	it is holding pd_lock.  The only place within the area where we
+	release pd_lock is in pd_next_buf() (called from within the area).
+	But that code does not reset pd_busy, so pd_busy would have to be
+	0 when pd_next_buf() had acquired pd_lock.  If it became 0 while
+	we were acquiring the lock, (1) would already be false, since
+	the thread that had reset it would be in the area simultaneously.
+	If it was 0 before we tried to acquire pd_lock, (2) would already
+	be false.
+
+For similar reasons, (3) can become not true only when ps_spinlock is released
+by the thread in question.  However, all such places within the area are right
+after resetting ps_tq_active to 0.
+
+(4) is done the same way - all places where we release pi_spinlock within
+the area are either after resetting ->claim_cont to NULL while holding
+pi_spinlock, or after not touching ->claim_cont since acquiring pi_spinlock
+also in the area.  The only place where ->claim_cont is made non-NULL is
+in the area, under pi_spinlock, and we do not release the lock until after
+leaving the area.
+
+QED.
+
+
+Corollary 1: ps_tq_active can be killed.  Indeed, the only place where we
+check its value is in ps_set_intr() and if it had been non-zero at that
+point, we would have violated either (2.1) (if it was set while ps_set_intr()
+was acquiring ps_spinlock) or (2.3) (if it was set when we started to
+acquire ps_spinlock).
+
+Corollary 2: ps_spinlock can be killed.  Indeed, Lemma 1 and Lemma 2 show
+that the only possible contention is between scheduling ps_tq followed by
+immediate release of spinlock and beginning of execution of ps_tq on
+another CPU.
+
+Corollary 3: assignment to pd_busy in do_pd_read_start() and do_pd_write_start()
+can be killed.  Indeed, we are not holding pd_lock and thus pd_busy is already
+1 here.
+
+Corollary 4: in ps_tq_int() uses of con can be replaced with uses of
+ps_continuation, since the latter is changed only from the area.
+We don't need to reset it to NULL, since we are guaranteed that there
+will be a call of ps_set_intr() before we look at ps_continuation again.
+We can remove the check for ps_continuation being NULL for the same
+reason - the value is guaranteed to be set by the last ps_set_intr() and
+we never pass it NULL.  Assignments at the beginning of ps_set_intr()
+can be moved to its callers as long as they remain within the area.
diff --git a/drivers/block/paride/aten.c b/drivers/block/paride/aten.c
new file mode 100644
index 0000000..c4d696d
--- /dev/null
+++ b/drivers/block/paride/aten.c
@@ -0,0 +1,162 @@
+/* 
+        aten.c  (c) 1997-8  Grant R. Guenther <grant@torque.net>
+                            Under the terms of the GNU General Public License.
+
+	aten.c is a low-level protocol driver for the ATEN EH-100
+	parallel port adapter.  The EH-100 supports 4-bit and 8-bit
+        modes only.  There is also an EH-132 which supports EPP mode
+        transfers.  The EH-132 is not yet supported.
+
+*/
+
+/* Changes:
+
+	1.01	GRG 1998.05.05	init_proto, release_proto
+
+*/
+
+#define ATEN_VERSION      "1.01"
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/wait.h>
+#include <linux/types.h>
+#include <asm/io.h>
+
+#include "paride.h"
+
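+/* j44 joins two 4-bit status-port reads into one data byte; the final xor
+   undoes the hardware inversion of the BUSY line in each half */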
+#define j44(a,b)                ((((a>>4)&0x0f)|(b&0xf0))^0x88)
+
+/* cont = 0 - access the IDE register file 
+   cont = 1 - access the IDE command set 
+*/
+
+static int  cont_map[2] = { 0x08, 0x20 };
+
+static void  aten_write_regr( PIA *pi, int cont, int regr, int val)
+
+{	int r;
+
+	r = regr + cont_map[cont] + 0x80;
+
+	w0(r); w2(0xe); w2(6); w0(val); w2(7); w2(6); w2(0xc);
+}
+
+static int aten_read_regr( PIA *pi, int cont, int regr )
+
+{	int  a, b, r;
+
+        r = regr + cont_map[cont] + 0x40;
+
+	switch (pi->mode) {
+
+        case 0: w0(r); w2(0xe); w2(6); 
+		w2(7); w2(6); w2(0);
+		a = r1(); w0(0x10); b = r1(); w2(0xc);
+		return j44(a,b);
+
+        case 1: r |= 0x10;
+		w0(r); w2(0xe); w2(6); w0(0xff); 
+		w2(0x27); w2(0x26); w2(0x20);
+		a = r0();
+		w2(0x26); w2(0xc);
+		return a;
+	}
+	return -1;
+}
+
+static void aten_read_block( PIA *pi, char * buf, int count )
+
+{	int  k, a, b, c, d;
+
+	switch (pi->mode) {
+
+	case 0:	w0(0x48); w2(0xe); w2(6);
+		for (k=0;k<count/2;k++) {
+			w2(7); w2(6); w2(2);
+			a = r1(); w0(0x58); b = r1();
+			w2(0); d = r1(); w0(0x48); c = r1();
+			buf[2*k] = j44(c,d);
+			buf[2*k+1] = j44(a,b);
+		}
+		w2(0xc);
+		break;
+
+	case 1: w0(0x58); w2(0xe); w2(6);
+		for (k=0;k<count/2;k++) {
+			w2(0x27); w2(0x26); w2(0x22);
+			a = r0(); w2(0x20); b = r0();
+			buf[2*k] = b; buf[2*k+1] = a;
+		}
+		w2(0x26); w2(0xc);
+		break;
+	}
+}
+
+static void aten_write_block( PIA *pi, char * buf, int count )
+
+{	int k;
+
+	w0(0x88); w2(0xe); w2(6);
+	for (k=0;k<count/2;k++) {
+		w0(buf[2*k+1]); w2(0xe); w2(6);
+		w0(buf[2*k]); w2(7); w2(6);
+	}
+	w2(0xc);
+}
+
+static void aten_connect ( PIA *pi  )
+
+{       pi->saved_r0 = r0();
+        pi->saved_r2 = r2();
+	w2(0xc);	
+}
+
+static void aten_disconnect ( PIA *pi )
+
+{       w0(pi->saved_r0);
+        w2(pi->saved_r2);
+} 
+
+static void aten_log_adapter( PIA *pi, char * scratch, int verbose )
+
+{       char    *mode_string[2] = {"4-bit","8-bit"};
+
+        printk("%s: aten %s, ATEN EH-100 at 0x%x, ",
+                pi->device,ATEN_VERSION,pi->port);
+        printk("mode %d (%s), delay %d\n",pi->mode,
+		mode_string[pi->mode],pi->delay);
+
+}
+
+static struct pi_protocol aten = {
+	.owner		= THIS_MODULE,
+	.name		= "aten",
+	.max_mode	= 2,
+	.epp_first	= 2,
+	.default_delay	= 1,
+	.max_units	= 1,
+	.write_regr	= aten_write_regr,
+	.read_regr	= aten_read_regr,
+	.write_block	= aten_write_block,
+	.read_block	= aten_read_block,
+	.connect	= aten_connect,
+	.disconnect	= aten_disconnect,
+	.log_adapter	= aten_log_adapter,
+};
+
+static int __init aten_init(void)
+{
+	return pi_register(&aten)-1;
+}
+
+static void __exit aten_exit(void)
+{
+	pi_unregister( &aten );
+}
+
+MODULE_LICENSE("GPL");
+module_init(aten_init)
+module_exit(aten_exit)
diff --git a/drivers/block/paride/bpck.c b/drivers/block/paride/bpck.c
new file mode 100644
index 0000000..d462ff6
--- /dev/null
+++ b/drivers/block/paride/bpck.c
@@ -0,0 +1,477 @@
+/* 
+	bpck.c	(c) 1996-8  Grant R. Guenther <grant@torque.net>
+		            Under the terms of the GNU General Public License.
+
+	bpck.c is a low-level protocol driver for the MicroSolutions 
+	"backpack" parallel port IDE adapter.  
+
+*/
+
+/* Changes:
+
+	1.01	GRG 1998.05.05 init_proto, release_proto, pi->delay 
+	1.02    GRG 1998.08.15 default pi->delay returned to 4
+
+*/
+
+#define	BPCK_VERSION	"1.02" 
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <asm/io.h>
+
+#include "paride.h"
+
+#undef r2
+#undef w2
+
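+/* Shadow the generic r2()/w2() from paride.h: bpck keeps a soft copy of
+   the control port in pi->private (PC) so t2() can toggle bits in it. */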
+#define PC			pi->private
+#define r2()			(PC=(in_p(2) & 0xff))
+#define w2(byte)  		{out_p(2,byte); PC = byte;}
+#define t2(pat)   		{PC ^= pat; out_p(2,PC);}
+#define e2()			{PC &= 0xfe; out_p(2,PC);}
+#define o2()			{PC |= 1; out_p(2,PC);}
+
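+/* j44 reassembles one data byte from the two nibble reads l (low half)
+   and h (high half) taken off the status lines */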
+#define j44(l,h)     (((l>>3)&0x7)|((l>>4)&0x8)|((h<<1)&0x70)|(h&0x80))
+
+/* cont = 0 - access the IDE register file 
+   cont = 1 - access the IDE command set 
+   cont = 2 - use internal bpck register addressing
+*/
+
+static int  cont_map[3] = { 0x40, 0x48, 0 };
+
+static int bpck_read_regr( PIA *pi, int cont, int regr )
+
+{       int r, l, h;
+
+	r = regr + cont_map[cont];
+
+	switch (pi->mode) {
+
+	case 0: w0(r & 0xf); w0(r); t2(2); t2(4);
+	        l = r1();
+        	t2(4);
+        	h = r1();
+        	return j44(l,h);
+
+	case 1: w0(r & 0xf); w0(r); t2(2);
+	        e2(); t2(0x20);
+		t2(4); h = r0();
+	        t2(1); t2(0x20);
+	        return h;
+
+	case 2:
+	case 3:
+	case 4: w0(r); w2(9); w2(0); w2(0x20);
+		h = r4();
+		w2(0);
+		return h;
+
+	}
+	return -1;
+}	
+
+static void bpck_write_regr( PIA *pi, int cont, int regr, int val )
+
+{	int	r;
+
+        r = regr + cont_map[cont];
+
+	switch (pi->mode) {
+
+	case 0:
+	case 1: w0(r);
+		t2(2);
+		w0(val);
+		o2(); t2(4); t2(1);
+		break;
+
+	case 2:
+	case 3:
+	case 4: w0(r); w2(9); w2(0);
+		w0(val); w2(1); w2(3); w2(0);
+		break;
+
+	}
+}
+
+/* These macros access the bpck registers in native addressing */
+
+#define WR(r,v)		bpck_write_regr(pi,2,r,v)
+#define RR(r)		(bpck_read_regr(pi,2,r))
+
+static void bpck_write_block( PIA *pi, char * buf, int count )
+
+{	int i;
+
+	switch (pi->mode) {
+
+	case 0: WR(4,0x40);
+		w0(0x40); t2(2); t2(1);
+		for (i=0;i<count;i++) { w0(buf[i]); t2(4); }
+		WR(4,0);
+		break;
+
+	case 1: WR(4,0x50);
+                w0(0x40); t2(2); t2(1);
+                for (i=0;i<count;i++) { w0(buf[i]); t2(4); }
+                WR(4,0x10);
+		break;
+
+	case 2: WR(4,0x48);
+		w0(0x40); w2(9); w2(0); w2(1);
+		for (i=0;i<count;i++) w4(buf[i]);
+		w2(0);
+		WR(4,8);
+		break;
+
+        case 3: WR(4,0x48);
+                w0(0x40); w2(9); w2(0); w2(1);
+                for (i=0;i<count/2;i++) w4w(((u16 *)buf)[i]);
+                w2(0);
+                WR(4,8);
+                break;
+ 
+        case 4: WR(4,0x48);
+                w0(0x40); w2(9); w2(0); w2(1);
+                for (i=0;i<count/4;i++) w4l(((u32 *)buf)[i]);
+                w2(0);
+                WR(4,8);
+                break;
+ 	}
+}
+
+static void bpck_read_block( PIA *pi, char * buf, int count )
+
+{	int i, l, h;
+
+	switch (pi->mode) {
+
+      	case 0: WR(4,0x40);
+		w0(0x40); t2(2);
+		for (i=0;i<count;i++) {
+		    t2(4); l = r1();
+		    t2(4); h = r1();
+		    buf[i] = j44(l,h);
+		}
+		WR(4,0);
+		break;
+
+	case 1: WR(4,0x50);
+		w0(0x40); t2(2); t2(0x20);
+      	        for(i=0;i<count;i++) { t2(4); buf[i] = r0(); }
+	        t2(1); t2(0x20);
+	        WR(4,0x10);
+		break;
+
+	case 2: WR(4,0x48);
+		w0(0x40); w2(9); w2(0); w2(0x20);
+		for (i=0;i<count;i++) buf[i] = r4();
+		w2(0);
+		WR(4,8);
+		break;
+
+        case 3: WR(4,0x48);
+                w0(0x40); w2(9); w2(0); w2(0x20);
+                for (i=0;i<count/2;i++) ((u16 *)buf)[i] = r4w();
+                w2(0);
+                WR(4,8);
+                break;
+
+        case 4: WR(4,0x48);
+                w0(0x40); w2(9); w2(0); w2(0x20);
+                for (i=0;i<count/4;i++) ((u32 *)buf)[i] = r4l();
+                w2(0);
+                WR(4,8);
+                break;
+
+	}
+}
+
+static int bpck_probe_unit ( PIA *pi )
+
+{	int o1, o0, f7, id;
+	int t, s;
+
+	id = pi->unit;
+	s = 0;
+	w2(4); w2(0xe); r2(); t2(2); 
+	o1 = r1()&0xf8;
+	o0 = r0();
+	w0(255-id); w2(4); w0(id);
+	t2(8); t2(8); t2(8);
+	t2(2); t = r1()&0xf8;
+	f7 = ((id % 8) == 7);
+	if ((f7) || (t != o1)) { t2(2); s = r1()&0xf8; }
+	if ((t == o1) && ((!f7) || (s == o1)))  {
+		w2(0x4c); w0(o0);
+		return 0;	
+	}
+	t2(8); w0(0); t2(2); w2(0x4c); w0(o0);
+	return 1;
+}
+	
+static void bpck_connect ( PIA *pi  )
+
+{       pi->saved_r0 = r0();
+	w0(0xff-pi->unit); w2(4); w0(pi->unit);
+	t2(8); t2(8); t2(8); 
+	t2(2); t2(2);
+	
+	switch (pi->mode) {
+
+	case 0: t2(8); WR(4,0);
+		break;
+
+	case 1: t2(8); WR(4,0x10);
+		break;
+
+	case 2:
+        case 3:
+	case 4: w2(0); WR(4,8);
+		break;
+
+	}
+
+	WR(5,8);
+
+	if (pi->devtype == PI_PCD) {
+		WR(0x46,0x10);		/* fiddle with ESS logic ??? */
+		WR(0x4c,0x38);
+		WR(0x4d,0x88);
+		WR(0x46,0xa0);
+		WR(0x41,0);
+		WR(0x4e,8);
+		}
+}
+
+static void bpck_disconnect ( PIA *pi )
+
+{	w0(0); 
+	if (pi->mode >= 2) { w2(9); w2(0); } else t2(2);
+	w2(0x4c); w0(pi->saved_r0);
+} 
+
+static void bpck_force_spp ( PIA *pi )
+
+/* This fakes the EPP protocol to turn off EPP ... */
+
+{       pi->saved_r0 = r0();
+        w0(0xff-pi->unit); w2(4); w0(pi->unit);
+        t2(8); t2(8); t2(8); 
+        t2(2); t2(2);
+
+        w2(0); 
+        w0(4); w2(9); w2(0); 
+        w0(0); w2(1); w2(3); w2(0);     
+        w0(0); w2(9); w2(0);
+        w2(0x4c); w0(pi->saved_r0);
+}
+
+#define TEST_LEN  16
+
+static int bpck_test_proto( PIA *pi, char * scratch, int verbose )
+
+{	int i, e, l, h, om;
+	char buf[TEST_LEN];
+
+	bpck_force_spp(pi);
+
+	switch (pi->mode) {
+
+	case 0: bpck_connect(pi);
+		WR(0x13,0x7f);
+		w0(0x13); t2(2);
+		for(i=0;i<TEST_LEN;i++) {
+                    t2(4); l = r1();
+                    t2(4); h = r1();
+                    buf[i] = j44(l,h);
+		}
+		bpck_disconnect(pi);
+		break;
+
+        case 1: bpck_connect(pi);
+		WR(0x13,0x7f);
+                w0(0x13); t2(2); t2(0x20);
+                for(i=0;i<TEST_LEN;i++) { t2(4); buf[i] = r0(); }
+                t2(1); t2(0x20);
+		bpck_disconnect(pi);
+		break;
+
+	case 2:
+	case 3:
+	case 4: om = pi->mode;
+		pi->mode = 0;
+		bpck_connect(pi);
+		WR(7,3);
+		WR(4,8);
+		bpck_disconnect(pi);
+
+		pi->mode = om;
+		bpck_connect(pi);
+		w0(0x13); w2(9); w2(1); w0(0); w2(3); w2(0); w2(0xe0);
+
+		switch (pi->mode) {
+		  case 2: for (i=0;i<TEST_LEN;i++) buf[i] = r4();
+			  break;
+		  case 3: for (i=0;i<TEST_LEN/2;i++) ((u16 *)buf)[i] = r4w();
+                          break;
+		  case 4: for (i=0;i<TEST_LEN/4;i++) ((u32 *)buf)[i] = r4l();
+                          break;
+		}
+
+		w2(0);
+		WR(7,0);
+		bpck_disconnect(pi);
+
+		break;
+
+	}
+
+	if (verbose) {
+	    printk("%s: bpck: 0x%x unit %d mode %d: ",
+		   pi->device,pi->port,pi->unit,pi->mode);
+	    for (i=0;i<TEST_LEN;i++) printk("%3d",buf[i]);
+	    printk("\n");
+	}
+
+	e = 0;
+	for (i=0;i<TEST_LEN;i++) if (buf[i] != (i+1)) e++;
+	return e;
+}
+
+static void bpck_read_eeprom ( PIA *pi, char * buf )
+
+{       int i,j,k,n,p,v,f, om, od;
+
+	bpck_force_spp(pi);
+
+	om = pi->mode;  od = pi->delay;
+	pi->mode = 0; pi->delay = 6;
+
+	bpck_connect(pi);
+	
+	n = 0;
+	WR(4,0);
+	for (i=0;i<64;i++) {
+	    WR(6,8);  
+	    WR(6,0xc);
+	    p = 0x100;
+	    for (k=0;k<9;k++) {
+		f = (((i + 0x180) & p) != 0) * 2;
+		WR(6,f+0xc); 
+		WR(6,f+0xd); 
+		WR(6,f+0xc);
+		p = (p >> 1);
+	    }
+	    for (j=0;j<2;j++) {
+		v = 0;
+		for (k=0;k<8;k++) {
+		    WR(6,0xc); 
+		    WR(6,0xd); 
+		    WR(6,0xc); 
+		    f = RR(0);
+		    v = 2*v + (f == 0x84);
+		}
+		buf[2*i+1-j] = v;
+	    }
+	}
+	WR(6,8);
+	WR(6,0);
+	WR(5,8);
+
+	bpck_disconnect(pi);
+
+        if (om >= 2) {
+                bpck_connect(pi);
+                WR(7,3);
+                WR(4,8);
+                bpck_disconnect(pi);
+        }
+
+	pi->mode = om; pi->delay = od;
+}
+
+static int bpck_test_port ( PIA *pi ) 	/* check for 8-bit port */
+
+{	int	i, r, m;
+
+	w2(0x2c); i = r0(); w0(255-i); r = r0(); w0(i);
+	m = -1;
+	if (r == i) m = 2;
+	if (r == (255-i)) m = 0;
+
+	w2(0xc); i = r0(); w0(255-i); r = r0(); w0(i);
+	if (r != (255-i)) m = -1;
+	
+	if (m == 0) { w2(6); w2(0xc); r = r0(); w0(0xaa); w0(r); w0(0xaa); }
+	if (m == 2) { w2(0x26); w2(0xc); }
+
+	if (m == -1) return 0;
+	return 5;
+}
+
+static void bpck_log_adapter( PIA *pi, char * scratch, int verbose )
+
+{	char	*mode_string[5] = { "4-bit","8-bit","EPP-8",
+				    "EPP-16","EPP-32" };
+
+#ifdef DUMP_EEPROM
+	int i;
+#endif
+
+	bpck_read_eeprom(pi,scratch);
+
+#ifdef DUMP_EEPROM
+	if (verbose) {
+	   for(i=0;i<128;i++)
+		if ((scratch[i] < ' ') || (scratch[i] > '~'))
+		    scratch[i] = '.';
+	   printk("%s: bpck EEPROM: %64.64s\n",pi->device,scratch);
+	   printk("%s:              %64.64s\n",pi->device,&scratch[64]);
+	}
+#endif
+
+	printk("%s: bpck %s, backpack %8.8s unit %d",
+		pi->device,BPCK_VERSION,&scratch[110],pi->unit);
+	printk(" at 0x%x, mode %d (%s), delay %d\n",pi->port,
+		pi->mode,mode_string[pi->mode],pi->delay);
+}
+
+static struct pi_protocol bpck = {
+	.owner		= THIS_MODULE,
+	.name		= "bpck",
+	.max_mode	= 5,
+	.epp_first	= 2,
+	.default_delay	= 4,
+	.max_units	= 255,
+	.write_regr	= bpck_write_regr,
+	.read_regr	= bpck_read_regr,
+	.write_block	= bpck_write_block,
+	.read_block	= bpck_read_block,
+	.connect	= bpck_connect,
+	.disconnect	= bpck_disconnect,
+	.test_port	= bpck_test_port,
+	.probe_unit	= bpck_probe_unit,
+	.test_proto	= bpck_test_proto,
+	.log_adapter	= bpck_log_adapter,
+};
+
+static int __init bpck_init(void)
+{
+	return pi_register(&bpck)-1;
+}
+
+static void __exit bpck_exit(void)
+{
+	pi_unregister(&bpck);
+}
+
+MODULE_LICENSE("GPL");
+module_init(bpck_init)
+module_exit(bpck_exit)
diff --git a/drivers/block/paride/bpck6.c b/drivers/block/paride/bpck6.c
new file mode 100644
index 0000000..08d858a
--- /dev/null
+++ b/drivers/block/paride/bpck6.c
@@ -0,0 +1,282 @@
+/*
+	backpack.c (c) 2001 Micro Solutions Inc.
+		Released under the terms of the GNU General Public license
+
+	backpack.c is a low-level protocol driver for the Micro Solutions
+		"BACKPACK" parallel port IDE adapter
+		(Works on Series 6 drives)
+
+	Written by: Ken Hahn     (linux-dev@micro-solutions.com)
+	            Clive Turvey (linux-dev@micro-solutions.com)
+
+*/
+
+/*
+   This is Ken's linux wrapper for the PPC library
+   Version 1.0.0 is the backpack driver for which source is not available
+   Version 2.0.0 is the first to have source released 
+   Version 2.0.1 is the "Cox-ified" source code 
+   Version 2.0.2 - fixed version string usage, and made ppc functions static 
+*/
+
+
+/* PARAMETERS */
+static int verbose; /* set this to 1 to see debugging messages and whatnot */
+
+#define BACKPACK_VERSION "2.0.2"
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <asm/io.h>
+
+#if defined(CONFIG_PARPORT_MODULE)||defined(CONFIG_PARPORT)
+#include <linux/parport.h>
+#endif
+
+#include "ppc6lnx.c"
+#include "paride.h"
+
+ 
+
+#define PPCSTRUCT(pi) ((Interface *)(pi->private))
+
+/****************************************************************/
+/*
+ ATAPI CDROM DRIVE REGISTERS
+*/
+#define ATAPI_DATA       0      /* data port                  */
+#define ATAPI_ERROR      1      /* error register (read)      */
+#define ATAPI_FEATURES   1      /* feature register (write)   */
+#define ATAPI_INT_REASON 2      /* interrupt reason register  */
+#define ATAPI_COUNT_LOW  4      /* byte count register (low)  */
+#define ATAPI_COUNT_HIGH 5      /* byte count register (high) */
+#define ATAPI_DRIVE_SEL  6      /* drive select register      */
+#define ATAPI_STATUS     7      /* status port (read)         */
+#define ATAPI_COMMAND    7      /* command port (write)       */
+#define ATAPI_ALT_STATUS 0x0e /* alternate status reg (read) */
+#define ATAPI_DEVICE_CONTROL 0x0e /* device control (write)   */
+/****************************************************************/
+
+static int bpck6_read_regr(PIA *pi, int cont, int reg)
+{
+	unsigned int out;
+
+	/* check for bad settings */
+	if (reg<0 || reg>7 || cont<0 || cont>2)
+	{
+		return(-1);
+	}
+	out=ppc6_rd_port(PPCSTRUCT(pi),cont?reg|8:reg);
+	return(out);
+}
+
+static void bpck6_write_regr(PIA *pi, int cont, int reg, int val)
+{
+	/* check for bad settings */
+	if (reg>=0 && reg<=7 && cont>=0 && cont<=1)
+	{
+		ppc6_wr_port(PPCSTRUCT(pi),cont?reg|8:reg,(u8)val);
+	}
+}
+
+static void bpck6_write_block( PIA *pi, char * buf, int len )
+{
+	ppc6_wr_port16_blk(PPCSTRUCT(pi),ATAPI_DATA,buf,(u32)len>>1); 
+}
+
+static void bpck6_read_block( PIA *pi, char * buf, int len )
+{
+	ppc6_rd_port16_blk(PPCSTRUCT(pi),ATAPI_DATA,buf,(u32)len>>1);
+}
+
+static void bpck6_connect ( PIA *pi  )
+{
+	if(verbose)
+	{
+		printk(KERN_DEBUG "connect\n");
+	}
+
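+	/* map the paride mode number onto the PPC library's internal
+	   mode codes: 0 -> 1 (4-bit), 1 -> 3 (8-bit),
+	   2..4 -> 4..6 (EPP-8/16/32) */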
+	if(pi->mode >=2)
+  	{
+		PPCSTRUCT(pi)->mode=4+pi->mode-2;	
+	}
+	else if(pi->mode==1)
+	{
+		PPCSTRUCT(pi)->mode=3;	
+	}
+	else
+	{
+		PPCSTRUCT(pi)->mode=1;		
+	}
+
+	ppc6_open(PPCSTRUCT(pi));  
+	ppc6_wr_extout(PPCSTRUCT(pi),0x3);
+}
+
+static void bpck6_disconnect ( PIA *pi )
+{
+	if(verbose)
+	{
+		printk("disconnect\n");
+	}
+	ppc6_wr_extout(PPCSTRUCT(pi),0x0);
+	ppc6_close(PPCSTRUCT(pi));
+}
+
+static int bpck6_test_port ( PIA *pi )   /* returns the highest mode this port supports */
+{
+	if(verbose)
+	{
+		printk(KERN_DEBUG "PARPORT indicates modes=%x for lp=0x%lx\n",
+               		((struct pardevice*)(pi->pardev))->port->modes,
+			((struct pardevice *)(pi->pardev))->port->base); 
+	}
+
+	/*copy over duplicate stuff.. initialize state info*/
+	PPCSTRUCT(pi)->ppc_id=pi->unit;
+	PPCSTRUCT(pi)->lpt_addr=pi->port;
+
+#ifdef CONFIG_PARPORT_PC_MODULE
+#define CONFIG_PARPORT_PC
+#endif
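+/* the define above folds the parport_pc-as-module case into the
+   built-in test below, so both configurations take the same branch */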
+
+#ifdef CONFIG_PARPORT_PC
+	/* look at the parport device to see what modes we can use */
+	if(((struct pardevice *)(pi->pardev))->port->modes & 
+		(PARPORT_MODE_EPP)
+          )
+	{
+		return 5; /* Can do EPP*/
+	}
+	else if(((struct pardevice *)(pi->pardev))->port->modes & 
+			(PARPORT_MODE_TRISTATE)
+               )
+	{
+		return 2;
+	}
+	else /*Just flat SPP*/
+	{
+		return 1;
+	}
+#else
+	/* there is no way of knowing what kind of port we have
+	   default to the highest mode possible */
+	return 5;
+#endif
+}
+
+static int bpck6_probe_unit ( PIA *pi )
+{
+	int out;
+
+	if(verbose)
+	{
+		printk(KERN_DEBUG "PROBE UNIT %x on port:%x\n",pi->unit,pi->port);
+	}
+
+	/*SET PPC UNIT NUMBER*/
+	PPCSTRUCT(pi)->ppc_id=pi->unit;
+
+	/*LOWER DOWN TO UNIDIRECTIONAL*/
+	PPCSTRUCT(pi)->mode=1;		
+
+	out=ppc6_open(PPCSTRUCT(pi));
+
+	if(verbose)
+	{
+		printk(KERN_DEBUG "ppc_open returned %2x\n",out);
+	}
+
+	if(out)
+	{
+		ppc6_close(PPCSTRUCT(pi));
+		if(verbose)
+		{
+			printk(KERN_DEBUG "leaving probe\n");
+		}
+		return(1);
+	}
+	else
+	{
+		if(verbose)
+		{
+			printk(KERN_DEBUG "Failed open\n");
+		}
+		return(0);
+	}
+}
+
+static void bpck6_log_adapter( PIA *pi, char * scratch, int verbose )
+{
+	char *mode_string[5]=
+		{"4-bit","8-bit","EPP-8","EPP-16","EPP-32"};
+
+	printk("%s: BACKPACK Protocol Driver V"BACKPACK_VERSION"\n",pi->device);
+	printk("%s: Copyright 2001 by Micro Solutions, Inc., DeKalb IL.\n",pi->device);
+	printk("%s: BACKPACK %s, Micro Solutions BACKPACK Drive at 0x%x\n",
+		pi->device,BACKPACK_VERSION,pi->port);
+	printk("%s: Unit: %d Mode:%d (%s) Delay %d\n",pi->device,
+		pi->unit,pi->mode,mode_string[pi->mode],pi->delay);
+}
+
+static int bpck6_init_proto(PIA *pi)
+{
+	Interface *p = kmalloc(sizeof(Interface), GFP_KERNEL);
+
+	if (p) {
+		memset(p, 0, sizeof(Interface));
+		pi->private = (unsigned long)p;
+		return 0;
+	}
+
+	printk(KERN_ERR "%s: ERROR COULDN'T ALLOCATE MEMORY\n", pi->device); 
+	return -1;
+}
+
+static void bpck6_release_proto(PIA *pi)
+{
+	kfree((void *)(pi->private)); 
+}
+
+static struct pi_protocol bpck6 = {
+	.owner		= THIS_MODULE,
+	.name		= "bpck6",
+	.max_mode	= 5,
+	.epp_first	= 2, /* 2-5 use epp (need 8 ports) */
+	.max_units	= 255,
+	.write_regr	= bpck6_write_regr,
+	.read_regr	= bpck6_read_regr,
+	.write_block	= bpck6_write_block,
+	.read_block	= bpck6_read_block,
+	.connect	= bpck6_connect,
+	.disconnect	= bpck6_disconnect,
+	.test_port	= bpck6_test_port,
+	.probe_unit	= bpck6_probe_unit,
+	.log_adapter	= bpck6_log_adapter,
+	.init_proto	= bpck6_init_proto,
+	.release_proto	= bpck6_release_proto,
+};
+
+static int __init bpck6_init(void)
+{
+	printk(KERN_INFO "bpck6: BACKPACK Protocol Driver V"BACKPACK_VERSION"\n");
+	printk(KERN_INFO "bpck6: Copyright 2001 by Micro Solutions, Inc., DeKalb IL. USA\n");
+	if(verbose)
+		printk(KERN_DEBUG "bpck6: verbose debug enabled.\n");
+	return pi_register(&bpck6) - 1;  
+}
+
+static void __exit bpck6_exit(void)
+{
+	pi_unregister(&bpck6);
+}
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Micro Solutions Inc.");
+MODULE_DESCRIPTION("BACKPACK Protocol module, compatible with PARIDE");
+module_param(verbose, int, 0644);
+module_init(bpck6_init)
+module_exit(bpck6_exit)
diff --git a/drivers/block/paride/comm.c b/drivers/block/paride/comm.c
new file mode 100644
index 0000000..d842956
--- /dev/null
+++ b/drivers/block/paride/comm.c
@@ -0,0 +1,218 @@
+/* 
+        comm.c    (c) 1997-8  Grant R. Guenther <grant@torque.net>
+                              Under the terms of the GNU General Public License.
+
+	comm.c is a low-level protocol driver for some older models
+	of the DataStor "Commuter" parallel to IDE adapter.  Some of
+	the parallel port devices marketed by Arista currently
+	use this adapter.
+*/
+
+/* Changes:
+
+	1.01	GRG 1998.05.05  init_proto, release_proto
+
+*/
+
+#define COMM_VERSION      "1.01"
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <asm/io.h>
+
+#include "paride.h"
+
+/* mode codes:  0  nybble reads, 8-bit writes
+                1  8-bit reads and writes
+                2  8-bit EPP mode
+                3  EPP-16
+                4  EPP-32
+*/
+
+#define j44(a,b)	(((a>>3)&0x0f)|((b<<1)&0xf0))
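+/* j44() reassembles one data byte from two nybble reads on the
+   status port: bits 3-6 of the first read form the low nybble,
+   bits 3-6 of the second the high nybble */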
+
+#define P1	w2(5);w2(0xd);w2(0xd);w2(5);w2(4);
+#define P2	w2(5);w2(7);w2(7);w2(5);w2(4);
+
+/* cont = 0 - access the IDE register file 
+   cont = 1 - access the IDE command set 
+*/
+
+static int  cont_map[2] = { 0x08, 0x10 };
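+/* the cont_map[] offset is folded into the register number, steering
+   the access to either the task file or the command/control block */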
+
+static int comm_read_regr( PIA *pi, int cont, int regr )
+
+{       int     l, h, r;
+
+        r = regr + cont_map[cont];
+
+        switch (pi->mode)  {
+
+        case 0: w0(r); P1; w0(0);
+        	w2(6); l = r1(); w0(0x80); h = r1(); w2(4);
+                return j44(l,h);
+
+        case 1: w0(r+0x20); P1; 
+        	w0(0); w2(0x26); h = r0(); w2(4);
+                return h;
+
+	case 2:
+	case 3:
+        case 4: w3(r+0x20); r1(); 
+        	w2(0x24); h = r4(); w2(4);
+                return h;
+
+        }
+        return -1;
+}       
+
+static void comm_write_regr( PIA *pi, int cont, int regr, int val )
+
+{       int  r;
+
+        r = regr + cont_map[cont];
+
+        switch (pi->mode)  {
+
+        case 0:
+        case 1: w0(r); P1; w0(val); P2;
+		break;
+
+	case 2:
+	case 3:
+        case 4: w3(r); r1(); w4(val); 
+                break;
+        }
+}
+
+static void comm_connect ( PIA *pi  )
+
+{       pi->saved_r0 = r0();
+        pi->saved_r2 = r2();
+        w2(4); w0(0xff); w2(6);
+        w2(4); w0(0xaa); w2(6);
+        w2(4); w0(0x00); w2(6);
+        w2(4); w0(0x87); w2(6);
+        w2(4); w0(0xe0); w2(0xc); w2(0xc); w2(4);
+}
+
+static void comm_disconnect ( PIA *pi )
+
+{       w2(0); w2(0); w2(0); w2(4); 
+	w0(pi->saved_r0);
+        w2(pi->saved_r2);
+} 
+
+static void comm_read_block( PIA *pi, char * buf, int count )
+
+{       int     i, l, h;
+
+        switch (pi->mode) {
+        
+        case 0: w0(0x48); P1;
+                for(i=0;i<count;i++) {
+                        w0(0); w2(6); l = r1();
+                        w0(0x80); h = r1(); w2(4);
+                        buf[i] = j44(l,h);
+                }
+                break;
+
+        case 1: w0(0x68); P1; w0(0);
+                for(i=0;i<count;i++) {
+                        w2(0x26); buf[i] = r0(); w2(0x24);
+                }
+		w2(4);
+		break;
+		
+	case 2: w3(0x68); r1(); w2(0x24);
+		for (i=0;i<count;i++) buf[i] = r4();
+		w2(4);
+		break;
+
+        case 3: w3(0x68); r1(); w2(0x24);
+                for (i=0;i<count/2;i++) ((u16 *)buf)[i] = r4w();
+                w2(4);
+                break;
+
+        case 4: w3(0x68); r1(); w2(0x24);
+                for (i=0;i<count/4;i++) ((u32 *)buf)[i] = r4l();
+                w2(4);
+                break;
+		
+	}
+}
+
+/* NB: Watch out for the byte swapped writes ! */
+
+static void comm_write_block( PIA *pi, char * buf, int count )
+
+{       int	k;
+
+        switch (pi->mode) {
+
+        case 0:
+        case 1: w0(0x68); P1;
+        	for (k=0;k<count;k++) {
+                        w2(5); w0(buf[k^1]); w2(7);
+                }
+                w2(5); w2(4);
+                break;
+
+        case 2: w3(0x48); r1();
+                for (k=0;k<count;k++) w4(buf[k^1]);
+                break;
+
+        case 3: w3(0x48); r1();
+                for (k=0;k<count/2;k++) w4w(pi_swab16(buf,k));
+                break;
+
+        case 4: w3(0x48); r1();
+                for (k=0;k<count/4;k++) w4l(pi_swab32(buf,k));
+                break;
+
+
+        }
+}
+
+static void comm_log_adapter( PIA *pi, char * scratch, int verbose )
+
+{       char    *mode_string[5] = {"4-bit","8-bit","EPP-8","EPP-16","EPP-32"};
+
+        printk("%s: comm %s, DataStor Commuter at 0x%x, ",
+                pi->device,COMM_VERSION,pi->port);
+        printk("mode %d (%s), delay %d\n",pi->mode,
+		mode_string[pi->mode],pi->delay);
+
+}
+
+static struct pi_protocol comm = {
+	.owner		= THIS_MODULE,
+	.name		= "comm",
+	.max_mode	= 5,
+	.epp_first	= 2,
+	.default_delay	= 1,
+	.max_units	= 1,
+	.write_regr	= comm_write_regr,
+	.read_regr	= comm_read_regr,
+	.write_block	= comm_write_block,
+	.read_block	= comm_read_block,
+	.connect	= comm_connect,
+	.disconnect	= comm_disconnect,
+	.log_adapter	= comm_log_adapter,
+};
+
+static int __init comm_init(void)
+{
+	return pi_register(&comm)-1;
+}
+
+static void __exit comm_exit(void)
+{
+	pi_unregister(&comm);
+}
+
+MODULE_LICENSE("GPL");
+module_init(comm_init)
+module_exit(comm_exit)
diff --git a/drivers/block/paride/dstr.c b/drivers/block/paride/dstr.c
new file mode 100644
index 0000000..04d53bf
--- /dev/null
+++ b/drivers/block/paride/dstr.c
@@ -0,0 +1,233 @@
+/* 
+        dstr.c    (c) 1997-8  Grant R. Guenther <grant@torque.net>
+                              Under the terms of the GNU General Public License.
+
+        dstr.c is a low-level protocol driver for the 
+        DataStor EP2000 parallel to IDE adapter chip.
+
+*/
+
+/* Changes:
+
+        1.01    GRG 1998.05.06 init_proto, release_proto
+
+*/
+
+#define DSTR_VERSION      "1.01"
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <asm/io.h>
+
+#include "paride.h"
+
+/* mode codes:  0  nybble reads, 8-bit writes
+                1  8-bit reads and writes
+                2  8-bit EPP mode
+		3  EPP-16
+		4  EPP-32
+*/
+
+#define j44(a,b)  (((a>>3)&0x07)|((~a>>4)&0x08)|((b<<1)&0x70)|((~b)&0x80))
+
+#define P1	w2(5);w2(0xd);w2(5);w2(4);
+#define P2	w2(5);w2(7);w2(5);w2(4);
+#define P3      w2(6);w2(4);w2(6);w2(4);
+
+/* cont = 0 - access the IDE register file 
+   cont = 1 - access the IDE command set 
+*/
+
+static int  cont_map[2] = { 0x20, 0x40 };
+
+static int dstr_read_regr( PIA *pi, int cont, int regr )
+
+{       int     a, b, r;
+
+        r = regr + cont_map[cont];
+
+	w0(0x81); P1;
+	if (pi->mode) { w0(0x11); } else { w0(1); }
+	P2; w0(r); P1;
+
+        switch (pi->mode)  {
+
+        case 0: w2(6); a = r1(); w2(4); w2(6); b = r1(); w2(4);
+                return j44(a,b);
+
+        case 1: w0(0); w2(0x26); a = r0(); w2(4);
+                return a;
+
+	case 2:
+	case 3:
+        case 4: w2(0x24); a = r4(); w2(4);
+                return a;
+
+        }
+        return -1;
+}       
+
+static void dstr_write_regr(  PIA *pi, int cont, int regr, int val )
+
+{       int  r;
+
+        r = regr + cont_map[cont];
+
+	w0(0x81); P1; 
+	if (pi->mode >= 2) { w0(0x11); } else { w0(1); }
+	P2; w0(r); P1;
+	
+        switch (pi->mode)  {
+
+        case 0:
+        case 1: w0(val); w2(5); w2(7); w2(5); w2(4);
+		break;
+
+	case 2:
+	case 3:
+        case 4: w4(val); 
+                break;
+        }
+}
+
+#define  CCP(x)  w0(0xff);w2(0xc);w2(4);\
+		 w0(0xaa);w0(0x55);w0(0);w0(0xff);w0(0x87);w0(0x78);\
+		 w0(x);w2(5);w2(4);
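+/* CCP() broadcasts the EP2000 attention sequence (0xaa 0x55 0x00 0xff
+   followed by the 0x87 0x78 signature) and then the command byte x;
+   dstr_connect() uses CCP(0xe0) and dstr_disconnect() CCP(0x30) */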
+
+static void dstr_connect ( PIA *pi  )
+
+{       pi->saved_r0 = r0();
+        pi->saved_r2 = r2();
+        w2(4); CCP(0xe0); w0(0xff);
+}
+
+static void dstr_disconnect ( PIA *pi )
+
+{       CCP(0x30);
+        w0(pi->saved_r0);
+        w2(pi->saved_r2);
+} 
+
+static void dstr_read_block( PIA *pi, char * buf, int count )
+
+{       int     k, a, b;
+
+        w0(0x81); P1;
+        if (pi->mode) { w0(0x19); } else { w0(9); }
+	P2; w0(0x82); P1; P3; w0(0x20); P1;
+
+        switch (pi->mode) {
+
+        case 0: for (k=0;k<count;k++) {
+                        w2(6); a = r1(); w2(4);
+                        w2(6); b = r1(); w2(4);
+                        buf[k] = j44(a,b);
+                } 
+                break;
+
+        case 1: w0(0);
+                for (k=0;k<count;k++) {
+                        w2(0x26); buf[k] = r0(); w2(0x24);
+                }
+                w2(4);
+                break;
+
+        case 2: w2(0x24); 
+                for (k=0;k<count;k++) buf[k] = r4();
+                w2(4);
+                break;
+
+        case 3: w2(0x24); 
+                for (k=0;k<count/2;k++) ((u16 *)buf)[k] = r4w();
+                w2(4);
+                break;
+
+        case 4: w2(0x24); 
+                for (k=0;k<count/4;k++) ((u32 *)buf)[k] = r4l();
+                w2(4);
+                break;
+
+        }
+}
+
+static void dstr_write_block( PIA *pi, char * buf, int count )
+
+{       int	k;
+
+        w0(0x81); P1;
+        if (pi->mode) { w0(0x19); } else { w0(9); }
+        P2; w0(0x82); P1; P3; w0(0x20); P1;
+
+        switch (pi->mode) {
+
+        case 0:
+        case 1: for (k=0;k<count;k++) {
+                        w2(5); w0(buf[k]); w2(7);
+                }
+                w2(5); w2(4);
+                break;
+
+        case 2: w2(0xc5);
+                for (k=0;k<count;k++) w4(buf[k]);
+		w2(0xc4);
+                break;
+
+        case 3: w2(0xc5);
+                for (k=0;k<count/2;k++) w4w(((u16 *)buf)[k]);
+                w2(0xc4);
+                break;
+
+        case 4: w2(0xc5);
+                for (k=0;k<count/4;k++) w4l(((u32 *)buf)[k]);
+                w2(0xc4);
+                break;
+
+        }
+}
+
+
+static void dstr_log_adapter( PIA *pi, char * scratch, int verbose )
+
+{       char    *mode_string[5] = {"4-bit","8-bit","EPP-8",
+				   "EPP-16","EPP-32"};
+
+        printk("%s: dstr %s, DataStor EP2000 at 0x%x, ",
+                pi->device,DSTR_VERSION,pi->port);
+        printk("mode %d (%s), delay %d\n",pi->mode,
+		mode_string[pi->mode],pi->delay);
+
+}
+
+static struct pi_protocol dstr = {
+	.owner		= THIS_MODULE,
+	.name		= "dstr",
+	.max_mode	= 5,
+	.epp_first	= 2,
+	.default_delay	= 1,
+	.max_units	= 1,
+	.write_regr	= dstr_write_regr,
+	.read_regr	= dstr_read_regr,
+	.write_block	= dstr_write_block,
+	.read_block	= dstr_read_block,
+	.connect	= dstr_connect,
+	.disconnect	= dstr_disconnect,
+	.log_adapter	= dstr_log_adapter,
+};
+
+static int __init dstr_init(void)
+{
+	return pi_register(&dstr)-1;
+}
+
+static void __exit dstr_exit(void)
+{
+	pi_unregister(&dstr);
+}
+
+MODULE_LICENSE("GPL");
+module_init(dstr_init)
+module_exit(dstr_exit)
diff --git a/drivers/block/paride/epat.c b/drivers/block/paride/epat.c
new file mode 100644
index 0000000..55d1c0a
--- /dev/null
+++ b/drivers/block/paride/epat.c
@@ -0,0 +1,340 @@
+/* 
+        epat.c  (c) 1997-8  Grant R. Guenther <grant@torque.net>
+                            Under the terms of the GNU General Public License.
+
+	This is the low level protocol driver for the EPAT parallel
+        to IDE adapter from Shuttle Technologies.  This adapter is
+        used in many popular parallel port disk products such as the
+        SyQuest EZ drives, the Avatar Shark and the Imation SuperDisk.
+	
+*/
+
+/* Changes:
+
+        1.01    GRG 1998.05.06 init_proto, release_proto
+        1.02    Joshua b. Jore CPP(renamed), epat_connect, epat_disconnect
+
+*/
+
+#define EPAT_VERSION      "1.02"
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <asm/io.h>
+
+#include "paride.h"
+
+#define j44(a,b)		(((a>>4)&0x0f)+(b&0xf0))
+#define j53(a,b)		(((a>>3)&0x1f)+((b<<4)&0xe0))
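+/* j44() and j53() reassemble a byte from two 4-bit status reads and
+   from a 5+3-bit split read respectively, matching modes 0 ("4-bit")
+   and 1 ("5/3") below */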
+
+static int epatc8;
+
+module_param(epatc8, int, 0);
+MODULE_PARM_DESC(epatc8, "support for the Shuttle EP1284 chip, "
+	"used in any recent Imation SuperDisk (LS-120) drive.");
+
+/* cont =  0   IDE register file
+   cont =  1   IDE control registers
+   cont =  2   internal EPAT registers
+*/
+
+static int cont_map[3] = { 0x18, 0x10, 0 };
+
+static void epat_write_regr( PIA *pi, int cont, int regr, int val)
+
+{	int r;
+
+	r = regr + cont_map[cont];
+
+	switch (pi->mode) {
+
+	case 0:
+	case 1:
+	case 2:	w0(0x60+r); w2(1); w0(val); w2(4);
+		break;
+
+	case 3:
+	case 4:
+	case 5: w3(0x40+r); w4(val);
+		break;
+
+	}
+}
+
+static int epat_read_regr( PIA *pi, int cont, int regr )
+
+{	int  a, b, r;
+
+	r = regr + cont_map[cont];
+
+	switch (pi->mode) {
+
+	case 0:	w0(r); w2(1); w2(3); 
+		a = r1(); w2(4); b = r1();
+		return j44(a,b);
+
+	case 1: w0(0x40+r); w2(1); w2(4);
+		a = r1(); b = r2(); w0(0xff);
+		return j53(a,b);
+
+	case 2: w0(0x20+r); w2(1); w2(0x25);
+		a = r0(); w2(4);
+		return a;
+
+	case 3:
+	case 4:
+	case 5: w3(r); w2(0x24); a = r4(); w2(4);
+		return a;
+
+	}
+	return -1;	/* never gets here */
+}
+
+static void epat_read_block( PIA *pi, char * buf, int count )
+
+{	int  k, ph, a, b;
+
+	switch (pi->mode) {
+
+	case 0:	w0(7); w2(1); w2(3); w0(0xff);
+		ph = 0;
+		for(k=0;k<count;k++) {
+			if (k == count-1) w0(0xfd);
+			w2(6+ph); a = r1();
+			if (a & 8) b = a; 
+			  else { w2(4+ph); b = r1(); }
+			buf[k] = j44(a,b);
+			ph =  1 - ph;
+		}
+		w0(0); w2(4);
+		break;
+
+	case 1: w0(0x47); w2(1); w2(5); w0(0xff);
+		ph = 0;
+		for(k=0;k<count;k++) {
+			if (k == count-1) w0(0xfd); 
+			w2(4+ph);
+			a = r1(); b = r2();
+			buf[k] = j53(a,b);
+			ph = 1 - ph;
+		}
+		w0(0); w2(4);
+		break;
+
+	case 2: w0(0x27); w2(1); w2(0x25); w0(0);
+		ph = 0;
+		for(k=0;k<count-1;k++) {
+			w2(0x24+ph);
+			buf[k] = r0();
+			ph = 1 - ph;
+		}
+		w2(0x26); w2(0x27); buf[count-1] = r0(); 
+		w2(0x25); w2(4);
+		break;
+
+	case 3: w3(0x80); w2(0x24);
+		for(k=0;k<count-1;k++) buf[k] = r4();
+		w2(4); w3(0xa0); w2(0x24); buf[count-1] = r4();
+		w2(4);
+		break;
+
+	case 4: w3(0x80); w2(0x24);
+		for(k=0;k<(count/2)-1;k++) ((u16 *)buf)[k] = r4w();
+		buf[count-2] = r4();
+		w2(4); w3(0xa0); w2(0x24); buf[count-1] = r4();
+		w2(4);
+		break;
+
+	case 5: w3(0x80); w2(0x24);
+		for(k=0;k<(count/4)-1;k++) ((u32 *)buf)[k] = r4l();
+		for(k=count-4;k<count-1;k++) buf[k] = r4();
+		w2(4); w3(0xa0); w2(0x24); buf[count-1] = r4();
+		w2(4);
+		break;
+
+	}
+}
+
+static void epat_write_block( PIA *pi, char * buf, int count )   
+
+{	int ph, k;
+
+	switch (pi->mode) {
+
+	case 0:
+	case 1:
+	case 2: w0(0x67); w2(1); w2(5);
+		ph = 0;
+		for(k=0;k<count;k++) {
+		  	w0(buf[k]);
+			w2(4+ph);
+			ph = 1 - ph;
+		}
+		w2(7); w2(4);
+		break;
+
+	case 3: w3(0xc0); 
+		for(k=0;k<count;k++) w4(buf[k]);
+		w2(4);
+		break;
+
+	case 4: w3(0xc0); 
+		for(k=0;k<(count/2);k++) w4w(((u16 *)buf)[k]);
+		w2(4);
+		break;
+
+	case 5: w3(0xc0); 
+		for(k=0;k<(count/4);k++) w4l(((u32 *)buf)[k]);
+		w2(4);
+		break;
+
+	}
+}
+
+/* these macros access the EPAT registers in native addressing */
+
+#define	WR(r,v)		epat_write_regr(pi,2,r,v)
+#define	RR(r)		(epat_read_regr(pi,2,r))
+
+/* and these access the IDE task file */
+
+#define WRi(r,v)         epat_write_regr(pi,0,r,v)
+#define RRi(r)           (epat_read_regr(pi,0,r))
+
+/* FIXME:  the CPP stuff should be fixed to handle multiple EPATs on a chain */
+
+#define CPP(x) 	w2(4);w0(0x22);w0(0xaa);w0(0x55);w0(0);w0(0xff);\
+                w0(0x87);w0(0x78);w0(x);w2(4);w2(5);w2(4);w0(0xff);
+
+static void epat_connect ( PIA *pi )
+
+{       pi->saved_r0 = r0();
+        pi->saved_r2 = r2();
+
+ 	/* Initialize the chip */
+	CPP(0);
+
+	if (epatc8) {
+		CPP(0x40);CPP(0xe0);
+		w0(0);w2(1);w2(4);
+		WR(0x8,0x12);WR(0xc,0x14);WR(0x12,0x10);
+		WR(0xe,0xf);WR(0xf,4);
+		/* WR(0xe,0xa);WR(0xf,4); */
+		WR(0xe,0xd);WR(0xf,0);
+		/* CPP(0x30); */
+	}
+
+        /* Connect to the chip */
+	CPP(0xe0);
+        w0(0);w2(1);w2(4); /* Idle into SPP */
+        if (pi->mode >= 3) {
+          w0(0);w2(1);w2(4);w2(0xc);
+          /* Request EPP */
+          w0(0x40);w2(6);w2(7);w2(4);w2(0xc);w2(4);
+        }
+
+	if (!epatc8) {
+		WR(8,0x10); WR(0xc,0x14); WR(0xa,0x38); WR(0x12,0x10);
+	}
+}
+
+static void epat_disconnect (PIA *pi)
+{	CPP(0x30);
+	w0(pi->saved_r0);
+	w2(pi->saved_r2);
+}
+
+static int epat_test_proto( PIA *pi, char * scratch, int verbose )
+
+{       int     k, j, f, cc;
+	int	e[2] = {0,0};
+
+        epat_connect(pi);
+	cc = RR(0xd);
+	epat_disconnect(pi);
+
+	epat_connect(pi);
+	for (j=0;j<2;j++) {
+  	    WRi(6,0xa0+j*0x10);
+            for (k=0;k<256;k++) {
+                WRi(2,k^0xaa);
+                WRi(3,k^0x55);
+                if (RRi(2) != (k^0xaa)) e[j]++;
+                }
+	    }
+        epat_disconnect(pi);
+
+        f = 0;
+        epat_connect(pi);
+        WR(0x13,1); WR(0x13,0); WR(0xa,0x11);
+        epat_read_block(pi,scratch,512);
+	
+        for (k=0;k<256;k++) {
+            if ((scratch[2*k] & 0xff) != k) f++;
+            if ((scratch[2*k+1] & 0xff) != (0xff-k)) f++;
+        }
+        epat_disconnect(pi);
+
+        if (verbose)  {
+            printk("%s: epat: port 0x%x, mode %d, ccr %x, test=(%d,%d,%d)\n",
+		   pi->device,pi->port,pi->mode,cc,e[0],e[1],f);
+	}
+	
+        return (e[0] && e[1]) || f;
+}
+
+static void epat_log_adapter( PIA *pi, char * scratch, int verbose )
+
+{	int	ver;
+        char    *mode_string[6] = 
+		   {"4-bit","5/3","8-bit","EPP-8","EPP-16","EPP-32"};
+
+	epat_connect(pi);
+	WR(0xa,0x38);		/* read the version code */
+        ver = RR(0xb);
+        epat_disconnect(pi);
+
+	printk("%s: epat %s, Shuttle EPAT chip %x at 0x%x, ",
+		pi->device,EPAT_VERSION,ver,pi->port);
+	printk("mode %d (%s), delay %d\n",pi->mode,
+		mode_string[pi->mode],pi->delay);
+
+}
+
+static struct pi_protocol epat = {
+	.owner		= THIS_MODULE,
+	.name		= "epat",
+	.max_mode	= 6,
+	.epp_first	= 3,
+	.default_delay	= 1,
+	.max_units	= 1,
+	.write_regr	= epat_write_regr,
+	.read_regr	= epat_read_regr,
+	.write_block	= epat_write_block,
+	.read_block	= epat_read_block,
+	.connect	= epat_connect,
+	.disconnect	= epat_disconnect,
+	.test_proto	= epat_test_proto,
+	.log_adapter	= epat_log_adapter,
+};
+
+static int __init epat_init(void)
+{
+#ifdef CONFIG_PARIDE_EPATC8
+	epatc8 = 1;
+#endif
+	return pi_register(&epat)-1;
+}
+
+static void __exit epat_exit(void)
+{
+	pi_unregister(&epat);
+}
+
+MODULE_LICENSE("GPL");
+module_init(epat_init)
+module_exit(epat_exit)
diff --git a/drivers/block/paride/epia.c b/drivers/block/paride/epia.c
new file mode 100644
index 0000000..0f2e0c2
--- /dev/null
+++ b/drivers/block/paride/epia.c
@@ -0,0 +1,316 @@
+/* 
+        epia.c    (c) 1997-8  Grant R. Guenther <grant@torque.net>
+                              Under the terms of the GNU General Public License.
+
+        epia.c is a low-level protocol driver for the Shuttle Technologies
+	EPIA parallel to IDE adapter chip.  This device is now obsolete
+	and has been replaced with the EPAT chip, which is supported
+	by epat.c; however, some devices based on the EPIA are still
+	available.
+
+*/
+
+/* Changes:
+
+        1.01    GRG 1998.05.06 init_proto, release_proto
+	1.02    GRG 1998.06.17 support older versions of EPIA
+
+*/
+
+#define EPIA_VERSION      "1.02"
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <asm/io.h>
+
+#include "paride.h"
+
+/* mode codes:  0  nybble reads on port 1, 8-bit writes
+                1  5/3 reads on ports 1 & 2, 8-bit writes
+                2  8-bit reads and writes
+                3  8-bit EPP mode
+		4  16-bit EPP
+		5  32-bit EPP
+*/
+
+#define j44(a,b)                (((a>>4)&0x0f)+(b&0xf0))
+#define j53(a,b)                (((a>>3)&0x1f)+((b<<4)&0xe0))
+
+/* cont =  0   IDE register file
+   cont =  1   IDE control registers
+*/
+
+static int cont_map[2] = { 0, 0x80 };
+
+static int epia_read_regr( PIA *pi, int cont, int regr )
+
+{       int     a, b, r;
+
+	regr += cont_map[cont];
+
+        switch (pi->mode)  {
+
+        case 0: r = regr^0x39;
+                w0(r); w2(1); w2(3); w0(r);
+                a = r1(); w2(1); b = r1(); w2(4);
+                return j44(a,b);
+
+        case 1: r = regr^0x31;
+                w0(r); w2(1); w0(r&0x37); 
+                w2(3); w2(5); w0(r|0xf0);
+                a = r1(); b = r2(); w2(4);
+                return j53(a,b);
+
+        case 2: r = regr^0x29;
+                w0(r); w2(1); w2(0x21); w2(0x23);
+                a = r0(); w2(4);
+                return a;
+
+	case 3:
+	case 4:
+        case 5: w3(regr); w2(0x24); a = r4(); w2(4);
+                return a;
+
+        }
+        return -1;
+}       
+
+static void epia_write_regr( PIA *pi, int cont, int regr, int val)
+
+{       int  r;
+
+	regr += cont_map[cont];
+
+        switch (pi->mode)  {
+
+        case 0:
+        case 1:
+        case 2: r = regr^0x19;
+                w0(r); w2(1); w0(val); w2(3); w2(4);
+                break;
+
+	case 3:
+	case 4:
+        case 5: r = regr^0x40;
+                w3(r); w4(val); w2(4);
+                break;
+        }
+}
+
+#define WR(r,v)         epia_write_regr(pi,0,r,v)
+#define RR(r)           (epia_read_regr(pi,0,r))
+
+/* The use of register 0x84 is entirely unclear - it seems to control
+   some EPP counters.  Currently we know about 3 different block
+   sizes:  the standard 512 byte reads and writes, 12 byte writes and
+   2048 byte reads (the last two being used in the CD-ROM drivers).
+*/
+
+static void epia_connect ( PIA *pi  )
+
+{       pi->saved_r0 = r0();
+        pi->saved_r2 = r2();
+
+        w2(4); w0(0xa0); w0(0x50); w0(0xc0); w0(0x30); w0(0xa0); w0(0);
+        w2(1); w2(4);
+        if (pi->mode >= 3) { 
+                w0(0xa); w2(1); w2(4); w0(0x82); w2(4); w2(0xc); w2(4);
+                w2(0x24); w2(0x26); w2(4);
+        }
+        WR(0x86,8);  
+}
+
+static void epia_disconnect ( PIA *pi )
+
+{       /* WR(0x84,0x10); */
+        w0(pi->saved_r0);
+        w2(1); w2(4);
+        w0(pi->saved_r0);
+        w2(pi->saved_r2);
+} 
+
+static void epia_read_block( PIA *pi, char * buf, int count )
+
+{       int     k, ph, a, b;
+
+        switch (pi->mode) {
+
+        case 0: w0(0x81); w2(1); w2(3); w0(0xc1);
+                ph = 1;
+                for (k=0;k<count;k++) {
+                        w2(2+ph); a = r1();
+                        w2(4+ph); b = r1();
+                        buf[k] = j44(a,b);
+                        ph = 1 - ph;
+                } 
+                w0(0); w2(4);
+                break;
+
+        case 1: w0(0x91); w2(1); w0(0x10); w2(3); 
+                w0(0x51); w2(5); w0(0xd1); 
+                ph = 1;
+                for (k=0;k<count;k++) {
+                        w2(4+ph);
+                        a = r1(); b = r2();
+                        buf[k] = j53(a,b);
+                        ph = 1 - ph;
+                }
+                w0(0); w2(4);
+                break;
+
+        case 2: w0(0x89); w2(1); w2(0x23); w2(0x21); 
+                ph = 1;
+                for (k=0;k<count;k++) {
+                        w2(0x24+ph);
+                        buf[k] = r0();
+                        ph = 1 - ph;
+                }
+                w2(6); w2(4);
+                break;
+
+        case 3: if (count > 512) WR(0x84,3);
+		w3(0); w2(0x24);
+                for (k=0;k<count;k++) buf[k] = r4();
+                w2(4); WR(0x84,0);
+                break;
+
+        case 4: if (count > 512) WR(0x84,3);
+		w3(0); w2(0x24);
+		for (k=0;k<count/2;k++) ((u16 *)buf)[k] = r4w();
+                w2(4); WR(0x84,0);
+                break;
+
+        case 5: if (count > 512) WR(0x84,3);
+		w3(0); w2(0x24);
+                for (k=0;k<count/4;k++) ((u32 *)buf)[k] = r4l();
+                w2(4); WR(0x84,0);
+                break;
+
+        }
+}
+
+static void epia_write_block( PIA *pi, char * buf, int count )
+
+{       int     ph, k, last, d;
+
+        switch (pi->mode) {
+
+        case 0:
+        case 1:
+        case 2: w0(0xa1); w2(1); w2(3); w2(1); w2(5);
+                ph = 0;  last = 0x8000;
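+                /* 'last' starts at an impossible byte value so the
+                   first data write always happens; identical bytes
+                   then skip the redundant w0() */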
+                for (k=0;k<count;k++) {
+                        d = buf[k];
+                        if (d != last) { last = d; w0(d); }
+                        w2(4+ph);
+                        ph = 1 - ph;
+                }
+                w2(7); w2(4);
+                break;
+
+        case 3: if (count < 512) WR(0x84,1);
+		w3(0x40);
+                for (k=0;k<count;k++) w4(buf[k]);
+		if (count < 512) WR(0x84,0);
+                break;
+
+        case 4: if (count < 512) WR(0x84,1);
+		w3(0x40);
+                for (k=0;k<count/2;k++) w4w(((u16 *)buf)[k]);
+		if (count < 512) WR(0x84,0);
+                break;
+
+        case 5: if (count < 512) WR(0x84,1);
+		w3(0x40);
+                for (k=0;k<count/4;k++) w4l(((u32 *)buf)[k]);
+		if (count < 512) WR(0x84,0);
+                break;
+
+        }
+
+}
+
+static int epia_test_proto( PIA *pi, char * scratch, int verbose )
+
+{       int     j, k, f;
+	int	e[2] = {0,0};
+
+        epia_connect(pi);
+        for (j=0;j<2;j++) {
+            WR(6,0xa0+j*0x10);
+            for (k=0;k<256;k++) {
+                WR(2,k^0xaa);
+                WR(3,k^0x55);
+                if (RR(2) != (k^0xaa)) e[j]++;
+                }
+	    WR(2,1); WR(3,1);
+            }
+        epia_disconnect(pi);
+
+        f = 0;
+        epia_connect(pi);
+        WR(0x84,8);
+        epia_read_block(pi,scratch,512);
+        for (k=0;k<256;k++) {
+            if ((scratch[2*k] & 0xff) != ((k+1) & 0xff)) f++;
+            if ((scratch[2*k+1] & 0xff) != ((-2-k) & 0xff)) f++;
+        }
+        WR(0x84,0);
+        epia_disconnect(pi);
+
+        if (verbose)  {
+            printk("%s: epia: port 0x%x, mode %d, test=(%d,%d,%d)\n",
+                   pi->device,pi->port,pi->mode,e[0],e[1],f);
+        }
+        
+        return (e[0] && e[1]) || f;
+
+}
+
+
+static void epia_log_adapter( PIA *pi, char * scratch, int verbose )
+
+{       char    *mode_string[6] = {"4-bit","5/3","8-bit",
+				   "EPP-8","EPP-16","EPP-32"};
+
+        printk("%s: epia %s, Shuttle EPIA at 0x%x, ",
+                pi->device,EPIA_VERSION,pi->port);
+        printk("mode %d (%s), delay %d\n",pi->mode,
+		mode_string[pi->mode],pi->delay);
+
+}
+
+static struct pi_protocol epia = {
+	.owner		= THIS_MODULE,
+	.name		= "epia",
+	.max_mode	= 6,
+	.epp_first	= 3,
+	.default_delay	= 1,
+	.max_units	= 1,
+	.write_regr	= epia_write_regr,
+	.read_regr	= epia_read_regr,
+	.write_block	= epia_write_block,
+	.read_block	= epia_read_block,
+	.connect	= epia_connect,
+	.disconnect	= epia_disconnect,
+	.test_proto	= epia_test_proto,
+	.log_adapter	= epia_log_adapter,
+};
+
+static int __init epia_init(void)
+{
+	return pi_register(&epia)-1;
+}
+
+static void __exit epia_exit(void)
+{
+	pi_unregister(&epia);
+}
+
+MODULE_LICENSE("GPL");
+module_init(epia_init)
+module_exit(epia_exit)
diff --git a/drivers/block/paride/fit2.c b/drivers/block/paride/fit2.c
new file mode 100644
index 0000000..e0f0691
--- /dev/null
+++ b/drivers/block/paride/fit2.c
@@ -0,0 +1,151 @@
+/* 
+        fit2.c        (c) 1998  Grant R. Guenther <grant@torque.net>
+                          Under the terms of the GNU General Public License.
+
+	fit2.c is a low-level protocol driver for the older version
+        of the Fidelity International Technology parallel port adapter.  
+	This adapter is used in their TransDisk 2000 and older TransDisk
+	3000 portable hard-drives.  As far as I can tell, this device
+	supports 4-bit mode _only_.  
+
+	Newer models of the FIT products use an enhanced protocol.
+	The "fit3" protocol module should support current drives.
+
+*/
+
+#define FIT2_VERSION      "1.0"
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <asm/io.h>
+
+#include "paride.h"
+
+#define j44(a,b)                (((a>>4)&0x0f)|(b&0xf0))
+
+/* cont = 0 - access the IDE register file 
+   cont = 1 - access the IDE command set 
+
+NB:  The FIT adapter does not appear to use the control registers.
+So, we map ALT_STATUS to STATUS and NO-OP writes to the device
+control register - this means that IDE reset will not work on these
+devices.
+
+*/
+
+static void  fit2_write_regr( PIA *pi, int cont, int regr, int val)
+
+{	if (cont == 1) return;
+	w2(0xc); w0(regr); w2(4); w0(val); w2(5); w0(0); w2(4);
+}
+
+static int fit2_read_regr( PIA *pi, int cont, int regr )
+
+{	int  a, b, r;
+
+	if (cont) {
+	  if (regr != 6) return 0xff;
+	  r = 7;
+	} else r = regr + 0x10;
+
+	w2(0xc); w0(r); w2(4); w2(5); 
+	         w0(0); a = r1();
+	         w0(1); b = r1();
+	w2(4);
+
+	return j44(a,b);
+
+}
+
+static void fit2_read_block( PIA *pi, char * buf, int count )
+
+{	int  k, a, b, c, d;
+
+	w2(0xc); w0(0x10);
+
+	for (k=0;k<count/4;k++) {
+
+		w2(4); w2(5);
+		w0(0); a = r1(); w0(1); b = r1();
+		w0(3); c = r1(); w0(2); d = r1(); 
+		buf[4*k+0] = j44(a,b);
+		buf[4*k+1] = j44(d,c);
+
+                w2(4); w2(5);
+                       a = r1(); w0(3); b = r1();
+                w0(1); c = r1(); w0(0); d = r1(); 
+                buf[4*k+2] = j44(d,c);
+                buf[4*k+3] = j44(a,b);
+
+	}
+
+	w2(4);
+
+}
+
+static void fit2_write_block( PIA *pi, char * buf, int count )
+
+{	int k;
+
+
+	w2(0xc); w0(0); 
+	for (k=0;k<count/2;k++) {
+		w2(4); w0(buf[2*k]); 
+		w2(5); w0(buf[2*k+1]);
+	}
+	w2(4);
+}
+
+static void fit2_connect ( PIA *pi  )
+
+{       pi->saved_r0 = r0();
+        pi->saved_r2 = r2();
+	w2(0xcc); 
+}
+
+static void fit2_disconnect ( PIA *pi )
+
+{       w0(pi->saved_r0);
+        w2(pi->saved_r2);
+} 
+
+static void fit2_log_adapter( PIA *pi, char * scratch, int verbose )
+
+{       printk("%s: fit2 %s, FIT 2000 adapter at 0x%x, delay %d\n",
+                pi->device,FIT2_VERSION,pi->port,pi->delay);
+
+}
+
+static struct pi_protocol fit2 = {
+	.owner		= THIS_MODULE,
+	.name		= "fit2",
+	.max_mode	= 1,
+	.epp_first	= 2,
+	.default_delay	= 1,
+	.max_units	= 1,
+	.write_regr	= fit2_write_regr,
+	.read_regr	= fit2_read_regr,
+	.write_block	= fit2_write_block,
+	.read_block	= fit2_read_block,
+	.connect	= fit2_connect,
+	.disconnect	= fit2_disconnect,
+	.log_adapter	= fit2_log_adapter,
+};
+
+static int __init fit2_init(void)
+{
+	return pi_register(&fit2)-1;
+}
+
+static void __exit fit2_exit(void)
+{
+	pi_unregister(&fit2);
+}
+
+MODULE_LICENSE("GPL");
+module_init(fit2_init)
+module_exit(fit2_exit)
diff --git a/drivers/block/paride/fit3.c b/drivers/block/paride/fit3.c
new file mode 100644
index 0000000..15400e7
--- /dev/null
+++ b/drivers/block/paride/fit3.c
@@ -0,0 +1,211 @@
+/* 
+        fit3.c        (c) 1998  Grant R. Guenther <grant@torque.net>
+                          Under the terms of the GNU General Public License.
+
+	fit3.c is a low-level protocol driver for newer models 
+        of the Fidelity International Technology parallel port adapter.  
+	This adapter is used in their TransDisk 3000 portable 
+	hard-drives, as well as CD-ROM, PD-CD and other devices.
+
+	The TD-2000 and certain older devices use a different protocol.
+	Try the fit2 protocol module with them.
+
+        NB:  The FIT adapters do not appear to support the control 
+	registers.  So, we map ALT_STATUS to STATUS and NO-OP writes 
+	to the device control register - this means that IDE reset 
+	will not work on these devices.
+
+*/
+
+#define FIT3_VERSION      "1.0"
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <asm/io.h>
+
+#include "paride.h"
+
+#define j44(a,b)                (((a>>3)&0x0f)|((b<<1)&0xf0))
+
+#define w7(byte)                {out_p(7,byte);}
+#define r7()                    (in_p(7) & 0xff)
+
+/* cont = 0 - access the IDE register file 
+   cont = 1 - access the IDE command set 
+
+*/
+
+static void  fit3_write_regr( PIA *pi, int cont, int regr, int val)
+
+{	if (cont == 1) return;
+
+	switch (pi->mode) {
+
+	case 0:
+	case 1: w2(0xc); w0(regr); w2(0x8); w2(0xc); 
+		w0(val); w2(0xd); 
+		w0(0);   w2(0xc);
+		break;
+
+	case 2: w2(0xc); w0(regr); w2(0x8); w2(0xc);
+		w4(val); w4(0);
+		w2(0xc);
+		break;
+
+	}
+}
+
+static int fit3_read_regr( PIA *pi, int cont, int regr )
+
+{	int  a, b;
+
+	if (cont) {
+	  if (regr != 6) return 0xff;
+	  regr = 7;
+	} 
+
+	switch (pi->mode) {
+
+	case 0: w2(0xc); w0(regr + 0x10); w2(0x8); w2(0xc);
+		w2(0xd); a = r1();
+		w2(0xf); b = r1(); 
+		w2(0xc);
+		return j44(a,b);
+
+	case 1: w2(0xc); w0(regr + 0x90); w2(0x8); w2(0xc);
+		w2(0xec); w2(0xee); w2(0xef); a = r0(); 
+		w2(0xc);
+		return a;
+
+	case 2: w2(0xc); w0(regr + 0x90); w2(0x8); w2(0xc); 
+		w2(0xec); 
+		a = r4(); b = r4(); 
+		w2(0xc);
+		return a;
+
+	}
+	return -1; 
+
+}
+
+static void fit3_read_block( PIA *pi, char * buf, int count )
+
+{	int  k, a, b, c, d;
+
+	switch (pi->mode) {
+
+	case 0: w2(0xc); w0(0x10); w2(0x8); w2(0xc);
+		for (k=0;k<count/2;k++) {
+		    w2(0xd); a = r1();
+		    w2(0xf); b = r1();
+		    w2(0xc); c = r1();
+		    w2(0xe); d = r1();
+		    buf[2*k  ] = j44(a,b);
+		    buf[2*k+1] = j44(c,d);
+		}
+		w2(0xc);
+		break;
+
+	case 1: w2(0xc); w0(0x90); w2(0x8); w2(0xc); 
+		w2(0xec); w2(0xee);
+		for (k=0;k<count/2;k++) {
+		    w2(0xef); a = r0();
+		    w2(0xee); b = r0();
+                    buf[2*k  ] = a;
+                    buf[2*k+1] = b;
+		}
+		w2(0xec); 
+		w2(0xc);
+		break;
+
+	case 2: w2(0xc); w0(0x90); w2(0x8); w2(0xc); 
+                w2(0xec);
+		for (k=0;k<count;k++) buf[k] = r4();
+                w2(0xc);
+		break;
+
+	}
+}
+
+static void fit3_write_block( PIA *pi, char * buf, int count )
+
+{	int k;
+
+        switch (pi->mode) {
+
+	case 0:
+        case 1: w2(0xc); w0(0); w2(0x8); w2(0xc);
+                for (k=0;k<count/2;k++) {
+ 		    w0(buf[2*k  ]); w2(0xd);
+ 		    w0(buf[2*k+1]); w2(0xc);
+		}
+		break;
+
+        case 2: w2(0xc); w0(0); w2(0x8); w2(0xc); 
+                for (k=0;k<count;k++) w4(buf[k]);
+                w2(0xc);
+		break;
+	}
+}
+
+static void fit3_connect ( PIA *pi  )
+
+{       pi->saved_r0 = r0();
+        pi->saved_r2 = r2();
+	w2(0xc); w0(0); w2(0xa);
+	if (pi->mode == 2) { 
+		w2(0xc); w0(0x9); w2(0x8); w2(0xc); 
+		}
+}
+
+static void fit3_disconnect ( PIA *pi )
+
+{       w2(0xc); w0(0xa); w2(0x8); w2(0xc);
+	w0(pi->saved_r0);
+        w2(pi->saved_r2);
+} 
+
+static void fit3_log_adapter( PIA *pi, char * scratch, int verbose )
+
+{       char    *mode_string[3] = {"4-bit","8-bit","EPP"};
+
+	printk("%s: fit3 %s, FIT 3000 adapter at 0x%x, "
+	       "mode %d (%s), delay %d\n",
+                pi->device,FIT3_VERSION,pi->port,
+		pi->mode,mode_string[pi->mode],pi->delay);
+
+}
+
+static struct pi_protocol fit3 = {
+	.owner		= THIS_MODULE,
+	.name		= "fit3",
+	.max_mode	= 3,
+	.epp_first	= 2,
+	.default_delay	= 1,
+	.max_units	= 1,
+	.write_regr	= fit3_write_regr,
+	.read_regr	= fit3_read_regr,
+	.write_block	= fit3_write_block,
+	.read_block	= fit3_read_block,
+	.connect	= fit3_connect,
+	.disconnect	= fit3_disconnect,
+	.log_adapter	= fit3_log_adapter,
+};
+
+static int __init fit3_init(void)
+{
+	return pi_register(&fit3)-1;
+}
+
+static void __exit fit3_exit(void)
+{
+	pi_unregister(&fit3);
+}
+
+MODULE_LICENSE("GPL");
+module_init(fit3_init)
+module_exit(fit3_exit)
diff --git a/drivers/block/paride/friq.c b/drivers/block/paride/friq.c
new file mode 100644
index 0000000..5ea2904
--- /dev/null
+++ b/drivers/block/paride/friq.c
@@ -0,0 +1,276 @@
+/* 
+	friq.c	(c) 1998    Grant R. Guenther <grant@torque.net>
+		            Under the terms of the GNU General Public License
+
+	friq.c is a low-level protocol driver for the Freecom "IQ"
+	parallel port IDE adapter.   Early versions of this adapter
+	use the 'frpw' protocol.
+	
+	Freecom uses this adapter in a battery powered external 
+	CD-ROM drive.  It is also used in LS-120 drives by
+	Maxell and Panasonic, and other devices.
+
+	The battery powered drive requires software support to
+	control the power to the drive.  This module enables the
+	drive power when the high level driver (pcd) is loaded
+	and disables it when the module is unloaded.  Note that if
+	the friq module is built into the kernel, the power
+	will never be switched off, so other means should be
+	used to conserve battery power.
+
+*/
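+/* The soft power switch referred to above is driven from this file:
+   CMD(0x3d) in friq_test_proto() turns the drive on, and
+   CMD(0x1d)/CMD(0x1e) in friq_release_proto() turn it off again. */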
+
+/* Changes:
+
+	1.01	GRG 1998.12.20	 Added support for soft power switch
+*/
+
+#define	FRIQ_VERSION	"1.01" 
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <asm/io.h>
+
+#include "paride.h"
+
+#define CMD(x)		w2(4);w0(0xff);w0(0xff);w0(0x73);w0(0x73);\
+			w0(0xc9);w0(0xc9);w0(0x26);w0(0x26);w0(x);w0(x);
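+/* CMD() sends the Freecom command preamble (0xff 0x73 0xc9 0x26) and
+   the command byte x, each written twice -- apparently a latching
+   requirement of the IQ ASIC */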
+
+#define j44(l,h)	(((l>>4)&0x0f)|(h&0xf0))
+
+/* cont = 0 - access the IDE register file 
+   cont = 1 - access the IDE command set 
+*/
+
+static int  cont_map[2] = { 0x08, 0x10 };
+
+static int friq_read_regr( PIA *pi, int cont, int regr )
+
+{	int	h,l,r;
+
+	r = regr + cont_map[cont];
+
+	CMD(r);
+	w2(6); l = r1();
+	w2(4); h = r1();
+	w2(4); 
+
+	return j44(l,h);
+
+}
+
+static void friq_write_regr( PIA *pi, int cont, int regr, int val)
+
+{	int r;
+
+        r = regr + cont_map[cont];
+
+	CMD(r);
+	w0(val);
+	w2(5);w2(7);w2(5);w2(4);
+}
+
+static void friq_read_block_int( PIA *pi, char * buf, int count, int regr )
+
+{       int     h, l, k, ph;
+
+        switch(pi->mode) {
+
+        case 0: CMD(regr); 
+                for (k=0;k<count;k++) {
+                        w2(6); l = r1();
+                        w2(4); h = r1();
+                        buf[k] = j44(l,h);
+                }
+                w2(4);
+                break;
+
+        case 1: ph = 2;
+                CMD(regr+0xc0); 
+                w0(0xff);
+                for (k=0;k<count;k++) {
+                        w2(0xa4 + ph); 
+                        buf[k] = r0();
+                        ph = 2 - ph;
+                } 
+                w2(0xac); w2(0xa4); w2(4);
+                break;
+
+	case 2: CMD(regr+0x80);
+		for (k=0;k<count-2;k++) buf[k] = r4();
+		w2(0xac); w2(0xa4);
+		buf[count-2] = r4();
+		buf[count-1] = r4();
+		w2(4);
+		break;
+
+	case 3: CMD(regr+0x80);
+                for (k=0;k<(count/2)-1;k++) ((u16 *)buf)[k] = r4w();
+                w2(0xac); w2(0xa4);
+                buf[count-2] = r4();
+                buf[count-1] = r4();
+                w2(4);
+                break;
+
+	case 4: CMD(regr+0x80);
+                for (k=0;k<(count/4)-1;k++) ((u32 *)buf)[k] = r4l();
+                buf[count-4] = r4();
+                buf[count-3] = r4();
+                w2(0xac); w2(0xa4);
+                buf[count-2] = r4();
+                buf[count-1] = r4();
+                w2(4);
+                break;
+
+        }
+}
+
+static void friq_read_block( PIA *pi, char * buf, int count)
+
+{	friq_read_block_int(pi,buf,count,0x08);
+}
+
+static void friq_write_block( PIA *pi, char * buf, int count )
+ 
+{	int	k;
+
+	switch(pi->mode) {
+
+	case 0:
+	case 1: CMD(8); w2(5);
+        	for (k=0;k<count;k++) {
+			w0(buf[k]);
+			w2(7);w2(5);
+		}
+		w2(4);
+		break;
+
+	case 2: CMD(0xc8); w2(5);
+		for (k=0;k<count;k++) w4(buf[k]);
+		w2(4);
+		break;
+
+        case 3: CMD(0xc8); w2(5);
+                for (k=0;k<count/2;k++) w4w(((u16 *)buf)[k]);
+                w2(4);
+                break;
+
+        case 4: CMD(0xc8); w2(5);
+                for (k=0;k<count/4;k++) w4l(((u32 *)buf)[k]);
+                w2(4);
+                break;
+	}
+}
+
+static void friq_connect ( PIA *pi  )
+
+{       pi->saved_r0 = r0();
+        pi->saved_r2 = r2();
+	w2(4);
+}
+
+static void friq_disconnect ( PIA *pi )
+
+{       CMD(0x20);
+	w0(pi->saved_r0);
+        w2(pi->saved_r2);
+} 
+
+static int friq_test_proto( PIA *pi, char * scratch, int verbose )
+
+{       int     j, k, r;
+	int	e[2] = {0,0};
+
+	pi->saved_r0 = r0();	
+	w0(0xff); udelay(20); CMD(0x3d); /* turn the power on */
+	udelay(500);
+	w0(pi->saved_r0);
+
+	friq_connect(pi);
+	for (j=0;j<2;j++) {
+                friq_write_regr(pi,0,6,0xa0+j*0x10);
+                for (k=0;k<256;k++) {
+                        friq_write_regr(pi,0,2,k^0xaa);
+                        friq_write_regr(pi,0,3,k^0x55);
+                        if (friq_read_regr(pi,0,2) != (k^0xaa)) e[j]++;
+                        }
+                }
+	friq_disconnect(pi);
+
+	friq_connect(pi);
+        friq_read_block_int(pi,scratch,512,0x10);
+        r = 0;
+        for (k=0;k<128;k++) if (scratch[k] != k) r++;
+	friq_disconnect(pi);
+
+        if (verbose)  {
+            printk("%s: friq: port 0x%x, mode %d, test=(%d,%d,%d)\n",
+                   pi->device,pi->port,pi->mode,e[0],e[1],r);
+        }
+
+        return (r || (e[0] && e[1]));
+}
+
+
+static void friq_log_adapter( PIA *pi, char * scratch, int verbose )
+
+{       char    *mode_string[5] = {"4-bit","8-bit",
+				   "EPP-8","EPP-16","EPP-32"};
+
+        printk("%s: friq %s, Freecom IQ ASIC-2 adapter at 0x%x, ", pi->device,
+		FRIQ_VERSION,pi->port);
+        printk("mode %d (%s), delay %d\n",pi->mode,
+		mode_string[pi->mode],pi->delay);
+
+	pi->private = 1;
+	friq_connect(pi);
+	CMD(0x9e);  		/* disable sleep timer */
+	friq_disconnect(pi);
+
+}
+
+static void friq_release_proto( PIA *pi)
+{
+	if (pi->private) {		/* turn off the power */
+		friq_connect(pi);
+		CMD(0x1d); CMD(0x1e);
+		friq_disconnect(pi);
+		pi->private = 0;
+	}
+}
+
+static struct pi_protocol friq = {
+	.owner		= THIS_MODULE,
+	.name		= "friq",
+	.max_mode	= 5,
+	.epp_first	= 2,
+	.default_delay	= 1,
+	.max_units	= 1,
+	.write_regr	= friq_write_regr,
+	.read_regr	= friq_read_regr,
+	.write_block	= friq_write_block,
+	.read_block	= friq_read_block,
+	.connect	= friq_connect,
+	.disconnect	= friq_disconnect,
+	.test_proto	= friq_test_proto,
+	.log_adapter	= friq_log_adapter,
+	.release_proto	= friq_release_proto,
+};
+
+static int __init friq_init(void)
+{
+	return pi_register(&friq)-1;
+}
+
+static void __exit friq_exit(void)
+{
+	pi_unregister(&friq);
+}
+
+MODULE_LICENSE("GPL");
+module_init(friq_init)
+module_exit(friq_exit)
diff --git a/drivers/block/paride/frpw.c b/drivers/block/paride/frpw.c
new file mode 100644
index 0000000..56b3824
--- /dev/null
+++ b/drivers/block/paride/frpw.c
@@ -0,0 +1,313 @@
+/* 
+	frpw.c	(c) 1996-8  Grant R. Guenther <grant@torque.net>
+		            Under the terms of the GNU General Public License
+
+	frpw.c is a low-level protocol driver for the Freecom "Power"
+	parallel port IDE adapter.
+	
+	Some applications of this adapter may require a "printer" reset
+	prior to loading the driver.  This can be done by loading and
+	unloading the "lp" driver, or it can be done by this driver
+	if you define FRPW_HARD_RESET.  The latter is not recommended
+	as it may upset devices on other ports.
+
+*/
+
+/* Changes:
+
+        1.01    GRG 1998.05.06 init_proto, release_proto
+			       fix chip detect
+			       added EPP-16 and EPP-32
+	1.02    GRG 1998.09.23 added hard reset to initialisation process
+	1.03    GRG 1998.12.14 made hard reset conditional
+
+*/
+
+#define	FRPW_VERSION	"1.03" 
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <asm/io.h>
+
+#include "paride.h"
+
+#define cec4		w2(0xc);w2(0xe);w2(0xe);w2(0xc);w2(4);w2(4);w2(4);
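+/* cec4 is a fixed strobe pattern on the control port, used after a
+   w0() to latch the value just placed on the data lines */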
+#define j44(l,h)	(((l>>4)&0x0f)|(h&0xf0))
+
+/* cont = 0 - access the IDE register file 
+   cont = 1 - access the IDE command set 
+*/
+
+static int  cont_map[2] = { 0x08, 0x10 };
+
+static int frpw_read_regr( PIA *pi, int cont, int regr )
+
+{	int	h,l,r;
+
+	r = regr + cont_map[cont];
+
+	w2(4);
+	w0(r); cec4;
+	w2(6); l = r1();
+	w2(4); h = r1();
+	w2(4); 
+
+	return j44(l,h);
+
+}
+
+static void frpw_write_regr( PIA *pi, int cont, int regr, int val)
+
+{	int r;
+
+        r = regr + cont_map[cont];
+
+	w2(4); w0(r); cec4; 
+	w0(val);
+	w2(5);w2(7);w2(5);w2(4);
+}
+
+static void frpw_read_block_int( PIA *pi, char * buf, int count, int regr )
+
+{       int     h, l, k, ph;
+
+        switch(pi->mode) {
+
+        case 0: w2(4); w0(regr); cec4;
+                for (k=0;k<count;k++) {
+                        w2(6); l = r1();
+                        w2(4); h = r1();
+                        buf[k] = j44(l,h);
+                }
+                w2(4);
+                break;
+
+        case 1: ph = 2;
+                w2(4); w0(regr + 0xc0); cec4;
+                w0(0xff);
+                for (k=0;k<count;k++) {
+                        w2(0xa4 + ph); 
+                        buf[k] = r0();
+                        ph = 2 - ph;
+                } 
+                w2(0xac); w2(0xa4); w2(4);
+                break;
+
+        case 2: w2(4); w0(regr + 0x80); cec4;
+                for (k=0;k<count;k++) buf[k] = r4();
+                w2(0xac); w2(0xa4);
+                w2(4);
+                break;
+
+	case 3: w2(4); w0(regr + 0x80); cec4;
+		for (k=0;k<count-2;k++) buf[k] = r4();
+		w2(0xac); w2(0xa4);
+		buf[count-2] = r4();
+		buf[count-1] = r4();
+		w2(4);
+		break;
+
+	case 4: w2(4); w0(regr + 0x80); cec4;
+                for (k=0;k<(count/2)-1;k++) ((u16 *)buf)[k] = r4w();
+                w2(0xac); w2(0xa4);
+                buf[count-2] = r4();
+                buf[count-1] = r4();
+                w2(4);
+                break;
+
+	case 5: w2(4); w0(regr + 0x80); cec4;
+                for (k=0;k<(count/4)-1;k++) ((u32 *)buf)[k] = r4l();
+                buf[count-4] = r4();
+                buf[count-3] = r4();
+                w2(0xac); w2(0xa4);
+                buf[count-2] = r4();
+                buf[count-1] = r4();
+                w2(4);
+                break;
+
+        }
+}
+
+static void frpw_read_block( PIA *pi, char * buf, int count)
+
+{	frpw_read_block_int(pi,buf,count,0x08);
+}
+
+static void frpw_write_block( PIA *pi, char * buf, int count )
+ 
+{	int	k;
+
+	switch(pi->mode) {
+
+	case 0:
+	case 1:
+	case 2: w2(4); w0(8); cec4; w2(5);
+        	for (k=0;k<count;k++) {
+			w0(buf[k]);
+			w2(7);w2(5);
+		}
+		w2(4);
+		break;
+
+	case 3: w2(4); w0(0xc8); cec4; w2(5);
+		for (k=0;k<count;k++) w4(buf[k]);
+		w2(4);
+		break;
+
+        case 4: w2(4); w0(0xc8); cec4; w2(5);
+                for (k=0;k<count/2;k++) w4w(((u16 *)buf)[k]);
+                w2(4);
+                break;
+
+        case 5: w2(4); w0(0xc8); cec4; w2(5);
+                for (k=0;k<count/4;k++) w4l(((u32 *)buf)[k]);
+                w2(4);
+                break;
+	}
+}
+
+static void frpw_connect ( PIA *pi  )
+
+{       pi->saved_r0 = r0();
+        pi->saved_r2 = r2();
+	w2(4);
+}
+
+static void frpw_disconnect ( PIA *pi )
+
+{       w2(4); w0(0x20); cec4;
+	w0(pi->saved_r0);
+        w2(pi->saved_r2);
+} 
+
+/* Stub logic to see if PNP string is available - used to distinguish
+   between the Xilinx and ASIC implementations of the Freecom adapter.
+*/
+
+static int frpw_test_pnp ( PIA *pi )
+
+/*  returns chip_type:   0 = Xilinx, 1 = ASIC   */
+
+{	int olddelay, a, b;
+
+#ifdef FRPW_HARD_RESET
+        w0(0); w2(8); udelay(50); w2(0xc);   /* parallel bus reset */
+        mdelay(1500);
+#endif
+
+	olddelay = pi->delay;
+	pi->delay = 10;
+
+	pi->saved_r0 = r0();
+        pi->saved_r2 = r2();
+	
+	w2(4); w0(4); w2(6); w2(7);
+	a = r1() & 0xff; w2(4); b = r1() & 0xff;
+	w2(0xc); w2(0xe); w2(4);
+
+	pi->delay = olddelay;
+        w0(pi->saved_r0);
+        w2(pi->saved_r2);
+
+	return ((~a&0x40) && (b&0x40));
+} 
+
+/* We use the pi->private to remember the result of the PNP test.
+   To make this work, private = port*2 + chip.  Yes, I know it's
+   a hack :-(
+*/
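+/* i.e. chip = pi->private % 2 and port = pi->private >> 1; the test
+   below re-runs the PNP probe whenever the remembered port differs */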
+
+static int frpw_test_proto( PIA *pi, char * scratch, int verbose )
+
+{       int     j, k, r;
+	int	e[2] = {0,0};
+
+	if ((pi->private>>1) != pi->port)
+	   pi->private = frpw_test_pnp(pi) + 2*pi->port;
+
+	if (((pi->private%2) == 0) && (pi->mode > 2)) {
+	   if (verbose) 
+		printk("%s: frpw: Xilinx does not support mode %d\n",
+			pi->device, pi->mode);
+	   return 1;
+	}
+
+	if (((pi->private%2) == 1) && (pi->mode == 2)) {
+	   if (verbose)
+		printk("%s: frpw: ASIC does not support mode 2\n",
+			pi->device);
+	   return 1;
+	}
+
+	frpw_connect(pi);
+	for (j=0;j<2;j++) {
+                frpw_write_regr(pi,0,6,0xa0+j*0x10);
+                for (k=0;k<256;k++) {
+                        frpw_write_regr(pi,0,2,k^0xaa);
+                        frpw_write_regr(pi,0,3,k^0x55);
+                        if (frpw_read_regr(pi,0,2) != (k^0xaa)) e[j]++;
+                        }
+                }
+	frpw_disconnect(pi);
+
+	frpw_connect(pi);
+        frpw_read_block_int(pi,scratch,512,0x10);
+        r = 0;
+        for (k=0;k<128;k++) if (scratch[k] != k) r++;
+	frpw_disconnect(pi);
+
+        if (verbose)  {
+            printk("%s: frpw: port 0x%x, chip %ld, mode %d, test=(%d,%d,%d)\n",
+                   pi->device,pi->port,(pi->private%2),pi->mode,e[0],e[1],r);
+        }
+
+        return (r || (e[0] && e[1]));
+}
+
+
+static void frpw_log_adapter( PIA *pi, char * scratch, int verbose )
+
+{       char    *mode_string[6] = {"4-bit","8-bit","EPP",
+				   "EPP-8","EPP-16","EPP-32"};
+
+        printk("%s: frpw %s, Freecom (%s) adapter at 0x%x, ", pi->device,
+		FRPW_VERSION,((pi->private%2) == 0)?"Xilinx":"ASIC",pi->port);
+        printk("mode %d (%s), delay %d\n",pi->mode,
+		mode_string[pi->mode],pi->delay);
+
+}
+
+static struct pi_protocol frpw = {
+	.owner		= THIS_MODULE,
+	.name		= "frpw",
+	.max_mode	= 6,
+	.epp_first	= 2,
+	.default_delay	= 2,
+	.max_units	= 1,
+	.write_regr	= frpw_write_regr,
+	.read_regr	= frpw_read_regr,
+	.write_block	= frpw_write_block,
+	.read_block	= frpw_read_block,
+	.connect	= frpw_connect,
+	.disconnect	= frpw_disconnect,
+	.test_proto	= frpw_test_proto,
+	.log_adapter	= frpw_log_adapter,
+};
+
+static int __init frpw_init(void)
+{
+	return pi_register(&frpw)-1;
+}
+
+static void __exit frpw_exit(void)
+{
+	pi_unregister(&frpw);
+}
+
+MODULE_LICENSE("GPL");
+module_init(frpw_init)
+module_exit(frpw_exit)
diff --git a/drivers/block/paride/jumbo b/drivers/block/paride/jumbo
new file mode 100644
index 0000000..e793b9c
--- /dev/null
+++ b/drivers/block/paride/jumbo
@@ -0,0 +1,70 @@
+#!/bin/sh
+#
+# This script can be used to build "jumbo" modules that contain the
+# base PARIDE support, one protocol module and one high-level driver.
+#
+echo -n "High level driver [pcd] : "
+read X
+HLD=${X:-pcd}
+#
+echo -n "Protocol module [bpck] : "
+read X
+PROTO=${X:-bpck}
+#
+echo -n "Use MODVERSIONS [y] ? "
+read X
+UMODV=${X:-y}
+#
+echo -n "For SMP kernel [n] ? "
+read X
+USMP=${X:-n}
+#
+echo -n "Support PARPORT [n] ? "
+read X
+UPARP=${X:-n}
+#
+echo
+#
+case $USMP in
+	y* | Y* ) FSMP="-DCONFIG_SMP"
+		  ;;
+	*)	  FSMP=""
+		  ;;
+esac
+#
+MODI="-include ../../../include/linux/modversions.h"
+#
+case $UMODV in
+	y* | Y* ) FMODV="-DMODVERSIONS $MODI"
+		  ;;
+	*)	  FMODV=""
+		  ;;
+esac
+#
+case $UPARP in
+	y* | Y* ) FPARP="-DCONFIG_PARPORT"
+		  ;;
+	*)	  FPARP=""
+		  ;;
+esac
+#
+TARG=$HLD-$PROTO.o
+FPROTO=-DCONFIG_PARIDE_`echo "$PROTO" | tr '[a-z]' '[A-Z]'`
+FK="-D__KERNEL__ -I ../../../include"
+FLCH=-D_LINUX_CONFIG_H
+#
+echo cc $FK $FSMP $FLCH $FPARP $FPROTO $FMODV -Wall -O2 -o Jb.o -c paride.c
+cc $FK $FSMP $FLCH $FPARP $FPROTO $FMODV -Wall -O2 -o Jb.o -c paride.c
+#
+echo cc $FK $FSMP $FMODV -Wall -O2 -o Jp.o -c $PROTO.c
+cc $FK $FSMP $FMODV -Wall -O2 -o Jp.o -c $PROTO.c
+#
+echo cc $FK $FSMP $FMODV -DMODULE -DPARIDE_JUMBO -Wall -O2 -o Jd.o -c $HLD.c
+cc $FK $FSMP $FMODV -DMODULE -DPARIDE_JUMBO -Wall -O2 -o Jd.o -c $HLD.c
+#
+echo ld -r -o $TARG Jp.o Jb.o Jd.o
+ld -r -o $TARG Jp.o Jb.o Jd.o
+#
+#
+rm Jp.o Jb.o Jd.o
+#
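+# A typical run that just accepts the defaults builds pcd-bpck.o; the
+# last command the script echoes would be:
+#
+#    ld -r -o pcd-bpck.o Jp.o Jb.o Jd.o
+#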
diff --git a/drivers/block/paride/kbic.c b/drivers/block/paride/kbic.c
new file mode 100644
index 0000000..d983bce
--- /dev/null
+++ b/drivers/block/paride/kbic.c
@@ -0,0 +1,297 @@
+/*
+        kbic.c    (c) 1997-8  Grant R. Guenther <grant@torque.net>
+                              Under the terms of the GNU General Public License.
+
+        This is a low-level driver for the KBIC-951A and KBIC-971A
+        parallel to IDE adapter chips from KingByte Information Systems.
+
+	The chips are almost identical; however, the wakeup code
+	required for the 971A interferes with the correct operation of
+        the 951A, so this driver registers itself twice, once for
+	each chip.
+
+*/
+
+/* Changes:
+
+        1.01    GRG 1998.05.06 init_proto, release_proto
+
+*/
+
+#define KBIC_VERSION      "1.01"
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <asm/io.h>
+
+#include "paride.h"
+
+#define r12w()			(delay_p,inw(pi->port+1)&0xffff) 
+
+#define j44(a,b)                ((((a>>4)&0x0f)|(b&0xf0))^0x88)
+#define j53(w)                  (((w>>3)&0x1f)|((w>>4)&0xe0))
+
+
+/* cont = 0 - access the IDE register file 
+   cont = 1 - access the IDE command set 
+*/
+
+static int  cont_map[2] = { 0x80, 0x40 };
+
+static int kbic_read_regr( PIA *pi, int cont, int regr )
+
+{       int     a, b, s;
+
+        s = cont_map[cont];
+
+	switch (pi->mode) {
+
+	case 0: w0(regr|0x18|s); w2(4); w2(6); w2(4); w2(1); w0(8);
+	        a = r1(); w0(0x28); b = r1(); w2(4);
+		return j44(a,b);
+
+	case 1: w0(regr|0x38|s); w2(4); w2(6); w2(4); w2(5); w0(8);
+		a = r12w(); w2(4);
+		return j53(a);
+
+	case 2: w0(regr|0x08|s); w2(4); w2(6); w2(4); w2(0xa5); w2(0xa1);
+		a = r0(); w2(4);
+       		return a;
+
+	case 3:
+	case 4:
+	case 5: w0(0x20|s); w2(4); w2(6); w2(4); w3(regr);
+		a = r4(); b = r4(); w2(4); w2(0); w2(4);
+		return a;
+
+	}
+	return -1;
+}       
+
+static void  kbic_write_regr( PIA *pi, int cont, int regr, int val)
+
+{       int  s;
+
+        s = cont_map[cont];
+
+        switch (pi->mode) {
+
+	case 0: 
+        case 1:
+	case 2:	w0(regr|0x10|s); w2(4); w2(6); w2(4); 
+		w0(val); w2(5); w2(4);
+		break;
+
+	case 3:
+	case 4:
+	case 5: w0(0x20|s); w2(4); w2(6); w2(4); w3(regr);
+		w4(val); w4(val);
+		w2(4); w2(0); w2(4);
+                break;
+
+	}
+}
+
+static void k951_connect ( PIA *pi  )
+
+{ 	pi->saved_r0 = r0();
+        pi->saved_r2 = r2();
+        w2(4); 
+}
+
+static void k951_disconnect ( PIA *pi )
+
+{      	w0(pi->saved_r0);
+        w2(pi->saved_r2);
+}
+
+#define	CCP(x)	w2(0xc4);w0(0xaa);w0(0x55);w0(0);w0(0xff);w0(0x87);\
+		w0(0x78);w0(x);w2(0xc5);w2(0xc4);w0(0xff);
+
+static void k971_connect ( PIA *pi  )
+
+{ 	pi->saved_r0 = r0();
+        pi->saved_r2 = r2();
+	CCP(0x20);
+        w2(4); 
+}
+
+static void k971_disconnect ( PIA *pi )
+
+{       CCP(0x30);
+	w0(pi->saved_r0);
+        w2(pi->saved_r2);
+}
+
+/* counts must be congruent to 0 MOD 4, but all known applications
+   have this property.
+*/
+
+static void kbic_read_block( PIA *pi, char * buf, int count )
+
+{       int     k, a, b;
+
+        switch (pi->mode) {
+
+        case 0: w0(0x98); w2(4); w2(6); w2(4);
+                for (k=0;k<count/2;k++) {
+			w2(1); w0(8);    a = r1();
+			       w0(0x28); b = r1();
+			buf[2*k]   = j44(a,b);
+			w2(5);           b = r1();
+			       w0(8);    a = r1();
+			buf[2*k+1] = j44(a,b);
+			w2(4);
+                } 
+                break;
+
+        case 1: w0(0xb8); w2(4); w2(6); w2(4); 
+                for (k=0;k<count/4;k++) {
+                        w0(0xb8); 
+			w2(4); w2(5); 
+                        w0(8);    buf[4*k]   = j53(r12w());
+			w0(0xb8); buf[4*k+1] = j53(r12w());
+			w2(4); w2(5);
+			          buf[4*k+3] = j53(r12w());
+			w0(8);    buf[4*k+2] = j53(r12w());
+                }
+                w2(4);
+                break;
+
+        case 2: w0(0x88); w2(4); w2(6); w2(4);
+                for (k=0;k<count/2;k++) {
+                        w2(0xa0); w2(0xa1); buf[2*k] = r0();
+                        w2(0xa5); buf[2*k+1] = r0();
+                }
+                w2(4);
+                break;
+
+        case 3: w0(0xa0); w2(4); w2(6); w2(4); w3(0);
+                for (k=0;k<count;k++) buf[k] = r4();
+                w2(4); w2(0); w2(4);
+                break;
+
+	case 4: w0(0xa0); w2(4); w2(6); w2(4); w3(0);
+                for (k=0;k<count/2;k++) ((u16 *)buf)[k] = r4w();
+                w2(4); w2(0); w2(4);
+                break;
+
+        case 5: w0(0xa0); w2(4); w2(6); w2(4); w3(0);
+                for (k=0;k<count/4;k++) ((u32 *)buf)[k] = r4l();
+                w2(4); w2(0); w2(4);
+                break;
+
+
+        }
+}
+
+static void kbic_write_block( PIA *pi, char * buf, int count )
+
+{       int     k;
+
+        switch (pi->mode) {
+
+        case 0:
+        case 1:
+        case 2: w0(0x90); w2(4); w2(6); w2(4); 
+		for(k=0;k<count/2;k++) {
+			w0(buf[2*k+1]); w2(0); w2(4); 
+			w0(buf[2*k]);   w2(5); w2(4); 
+		}
+		break;
+
+        case 3: w0(0xa0); w2(4); w2(6); w2(4); w3(0);
+		for(k=0;k<count/2;k++) {
+			w4(buf[2*k+1]); 
+                        w4(buf[2*k]);
+                }
+		w2(4); w2(0); w2(4);
+		break;
+
+	case 4: w0(0xa0); w2(4); w2(6); w2(4); w3(0);
+                for(k=0;k<count/2;k++) w4w(pi_swab16(buf,k));
+                w2(4); w2(0); w2(4);
+                break;
+
+        case 5: w0(0xa0); w2(4); w2(6); w2(4); w3(0);
+                for(k=0;k<count/4;k++) w4l(pi_swab32(buf,k));
+                w2(4); w2(0); w2(4);
+                break;
+
+        }
+
+}
+
+static void kbic_log_adapter( PIA *pi, char * scratch, 
+			      int verbose, char * chip )
+
+{       char    *mode_string[6] = {"4-bit","5/3","8-bit",
+				   "EPP-8","EPP-16","EPP-32"};
+
+        printk("%s: kbic %s, KingByte %s at 0x%x, ",
+                pi->device,KBIC_VERSION,chip,pi->port);
+        printk("mode %d (%s), delay %d\n",pi->mode,
+		mode_string[pi->mode],pi->delay);
+
+}
+
+static void k951_log_adapter( PIA *pi, char * scratch, int verbose )
+
+{	kbic_log_adapter(pi,scratch,verbose,"KBIC-951A");
+}
+
+static void k971_log_adapter( PIA *pi, char * scratch, int verbose )
+
+{       kbic_log_adapter(pi,scratch,verbose,"KBIC-971A");
+}
+
+static struct pi_protocol k951 = {
+	.owner		= THIS_MODULE,
+	.name		= "k951",
+	.max_mode	= 6,
+	.epp_first	= 3,
+	.default_delay	= 1,
+	.max_units	= 1,
+	.write_regr	= kbic_write_regr,
+	.read_regr	= kbic_read_regr,
+	.write_block	= kbic_write_block,
+	.read_block	= kbic_read_block,
+	.connect	= k951_connect,
+	.disconnect	= k951_disconnect,
+	.log_adapter	= k951_log_adapter,
+};
+
+static struct pi_protocol k971 = {
+	.owner		= THIS_MODULE,
+	.name		= "k971",
+	.max_mode	= 6,
+	.epp_first	= 3,
+	.default_delay	= 1,
+	.max_units	= 1,
+	.write_regr	= kbic_write_regr,
+	.read_regr	= kbic_read_regr,
+	.write_block	= kbic_write_block,
+	.read_block	= kbic_read_block,
+	.connect	= k971_connect,
+	.disconnect	= k971_disconnect,
+	.log_adapter	= k971_log_adapter,
+};
+
+static int __init kbic_init(void)
+{
+	return (pi_register(&k951)||pi_register(&k971))-1;
+}
+
+static void __exit kbic_exit(void)
+{
+	pi_unregister(&k951);
+	pi_unregister(&k971);
+}
+
+MODULE_LICENSE("GPL");
+module_init(kbic_init)
+module_exit(kbic_exit)
diff --git a/drivers/block/paride/ktti.c b/drivers/block/paride/ktti.c
new file mode 100644
index 0000000..6c7edbf
--- /dev/null
+++ b/drivers/block/paride/ktti.c
@@ -0,0 +1,128 @@
+/* 
+        ktti.c        (c) 1998  Grant R. Guenther <grant@torque.net>
+                          Under the terms of the GNU General Public License.
+
+	ktti.c is a low-level protocol driver for the KT Technology
+	parallel port adapter.  This adapter is used in the "PHd" 
+        portable hard-drives.  As far as I can tell, this device
+	supports 4-bit mode _only_.  
+
+*/
+
+#define KTTI_VERSION      "1.0"
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <asm/io.h>
+
+#include "paride.h"
+
+#define j44(a,b)                (((a>>4)&0x0f)|(b&0xf0))
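+
+/* j44() rebuilds one byte from two status-port reads in 4-bit mode:
+   the high nibble of the first read supplies bits 3:0, the high
+   nibble of the second supplies bits 7:4. */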
+
+/* cont = 0 - access the IDE register file 
+   cont = 1 - access the IDE command set 
+*/
+
+static int  cont_map[2] = { 0x10, 0x08 };
+
+static void  ktti_write_regr( PIA *pi, int cont, int regr, int val)
+
+{	int r;
+
+	r = regr + cont_map[cont];
+
+	w0(r); w2(0xb); w2(0xa); w2(3); w2(6); 
+	w0(val); w2(3); w0(0); w2(6); w2(0xb);
+}
+
+static int ktti_read_regr( PIA *pi, int cont, int regr )
+
+{	int  a, b, r;
+
+        r = regr + cont_map[cont];
+
+        w0(r); w2(0xb); w2(0xa); w2(9); w2(0xc); w2(9); 
+	a = r1(); w2(0xc);  b = r1(); w2(9); w2(0xc); w2(9);
+	return j44(a,b);
+
+}
+
+static void ktti_read_block( PIA *pi, char * buf, int count )
+
+{	int  k, a, b;
+
+	for (k=0;k<count/2;k++) {
+		w0(0x10); w2(0xb); w2(0xa); w2(9); w2(0xc); w2(9);
+		a = r1(); w2(0xc); b = r1(); w2(9);
+		buf[2*k] = j44(a,b);
+		a = r1(); w2(0xc); b = r1(); w2(9);
+		buf[2*k+1] = j44(a,b);
+	}
+}
+
+static void ktti_write_block( PIA *pi, char * buf, int count )
+
+{	int k;
+
+	for (k=0;k<count/2;k++) {
+		w0(0x10); w2(0xb); w2(0xa); w2(3); w2(6);
+		w0(buf[2*k]); w2(3);
+		w0(buf[2*k+1]); w2(6);
+		w2(0xb);
+	}
+}
+
+static void ktti_connect ( PIA *pi  )
+
+{       pi->saved_r0 = r0();
+        pi->saved_r2 = r2();
+	w2(0xb); w2(0xa); w0(0); w2(3); w2(6);	
+}
+
+static void ktti_disconnect ( PIA *pi )
+
+{       w2(0xb); w2(0xa); w0(0xa0); w2(3); w2(4);
+	w0(pi->saved_r0);
+        w2(pi->saved_r2);
+} 
+
+static void ktti_log_adapter( PIA *pi, char * scratch, int verbose )
+
+{       printk("%s: ktti %s, KT adapter at 0x%x, delay %d\n",
+                pi->device,KTTI_VERSION,pi->port,pi->delay);
+
+}
+
+static struct pi_protocol ktti = {
+	.owner		= THIS_MODULE,
+	.name		= "ktti",
+	.max_mode	= 1,
+	.epp_first	= 2,
+	.default_delay	= 1,
+	.max_units	= 1,
+	.write_regr	= ktti_write_regr,
+	.read_regr	= ktti_read_regr,
+	.write_block	= ktti_write_block,
+	.read_block	= ktti_read_block,
+	.connect	= ktti_connect,
+	.disconnect	= ktti_disconnect,
+	.log_adapter	= ktti_log_adapter,
+};
+
+static int __init ktti_init(void)
+{
+	return pi_register(&ktti)-1;
+}
+
+static void __exit ktti_exit(void)
+{
+	pi_unregister(&ktti);
+}
+
+MODULE_LICENSE("GPL");
+module_init(ktti_init)
+module_exit(ktti_exit)
diff --git a/drivers/block/paride/mkd b/drivers/block/paride/mkd
new file mode 100644
index 0000000..971f099
--- /dev/null
+++ b/drivers/block/paride/mkd
@@ -0,0 +1,30 @@
+#!/bin/bash
+#
+# mkd -- a script to create the device special files for the PARIDE subsystem
+#
+#  block devices:  	pd (45), pcd (46), pf (47)
+#  character devices:	pt (96), pg (97)
+#
+function mkdev {
+  mknod $1 $2 $3 $4 ; chmod 0660 $1 ; chown root:disk $1
+}
+#
+function pd {
+  # map the unit number to a drive letter ('a' is 97) via an octal escape
+  D=$( printf \\$( printf "%03o" $[ $1 + 97 ] ) )
+  mkdev pd$D b 45 $[ $1 * 16 ]
+  for P in 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+  do mkdev pd$D$P b 45 $[ $1 * 16 + $P ]
+  done
+}
+#
+cd /dev
+#
+for u in 0 1 2 3 ; do pd $u ; done
+for u in 0 1 2 3 ; do mkdev pcd$u b 46 $u ; done 
+for u in 0 1 2 3 ; do mkdev pf$u  b 47 $u ; done 
+for u in 0 1 2 3 ; do mkdev pt$u  c 96 $u ; done 
+for u in 0 1 2 3 ; do mkdev npt$u c 96 $[ $u + 128 ] ; done 
+for u in 0 1 2 3 ; do mkdev pg$u  c 97 $u ; done
+#
+# end of mkd
+
diff --git a/drivers/block/paride/on20.c b/drivers/block/paride/on20.c
new file mode 100644
index 0000000..9f8e010
--- /dev/null
+++ b/drivers/block/paride/on20.c
@@ -0,0 +1,153 @@
+/* 
+	on20.c	(c) 1996-8  Grant R. Guenther <grant@torque.net>
+		            Under the terms of the GNU General Public License.
+
+        on20.c is a low-level protocol driver for the
+        Onspec 90c20 parallel to IDE adapter. 
+*/
+
+/* Changes:
+
+        1.01    GRG 1998.05.06 init_proto, release_proto
+
+*/
+
+#define	ON20_VERSION	"1.01"
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <asm/io.h>
+
+#include "paride.h"
+
+#define op(f)	w2(4);w0(f);w2(5);w2(0xd);w2(5);w2(0xd);w2(5);w2(4);
+#define vl(v)	w2(4);w0(v);w2(5);w2(7);w2(5);w2(4);
+
+#define j44(a,b)  (((a>>4)&0x0f)|(b&0xf0))
+
+/* cont = 0 - access the IDE register file 
+   cont = 1 - access the IDE command set 
+*/
+
+static int on20_read_regr( PIA *pi, int cont, int regr )
+
+{	int h,l, r ;
+
+        r = (regr<<2) + 1 + cont;
+
+        op(1); vl(r); op(0);
+
+	switch (pi->mode)  {
+
+        case 0:  w2(4); w2(6); l = r1();
+                 w2(4); w2(6); h = r1();
+                 w2(4); w2(6); w2(4); w2(6); w2(4);
+		 return j44(l,h);
+
+	case 1:  w2(4); w2(0x26); r = r0(); 
+                 w2(4); w2(0x26); w2(4);
+		 return r;
+
+	}
+	return -1;
+}	
+
+static void on20_write_regr( PIA *pi, int cont, int regr, int val )
+
+{	int r;
+
+	r = (regr<<2) + 1 + cont;
+
+	op(1); vl(r); 
+	op(0); vl(val); 
+	op(0); vl(val);
+}
+
+static void on20_connect ( PIA *pi)
+
+{	pi->saved_r0 = r0();
+        pi->saved_r2 = r2();
+
+	w2(4);w0(0);w2(0xc);w2(4);w2(6);w2(4);w2(6);w2(4); 
+	if (pi->mode) { op(2); vl(8); op(2); vl(9); }
+	       else   { op(2); vl(0); op(2); vl(8); }
+}
+
+static void on20_disconnect ( PIA *pi )
+
+{	w2(4);w0(7);w2(4);w2(0xc);w2(4);
+        w0(pi->saved_r0);
+        w2(pi->saved_r2);
+} 
+
+static void on20_read_block( PIA *pi, char * buf, int count )
+
+{	int     k, l, h; 
+
+	op(1); vl(1); op(0);
+
+	for (k=0;k<count;k++) 
+	    if (pi->mode) {
+		w2(4); w2(0x26); buf[k] = r0();
+	    } else {
+		w2(6); l = r1(); w2(4);
+		w2(6); h = r1(); w2(4);
+		buf[k] = j44(l,h);
+	    }
+	w2(4);
+}
+
+static void on20_write_block(  PIA *pi, char * buf, int count )
+
+{	int	k;
+
+	op(1); vl(1); op(0);
+
+	for (k=0;k<count;k++) { w2(5); w0(buf[k]); w2(7); }
+	w2(4);
+}
+
+static void on20_log_adapter( PIA *pi, char * scratch, int verbose )
+
+{       char    *mode_string[2] = {"4-bit","8-bit"};
+
+        printk("%s: on20 %s, OnSpec 90c20 at 0x%x, ",
+                pi->device,ON20_VERSION,pi->port);
+        printk("mode %d (%s), delay %d\n",pi->mode,
+		mode_string[pi->mode],pi->delay);
+
+}
+
+static struct pi_protocol on20 = {
+	.owner		= THIS_MODULE,
+	.name		= "on20",
+	.max_mode	= 2,
+	.epp_first	= 2,
+	.default_delay	= 1,
+	.max_units	= 1,
+	.write_regr	= on20_write_regr,
+	.read_regr	= on20_read_regr,
+	.write_block	= on20_write_block,
+	.read_block	= on20_read_block,
+	.connect	= on20_connect,
+	.disconnect	= on20_disconnect,
+	.log_adapter	= on20_log_adapter,
+};
+
+static int __init on20_init(void)
+{
+	return pi_register(&on20)-1;
+}
+
+static void __exit on20_exit(void)
+{
+	pi_unregister(&on20);
+}
+
+MODULE_LICENSE("GPL");
+module_init(on20_init)
+module_exit(on20_exit)
diff --git a/drivers/block/paride/on26.c b/drivers/block/paride/on26.c
new file mode 100644
index 0000000..9f837d9
--- /dev/null
+++ b/drivers/block/paride/on26.c
@@ -0,0 +1,319 @@
+/* 
+        on26.c    (c) 1997-8  Grant R. Guenther <grant@torque.net>
+                              Under the terms of the GNU General Public License.
+
+        on26.c is a low-level protocol driver for the 
+        OnSpec 90c26 parallel to IDE adapter chip.
+
+*/
+
+/* Changes:
+
+        1.01    GRG 1998.05.06 init_proto, release_proto
+	1.02    GRG 1998.09.23 updates for the -E rev chip
+	1.03    GRG 1998.12.14 fix for slave drives
+	1.04    GRG 1998.12.20 yet another bug fix
+
+*/
+
+#define ON26_VERSION      "1.04"
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <asm/io.h>
+
+#include "paride.h"
+
+/* mode codes:  0  nybble reads, 8-bit writes
+                1  8-bit reads and writes
+                2  8-bit EPP mode
+		3  EPP-16
+		4  EPP-32
+*/
+
+#define j44(a,b)  (((a>>4)&0x0f)|(b&0xf0))
+
+#define P1	w2(5);w2(0xd);w2(5);w2(0xd);w2(5);w2(4);
+#define P2	w2(5);w2(7);w2(5);w2(4);
+
+/* cont = 0 - access the IDE register file 
+   cont = 1 - access the IDE command set 
+*/
+
+static int on26_read_regr( PIA *pi, int cont, int regr )
+
+{       int     a, b, r;
+
+	r = (regr<<2) + 1 + cont;
+
+        switch (pi->mode)  {
+
+        case 0: w0(1); P1; w0(r); P2; w0(0); P1; 
+		w2(6); a = r1(); w2(4);
+		w2(6); b = r1(); w2(4);
+		w2(6); w2(4); w2(6); w2(4);
+                return j44(a,b);
+
+        case 1: w0(1); P1; w0(r); P2; w0(0); P1;
+		w2(0x26); a = r0(); w2(4); w2(0x26); w2(4);
+                return a;
+
+	case 2:
+	case 3:
+        case 4: w3(1); w3(1); w2(5); w4(r); w2(4);
+		w3(0); w3(0); w2(0x24); a = r4(); w2(4);
+		w2(0x24); r4(); w2(4);
+                return a;
+
+        }
+        return -1;
+}       
+
+static void on26_write_regr( PIA *pi, int cont, int regr, int val )
+
+{       int  r;
+
+        r = (regr<<2) + 1 + cont;
+
+        switch (pi->mode)  {
+
+        case 0:
+        case 1: w0(1); P1; w0(r); P2; w0(0); P1;
+		w0(val); P2; w0(val); P2;
+		break;
+
+	case 2:
+	case 3:
+        case 4: w3(1); w3(1); w2(5); w4(r); w2(4);
+		w3(0); w3(0); 
+		w2(5); w4(val); w2(4);
+		w2(5); w4(val); w2(4);
+                break;
+        }
+}
+
+#define  CCP(x)  w0(0xfe);w0(0xaa);w0(0x55);w0(0);w0(0xff);\
+		 w0(0x87);w0(0x78);w0(x);w2(4);w2(5);w2(4);w0(0xff);
+
+static void on26_connect ( PIA *pi )
+
+{       int	x;
+
+	pi->saved_r0 = r0();
+        pi->saved_r2 = r2();
+
+        CCP(0x20);
+	x = 8; if (pi->mode) x = 9;
+
+	w0(2); P1; w0(8); P2;
+	w0(2); P1; w0(x); P2;
+}
+
+static void on26_disconnect ( PIA *pi )
+
+{       if (pi->mode >= 2) { w3(4); w3(4); w3(4); w3(4); }
+	              else { w0(4); P1; w0(4); P1; }
+	CCP(0x30);
+        w0(pi->saved_r0);
+        w2(pi->saved_r2);
+} 
+
+#define	RESET_WAIT  200
+
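+/* Gather a 12-bit signature from three nibble reads made during the
+   CCP wake-up sequence; 0xb5f identifies a responding 90c26.  The
+   chip and drive are then hard-reset, and the status register polled
+   for master and slave in turn until neither reports busy (up to
+   RESET_WAIT tries, 100 ms apart). */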
+static int on26_test_port( PIA *pi)  /* hard reset */
+
+{       int     i, m, d, x=0, y=0;
+
+        pi->saved_r0 = r0();
+        pi->saved_r2 = r2();
+
+        d = pi->delay;
+        m = pi->mode;
+        pi->delay = 5;
+        pi->mode = 0;
+
+        w2(0xc);
+
+        CCP(0x30); CCP(0); 
+
+        w0(0xfe);w0(0xaa);w0(0x55);w0(0);w0(0xff);
+        i = ((r1() & 0xf0) << 4); w0(0x87);
+        i |= (r1() & 0xf0); w0(0x78);
+        w0(0x20);w2(4);w2(5);
+        i |= ((r1() & 0xf0) >> 4);
+        w2(4);w0(0xff);
+
+        if (i == 0xb5f) {
+
+            w0(2); P1; w0(0);   P2;
+            w0(3); P1; w0(0);   P2;
+            w0(2); P1; w0(8);   P2; udelay(100);
+            w0(2); P1; w0(0xa); P2; udelay(100);
+            w0(2); P1; w0(8);   P2; udelay(1000);
+            
+            on26_write_regr(pi,0,6,0xa0);
+
+            for (i=0;i<RESET_WAIT;i++) {
+                on26_write_regr(pi,0,6,0xa0);
+                x = on26_read_regr(pi,0,7);
+                on26_write_regr(pi,0,6,0xb0);
+                y = on26_read_regr(pi,0,7);
+                if (!((x&0x80)||(y&0x80))) break;
+                mdelay(100);
+            }
+
+	    if (i == RESET_WAIT) 
+		printk("on26: Device reset failed (%x,%x)\n",x,y);
+
+            w0(4); P1; w0(4); P1;
+        }
+
+        CCP(0x30);
+
+        pi->delay = d;
+        pi->mode = m;
+        w0(pi->saved_r0);
+        w2(pi->saved_r2);
+
+        return 5;
+}
+
+
+static void on26_read_block( PIA *pi, char * buf, int count )
+
+{       int     k, a, b;
+
+        switch (pi->mode) {
+
+        case 0: w0(1); P1; w0(1); P2; w0(2); P1; w0(0x18); P2; w0(0); P1;
+		udelay(10);
+		for (k=0;k<count;k++) {
+                        w2(6); a = r1();
+                        w2(4); b = r1();
+                        buf[k] = j44(a,b);
+                }
+		w0(2); P1; w0(8); P2; 
+                break;
+
+        case 1: w0(1); P1; w0(1); P2; w0(2); P1; w0(0x19); P2; w0(0); P1;
+		udelay(10);
+                for (k=0;k<count/2;k++) {
+                        w2(0x26); buf[2*k] = r0();  
+			w2(0x24); buf[2*k+1] = r0();
+                }
+                w0(2); P1; w0(9); P2;
+                break;
+
+        case 2: w3(1); w3(1); w2(5); w4(1); w2(4);
+		w3(0); w3(0); w2(0x24);
+		udelay(10);
+                for (k=0;k<count;k++) buf[k] = r4();
+                w2(4);
+                break;
+
+        case 3: w3(1); w3(1); w2(5); w4(1); w2(4);
+                w3(0); w3(0); w2(0x24);
+                udelay(10);
+                for (k=0;k<count/2;k++) ((u16 *)buf)[k] = r4w();
+                w2(4);
+                break;
+
+        case 4: w3(1); w3(1); w2(5); w4(1); w2(4);
+                w3(0); w3(0); w2(0x24);
+                udelay(10);
+                for (k=0;k<count/4;k++) ((u32 *)buf)[k] = r4l();
+                w2(4);
+                break;
+
+        }
+}
+
+static void on26_write_block( PIA *pi, char * buf, int count )
+
+{       int	k;
+
+        switch (pi->mode) {
+
+        case 0: 
+        case 1: w0(1); P1; w0(1); P2; 
+		w0(2); P1; w0(0x18+pi->mode); P2; w0(0); P1;
+		udelay(10);
+		for (k=0;k<count/2;k++) {
+                        w2(5); w0(buf[2*k]); 
+			w2(7); w0(buf[2*k+1]);
+                }
+                w2(5); w2(4);
+		w0(2); P1; w0(8+pi->mode); P2;
+                break;
+
+        case 2: w3(1); w3(1); w2(5); w4(1); w2(4);
+		w3(0); w3(0); w2(0xc5);
+		udelay(10);
+                for (k=0;k<count;k++) w4(buf[k]);
+		w2(0xc4);
+                break;
+
+        case 3: w3(1); w3(1); w2(5); w4(1); w2(4);
+                w3(0); w3(0); w2(0xc5);
+                udelay(10);
+                for (k=0;k<count/2;k++) w4w(((u16 *)buf)[k]);
+                w2(0xc4);
+                break;
+
+        case 4: w3(1); w3(1); w2(5); w4(1); w2(4);
+                w3(0); w3(0); w2(0xc5);
+                udelay(10);
+                for (k=0;k<count/4;k++) w4l(((u32 *)buf)[k]);
+                w2(0xc4);
+                break;
+
+        }
+
+}
+
+static void on26_log_adapter( PIA *pi, char * scratch, int verbose )
+
+{       char    *mode_string[5] = {"4-bit","8-bit","EPP-8",
+				   "EPP-16","EPP-32"};
+
+        printk("%s: on26 %s, OnSpec 90c26 at 0x%x, ",
+                pi->device,ON26_VERSION,pi->port);
+        printk("mode %d (%s), delay %d\n",pi->mode,
+		mode_string[pi->mode],pi->delay);
+
+}
+
+static struct pi_protocol on26 = {
+	.owner		= THIS_MODULE,
+	.name		= "on26",
+	.max_mode	= 5,
+	.epp_first	= 2,
+	.default_delay	= 1,
+	.max_units	= 1,
+	.write_regr	= on26_write_regr,
+	.read_regr	= on26_read_regr,
+	.write_block	= on26_write_block,
+	.read_block	= on26_read_block,
+	.connect	= on26_connect,
+	.disconnect	= on26_disconnect,
+	.test_port	= on26_test_port,
+	.log_adapter	= on26_log_adapter,
+};
+
+static int __init on26_init(void)
+{
+	return pi_register(&on26)-1;
+}
+
+static void __exit on26_exit(void)
+{
+	pi_unregister(&on26);
+}
+
+MODULE_LICENSE("GPL");
+module_init(on26_init)
+module_exit(on26_exit)
diff --git a/drivers/block/paride/paride.c b/drivers/block/paride/paride.c
new file mode 100644
index 0000000..1fef136
--- /dev/null
+++ b/drivers/block/paride/paride.c
@@ -0,0 +1,467 @@
+/* 
+        paride.c  (c) 1997-8  Grant R. Guenther <grant@torque.net>
+                              Under the terms of the GNU General Public License.
+
+	This is the base module for the family of device drivers
+        that support parallel port IDE devices.  
+
+*/
+
+/* Changes:
+
+	1.01	GRG 1998.05.03	Use spinlocks
+	1.02	GRG 1998.05.05  init_proto, release_proto, ktti
+	1.03	GRG 1998.08.15  eliminate compiler warning
+	1.04    GRG 1998.11.28  added support for FRIQ 
+	1.05    TMW 2000.06.06  use parport_find_number instead of
+				parport_enumerate
+	1.06    TMW 2001.03.26  more sane parport-or-not resource management
+*/
+
+#define PI_VERSION      "1.06"
+
+#include <linux/module.h>
+#include <linux/config.h>
+#include <linux/kmod.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <linux/string.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+
+#ifdef CONFIG_PARPORT_MODULE
+#define CONFIG_PARPORT
+#endif
+
+#ifdef CONFIG_PARPORT
+#include <linux/parport.h>
+#endif
+
+#include "paride.h"
+
+MODULE_LICENSE("GPL");
+
+#define MAX_PROTOS	32
+
+static struct pi_protocol *protocols[MAX_PROTOS];
+
+static DEFINE_SPINLOCK(pi_spinlock);
+
+void pi_write_regr(PIA * pi, int cont, int regr, int val)
+{
+	pi->proto->write_regr(pi, cont, regr, val);
+}
+
+EXPORT_SYMBOL(pi_write_regr);
+
+int pi_read_regr(PIA * pi, int cont, int regr)
+{
+	return pi->proto->read_regr(pi, cont, regr);
+}
+
+EXPORT_SYMBOL(pi_read_regr);
+
+void pi_write_block(PIA * pi, char *buf, int count)
+{
+	pi->proto->write_block(pi, buf, count);
+}
+
+EXPORT_SYMBOL(pi_write_block);
+
+void pi_read_block(PIA * pi, char *buf, int count)
+{
+	pi->proto->read_block(pi, buf, count);
+}
+
+EXPORT_SYMBOL(pi_read_block);
+
+#ifdef CONFIG_PARPORT
+
+static void pi_wake_up(void *p)
+{
+	PIA *pi = (PIA *) p;
+	unsigned long flags;
+	void (*cont) (void) = NULL;
+
+	spin_lock_irqsave(&pi_spinlock, flags);
+
+	if (pi->claim_cont && !parport_claim(pi->pardev)) {
+		cont = pi->claim_cont;
+		pi->claim_cont = NULL;
+		pi->claimed = 1;
+	}
+
+	spin_unlock_irqrestore(&pi_spinlock, flags);
+
+	wake_up(&(pi->parq));
+
+	if (cont)
+		cont();
+}
+
+#endif
+
+int pi_schedule_claimed(PIA * pi, void (*cont) (void))
+{
+#ifdef CONFIG_PARPORT
+	unsigned long flags;
+
+	spin_lock_irqsave(&pi_spinlock, flags);
+	if (pi->pardev && parport_claim(pi->pardev)) {
+		pi->claim_cont = cont;
+		spin_unlock_irqrestore(&pi_spinlock, flags);
+		return 0;
+	}
+	pi->claimed = 1;
+	spin_unlock_irqrestore(&pi_spinlock, flags);
+#endif
+	return 1;
+}
+EXPORT_SYMBOL(pi_schedule_claimed);
+
+void pi_do_claimed(PIA * pi, void (*cont) (void))
+{
+	if (pi_schedule_claimed(pi, cont))
+		cont();
+}
+
+EXPORT_SYMBOL(pi_do_claimed);
+
+static void pi_claim(PIA * pi)
+{
+	if (pi->claimed)
+		return;
+	pi->claimed = 1;
+#ifdef CONFIG_PARPORT
+	if (pi->pardev)
+		wait_event(pi->parq,
+			   !parport_claim((struct pardevice *) pi->pardev));
+#endif
+}
+
+static void pi_unclaim(PIA * pi)
+{
+	pi->claimed = 0;
+#ifdef CONFIG_PARPORT
+	if (pi->pardev)
+		parport_release((struct pardevice *) (pi->pardev));
+#endif
+}
+
+void pi_connect(PIA * pi)
+{
+	pi_claim(pi);
+	pi->proto->connect(pi);
+}
+
+EXPORT_SYMBOL(pi_connect);
+
+void pi_disconnect(PIA * pi)
+{
+	pi->proto->disconnect(pi);
+	pi_unclaim(pi);
+}
+
+EXPORT_SYMBOL(pi_disconnect);
+
+static void pi_unregister_parport(PIA * pi)
+{
+#ifdef CONFIG_PARPORT
+	if (pi->pardev) {
+		parport_unregister_device((struct pardevice *) (pi->pardev));
+		pi->pardev = NULL;
+	}
+#endif
+}
+
+void pi_release(PIA * pi)
+{
+	pi_unregister_parport(pi);
+#ifndef CONFIG_PARPORT
+	if (pi->reserved)
+		release_region(pi->port, pi->reserved);
+#endif				/* !CONFIG_PARPORT */
+	if (pi->proto->release_proto)
+		pi->proto->release_proto(pi);
+	module_put(pi->proto->owner);
+}
+
+EXPORT_SYMBOL(pi_release);
+
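+/* Default protocol sanity test: for the master (0xa0) and slave (0xb0)
+   drive selects, write complementary patterns to IDE registers 2 and 3
+   and count read-back mismatches per selection; the probe fails only
+   when both selections show errors. */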
+static int default_test_proto(PIA * pi, char *scratch, int verbose)
+{
+	int j, k;
+	int e[2] = { 0, 0 };
+
+	pi->proto->connect(pi);
+
+	for (j = 0; j < 2; j++) {
+		pi_write_regr(pi, 0, 6, 0xa0 + j * 0x10);
+		for (k = 0; k < 256; k++) {
+			pi_write_regr(pi, 0, 2, k ^ 0xaa);
+			pi_write_regr(pi, 0, 3, k ^ 0x55);
+			if (pi_read_regr(pi, 0, 2) != (k ^ 0xaa))
+				e[j]++;
+		}
+	}
+	pi->proto->disconnect(pi);
+
+	if (verbose)
+		printk("%s: %s: port 0x%x, mode  %d, test=(%d,%d)\n",
+		       pi->device, pi->proto->name, pi->port,
+		       pi->mode, e[0], e[1]);
+
+	return (e[0] && e[1]);	/* fail only if both drive selects saw errors */
+}
+
+static int pi_test_proto(PIA * pi, char *scratch, int verbose)
+{
+	int res;
+
+	pi_claim(pi);
+	if (pi->proto->test_proto)
+		res = pi->proto->test_proto(pi, scratch, verbose);
+	else
+		res = default_test_proto(pi, scratch, verbose);
+	pi_unclaim(pi);
+
+	return res;
+}
+
+int pi_register(PIP * pr)
+{
+	int k;
+
+	for (k = 0; k < MAX_PROTOS; k++)
+		if (protocols[k] && !strcmp(pr->name, protocols[k]->name)) {
+			printk("paride: %s protocol already registered\n",
+			       pr->name);
+			return 0;
+		}
+	k = 0;
+	while ((k < MAX_PROTOS) && (protocols[k]))
+		k++;
+	if (k == MAX_PROTOS) {
+		printk("paride: protocol table full\n");
+		return 0;
+	}
+	protocols[k] = pr;
+	pr->index = k;
+	printk("paride: %s registered as protocol %d\n", pr->name, k);
+	return 1;
+}
+
+EXPORT_SYMBOL(pi_register);
+
+void pi_unregister(PIP * pr)
+{
+	if (!pr)
+		return;
+	if (protocols[pr->index] != pr) {
+		printk("paride: %s not registered\n", pr->name);
+		return;
+	}
+	protocols[pr->index] = NULL;
+}
+
+EXPORT_SYMBOL(pi_unregister);
+
+static int pi_register_parport(PIA * pi, int verbose)
+{
+#ifdef CONFIG_PARPORT
+
+	struct parport *port;
+
+	port = parport_find_base(pi->port);
+	if (!port)
+		return 0;
+
+	pi->pardev = parport_register_device(port,
+					     pi->device, NULL,
+					     pi_wake_up, NULL, 0, (void *) pi);
+	parport_put_port(port);
+	if (!pi->pardev)
+		return 0;
+
+	init_waitqueue_head(&pi->parq);
+
+	if (verbose)
+		printk("%s: 0x%x is %s\n", pi->device, pi->port, port->name);
+
+	pi->parname = (char *) port->name;
+#endif
+
+	return 1;
+}
+
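+/* Try transfer modes and record the port span each needs: non-EPP
+   modes reserve 3 ports, while EPP modes (mode >= epp_first) reserve 8
+   and therefore require an 8-aligned base address.  With mode == -1,
+   every mode up to max is tested and the best working one is kept. */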
+static int pi_probe_mode(PIA * pi, int max, char *scratch, int verbose)
+{
+	int best, range;
+
+	if (pi->mode != -1) {
+		if (pi->mode >= max)
+			return 0;
+		range = 3;
+		if (pi->mode >= pi->proto->epp_first)
+			range = 8;
+		if ((range == 8) && (pi->port % 8))
+			return 0;
+		pi->reserved = range;
+		return (!pi_test_proto(pi, scratch, verbose));
+	}
+	best = -1;
+	for (pi->mode = 0; pi->mode < max; pi->mode++) {
+		range = 3;
+		if (pi->mode >= pi->proto->epp_first)
+			range = 8;
+		if ((range == 8) && (pi->port % 8))
+			break;
+		pi->reserved = range;
+		if (!pi_test_proto(pi, scratch, verbose))
+			best = pi->mode;
+	}
+	pi->mode = best;
+	return (best > -1);
+}
+
+static int pi_probe_unit(PIA * pi, int unit, char *scratch, int verbose)
+{
+	int max, s, e;
+
+	s = unit;
+	e = s + 1;
+
+	if (s == -1) {
+		s = 0;
+		e = pi->proto->max_units;
+	}
+
+	if (!pi_register_parport(pi, verbose))
+		return 0;
+
+	if (pi->proto->test_port) {
+		pi_claim(pi);
+		max = pi->proto->test_port(pi);
+		pi_unclaim(pi);
+	} else
+		max = pi->proto->max_mode;
+
+	if (pi->proto->probe_unit) {
+		pi_claim(pi);
+		for (pi->unit = s; pi->unit < e; pi->unit++)
+			if (pi->proto->probe_unit(pi)) {
+				pi_unclaim(pi);
+				if (pi_probe_mode(pi, max, scratch, verbose))
+					return 1;
+				pi_unregister_parport(pi);
+				return 0;
+			}
+		pi_unclaim(pi);
+		pi_unregister_parport(pi);
+		return 0;
+	}
+
+	if (!pi_probe_mode(pi, max, scratch, verbose)) {
+		pi_unregister_parport(pi);
+		return 0;
+	}
+	return 1;
+
+}
+
+int pi_init(PIA * pi, int autoprobe, int port, int mode,
+	int unit, int protocol, int delay, char *scratch,
+	int devtype, int verbose, char *device)
+{
+	int p, k, s, e;
+	int lpts[7] = { 0x3bc, 0x378, 0x278, 0x268, 0x27c, 0x26c, 0 };
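+	/* zero-terminated list of legacy LPT base addresses probed,
+	   in order, when no explicit port (port == -1) was given */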
+
+	s = protocol;
+	e = s + 1;
+
+	if (!protocols[0])
+		request_module("paride_protocol");
+
+	if (autoprobe) {
+		s = 0;
+		e = MAX_PROTOS;
+	} else if ((s < 0) || (s >= MAX_PROTOS) || (port <= 0) ||
+		   (!protocols[s]) || (unit < 0) ||
+		   (unit >= protocols[s]->max_units)) {
+		printk("%s: Invalid parameters\n", device);
+		return 0;
+	}
+
+	for (p = s; p < e; p++) {
+		struct pi_protocol *proto = protocols[p];
+		if (!proto)
+			continue;
+		/* still racy */
+		if (!try_module_get(proto->owner))
+			continue;
+		pi->proto = proto;
+		pi->private = 0;
+		if (proto->init_proto && proto->init_proto(pi) < 0) {
+			pi->proto = NULL;
+			module_put(proto->owner);
+			continue;
+		}
+		if (delay == -1)
+			pi->delay = pi->proto->default_delay;
+		else
+			pi->delay = delay;
+		pi->devtype = devtype;
+		pi->device = device;
+
+		pi->parname = NULL;
+		pi->pardev = NULL;
+		init_waitqueue_head(&pi->parq);
+		pi->claimed = 0;
+		pi->claim_cont = NULL;
+
+		pi->mode = mode;
+		if (port != -1) {
+			pi->port = port;
+			if (pi_probe_unit(pi, unit, scratch, verbose))
+				break;
+			pi->port = 0;
+		} else {
+			k = 0;
+			while ((pi->port = lpts[k++]))
+				if (pi_probe_unit
+				    (pi, unit, scratch, verbose))
+					break;
+			if (pi->port)
+				break;
+		}
+		if (pi->proto->release_proto)
+			pi->proto->release_proto(pi);
+		module_put(proto->owner);
+	}
+
+	if (!pi->port) {
+		if (autoprobe)
+			printk("%s: Autoprobe failed\n", device);
+		else
+			printk("%s: Adapter not found\n", device);
+		return 0;
+	}
+#ifndef CONFIG_PARPORT
+	if (!request_region(pi->port, pi->reserved, pi->device)) {
+		printk(KERN_WARNING "paride: Unable to request region 0x%x\n",
+		       pi->port);
+		return 0;
+	}
+#endif				/* !CONFIG_PARPORT */
+
+	if (pi->parname)
+		printk("%s: Sharing %s at 0x%x\n", pi->device,
+		       pi->parname, pi->port);
+
+	pi->proto->log_adapter(pi, scratch, verbose);
+
+	return 1;
+}
+
+EXPORT_SYMBOL(pi_init);
diff --git a/drivers/block/paride/paride.h b/drivers/block/paride/paride.h
new file mode 100644
index 0000000..c6d98ef
--- /dev/null
+++ b/drivers/block/paride/paride.h
@@ -0,0 +1,170 @@
+#ifndef __DRIVERS_PARIDE_H__
+#define __DRIVERS_PARIDE_H__
+
+/* 
+	paride.h	(c) 1997-8  Grant R. Guenther <grant@torque.net>
+   		                    Under the terms of the GPL.
+
+   This file defines the interface between the high-level parallel
+   IDE device drivers (pd, pf, pcd, pt) and the adapter chips.
+
+*/
+
+/* Changes:
+
+	1.01	GRG 1998.05.05	init_proto, release_proto
+*/
+
+#define PARIDE_H_VERSION 	"1.01"
+
+/* Some adapters need to know what kind of device they are in
+
+   Values for devtype:
+*/
+
+#define	PI_PD	0	/* IDE disk */
+#define PI_PCD	1	/* ATAPI CDrom */
+#define PI_PF   2	/* ATAPI disk */
+#define PI_PT	3	/* ATAPI tape */
+#define PI_PG   4       /* ATAPI generic */
+
+/* The paride module contains no state; instead, the drivers allocate
+   a pi_adapter data structure and pass it to paride in every operation.
+
+*/
+
+struct pi_adapter  {
+
+	struct pi_protocol *proto;   /* adapter protocol */
+	int	port;		     /* base address of parallel port */
+	int	mode;		     /* transfer mode in use */
+	int     delay;		     /* adapter delay setting */
+	int	devtype;	     /* device type: PI_PD etc. */
+	char    *device;	     /* name of driver */
+	int     unit;		     /* unit number for chained adapters */
+	int	saved_r0;	     /* saved port state */
+	int	saved_r2;	     /* saved port state */
+	int	reserved;	     /* number of ports reserved */
+	unsigned long	private;     /* for protocol module */
+
+	wait_queue_head_t parq;     /* semaphore for parport sharing */
+	void	*pardev;	     /* pointer to pardevice */
+	char	*parname;	     /* parport name */
+	int	claimed;	     /* parport has already been claimed */
+	void (*claim_cont)(void);    /* continuation for parport wait */
+};
+
+typedef struct pi_adapter PIA;
+
+/* functions exported by paride to the high level drivers */
+
+extern int pi_init(PIA *pi, 
+	int autoprobe,		/* 1 to autoprobe */
+	int port, 		/* base port address */
+	int mode, 		/* -1 for autoprobe */
+	int unit,		/* unit number, if supported */
+	int protocol, 		/* protocol to use */
+	int delay, 		/* -1 to use adapter specific default */
+	char * scratch, 	/* address of 512 byte buffer */
+	int devtype,		/* device type: PI_PD, PI_PCD, etc ... */
+	int verbose,		/* log verbose data while probing */
+	char *device		/* name of the driver */
+	);			/* returns 0 on failure, 1 on success */
+
+extern void pi_release(PIA *pi);
+
+/* registers are addressed as (cont,regr)
+
+       	cont: 0 for command register file, 1 for control register(s)
+	regr: 0-7 for register number.
+
+*/
+
+extern void pi_write_regr(PIA *pi, int cont, int regr, int val);
+
+extern int pi_read_regr(PIA *pi, int cont, int regr);
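+
+/* For illustration: pcd.c reads the IDE alternate status register as
+   pi_read_regr(pi, 1, 6) and selects a drive with
+   pi_write_regr(pi, 0, 6, 0xa0 + 0x10 * drive). */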
+
+extern void pi_write_block(PIA *pi, char * buf, int count);
+
+extern void pi_read_block(PIA *pi, char * buf, int count);
+
+extern void pi_connect(PIA *pi);
+
+extern void pi_disconnect(PIA *pi);
+
+extern void pi_do_claimed(PIA *pi, void (*cont)(void));
+extern int pi_schedule_claimed(PIA *pi, void (*cont)(void));
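+
+/* pi_do_claimed() runs "cont" exactly once the parallel port has been
+   claimed: immediately if the claim succeeds, otherwise later from the
+   parport wake-up callback.  The high-level drivers use it for every
+   deferred transfer step, e.g. pcd.c's pi_do_claimed(pi, pcd_start). */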
+
+/* macros and functions exported to the protocol modules */
+
+#define delay_p			(pi->delay?udelay(pi->delay):(void)0)
+#define out_p(offs,byte)	outb(byte,pi->port+offs); delay_p;
+#define in_p(offs)		(delay_p,inb(pi->port+offs))
+
+#define w0(byte)                {out_p(0,byte);}
+#define r0()                    (in_p(0) & 0xff)
+#define w1(byte)                {out_p(1,byte);}
+#define r1()                    (in_p(1) & 0xff)
+#define w2(byte)                {out_p(2,byte);}
+#define r2()                    (in_p(2) & 0xff)
+#define w3(byte)                {out_p(3,byte);}
+#define w4(byte)                {out_p(4,byte);}
+#define r4()                    (in_p(4) & 0xff)
+#define w4w(data)     		{outw(data,pi->port+4); delay_p;}
+#define w4l(data)     		{outl(data,pi->port+4); delay_p;}
+#define r4w()         		(delay_p,inw(pi->port+4)&0xffff)
+#define r4l()         		(delay_p,inl(pi->port+4)&0xffffffff)
+
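+/* Byte-swap helpers for EPP-16/EPP-32 writes on adapters that expect
+   the data port fed high byte first (see kbic_write_block modes 4-5). */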
+static inline u16 pi_swab16( char *b, int k)
+
+{ 	union { u16 u; char t[2]; } r;
+
+	r.t[0]=b[2*k+1]; r.t[1]=b[2*k];
+        return r.u;
+}
+
+static inline u32 pi_swab32( char *b, int k)
+
+{ 	union { u32 u; char f[4]; } r;
+
+	r.f[0]=b[4*k+1]; r.f[1]=b[4*k];
+	r.f[2]=b[4*k+3]; r.f[3]=b[4*k+2];
+        return r.u;
+}
+
+struct pi_protocol {
+
+	char	name[8];	/* name for this protocol */
+	int	index;		/* index into protocol table */
+
+	int	max_mode;	/* max mode number */
+	int	epp_first;	/* modes >= this use 8 ports */
+	
+	int	default_delay;  /* delay parameter if not specified */
+	int	max_units;	/* max chained units probed for */
+
+	void (*write_regr)(PIA *,int,int,int);
+	int  (*read_regr)(PIA *,int,int);
+	void (*write_block)(PIA *,char *,int);
+	void (*read_block)(PIA *,char *,int);
+
+	void (*connect)(PIA *);
+	void (*disconnect)(PIA *);
+	
+	int  (*test_port)(PIA *);
+	int  (*probe_unit)(PIA *);
+	int  (*test_proto)(PIA *,char *,int);
+	void (*log_adapter)(PIA *,char *,int);
+	
+	int (*init_proto)(PIA *);
+	void (*release_proto)(PIA *);
+	struct module *owner;
+};
+
+typedef struct pi_protocol PIP;
+
+extern int pi_register( PIP * );
+extern void pi_unregister ( PIP * );
+
+#endif /* __DRIVERS_PARIDE_H__ */
+/* end of paride.h */
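To make the protocol contract above concrete, here is a minimal sketch of a
new protocol module written against this header, modeled on ktti.c elsewhere
in this patch.  The name "myproto" and the stubbed transfer routines are
illustrative placeholders, not code from the tree:

	#include <linux/module.h>
	#include <linux/init.h>
	#include <linux/kernel.h>
	#include <linux/delay.h>
	#include <asm/io.h>

	#include "paride.h"

	/* a hypothetical 4-bit-only adapter: one mode, one unit */

	static void myproto_write_regr(PIA *pi, int cont, int regr, int val)
	{
		/* adapter-specific port wiggling via w0()/w2() goes here */
	}

	static int myproto_read_regr(PIA *pi, int cont, int regr)
	{
		/* typically two nibble reads merged with a j44()-style macro */
		return 0;
	}

	static void myproto_write_block(PIA *pi, char *buf, int count)
	{
		/* repeated write cycles over the data port */
	}

	static void myproto_read_block(PIA *pi, char *buf, int count)
	{
		/* repeated read cycles over the data port */
	}

	static void myproto_connect(PIA *pi)
	{
		pi->saved_r0 = r0();	/* save port state, as every protocol does */
		pi->saved_r2 = r2();
	}

	static void myproto_disconnect(PIA *pi)
	{
		w0(pi->saved_r0);	/* restore the saved port state */
		w2(pi->saved_r2);
	}

	static void myproto_log_adapter(PIA *pi, char *scratch, int verbose)
	{
		printk("%s: myproto, adapter at 0x%x, delay %d\n",
		       pi->device, pi->port, pi->delay);
	}

	static struct pi_protocol myproto = {
		.owner		= THIS_MODULE,
		.name		= "myproto",
		.max_mode	= 1,
		.epp_first	= 2,	/* no EPP modes, so never reached */
		.default_delay	= 1,
		.max_units	= 1,
		.write_regr	= myproto_write_regr,
		.read_regr	= myproto_read_regr,
		.write_block	= myproto_write_block,
		.read_block	= myproto_read_block,
		.connect	= myproto_connect,
		.disconnect	= myproto_disconnect,
		.log_adapter	= myproto_log_adapter,
	};

	static int __init myproto_init(void)
	{
		return pi_register(&myproto) - 1;	/* 0 on success, -1 on failure */
	}

	static void __exit myproto_exit(void)
	{
		pi_unregister(&myproto);
	}

	MODULE_LICENSE("GPL");
	module_init(myproto_init)
	module_exit(myproto_exit)

A module of this shape is then selected from the high-level drivers through
their <pro> protocol-number parameter.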
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
new file mode 100644
index 0000000..7289f67
--- /dev/null
+++ b/drivers/block/paride/pcd.c
@@ -0,0 +1,971 @@
+/* 
+	pcd.c	(c) 1997-8  Grant R. Guenther <grant@torque.net>
+		            Under the terms of the GNU General Public License.
+
+	This is a high-level driver for parallel port ATAPI CD-ROM
+        drives based on chips supported by the paride module.
+
+        By default, the driver will autoprobe for a single parallel
+        port ATAPI CD-ROM drive, but if the drives' individual
+        parameters are specified, it can handle up to 4 drives.
+
+        The behaviour of the pcd driver can be altered by setting
+        some parameters from the insmod command line.  The following
+        parameters are adjustable:
+
+            drive0      These four arguments can be arrays of       
+            drive1      1-6 integers as follows:
+            drive2
+            drive3      <prt>,<pro>,<uni>,<mod>,<slv>,<dly>
+
+                        Where,
+
+                <prt>   is the base of the parallel port address for
+                        the corresponding drive.  (required)
+
+                <pro>   is the protocol number for the adapter that
+                        supports this drive.  These numbers are
+                        logged by 'paride' when the protocol modules
+                        are initialised.  (0 if not given)
+
+                <uni>   for those adapters that support chained
+                        devices, this is the unit selector for the
+                        chain of devices on the given port.  It should
+                        be zero for devices that don't support chaining.
+                        (0 if not given)
+
+                <mod>   this can be -1 to choose the best mode, or one
+                        of the mode numbers supported by the adapter.
+                        (-1 if not given)
+
+		<slv>   ATAPI CD-ROMs can be jumpered to master or slave.
+			Set this to 0 to choose the master drive, 1 to
+                        choose the slave, -1 (the default) to choose the
+			first drive found.
+
+                <dly>   some parallel ports require the driver to 
+                        go more slowly.  -1 sets a default value that
+                        should work with the chosen protocol.  Otherwise,
+                        set this to a small integer; the larger it is,
+                        the slower the port i/o.  In some cases, setting
+                        this to zero will speed up the device. (default -1)
+                        
+            major       You may use this parameter to override the
+                        default major number (46) that this driver
+                        will use.  Be sure to change the device
+                        name as well.
+
+            name        This parameter is a character string that
+                        contains the name the kernel will use for this
+                        device (in /proc output, for instance).
+                        (default "pcd")
+
+            verbose     This parameter controls the amount of logging
+                        that the driver will do.  Set it to 0 for
+                        normal operation, 1 to see autoprobe progress
+                        messages, or 2 to see additional debugging
+                        output.  (default 0)
+  
+            nice        This parameter controls the driver's use of
+                        idle CPU time, at the expense of some speed.
+ 
+	If this driver is built into the kernel, you can use the
+        following kernel command line parameters, with the same values
+        as the corresponding module parameters listed above:
+
+	    pcd.drive0
+	    pcd.drive1
+	    pcd.drive2
+	    pcd.drive3
+	    pcd.nice
+
+        In addition, you can use the parameter pcd.disable to disable
+        the driver entirely.
+
+*/
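+
+/* Example (illustrative values only): a single master drive on a
+   parallel port at 0x378, using protocol 0, unit 0, and the default
+   mode and delay, could be loaded as
+
+       insmod pcd drive0=0x378,0,0,-1,0,-1
+*/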
+
+/* Changes:
+
+	1.01	GRG 1998.01.24	Added test unit ready support
+	1.02    GRG 1998.05.06  Changes to pcd_completion, ready_wait,
+				and loosen interpretation of ATAPI
+			        standard for clearing error status.
+				Use spinlocks. Eliminate sti().
+	1.03    GRG 1998.06.16  Eliminated an Ugh
+	1.04	GRG 1998.08.15  Added extra debugging, improvements to
+				pcd_completion, use HZ in loop timing
+	1.05	GRG 1998.08.16	Conformed to "Uniform CD-ROM" standard
+	1.06    GRG 1998.08.19  Added audio ioctl support
+	1.07    GRG 1998.09.24  Increased reset timeout, added jumbo support
+
+*/
+
+#define	PCD_VERSION	"1.07"
+#define PCD_MAJOR	46
+#define PCD_NAME	"pcd"
+#define PCD_UNITS	4
+
+/* Here are things one can override from the insmod command.
+   Most are autoprobed by paride unless set here.  Verbose is off
+   by default.
+
+*/
+
+static int verbose = 0;
+static int major = PCD_MAJOR;
+static char *name = PCD_NAME;
+static int nice = 0;
+static int disable = 0;
+
+static int drive0[6] = { 0, 0, 0, -1, -1, -1 };
+static int drive1[6] = { 0, 0, 0, -1, -1, -1 };
+static int drive2[6] = { 0, 0, 0, -1, -1, -1 };
+static int drive3[6] = { 0, 0, 0, -1, -1, -1 };
+
+static int (*drives[4])[6] = {&drive0, &drive1, &drive2, &drive3};
+static int pcd_drive_count;
+
+enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_DLY};
+
+/* end of parameters */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/cdrom.h>
+#include <linux/spinlock.h>
+#include <linux/blkdev.h>
+#include <asm/uaccess.h>
+
+static DEFINE_SPINLOCK(pcd_lock);
+
+module_param(verbose, bool, 0644);
+module_param(major, int, 0);
+module_param(name, charp, 0);
+module_param(nice, int, 0);
+module_param_array(drive0, int, NULL, 0);
+module_param_array(drive1, int, NULL, 0);
+module_param_array(drive2, int, NULL, 0);
+module_param_array(drive3, int, NULL, 0);
+
+#include "paride.h"
+#include "pseudo.h"
+
+#define PCD_RETRIES	     5
+#define PCD_TMO		   800	/* timeout in jiffies */
+#define PCD_DELAY           50	/* spin delay in uS */
+#define PCD_READY_TMO	    20	/* in seconds */
+#define PCD_RESET_TMO	   100	/* in tenths of a second */
+
+#define PCD_SPIN	((1000000*PCD_TMO)/(HZ*PCD_DELAY))
+
+#define IDE_ERR		0x01
+#define IDE_DRQ         0x08
+#define IDE_READY       0x40
+#define IDE_BUSY        0x80
+
+static int pcd_open(struct cdrom_device_info *cdi, int purpose);
+static void pcd_release(struct cdrom_device_info *cdi);
+static int pcd_drive_status(struct cdrom_device_info *cdi, int slot_nr);
+static int pcd_media_changed(struct cdrom_device_info *cdi, int slot_nr);
+static int pcd_tray_move(struct cdrom_device_info *cdi, int position);
+static int pcd_lock_door(struct cdrom_device_info *cdi, int lock);
+static int pcd_drive_reset(struct cdrom_device_info *cdi);
+static int pcd_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn);
+static int pcd_audio_ioctl(struct cdrom_device_info *cdi,
+			   unsigned int cmd, void *arg);
+static int pcd_packet(struct cdrom_device_info *cdi,
+		      struct packet_command *cgc);
+
+static int pcd_detect(void);
+static void pcd_probe_capabilities(void);
+static void do_pcd_read_drq(void);
+static void do_pcd_request(request_queue_t * q);
+static void do_pcd_read(void);
+
+struct pcd_unit {
+	struct pi_adapter pia;	/* interface to paride layer */
+	struct pi_adapter *pi;
+	int drive;		/* master/slave */
+	int last_sense;		/* result of last request sense */
+	int changed;		/* media change seen */
+	int present;		/* does this unit exist ? */
+	char *name;		/* pcd0, pcd1, etc */
+	struct cdrom_device_info info;	/* uniform cdrom interface */
+	struct gendisk *disk;
+};
+
+static struct pcd_unit pcd[PCD_UNITS];
+
+static char pcd_scratch[64];
+static char pcd_buffer[2048];	/* raw block buffer */
+static int pcd_bufblk = -1;	/* block in buffer, in CD units,
+				   -1 for nothing there. See also
+				   pd_unit.
+				 */
+
+/* the variables below are used mainly in the I/O request engine, which
+   processes only one request at a time.
+*/
+
+static struct pcd_unit *pcd_current; /* current request's drive */
+static struct request *pcd_req;
+static int pcd_retries;		/* retries on current request */
+static int pcd_busy;		/* request being processed ? */
+static int pcd_sector;		/* address of next requested sector */
+static int pcd_count;		/* number of blocks still to do */
+static char *pcd_buf;		/* buffer for request in progress */
+
+static int pcd_warned;		/* Have we logged a phase warning ? */
+
+/* kernel glue structures */
+
+static int pcd_block_open(struct inode *inode, struct file *file)
+{
+	struct pcd_unit *cd = inode->i_bdev->bd_disk->private_data;
+	return cdrom_open(&cd->info, inode, file);
+}
+
+static int pcd_block_release(struct inode *inode, struct file *file)
+{
+	struct pcd_unit *cd = inode->i_bdev->bd_disk->private_data;
+	return cdrom_release(&cd->info, file);
+}
+
+static int pcd_block_ioctl(struct inode *inode, struct file *file,
+				unsigned cmd, unsigned long arg)
+{
+	struct pcd_unit *cd = inode->i_bdev->bd_disk->private_data;
+	return cdrom_ioctl(file, &cd->info, inode, cmd, arg);
+}
+
+static int pcd_block_media_changed(struct gendisk *disk)
+{
+	struct pcd_unit *cd = disk->private_data;
+	return cdrom_media_changed(&cd->info);
+}
+
+static struct block_device_operations pcd_bdops = {
+	.owner		= THIS_MODULE,
+	.open		= pcd_block_open,
+	.release	= pcd_block_release,
+	.ioctl		= pcd_block_ioctl,
+	.media_changed	= pcd_block_media_changed,
+};
+
+static struct cdrom_device_ops pcd_dops = {
+	.open		= pcd_open,
+	.release	= pcd_release,
+	.drive_status	= pcd_drive_status,
+	.media_changed	= pcd_media_changed,
+	.tray_move	= pcd_tray_move,
+	.lock_door	= pcd_lock_door,
+	.get_mcn	= pcd_get_mcn,
+	.reset		= pcd_drive_reset,
+	.audio_ioctl	= pcd_audio_ioctl,
+	.generic_packet	= pcd_packet,
+	.capability	= CDC_CLOSE_TRAY | CDC_OPEN_TRAY | CDC_LOCK |
+			  CDC_MCN | CDC_MEDIA_CHANGED | CDC_RESET |
+			  CDC_PLAY_AUDIO | CDC_GENERIC_PACKET | CDC_CD_R |
+			  CDC_CD_RW,
+};
+
+static void pcd_init_units(void)
+{
+	struct pcd_unit *cd;
+	int unit;
+
+	pcd_drive_count = 0;
+	for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
+		struct gendisk *disk = alloc_disk(1);
+		if (!disk)
+			continue;
+		cd->disk = disk;
+		cd->pi = &cd->pia;
+		cd->present = 0;
+		cd->last_sense = 0;
+		cd->changed = 1;
+		cd->drive = (*drives[unit])[D_SLV];
+		if ((*drives[unit])[D_PRT])
+			pcd_drive_count++;
+
+		cd->name = &cd->info.name[0];
+		snprintf(cd->name, sizeof(cd->info.name), "%s%d", name, unit);
+		cd->info.ops = &pcd_dops;
+		cd->info.handle = cd;
+		cd->info.speed = 0;
+		cd->info.capacity = 1;
+		cd->info.mask = 0;
+		disk->major = major;
+		disk->first_minor = unit;
+		strcpy(disk->disk_name, cd->name);	/* umm... */
+		disk->fops = &pcd_bdops;
+	}
+}
+
+static int pcd_open(struct cdrom_device_info *cdi, int purpose)
+{
+	struct pcd_unit *cd = cdi->handle;
+	if (!cd->present)
+		return -ENODEV;
+	return 0;
+}
+
+static void pcd_release(struct cdrom_device_info *cdi)
+{
+}
+
+static inline int status_reg(struct pcd_unit *cd)
+{
+	return pi_read_regr(cd->pi, 1, 6);
+}
+
+static inline int read_reg(struct pcd_unit *cd, int reg)
+{
+	return pi_read_regr(cd->pi, 0, reg);
+}
+
+static inline void write_reg(struct pcd_unit *cd, int reg, int val)
+{
+	pi_write_regr(cd->pi, 0, reg, val);
+}
+
+static int pcd_wait(struct pcd_unit *cd, int go, int stop, char *fun, char *msg)
+{
+	int j, r, e, s, p;
+
+	j = 0;
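+
+	/* poll until the "go" bits clear and, if a stop mask was given,
+	   a stop bit is raised, or until PCD_SPIN polls elapse */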
+	while ((((r = status_reg(cd)) & go) || (stop && (!(r & stop))))
+	       && (j++ < PCD_SPIN))
+		udelay(PCD_DELAY);
+
+	if ((r & (IDE_ERR & stop)) || (j >= PCD_SPIN)) {
+		s = read_reg(cd, 7);
+		e = read_reg(cd, 1);
+		p = read_reg(cd, 2);
+		if (j >= PCD_SPIN)
+			e |= 0x100;
+		if (fun)
+			printk("%s: %s %s: alt=0x%x stat=0x%x err=0x%x"
+			       " loop=%d phase=%d\n",
+			       cd->name, fun, msg, r, s, e, j, p);
+		return (s << 8) + r;
+	}
+	return 0;
+}
+
+static int pcd_command(struct pcd_unit *cd, char *cmd, int dlen, char *fun)
+{
+	pi_connect(cd->pi);
+
+	write_reg(cd, 6, 0xa0 + 0x10 * cd->drive);
+
+	if (pcd_wait(cd, IDE_BUSY | IDE_DRQ, 0, fun, "before command")) {
+		pi_disconnect(cd->pi);
+		return -1;
+	}
+
+	write_reg(cd, 4, dlen % 256);
+	write_reg(cd, 5, dlen / 256);
+	write_reg(cd, 7, 0xa0);	/* ATAPI packet command */
+
+	if (pcd_wait(cd, IDE_BUSY, IDE_DRQ, fun, "command DRQ")) {
+		pi_disconnect(cd->pi);
+		return -1;
+	}
+
+	if (read_reg(cd, 2) != 1) {
+		printk("%s: %s: command phase error\n", cd->name, fun);
+		pi_disconnect(cd->pi);
+		return -1;
+	}
+
+	pi_write_block(cd->pi, cmd, 12);
+
+	return 0;
+}
+
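+/* Drain the ATAPI data phase after a command: registers 4/5 hold the
+   byte count the drive wants to move and the low bits of register 2
+   give the phase (2 = data to host); unexpected phases are logged and
+   retried after a short delay, with a stuck-DRQ guard of PCD_TMO. */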
+static int pcd_completion(struct pcd_unit *cd, char *buf, char *fun)
+{
+	int r, d, p, n, k, j;
+
+	r = -1;
+	k = 0;
+	j = 0;
+
+	if (!pcd_wait(cd, IDE_BUSY, IDE_DRQ | IDE_READY | IDE_ERR,
+		      fun, "completion")) {
+		r = 0;
+		while (read_reg(cd, 7) & IDE_DRQ) {
+			d = read_reg(cd, 4) + 256 * read_reg(cd, 5);
+			n = (d + 3) & 0xfffc;
+			p = read_reg(cd, 2) & 3;
+
+			if ((p == 2) && (n > 0) && (j == 0)) {
+				pi_read_block(cd->pi, buf, n);
+				if (verbose > 1)
+					printk("%s: %s: Read %d bytes\n",
+					       cd->name, fun, n);
+				r = 0;
+				j++;
+			} else {
+				if (verbose > 1)
+					printk
+					    ("%s: %s: Unexpected phase %d, d=%d, k=%d\n",
+					     cd->name, fun, p, d, k);
+				if ((verbose < 2) && !pcd_warned) {
+					pcd_warned = 1;
+					printk
+					    ("%s: WARNING: ATAPI phase errors\n",
+					     cd->name);
+				}
+				mdelay(1);
+			}
+			if (k++ > PCD_TMO) {
+				printk("%s: Stuck DRQ\n", cd->name);
+				break;
+			}
+			if (pcd_wait
+			    (cd, IDE_BUSY, IDE_DRQ | IDE_READY | IDE_ERR, fun,
+			     "completion")) {
+				r = -1;
+				break;
+			}
+		}
+	}
+
+	pi_disconnect(cd->pi);
+
+	return r;
+}
+
+static void pcd_req_sense(struct pcd_unit *cd, char *fun)
+{
+	char rs_cmd[12] = { 0x03, 0, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0 };
+	char buf[16];
+	int r, c;
+
+	r = pcd_command(cd, rs_cmd, 16, "Request sense");
+	mdelay(1);
+	if (!r)
+		pcd_completion(cd, buf, "Request sense");
+
+	cd->last_sense = -1;
+	c = 2;
+	if (!r) {
+		if (fun)
+			printk("%s: %s: Sense key: %x, ASC: %x, ASQ: %x\n",
+			       cd->name, fun, buf[2] & 0xf, buf[12], buf[13]);
+		c = buf[2] & 0xf;
+		cd->last_sense =
+		    c | ((buf[12] & 0xff) << 8) | ((buf[13] & 0xff) << 16);
+	}
+	if ((c == 2) || (c == 6))
+		cd->changed = 1;
+}
+
+static int pcd_atapi(struct pcd_unit *cd, char *cmd, int dlen, char *buf, char *fun)
+{
+	int r;
+
+	r = pcd_command(cd, cmd, dlen, fun);
+	mdelay(1);
+	if (!r)
+		r = pcd_completion(cd, buf, fun);
+	if (r)
+		pcd_req_sense(cd, fun);
+
+	return r;
+}
+
+static int pcd_packet(struct cdrom_device_info *cdi, struct packet_command *cgc)
+{
+	return pcd_atapi(cdi->handle, cgc->cmd, cgc->buflen, cgc->buffer,
+			 "generic packet");
+}
+
+#define DBMSG(msg)	((verbose>1)?(msg):NULL)
+
+static int pcd_media_changed(struct cdrom_device_info *cdi, int slot_nr)
+{
+	struct pcd_unit *cd = cdi->handle;
+	int res = cd->changed;
+	if (res)
+		cd->changed = 0;
+	return res;
+}
+
+static int pcd_lock_door(struct cdrom_device_info *cdi, int lock)
+{
+	char un_cmd[12] = { 0x1e, 0, 0, 0, lock, 0, 0, 0, 0, 0, 0, 0 };
+
+	return pcd_atapi(cdi->handle, un_cmd, 0, pcd_scratch,
+			 lock ? "lock door" : "unlock door");
+}
+
+static int pcd_tray_move(struct cdrom_device_info *cdi, int position)
+{
+	char ej_cmd[12] = { 0x1b, 0, 0, 0, 3 - position, 0, 0, 0, 0, 0, 0, 0 };
+
+	return pcd_atapi(cdi->handle, ej_cmd, 0, pcd_scratch,
+			 position ? "eject" : "close tray");
+}
+
+static void pcd_sleep(int cs)
+{
+	current->state = TASK_INTERRUPTIBLE;
+	schedule_timeout(cs);
+}
+
+static int pcd_reset(struct pcd_unit *cd)
+{
+	int i, k, flg;
+	int expect[5] = { 1, 1, 1, 0x14, 0xeb };
+
+	pi_connect(cd->pi);
+	write_reg(cd, 6, 0xa0 + 0x10 * cd->drive);
+	write_reg(cd, 7, 8);
+
+	pcd_sleep(20 * HZ / 1000);	/* delay a bit */
+
+	k = 0;
+	while ((k++ < PCD_RESET_TMO) && (status_reg(cd) & IDE_BUSY))
+		pcd_sleep(HZ / 10);
+
+	flg = 1;
+	for (i = 0; i < 5; i++)
+		flg &= (read_reg(cd, i + 1) == expect[i]);
+
+	if (verbose) {
+		printk("%s: Reset (%d) signature = ", cd->name, k);
+		for (i = 0; i < 5; i++)
+			printk("%3x", read_reg(cd, i + 1));
+		if (!flg)
+			printk(" (incorrect)");
+		printk("\n");
+	}
+
+	pi_disconnect(cd->pi);
+	return flg - 1;
+}
+
+static int pcd_drive_reset(struct cdrom_device_info *cdi)
+{
+	return pcd_reset(cdi->handle);
+}
+
+static int pcd_ready_wait(struct pcd_unit *cd, int tmo)
+{
+	char tr_cmd[12] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+	int k, p;
+
+	k = 0;
+	while (k < tmo) {
+		cd->last_sense = 0;
+		pcd_atapi(cd, tr_cmd, 0, NULL, DBMSG("test unit ready"));
+		p = cd->last_sense;
+		if (!p)
+			return 0;
+		if (!(((p & 0xffff) == 0x0402) || ((p & 0xff) == 6)))
+			return p;
+		k++;
+		pcd_sleep(HZ);
+	}
+	return 0x000020;	/* timeout */
+}
+
+static int pcd_drive_status(struct cdrom_device_info *cdi, int slot_nr)
+{
+	char rc_cmd[12] = { 0x25, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+	struct pcd_unit *cd = cdi->handle;
+
+	if (pcd_ready_wait(cd, PCD_READY_TMO))
+		return CDS_DRIVE_NOT_READY;
+	if (pcd_atapi(cd, rc_cmd, 8, pcd_scratch, DBMSG("check media")))
+		return CDS_NO_DISC;
+	return CDS_DISC_OK;
+}
+
+static int pcd_identify(struct pcd_unit *cd, char *id)
+{
+	int k, s;
+	char id_cmd[12] = { 0x12, 0, 0, 0, 36, 0, 0, 0, 0, 0, 0, 0 };
+
+	pcd_bufblk = -1;
+
+	s = pcd_atapi(cd, id_cmd, 36, pcd_buffer, "identify");
+
+	if (s)
+		return -1;
+	if ((pcd_buffer[0] & 0x1f) != 5) {
+		if (verbose)
+			printk("%s: %s is not a CD-ROM\n",
+			       cd->name, cd->drive ? "Slave" : "Master");
+		return -1;
+	}
+	memcpy(id, pcd_buffer + 16, 16);
+	id[16] = 0;
+	k = 16;
+	while ((k >= 0) && (id[k] <= 0x20)) {
+		id[k] = 0;
+		k--;
+	}
+
+	printk("%s: %s: %s\n", cd->name, cd->drive ? "Slave" : "Master", id);
+
+	return 0;
+}
+
+/*
+ * returns  0, with id set if drive is detected
+ *	    -1, if drive detection failed
+ */
+static int pcd_probe(struct pcd_unit *cd, int ms, char *id)
+{
+	if (ms == -1) {
+		for (cd->drive = 0; cd->drive <= 1; cd->drive++)
+			if (!pcd_reset(cd) && !pcd_identify(cd, id))
+				return 0;
+	} else {
+		cd->drive = ms;
+		if (!pcd_reset(cd) && !pcd_identify(cd, id))
+			return 0;
+	}
+	return -1;
+}
+
+static void pcd_probe_capabilities(void)
+{
+	int unit, r;
+	char buffer[32];
+	char cmd[12] = { 0x5a, 1 << 3, 0x2a, 0, 0, 0, 0, 18, 0, 0, 0, 0 };
+	struct pcd_unit *cd;
+
+	for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
+		if (!cd->present)
+			continue;
+		r = pcd_atapi(cd, cmd, 18, buffer, "mode sense capabilities");
+		if (r)
+			continue;
+		/* we should now have the cap page */
+		if ((buffer[11] & 1) == 0)
+			cd->info.mask |= CDC_CD_R;
+		if ((buffer[11] & 2) == 0)
+			cd->info.mask |= CDC_CD_RW;
+		if ((buffer[12] & 1) == 0)
+			cd->info.mask |= CDC_PLAY_AUDIO;
+		if ((buffer[14] & 1) == 0)
+			cd->info.mask |= CDC_LOCK;
+		if ((buffer[14] & 8) == 0)
+			cd->info.mask |= CDC_OPEN_TRAY;
+		if ((buffer[14] >> 6) == 0)
+			cd->info.mask |= CDC_CLOSE_TRAY;
+	}
+}
+
+static int pcd_detect(void)
+{
+	char id[18];
+	int k, unit;
+	struct pcd_unit *cd;
+
+	printk("%s: %s version %s, major %d, nice %d\n",
+	       name, name, PCD_VERSION, major, nice);
+
+	k = 0;
+	if (pcd_drive_count == 0) { /* nothing spec'd - so autoprobe for 1 */
+		cd = pcd;
+		if (pi_init(cd->pi, 1, -1, -1, -1, -1, -1, pcd_buffer,
+			    PI_PCD, verbose, cd->name)) {
+			if (!pcd_probe(cd, -1, id) && cd->disk) {
+				cd->present = 1;
+				k++;
+			} else
+				pi_release(cd->pi);
+		}
+	} else {
+		for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
+			int *conf = *drives[unit];
+			if (!conf[D_PRT])
+				continue;
+			if (!pi_init(cd->pi, 0, conf[D_PRT], conf[D_MOD],
+				     conf[D_UNI], conf[D_PRO], conf[D_DLY],
+				     pcd_buffer, PI_PCD, verbose, cd->name)) 
+				continue;
+			if (!pcd_probe(cd, conf[D_SLV], id) && cd->disk) {
+				cd->present = 1;
+				k++;
+			} else
+				pi_release(cd->pi);
+		}
+	}
+	if (k)
+		return 0;
+
+	printk("%s: No CD-ROM drive found\n", name);
+	for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++)
+		put_disk(cd->disk);
+	return -1;
+}
+
+/* I/O request processing */
+static struct request_queue *pcd_queue;
+
+static void do_pcd_request(request_queue_t * q)
+{
+	if (pcd_busy)
+		return;
+	while (1) {
+		pcd_req = elv_next_request(q);
+		if (!pcd_req)
+			return;
+
+		if (rq_data_dir(pcd_req) == READ) {
+			struct pcd_unit *cd = pcd_req->rq_disk->private_data;
+			if (cd != pcd_current)
+				pcd_bufblk = -1;
+			pcd_current = cd;
+			pcd_sector = pcd_req->sector;
+			pcd_count = pcd_req->current_nr_sectors;
+			pcd_buf = pcd_req->buffer;
+			pcd_busy = 1;
+			ps_set_intr(do_pcd_read, NULL, 0, nice);
+			return;
+		} else
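+			/* writes make no sense on a CD-ROM; fail them */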
+			end_request(pcd_req, 0);
+	}
+}
+
+static inline void next_request(int success)
+{
+	unsigned long saved_flags;
+
+	spin_lock_irqsave(&pcd_lock, saved_flags);
+	end_request(pcd_req, success);
+	pcd_busy = 0;
+	do_pcd_request(pcd_queue);
+	spin_unlock_irqrestore(&pcd_lock, saved_flags);
+}
+
+static int pcd_ready(void)
+{
+	return (((status_reg(pcd_current) & (IDE_BUSY | IDE_DRQ)) == IDE_DRQ));
+}
+
+static void pcd_transfer(void)
+{
+
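+	/* copy any 512-byte sectors of the request that fall inside the
+	   2048-byte CD block currently cached in pcd_buffer */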
+	while (pcd_count && (pcd_sector / 4 == pcd_bufblk)) {
+		int o = (pcd_sector % 4) * 512;
+		memcpy(pcd_buf, pcd_buffer + o, 512);
+		pcd_count--;
+		pcd_buf += 512;
+		pcd_sector++;
+	}
+}
+
+static void pcd_start(void)
+{
+	int b, i;
+	char rd_cmd[12] = { 0xa8, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0 };
+
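+	/* READ(12) for a single 2048-byte block: bytes 2-5 carry the
+	   big-endian block address, byte 9 the block count */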
+	pcd_bufblk = pcd_sector / 4;
+	b = pcd_bufblk;
+	for (i = 0; i < 4; i++) {
+		rd_cmd[5 - i] = b & 0xff;
+		b = b >> 8;
+	}
+
+	if (pcd_command(pcd_current, rd_cmd, 2048, "read block")) {
+		pcd_bufblk = -1;
+		next_request(0);
+		return;
+	}
+
+	mdelay(1);
+
+	ps_set_intr(do_pcd_read_drq, pcd_ready, PCD_TMO, nice);
+}
+
+static void do_pcd_read(void)
+{
+	pcd_busy = 1;
+	pcd_retries = 0;
+	pcd_transfer();
+	if (!pcd_count) {
+		next_request(1);
+		return;
+	}
+
+	pi_do_claimed(pcd_current->pi, pcd_start);
+}
+
+static void do_pcd_read_drq(void)
+{
+	unsigned long saved_flags;
+
+	if (pcd_completion(pcd_current, pcd_buffer, "read block")) {
+		if (pcd_retries < PCD_RETRIES) {
+			mdelay(1);
+			pcd_retries++;
+			pi_do_claimed(pcd_current->pi, pcd_start);
+			return;
+		}
+		pcd_bufblk = -1;
+		next_request(0);
+		return;
+	}
+
+	do_pcd_read();
+	spin_lock_irqsave(&pcd_lock, saved_flags);
+	do_pcd_request(pcd_queue);
+	spin_unlock_irqrestore(&pcd_lock, saved_flags);
+}
+
+/* the audio_ioctl stuff is adapted from sr_ioctl.c */
+
+static int pcd_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd, void *arg)
+{
+	struct pcd_unit *cd = cdi->handle;
+
+	switch (cmd) {
+
+	case CDROMREADTOCHDR:
+
+		{
+			char cmd[12] =
+			    { GPCMD_READ_TOC_PMA_ATIP, 0, 0, 0, 0, 0, 0, 0, 12,
+			 0, 0, 0 };
+			struct cdrom_tochdr *tochdr =
+			    (struct cdrom_tochdr *) arg;
+			char buffer[32];
+			int r;
+
+			r = pcd_atapi(cd, cmd, 12, buffer, "read toc header");
+
+			tochdr->cdth_trk0 = buffer[2];
+			tochdr->cdth_trk1 = buffer[3];
+
+			return r ? -EIO : 0;
+		}
+
+	case CDROMREADTOCENTRY:
+
+		{
+			char cmd[12] =
+			    { GPCMD_READ_TOC_PMA_ATIP, 0, 0, 0, 0, 0, 0, 0, 12,
+			 0, 0, 0 };
+
+			struct cdrom_tocentry *tocentry =
+			    (struct cdrom_tocentry *) arg;
+			unsigned char buffer[32];
+			int r;
+
+			cmd[1] =
+			    (tocentry->cdte_format == CDROM_MSF ? 0x02 : 0);
+			cmd[6] = tocentry->cdte_track;
+
+			r = pcd_atapi(cd, cmd, 12, buffer, "read toc entry");
+
+			tocentry->cdte_ctrl = buffer[5] & 0xf;
+			tocentry->cdte_adr = buffer[5] >> 4;
+			tocentry->cdte_datamode =
+			    (tocentry->cdte_ctrl & 0x04) ? 1 : 0;
+			if (tocentry->cdte_format == CDROM_MSF) {
+				tocentry->cdte_addr.msf.minute = buffer[9];
+				tocentry->cdte_addr.msf.second = buffer[10];
+				tocentry->cdte_addr.msf.frame = buffer[11];
+			} else
+				tocentry->cdte_addr.lba =
+				    (((((buffer[8] << 8) + buffer[9]) << 8)
+				      + buffer[10]) << 8) + buffer[11];
+
+			return r ? -EIO : 0;
+		}
+
+	default:
+
+		return -ENOSYS;
+	}
+}
+
+static int pcd_get_mcn(struct cdrom_device_info *cdi, struct cdrom_mcn *mcn)
+{
+	char cmd[12] =
+	    { GPCMD_READ_SUBCHANNEL, 0, 0x40, 2, 0, 0, 0, 0, 24, 0, 0, 0 };
+	char buffer[32];
+
+	if (pcd_atapi(cdi->handle, cmd, 24, buffer, "get mcn"))
+		return -EIO;
+
+	memcpy(mcn->medium_catalog_number, buffer + 9, 13);
+	mcn->medium_catalog_number[13] = 0;
+
+	return 0;
+}
+
+static int __init pcd_init(void)
+{
+	struct pcd_unit *cd;
+	int unit;
+
+	if (disable)
+		return -1;
+
+	pcd_init_units();
+
+	if (pcd_detect())
+		return -1;
+
+	/* get the atapi capabilities page */
+	pcd_probe_capabilities();
+
+	if (register_blkdev(major, name)) {
+		for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++)
+			put_disk(cd->disk);
+		return -1;
+	}
+
+	pcd_queue = blk_init_queue(do_pcd_request, &pcd_lock);
+	if (!pcd_queue) {
+		unregister_blkdev(major, name);
+		for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++)
+			put_disk(cd->disk);
+		return -1;
+	}
+
+	for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
+		if (cd->present) {
+			register_cdrom(&cd->info);
+			cd->disk->private_data = cd;
+			cd->disk->queue = pcd_queue;
+			add_disk(cd->disk);
+		}
+	}
+
+	return 0;
+}
+
+static void __exit pcd_exit(void)
+{
+	struct pcd_unit *cd;
+	int unit;
+
+	for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
+		if (cd->present) {
+			del_gendisk(cd->disk);
+			pi_release(cd->pi);
+			unregister_cdrom(&cd->info);
+		}
+		put_disk(cd->disk);
+	}
+	blk_cleanup_queue(pcd_queue);
+	unregister_blkdev(major, name);
+}
+
+MODULE_LICENSE("GPL");
+module_init(pcd_init)
+module_exit(pcd_exit)
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c
new file mode 100644
index 0000000..202a5a7
--- /dev/null
+++ b/drivers/block/paride/pd.c
@@ -0,0 +1,950 @@
+/* 
+        pd.c    (c) 1997-8  Grant R. Guenther <grant@torque.net>
+                            Under the terms of the GNU General Public License.
+
+        This is the high-level driver for parallel port IDE hard
+        drives based on chips supported by the paride module.
+
+	By default, the driver will autoprobe for a single parallel
+	port IDE drive, but if the drives' individual parameters are
+        specified, the driver can handle up to 4 drives.
+
+        The behaviour of the pd driver can be altered by setting
+        some parameters from the insmod command line.  The following
+        parameters are adjustable:
+ 
+	    drive0  	These four arguments can be arrays of	    
+	    drive1	1-8 integers as follows:
+	    drive2
+	    drive3	<prt>,<pro>,<uni>,<mod>,<geo>,<sby>,<dly>,<slv>
+
+			Where,
+
+		<prt>	is the base of the parallel port address for
+			the corresponding drive.  (required)
+
+		<pro>   is the protocol number for the adapter that
+			supports this drive.  These numbers are
+                        logged by 'paride' when the protocol modules
+			are initialised.  (0 if not given)
+
+		<uni>   for those adapters that support chained
+			devices, this is the unit selector for the
+		        chain of devices on the given port.  It should
+			be zero for devices that don't support chaining.
+			(0 if not given)
+
+		<mod>   this can be -1 to choose the best mode, or one
+		        of the mode numbers supported by the adapter.
+			(-1 if not given)
+
+		<geo>   this defaults to 0 to indicate that the driver
+			should use the CHS geometry provided by the drive
+			itself.  If set to 1, the driver will provide
+			a logical geometry with 64 heads and 32 sectors
+			per track, to be consistent with most SCSI
+		        drivers.  (0 if not given)
+
+		<sby>   set this to zero to disable the power saving
+			standby mode, if needed.  (1 if not given)
+
+		<dly>   some parallel ports require the driver to 
+			go more slowly.  -1 sets a default value that
+			should work with the chosen protocol.  Otherwise,
+			set this to a small integer; the larger it is,
+			the slower the port i/o.  In some cases, setting
+			this to zero will speed up the device. (default -1)
+
+		<slv>   IDE disks can be jumpered to master or slave.
+                        Set this to 0 to choose the master drive, 1 to
+                        choose the slave, -1 (the default) to choose the
+                        first drive found.
+			
+
+            major       You may use this parameter to override the
+                        default major number (45) that this driver
+                        will use.  Be sure to change the device
+                        name as well.
+
+            name        This parameter is a character string that
+                        contains the name the kernel will use for this
+                        device (in /proc output, for instance).
+			(default "pd")
+
+	    cluster	The driver will attempt to aggregate requests
+			for adjacent blocks into larger multi-block
+			clusters.  The maximum cluster size (in 512
+			byte sectors) is set with this parameter.
+			(default 64)
+
+	    verbose	This parameter controls the amount of logging
+			that the driver will do.  Set it to 0 for 
+			normal operation, 1 to see autoprobe progress
+			messages, or 2 to see additional debugging
+			output.  (default 0)
+
+            nice        This parameter controls the driver's use of
+                        idle CPU time, at the expense of some speed.
+
+        If this driver is built into the kernel, you can use the
+        following kernel command line parameters, with the same values
+        as the corresponding module parameters listed above:
+
+            pd.drive0
+            pd.drive1
+            pd.drive2
+            pd.drive3
+            pd.cluster
+            pd.nice
+
+        In addition, you can use the parameter pd.disable to disable
+        the driver entirely.
+ 
+*/
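+/* Example (the port and drive numbers below are purely illustrative):
+   a single master drive on a parallel port at 0x378, with everything
+   else left at its default, could be set up with
+
+	insmod pd drive0=0x378,0,0,-1,0,1,-1,0
+
+   or, with the driver built in, by booting with
+
+	pd.drive0=0x378,0,0,-1,0,1,-1,0
+*/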
+
+/* Changes:
+
+	1.01	GRG 1997.01.24	Restored pd_reset()
+				Added eject ioctl
+	1.02    GRG 1998.05.06  SMP spinlock changes, 
+				Added slave support
+	1.03    GRG 1998.06.16  Eliminate an Ugh.
+	1.04	GRG 1998.08.15  Extra debugging, use HZ in loop timing
+	1.05    GRG 1998.09.24  Added jumbo support
+
+*/
+
+#define PD_VERSION      "1.05"
+#define PD_MAJOR	45
+#define PD_NAME		"pd"
+#define PD_UNITS	4
+
+/* Here are things one can override from the insmod command.
+   Most are autoprobed by paride unless set here.  Verbose is off
+   by default.
+
+*/
+
+static int verbose = 0;
+static int major = PD_MAJOR;
+static char *name = PD_NAME;
+static int cluster = 64;
+static int nice = 0;
+static int disable = 0;
+
+static int drive0[8] = { 0, 0, 0, -1, 0, 1, -1, -1 };
+static int drive1[8] = { 0, 0, 0, -1, 0, 1, -1, -1 };
+static int drive2[8] = { 0, 0, 0, -1, 0, 1, -1, -1 };
+static int drive3[8] = { 0, 0, 0, -1, 0, 1, -1, -1 };
+
+static int (*drives[4])[8] = {&drive0, &drive1, &drive2, &drive3};
+
+enum {D_PRT, D_PRO, D_UNI, D_MOD, D_GEO, D_SBY, D_DLY, D_SLV};
+
+/* end of parameters */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/hdreg.h>
+#include <linux/cdrom.h>	/* for the eject ioctl */
+#include <linux/blkdev.h>
+#include <linux/blkpg.h>
+#include <asm/uaccess.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+
+static DEFINE_SPINLOCK(pd_lock);
+
+module_param(verbose, bool, 0);
+module_param(major, int, 0);
+module_param(name, charp, 0);
+module_param(cluster, int, 0);
+module_param(nice, int, 0);
+module_param_array(drive0, int, NULL, 0);
+module_param_array(drive1, int, NULL, 0);
+module_param_array(drive2, int, NULL, 0);
+module_param_array(drive3, int, NULL, 0);
+
+#include "paride.h"
+
+#define PD_BITS    4
+
+/* numbers for "SCSI" geometry */
+
+#define PD_LOG_HEADS    64
+#define PD_LOG_SECTS    32
+
+#define PD_ID_OFF       54
+#define PD_ID_LEN       14
+
+#define PD_MAX_RETRIES  5
+#define PD_TMO          800	/* interrupt timeout in jiffies */
+#define PD_SPIN_DEL     50	/* spin delay in micro-seconds  */
+
+#define PD_SPIN         ((1000000*PD_TMO)/(HZ*PD_SPIN_DEL))
+
+#define STAT_ERR        0x00001
+#define STAT_INDEX      0x00002
+#define STAT_ECC        0x00004
+#define STAT_DRQ        0x00008
+#define STAT_SEEK       0x00010
+#define STAT_WRERR      0x00020
+#define STAT_READY      0x00040
+#define STAT_BUSY       0x00080
+
+#define ERR_AMNF        0x00100
+#define ERR_TK0NF       0x00200
+#define ERR_ABRT        0x00400
+#define ERR_MCR         0x00800
+#define ERR_IDNF        0x01000
+#define ERR_MC          0x02000
+#define ERR_UNC         0x04000
+#define ERR_TMO         0x10000
+
+#define IDE_READ        	0x20
+#define IDE_WRITE       	0x30
+#define IDE_READ_VRFY		0x40
+#define IDE_INIT_DEV_PARMS	0x91
+#define IDE_STANDBY     	0x96
+#define IDE_ACKCHANGE   	0xdb
+#define IDE_DOORLOCK    	0xde
+#define IDE_DOORUNLOCK  	0xdf
+#define IDE_IDENTIFY    	0xec
+#define IDE_EJECT		0xed
+
+#define PD_NAMELEN	8
+
+struct pd_unit {
+	struct pi_adapter pia;	/* interface to paride layer */
+	struct pi_adapter *pi;
+	int access;		/* count of active opens ... */
+	int capacity;		/* Size of this volume in sectors */
+	int heads;		/* physical geometry */
+	int sectors;
+	int cylinders;
+	int can_lba;
+	int drive;		/* master=0 slave=1 */
+	int changed;		/* Have we seen a disk change ? */
+	int removable;		/* removable media device  ?  */
+	int standby;
+	int alt_geom;
+	char name[PD_NAMELEN];	/* pda, pdb, etc ... */
+	struct gendisk *gd;
+};
+
+static struct pd_unit pd[PD_UNITS];
+
+static char pd_scratch[512];	/* scratch block buffer */
+
+static char *pd_errs[17] = { "ERR", "INDEX", "ECC", "DRQ", "SEEK", "WRERR",
+	"READY", "BUSY", "AMNF", "TK0NF", "ABRT", "MCR",
+	"IDNF", "MC", "UNC", "???", "TMO"
+};
+
+static inline int status_reg(struct pd_unit *disk)
+{
+	return pi_read_regr(disk->pi, 1, 6);
+}
+
+static inline int read_reg(struct pd_unit *disk, int reg)
+{
+	return pi_read_regr(disk->pi, 0, reg);
+}
+
+static inline void write_status(struct pd_unit *disk, int val)
+{
+	pi_write_regr(disk->pi, 1, 6, val);
+}
+
+static inline void write_reg(struct pd_unit *disk, int reg, int val)
+{
+	pi_write_regr(disk->pi, 0, reg, val);
+}
+
+static inline u8 DRIVE(struct pd_unit *disk)
+{
+	return 0xa0+0x10*disk->drive;
+}
+
+/*  ide command interface */
+
+static void pd_print_error(struct pd_unit *disk, char *msg, int status)
+{
+	int i;
+
+	printk("%s: %s: status = 0x%x =", disk->name, msg, status);
+	for (i = 0; i < 18; i++)
+		if (status & (1 << i))
+			printk(" %s", pd_errs[i]);
+	printk("\n");
+}
+
+static void pd_reset(struct pd_unit *disk)
+{				/* called only for MASTER drive */
+	write_status(disk, 4);
+	udelay(50);
+	write_status(disk, 0);
+	udelay(250);
+}
+
+#define DBMSG(msg)	((verbose>1)?(msg):NULL)
+
+static int pd_wait_for(struct pd_unit *disk, int w, char *msg)
+{				/* polled wait */
+	int k, r, e;
+
+	k = 0;
+	while (k < PD_SPIN) {
+		r = status_reg(disk);
+		k++;
+		if (((r & w) == w) && !(r & STAT_BUSY))
+			break;
+		udelay(PD_SPIN_DEL);
+	}
+	e = (read_reg(disk, 1) << 8) + read_reg(disk, 7);
+	if (k >= PD_SPIN)
+		e |= ERR_TMO;
+	if ((e & (STAT_ERR | ERR_TMO)) && (msg != NULL))
+		pd_print_error(disk, msg, e);
+	return e;
+}
+
+static void pd_send_command(struct pd_unit *disk, int n, int s, int h, int c0, int c1, int func)
+{
+	write_reg(disk, 6, DRIVE(disk) + h);
+	write_reg(disk, 1, 0);		/* the IDE task file */
+	write_reg(disk, 2, n);
+	write_reg(disk, 3, s);
+	write_reg(disk, 4, c0);
+	write_reg(disk, 5, c1);
+	write_reg(disk, 7, func);
+
+	udelay(1);
+}
+
+static void pd_ide_command(struct pd_unit *disk, int func, int block, int count)
+{
+	int c1, c0, h, s;
+
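+	/* split the block number across the IDE task file: LBA mode
+	   packs 28 bits into sector/cyl-low/cyl-high/head (0x40 selects
+	   LBA in the drive/head register), otherwise use C/H/S */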
+	if (disk->can_lba) {
+		s = block & 255;
+		c0 = (block >>= 8) & 255;
+		c1 = (block >>= 8) & 255;
+		h = ((block >>= 8) & 15) + 0x40;
+	} else {
+		s = (block % disk->sectors) + 1;
+		h = (block /= disk->sectors) % disk->heads;
+		c0 = (block /= disk->heads) % 256;
+		c1 = (block >>= 8);
+	}
+	pd_send_command(disk, count, s, h, c0, c1, func);
+}
+
+/* The i/o request engine */
+
+enum action {Fail = 0, Ok = 1, Hold, Wait};
+
+static struct request *pd_req;	/* current request */
+static enum action (*phase)(void);
+
+static void run_fsm(void);
+
+static void ps_tq_int(void *data);
+
+static DECLARE_WORK(fsm_tq, ps_tq_int, NULL);
+
+static void schedule_fsm(void)
+{
+	if (!nice)
+		schedule_work(&fsm_tq);
+	else
+		schedule_delayed_work(&fsm_tq, nice-1);
+}
+
+static void ps_tq_int(void *data)
+{
+	run_fsm();
+}
+
+static enum action do_pd_io_start(void);
+static enum action pd_special(void);
+static enum action do_pd_read_start(void);
+static enum action do_pd_write_start(void);
+static enum action do_pd_read_drq(void);
+static enum action do_pd_write_done(void);
+
+static struct request_queue *pd_queue;
+static int pd_claimed;
+
+static struct pd_unit *pd_current; /* current request's drive */
+static PIA *pi_current; /* current request's PIA */
+
+static void run_fsm(void)
+{
+	while (1) {
+		enum action res;
+		unsigned long saved_flags;
+		int stop = 0;
+
+		if (!phase) {
+			pd_current = pd_req->rq_disk->private_data;
+			pi_current = pd_current->pi;
+			phase = do_pd_io_start;
+		}
+
+		switch (pd_claimed) {
+			case 0:
+				pd_claimed = 1;
+				if (!pi_schedule_claimed(pi_current, run_fsm))
+					return;
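+				/* fall through: the port was free and is now claimed */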
+			case 1:
+				pd_claimed = 2;
+				pi_current->proto->connect(pi_current);
+		}
+
+		switch(res = phase()) {
+			case Ok: case Fail:
+				pi_disconnect(pi_current);
+				pd_claimed = 0;
+				phase = NULL;
+				spin_lock_irqsave(&pd_lock, saved_flags);
+				end_request(pd_req, res);
+				pd_req = elv_next_request(pd_queue);
+				if (!pd_req)
+					stop = 1;
+				spin_unlock_irqrestore(&pd_lock, saved_flags);
+				if (stop)
+					return;
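+				/* fall through: another request is waiting */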
+			case Hold:
+				schedule_fsm();
+				return;
+			case Wait:
+				pi_disconnect(pi_current);
+				pd_claimed = 0;
+		}
+	}
+}
+
+static int pd_retries = 0;	/* i/o error retry count */
+static int pd_block;		/* address of next requested block */
+static int pd_count;		/* number of blocks still to do */
+static int pd_run;		/* sectors in current cluster */
+static int pd_cmd;		/* current command READ/WRITE */
+static char *pd_buf;		/* buffer for request in progress */
+
+static enum action do_pd_io_start(void)
+{
+	if (pd_req->flags & REQ_SPECIAL) {
+		phase = pd_special;
+		return pd_special();
+	}
+
+	pd_cmd = rq_data_dir(pd_req);
+	if (pd_cmd == READ || pd_cmd == WRITE) {
+		pd_block = pd_req->sector;
+		pd_count = pd_req->current_nr_sectors;
+		if (pd_block + pd_count > get_capacity(pd_req->rq_disk))
+			return Fail;
+		pd_run = pd_req->nr_sectors;
+		pd_buf = pd_req->buffer;
+		pd_retries = 0;
+		if (pd_cmd == READ)
+			return do_pd_read_start();
+		else
+			return do_pd_write_start();
+	}
+	return Fail;
+}
+
+static enum action pd_special(void)
+{
+	enum action (*func)(struct pd_unit *) = pd_req->special;
+	return func(pd_current);
+}
+
+static int pd_next_buf(void)
+{
+	unsigned long saved_flags;
+
+	pd_count--;
+	pd_run--;
+	pd_buf += 512;
+	pd_block++;
+	if (!pd_run)
+		return 1;
+	if (pd_count)
+		return 0;
+	spin_lock_irqsave(&pd_lock, saved_flags);
+	end_request(pd_req, 1);
+	pd_count = pd_req->current_nr_sectors;
+	pd_buf = pd_req->buffer;
+	spin_unlock_irqrestore(&pd_lock, saved_flags);
+	return 0;
+}
+
+static unsigned long pd_timeout;
+
+static enum action do_pd_read_start(void)
+{
+	if (pd_wait_for(pd_current, STAT_READY, "do_pd_read") & STAT_ERR) {
+		if (pd_retries < PD_MAX_RETRIES) {
+			pd_retries++;
+			return Wait;
+		}
+		return Fail;
+	}
+	pd_ide_command(pd_current, IDE_READ, pd_block, pd_run);
+	phase = do_pd_read_drq;
+	pd_timeout = jiffies + PD_TMO;
+	return Hold;
+}
+
+static enum action do_pd_write_start(void)
+{
+	if (pd_wait_for(pd_current, STAT_READY, "do_pd_write") & STAT_ERR) {
+		if (pd_retries < PD_MAX_RETRIES) {
+			pd_retries++;
+			return Wait;
+		}
+		return Fail;
+	}
+	pd_ide_command(pd_current, IDE_WRITE, pd_block, pd_run);
+	while (1) {
+		if (pd_wait_for(pd_current, STAT_DRQ, "do_pd_write_drq") & STAT_ERR) {
+			if (pd_retries < PD_MAX_RETRIES) {
+				pd_retries++;
+				return Wait;
+			}
+			return Fail;
+		}
+		pi_write_block(pd_current->pi, pd_buf, 512);
+		if (pd_next_buf())
+			break;
+	}
+	phase = do_pd_write_done;
+	pd_timeout = jiffies + PD_TMO;
+	return Hold;
+}
+
+static inline int pd_ready(void)
+{
+	return !(status_reg(pd_current) & STAT_BUSY);
+}
+
+static enum action do_pd_read_drq(void)
+{
+	if (!pd_ready() && !time_after_eq(jiffies, pd_timeout))
+		return Hold;
+
+	while (1) {
+		if (pd_wait_for(pd_current, STAT_DRQ, "do_pd_read_drq") & STAT_ERR) {
+			if (pd_retries < PD_MAX_RETRIES) {
+				pd_retries++;
+				phase = do_pd_read_start;
+				return Wait;
+			}
+			return Fail;
+		}
+		pi_read_block(pd_current->pi, pd_buf, 512);
+		if (pd_next_buf())
+			break;
+	}
+	return Ok;
+}
+
+static enum action do_pd_write_done(void)
+{
+	if (!pd_ready() && !time_after_eq(jiffies, pd_timeout))
+		return Hold;
+
+	if (pd_wait_for(pd_current, STAT_READY, "do_pd_write_done") & STAT_ERR) {
+		if (pd_retries < PD_MAX_RETRIES) {
+			pd_retries++;
+			phase = do_pd_write_start;
+			return Wait;
+		}
+		return Fail;
+	}
+	return Ok;
+}
+
+/* special io requests */
+
+/* According to the ATA standard, the default CHS geometry should be
+   available following a reset.  Some Western Digital drives come up
+   in a mode where only LBA addresses are accepted until the device
+   parameters are initialised.
+*/
+
+static void pd_init_dev_parms(struct pd_unit *disk)
+{
+	pd_wait_for(disk, 0, DBMSG("before init_dev_parms"));
+	pd_send_command(disk, disk->sectors, 0, disk->heads - 1, 0, 0,
+			IDE_INIT_DEV_PARMS);
+	udelay(300);
+	pd_wait_for(disk, 0, "Initialise device parameters");
+}
+
+static enum action pd_door_lock(struct pd_unit *disk)
+{
+	if (!(pd_wait_for(disk, STAT_READY, "Lock") & STAT_ERR)) {
+		pd_send_command(disk, 1, 0, 0, 0, 0, IDE_DOORLOCK);
+		pd_wait_for(disk, STAT_READY, "Lock done");
+	}
+	return Ok;
+}
+
+static enum action pd_door_unlock(struct pd_unit *disk)
+{
+	if (!(pd_wait_for(disk, STAT_READY, "Lock") & STAT_ERR)) {
+		pd_send_command(disk, 1, 0, 0, 0, 0, IDE_DOORUNLOCK);
+		pd_wait_for(disk, STAT_READY, "Lock done");
+	}
+	return Ok;
+}
+
+static enum action pd_eject(struct pd_unit *disk)
+{
+	pd_wait_for(disk, 0, DBMSG("before unlock on eject"));
+	pd_send_command(disk, 1, 0, 0, 0, 0, IDE_DOORUNLOCK);
+	pd_wait_for(disk, 0, DBMSG("after unlock on eject"));
+	pd_wait_for(disk, 0, DBMSG("before eject"));
+	pd_send_command(disk, 0, 0, 0, 0, 0, IDE_EJECT);
+	pd_wait_for(disk, 0, DBMSG("after eject"));
+	return Ok;
+}
+
+static enum action pd_media_check(struct pd_unit *disk)
+{
+	int r = pd_wait_for(disk, STAT_READY, DBMSG("before media_check"));
+	if (!(r & STAT_ERR)) {
+		pd_send_command(disk, 1, 1, 0, 0, 0, IDE_READ_VRFY);
+		r = pd_wait_for(disk, STAT_READY, DBMSG("RDY after READ_VRFY"));
+	} else
+		disk->changed = 1;	/* say changed if other error */
+	if (r & ERR_MC) {
+		disk->changed = 1;
+		pd_send_command(disk, 1, 0, 0, 0, 0, IDE_ACKCHANGE);
+		pd_wait_for(disk, STAT_READY, DBMSG("RDY after ACKCHANGE"));
+		pd_send_command(disk, 1, 1, 0, 0, 0, IDE_READ_VRFY);
+		r = pd_wait_for(disk, STAT_READY, DBMSG("RDY after VRFY"));
+	}
+	return Ok;
+}
+
+static void pd_standby_off(struct pd_unit *disk)
+{
+	pd_wait_for(disk, 0, DBMSG("before STANDBY"));
+	pd_send_command(disk, 0, 0, 0, 0, 0, IDE_STANDBY);
+	pd_wait_for(disk, 0, DBMSG("after STANDBY"));
+}
+
+static enum action pd_identify(struct pd_unit *disk)
+{
+	int j;
+	char id[PD_ID_LEN + 1];
+
+/* WARNING:  here there may be dragons.  reset() applies to both drives,
+   but we call it only on probing the MASTER. This should allow most
+   common configurations to work, but be warned that a reset can clear
+   settings on the SLAVE drive.
+*/
+
+	if (disk->drive == 0)
+		pd_reset(disk);
+
+	write_reg(disk, 6, DRIVE(disk));
+	pd_wait_for(disk, 0, DBMSG("before IDENT"));
+	pd_send_command(disk, 1, 0, 0, 0, 0, IDE_IDENTIFY);
+
+	if (pd_wait_for(disk, STAT_DRQ, DBMSG("IDENT DRQ")) & STAT_ERR)
+		return Fail;
+	pi_read_block(disk->pi, pd_scratch, 512);
+	disk->can_lba = pd_scratch[99] & 2;
+	disk->sectors = le16_to_cpu(*(u16 *) (pd_scratch + 12));
+	disk->heads = le16_to_cpu(*(u16 *) (pd_scratch + 6));
+	disk->cylinders = le16_to_cpu(*(u16 *) (pd_scratch + 2));
+	if (disk->can_lba)
+		disk->capacity = le32_to_cpu(*(u32 *) (pd_scratch + 120));
+	else
+		disk->capacity = disk->sectors * disk->heads * disk->cylinders;
+
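+	/* IDENTIFY strings are byte-swapped within each 16-bit word,
+	   hence the j ^ 1 */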
+	for (j = 0; j < PD_ID_LEN; j++)
+		id[j ^ 1] = pd_scratch[j + PD_ID_OFF];
+	j = PD_ID_LEN - 1;
+	while ((j >= 0) && (id[j] <= 0x20))
+		j--;
+	j++;
+	id[j] = 0;
+
+	disk->removable = pd_scratch[0] & 0x80;
+
+	printk("%s: %s, %s, %d blocks [%dM], (%d/%d/%d), %s media\n",
+	       disk->name, id,
+	       disk->drive ? "slave" : "master",
+	       disk->capacity, disk->capacity / 2048,
+	       disk->cylinders, disk->heads, disk->sectors,
+	       disk->removable ? "removable" : "fixed");
+
+	if (disk->capacity)
+		pd_init_dev_parms(disk);
+	if (!disk->standby)
+		pd_standby_off(disk);
+
+	return Ok;
+}
+
+/* end of io request engine */
+
+static void do_pd_request(request_queue_t * q)
+{
+	if (pd_req)
+		return;
+	pd_req = elv_next_request(q);
+	if (!pd_req)
+		return;
+
+	schedule_fsm();
+}
+
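+/* run one of the special-command handlers above synchronously, by
+   queueing a REQ_SPECIAL request and waiting for the fsm to finish it */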
+static int pd_special_command(struct pd_unit *disk,
+		      enum action (*func)(struct pd_unit *disk))
+{
+	DECLARE_COMPLETION(wait);
+	struct request rq;
+	int err = 0;
+
+	memset(&rq, 0, sizeof(rq));
+	rq.errors = 0;
+	rq.rq_status = RQ_ACTIVE;
+	rq.rq_disk = disk->gd;
+	rq.ref_count = 1;
+	rq.waiting = &wait;
+	rq.end_io = blk_end_sync_rq;
+	blk_insert_request(disk->gd->queue, &rq, 0, func, 0);
+	wait_for_completion(&wait);
+	rq.waiting = NULL;
+	if (rq.errors)
+		err = -EIO;
+	blk_put_request(&rq);
+	return err;
+}
+
+/* kernel glue structures */
+
+static int pd_open(struct inode *inode, struct file *file)
+{
+	struct pd_unit *disk = inode->i_bdev->bd_disk->private_data;
+
+	disk->access++;
+
+	if (disk->removable) {
+		pd_special_command(disk, pd_media_check);
+		pd_special_command(disk, pd_door_lock);
+	}
+	return 0;
+}
+
+static int pd_ioctl(struct inode *inode, struct file *file,
+	 unsigned int cmd, unsigned long arg)
+{
+	struct pd_unit *disk = inode->i_bdev->bd_disk->private_data;
+	struct hd_geometry __user *geo = (struct hd_geometry __user *) arg;
+	struct hd_geometry g;
+
+	switch (cmd) {
+	case CDROMEJECT:
+		if (disk->access == 1)
+			pd_special_command(disk, pd_eject);
+		return 0;
+	case HDIO_GETGEO:
+		if (disk->alt_geom) {
+			g.heads = PD_LOG_HEADS;
+			g.sectors = PD_LOG_SECTS;
+			g.cylinders = disk->capacity / (g.heads * g.sectors);
+		} else {
+			g.heads = disk->heads;
+			g.sectors = disk->sectors;
+			g.cylinders = disk->cylinders;
+		}
+		g.start = get_start_sect(inode->i_bdev);
+		if (copy_to_user(geo, &g, sizeof(struct hd_geometry)))
+			return -EFAULT;
+		return 0;
+	default:
+		return -EINVAL;
+	}
+}
+
+static int pd_release(struct inode *inode, struct file *file)
+{
+	struct pd_unit *disk = inode->i_bdev->bd_disk->private_data;
+
+	if (!--disk->access && disk->removable)
+		pd_special_command(disk, pd_door_unlock);
+
+	return 0;
+}
+
+static int pd_check_media(struct gendisk *p)
+{
+	struct pd_unit *disk = p->private_data;
+	int r;
+	if (!disk->removable)
+		return 0;
+	pd_special_command(disk, pd_media_check);
+	r = disk->changed;
+	disk->changed = 0;
+	return r;
+}
+
+static int pd_revalidate(struct gendisk *p)
+{
+	struct pd_unit *disk = p->private_data;
+	if (pd_special_command(disk, pd_identify) == 0)
+		set_capacity(p, disk->capacity);
+	else
+		set_capacity(p, 0);
+	return 0;
+}
+
+static struct block_device_operations pd_fops = {
+	.owner		= THIS_MODULE,
+	.open		= pd_open,
+	.release	= pd_release,
+	.ioctl		= pd_ioctl,
+	.media_changed	= pd_check_media,
+	.revalidate_disk= pd_revalidate
+};
+
+/* probing */
+
+static void pd_probe_drive(struct pd_unit *disk)
+{
+	struct gendisk *p = alloc_disk(1 << PD_BITS);
+	if (!p)
+		return;
+	strcpy(p->disk_name, disk->name);
+	p->fops = &pd_fops;
+	p->major = major;
+	p->first_minor = (disk - pd) << PD_BITS;
+	disk->gd = p;
+	p->private_data = disk;
+	p->queue = pd_queue;
+
+	if (disk->drive == -1) {
+		for (disk->drive = 0; disk->drive <= 1; disk->drive++)
+			if (pd_special_command(disk, pd_identify) == 0)
+				return;
+	} else if (pd_special_command(disk, pd_identify) == 0)
+		return;
+	disk->gd = NULL;
+	put_disk(p);
+}
+
+static int pd_detect(void)
+{
+	int found = 0, unit, pd_drive_count = 0;
+	struct pd_unit *disk;
+
+	for (unit = 0; unit < PD_UNITS; unit++) {
+		int *parm = *drives[unit];
+		struct pd_unit *disk = pd + unit;
+		disk->pi = &disk->pia;
+		disk->access = 0;
+		disk->changed = 1;
+		disk->capacity = 0;
+		disk->drive = parm[D_SLV];
+		snprintf(disk->name, PD_NAMELEN, "%s%c", name, 'a'+unit);
+		disk->alt_geom = parm[D_GEO];
+		disk->standby = parm[D_SBY];
+		if (parm[D_PRT])
+			pd_drive_count++;
+	}
+
+	if (pd_drive_count == 0) { /* nothing spec'd - so autoprobe for 1 */
+		disk = pd;
+		if (pi_init(disk->pi, 1, -1, -1, -1, -1, -1, pd_scratch,
+			    PI_PD, verbose, disk->name)) {
+			pd_probe_drive(disk);
+			if (!disk->gd)
+				pi_release(disk->pi);
+		}
+
+	} else {
+		for (unit = 0, disk = pd; unit < PD_UNITS; unit++, disk++) {
+			int *parm = *drives[unit];
+			if (!parm[D_PRT])
+				continue;
+			if (pi_init(disk->pi, 0, parm[D_PRT], parm[D_MOD],
+				     parm[D_UNI], parm[D_PRO], parm[D_DLY],
+				     pd_scratch, PI_PD, verbose, disk->name)) {
+				pd_probe_drive(disk);
+				if (!disk->gd)
+					pi_release(disk->pi);
+			}
+		}
+	}
+	for (unit = 0, disk = pd; unit < PD_UNITS; unit++, disk++) {
+		if (disk->gd) {
+			set_capacity(disk->gd, disk->capacity);
+			add_disk(disk->gd);
+			found = 1;
+		}
+	}
+	if (!found)
+		printk("%s: no valid drive found\n", name);
+	return found;
+}
+
+static int __init pd_init(void)
+{
+	if (disable)
+		goto out1;
+
+	pd_queue = blk_init_queue(do_pd_request, &pd_lock);
+	if (!pd_queue)
+		goto out1;
+
+	blk_queue_max_sectors(pd_queue, cluster);
+
+	if (register_blkdev(major, name))
+		goto out2;
+
+	printk("%s: %s version %s, major %d, cluster %d, nice %d\n",
+	       name, name, PD_VERSION, major, cluster, nice);
+	if (!pd_detect())
+		goto out3;
+
+	return 0;
+
+out3:
+	unregister_blkdev(major, name);
+out2:
+	blk_cleanup_queue(pd_queue);
+out1:
+	return -ENODEV;
+}
+
+static void __exit pd_exit(void)
+{
+	struct pd_unit *disk;
+	int unit;
+	unregister_blkdev(major, name);
+	for (unit = 0, disk = pd; unit < PD_UNITS; unit++, disk++) {
+		struct gendisk *p = disk->gd;
+		if (p) {
+			disk->gd = NULL;
+			del_gendisk(p);
+			put_disk(p);
+			pi_release(disk->pi);
+		}
+	}
+	blk_cleanup_queue(pd_queue);
+}
+
+MODULE_LICENSE("GPL");
+module_init(pd_init)
+module_exit(pd_exit)
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c
new file mode 100644
index 0000000..060b1f2
--- /dev/null
+++ b/drivers/block/paride/pf.c
@@ -0,0 +1,982 @@
+/* 
+        pf.c    (c) 1997-8  Grant R. Guenther <grant@torque.net>
+                            Under the terms of the GNU General Public License.
+
+        This is the high-level driver for parallel port ATAPI disk
+        drives based on chips supported by the paride module.
+
+        By default, the driver will autoprobe for a single parallel
+        port ATAPI disk drive, but if the drives' individual parameters are
+        specified, the driver can handle up to 4 drives.
+
+        The behaviour of the pf driver can be altered by setting
+        some parameters from the insmod command line.  The following
+        parameters are adjustable:
+
+            drive0      These four arguments can be arrays of       
+            drive1      1-7 integers as follows:
+            drive2
+            drive3      <prt>,<pro>,<uni>,<mod>,<slv>,<lun>,<dly>
+
+                        Where,
+
+                <prt>   is the base of the parallel port address for
+                        the corresponding drive.  (required)
+
+                <pro>   is the protocol number for the adapter that
+                        supports this drive.  These numbers are
+                        logged by 'paride' when the protocol modules
+                        are initialised.  (0 if not given)
+
+                <uni>   for those adapters that support chained
+                        devices, this is the unit selector for the
+                        chain of devices on the given port.  It should
+                        be zero for devices that don't support chaining.
+                        (0 if not given)
+
+                <mod>   this can be -1 to choose the best mode, or one
+                        of the mode numbers supported by the adapter.
+                        (-1 if not given)
+
+                <slv>   ATAPI CDroms can be jumpered to master or slave.
+                        Set this to 0 to choose the master drive, 1 to
+                        choose the slave, -1 (the default) to choose the
+                        first drive found.
+
+		<lun>   Some ATAPI devices support multiple LUNs.
+                        One example is the ATAPI PD/CD drive from
+                        Matshita/Panasonic.  This device has a 
+                        CD drive on LUN 0 and a PD drive on LUN 1.
+                        By default, the driver will search for the
+                        first LUN with a supported device.  Set 
+                        this parameter to force it to use a specific
+                        LUN.  (default -1)
+
+                <dly>   some parallel ports require the driver to 
+                        go more slowly.  -1 sets a default value that
+                        should work with the chosen protocol.  Otherwise,
+                        set this to a small integer; the larger it is,
+                        the slower the port i/o.  In some cases, setting
+                        this to zero will speed up the device. (default -1)
+
+	    major	You may use this parameter to override the
+			default major number (47) that this driver
+			will use.  Be sure to change the device
+			name as well.
+
+	    name	This parameter is a character string that
+			contains the name the kernel will use for this
+			device (in /proc output, for instance).
+			(default "pf").
+
+            cluster     The driver will attempt to aggregate requests
+                        for adjacent blocks into larger multi-block
+                        clusters.  The maximum cluster size (in 512
+                        byte sectors) is set with this parameter.
+                        (default 64)
+
+            verbose     This parameter controls the amount of logging
+                        that the driver will do.  Set it to 0 for
+                        normal operation, 1 to see autoprobe progress
+                        messages, or 2 to see additional debugging
+                        output.  (default 0)
+ 
+	    nice        This parameter controls the driver's use of
+			idle CPU time, at the expense of some speed.
+
+        If this driver is built into the kernel, you can use the
+        following command line parameters, with the same values
+        as the corresponding module parameters listed above:
+
+            pf.drive0
+            pf.drive1
+            pf.drive2
+            pf.drive3
+	    pf.cluster
+            pf.nice
+
+        In addition, you can use the parameter pf.disable to disable
+        the driver entirely.
+
+*/
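+/* Example (illustrative values only): a slave ATAPI drive on LUN 0,
+   on an adapter at port 0x378, could be set up with
+
+	insmod pf drive0=0x378,0,0,-1,1,0,-1
+*/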
+
+/* Changes:
+
+	1.01	GRG 1998.05.03  Changes for SMP.  Eliminate sti().
+				Fix for drives that don't clear STAT_ERR
+			        until after next CDB delivered.
+				Small change in pf_completion to round
+				up transfer size.
+	1.02    GRG 1998.06.16  Eliminated an Ugh
+	1.03    GRG 1998.08.16  Use HZ in loop timings, extra debugging
+	1.04    GRG 1998.09.24  Added jumbo support
+
+*/
+
+#define PF_VERSION      "1.04"
+#define PF_MAJOR	47
+#define PF_NAME		"pf"
+#define PF_UNITS	4
+
+/* Here are things one can override from the insmod command.
+   Most are autoprobed by paride unless set here.  Verbose is off
+   by default.
+
+*/
+
+static int verbose = 0;
+static int major = PF_MAJOR;
+static char *name = PF_NAME;
+static int cluster = 64;
+static int nice = 0;
+static int disable = 0;
+
+static int drive0[7] = { 0, 0, 0, -1, -1, -1, -1 };
+static int drive1[7] = { 0, 0, 0, -1, -1, -1, -1 };
+static int drive2[7] = { 0, 0, 0, -1, -1, -1, -1 };
+static int drive3[7] = { 0, 0, 0, -1, -1, -1, -1 };
+
+static int (*drives[4])[7] = {&drive0, &drive1, &drive2, &drive3};
+static int pf_drive_count;
+
+enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_LUN, D_DLY};
+
+/* end of parameters */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/delay.h>
+#include <linux/hdreg.h>
+#include <linux/cdrom.h>
+#include <linux/spinlock.h>
+#include <linux/blkdev.h>
+#include <linux/blkpg.h>
+#include <asm/uaccess.h>
+
+static DEFINE_SPINLOCK(pf_spin_lock);
+
+module_param(verbose, bool, 0644);
+module_param(major, int, 0);
+module_param(name, charp, 0);
+module_param(cluster, int, 0);
+module_param(nice, int, 0);
+module_param_array(drive0, int, NULL, 0);
+module_param_array(drive1, int, NULL, 0);
+module_param_array(drive2, int, NULL, 0);
+module_param_array(drive3, int, NULL, 0);
+
+#include "paride.h"
+#include "pseudo.h"
+
+/* constants for faking geometry numbers */
+
+#define PF_FD_MAX	8192	/* use FD geometry under this size */
+#define PF_FD_HDS	2
+#define PF_FD_SPT	18
+#define PF_HD_HDS	64
+#define PF_HD_SPT	32
+
+#define PF_MAX_RETRIES  5
+#define PF_TMO          800	/* interrupt timeout in jiffies */
+#define PF_SPIN_DEL     50	/* spin delay in micro-seconds  */
+
+#define PF_SPIN         ((1000000*PF_TMO)/(HZ*PF_SPIN_DEL))
+
+#define STAT_ERR        0x00001
+#define STAT_INDEX      0x00002
+#define STAT_ECC        0x00004
+#define STAT_DRQ        0x00008
+#define STAT_SEEK       0x00010
+#define STAT_WRERR      0x00020
+#define STAT_READY      0x00040
+#define STAT_BUSY       0x00080
+
+#define ATAPI_REQ_SENSE		0x03
+#define ATAPI_LOCK		0x1e
+#define ATAPI_DOOR		0x1b
+#define ATAPI_MODE_SENSE	0x5a
+#define ATAPI_CAPACITY		0x25
+#define ATAPI_IDENTIFY		0x12
+#define ATAPI_READ_10		0x28
+#define ATAPI_WRITE_10		0x2a
+
+static int pf_open(struct inode *inode, struct file *file);
+static void do_pf_request(request_queue_t * q);
+static int pf_ioctl(struct inode *inode, struct file *file,
+		    unsigned int cmd, unsigned long arg);
+
+static int pf_release(struct inode *inode, struct file *file);
+
+static int pf_detect(void);
+static void do_pf_read(void);
+static void do_pf_read_start(void);
+static void do_pf_write(void);
+static void do_pf_write_start(void);
+static void do_pf_read_drq(void);
+static void do_pf_write_done(void);
+
+#define PF_NM           0
+#define PF_RO           1
+#define PF_RW           2
+
+#define PF_NAMELEN      8
+
+struct pf_unit {
+	struct pi_adapter pia;	/* interface to paride layer */
+	struct pi_adapter *pi;
+	int removable;		/* removable media device  ?  */
+	int media_status;	/* media present ?  WP ? */
+	int drive;		/* drive */
+	int lun;
+	int access;		/* count of active opens ... */
+	int present;		/* device present ? */
+	char name[PF_NAMELEN];	/* pf0, pf1, ... */
+	struct gendisk *disk;
+};
+
+static struct pf_unit units[PF_UNITS];
+
+static int pf_identify(struct pf_unit *pf);
+static void pf_lock(struct pf_unit *pf, int func);
+static void pf_eject(struct pf_unit *pf);
+static int pf_check_media(struct gendisk *disk);
+
+static char pf_scratch[512];	/* scratch block buffer */
+
+/* the variables below are used mainly in the I/O request engine, which
+   processes only one request at a time.
+*/
+
+static int pf_retries = 0;	/* i/o error retry count */
+static int pf_busy = 0;		/* request being processed ? */
+static struct request *pf_req;	/* current request */
+static int pf_block;		/* address of next requested block */
+static int pf_count;		/* number of blocks still to do */
+static int pf_run;		/* sectors in current cluster */
+static int pf_cmd;		/* current command READ/WRITE */
+static struct pf_unit *pf_current;/* unit of current request */
+static int pf_mask;		/* stopper for pseudo-int */
+static char *pf_buf;		/* buffer for request in progress */
+
+/* kernel glue structures */
+
+static struct block_device_operations pf_fops = {
+	.owner		= THIS_MODULE,
+	.open		= pf_open,
+	.release	= pf_release,
+	.ioctl		= pf_ioctl,
+	.media_changed	= pf_check_media,
+};
+
+static void __init pf_init_units(void)
+{
+	struct pf_unit *pf;
+	int unit;
+
+	pf_drive_count = 0;
+	for (unit = 0, pf = units; unit < PF_UNITS; unit++, pf++) {
+		struct gendisk *disk = alloc_disk(1);
+		if (!disk)
+			continue;
+		pf->disk = disk;
+		pf->pi = &pf->pia;
+		pf->media_status = PF_NM;
+		pf->drive = (*drives[unit])[D_SLV];
+		pf->lun = (*drives[unit])[D_LUN];
+		snprintf(pf->name, PF_NAMELEN, "%s%d", name, unit);
+		disk->major = major;
+		disk->first_minor = unit;
+		strcpy(disk->disk_name, pf->name);
+		disk->fops = &pf_fops;
+		if ((*drives[unit])[D_PRT])	/* a port was configured */
+			pf_drive_count++;
+	}
+}
+
+static int pf_open(struct inode *inode, struct file *file)
+{
+	struct pf_unit *pf = inode->i_bdev->bd_disk->private_data;
+
+	pf_identify(pf);
+
+	if (pf->media_status == PF_NM)
+		return -ENODEV;
+
+	if ((pf->media_status == PF_RO) && (file->f_mode & 2))
+		return -EROFS;
+
+	pf->access++;
+	if (pf->removable)
+		pf_lock(pf, 1);
+
+	return 0;
+}
+
+static int pf_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct pf_unit *pf = inode->i_bdev->bd_disk->private_data;
+	struct hd_geometry __user *geo = (struct hd_geometry __user *) arg;
+	struct hd_geometry g;
+	sector_t capacity;
+
+	if (cmd == CDROMEJECT) {
+		if (pf->access == 1) {
+			pf_eject(pf);
+			return 0;
+		}
+		return -EBUSY;
+	}
+	if (cmd != HDIO_GETGEO)
+		return -EINVAL;
+	capacity = get_capacity(pf->disk);
+	if (capacity < PF_FD_MAX) {
+		g.cylinders = sector_div(capacity, PF_FD_HDS * PF_FD_SPT);
+		g.heads = PF_FD_HDS;
+		g.sectors = PF_FD_SPT;
+	} else {
+		g.cylinders = sector_div(capacity, PF_HD_HDS * PF_HD_SPT);
+		g.heads = PF_HD_HDS;
+		g.sectors = PF_HD_SPT;
+	}
+	if (copy_to_user(geo, &g, sizeof(g)))
+		return -EFAULT;
+	return 0;
+}
+
+static int pf_release(struct inode *inode, struct file *file)
+{
+	struct pf_unit *pf = inode->i_bdev->bd_disk->private_data;
+
+	if (pf->access <= 0)
+		return -EINVAL;
+
+	pf->access--;
+
+	if (!pf->access && pf->removable)
+		pf_lock(pf, 0);
+
+	return 0;
+}
+
+static int pf_check_media(struct gendisk *disk)
+{
+	return 1;
+}
+
+static inline int status_reg(struct pf_unit *pf)
+{
+	return pi_read_regr(pf->pi, 1, 6);
+}
+
+static inline int read_reg(struct pf_unit *pf, int reg)
+{
+	return pi_read_regr(pf->pi, 0, reg);
+}
+
+static inline void write_reg(struct pf_unit *pf, int reg, int val)
+{
+	pi_write_regr(pf->pi, 0, reg, val);
+}
+
+static int pf_wait(struct pf_unit *pf, int go, int stop, char *fun, char *msg)
+{
+	int j, r, e, s, p;
+
+	j = 0;
+	while ((((r = status_reg(pf)) & go) || (stop && (!(r & stop))))
+	       && (j++ < PF_SPIN))
+		udelay(PF_SPIN_DEL);
+
+	if ((r & (STAT_ERR & stop)) || (j >= PF_SPIN)) {
+		s = read_reg(pf, 7);
+		e = read_reg(pf, 1);
+		p = read_reg(pf, 2);
+		if (j >= PF_SPIN)
+			e |= 0x100;
+		if (fun)
+			printk("%s: %s %s: alt=0x%x stat=0x%x err=0x%x"
+			       " loop=%d phase=%d\n",
+			       pf->name, fun, msg, r, s, e, j, p);
+		return (e << 8) + s;
+	}
+	return 0;
+}
+
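+/* send a 12-byte ATAPI command: select the drive, program the expected
+   transfer length, issue the 0xa0 PACKET opcode, wait for the command
+   phase (interrupt reason 1) and write out the CDB */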
+static int pf_command(struct pf_unit *pf, char *cmd, int dlen, char *fun)
+{
+	pi_connect(pf->pi);
+
+	write_reg(pf, 6, 0xa0+0x10*pf->drive);
+
+	if (pf_wait(pf, STAT_BUSY | STAT_DRQ, 0, fun, "before command")) {
+		pi_disconnect(pf->pi);
+		return -1;
+	}
+
+	write_reg(pf, 4, dlen % 256);
+	write_reg(pf, 5, dlen / 256);
+	write_reg(pf, 7, 0xa0);	/* ATAPI packet command */
+
+	if (pf_wait(pf, STAT_BUSY, STAT_DRQ, fun, "command DRQ")) {
+		pi_disconnect(pf->pi);
+		return -1;
+	}
+
+	if (read_reg(pf, 2) != 1) {
+		printk("%s: %s: command phase error\n", pf->name, fun);
+		pi_disconnect(pf->pi);
+		return -1;
+	}
+
+	pi_write_block(pf->pi, cmd, 12);
+
+	return 0;
+}
+
+static int pf_completion(struct pf_unit *pf, char *buf, char *fun)
+{
+	int r, s, n;
+
+	r = pf_wait(pf, STAT_BUSY, STAT_DRQ | STAT_READY | STAT_ERR,
+		    fun, "completion");
+
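+	/* the I/O bit in the interrupt-reason register plus DRQ means
+	   the drive has data to transfer; the byte count from registers
+	   4 and 5 is rounded up to a multiple of 4 (see note 1.01) */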
+	if ((read_reg(pf, 2) & 2) && (read_reg(pf, 7) & STAT_DRQ)) {
+		n = (((read_reg(pf, 4) + 256 * read_reg(pf, 5)) +
+		      3) & 0xfffc);
+		pi_read_block(pf->pi, buf, n);
+	}
+
+	s = pf_wait(pf, STAT_BUSY, STAT_READY | STAT_ERR, fun, "data done");
+
+	pi_disconnect(pf->pi);
+
+	return (r ? r : s);
+}
+
+static void pf_req_sense(struct pf_unit *pf, int quiet)
+{
+	char rs_cmd[12] =
+	    { ATAPI_REQ_SENSE, pf->lun << 5, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0 };
+	char buf[16];
+	int r;
+
+	r = pf_command(pf, rs_cmd, 16, "Request sense");
+	mdelay(1);
+	if (!r)
+		pf_completion(pf, buf, "Request sense");
+
+	if ((!r) && (!quiet))
+		printk("%s: Sense key: %x, ASC: %x, ASQ: %x\n",
+		       pf->name, buf[2] & 0xf, buf[12], buf[13]);
+}
+
+static int pf_atapi(struct pf_unit *pf, char *cmd, int dlen, char *buf, char *fun)
+{
+	int r;
+
+	r = pf_command(pf, cmd, dlen, fun);
+	mdelay(1);
+	if (!r)
+		r = pf_completion(pf, buf, fun);
+	if (r)
+		pf_req_sense(pf, !fun);
+
+	return r;
+}
+
+#define DBMSG(msg)      ((verbose>1)?(msg):NULL)
+
+static void pf_lock(struct pf_unit *pf, int func)
+{
+	char lo_cmd[12] = { ATAPI_LOCK, pf->lun << 5, 0, 0, func, 0, 0, 0, 0, 0, 0, 0 };
+
+	pf_atapi(pf, lo_cmd, 0, pf_scratch, func ? "unlock" : "lock");
+}
+
+static void pf_eject(struct pf_unit *pf)
+{
+	char ej_cmd[12] = { ATAPI_DOOR, pf->lun << 5, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0 };
+
+	pf_lock(pf, 0);
+	pf_atapi(pf, ej_cmd, 0, pf_scratch, "eject");
+}
+
+#define PF_RESET_TMO   30	/* in tenths of a second */
+
+static void pf_sleep(int cs)
+{
+	current->state = TASK_INTERRUPTIBLE;
+	schedule_timeout(cs);
+}
+
+/* the ATAPI standard actually specifies the contents of all 7 registers
+   after a reset, but the specification is ambiguous concerning the last
+   two bytes, and different drives interpret the standard differently.
+ */
+
+static int pf_reset(struct pf_unit *pf)
+{
+	int i, k, flg;
+	int expect[5] = { 1, 1, 1, 0x14, 0xeb };
+
+	pi_connect(pf->pi);
+	write_reg(pf, 6, 0xa0+0x10*pf->drive);
+	write_reg(pf, 7, 8);
+
+	pf_sleep(20 * HZ / 1000);
+
+	k = 0;
+	while ((k++ < PF_RESET_TMO) && (status_reg(pf) & STAT_BUSY))
+		pf_sleep(HZ / 10);
+
+	flg = 1;
+	for (i = 0; i < 5; i++)
+		flg &= (read_reg(pf, i + 1) == expect[i]);
+
+	if (verbose) {
+		printk("%s: Reset (%d) signature = ", pf->name, k);
+		for (i = 0; i < 5; i++)
+			printk("%3x", read_reg(pf, i + 1));
+		if (!flg)
+			printk(" (incorrect)");
+		printk("\n");
+	}
+
+	pi_disconnect(pf->pi);
+	return flg - 1;
+}
+
+static void pf_mode_sense(struct pf_unit *pf)
+{
+	char ms_cmd[12] =
+	    { ATAPI_MODE_SENSE, pf->lun << 5, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0 };
+	char buf[8];
+
+	pf_atapi(pf, ms_cmd, 8, buf, DBMSG("mode sense"));
+	pf->media_status = PF_RW;
+	if (buf[3] & 0x80)
+		pf->media_status = PF_RO;
+}
+
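+/* copy an id string from an INQUIRY response, collapsing runs of
+   spaces and trimming a trailing space */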
+static void xs(char *buf, char *targ, int offs, int len)
+{
+	int j, k, l;
+
+	j = 0;
+	l = 0;
+	for (k = 0; k < len; k++)
+		if ((buf[k + offs] != 0x20) || (buf[k + offs] != l))
+			l = targ[j++] = buf[k + offs];
+	if (l == 0x20)
+		j--;
+	targ[j] = 0;
+}
+
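+/* read a 32-bit big-endian value from a SCSI response buffer */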
+static int xl(char *buf, int offs)
+{
+	int v, k;
+
+	v = 0;
+	for (k = 0; k < 4; k++)
+		v = v * 256 + (buf[k + offs] & 0xff);
+	return v;
+}
+
+static void pf_get_capacity(struct pf_unit *pf)
+{
+	char rc_cmd[12] = { ATAPI_CAPACITY, pf->lun << 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+	char buf[8];
+	int bs;
+
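+	/* READ CAPACITY returns the last block number, hence the +1;
+	   only 512-byte sectors are supported */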
+	if (pf_atapi(pf, rc_cmd, 8, buf, DBMSG("get capacity"))) {
+		pf->media_status = PF_NM;
+		return;
+	}
+	set_capacity(pf->disk, xl(buf, 0) + 1);
+	bs = xl(buf, 4);
+	if (bs != 512) {
+		set_capacity(pf->disk, 0);
+		if (verbose)
+			printk("%s: Drive %d, LUN %d,"
+			       " unsupported block size %d\n",
+			       pf->name, pf->drive, pf->lun, bs);
+	}
+}
+
+static int pf_identify(struct pf_unit *pf)
+{
+	int dt, s;
+	char *ms[2] = { "master", "slave" };
+	char mf[10], id[18];
+	char id_cmd[12] =
+	    { ATAPI_IDENTIFY, pf->lun << 5, 0, 0, 36, 0, 0, 0, 0, 0, 0, 0 };
+	char buf[36];
+
+	s = pf_atapi(pf, id_cmd, 36, buf, "identify");
+	if (s)
+		return -1;
+
+	dt = buf[0] & 0x1f;
+	if ((dt != 0) && (dt != 7)) {
+		if (verbose)
+			printk("%s: Drive %d, LUN %d, unsupported type %d\n",
+			       pf->name, pf->drive, pf->lun, dt);
+		return -1;
+	}
+
+	xs(buf, mf, 8, 8);
+	xs(buf, id, 16, 16);
+
+	pf->removable = (buf[1] & 0x80);
+
+	pf_mode_sense(pf);
+	pf_mode_sense(pf);
+	pf_mode_sense(pf);
+
+	pf_get_capacity(pf);
+
+	printk("%s: %s %s, %s LUN %d, type %d",
+	       pf->name, mf, id, ms[pf->drive], pf->lun, dt);
+	if (pf->removable)
+		printk(", removable");
+	if (pf->media_status == PF_NM)
+		printk(", no media\n");
+	else {
+		if (pf->media_status == PF_RO)
+			printk(", RO");
+		printk(", %llu blocks\n",
+			(unsigned long long)get_capacity(pf->disk));
+	}
+	return 0;
+}
+
+/*	returns  0 if a drive is detected
+	        -1 if drive detection failed
+*/
+static int pf_probe(struct pf_unit *pf)
+{
+	if (pf->drive == -1) {
+		for (pf->drive = 0; pf->drive <= 1; pf->drive++)
+			if (!pf_reset(pf)) {
+				if (pf->lun != -1)
+					return pf_identify(pf);
+				else
+					for (pf->lun = 0; pf->lun < 8; pf->lun++)
+						if (!pf_identify(pf))
+							return 0;
+			}
+	} else {
+		if (pf_reset(pf))
+			return -1;
+		if (pf->lun != -1)
+			return pf_identify(pf);
+		for (pf->lun = 0; pf->lun < 8; pf->lun++)
+			if (!pf_identify(pf))
+				return 0;
+	}
+	return -1;
+}
+
+static int pf_detect(void)
+{
+	struct pf_unit *pf = units;
+	int k, unit;
+
+	printk("%s: %s version %s, major %d, cluster %d, nice %d\n",
+	       name, name, PF_VERSION, major, cluster, nice);
+
+	k = 0;
+	if (pf_drive_count == 0) {
+		if (pi_init(pf->pi, 1, -1, -1, -1, -1, -1, pf_scratch, PI_PF,
+			    verbose, pf->name)) {
+			if (!pf_probe(pf) && pf->disk) {
+				pf->present = 1;
+				k++;
+			} else
+				pi_release(pf->pi);
+		}
+
+	} else
+		for (unit = 0; unit < PF_UNITS; unit++, pf++) {
+			int *conf = *drives[unit];
+			if (!conf[D_PRT])
+				continue;
+			if (pi_init(pf->pi, 0, conf[D_PRT], conf[D_MOD],
+				    conf[D_UNI], conf[D_PRO], conf[D_DLY],
+				    pf_scratch, PI_PF, verbose, pf->name)) {
+				if (!pf_probe(pf) && pf->disk) {
+					pf->present = 1;
+					k++;
+				} else
+					pi_release(pf->pi);
+			}
+		}
+	if (k)
+		return 0;
+
+	printk("%s: No ATAPI disk detected\n", name);
+	for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++)
+		put_disk(pf->disk);
+	return -1;
+}
+
+/* The i/o request engine */
+
+static int pf_start(struct pf_unit *pf, int cmd, int b, int c)
+{
+	int i;
+	char io_cmd[12] = { cmd, pf->lun << 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+
+	for (i = 0; i < 4; i++) {
+		io_cmd[5 - i] = b & 0xff;
+		b = b >> 8;
+	}
+
+	io_cmd[8] = c & 0xff;
+	io_cmd[7] = (c >> 8) & 0xff;
+
+	i = pf_command(pf, io_cmd, c * 512, "start i/o");
+
+	mdelay(1);
+
+	return i;
+}
+
+static int pf_ready(void)
+{
+	return (((status_reg(pf_current) & (STAT_BUSY | pf_mask)) == pf_mask));
+}
+
+static struct request_queue *pf_queue;
+
+static void do_pf_request(request_queue_t * q)
+{
+	if (pf_busy)
+		return;
+repeat:
+	pf_req = elv_next_request(q);
+	if (!pf_req)
+		return;
+
+	pf_current = pf_req->rq_disk->private_data;
+	pf_block = pf_req->sector;
+	pf_run = pf_req->nr_sectors;
+	pf_count = pf_req->current_nr_sectors;
+
+	if (pf_block + pf_count > get_capacity(pf_req->rq_disk)) {
+		end_request(pf_req, 0);
+		goto repeat;
+	}
+
+	pf_cmd = rq_data_dir(pf_req);
+	pf_buf = pf_req->buffer;
+	pf_retries = 0;
+
+	pf_busy = 1;
+	if (pf_cmd == READ)
+		pi_do_claimed(pf_current->pi, do_pf_read);
+	else if (pf_cmd == WRITE)
+		pi_do_claimed(pf_current->pi, do_pf_write);
+	else {
+		pf_busy = 0;
+		end_request(pf_req, 0);
+		goto repeat;
+	}
+}
+
+static int pf_next_buf(void)
+{
+	unsigned long saved_flags;
+
+	pf_count--;
+	pf_run--;
+	pf_buf += 512;
+	pf_block++;
+	if (!pf_run)
+		return 0;
+	if (!pf_count)
+		return 1;
+	spin_lock_irqsave(&pf_spin_lock, saved_flags);
+	end_request(pf_req, 1);
+	pf_count = pf_req->current_nr_sectors;
+	pf_buf = pf_req->buffer;
+	spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
+	return 1;
+}
+
+static inline void next_request(int success)
+{
+	unsigned long saved_flags;
+
+	spin_lock_irqsave(&pf_spin_lock, saved_flags);
+	end_request(pf_req, success);
+	pf_busy = 0;
+	do_pf_request(pf_queue);
+	spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
+}
+
+/* detach from the calling context - in case the spinlock is held */
+static void do_pf_read(void)
+{
+	ps_set_intr(do_pf_read_start, NULL, 0, nice);
+}
+
+static void do_pf_read_start(void)
+{
+	pf_busy = 1;
+
+	if (pf_start(pf_current, ATAPI_READ_10, pf_block, pf_run)) {
+		pi_disconnect(pf_current->pi);
+		if (pf_retries < PF_MAX_RETRIES) {
+			pf_retries++;
+			pi_do_claimed(pf_current->pi, do_pf_read_start);
+			return;
+		}
+		next_request(0);
+		return;
+	}
+	pf_mask = STAT_DRQ;
+	ps_set_intr(do_pf_read_drq, pf_ready, PF_TMO, nice);
+}
+
+static void do_pf_read_drq(void)
+{
+	while (1) {
+		if (pf_wait(pf_current, STAT_BUSY, STAT_DRQ | STAT_ERR,
+			    "read block", "completion") & STAT_ERR) {
+			pi_disconnect(pf_current->pi);
+			if (pf_retries < PF_MAX_RETRIES) {
+				pf_req_sense(pf_current, 0);
+				pf_retries++;
+				pi_do_claimed(pf_current->pi, do_pf_read_start);
+				return;
+			}
+			next_request(0);
+			return;
+		}
+		pi_read_block(pf_current->pi, pf_buf, 512);
+		if (pf_next_buf())
+			break;
+	}
+	pi_disconnect(pf_current->pi);
+	next_request(1);
+}
+
+static void do_pf_write(void)
+{
+	ps_set_intr(do_pf_write_start, NULL, 0, nice);
+}
+
+static void do_pf_write_start(void)
+{
+	pf_busy = 1;
+
+	if (pf_start(pf_current, ATAPI_WRITE_10, pf_block, pf_run)) {
+		pi_disconnect(pf_current->pi);
+		if (pf_retries < PF_MAX_RETRIES) {
+			pf_retries++;
+			pi_do_claimed(pf_current->pi, do_pf_write_start);
+			return;
+		}
+		next_request(0);
+		return;
+	}
+
+	while (1) {
+		if (pf_wait(pf_current, STAT_BUSY, STAT_DRQ | STAT_ERR,
+			    "write block", "data wait") & STAT_ERR) {
+			pi_disconnect(pf_current->pi);
+			if (pf_retries < PF_MAX_RETRIES) {
+				pf_retries++;
+				pi_do_claimed(pf_current->pi, do_pf_write_start);
+				return;
+			}
+			next_request(0);
+			return;
+		}
+		pi_write_block(pf_current->pi, pf_buf, 512);
+		if (pf_next_buf())
+			break;
+	}
+	pf_mask = 0;
+	ps_set_intr(do_pf_write_done, pf_ready, PF_TMO, nice);
+}
+
+static void do_pf_write_done(void)
+{
+	if (pf_wait(pf_current, STAT_BUSY, 0, "write block", "done") & STAT_ERR) {
+		pi_disconnect(pf_current->pi);
+		if (pf_retries < PF_MAX_RETRIES) {
+			pf_retries++;
+			pi_do_claimed(pf_current->pi, do_pf_write_start);
+			return;
+		}
+		next_request(0);
+		return;
+	}
+	pi_disconnect(pf_current->pi);
+	next_request(1);
+}
+
+static int __init pf_init(void)
+{				/* preliminary initialisation */
+	struct pf_unit *pf;
+	int unit;
+
+	if (disable)
+		return -1;
+
+	pf_init_units();
+
+	if (pf_detect())
+		return -1;
+	pf_busy = 0;
+
+	if (register_blkdev(major, name)) {
+		for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++)
+			put_disk(pf->disk);
+		return -1;
+	}
+	pf_queue = blk_init_queue(do_pf_request, &pf_spin_lock);
+	if (!pf_queue) {
+		unregister_blkdev(major, name);
+		for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++)
+			put_disk(pf->disk);
+		return -1;
+	}
+
+	blk_queue_max_phys_segments(pf_queue, cluster);
+	blk_queue_max_hw_segments(pf_queue, cluster);
+
+	for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
+		struct gendisk *disk = pf->disk;
+
+		if (!pf->present)
+			continue;
+		disk->private_data = pf;
+		disk->queue = pf_queue;
+		add_disk(disk);
+	}
+	return 0;
+}
+
+static void __exit pf_exit(void)
+{
+	struct pf_unit *pf;
+	int unit;
+	unregister_blkdev(major, name);
+	for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
+		if (!pf->present)
+			continue;
+		del_gendisk(pf->disk);
+		put_disk(pf->disk);
+		pi_release(pf->pi);
+	}
+	blk_cleanup_queue(pf_queue);
+}
+
+MODULE_LICENSE("GPL");
+module_init(pf_init)
+module_exit(pf_exit)
diff --git a/drivers/block/paride/pg.c b/drivers/block/paride/pg.c
new file mode 100644
index 0000000..dbeb107
--- /dev/null
+++ b/drivers/block/paride/pg.c
@@ -0,0 +1,723 @@
+/* 
+	pg.c    (c) 1998  Grant R. Guenther <grant@torque.net>
+			  Under the terms of the GNU General Public License.
+
+	The pg driver provides a simple character device interface for
+	sending ATAPI commands to a device.  With the exception of the
+	ATAPI reset operation, all operations are performed by a pair
+	of read and write operations to the appropriate /dev/pgN device.
+	A write operation delivers a command and any outbound data in
+	a single buffer.  Normally, the write will succeed unless the
+	device is offline or malfunctioning, or there is already another
+	command pending.  If the write succeeds, it should be followed
+	immediately by a read operation, to obtain any returned data and
+	status information.  A read will fail if there is no operation
+	in progress.
+
+	As a special case, the device can be reset with a write operation,
+	and in this case, no following read is expected, or permitted.
+
+	There are no ioctl() operations.  Any single operation
+	may transfer at most PG_MAX_DATA bytes.  Note that the driver must
+	copy the data through an internal buffer.  In keeping with all
+	current ATAPI devices, command packets are assumed to be exactly
+	12 bytes in length.
+
+	To permit future changes to this interface, the headers in the
+	read and write buffers contain a single character "magic" flag.
+	Currently this flag must be the character "P".
+
+	By default, the driver will autoprobe for a single parallel
+	port ATAPI device, but if the devices' individual parameters
+	are specified, the driver can handle up to 4 devices.
+
+	To use this device, you must have the following device 
+	special files defined:
+
+		/dev/pg0 c 97 0
+		/dev/pg1 c 97 1
+		/dev/pg2 c 97 2
+		/dev/pg3 c 97 3
+
+	(You'll need to change the 97 to something else if you use
+	the 'major' parameter to install the driver on a different
+	major number.)
+
+	The behaviour of the pg driver can be altered by setting
+	some parameters from the insmod command line.  The following
+	parameters are adjustable:
+
+	    drive0      These four arguments can be arrays of       
+	    drive1      1-6 integers as follows:
+	    drive2
+	    drive3      <prt>,<pro>,<uni>,<mod>,<slv>,<dly>
+
+			Where,
+
+		<prt>   is the base of the parallel port address for
+			the corresponding drive.  (required)
+
+		<pro>   is the protocol number for the adapter that
+			supports this drive.  These numbers are
+			logged by 'paride' when the protocol modules
+			are initialised.  (0 if not given)
+
+		<uni>   for those adapters that support chained
+			devices, this is the unit selector for the
+			chain of devices on the given port.  It should
+			be zero for devices that don't support chaining.
+			(0 if not given)
+
+		<mod>   this can be -1 to choose the best mode, or one
+			of the mode numbers supported by the adapter.
+			(-1 if not given)
+
+		<slv>   ATAPI devices can be jumpered to master or slave.
+			Set this to 0 to choose the master drive, 1 to
+			choose the slave, -1 (the default) to choose the
+			first drive found.
+
+		<dly>   some parallel ports require the driver to 
+			go more slowly.  -1 sets a default value that
+			should work with the chosen protocol.  Otherwise,
+			set this to a small integer, the larger it is
+			the slower the port i/o.  In some cases, setting
+			this to zero will speed up the device. (default -1)
+
+	    major	You may use this parameter to override the
+			default major number (97) that this driver
+			will use.  Be sure to change the device
+			name as well.
+
+	    name	This parameter is a character string that
+			contains the name the kernel will use for this
+			device (in /proc output, for instance).
+			(default "pg").
+
+	    verbose     This parameter controls the amount of logging
+			that is done by the driver.  Set it to 0 for 
+			quiet operation, to 1 to enable progress
+			messages while the driver probes for devices,
+			or to 2 for full debug logging.  (default 0)
+
+	If this driver is built into the kernel, you can use 
+	the following command line parameters, with the same values
+	as the corresponding module parameters listed above:
+
+	    pg.drive0
+	    pg.drive1
+	    pg.drive2
+	    pg.drive3
+
+	In addition, you can use the parameter pg.disable to disable
+	the driver entirely.
+
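+	For example, a hypothetical manual load for an adapter on the
+	first parallel port, using protocol number 1, might be:
+
+	    insmod pg drive0=0x378,1
+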
+*/
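+
+/*
+   An illustrative user-space sketch of the write/read protocol
+   described above (not part of the driver; error handling is
+   omitted).  The all-zero packet happens to be ATAPI TEST UNIT
+   READY, which transfers no data, so dlen stays zero; the timeout
+   is in seconds, and rhdr.scsi returns the sense key, so a zero
+   return means the device is ready:
+
+	#include <fcntl.h>
+	#include <unistd.h>
+	#include <linux/pg.h>
+
+	int pg_test_ready(void)
+	{
+		struct pg_write_hdr whdr = { 0 };
+		struct pg_read_hdr rhdr;
+		int fd = open("/dev/pg0", O_RDWR);
+
+		whdr.magic = PG_MAGIC;
+		whdr.func = PG_COMMAND;
+		whdr.timeout = 10;
+		write(fd, &whdr, sizeof(whdr));
+		read(fd, &rhdr, sizeof(rhdr));
+		close(fd);
+		return rhdr.scsi;
+	}
+*/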
+
+/* Changes:
+
+	1.01	GRG 1998.06.16	Bug fixes
+	1.02    GRG 1998.09.24  Added jumbo support
+
+*/
+
+#define PG_VERSION      "1.02"
+#define PG_MAJOR	97
+#define PG_NAME		"pg"
+#define PG_UNITS	4
+
+#ifndef PI_PG
+#define PI_PG	4
+#endif
+
+/* Here are things one can override from the insmod command.
+   Most are autoprobed by paride unless set here.  Verbose is 0
+   by default.
+
+*/
+
+static int verbose = 0;
+static int major = PG_MAJOR;
+static char *name = PG_NAME;
+static int disable = 0;
+
+static int drive0[6] = { 0, 0, 0, -1, -1, -1 };
+static int drive1[6] = { 0, 0, 0, -1, -1, -1 };
+static int drive2[6] = { 0, 0, 0, -1, -1, -1 };
+static int drive3[6] = { 0, 0, 0, -1, -1, -1 };
+
+static int (*drives[4])[6] = {&drive0, &drive1, &drive2, &drive3};
+static int pg_drive_count;
+
+enum {D_PRT, D_PRO, D_UNI, D_MOD, D_SLV, D_DLY};
+
+/* end of parameters */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/devfs_fs_kernel.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/mtio.h>
+#include <linux/pg.h>
+#include <linux/device.h>
+
+#include <asm/uaccess.h>
+
+module_param(verbose, bool, 0644);
+module_param(major, int, 0);
+module_param(name, charp, 0);
+module_param_array(drive0, int, NULL, 0);
+module_param_array(drive1, int, NULL, 0);
+module_param_array(drive2, int, NULL, 0);
+module_param_array(drive3, int, NULL, 0);
+
+#include "paride.h"
+
+#define PG_SPIN_DEL     50	/* spin delay in micro-seconds  */
+#define PG_SPIN         200
+#define PG_TMO		HZ
+#define PG_RESET_TMO	10*HZ
+
+#define STAT_ERR        0x01
+#define STAT_INDEX      0x02
+#define STAT_ECC        0x04
+#define STAT_DRQ        0x08
+#define STAT_SEEK       0x10
+#define STAT_WRERR      0x20
+#define STAT_READY      0x40
+#define STAT_BUSY       0x80
+
+#define ATAPI_IDENTIFY		0x12
+
+static int pg_open(struct inode *inode, struct file *file);
+static int pg_release(struct inode *inode, struct file *file);
+static ssize_t pg_read(struct file *filp, char __user *buf,
+		       size_t count, loff_t * ppos);
+static ssize_t pg_write(struct file *filp, const char __user *buf,
+			size_t count, loff_t * ppos);
+static int pg_detect(void);
+
+#define PG_NAMELEN      8
+
+struct pg {
+	struct pi_adapter pia;	/* interface to paride layer */
+	struct pi_adapter *pi;
+	int busy;		/* write done, read expected */
+	int start;		/* jiffies at command start */
+	int dlen;		/* transfer size requested */
+	unsigned long timeout;	/* timeout requested */
+	int status;		/* last sense key */
+	int drive;		/* drive */
+	unsigned long access;	/* bit 0 set while the device is open */
+	int present;		/* device present ? */
+	char *bufptr;
+	char name[PG_NAMELEN];	/* pg0, pg1, ... */
+};
+
+static struct pg devices[PG_UNITS];
+
+static int pg_identify(struct pg *dev, int log);
+
+static char pg_scratch[512];	/* scratch block buffer */
+
+static struct class_simple *pg_class;
+
+/* kernel glue structures */
+
+static struct file_operations pg_fops = {
+	.owner = THIS_MODULE,
+	.read = pg_read,
+	.write = pg_write,
+	.open = pg_open,
+	.release = pg_release,
+};
+
+static void pg_init_units(void)
+{
+	int unit;
+
+	pg_drive_count = 0;
+	for (unit = 0; unit < PG_UNITS; unit++) {
+		int *parm = *drives[unit];
+		struct pg *dev = &devices[unit];
+		dev->pi = &dev->pia;
+		clear_bit(0, &dev->access);
+		dev->busy = 0;
+		dev->present = 0;
+		dev->bufptr = NULL;
+		dev->drive = parm[D_SLV];
+		snprintf(dev->name, PG_NAMELEN, "%s%c", name, 'a'+unit);
+		if (parm[D_PRT])
+			pg_drive_count++;
+	}
+}
+
+static inline int status_reg(struct pg *dev)
+{
+	return pi_read_regr(dev->pi, 1, 6);
+}
+
+static inline int read_reg(struct pg *dev, int reg)
+{
+	return pi_read_regr(dev->pi, 0, reg);
+}
+
+static inline void write_reg(struct pg *dev, int reg, int val)
+{
+	pi_write_regr(dev->pi, 0, reg, val);
+}
+
+static inline u8 DRIVE(struct pg *dev)
+{
+	return 0xa0+0x10*dev->drive;
+}
+
+static void pg_sleep(int cs)
+{
+	current->state = TASK_INTERRUPTIBLE;
+	schedule_timeout(cs);
+}
+
+static int pg_wait(struct pg *dev, int go, int stop, unsigned long tmo, char *msg)
+{
+	int j, r, e, s, p, to;
+
+	dev->status = 0;
+
+	j = 0;
+	while ((((r = status_reg(dev)) & go) || (stop && (!(r & stop))))
+	       && time_before(jiffies, tmo)) {
+		if (j++ < PG_SPIN)
+			udelay(PG_SPIN_DEL);
+		else
+			pg_sleep(1);
+	}
+
+	to = time_after_eq(jiffies, tmo);
+
+	if ((r & (STAT_ERR & stop)) || to) {
+		s = read_reg(dev, 7);
+		e = read_reg(dev, 1);
+		p = read_reg(dev, 2);
+		if (verbose > 1)
+			printk("%s: %s: stat=0x%x err=0x%x phase=%d%s\n",
+			       dev->name, msg, s, e, p, to ? " timeout" : "");
+		if (to)
+			e |= 0x100;
+		dev->status = (e >> 4) & 0xff;
+		return -1;
+	}
+	return 0;
+}
+
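+/*
+ * Issue a 12-byte ATAPI packet command: select the drive, load the
+ * expected transfer length into registers 4/5, write the PACKET
+ * opcode (0xa0) to register 7, wait for DRQ, then send the packet.
+ */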
+static int pg_command(struct pg *dev, char *cmd, int dlen, unsigned long tmo)
+{
+	int k;
+
+	pi_connect(dev->pi);
+
+	write_reg(dev, 6, DRIVE(dev));
+
+	if (pg_wait(dev, STAT_BUSY | STAT_DRQ, 0, tmo, "before command"))
+		goto fail;
+
+	write_reg(dev, 4, dlen % 256);
+	write_reg(dev, 5, dlen / 256);
+	write_reg(dev, 7, 0xa0);	/* ATAPI packet command */
+
+	if (pg_wait(dev, STAT_BUSY, STAT_DRQ, tmo, "command DRQ"))
+		goto fail;
+
+	if (read_reg(dev, 2) != 1) {
+		printk("%s: command phase error\n", dev->name);
+		goto fail;
+	}
+
+	pi_write_block(dev->pi, cmd, 12);
+
+	if (verbose > 1) {
+		printk("%s: Command sent, dlen=%d packet= ", dev->name, dlen);
+		for (k = 0; k < 12; k++)
+			printk("%02x ", cmd[k] & 0xff);
+		printk("\n");
+	}
+	return 0;
+fail:
+	pi_disconnect(dev->pi);
+	return -1;
+}
+
+static int pg_completion(struct pg *dev, char *buf, unsigned long tmo)
+{
+	int r, d, n, p;
+
+	r = pg_wait(dev, STAT_BUSY, STAT_DRQ | STAT_READY | STAT_ERR,
+		    tmo, "completion");
+
+	dev->dlen = 0;
+
+	while (read_reg(dev, 7) & STAT_DRQ) {
+		d = (read_reg(dev, 4) + 256 * read_reg(dev, 5));
+		n = ((d + 3) & 0xfffc);
+		p = read_reg(dev, 2) & 3;
+		if (p == 0)
+			pi_write_block(dev->pi, buf, n);
+		if (p == 2)
+			pi_read_block(dev->pi, buf, n);
+		if (verbose > 1)
+			printk("%s: %s %d bytes\n", dev->name,
+			       p ? "Read" : "Write", n);
+		dev->dlen += (1 - p) * d;
+		buf += d;
+		r = pg_wait(dev, STAT_BUSY, STAT_DRQ | STAT_READY | STAT_ERR,
+			    tmo, "completion");
+	}
+
+	pi_disconnect(dev->pi);
+
+	return r;
+}
+
+static int pg_reset(struct pg *dev)
+{
+	int i, k, err;
+	int expect[5] = { 1, 1, 1, 0x14, 0xeb };
+	int got[5];
+
+	pi_connect(dev->pi);
+	write_reg(dev, 6, DRIVE(dev));
+	write_reg(dev, 7, 8);
+
+	pg_sleep(20 * HZ / 1000);
+
+	k = 0;
+	while ((k++ < PG_RESET_TMO) && (status_reg(dev) & STAT_BUSY))
+		pg_sleep(1);
+
+	for (i = 0; i < 5; i++)
+		got[i] = read_reg(dev, i + 1);
+
+	err = memcmp(expect, got, sizeof(got)) ? -1 : 0;
+
+	if (verbose) {
+		printk("%s: Reset (%d) signature = ", dev->name, k);
+		for (i = 0; i < 5; i++)
+			printk("%3x", got[i]);
+		if (err)
+			printk(" (incorrect)");
+		printk("\n");
+	}
+
+	pi_disconnect(dev->pi);
+	return err;
+}
+
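+/*
+ * Copy a space-padded identify string, collapsing runs of spaces to
+ * a single space and trimming a trailing space.
+ */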
+static void xs(char *buf, char *targ, int len)
+{
+	char l = '\0';
+	int k;
+
+	for (k = 0; k < len; k++) {
+		char c = *buf++;
+		if (c != ' ' || c != l)
+			l = *targ++ = c;
+	}
+	if (l == ' ')
+		targ--;
+	*targ = '\0';
+}
+
+static int pg_identify(struct pg *dev, int log)
+{
+	int s;
+	char *ms[2] = { "master", "slave" };
+	char mf[10], id[18];
+	char id_cmd[12] = { ATAPI_IDENTIFY, 0, 0, 0, 36, 0, 0, 0, 0, 0, 0, 0 };
+	char buf[36];
+
+	s = pg_command(dev, id_cmd, 36, jiffies + PG_TMO);
+	if (s)
+		return -1;
+	s = pg_completion(dev, buf, jiffies + PG_TMO);
+	if (s)
+		return -1;
+
+	if (log) {
+		xs(buf + 8, mf, 8);
+		xs(buf + 16, id, 16);
+		printk("%s: %s %s, %s\n", dev->name, mf, id, ms[dev->drive]);
+	}
+
+	return 0;
+}
+
+/*
+ * returns  0 if a drive is detected (dev->drive is set to it)
+ *	   -1 if drive detection failed
+ */
+static int pg_probe(struct pg *dev)
+{
+	if (dev->drive == -1) {
+		for (dev->drive = 0; dev->drive <= 1; dev->drive++)
+			if (!pg_reset(dev))
+				return pg_identify(dev, 1);
+	} else {
+		if (!pg_reset(dev))
+			return pg_identify(dev, 1);
+	}
+	return -1;
+}
+
+static int pg_detect(void)
+{
+	struct pg *dev = &devices[0];
+	int k, unit;
+
+	printk("%s: %s version %s, major %d\n", name, name, PG_VERSION, major);
+
+	k = 0;
+	if (pg_drive_count == 0) {
+		if (pi_init(dev->pi, 1, -1, -1, -1, -1, -1, pg_scratch,
+			    PI_PG, verbose, dev->name)) {
+			if (!pg_probe(dev)) {
+				dev->present = 1;
+				k++;
+			} else
+				pi_release(dev->pi);
+		}
+
+	} else
+		for (unit = 0; unit < PG_UNITS; unit++, dev++) {
+			int *parm = *drives[unit];
+			if (!parm[D_PRT])
+				continue;
+			if (pi_init(dev->pi, 0, parm[D_PRT], parm[D_MOD],
+				    parm[D_UNI], parm[D_PRO], parm[D_DLY],
+				    pg_scratch, PI_PG, verbose, dev->name)) {
+				if (!pg_probe(dev)) {
+					dev->present = 1;
+					k++;
+				} else
+					pi_release(dev->pi);
+			}
+		}
+
+	if (k)
+		return 0;
+
+	printk("%s: No ATAPI device detected\n", name);
+	return -1;
+}
+
+static int pg_open(struct inode *inode, struct file *file)
+{
+	int unit = iminor(inode) & 0x7f;
+	struct pg *dev = &devices[unit];
+
+	if ((unit >= PG_UNITS) || (!dev->present))
+		return -ENODEV;
+
+	if (test_and_set_bit(0, &dev->access))
+		return -EBUSY;
+
+	if (dev->busy) {
+		pg_reset(dev);
+		dev->busy = 0;
+	}
+
+	pg_identify(dev, (verbose > 1));
+
+	dev->bufptr = kmalloc(PG_MAX_DATA, GFP_KERNEL);
+	if (dev->bufptr == NULL) {
+		clear_bit(0, &dev->access);
+		printk("%s: buffer allocation failed\n", dev->name);
+		return -ENOMEM;
+	}
+
+	file->private_data = dev;
+
+	return 0;
+}
+
+static int pg_release(struct inode *inode, struct file *file)
+{
+	struct pg *dev = file->private_data;
+
+	kfree(dev->bufptr);
+	dev->bufptr = NULL;
+	clear_bit(0, &dev->access);
+
+	return 0;
+}
+
+static ssize_t pg_write(struct file *filp, const char __user *buf, size_t count, loff_t *ppos)
+{
+	struct pg *dev = filp->private_data;
+	struct pg_write_hdr hdr;
+	int hs = sizeof (hdr);
+
+	if (dev->busy)
+		return -EBUSY;
+	if (count < hs)
+		return -EINVAL;
+
+	if (copy_from_user(&hdr, buf, hs))
+		return -EFAULT;
+
+	if (hdr.magic != PG_MAGIC)
+		return -EINVAL;
+	if (hdr.dlen < 0 || hdr.dlen > PG_MAX_DATA)
+		return -EINVAL;
+	if ((count - hs) > PG_MAX_DATA)
+		return -EINVAL;
+
+	if (hdr.func == PG_RESET) {
+		if (count != hs)
+			return -EINVAL;
+		if (pg_reset(dev))
+			return -EIO;
+		return count;
+	}
+
+	if (hdr.func != PG_COMMAND)
+		return -EINVAL;
+
+	dev->start = jiffies;
+	dev->timeout = hdr.timeout * HZ + HZ / 2 + jiffies;
+
+	if (pg_command(dev, hdr.packet, hdr.dlen, jiffies + PG_TMO)) {
+		if (dev->status & 0x10)
+			return -ETIME;
+		return -EIO;
+	}
+
+	dev->busy = 1;
+
+	if (copy_from_user(dev->bufptr, buf + hs, count - hs))
+		return -EFAULT;
+	return count;
+}
+
+static ssize_t pg_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
+{
+	struct pg *dev = filp->private_data;
+	struct pg_read_hdr hdr;
+	int hs = sizeof (hdr);
+	int copy;
+
+	if (!dev->busy)
+		return -EINVAL;
+	if (count < hs)
+		return -EINVAL;
+
+	dev->busy = 0;
+
+	if (pg_completion(dev, dev->bufptr, dev->timeout))
+		if (dev->status & 0x10)
+			return -ETIME;
+
+	hdr.magic = PG_MAGIC;
+	hdr.dlen = dev->dlen;
+	copy = 0;
+
+	if (hdr.dlen < 0) {
+		hdr.dlen = -1 * hdr.dlen;
+		copy = hdr.dlen;
+		if (copy > (count - hs))
+			copy = count - hs;
+	}
+
+	hdr.duration = (jiffies - dev->start + HZ / 2) / HZ;
+	hdr.scsi = dev->status & 0x0f;
+
+	if (copy_to_user(buf, &hdr, hs))
+		return -EFAULT;
+	if (copy > 0)
+		if (copy_to_user(buf + hs, dev->bufptr, copy))
+			return -EFAULT;
+	return copy + hs;
+}
+
+static int __init pg_init(void)
+{
+	int unit, err = 0;
+
+	if (disable){
+		err = -1;
+		goto out;
+	}
+
+	pg_init_units();
+
+	if (pg_detect()) {
+		err = -1;
+		goto out;
+	}
+
+	if (register_chrdev(major, name, &pg_fops)) {
+		printk("pg_init: unable to get major number %d\n", major);
+		for (unit = 0; unit < PG_UNITS; unit++) {
+			struct pg *dev = &devices[unit];
+			if (dev->present)
+				pi_release(dev->pi);
+		}
+		err = -1;
+		goto out;
+	}
+	pg_class = class_simple_create(THIS_MODULE, "pg");
+	if (IS_ERR(pg_class)) {
+		err = PTR_ERR(pg_class);
+		goto out_chrdev;
+	}
+	devfs_mk_dir("pg");
+	for (unit = 0; unit < PG_UNITS; unit++) {
+		struct pg *dev = &devices[unit];
+		if (dev->present) {
+			class_simple_device_add(pg_class, MKDEV(major, unit), 
+					NULL, "pg%u", unit);
+			err = devfs_mk_cdev(MKDEV(major, unit),
+				      S_IFCHR | S_IRUSR | S_IWUSR, "pg/%u",
+				      unit);
+			if (err) 
+				goto out_class;
+		}
+	}
+	err = 0;
+	goto out;
+
+out_class:
+	class_simple_device_remove(MKDEV(major, unit));
+	class_simple_destroy(pg_class);
+out_chrdev:
+	unregister_chrdev(major, "pg");
+out:
+	return err;
+}
+
+static void __exit pg_exit(void)
+{
+	int unit;
+
+	for (unit = 0; unit < PG_UNITS; unit++) {
+		struct pg *dev = &devices[unit];
+		if (dev->present) {
+			class_simple_device_remove(MKDEV(major, unit));
+			devfs_remove("pg/%u", unit);
+		}
+	}
+	class_simple_destroy(pg_class);
+	devfs_remove("pg");
+	unregister_chrdev(major, name);
+
+	for (unit = 0; unit < PG_UNITS; unit++) {
+		struct pg *dev = &devices[unit];
+		if (dev->present)
+			pi_release(dev->pi);
+	}
+}
+
+MODULE_LICENSE("GPL");
+module_init(pg_init)
+module_exit(pg_exit)
diff --git a/drivers/block/paride/ppc6lnx.c b/drivers/block/paride/ppc6lnx.c
new file mode 100644
index 0000000..5e5521d
--- /dev/null
+++ b/drivers/block/paride/ppc6lnx.c
@@ -0,0 +1,726 @@
+/*
+	ppc6lnx.c (c) 2001 Micro Solutions Inc.
+		Released under the terms of the GNU General Public license
+
+	ppc6lnx.c  is a part of the protocol driver for the Micro Solutions
+		"BACKPACK" parallel port IDE adapter
+		(Works on Series 6 drives)
+
+*/
+
+//***************************************************************************
+
+// PPC 6 Code in C sanitized for LINUX
+// Original x86 ASM by Ron, Converted to C by Clive
+
+//***************************************************************************
+
+
+#define port_stb					1
+#define port_afd					2
+#define cmd_stb						port_afd
+#define port_init					4
+#define data_stb					port_init
+#define port_sel					8
+#define port_int					16
+#define port_dir					0x20
+
+#define ECR_EPP	0x80
+#define ECR_BI	0x20
+
+//***************************************************************************
+
+//  60772 Commands
+
+#define ACCESS_REG				0x00
+#define ACCESS_PORT				0x40
+
+#define ACCESS_READ				0x00
+#define ACCESS_WRITE			0x20
+
+//  60772 Command Prefix
+
+#define CMD_PREFIX_SET		0xe0		// Special command that modifies the next command's operation
+#define CMD_PREFIX_RESET	0xc0		// Resets current cmd modifier reg bits
+ #define PREFIX_IO16			0x01		// perform 16-bit wide I/O
+ #define PREFIX_FASTWR		0x04		// enable PPC mode fast-write
+ #define PREFIX_BLK				0x08		// enable block transfer mode
+
+// 60772 Registers
+
+#define REG_STATUS				0x00		// status register
+ #define STATUS_IRQA			0x01		// Peripheral IRQA line
+ #define STATUS_EEPROM_DO	0x40		// Serial EEPROM data bit
+#define REG_VERSION				0x01		// PPC version register (read)
+#define REG_HWCFG					0x02		// Hardware Config register
+#define REG_RAMSIZE				0x03		// Size of RAM Buffer
+ #define RAMSIZE_128K			0x02
+#define REG_EEPROM				0x06		// EEPROM control register
+ #define EEPROM_SK				0x01		// eeprom SK bit
+ #define EEPROM_DI				0x02		// eeprom DI bit
+ #define EEPROM_CS				0x04		// eeprom CS bit
+ #define EEPROM_EN				0x08		// eeprom output enable
+#define REG_BLKSIZE				0x08		// Block transfer len (24 bit)
+
+//***************************************************************************
+
+typedef struct ppc_storage {
+	u16	lpt_addr;				// LPT base address
+	u8	ppc_id;
+	u8	mode;						// operating mode
+					// 0 = PPC Uni SW
+					// 1 = PPC Uni FW
+					// 2 = PPC Bi SW
+					// 3 = PPC Bi FW
+					// 4 = EPP Byte
+					// 5 = EPP Word
+					// 6 = EPP Dword
+	u8	ppc_flags;
+	u8	org_data;				// original LPT data port contents
+	u8	org_ctrl;				// original LPT control port contents
+	u8	cur_ctrl;				// current control port contents
+} Interface;
+
+//***************************************************************************
+
+// ppc_flags
+
+#define fifo_wait					0x10
+
+//***************************************************************************
+
+// DONT CHANGE THESE LEST YOU BREAK EVERYTHING - BIT FIELD DEPENDENCIES
+
+#define PPCMODE_UNI_SW		0
+#define PPCMODE_UNI_FW		1
+#define PPCMODE_BI_SW			2
+#define PPCMODE_BI_FW			3
+#define PPCMODE_EPP_BYTE	4
+#define PPCMODE_EPP_WORD	5
+#define PPCMODE_EPP_DWORD	6
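+
+// As used by ppc6_select()/ppc6_deselect(): bit 2 set selects the
+// EPP modes (the low bits then pick the transfer width); for the
+// PPC modes, bit 0 enables fast-write and bit 1 bidirectional
+// transfers.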
+
+//***************************************************************************
+
+static int ppc6_select(Interface *ppc);
+static void ppc6_deselect(Interface *ppc);
+static void ppc6_send_cmd(Interface *ppc, u8 cmd);
+static void ppc6_wr_data_byte(Interface *ppc, u8 data);
+static u8 ppc6_rd_data_byte(Interface *ppc);
+static u8 ppc6_rd_port(Interface *ppc, u8 port);
+static void ppc6_wr_port(Interface *ppc, u8 port, u8 data);
+static void ppc6_rd_data_blk(Interface *ppc, u8 *data, long count);
+static void ppc6_wait_for_fifo(Interface *ppc);
+static void ppc6_wr_data_blk(Interface *ppc, u8 *data, long count);
+static void ppc6_rd_port16_blk(Interface *ppc, u8 port, u8 *data, long length);
+static void ppc6_wr_port16_blk(Interface *ppc, u8 port, u8 *data, long length);
+static void ppc6_wr_extout(Interface *ppc, u8 regdata);
+static int ppc6_open(Interface *ppc);
+static void ppc6_close(Interface *ppc);
+
+//***************************************************************************
+
+static int ppc6_select(Interface *ppc)
+{
+	u8 i, j, k;
+
+	i = inb(ppc->lpt_addr + 1);
+
+	if (i & 1)
+		outb(i, ppc->lpt_addr + 1);
+
+	ppc->org_data = inb(ppc->lpt_addr);
+
+	ppc->org_ctrl = inb(ppc->lpt_addr + 2) & 0x5F; // readback ctrl
+
+	ppc->cur_ctrl = ppc->org_ctrl;
+
+	ppc->cur_ctrl |= port_sel;
+
+	outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
+
+	if (ppc->org_data == 'b')
+		outb('x', ppc->lpt_addr);
+
+	outb('b', ppc->lpt_addr);
+	outb('p', ppc->lpt_addr);
+	outb(ppc->ppc_id, ppc->lpt_addr);
+	outb(~ppc->ppc_id,ppc->lpt_addr);
+
+	ppc->cur_ctrl &= ~port_sel;
+
+	outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
+
+	ppc->cur_ctrl = (ppc->cur_ctrl & port_int) | port_init;
+
+	outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
+
+	i = ppc->mode & 0x0C;
+
+	if (i == 0)
+		i = (ppc->mode & 2) | 1;
+
+	outb(i, ppc->lpt_addr);
+
+	ppc->cur_ctrl |= port_sel;
+
+	outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
+
+	// DELAY
+
+	ppc->cur_ctrl |= port_afd;
+
+	outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
+
+	j = ((i & 0x08) << 4) | ((i & 0x07) << 3);
+
+	k = inb(ppc->lpt_addr + 1) & 0xB8;
+
+	if (j == k)
+	{
+		ppc->cur_ctrl &= ~port_afd;
+
+		outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
+
+		k = (inb(ppc->lpt_addr + 1) & 0xB8) ^ 0xB8;
+
+		if (j == k)
+		{
+			if (i & 4)	// EPP
+				ppc->cur_ctrl &= ~(port_sel | port_init);
+			else				// PPC/ECP
+				ppc->cur_ctrl &= ~port_sel;
+
+			outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
+
+			return(1);
+		}
+	}
+
+	outb(ppc->org_ctrl, ppc->lpt_addr + 2);
+
+	outb(ppc->org_data, ppc->lpt_addr);
+
+	return(0); // FAIL
+}
+
+//***************************************************************************
+
+static void ppc6_deselect(Interface *ppc)
+{
+	if (ppc->mode & 4)	// EPP
+		ppc->cur_ctrl |= port_init;
+	else								// PPC/ECP
+		ppc->cur_ctrl |= port_sel;
+
+	outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
+
+	outb(ppc->org_data, ppc->lpt_addr);
+
+	outb((ppc->org_ctrl | port_sel), ppc->lpt_addr + 2);
+
+	outb(ppc->org_ctrl, ppc->lpt_addr + 2);
+}
+
+//***************************************************************************
+
+static void ppc6_send_cmd(Interface *ppc, u8 cmd)
+{
+	switch(ppc->mode)
+	{
+		case PPCMODE_UNI_SW :
+		case PPCMODE_UNI_FW :
+		case PPCMODE_BI_SW :
+		case PPCMODE_BI_FW :
+		{
+			outb(cmd, ppc->lpt_addr);
+
+			ppc->cur_ctrl ^= cmd_stb;
+
+			outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
+
+			break;
+		}
+
+		case PPCMODE_EPP_BYTE :
+		case PPCMODE_EPP_WORD :
+		case PPCMODE_EPP_DWORD :
+		{
+			outb(cmd, ppc->lpt_addr + 3);
+
+			break;
+		}
+	}
+}
+
+//***************************************************************************
+
+static void ppc6_wr_data_byte(Interface *ppc, u8 data)
+{
+	switch(ppc->mode)
+	{
+		case PPCMODE_UNI_SW :
+		case PPCMODE_UNI_FW :
+		case PPCMODE_BI_SW :
+		case PPCMODE_BI_FW :
+		{
+			outb(data, ppc->lpt_addr);
+
+			ppc->cur_ctrl ^= data_stb;
+
+			outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
+
+			break;
+		}
+
+		case PPCMODE_EPP_BYTE :
+		case PPCMODE_EPP_WORD :
+		case PPCMODE_EPP_DWORD :
+		{
+			outb(data, ppc->lpt_addr + 4);
+
+			break;
+		}
+	}
+}
+
+//***************************************************************************
+
+static u8 ppc6_rd_data_byte(Interface *ppc)
+{
+	u8 data = 0;
+
+	switch(ppc->mode)
+	{
+		case PPCMODE_UNI_SW :
+		case PPCMODE_UNI_FW :
+		{
+			ppc->cur_ctrl = (ppc->cur_ctrl & ~port_stb) ^ data_stb;
+
+			outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
+
+			// DELAY
+
+			data = inb(ppc->lpt_addr + 1);
+
+			data = ((data & 0x80) >> 1) | ((data & 0x38) >> 3);
+
+			ppc->cur_ctrl |= port_stb;
+
+			outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
+
+			// DELAY
+
+			data |= inb(ppc->lpt_addr + 1) & 0xB8;
+
+			break;
+		}
+
+		case PPCMODE_BI_SW :
+		case PPCMODE_BI_FW :
+		{
+			ppc->cur_ctrl |= port_dir;
+
+			outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
+
+			ppc->cur_ctrl = (ppc->cur_ctrl | port_stb) ^ data_stb;
+
+			outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
+
+			data = inb(ppc->lpt_addr);
+
+			ppc->cur_ctrl &= ~port_stb;
+
+			outb(ppc->cur_ctrl,ppc->lpt_addr + 2);
+
+			ppc->cur_ctrl &= ~port_dir;
+
+			outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
+
+			break;
+		}
+
+		case PPCMODE_EPP_BYTE :
+		case PPCMODE_EPP_WORD :
+		case PPCMODE_EPP_DWORD :
+		{
+			outb((ppc->cur_ctrl | port_dir),ppc->lpt_addr + 2);
+
+			data = inb(ppc->lpt_addr + 4);
+
+			outb(ppc->cur_ctrl,ppc->lpt_addr + 2);
+
+			break;
+		}
+	}
+
+	return(data);
+}
+
+//***************************************************************************
+
+static u8 ppc6_rd_port(Interface *ppc, u8 port)
+{
+	ppc6_send_cmd(ppc,(u8)(port | ACCESS_PORT | ACCESS_READ));
+
+	return(ppc6_rd_data_byte(ppc));
+}
+
+//***************************************************************************
+
+static void ppc6_wr_port(Interface *ppc, u8 port, u8 data)
+{
+	ppc6_send_cmd(ppc,(u8)(port | ACCESS_PORT | ACCESS_WRITE));
+
+	ppc6_wr_data_byte(ppc, data);
+}
+
+//***************************************************************************
+
+static void ppc6_rd_data_blk(Interface *ppc, u8 *data, long count)
+{
+	switch(ppc->mode)
+	{
+		case PPCMODE_UNI_SW :
+		case PPCMODE_UNI_FW :
+		{
+			while(count)
+			{
+				u8 d;
+
+				ppc->cur_ctrl = (ppc->cur_ctrl & ~port_stb) ^ data_stb;
+
+				outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
+
+				// DELAY
+
+				d = inb(ppc->lpt_addr + 1);
+
+				d = ((d & 0x80) >> 1) | ((d & 0x38) >> 3);
+
+				ppc->cur_ctrl |= port_stb;
+
+				outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
+
+				// DELAY
+
+				d |= inb(ppc->lpt_addr + 1) & 0xB8;
+
+				*data++ = d;
+				count--;
+			}
+
+			break;
+		}
+
+		case PPCMODE_BI_SW :
+		case PPCMODE_BI_FW :
+		{
+			ppc->cur_ctrl |= port_dir;
+
+			outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
+
+			ppc->cur_ctrl |= port_stb;
+
+			while(count)
+			{
+				ppc->cur_ctrl ^= data_stb;
+
+				outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
+
+				*data++ = inb(ppc->lpt_addr);
+				count--;
+			}
+
+			ppc->cur_ctrl &= ~port_stb;
+
+			outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
+
+			ppc->cur_ctrl &= ~port_dir;
+
+			outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
+
+			break;
+		}
+
+		case PPCMODE_EPP_BYTE :
+		{
+			outb((ppc->cur_ctrl | port_dir), ppc->lpt_addr + 2);
+
+			// DELAY
+
+			while(count)
+			{
+				*data++ = inb(ppc->lpt_addr + 4);
+				count--;
+			}
+
+			outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
+
+			break;
+		}
+
+		case PPCMODE_EPP_WORD :
+		{
+			outb((ppc->cur_ctrl | port_dir), ppc->lpt_addr + 2);
+
+			// DELAY
+
+			while(count > 1)
+			{
+				*((u16 *)data) = inw(ppc->lpt_addr + 4);
+				data  += 2;
+				count -= 2;
+			}
+
+			while(count)
+			{
+				*data++ = inb(ppc->lpt_addr + 4);
+				count--;
+			}
+
+			outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
+
+			break;
+		}
+
+		case PPCMODE_EPP_DWORD :
+		{
+			outb((ppc->cur_ctrl | port_dir),ppc->lpt_addr + 2);
+
+			// DELAY
+
+			while(count > 3)
+			{
+				*((u32 *)data) = inl(ppc->lpt_addr + 4);
+				data  += 4;
+				count -= 4;
+			}
+
+			while(count)
+			{
+				*data++ = inb(ppc->lpt_addr + 4);
+				count--;
+			}
+
+			outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
+
+			break;
+		}
+	}
+
+}
+
+//***************************************************************************
+
+static void ppc6_wait_for_fifo(Interface *ppc)
+{
+	int i;
+
+	if (ppc->ppc_flags & fifo_wait)
+	{
+		for(i=0; i<20; i++)
+			inb(ppc->lpt_addr + 1);
+	}
+}
+
+//***************************************************************************
+
+static void ppc6_wr_data_blk(Interface *ppc, u8 *data, long count)
+{
+	switch(ppc->mode)
+	{
+		case PPCMODE_UNI_SW :
+		case PPCMODE_BI_SW :
+		{
+			while(count--)
+			{
+				outb(*data++, ppc->lpt_addr);
+
+				ppc->cur_ctrl ^= data_stb;
+
+				outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
+			}
+
+			break;
+		}
+
+		case PPCMODE_UNI_FW :
+		case PPCMODE_BI_FW :
+		{
+			u8 this, last;
+
+			ppc6_send_cmd(ppc,(CMD_PREFIX_SET | PREFIX_FASTWR));
+
+			ppc->cur_ctrl |= port_stb;
+
+			outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
+
+			last = *data;
+
+			outb(last, ppc->lpt_addr);
+
+			while(count)
+			{
+				this = *data++;
+				count--;
+
+				if (this == last)
+				{
+					ppc->cur_ctrl ^= data_stb;
+
+					outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
+				}
+				else
+				{
+					outb(this, ppc->lpt_addr);
+
+					last = this;
+				}
+			}
+
+			ppc->cur_ctrl &= ~port_stb;
+
+			outb(ppc->cur_ctrl, ppc->lpt_addr + 2);
+
+			ppc6_send_cmd(ppc,(CMD_PREFIX_RESET | PREFIX_FASTWR));
+
+			break;
+		}
+
+		case PPCMODE_EPP_BYTE :
+		{
+			while(count)
+			{
+				outb(*data++,ppc->lpt_addr + 4);
+				count--;
+			}
+
+			ppc6_wait_for_fifo(ppc);
+
+			break;
+		}
+
+		case PPCMODE_EPP_WORD :
+		{
+			while(count > 1)
+			{
+				outw(*((u16 *)data),ppc->lpt_addr + 4);
+				data  += 2;
+				count -= 2;
+			}
+
+			while(count)
+			{
+				outb(*data++,ppc->lpt_addr + 4);
+				count--;
+			}
+
+			ppc6_wait_for_fifo(ppc);
+
+			break;
+		}
+
+		case PPCMODE_EPP_DWORD :
+		{
+			while(count > 3)
+			{
+				outl(*((u32 *)data),ppc->lpt_addr + 4);
+				data  += 4;
+				count -= 4;
+			}
+
+			while(count)
+			{
+				outb(*data++,ppc->lpt_addr + 4);
+				count--;
+			}
+
+			ppc6_wait_for_fifo(ppc);
+
+			break;
+		}
+	}
+}
+
+//***************************************************************************
+
+static void ppc6_rd_port16_blk(Interface *ppc, u8 port, u8 *data, long length)
+{
+	length = length << 1;
+
+	ppc6_send_cmd(ppc, (REG_BLKSIZE | ACCESS_REG | ACCESS_WRITE));
+	ppc6_wr_data_byte(ppc,(u8)length);
+	ppc6_wr_data_byte(ppc,(u8)(length >> 8));
+	ppc6_wr_data_byte(ppc,0);
+
+	ppc6_send_cmd(ppc, (CMD_PREFIX_SET | PREFIX_IO16 | PREFIX_BLK));
+
+	ppc6_send_cmd(ppc, (u8)(port | ACCESS_PORT | ACCESS_READ));
+
+	ppc6_rd_data_blk(ppc, data, length);
+
+	ppc6_send_cmd(ppc, (CMD_PREFIX_RESET | PREFIX_IO16 | PREFIX_BLK));
+}
+
+//***************************************************************************
+
+static void ppc6_wr_port16_blk(Interface *ppc, u8 port, u8 *data, long length)
+{
+	length = length << 1;
+
+	ppc6_send_cmd(ppc, (REG_BLKSIZE | ACCESS_REG | ACCESS_WRITE));
+	ppc6_wr_data_byte(ppc,(u8)length);
+	ppc6_wr_data_byte(ppc,(u8)(length >> 8));
+	ppc6_wr_data_byte(ppc,0);
+
+	ppc6_send_cmd(ppc, (CMD_PREFIX_SET | PREFIX_IO16 | PREFIX_BLK));
+
+	ppc6_send_cmd(ppc, (u8)(port | ACCESS_PORT | ACCESS_WRITE));
+
+	ppc6_wr_data_blk(ppc, data, length);
+
+	ppc6_send_cmd(ppc, (CMD_PREFIX_RESET | PREFIX_IO16 | PREFIX_BLK));
+}
+
+//***************************************************************************
+
+static void ppc6_wr_extout(Interface *ppc, u8 regdata)
+{
+	ppc6_send_cmd(ppc,(REG_VERSION | ACCESS_REG | ACCESS_WRITE));
+
+	ppc6_wr_data_byte(ppc, (u8)((regdata & 0x03) << 6));
+}
+
+//***************************************************************************
+
+static int ppc6_open(Interface *ppc)
+{
+	int ret;
+
+	ret = ppc6_select(ppc);
+
+	if (ret == 0)
+		return(ret);
+
+	ppc->ppc_flags &= ~fifo_wait;
+
+	ppc6_send_cmd(ppc, (ACCESS_REG | ACCESS_WRITE | REG_RAMSIZE));
+	ppc6_wr_data_byte(ppc, RAMSIZE_128K);
+
+	ppc6_send_cmd(ppc, (ACCESS_REG | ACCESS_READ | REG_VERSION));
+
+	if ((ppc6_rd_data_byte(ppc) & 0x3F) == 0x0C)
+		ppc->ppc_flags |= fifo_wait;
+
+	return(ret);
+}
+
+//***************************************************************************
+
+static void ppc6_close(Interface *ppc)
+{
+	ppc6_deselect(ppc);
+}
+
+//***************************************************************************
+
diff --git a/drivers/block/paride/pseudo.h b/drivers/block/paride/pseudo.h
new file mode 100644
index 0000000..932342d
--- /dev/null
+++ b/drivers/block/paride/pseudo.h
@@ -0,0 +1,102 @@
+/* 
+        pseudo.h    (c) 1997-8  Grant R. Guenther <grant@torque.net>
+                                Under the terms of the GNU General Public License.
+
+	This is the "pseudo-interrupt" logic for parallel port drivers.
+
+        This module is #included into each driver.  It makes one
+        function available:
+
+		ps_set_intr( void (*continuation)(void),
+			     int  (*ready)(void),
+			     int timeout,
+			     int nice )
+
+	This arranges for ready() to be evaluated frequently; when it
+	returns true, or when timeout jiffies have passed,
+	continuation() will be invoked.
+
+	If nice is 1, the test will be done approximately once a
+	jiffy.  If nice is 0, the test will also be done whenever
+	the scheduler runs (by adding it to a task queue).  If
+	nice is greater than 1, the test will be done once every
+	(nice-1) jiffies. 
+
+*/
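+
+/* A minimal usage sketch (the do_xx/xx names are hypothetical): a
+   driver arms the next step of its state machine with
+
+	ps_set_intr(do_xx_done, xx_ready, XX_TMO, nice);
+
+   after which do_xx_done() runs once xx_ready() returns true or
+   XX_TMO jiffies have elapsed; see do_pf_read()/do_pf_read_drq()
+   in pf.c for a real caller.
+*/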
+
+/* Changes:
+
+	1.01	1998.05.03	Switched from cli()/sti() to spinlocks
+	1.02    1998.12.14      Added support for nice > 1
+*/
+	
+#define PS_VERSION	"1.02"
+
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+
+static void ps_tq_int( void *data);
+
+static void (* ps_continuation)(void);
+static int (* ps_ready)(void);
+static unsigned long ps_timeout;
+static int ps_tq_active = 0;
+static int ps_nice = 0;
+
+static DEFINE_SPINLOCK(ps_spinlock __attribute__((unused)));
+
+static DECLARE_WORK(ps_tq, ps_tq_int, NULL);
+
+static void ps_set_intr(void (*continuation)(void), 
+			int (*ready)(void),
+			int timeout, int nice)
+{
+	unsigned long	flags;
+
+	spin_lock_irqsave(&ps_spinlock,flags);
+
+	ps_continuation = continuation;
+	ps_ready = ready;
+	ps_timeout = jiffies + timeout;
+	ps_nice = nice;
+
+	if (!ps_tq_active) {
+		ps_tq_active = 1;
+		if (!ps_nice)
+			schedule_work(&ps_tq);
+		else
+			schedule_delayed_work(&ps_tq, ps_nice-1);
+	}
+	spin_unlock_irqrestore(&ps_spinlock,flags);
+}
+
+static void ps_tq_int(void *data)
+{
+	void (*con)(void);
+	unsigned long flags;
+
+	spin_lock_irqsave(&ps_spinlock,flags);
+
+	con = ps_continuation;
+	ps_tq_active = 0;
+
+	if (!con) {
+		spin_unlock_irqrestore(&ps_spinlock,flags);
+		return;
+	}
+	if (!ps_ready || ps_ready() || time_after_eq(jiffies, ps_timeout)) {
+		ps_continuation = NULL;
+		spin_unlock_irqrestore(&ps_spinlock,flags);
+		con();
+		return;
+	}
+	ps_tq_active = 1;
+	if (!ps_nice)
+		schedule_work(&ps_tq);
+	else
+		schedule_delayed_work(&ps_tq, ps_nice-1);
+	spin_unlock_irqrestore(&ps_spinlock,flags);
+}
+
+/* end of pseudo.h */
+
diff --git a/drivers/block/paride/pt.c b/drivers/block/paride/pt.c
new file mode 100644
index 0000000..8fbd692
--- /dev/null
+++ b/drivers/block/paride/pt.c
@@ -0,0 +1,1024 @@
+/* 
+        pt.c    (c) 1998  Grant R. Guenther <grant@torque.net>
+                          Under the terms of the GNU General Public License.
+
+        This is the high-level driver for parallel port ATAPI tape
+        drives based on chips supported by the paride module.
+
+	The driver implements both rewinding and non-rewinding
+	devices, filemarks, and the rewind ioctl.  It allocates
+	a small internal "bounce buffer" for each open device, but
+        otherwise expects buffering and blocking to be done at the
+        user level.  As with most block-structured tapes, short
+	writes are padded to full tape blocks, so reading back a file
+        may return more data than was actually written.
+
+        By default, the driver will autoprobe for a single parallel
+        port ATAPI tape drive, but if the drives' individual parameters
+        are specified, the driver can handle up to 4 drives.
+
+	The rewinding devices are named /dev/pt0, /dev/pt1, ...
+	while the non-rewinding devices are /dev/npt0, /dev/npt1, etc.
+
+        The behaviour of the pt driver can be altered by setting
+        some parameters from the insmod command line.  The following
+        parameters are adjustable:
+
+            drive0      These four arguments can be arrays of       
+            drive1      1-6 integers as follows:
+            drive2
+            drive3      <prt>,<pro>,<uni>,<mod>,<slv>,<dly>
+
+                        Where,
+
+                <prt>   is the base of the parallel port address for
+                        the corresponding drive.  (required)
+
+                <pro>   is the protocol number for the adapter that
+                        supports this drive.  These numbers are
+                        logged by 'paride' when the protocol modules
+                        are initialised.  (0 if not given)
+
+                <uni>   for those adapters that support chained
+                        devices, this is the unit selector for the
+                        chain of devices on the given port.  It should
+                        be zero for devices that don't support chaining.
+                        (0 if not given)
+
+                <mod>   this can be -1 to choose the best mode, or one
+                        of the mode numbers supported by the adapter.
+                        (-1 if not given)
+
+                <slv>   ATAPI devices can be jumpered to master or slave.
+                        Set this to 0 to choose the master drive, 1 to
+                        choose the slave, -1 (the default) to choose the
+                        first drive found.
+
+                <dly>   some parallel ports require the driver to 
+                        go more slowly.  -1 sets a default value that
+                        should work with the chosen protocol.  Otherwise,
+                        set this to a small integer, the larger it is
+                        the slower the port i/o.  In some cases, setting
+                        this to zero will speed up the device. (default -1)
+
+	    major	You may use this parameter to override the
+			default major number (96) that this driver
+			will use.  Be sure to change the device
+			name as well.
+
+	    name	This parameter is a character string that
+			contains the name the kernel will use for this
+			device (in /proc output, for instance).
+			(default "pt").
+
+            verbose     This parameter controls the amount of logging
+                        that the driver will do.  Set it to 0 for
+                        normal operation, 1 to see autoprobe progress
+                        messages, or 2 to see additional debugging
+                        output.  (default 0)
+ 
+        If this driver is built into the kernel, you can use 
+        the following command line parameters, with the same values
+        as the corresponding module parameters listed above:
+
+            pt.drive0
+            pt.drive1
+            pt.drive2
+            pt.drive3
+
+        In addition, you can use the parameter pt.disable to disable
+        the driver entirely.
+
+*/
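+
+/*
+   For instance, with the default major number of 96 the device
+   nodes would be created as
+
+	mknod /dev/pt0  c 96 0
+	mknod /dev/npt0 c 96 128
+
+   (bit 7 of the minor selects the non-rewinding device; see
+   pt_open()), and a hypothetical manual load for an adapter on the
+   first parallel port might be
+
+	insmod pt drive0=0x378,1
+*/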
+
+/*   Changes:
+
+	1.01	GRG 1998.05.06	Round up transfer size, fix ready_wait,
+			        loosened interpretation of ATAPI standard
+				for clearing error status.
+				Eliminate sti();
+	1.02    GRG 1998.06.16  Eliminate an Ugh.
+	1.03    GRG 1998.08.15  Adjusted PT_TMO, use HZ in loop timing,
+				extra debugging
+	1.04    GRG 1998.09.24  Repair minor coding error, added jumbo support
+	
+*/
+
+#define PT_VERSION      "1.04"
+#define PT_MAJOR	96
+#define PT_NAME		"pt"
+#define PT_UNITS	4
+
+/* Here are things one can override from the insmod command.
+   Most are autoprobed by paride unless set here.  Verbose is off
+   by default.
+
+*/
+
+static int verbose = 0;
+static int major = PT_MAJOR;
+static char *name = PT_NAME;
+static int disable = 0;
+
+static int drive0[6] = { 0, 0, 0, -1, -1, -1 };
+static int drive1[6] = { 0, 0, 0, -1, -1, -1 };
+static int drive2[6] = { 0, 0, 0, -1, -1, -1 };
+static int drive3[6] = { 0, 0, 0, -1, -1, -1 };
+
+static int (*drives[4])[6] = {&drive0, &drive1, &drive2, &drive3};
+
+#define D_PRT   0
+#define D_PRO   1
+#define D_UNI   2
+#define D_MOD   3
+#define D_SLV   4
+#define D_DLY   5
+
+#define DU              (*drives[unit])
+
+/* end of parameters */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/devfs_fs_kernel.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/mtio.h>
+#include <linux/device.h>
+
+#include <asm/uaccess.h>
+
+module_param(verbose, bool, 0);
+module_param(major, int, 0);
+module_param(name, charp, 0);
+module_param_array(drive0, int, NULL, 0);
+module_param_array(drive1, int, NULL, 0);
+module_param_array(drive2, int, NULL, 0);
+module_param_array(drive3, int, NULL, 0);
+
+#include "paride.h"
+
+#define PT_MAX_RETRIES  5
+#define PT_TMO          3000	/* interrupt timeout in jiffies */
+#define PT_SPIN_DEL     50	/* spin delay in micro-seconds  */
+#define PT_RESET_TMO    30	/* 30 seconds */
+#define PT_READY_TMO	60	/* 60 seconds */
+#define PT_REWIND_TMO	1200	/* 20 minutes */
+
+#define PT_SPIN         ((1000000/(HZ*PT_SPIN_DEL))*PT_TMO)
+
+#define STAT_ERR        0x00001
+#define STAT_INDEX      0x00002
+#define STAT_ECC        0x00004
+#define STAT_DRQ        0x00008
+#define STAT_SEEK       0x00010
+#define STAT_WRERR      0x00020
+#define STAT_READY      0x00040
+#define STAT_BUSY       0x00080
+#define STAT_SENSE	0x1f000
+
+#define ATAPI_TEST_READY	0x00
+#define ATAPI_REWIND		0x01
+#define ATAPI_REQ_SENSE		0x03
+#define ATAPI_READ_6		0x08
+#define ATAPI_WRITE_6		0x0a
+#define ATAPI_WFM		0x10
+#define ATAPI_IDENTIFY		0x12
+#define ATAPI_MODE_SENSE	0x1a
+#define ATAPI_LOG_SENSE		0x4d
+
+static int pt_open(struct inode *inode, struct file *file);
+static int pt_ioctl(struct inode *inode, struct file *file,
+		    unsigned int cmd, unsigned long arg);
+static int pt_release(struct inode *inode, struct file *file);
+static ssize_t pt_read(struct file *filp, char __user *buf,
+		       size_t count, loff_t * ppos);
+static ssize_t pt_write(struct file *filp, const char __user *buf,
+			size_t count, loff_t * ppos);
+static int pt_detect(void);
+
+/* bits in tape->flags */
+
+#define PT_MEDIA	1
+#define PT_WRITE_OK	2
+#define PT_REWIND	4
+#define PT_WRITING      8
+#define PT_READING     16
+#define PT_EOF	       32
+
+#define PT_NAMELEN      8
+#define PT_BUFSIZE  16384
+
+struct pt_unit {
+	struct pi_adapter pia;	/* interface to paride layer */
+	struct pi_adapter *pi;
+	int flags;		/* various state flags */
+	int last_sense;		/* result of last request sense */
+	int drive;		/* drive */
+	atomic_t available;	/* 1 if access is available, 0 otherwise */
+	int bs;			/* block size */
+	int capacity;		/* Size of tape in KB */
+	int present;		/* device present ? */
+	char *bufptr;
+	char name[PT_NAMELEN];	/* pt0, pt1, ... */
+};
+
+static int pt_identify(struct pt_unit *tape);
+
+static struct pt_unit pt[PT_UNITS];
+
+static char pt_scratch[512];	/* scratch block buffer */
+
+/* kernel glue structures */
+
+static struct file_operations pt_fops = {
+	.owner = THIS_MODULE,
+	.read = pt_read,
+	.write = pt_write,
+	.ioctl = pt_ioctl,
+	.open = pt_open,
+	.release = pt_release,
+};
+
+/* sysfs class support */
+static struct class_simple *pt_class;
+
+static inline int status_reg(struct pi_adapter *pi)
+{
+	return pi_read_regr(pi, 1, 6);
+}
+
+static inline int read_reg(struct pi_adapter *pi, int reg)
+{
+	return pi_read_regr(pi, 0, reg);
+}
+
+static inline void write_reg(struct pi_adapter *pi, int reg, int val)
+{
+	pi_write_regr(pi, 0, reg, val);
+}
+
+static inline u8 DRIVE(struct pt_unit *tape)
+{
+	return 0xa0+0x10*tape->drive;
+}
+
+static int pt_wait(struct pt_unit *tape, int go, int stop, char *fun, char *msg)
+{
+	int j, r, e, s, p;
+	struct pi_adapter *pi = tape->pi;
+
+	j = 0;
+	while ((((r = status_reg(pi)) & go) || (stop && (!(r & stop))))
+	       && (j++ < PT_SPIN))
+		udelay(PT_SPIN_DEL);
+
+	if ((r & (STAT_ERR & stop)) || (j >= PT_SPIN)) {
+		s = read_reg(pi, 7);
+		e = read_reg(pi, 1);
+		p = read_reg(pi, 2);
+		if (j >= PT_SPIN)
+			e |= 0x100;
+		if (fun)
+			printk("%s: %s %s: alt=0x%x stat=0x%x err=0x%x"
+			       " loop=%d phase=%d\n",
+			       tape->name, fun, msg, r, s, e, j, p);
+		return (e << 8) + s;
+	}
+	return 0;
+}
+
+static int pt_command(struct pt_unit *tape, char *cmd, int dlen, char *fun)
+{
+	struct pi_adapter *pi = tape->pi;
+	pi_connect(pi);
+
+	write_reg(pi, 6, DRIVE(tape));
+
+	if (pt_wait(tape, STAT_BUSY | STAT_DRQ, 0, fun, "before command")) {
+		pi_disconnect(pi);
+		return -1;
+	}
+
+	write_reg(pi, 4, dlen % 256);
+	write_reg(pi, 5, dlen / 256);
+	write_reg(pi, 7, 0xa0);	/* ATAPI packet command */
+
+	if (pt_wait(tape, STAT_BUSY, STAT_DRQ, fun, "command DRQ")) {
+		pi_disconnect(pi);
+		return -1;
+	}
+
+	if (read_reg(pi, 2) != 1) {
+		printk("%s: %s: command phase error\n", tape->name, fun);
+		pi_disconnect(pi);
+		return -1;
+	}
+
+	pi_write_block(pi, cmd, 12);
+
+	return 0;
+}
+
+static int pt_completion(struct pt_unit *tape, char *buf, char *fun)
+{
+	struct pi_adapter *pi = tape->pi;
+	int r, s, n, p;
+
+	r = pt_wait(tape, STAT_BUSY, STAT_DRQ | STAT_READY | STAT_ERR,
+		    fun, "completion");
+
+	if (read_reg(pi, 7) & STAT_DRQ) {
+		n = (((read_reg(pi, 4) + 256 * read_reg(pi, 5)) +
+		      3) & 0xfffc);
+		p = read_reg(pi, 2) & 3;
+		if (p == 0)
+			pi_write_block(pi, buf, n);
+		if (p == 2)
+			pi_read_block(pi, buf, n);
+	}
+
+	s = pt_wait(tape, STAT_BUSY, STAT_READY | STAT_ERR, fun, "data done");
+
+	pi_disconnect(pi);
+
+	return (r ? r : s);
+}
+
+static void pt_req_sense(struct pt_unit *tape, int quiet)
+{
+	char rs_cmd[12] = { ATAPI_REQ_SENSE, 0, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0 };
+	char buf[16];
+	int r;
+
+	r = pt_command(tape, rs_cmd, 16, "Request sense");
+	mdelay(1);
+	if (!r)
+		pt_completion(tape, buf, "Request sense");
+
+	tape->last_sense = -1;
+	if (!r) {
+		if (!quiet)
+			printk("%s: Sense key: %x, ASC: %x, ASQ: %x\n",
+			       tape->name, buf[2] & 0xf, buf[12], buf[13]);
+		tape->last_sense = (buf[2] & 0xf) | ((buf[12] & 0xff) << 8)
+		    | ((buf[13] & 0xff) << 16);
+	}
+}
+
+static int pt_atapi(struct pt_unit *tape, char *cmd, int dlen, char *buf, char *fun)
+{
+	int r;
+
+	r = pt_command(tape, cmd, dlen, fun);
+	mdelay(1);
+	if (!r)
+		r = pt_completion(tape, buf, fun);
+	if (r)
+		pt_req_sense(tape, !fun);
+
+	return r;
+}
+
+static void pt_sleep(int cs)
+{
+	current->state = TASK_INTERRUPTIBLE;
+	schedule_timeout(cs);
+}
+
+static int pt_poll_dsc(struct pt_unit *tape, int pause, int tmo, char *msg)
+{
+	struct pi_adapter *pi = tape->pi;
+	int k, e, s;
+
+	k = 0;
+	e = 0;
+	s = 0;
+	while (k < tmo) {
+		pt_sleep(pause);
+		k++;
+		pi_connect(pi);
+		write_reg(pi, 6, DRIVE(tape));
+		s = read_reg(pi, 7);
+		e = read_reg(pi, 1);
+		pi_disconnect(pi);
+		if (s & (STAT_ERR | STAT_SEEK))
+			break;
+	}
+	if ((k >= tmo) || (s & STAT_ERR)) {
+		if (k >= tmo)
+			printk("%s: %s DSC timeout\n", tape->name, msg);
+		else
+			printk("%s: %s stat=0x%x err=0x%x\n", tape->name, msg, s,
+			       e);
+		pt_req_sense(tape, 0);
+		return 0;
+	}
+	return 1;
+}
+
+static void pt_media_access_cmd(struct pt_unit *tape, int tmo, char *cmd, char *fun)
+{
+	if (pt_command(tape, cmd, 0, fun)) {
+		pt_req_sense(tape, 0);
+		return;
+	}
+	pi_disconnect(tape->pi);
+	pt_poll_dsc(tape, HZ, tmo, fun);
+}
+
+static void pt_rewind(struct pt_unit *tape)
+{
+	char rw_cmd[12] = { ATAPI_REWIND, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+
+	pt_media_access_cmd(tape, PT_REWIND_TMO, rw_cmd, "rewind");
+}
+
+static void pt_write_fm(struct pt_unit *tape)
+{
+	char wm_cmd[12] = { ATAPI_WFM, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0 };
+
+	pt_media_access_cmd(tape, PT_TMO, wm_cmd, "write filemark");
+}
+
+#define DBMSG(msg)      ((verbose>1)?(msg):NULL)
+
+static int pt_reset(struct pt_unit *tape)
+{
+	struct pi_adapter *pi = tape->pi;
+	int i, k, flg;
+	int expect[5] = { 1, 1, 1, 0x14, 0xeb };
+
+	pi_connect(pi);
+	write_reg(pi, 6, DRIVE(tape));
+	write_reg(pi, 7, 8);
+
+	pt_sleep(20 * HZ / 1000);
+
+	k = 0;
+	while ((k++ < PT_RESET_TMO) && (status_reg(pi) & STAT_BUSY))
+		pt_sleep(HZ / 10);
+
+	flg = 1;
+	for (i = 0; i < 5; i++)
+		flg &= (read_reg(pi, i + 1) == expect[i]);
+
+	if (verbose) {
+		printk("%s: Reset (%d) signature = ", tape->name, k);
+		for (i = 0; i < 5; i++)
+			printk("%3x", read_reg(pi, i + 1));
+		if (!flg)
+			printk(" (incorrect)");
+		printk("\n");
+	}
+
+	pi_disconnect(pi);
+	return flg - 1;
+}
+
+static int pt_ready_wait(struct pt_unit *tape, int tmo)
+{
+	char tr_cmd[12] = { ATAPI_TEST_READY, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+	int k, p;
+
+	k = 0;
+	while (k < tmo) {
+		tape->last_sense = 0;
+		pt_atapi(tape, tr_cmd, 0, NULL, DBMSG("test unit ready"));
+		p = tape->last_sense;
+		if (!p)
+			return 0;
+		if (!(((p & 0xffff) == 0x0402) || ((p & 0xff) == 6)))
+			return p;
+		k++;
+		pt_sleep(HZ);
+	}
+	return 0x000020;	/* timeout */
+}
+
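+/*
+ * As in pg.c: copy a space-padded identify string out of buf,
+ * collapsing runs of spaces and trimming a trailing space.
+ */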
+static void xs(char *buf, char *targ, int offs, int len)
+{
+	int j, k, l;
+
+	j = 0;
+	l = 0;
+	for (k = 0; k < len; k++)
+		if ((buf[k + offs] != 0x20) || (buf[k + offs] != l))
+			l = targ[j++] = buf[k + offs];
+	if (l == 0x20)
+		j--;
+	targ[j] = 0;
+}
+
+static int xn(char *buf, int offs, int size)
+{
+	int v, k;
+
+	v = 0;
+	for (k = 0; k < size; k++)
+		v = v * 256 + (buf[k + offs] & 0xff);
+	return v;
+}
+
+static int pt_identify(struct pt_unit *tape)
+{
+	int dt, s;
+	char *ms[2] = { "master", "slave" };
+	char mf[10], id[18];
+	char id_cmd[12] = { ATAPI_IDENTIFY, 0, 0, 0, 36, 0, 0, 0, 0, 0, 0, 0 };
+	char ms_cmd[12] =
+	    { ATAPI_MODE_SENSE, 0, 0x2a, 0, 36, 0, 0, 0, 0, 0, 0, 0 };
+	char ls_cmd[12] =
+	    { ATAPI_LOG_SENSE, 0, 0x71, 0, 0, 0, 0, 0, 36, 0, 0, 0 };
+	char buf[36];
+
+	s = pt_atapi(tape, id_cmd, 36, buf, "identify");
+	if (s)
+		return -1;
+
+	dt = buf[0] & 0x1f;
+	if (dt != 1) {
+		if (verbose)
+			printk("%s: Drive %d, unsupported type %d\n",
+			       tape->name, tape->drive, dt);
+		return -1;
+	}
+
+	xs(buf, mf, 8, 8);
+	xs(buf, id, 16, 16);
+
+	tape->flags = 0;
+	tape->capacity = 0;
+	tape->bs = 0;
+
+	if (!pt_ready_wait(tape, PT_READY_TMO))
+		tape->flags |= PT_MEDIA;
+
+	if (!pt_atapi(tape, ms_cmd, 36, buf, "mode sense")) {
+		if (!(buf[2] & 0x80))
+			tape->flags |= PT_WRITE_OK;
+		tape->bs = xn(buf, 10, 2);
+	}
+
+	if (!pt_atapi(tape, ls_cmd, 36, buf, "log sense"))
+		tape->capacity = xn(buf, 24, 4);
+
+	printk("%s: %s %s, %s", tape->name, mf, id, ms[tape->drive]);
+	if (!(tape->flags & PT_MEDIA))
+		printk(", no media\n");
+	else {
+		if (!(tape->flags & PT_WRITE_OK))
+			printk(", RO");
+		printk(", blocksize %d, %d MB\n", tape->bs, tape->capacity / 1024);
+	}
+
+	return 0;
+}
+
+
+/*
+ * returns  0 if a drive is detected (tape->drive is set to it)
+ *	   -1 if drive detection failed
+ */
+static int pt_probe(struct pt_unit *tape)
+{
+	if (tape->drive == -1) {
+		for (tape->drive = 0; tape->drive <= 1; tape->drive++)
+			if (!pt_reset(tape))
+				return pt_identify(tape);
+	} else {
+		if (!pt_reset(tape))
+			return pt_identify(tape);
+	}
+	return -1;
+}
+
+static int pt_detect(void)
+{
+	struct pt_unit *tape;
+	int specified = 0, found = 0;
+	int unit;
+
+	printk("%s: %s version %s, major %d\n", name, name, PT_VERSION, major);
+
+	for (unit = 0; unit < PT_UNITS; unit++) {
+		struct pt_unit *tape = &pt[unit];
+		tape->pi = &tape->pia;
+		atomic_set(&tape->available, 1);
+		tape->flags = 0;
+		tape->last_sense = 0;
+		tape->present = 0;
+		tape->bufptr = NULL;
+		tape->drive = DU[D_SLV];
+		snprintf(tape->name, PT_NAMELEN, "%s%d", name, unit);
+		if (!DU[D_PRT])
+			continue;
+		specified++;
+		if (pi_init(tape->pi, 0, DU[D_PRT], DU[D_MOD], DU[D_UNI],
+		     DU[D_PRO], DU[D_DLY], pt_scratch, PI_PT,
+		     verbose, tape->name)) {
+			if (!pt_probe(tape)) {
+				tape->present = 1;
+				found++;
+			} else
+				pi_release(tape->pi);
+		}
+	}
+	if (specified == 0) {
+		tape = pt;
+		if (pi_init(tape->pi, 1, -1, -1, -1, -1, -1, pt_scratch,
+			    PI_PT, verbose, tape->name)) {
+			if (!pt_probe(tape)) {
+				tape->present = 1;
+				found++;
+			} else
+				pi_release(tape->pi);
+		}
+
+	}
+	if (found)
+		return 0;
+
+	printk("%s: No ATAPI tape drive detected\n", name);
+	return -1;
+}
+
+static int pt_open(struct inode *inode, struct file *file)
+{
+	int unit = iminor(inode) & 0x7F;
+	struct pt_unit *tape = pt + unit;
+	int err;
+
+	if (unit >= PT_UNITS || (!tape->present))
+		return -ENODEV;
+
+	err = -EBUSY;
+	if (!atomic_dec_and_test(&tape->available))
+		goto out;
+
+	pt_identify(tape);
+
+	err = -ENODEV;
+	if (!(tape->flags & PT_MEDIA))
+		goto out;
+
+	err = -EROFS;
+	if (!(tape->flags & PT_WRITE_OK) && (file->f_mode & 2))
+		goto out;
+
+	if (!(iminor(inode) & 128))
+		tape->flags |= PT_REWIND;
+
+	err = -ENOMEM;
+	tape->bufptr = kmalloc(PT_BUFSIZE, GFP_KERNEL);
+	if (tape->bufptr == NULL) {
+		printk("%s: buffer allocation failed\n", tape->name);
+		goto out;
+	}
+
+	file->private_data = tape;
+	return 0;
+
+out:
+	atomic_inc(&tape->available);
+	return err;
+}
+
+static int pt_ioctl(struct inode *inode, struct file *file,
+	 unsigned int cmd, unsigned long arg)
+{
+	struct pt_unit *tape = file->private_data;
+	struct mtop __user *p = (void __user *)arg;
+	struct mtop mtop;
+
+	switch (cmd) {
+	case MTIOCTOP:
+		if (copy_from_user(&mtop, p, sizeof(struct mtop)))
+			return -EFAULT;
+
+		switch (mtop.mt_op) {
+
+		case MTREW:
+			pt_rewind(tape);
+			return 0;
+
+		case MTWEOF:
+			pt_write_fm(tape);
+			return 0;
+
+		default:
+			printk("%s: Unimplemented mt_op %d\n", tape->name,
+			       mtop.mt_op);
+			return -EINVAL;
+		}
+
+	default:
+		printk("%s: Unimplemented ioctl 0x%x\n", tape->name, cmd);
+		return -EINVAL;
+
+	}
+}
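+
+/*
+   An illustrative user-space rewind through the ioctl above (not
+   part of the driver; error handling omitted, and the function name
+   is hypothetical).  The non-rewinding node is used so the rewind
+   is explicit:
+
+	#include <fcntl.h>
+	#include <sys/ioctl.h>
+	#include <sys/mtio.h>
+
+	int pt_rewind_dev(void)
+	{
+		struct mtop op = { .mt_op = MTREW, .mt_count = 1 };
+		int fd = open("/dev/npt0", O_RDONLY);
+		int r = ioctl(fd, MTIOCTOP, &op);
+
+		close(fd);
+		return r;
+	}
+*/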
+
+static int
+pt_release(struct inode *inode, struct file *file)
+{
+	struct pt_unit *tape = file->private_data;
+
+	if (atomic_read(&tape->available) > 1)
+		return -EINVAL;
+
+	if (tape->flags & PT_WRITING)
+		pt_write_fm(tape);
+
+	if (tape->flags & PT_REWIND)
+		pt_rewind(tape);
+
+	kfree(tape->bufptr);
+	tape->bufptr = NULL;
+
+	atomic_inc(&tape->available);
+
+	return 0;
+
+}
+
+static ssize_t pt_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
+{
+	struct pt_unit *tape = filp->private_data;
+	struct pi_adapter *pi = tape->pi;
+	char rd_cmd[12] = { ATAPI_READ_6, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+	int k, n, r, p, s, t, b;
+
+	if (!(tape->flags & (PT_READING | PT_WRITING))) {
+		tape->flags |= PT_READING;
+		if (pt_atapi(tape, rd_cmd, 0, NULL, "start read-ahead"))
+			return -EIO;
+	} else if (tape->flags & PT_WRITING)
+		return -EIO;
+
+	if (tape->flags & PT_EOF)
+		return 0;
+
+	t = 0;
+
+	while (count > 0) {
+
+		if (!pt_poll_dsc(tape, HZ / 100, PT_TMO, "read"))
+			return -EIO;
+
+		n = count;
+		if (n > 32768)
+			n = 32768;	/* max per command */
+		b = (n - 1 + tape->bs) / tape->bs;
+		n = b * tape->bs;	/* rounded up to even block */
+
+		rd_cmd[4] = b;
+
+		r = pt_command(tape, rd_cmd, n, "read");
+
+		mdelay(1);
+
+		if (r) {
+			pt_req_sense(tape, 0);
+			return -EIO;
+		}
+
+		while (1) {
+
+			r = pt_wait(tape, STAT_BUSY,
+				    STAT_DRQ | STAT_ERR | STAT_READY,
+				    DBMSG("read DRQ"), "");
+
+			if (r & STAT_SENSE) {
+				pi_disconnect(pi);
+				pt_req_sense(tape, 0);
+				return -EIO;
+			}
+
+			if (r)
+				tape->flags |= PT_EOF;
+
+			s = read_reg(pi, 7);
+
+			if (!(s & STAT_DRQ))
+				break;
+
+			n = (read_reg(pi, 4) + 256 * read_reg(pi, 5));
+			p = (read_reg(pi, 2) & 3);
+			if (p != 2) {
+				pi_disconnect(pi);
+				printk("%s: Phase error on read: %d\n", tape->name,
+				       p);
+				return -EIO;
+			}
+
+			while (n > 0) {
+				k = n;
+				if (k > PT_BUFSIZE)
+					k = PT_BUFSIZE;
+				pi_read_block(pi, tape->bufptr, k);
+				n -= k;
+				b = k;
+				if (b > count)
+					b = count;
+				if (copy_to_user(buf + t, tape->bufptr, b)) {
+					pi_disconnect(pi);
+					return -EFAULT;
+				}
+				t += b;
+				count -= b;
+			}
+
+		}
+		pi_disconnect(pi);
+		if (tape->flags & PT_EOF)
+			break;
+	}
+
+	return t;
+
+}
+
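+/*
+ * Worked example of the chunking in pt_read/pt_write, assuming a
+ * typical block size of tape->bs = 512: a request for count = 5000
+ * bytes gives b = (5000 - 1 + 512) / 512 = 10 blocks, so the command
+ * transfers n = 10 * 512 = 5120 bytes; requests larger than 32768
+ * bytes are split across several ATAPI commands.
+ */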
+static ssize_t pt_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
+{
+	struct pt_unit *tape = filp->private_data;
+	struct pi_adapter *pi = tape->pi;
+	char wr_cmd[12] = { ATAPI_WRITE_6, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+	int k, n, r, p, s, t, b;
+
+	if (!(tape->flags & PT_WRITE_OK))
+		return -EROFS;
+
+	if (!(tape->flags & (PT_READING | PT_WRITING))) {
+		tape->flags |= PT_WRITING;
+		if (pt_atapi
+		    (tape, wr_cmd, 0, NULL, "start buffer-available mode"))
+			return -EIO;
+	} else if (tape->flags & PT_READING)
+		return -EIO;
+
+	if (tape->flags & PT_EOF)
+		return -ENOSPC;
+
+	t = 0;
+
+	while (count > 0) {
+
+		if (!pt_poll_dsc(tape, HZ / 100, PT_TMO, "write"))
+			return -EIO;
+
+		n = count;
+		if (n > 32768)
+			n = 32768;	/* max per command */
+		b = (n - 1 + tape->bs) / tape->bs;
+		n = b * tape->bs;	/* rounded up to even block */
+
+		wr_cmd[4] = b;
+
+		r = pt_command(tape, wr_cmd, n, "write");
+
+		mdelay(1);
+
+		if (r) {	/* error delivering command only */
+			pt_req_sense(tape, 0);
+			return -EIO;
+		}
+
+		while (1) {
+
+			r = pt_wait(tape, STAT_BUSY,
+				    STAT_DRQ | STAT_ERR | STAT_READY,
+				    DBMSG("write DRQ"), NULL);
+
+			if (r & STAT_SENSE) {
+				pi_disconnect(pi);
+				pt_req_sense(tape, 0);
+				return -EIO;
+			}
+
+			if (r)
+				tape->flags |= PT_EOF;
+
+			s = read_reg(pi, 7);
+
+			if (!(s & STAT_DRQ))
+				break;
+
+			n = (read_reg(pi, 4) + 256 * read_reg(pi, 5));
+			p = (read_reg(pi, 2) & 3);
+			if (p != 0) {
+				pi_disconnect(pi);
+				printk("%s: Phase error on write: %d \n",
+				       tape->name, p);
+				return -EIO;
+			}
+
+			while (n > 0) {
+				k = n;
+				if (k > PT_BUFSIZE)
+					k = PT_BUFSIZE;
+				b = k;
+				if (b > count)
+					b = count;
+				if (copy_from_user(tape->bufptr, buf + t, b)) {
+					pi_disconnect(pi);
+					return -EFAULT;
+				}
+				pi_write_block(pi, tape->bufptr, k);
+				t += b;
+				count -= b;
+				n -= k;
+			}
+
+		}
+		pi_disconnect(pi);
+		if (tape->flags & PT_EOF)
+			break;
+	}
+
+	return t;
+}
+
+static int __init pt_init(void)
+{
+	int unit, err = 0;
+
+	if (disable) {
+		err = -1;
+		goto out;
+	}
+
+	if (pt_detect()) {
+		err = -1;
+		goto out;
+	}
+
+	if (register_chrdev(major, name, &pt_fops)) {
+		printk("pt_init: unable to get major number %d\n", major);
+		for (unit = 0; unit < PT_UNITS; unit++)
+			if (pt[unit].present)
+				pi_release(pt[unit].pi);
+		err = -1;
+		goto out;
+	}
+	pt_class = class_simple_create(THIS_MODULE, "pt");
+	if (IS_ERR(pt_class)) {
+		err = PTR_ERR(pt_class);
+		goto out_chrdev;
+	}
+
+	devfs_mk_dir("pt");
+	for (unit = 0; unit < PT_UNITS; unit++)
+		if (pt[unit].present) {
+			class_simple_device_add(pt_class, MKDEV(major, unit), 
+					NULL, "pt%d", unit);
+			err = devfs_mk_cdev(MKDEV(major, unit),
+				      S_IFCHR | S_IRUSR | S_IWUSR,
+				      "pt/%d", unit);
+			if (err) {
+				class_simple_device_remove(MKDEV(major, unit));
+				goto out_class;
+			}
+			class_simple_device_add(pt_class, MKDEV(major, unit + 128),
+					NULL, "pt%dn", unit);
+			err = devfs_mk_cdev(MKDEV(major, unit + 128),
+				      S_IFCHR | S_IRUSR | S_IWUSR,
+				      "pt/%dn", unit);
+			if (err) {
+				class_simple_device_remove(MKDEV(major, unit + 128));
+				goto out_class;
+			}
+		}
+	goto out;
+
+out_class:
+	class_simple_destroy(pt_class);
+out_chrdev:
+	unregister_chrdev(major, "pt");
+out:
+	return err;
+}
+
+static void __exit pt_exit(void)
+{
+	int unit;
+	for (unit = 0; unit < PT_UNITS; unit++)
+		if (pt[unit].present) {
+			class_simple_device_remove(MKDEV(major, unit));
+			devfs_remove("pt/%d", unit);
+			class_simple_device_remove(MKDEV(major, unit + 128));
+			devfs_remove("pt/%dn", unit);
+		}
+	class_simple_destroy(pt_class);
+	devfs_remove("pt");
+	unregister_chrdev(major, name);
+	for (unit = 0; unit < PT_UNITS; unit++)
+		if (pt[unit].present)
+			pi_release(pt[unit].pi);
+}
+
+MODULE_LICENSE("GPL");
+module_init(pt_init)
+module_exit(pt_exit)
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
new file mode 100644
index 0000000..1a1fa3c
--- /dev/null
+++ b/drivers/block/pktcdvd.c
@@ -0,0 +1,2681 @@
+/*
+ * Copyright (C) 2000 Jens Axboe <axboe@suse.de>
+ * Copyright (C) 2001-2004 Peter Osterlund <petero2@telia.com>
+ *
+ * May be copied or modified under the terms of the GNU General Public
+ * License.  See linux/COPYING for more information.
+ *
+ * Packet writing layer for ATAPI and SCSI CD-R, CD-RW, DVD-R, and
+ * DVD-RW devices (aka an exercise in block layer masturbation)
+ *
+ *
+ * TODO: (circa order of when I will fix it)
+ * - Only able to write on CD-RW media right now.
+ * - check host application code on media and set it in write page
+ * - interface for UDF <-> packet to negotiate a new location when a write
+ *   fails.
+ * - handle OPC, especially for -RW media
+ *
+ * Theory of operation:
+ *
+ * We use a custom make_request_fn function that forwards reads directly to
+ * the underlying CD device. Write requests are either attached directly to
+ * a live packet_data object, or simply stored sequentially in a list for
+ * later processing by the kcdrwd kernel thread. This driver doesn't use
+ * any elevator functionality as defined by the elevator_s struct, but the
+ * underlying CD device uses a standard elevator.
+ *
+ * This strategy makes it possible to do very late merging of IO requests.
+ * A new bio sent to pkt_make_request can be merged with a live packet_data
+ * object even if the object is in the data gathering state.
+ *
+ *************************************************************************/
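+
+/*
+ * Rough map of the write path implemented below: pkt_make_request()
+ * queues write bios in an rb tree sorted by sector; kcdrwd picks them
+ * up via pkt_handle_queue(), which binds all queued bios of one zone
+ * to a packet_data object; pkt_run_state_machine() then reads any
+ * missing frames (pkt_gather_data), assembles one large write bio
+ * (pkt_start_write) and finally completes the original bios
+ * (pkt_finish_packet). Reads bypass all of this and are cloned
+ * straight through to the underlying device.
+ */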
+
+#define VERSION_CODE	"v0.2.0a 2004-07-14 Jens Axboe (axboe@suse.de) and petero2@telia.com"
+
+#include <linux/pktcdvd.h>
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/file.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/miscdevice.h>
+#include <linux/suspend.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_ioctl.h>
+
+#include <asm/uaccess.h>
+
+#if PACKET_DEBUG
+#define DPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args)
+#else
+#define DPRINTK(fmt, args...)
+#endif
+
+#if PACKET_DEBUG > 1
+#define VPRINTK(fmt, args...) printk(KERN_NOTICE fmt, ##args)
+#else
+#define VPRINTK(fmt, args...)
+#endif
+
+#define MAX_SPEED 0xffff
+
+#define ZONE(sector, pd) (((sector) + (pd)->offset) & ~((pd)->settings.size - 1))
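+
+/*
+ * Worked example for ZONE(): with the fallback fixed packet size of
+ * pd->settings.size = 128 sectors (a 64KB packet) and pd->offset = 0,
+ * the mask ~(size - 1) clears the low 7 bits, so sectors 0..127 map
+ * to zone 0, sectors 128..255 to zone 128, and so on; a non-zero
+ * pd->offset shifts that grid when the track does not start on a
+ * packet boundary.
+ */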
+
+static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
+static struct proc_dir_entry *pkt_proc;
+static int pkt_major;
+static struct semaphore ctl_mutex;	/* Serialize open/close/setup/teardown */
+static mempool_t *psd_pool;
+
+
+static void pkt_bio_finished(struct pktcdvd_device *pd)
+{
+	BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0);
+	if (atomic_dec_and_test(&pd->cdrw.pending_bios)) {
+		VPRINTK("pktcdvd: queue empty\n");
+		atomic_set(&pd->iosched.attention, 1);
+		wake_up(&pd->wqueue);
+	}
+}
+
+static void pkt_bio_destructor(struct bio *bio)
+{
+	kfree(bio->bi_io_vec);
+	kfree(bio);
+}
+
+static struct bio *pkt_bio_alloc(int nr_iovecs)
+{
+	struct bio_vec *bvl = NULL;
+	struct bio *bio;
+
+	bio = kmalloc(sizeof(struct bio), GFP_KERNEL);
+	if (!bio)
+		goto no_bio;
+	bio_init(bio);
+
+	bvl = kmalloc(nr_iovecs * sizeof(struct bio_vec), GFP_KERNEL);
+	if (!bvl)
+		goto no_bvl;
+	memset(bvl, 0, nr_iovecs * sizeof(struct bio_vec));
+
+	bio->bi_max_vecs = nr_iovecs;
+	bio->bi_io_vec = bvl;
+	bio->bi_destructor = pkt_bio_destructor;
+
+	return bio;
+
+ no_bvl:
+	kfree(bio);
+ no_bio:
+	return NULL;
+}
+
+/*
+ * Allocate a packet_data struct
+ */
+static struct packet_data *pkt_alloc_packet_data(void)
+{
+	int i;
+	struct packet_data *pkt;
+
+	pkt = kmalloc(sizeof(struct packet_data), GFP_KERNEL);
+	if (!pkt)
+		goto no_pkt;
+	memset(pkt, 0, sizeof(struct packet_data));
+
+	pkt->w_bio = pkt_bio_alloc(PACKET_MAX_SIZE);
+	if (!pkt->w_bio)
+		goto no_bio;
+
+	for (i = 0; i < PAGES_PER_PACKET; i++) {
+		pkt->pages[i] = alloc_page(GFP_KERNEL|__GFP_ZERO);
+		if (!pkt->pages[i])
+			goto no_page;
+	}
+
+	spin_lock_init(&pkt->lock);
+
+	for (i = 0; i < PACKET_MAX_SIZE; i++) {
+		struct bio *bio = pkt_bio_alloc(1);
+		if (!bio)
+			goto no_rd_bio;
+		pkt->r_bios[i] = bio;
+	}
+
+	return pkt;
+
+no_rd_bio:
+	for (i = 0; i < PACKET_MAX_SIZE; i++) {
+		struct bio *bio = pkt->r_bios[i];
+		if (bio)
+			bio_put(bio);
+	}
+
+no_page:
+	for (i = 0; i < PAGES_PER_PACKET; i++)
+		if (pkt->pages[i])
+			__free_page(pkt->pages[i]);
+	bio_put(pkt->w_bio);
+no_bio:
+	kfree(pkt);
+no_pkt:
+	return NULL;
+}
+
+/*
+ * Free a packet_data struct
+ */
+static void pkt_free_packet_data(struct packet_data *pkt)
+{
+	int i;
+
+	for (i = 0; i < PACKET_MAX_SIZE; i++) {
+		struct bio *bio = pkt->r_bios[i];
+		if (bio)
+			bio_put(bio);
+	}
+	for (i = 0; i < PAGES_PER_PACKET; i++)
+		__free_page(pkt->pages[i]);
+	bio_put(pkt->w_bio);
+	kfree(pkt);
+}
+
+static void pkt_shrink_pktlist(struct pktcdvd_device *pd)
+{
+	struct packet_data *pkt, *next;
+
+	BUG_ON(!list_empty(&pd->cdrw.pkt_active_list));
+
+	list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) {
+		pkt_free_packet_data(pkt);
+	}
+}
+
+static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets)
+{
+	struct packet_data *pkt;
+
+	INIT_LIST_HEAD(&pd->cdrw.pkt_free_list);
+	INIT_LIST_HEAD(&pd->cdrw.pkt_active_list);
+	spin_lock_init(&pd->cdrw.active_list_lock);
+	while (nr_packets > 0) {
+		pkt = pkt_alloc_packet_data();
+		if (!pkt) {
+			pkt_shrink_pktlist(pd);
+			return 0;
+		}
+		pkt->id = nr_packets;
+		pkt->pd = pd;
+		list_add(&pkt->list, &pd->cdrw.pkt_free_list);
+		nr_packets--;
+	}
+	return 1;
+}
+
+static void *pkt_rb_alloc(unsigned int __nocast gfp_mask, void *data)
+{
+	return kmalloc(sizeof(struct pkt_rb_node), gfp_mask);
+}
+
+static void pkt_rb_free(void *ptr, void *data)
+{
+	kfree(ptr);
+}
+
+static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node)
+{
+	struct rb_node *n = rb_next(&node->rb_node);
+	if (!n)
+		return NULL;
+	return rb_entry(n, struct pkt_rb_node, rb_node);
+}
+
+static inline void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node)
+{
+	rb_erase(&node->rb_node, &pd->bio_queue);
+	mempool_free(node, pd->rb_pool);
+	pd->bio_queue_size--;
+	BUG_ON(pd->bio_queue_size < 0);
+}
+
+/*
+ * Find the first node in the pd->bio_queue rb tree with a starting sector >= s.
+ */
+static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s)
+{
+	struct rb_node *n = pd->bio_queue.rb_node;
+	struct rb_node *next;
+	struct pkt_rb_node *tmp;
+
+	if (!n) {
+		BUG_ON(pd->bio_queue_size > 0);
+		return NULL;
+	}
+
+	for (;;) {
+		tmp = rb_entry(n, struct pkt_rb_node, rb_node);
+		if (s <= tmp->bio->bi_sector)
+			next = n->rb_left;
+		else
+			next = n->rb_right;
+		if (!next)
+			break;
+		n = next;
+	}
+
+	if (s > tmp->bio->bi_sector) {
+		tmp = pkt_rbtree_next(tmp);
+		if (!tmp)
+			return NULL;
+	}
+	BUG_ON(s > tmp->bio->bi_sector);
+	return tmp;
+}
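+
+/*
+ * Example: if the tree holds bios starting at sectors 100, 300 and
+ * 500, pkt_rbtree_find(pd, 250) returns the node for sector 300,
+ * while pkt_rbtree_find(pd, 600) returns NULL and the caller
+ * (pkt_handle_queue) wraps around via rb_first().
+ */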
+
+/*
+ * Insert a node into the pd->bio_queue rb tree.
+ */
+static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *node)
+{
+	struct rb_node **p = &pd->bio_queue.rb_node;
+	struct rb_node *parent = NULL;
+	sector_t s = node->bio->bi_sector;
+	struct pkt_rb_node *tmp;
+
+	while (*p) {
+		parent = *p;
+		tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
+		if (s < tmp->bio->bi_sector)
+			p = &(*p)->rb_left;
+		else
+			p = &(*p)->rb_right;
+	}
+	rb_link_node(&node->rb_node, parent, p);
+	rb_insert_color(&node->rb_node, &pd->bio_queue);
+	pd->bio_queue_size++;
+}
+
+/*
+ * Add a bio to a single linked list defined by its head and tail pointers.
+ */
+static inline void pkt_add_list_last(struct bio *bio, struct bio **list_head, struct bio **list_tail)
+{
+	bio->bi_next = NULL;
+	if (*list_tail) {
+		BUG_ON((*list_head) == NULL);
+		(*list_tail)->bi_next = bio;
+		(*list_tail) = bio;
+	} else {
+		BUG_ON((*list_head) != NULL);
+		(*list_head) = bio;
+		(*list_tail) = bio;
+	}
+}
+
+/*
+ * Remove and return the first bio from a single linked list defined by its
+ * head and tail pointers.
+ */
+static inline struct bio *pkt_get_list_first(struct bio **list_head, struct bio **list_tail)
+{
+	struct bio *bio;
+
+	if (*list_head == NULL)
+		return NULL;
+
+	bio = *list_head;
+	*list_head = bio->bi_next;
+	if (*list_head == NULL)
+		*list_tail = NULL;
+
+	bio->bi_next = NULL;
+	return bio;
+}
+
+/*
+ * Send a packet_command to the underlying block device and
+ * wait for completion.
+ */
+static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc)
+{
+	char sense[SCSI_SENSE_BUFFERSIZE];
+	request_queue_t *q;
+	struct request *rq;
+	DECLARE_COMPLETION(wait);
+	int err = 0;
+
+	q = bdev_get_queue(pd->bdev);
+
+	rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ? WRITE : READ,
+			     __GFP_WAIT);
+	rq->errors = 0;
+	rq->rq_disk = pd->bdev->bd_disk;
+	rq->bio = NULL;
+	rq->buffer = NULL;
+	rq->timeout = 60*HZ;
+	rq->data = cgc->buffer;
+	rq->data_len = cgc->buflen;
+	rq->sense = sense;
+	memset(sense, 0, sizeof(sense));
+	rq->sense_len = 0;
+	rq->flags |= REQ_BLOCK_PC | REQ_HARDBARRIER;
+	if (cgc->quiet)
+		rq->flags |= REQ_QUIET;
+	memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE);
+	if (sizeof(rq->cmd) > CDROM_PACKET_SIZE)
+		memset(rq->cmd + CDROM_PACKET_SIZE, 0, sizeof(rq->cmd) - CDROM_PACKET_SIZE);
+
+	rq->ref_count++;
+	rq->flags |= REQ_NOMERGE;
+	rq->waiting = &wait;
+	rq->end_io = blk_end_sync_rq;
+	elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1);
+	generic_unplug_device(q);
+	wait_for_completion(&wait);
+
+	if (rq->errors)
+		err = -EIO;
+
+	blk_put_request(rq);
+	return err;
+}
+
+/*
+ * A generic sense dump / resolve mechanism should be implemented across
+ * all ATAPI + SCSI devices.
+ */
+static void pkt_dump_sense(struct packet_command *cgc)
+{
+	static char *info[9] = { "No sense", "Recovered error", "Not ready",
+				 "Medium error", "Hardware error", "Illegal request",
+				 "Unit attention", "Data protect", "Blank check" };
+	int i;
+	struct request_sense *sense = cgc->sense;
+
+	printk("pktcdvd:");
+	for (i = 0; i < CDROM_PACKET_SIZE; i++)
+		printk(" %02x", cgc->cmd[i]);
+	printk(" - ");
+
+	if (sense == NULL) {
+		printk("no sense\n");
+		return;
+	}
+
+	printk("sense %02x.%02x.%02x", sense->sense_key, sense->asc, sense->ascq);
+
+	if (sense->sense_key > 8) {
+		printk(" (INVALID)\n");
+		return;
+	}
+
+	printk(" (%s)\n", info[sense->sense_key]);
+}
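+
+/*
+ * Example of the output format above, with illustrative values: a
+ * rejected MODE SELECT might be logged as
+ *   pktcdvd: 55 10 00 00 00 00 00 00 10 00 00 00 - sense 05.26.00 (Illegal request)
+ */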
+
+/*
+ * flush the drive cache to media
+ */
+static int pkt_flush_cache(struct pktcdvd_device *pd)
+{
+	struct packet_command cgc;
+
+	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
+	cgc.cmd[0] = GPCMD_FLUSH_CACHE;
+	cgc.quiet = 1;
+
+	/*
+	 * the IMMED bit -- we default to not setting it, although that
+	 * would allow a much faster close, this is safer
+	 */
+#if 0
+	cgc.cmd[1] = 1 << 1;
+#endif
+	return pkt_generic_packet(pd, &cgc);
+}
+
+/*
+ * speed is given as the normal factor, e.g. 4 for 4x
+ */
+static int pkt_set_speed(struct pktcdvd_device *pd, unsigned write_speed, unsigned read_speed)
+{
+	struct packet_command cgc;
+	struct request_sense sense;
+	int ret;
+
+	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
+	cgc.sense = &sense;
+	cgc.cmd[0] = GPCMD_SET_SPEED;
+	cgc.cmd[2] = (read_speed >> 8) & 0xff;
+	cgc.cmd[3] = read_speed & 0xff;
+	cgc.cmd[4] = (write_speed >> 8) & 0xff;
+	cgc.cmd[5] = write_speed & 0xff;
+
+	if ((ret = pkt_generic_packet(pd, &cgc)))
+		pkt_dump_sense(&cgc);
+
+	return ret;
+}
+
+/*
+ * Queue a bio for processing by the low-level CD device. Must be called
+ * from process context.
+ */
+static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio, int high_prio_read)
+{
+	spin_lock(&pd->iosched.lock);
+	if (bio_data_dir(bio) == READ) {
+		pkt_add_list_last(bio, &pd->iosched.read_queue,
+				  &pd->iosched.read_queue_tail);
+		if (high_prio_read)
+			pd->iosched.high_prio_read = 1;
+	} else {
+		pkt_add_list_last(bio, &pd->iosched.write_queue,
+				  &pd->iosched.write_queue_tail);
+	}
+	spin_unlock(&pd->iosched.lock);
+
+	atomic_set(&pd->iosched.attention, 1);
+	wake_up(&pd->wqueue);
+}
+
+/*
+ * Process the queued read/write requests. This function handles special
+ * requirements for CDRW drives:
+ * - A cache flush command must be inserted before a read request if the
+ *   previous request was a write.
+ * - Switching between reading and writing is slow, so don't do it more often
+ *   than necessary.
+ * - Set the read speed according to current usage pattern. When only reading
+ *   from the device, it's best to use the highest possible read speed, but
+ *   when switching often between reading and writing, it's better to have the
+ *   same read and write speeds.
+ * - Reads originating from user space should have higher priority than reads
+ *   originating from pkt_gather_data, because some process is usually waiting
+ *   on reads of the first kind.
+ */
+static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
+{
+	request_queue_t *q;
+
+	if (atomic_read(&pd->iosched.attention) == 0)
+		return;
+	atomic_set(&pd->iosched.attention, 0);
+
+	q = bdev_get_queue(pd->bdev);
+
+	for (;;) {
+		struct bio *bio;
+		int reads_queued, writes_queued, high_prio_read;
+
+		spin_lock(&pd->iosched.lock);
+		reads_queued = (pd->iosched.read_queue != NULL);
+		writes_queued = (pd->iosched.write_queue != NULL);
+		if (!reads_queued)
+			pd->iosched.high_prio_read = 0;
+		high_prio_read = pd->iosched.high_prio_read;
+		spin_unlock(&pd->iosched.lock);
+
+		if (!reads_queued && !writes_queued)
+			break;
+
+		if (pd->iosched.writing) {
+			if (high_prio_read || (!writes_queued && reads_queued)) {
+				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
+					VPRINTK("pktcdvd: write, waiting\n");
+					break;
+				}
+				pkt_flush_cache(pd);
+				pd->iosched.writing = 0;
+			}
+		} else {
+			if (!reads_queued && writes_queued) {
+				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
+					VPRINTK("pktcdvd: read, waiting\n");
+					break;
+				}
+				pd->iosched.writing = 1;
+			}
+		}
+
+		spin_lock(&pd->iosched.lock);
+		if (pd->iosched.writing) {
+			bio = pkt_get_list_first(&pd->iosched.write_queue,
+						 &pd->iosched.write_queue_tail);
+		} else {
+			bio = pkt_get_list_first(&pd->iosched.read_queue,
+						 &pd->iosched.read_queue_tail);
+		}
+		spin_unlock(&pd->iosched.lock);
+
+		if (!bio)
+			continue;
+
+		if (bio_data_dir(bio) == READ)
+			pd->iosched.successive_reads += bio->bi_size >> 10;
+		else
+			pd->iosched.successive_reads = 0;
+		if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) {
+			if (pd->read_speed == pd->write_speed) {
+				pd->read_speed = MAX_SPEED;
+				pkt_set_speed(pd, pd->write_speed, pd->read_speed);
+			}
+		} else {
+			if (pd->read_speed != pd->write_speed) {
+				pd->read_speed = pd->write_speed;
+				pkt_set_speed(pd, pd->write_speed, pd->read_speed);
+			}
+		}
+
+		atomic_inc(&pd->cdrw.pending_bios);
+		generic_make_request(bio);
+	}
+}
+
+/*
+ * Special care is needed if the underlying block device has a small
+ * max_phys_segments value.
+ */
+static int pkt_set_segment_merging(struct pktcdvd_device *pd, request_queue_t *q)
+{
+	if ((pd->settings.size << 9) / CD_FRAMESIZE <= q->max_phys_segments) {
+		/*
+		 * The cdrom device can handle one segment/frame
+		 */
+		clear_bit(PACKET_MERGE_SEGS, &pd->flags);
+		return 0;
+	} else if ((pd->settings.size << 9) / PAGE_SIZE <= q->max_phys_segments) {
+		/*
+		 * We can handle this case at the expense of some extra memory
+		 * copies during write operations
+		 */
+		set_bit(PACKET_MERGE_SEGS, &pd->flags);
+		return 0;
+	} else {
+		printk("pktcdvd: cdrom max_phys_segments too small\n");
+		return -EIO;
+	}
+}
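+
+/*
+ * Worked example, assuming the default 64KB packet and 4KB pages:
+ * (128 << 9) / CD_FRAMESIZE = 32, so a queue allowing at least 32
+ * physical segments can take one segment per 2KB frame; with
+ * (128 << 9) / PAGE_SIZE = 16, a queue allowing at least 16 segments
+ * still works once PACKET_MERGE_SEGS routes the data through
+ * pkt_make_local_copy(); anything smaller is rejected with -EIO.
+ */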
+
+/*
+ * Copy CD_FRAMESIZE bytes from src_bio into a destination page
+ */
+static void pkt_copy_bio_data(struct bio *src_bio, int seg, int offs, struct page *dst_page, int dst_offs)
+{
+	unsigned int copy_size = CD_FRAMESIZE;
+
+	while (copy_size > 0) {
+		struct bio_vec *src_bvl = bio_iovec_idx(src_bio, seg);
+		void *vfrom = kmap_atomic(src_bvl->bv_page, KM_USER0) +
+			src_bvl->bv_offset + offs;
+		void *vto = page_address(dst_page) + dst_offs;
+		int len = min_t(int, copy_size, src_bvl->bv_len - offs);
+
+		BUG_ON(len < 0);
+		memcpy(vto, vfrom, len);
+		kunmap_atomic(vfrom, KM_USER0);
+
+		seg++;
+		offs = 0;
+		dst_offs += len;
+		copy_size -= len;
+	}
+}
+
+/*
+ * Copy all data for this packet to pkt->pages[], so that
+ * a) The number of required segments for the write bio is minimized, which
+ *    is necessary for some scsi controllers.
+ * b) The data can be used as cache to avoid read requests if we receive a
+ *    new write request for the same zone.
+ */
+static void pkt_make_local_copy(struct packet_data *pkt, struct page **pages, int *offsets)
+{
+	int f, p, offs;
+
+	/* Copy all data to pkt->pages[] */
+	p = 0;
+	offs = 0;
+	for (f = 0; f < pkt->frames; f++) {
+		if (pages[f] != pkt->pages[p]) {
+			void *vfrom = kmap_atomic(pages[f], KM_USER0) + offsets[f];
+			void *vto = page_address(pkt->pages[p]) + offs;
+			memcpy(vto, vfrom, CD_FRAMESIZE);
+			kunmap_atomic(vfrom, KM_USER0);
+			pages[f] = pkt->pages[p];
+			offsets[f] = offs;
+		} else {
+			BUG_ON(offsets[f] != offs);
+		}
+		offs += CD_FRAMESIZE;
+		if (offs >= PAGE_SIZE) {
+			BUG_ON(offs > PAGE_SIZE);
+			offs = 0;
+			p++;
+		}
+	}
+}
+
+static int pkt_end_io_read(struct bio *bio, unsigned int bytes_done, int err)
+{
+	struct packet_data *pkt = bio->bi_private;
+	struct pktcdvd_device *pd = pkt->pd;
+	BUG_ON(!pd);
+
+	if (bio->bi_size)
+		return 1;
+
+	VPRINTK("pkt_end_io_read: bio=%p sec0=%llx sec=%llx err=%d\n", bio,
+		(unsigned long long)pkt->sector, (unsigned long long)bio->bi_sector, err);
+
+	if (err)
+		atomic_inc(&pkt->io_errors);
+	if (atomic_dec_and_test(&pkt->io_wait)) {
+		atomic_inc(&pkt->run_sm);
+		wake_up(&pd->wqueue);
+	}
+	pkt_bio_finished(pd);
+
+	return 0;
+}
+
+static int pkt_end_io_packet_write(struct bio *bio, unsigned int bytes_done, int err)
+{
+	struct packet_data *pkt = bio->bi_private;
+	struct pktcdvd_device *pd = pkt->pd;
+	BUG_ON(!pd);
+
+	if (bio->bi_size)
+		return 1;
+
+	VPRINTK("pkt_end_io_packet_write: id=%d, err=%d\n", pkt->id, err);
+
+	pd->stats.pkt_ended++;
+
+	pkt_bio_finished(pd);
+	atomic_dec(&pkt->io_wait);
+	atomic_inc(&pkt->run_sm);
+	wake_up(&pd->wqueue);
+	return 0;
+}
+
+/*
+ * Schedule reads for the holes in a packet
+ */
+static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
+{
+	int frames_read = 0;
+	struct bio *bio;
+	int f;
+	char written[PACKET_MAX_SIZE];
+
+	BUG_ON(!pkt->orig_bios);
+
+	atomic_set(&pkt->io_wait, 0);
+	atomic_set(&pkt->io_errors, 0);
+
+	if (pkt->cache_valid) {
+		VPRINTK("pkt_gather_data: zone %llx cached\n",
+			(unsigned long long)pkt->sector);
+		goto out_account;
+	}
+
+	/*
+	 * Figure out which frames we need to read before we can write.
+	 */
+	memset(written, 0, sizeof(written));
+	spin_lock(&pkt->lock);
+	for (bio = pkt->orig_bios; bio; bio = bio->bi_next) {
+		int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9);
+		int num_frames = bio->bi_size / CD_FRAMESIZE;
+		BUG_ON(first_frame < 0);
+		BUG_ON(first_frame + num_frames > pkt->frames);
+		for (f = first_frame; f < first_frame + num_frames; f++)
+			written[f] = 1;
+	}
+	spin_unlock(&pkt->lock);
+
+	/*
+	 * Schedule reads for missing parts of the packet.
+	 */
+	for (f = 0; f < pkt->frames; f++) {
+		int p, offset;
+		if (written[f])
+			continue;
+		bio = pkt->r_bios[f];
+		bio_init(bio);
+		bio->bi_max_vecs = 1;
+		bio->bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
+		bio->bi_bdev = pd->bdev;
+		bio->bi_end_io = pkt_end_io_read;
+		bio->bi_private = pkt;
+
+		p = (f * CD_FRAMESIZE) / PAGE_SIZE;
+		offset = (f * CD_FRAMESIZE) % PAGE_SIZE;
+		VPRINTK("pkt_gather_data: Adding frame %d, page:%p offs:%d\n",
+			f, pkt->pages[p], offset);
+		if (!bio_add_page(bio, pkt->pages[p], CD_FRAMESIZE, offset))
+			BUG();
+
+		atomic_inc(&pkt->io_wait);
+		bio->bi_rw = READ;
+		pkt_queue_bio(pd, bio, 0);
+		frames_read++;
+	}
+
+out_account:
+	VPRINTK("pkt_gather_data: need %d frames for zone %llx\n",
+		frames_read, (unsigned long long)pkt->sector);
+	pd->stats.pkt_started++;
+	pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9);
+	pd->stats.secs_w += pd->settings.size;
+}
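+
+/*
+ * Example (hypothetical numbers): if user space wrote 32KB covering
+ * frames 0..15 of a 32-frame zone, written[0..15] is set above and 16
+ * read bios are queued for frames 16..31; once pkt->io_wait drops to
+ * zero the zone is complete and can be written out in a single pass.
+ */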
+
+/*
+ * Find a packet matching zone, or the least recently used packet if
+ * there is no match.
+ */
+static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zone)
+{
+	struct packet_data *pkt;
+
+	list_for_each_entry(pkt, &pd->cdrw.pkt_free_list, list) {
+		if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) {
+			list_del_init(&pkt->list);
+			if (pkt->sector != zone)
+				pkt->cache_valid = 0;
+			break;
+		}
+	}
+	return pkt;
+}
+
+static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt)
+{
+	if (pkt->cache_valid) {
+		list_add(&pkt->list, &pd->cdrw.pkt_free_list);
+	} else {
+		list_add_tail(&pkt->list, &pd->cdrw.pkt_free_list);
+	}
+}
+
+/*
+ * recover a failed write, query for relocation if possible
+ *
+ * returns 1 if recovery is possible, or 0 if not
+ *
+ */
+static int pkt_start_recovery(struct packet_data *pkt)
+{
+	/*
+	 * FIXME. We need help from the file system to implement
+	 * recovery handling.
+	 */
+	return 0;
+#if 0
+	struct request *rq = pkt->rq;
+	struct pktcdvd_device *pd = rq->rq_disk->private_data;
+	struct block_device *pkt_bdev;
+	struct super_block *sb = NULL;
+	unsigned long old_block, new_block;
+	sector_t new_sector;
+
+	pkt_bdev = bdget(kdev_t_to_nr(pd->pkt_dev));
+	if (pkt_bdev) {
+		sb = get_super(pkt_bdev);
+		bdput(pkt_bdev);
+	}
+
+	if (!sb)
+		return 0;
+
+	if (!sb->s_op || !sb->s_op->relocate_blocks)
+		goto out;
+
+	old_block = pkt->sector / (CD_FRAMESIZE >> 9);
+	if (sb->s_op->relocate_blocks(sb, old_block, &new_block))
+		goto out;
+
+	new_sector = new_block * (CD_FRAMESIZE >> 9);
+	pkt->sector = new_sector;
+
+	pkt->bio->bi_sector = new_sector;
+	pkt->bio->bi_next = NULL;
+	pkt->bio->bi_flags = 1 << BIO_UPTODATE;
+	pkt->bio->bi_idx = 0;
+
+	BUG_ON(pkt->bio->bi_rw != (1 << BIO_RW));
+	BUG_ON(pkt->bio->bi_vcnt != pkt->frames);
+	BUG_ON(pkt->bio->bi_size != pkt->frames * CD_FRAMESIZE);
+	BUG_ON(pkt->bio->bi_end_io != pkt_end_io_packet_write);
+	BUG_ON(pkt->bio->bi_private != pkt);
+
+	drop_super(sb);
+	return 1;
+
+out:
+	drop_super(sb);
+	return 0;
+#endif
+}
+
+static inline void pkt_set_state(struct packet_data *pkt, enum packet_data_state state)
+{
+#if PACKET_DEBUG > 1
+	static const char *state_name[] = {
+		"IDLE", "WAITING", "READ_WAIT", "WRITE_WAIT", "RECOVERY", "FINISHED"
+	};
+	enum packet_data_state old_state = pkt->state;
+	VPRINTK("pkt %2d : s=%6llx %s -> %s\n", pkt->id, (unsigned long long)pkt->sector,
+		state_name[old_state], state_name[state]);
+#endif
+	pkt->state = state;
+}
+
+/*
+ * Scan the work queue to see if we can start a new packet.
+ * returns non-zero if any work was done.
+ */
+static int pkt_handle_queue(struct pktcdvd_device *pd)
+{
+	struct packet_data *pkt, *p;
+	struct bio *bio = NULL;
+	sector_t zone = 0; /* Suppress gcc warning */
+	struct pkt_rb_node *node, *first_node;
+	struct rb_node *n;
+
+	VPRINTK("handle_queue\n");
+
+	atomic_set(&pd->scan_queue, 0);
+
+	if (list_empty(&pd->cdrw.pkt_free_list)) {
+		VPRINTK("handle_queue: no pkt\n");
+		return 0;
+	}
+
+	/*
+	 * Try to find a zone we are not already working on.
+	 */
+	spin_lock(&pd->lock);
+	first_node = pkt_rbtree_find(pd, pd->current_sector);
+	if (!first_node) {
+		n = rb_first(&pd->bio_queue);
+		if (n)
+			first_node = rb_entry(n, struct pkt_rb_node, rb_node);
+	}
+	node = first_node;
+	while (node) {
+		bio = node->bio;
+		zone = ZONE(bio->bi_sector, pd);
+		list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
+			if (p->sector == zone)
+				goto try_next_bio;
+		}
+		break;
+try_next_bio:
+		node = pkt_rbtree_next(node);
+		if (!node) {
+			n = rb_first(&pd->bio_queue);
+			if (n)
+				node = rb_entry(n, struct pkt_rb_node, rb_node);
+		}
+		if (node == first_node)
+			node = NULL;
+	}
+	spin_unlock(&pd->lock);
+	if (!bio) {
+		VPRINTK("handle_queue: no bio\n");
+		return 0;
+	}
+
+	pkt = pkt_get_packet_data(pd, zone);
+	BUG_ON(!pkt);
+
+	pd->current_sector = zone + pd->settings.size;
+	pkt->sector = zone;
+	pkt->frames = pd->settings.size >> 2;
+	BUG_ON(pkt->frames > PACKET_MAX_SIZE);
+	pkt->write_size = 0;
+
+	/*
+	 * Scan work queue for bios in the same zone and link them
+	 * to this packet.
+	 */
+	spin_lock(&pd->lock);
+	VPRINTK("pkt_handle_queue: looking for zone %llx\n", (unsigned long long)zone);
+	while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
+		bio = node->bio;
+		VPRINTK("pkt_handle_queue: found zone=%llx\n",
+			(unsigned long long)ZONE(bio->bi_sector, pd));
+		if (ZONE(bio->bi_sector, pd) != zone)
+			break;
+		pkt_rbtree_erase(pd, node);
+		spin_lock(&pkt->lock);
+		pkt_add_list_last(bio, &pkt->orig_bios, &pkt->orig_bios_tail);
+		pkt->write_size += bio->bi_size / CD_FRAMESIZE;
+		spin_unlock(&pkt->lock);
+	}
+	spin_unlock(&pd->lock);
+
+	pkt->sleep_time = max(PACKET_WAIT_TIME, 1);
+	pkt_set_state(pkt, PACKET_WAITING_STATE);
+	atomic_set(&pkt->run_sm, 1);
+
+	spin_lock(&pd->cdrw.active_list_lock);
+	list_add(&pkt->list, &pd->cdrw.pkt_active_list);
+	spin_unlock(&pd->cdrw.active_list_lock);
+
+	return 1;
+}
+
+/*
+ * Assemble a bio to write one packet and queue the bio for processing
+ * by the underlying block device.
+ */
+static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
+{
+	struct bio *bio;
+	struct page *pages[PACKET_MAX_SIZE];
+	int offsets[PACKET_MAX_SIZE];
+	int f;
+	int frames_write;
+
+	for (f = 0; f < pkt->frames; f++) {
+		pages[f] = pkt->pages[(f * CD_FRAMESIZE) / PAGE_SIZE];
+		offsets[f] = (f * CD_FRAMESIZE) % PAGE_SIZE;
+	}
+
+	/*
+	 * Fill-in pages[] and offsets[] with data from orig_bios.
+	 */
+	frames_write = 0;
+	spin_lock(&pkt->lock);
+	for (bio = pkt->orig_bios; bio; bio = bio->bi_next) {
+		int segment = bio->bi_idx;
+		int src_offs = 0;
+		int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9);
+		int num_frames = bio->bi_size / CD_FRAMESIZE;
+		BUG_ON(first_frame < 0);
+		BUG_ON(first_frame + num_frames > pkt->frames);
+		for (f = first_frame; f < first_frame + num_frames; f++) {
+			struct bio_vec *src_bvl = bio_iovec_idx(bio, segment);
+
+			while (src_offs >= src_bvl->bv_len) {
+				src_offs -= src_bvl->bv_len;
+				segment++;
+				BUG_ON(segment >= bio->bi_vcnt);
+				src_bvl = bio_iovec_idx(bio, segment);
+			}
+
+			if (src_bvl->bv_len - src_offs >= CD_FRAMESIZE) {
+				pages[f] = src_bvl->bv_page;
+				offsets[f] = src_bvl->bv_offset + src_offs;
+			} else {
+				pkt_copy_bio_data(bio, segment, src_offs,
+						  pages[f], offsets[f]);
+			}
+			src_offs += CD_FRAMESIZE;
+			frames_write++;
+		}
+	}
+	pkt_set_state(pkt, PACKET_WRITE_WAIT_STATE);
+	spin_unlock(&pkt->lock);
+
+	VPRINTK("pkt_start_write: Writing %d frames for zone %llx\n",
+		frames_write, (unsigned long long)pkt->sector);
+	BUG_ON(frames_write != pkt->write_size);
+
+	if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames)) {
+		pkt_make_local_copy(pkt, pages, offsets);
+		pkt->cache_valid = 1;
+	} else {
+		pkt->cache_valid = 0;
+	}
+
+	/* Start the write request */
+	bio_init(pkt->w_bio);
+	pkt->w_bio->bi_max_vecs = PACKET_MAX_SIZE;
+	pkt->w_bio->bi_sector = pkt->sector;
+	pkt->w_bio->bi_bdev = pd->bdev;
+	pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
+	pkt->w_bio->bi_private = pkt;
+	for (f = 0; f < pkt->frames; f++) {
+		/* Two adjacent frames that are contiguous on the same page
+		 * can be added as one 2*CD_FRAMESIZE segment. Note that this
+		 * is a comparison ("=="), not an assignment.
+		 */
+		if ((f + 1 < pkt->frames) && (pages[f + 1] == pages[f]) &&
+		    (offsets[f + 1] == offsets[f] + CD_FRAMESIZE)) {
+			if (!bio_add_page(pkt->w_bio, pages[f], CD_FRAMESIZE * 2, offsets[f]))
+				BUG();
+			f++;
+		} else {
+			if (!bio_add_page(pkt->w_bio, pages[f], CD_FRAMESIZE, offsets[f]))
+				BUG();
+		}
+	}
+	VPRINTK("pktcdvd: vcnt=%d\n", pkt->w_bio->bi_vcnt);
+
+	atomic_set(&pkt->io_wait, 1);
+	pkt->w_bio->bi_rw = WRITE;
+	pkt_queue_bio(pd, pkt->w_bio, 0);
+}
+
+static void pkt_finish_packet(struct packet_data *pkt, int uptodate)
+{
+	struct bio *bio, *next;
+
+	if (!uptodate)
+		pkt->cache_valid = 0;
+
+	/* Finish all bios corresponding to this packet */
+	bio = pkt->orig_bios;
+	while (bio) {
+		next = bio->bi_next;
+		bio->bi_next = NULL;
+		bio_endio(bio, bio->bi_size, uptodate ? 0 : -EIO);
+		bio = next;
+	}
+	pkt->orig_bios = pkt->orig_bios_tail = NULL;
+}
+
+static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt)
+{
+	int uptodate;
+
+	VPRINTK("run_state_machine: pkt %d\n", pkt->id);
+
+	for (;;) {
+		switch (pkt->state) {
+		case PACKET_WAITING_STATE:
+			if ((pkt->write_size < pkt->frames) && (pkt->sleep_time > 0))
+				return;
+
+			pkt->sleep_time = 0;
+			pkt_gather_data(pd, pkt);
+			pkt_set_state(pkt, PACKET_READ_WAIT_STATE);
+			break;
+
+		case PACKET_READ_WAIT_STATE:
+			if (atomic_read(&pkt->io_wait) > 0)
+				return;
+
+			if (atomic_read(&pkt->io_errors) > 0) {
+				pkt_set_state(pkt, PACKET_RECOVERY_STATE);
+			} else {
+				pkt_start_write(pd, pkt);
+			}
+			break;
+
+		case PACKET_WRITE_WAIT_STATE:
+			if (atomic_read(&pkt->io_wait) > 0)
+				return;
+
+			if (test_bit(BIO_UPTODATE, &pkt->w_bio->bi_flags)) {
+				pkt_set_state(pkt, PACKET_FINISHED_STATE);
+			} else {
+				pkt_set_state(pkt, PACKET_RECOVERY_STATE);
+			}
+			break;
+
+		case PACKET_RECOVERY_STATE:
+			if (pkt_start_recovery(pkt)) {
+				pkt_start_write(pd, pkt);
+			} else {
+				VPRINTK("No recovery possible\n");
+				pkt_set_state(pkt, PACKET_FINISHED_STATE);
+			}
+			break;
+
+		case PACKET_FINISHED_STATE:
+			uptodate = test_bit(BIO_UPTODATE, &pkt->w_bio->bi_flags);
+			pkt_finish_packet(pkt, uptodate);
+			return;
+
+		default:
+			BUG();
+			break;
+		}
+	}
+}
+
+static void pkt_handle_packets(struct pktcdvd_device *pd)
+{
+	struct packet_data *pkt, *next;
+
+	VPRINTK("pkt_handle_packets\n");
+
+	/*
+	 * Run state machine for active packets
+	 */
+	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
+		if (atomic_read(&pkt->run_sm) > 0) {
+			atomic_set(&pkt->run_sm, 0);
+			pkt_run_state_machine(pd, pkt);
+		}
+	}
+
+	/*
+	 * Move no longer active packets to the free list
+	 */
+	spin_lock(&pd->cdrw.active_list_lock);
+	list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_active_list, list) {
+		if (pkt->state == PACKET_FINISHED_STATE) {
+			list_del(&pkt->list);
+			pkt_put_packet_data(pd, pkt);
+			pkt_set_state(pkt, PACKET_IDLE_STATE);
+			atomic_set(&pd->scan_queue, 1);
+		}
+	}
+	spin_unlock(&pd->cdrw.active_list_lock);
+}
+
+static void pkt_count_states(struct pktcdvd_device *pd, int *states)
+{
+	struct packet_data *pkt;
+	int i;
+
+	/* states[] has PACKET_NUM_STATES entries; note "<", not "<=" */
+	for (i = 0; i < PACKET_NUM_STATES; i++)
+		states[i] = 0;
+
+	spin_lock(&pd->cdrw.active_list_lock);
+	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
+		states[pkt->state]++;
+	}
+	spin_unlock(&pd->cdrw.active_list_lock);
+}
+
+/*
+ * kcdrwd is woken up when writes have been queued for one of our
+ * registered devices
+ */
+static int kcdrwd(void *foobar)
+{
+	struct pktcdvd_device *pd = foobar;
+	struct packet_data *pkt;
+	long min_sleep_time, residue;
+
+	set_user_nice(current, -20);
+
+	for (;;) {
+		DECLARE_WAITQUEUE(wait, current);
+
+		/*
+		 * Wait until there is something to do
+		 */
+		add_wait_queue(&pd->wqueue, &wait);
+		for (;;) {
+			set_current_state(TASK_INTERRUPTIBLE);
+
+			/* Check if we need to run pkt_handle_queue */
+			if (atomic_read(&pd->scan_queue) > 0)
+				goto work_to_do;
+
+			/* Check if we need to run the state machine for some packet */
+			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
+				if (atomic_read(&pkt->run_sm) > 0)
+					goto work_to_do;
+			}
+
+			/* Check if we need to process the iosched queues */
+			if (atomic_read(&pd->iosched.attention) != 0)
+				goto work_to_do;
+
+			/* Otherwise, go to sleep */
+			if (PACKET_DEBUG > 1) {
+				int states[PACKET_NUM_STATES];
+				pkt_count_states(pd, states);
+				VPRINTK("kcdrwd: i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
+					states[0], states[1], states[2], states[3],
+					states[4], states[5]);
+			}
+
+			min_sleep_time = MAX_SCHEDULE_TIMEOUT;
+			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
+				if (pkt->sleep_time && pkt->sleep_time < min_sleep_time)
+					min_sleep_time = pkt->sleep_time;
+			}
+
+			generic_unplug_device(bdev_get_queue(pd->bdev));
+
+			VPRINTK("kcdrwd: sleeping\n");
+			residue = schedule_timeout(min_sleep_time);
+			VPRINTK("kcdrwd: wake up\n");
+
+			/* make swsusp happy with our thread */
+			if (current->flags & PF_FREEZE)
+				refrigerator(PF_FREEZE);
+
+			list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
+				if (!pkt->sleep_time)
+					continue;
+				pkt->sleep_time -= min_sleep_time - residue;
+				if (pkt->sleep_time <= 0) {
+					pkt->sleep_time = 0;
+					atomic_inc(&pkt->run_sm);
+				}
+			}
+
+			if (signal_pending(current)) {
+				flush_signals(current);
+			}
+			if (kthread_should_stop())
+				break;
+		}
+work_to_do:
+		set_current_state(TASK_RUNNING);
+		remove_wait_queue(&pd->wqueue, &wait);
+
+		if (kthread_should_stop())
+			break;
+
+		/*
+		 * if pkt_handle_queue returns true, we can queue
+		 * another request.
+		 */
+		while (pkt_handle_queue(pd))
+			;
+
+		/*
+		 * Handle packet state machine
+		 */
+		pkt_handle_packets(pd);
+
+		/*
+		 * Handle iosched queues
+		 */
+		pkt_iosched_process_queue(pd);
+	}
+
+	return 0;
+}
+
+static void pkt_print_settings(struct pktcdvd_device *pd)
+{
+	printk("pktcdvd: %s packets, ", pd->settings.fp ? "Fixed" : "Variable");
+	printk("%u blocks, ", pd->settings.size >> 2);
+	printk("Mode-%c disc\n", pd->settings.block_mode == 8 ? '1' : '2');
+}
+
+static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc, int page_code, int page_control)
+{
+	memset(cgc->cmd, 0, sizeof(cgc->cmd));
+
+	cgc->cmd[0] = GPCMD_MODE_SENSE_10;
+	cgc->cmd[2] = page_code | (page_control << 6);
+	cgc->cmd[7] = cgc->buflen >> 8;
+	cgc->cmd[8] = cgc->buflen & 0xff;
+	cgc->data_direction = CGC_DATA_READ;
+	return pkt_generic_packet(pd, cgc);
+}
+
+static int pkt_mode_select(struct pktcdvd_device *pd, struct packet_command *cgc)
+{
+	memset(cgc->cmd, 0, sizeof(cgc->cmd));
+	memset(cgc->buffer, 0, 2);
+	cgc->cmd[0] = GPCMD_MODE_SELECT_10;
+	cgc->cmd[1] = 0x10;		/* PF */
+	cgc->cmd[7] = cgc->buflen >> 8;
+	cgc->cmd[8] = cgc->buflen & 0xff;
+	cgc->data_direction = CGC_DATA_WRITE;
+	return pkt_generic_packet(pd, cgc);
+}
+
+static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di)
+{
+	struct packet_command cgc;
+	int ret;
+
+	/* set up command and get the disc info */
+	init_cdrom_command(&cgc, di, sizeof(*di), CGC_DATA_READ);
+	cgc.cmd[0] = GPCMD_READ_DISC_INFO;
+	cgc.cmd[8] = cgc.buflen = 2;
+	cgc.quiet = 1;
+
+	if ((ret = pkt_generic_packet(pd, &cgc)))
+		return ret;
+
+	/* not all drives have the same disc_info length, so requeue
+	 * packet with the length the drive tells us it can supply
+	 */
+	cgc.buflen = be16_to_cpu(di->disc_information_length) +
+		     sizeof(di->disc_information_length);
+
+	if (cgc.buflen > sizeof(disc_information))
+		cgc.buflen = sizeof(disc_information);
+
+	cgc.cmd[8] = cgc.buflen;
+	return pkt_generic_packet(pd, &cgc);
+}
+
+static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, track_information *ti)
+{
+	struct packet_command cgc;
+	int ret;
+
+	init_cdrom_command(&cgc, ti, 8, CGC_DATA_READ);
+	cgc.cmd[0] = GPCMD_READ_TRACK_RZONE_INFO;
+	cgc.cmd[1] = type & 3;
+	cgc.cmd[4] = (track & 0xff00) >> 8;
+	cgc.cmd[5] = track & 0xff;
+	cgc.cmd[8] = 8;
+	cgc.quiet = 1;
+
+	if ((ret = pkt_generic_packet(pd, &cgc)))
+		return ret;
+
+	cgc.buflen = be16_to_cpu(ti->track_information_length) +
+		     sizeof(ti->track_information_length);
+
+	if (cgc.buflen > sizeof(track_information))
+		cgc.buflen = sizeof(track_information);
+
+	cgc.cmd[8] = cgc.buflen;
+	return pkt_generic_packet(pd, &cgc);
+}
+
+static int pkt_get_last_written(struct pktcdvd_device *pd, long *last_written)
+{
+	disc_information di;
+	track_information ti;
+	__u32 last_track;
+	int ret = -1;
+
+	if ((ret = pkt_get_disc_info(pd, &di)))
+		return ret;
+
+	last_track = (di.last_track_msb << 8) | di.last_track_lsb;
+	if ((ret = pkt_get_track_info(pd, last_track, 1, &ti)))
+		return ret;
+
+	/* if this track is blank, try the previous. */
+	if (ti.blank) {
+		last_track--;
+		if ((ret = pkt_get_track_info(pd, last_track, 1, &ti)))
+			return ret;
+	}
+
+	/* if last recorded field is valid, return it. */
+	if (ti.lra_v) {
+		*last_written = be32_to_cpu(ti.last_rec_address);
+	} else {
+		/* make it up instead */
+		*last_written = be32_to_cpu(ti.track_start) +
+				be32_to_cpu(ti.track_size);
+		if (ti.free_blocks)
+			*last_written -= (be32_to_cpu(ti.free_blocks) + 7);
+	}
+	return 0;
+}
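+
+/*
+ * Note on units: *last_written is in 2KB blocks, which is why callers
+ * scale it (pkt_open_dev uses "lba << 2" for 512-byte sectors and
+ * "lba << 11" for bytes). The "+ 7" in the estimate above is, we
+ * assume, an allowance for the link/run-in/run-out blocks of an open
+ * fixed packet (cf. link_size = 7 in pkt_set_write_settings).
+ */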
+
+/*
+ * write mode select package based on pd->settings
+ */
+static int pkt_set_write_settings(struct pktcdvd_device *pd)
+{
+	struct packet_command cgc;
+	struct request_sense sense;
+	write_param_page *wp;
+	char buffer[128];
+	int ret, size;
+
+	/* doesn't apply to DVD+RW or DVD-RAM */
+	if ((pd->mmc3_profile == 0x1a) || (pd->mmc3_profile == 0x12))
+		return 0;
+
+	memset(buffer, 0, sizeof(buffer));
+	init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ);
+	cgc.sense = &sense;
+	if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
+		pkt_dump_sense(&cgc);
+		return ret;
+	}
+
+	size = 2 + ((buffer[0] << 8) | (buffer[1] & 0xff));
+	pd->mode_offset = (buffer[6] << 8) | (buffer[7] & 0xff);
+	if (size > sizeof(buffer))
+		size = sizeof(buffer);
+
+	/*
+	 * now get it all
+	 */
+	init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ);
+	cgc.sense = &sense;
+	if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) {
+		pkt_dump_sense(&cgc);
+		return ret;
+	}
+
+	/*
+	 * write page is offset header + block descriptor length
+	 */
+	wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset];
+
+	wp->fp = pd->settings.fp;
+	wp->track_mode = pd->settings.track_mode;
+	wp->write_type = pd->settings.write_type;
+	wp->data_block_type = pd->settings.block_mode;
+
+	wp->multi_session = 0;
+
+#ifdef PACKET_USE_LS
+	wp->link_size = 7;
+	wp->ls_v = 1;
+#endif
+
+	if (wp->data_block_type == PACKET_BLOCK_MODE1) {
+		wp->session_format = 0;
+		wp->subhdr2 = 0x20;
+	} else if (wp->data_block_type == PACKET_BLOCK_MODE2) {
+		wp->session_format = 0x20;
+		wp->subhdr2 = 8;
+#if 0
+		wp->mcn[0] = 0x80;
+		memcpy(&wp->mcn[1], PACKET_MCN, sizeof(wp->mcn) - 1);
+#endif
+	} else {
+		/*
+		 * paranoia
+		 */
+		printk("pktcdvd: write mode wrong %d\n", wp->data_block_type);
+		return 1;
+	}
+	wp->packet_size = cpu_to_be32(pd->settings.size >> 2);
+
+	cgc.buflen = cgc.cmd[8] = size;
+	if ((ret = pkt_mode_select(pd, &cgc))) {
+		pkt_dump_sense(&cgc);
+		return ret;
+	}
+
+	pkt_print_settings(pd);
+	return 0;
+}
+
+/*
+ * 0 -- we can write to this track, 1 -- we can't
+ */
+static int pkt_good_track(track_information *ti)
+{
+	/*
+	 * only good for CD-RW at the moment, not DVD-RW
+	 */
+
+	/*
+	 * FIXME: only for FP
+	 */
+	if (ti->fp == 0)
+		return 0;
+
+	/*
+	 * "good" settings as per Mt Fuji.
+	 */
+	if (ti->rt == 0 && ti->blank == 0 && ti->packet == 1)
+		return 0;
+
+	if (ti->rt == 0 && ti->blank == 1 && ti->packet == 1)
+		return 0;
+
+	if (ti->rt == 1 && ti->blank == 0 && ti->packet == 1)
+		return 0;
+
+	printk("pktcdvd: bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
+	return 1;
+}
+
+/*
+ * 0 -- we can write to this disc, 1 -- we can't
+ */
+static int pkt_good_disc(struct pktcdvd_device *pd, disc_information *di)
+{
+	switch (pd->mmc3_profile) {
+		case 0x0a: /* CD-RW */
+		case 0xffff: /* MMC3 not supported */
+			break;
+		case 0x1a: /* DVD+RW */
+		case 0x13: /* DVD-RW */
+		case 0x12: /* DVD-RAM */
+			return 0;
+		default:
+			printk("pktcdvd: Wrong disc profile (%x)\n", pd->mmc3_profile);
+			return 1;
+	}
+
+	/*
+	 * for disc type 0xff we should probably reserve a new track.
+	 * but i'm not sure, should we leave this to user apps? probably.
+	 */
+	if (di->disc_type == 0xff) {
+		printk("pktcdvd: Unknown disc. No track?\n");
+		return 1;
+	}
+
+	if (di->disc_type != 0x20 && di->disc_type != 0) {
+		printk("pktcdvd: Wrong disc type (%x)\n", di->disc_type);
+		return 1;
+	}
+
+	if (di->erasable == 0) {
+		printk("pktcdvd: Disc not erasable\n");
+		return 1;
+	}
+
+	if (di->border_status == PACKET_SESSION_RESERVED) {
+		printk("pktcdvd: Can't write to last track (reserved)\n");
+		return 1;
+	}
+
+	return 0;
+}
+
+static int pkt_probe_settings(struct pktcdvd_device *pd)
+{
+	struct packet_command cgc;
+	unsigned char buf[12];
+	disc_information di;
+	track_information ti;
+	int ret, track;
+
+	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
+	cgc.cmd[0] = GPCMD_GET_CONFIGURATION;
+	cgc.cmd[8] = 8;
+	ret = pkt_generic_packet(pd, &cgc);
+	pd->mmc3_profile = ret ? 0xffff : buf[6] << 8 | buf[7];
+
+	memset(&di, 0, sizeof(disc_information));
+	memset(&ti, 0, sizeof(track_information));
+
+	if ((ret = pkt_get_disc_info(pd, &di))) {
+		printk("failed get_disc\n");
+		return ret;
+	}
+
+	if (pkt_good_disc(pd, &di))
+		return -ENXIO;
+
+	switch (pd->mmc3_profile) {
+		case 0x1a: /* DVD+RW */
+			printk("pktcdvd: inserted media is DVD+RW\n");
+			break;
+		case 0x13: /* DVD-RW */
+			printk("pktcdvd: inserted media is DVD-RW\n");
+			break;
+		case 0x12: /* DVD-RAM */
+			printk("pktcdvd: inserted media is DVD-RAM\n");
+			break;
+		default:
+			printk("pktcdvd: inserted media is CD-R%s\n", di.erasable ? "W" : "");
+			break;
+	}
+	pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;
+
+	track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
+	if ((ret = pkt_get_track_info(pd, track, 1, &ti))) {
+		printk("pktcdvd: failed get_track\n");
+		return ret;
+	}
+
+	if (pkt_good_track(&ti)) {
+		printk("pktcdvd: can't write to this track\n");
+		return -ENXIO;
+	}
+
+	/*
+	 * we keep packet size in 512 byte units, makes it easier to
+	 * deal with request calculations.
+	 */
+	pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
+	if (pd->settings.size == 0) {
+		printk("pktcdvd: detected zero packet size!\n");
+		pd->settings.size = 128;
+	}
+	pd->settings.fp = ti.fp;
+	pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
+
+	if (ti.nwa_v) {
+		pd->nwa = be32_to_cpu(ti.next_writable);
+		set_bit(PACKET_NWA_VALID, &pd->flags);
+	}
+
+	/*
+	 * in theory we could use lra on -RW media as well and just zero
+	 * blocks that haven't been written yet, but in practice that
+	 * is just a no-go. we'll use that for -R, naturally.
+	 */
+	if (ti.lra_v) {
+		pd->lra = be32_to_cpu(ti.last_rec_address);
+		set_bit(PACKET_LRA_VALID, &pd->flags);
+	} else {
+		pd->lra = 0xffffffff;
+		set_bit(PACKET_LRA_VALID, &pd->flags);
+	}
+
+	/*
+	 * fine for now
+	 */
+	pd->settings.link_loss = 7;
+	pd->settings.write_type = 0;	/* packet */
+	pd->settings.track_mode = ti.track_mode;
+
+	/*
+	 * mode1 or mode2 disc
+	 */
+	switch (ti.data_mode) {
+		case PACKET_MODE1:
+			pd->settings.block_mode = PACKET_BLOCK_MODE1;
+			break;
+		case PACKET_MODE2:
+			pd->settings.block_mode = PACKET_BLOCK_MODE2;
+			break;
+		default:
+			printk("pktcdvd: unknown data mode\n");
+			return 1;
+	}
+	return 0;
+}
+
+/*
+ * enable/disable write caching on drive
+ */
+static int pkt_write_caching(struct pktcdvd_device *pd, int set)
+{
+	struct packet_command cgc;
+	struct request_sense sense;
+	unsigned char buf[64];
+	int ret;
+
+	memset(buf, 0, sizeof(buf));
+	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
+	cgc.sense = &sense;
+	cgc.buflen = pd->mode_offset + 12;
+
+	/*
+	 * caching mode page might not be there, so quiet this command
+	 */
+	cgc.quiet = 1;
+
+	if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0)))
+		return ret;
+
+	buf[pd->mode_offset + 10] |= (!!set << 2);
+
+	cgc.buflen = cgc.cmd[8] = 2 + ((buf[0] << 8) | (buf[1] & 0xff));
+	ret = pkt_mode_select(pd, &cgc);
+	if (ret) {
+		printk("pktcdvd: write caching control failed\n");
+		pkt_dump_sense(&cgc);
+	} else if (set)
+		printk("pktcdvd: enabled write caching on %s\n", pd->name);
+	return ret;
+}
+
+static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag)
+{
+	struct packet_command cgc;
+
+	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
+	cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL;
+	cgc.cmd[4] = lockflag ? 1 : 0;
+	return pkt_generic_packet(pd, &cgc);
+}
+
+/*
+ * Returns drive maximum write speed
+ */
+static int pkt_get_max_speed(struct pktcdvd_device *pd, unsigned *write_speed)
+{
+	struct packet_command cgc;
+	struct request_sense sense;
+	unsigned char buf[256+18];
+	unsigned char *cap_buf;
+	int ret, offset;
+
+	memset(buf, 0, sizeof(buf));
+	cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset];
+	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN);
+	cgc.sense = &sense;
+
+	ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
+	if (ret) {
+		cgc.buflen = pd->mode_offset + cap_buf[1] + 2 +
+			     sizeof(struct mode_page_header);
+		ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
+		if (ret) {
+			pkt_dump_sense(&cgc);
+			return ret;
+		}
+	}
+
+	offset = 20;			    /* Obsoleted field, used by older drives */
+	if (cap_buf[1] >= 28)
+		offset = 28;		    /* Current write speed selected */
+	if (cap_buf[1] >= 30) {
+		/* If the drive reports at least one "Logical Unit Write
+		 * Speed Performance Descriptor Block", use the information
+		 * in the first block. (contains the highest speed)
+		 */
+		int num_spdb = (cap_buf[30] << 8) + cap_buf[31];
+		if (num_spdb > 0)
+			offset = 34;
+	}
+
+	*write_speed = (cap_buf[offset] << 8) | cap_buf[offset + 1];
+	return 0;
+}
+
+/* These tables from cdrecord - I don't have orange book */
+/* standard speed CD-RW (1-4x) */
+static char clv_to_speed[16] = {
+	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
+	   0, 2, 4, 6, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+/* high speed CD-RW (-10x) */
+static char hs_clv_to_speed[16] = {
+	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
+	   0, 2, 4, 6, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+};
+/* ultra high speed CD-RW */
+static char us_clv_to_speed[16] = {
+	/* 0  1  2  3  4  5  6  7  8  9 10 11 12 13 14 15 */
+	   0, 2, 4, 8, 0, 0,16, 0,24,32,40,48, 0, 0, 0, 0
+};
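+
+/*
+ * Example of the lookup below (illustrative): a high speed CD-RW
+ * (sub-type st == 1) reporting ATIP speed index sp == 4 yields
+ * hs_clv_to_speed[4] = 10, i.e. a 10x maximum media speed.
+ */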
+
+/*
+ * reads the maximum media speed from ATIP
+ */
+static int pkt_media_speed(struct pktcdvd_device *pd, unsigned *speed)
+{
+	struct packet_command cgc;
+	struct request_sense sense;
+	unsigned char buf[64];
+	unsigned int size, st, sp;
+	int ret;
+
+	init_cdrom_command(&cgc, buf, 2, CGC_DATA_READ);
+	cgc.sense = &sense;
+	cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
+	cgc.cmd[1] = 2;
+	cgc.cmd[2] = 4; /* READ ATIP */
+	cgc.cmd[8] = 2;
+	ret = pkt_generic_packet(pd, &cgc);
+	if (ret) {
+		pkt_dump_sense(&cgc);
+		return ret;
+	}
+	size = ((unsigned int) buf[0]<<8) + buf[1] + 2;
+	if (size > sizeof(buf))
+		size = sizeof(buf);
+
+	init_cdrom_command(&cgc, buf, size, CGC_DATA_READ);
+	cgc.sense = &sense;
+	cgc.cmd[0] = GPCMD_READ_TOC_PMA_ATIP;
+	cgc.cmd[1] = 2;
+	cgc.cmd[2] = 4;
+	cgc.cmd[8] = size;
+	ret = pkt_generic_packet(pd, &cgc);
+	if (ret) {
+		pkt_dump_sense(&cgc);
+		return ret;
+	}
+
+	if (!(buf[6] & 0x40)) {
+		printk("pktcdvd: Disc type is not CD-RW\n");
+		return 1;
+	}
+	if (!(buf[6] & 0x4)) {
+		printk("pktcdvd: A1 values on media are not valid, maybe not CDRW?\n");
+		return 1;
+	}
+
+	st = (buf[6] >> 3) & 0x7; /* disc sub-type */
+
+	sp = buf[16] & 0xf; /* max speed from ATIP A1 field */
+
+	/* Info from cdrecord */
+	switch (st) {
+		case 0: /* standard speed */
+			*speed = clv_to_speed[sp];
+			break;
+		case 1: /* high speed */
+			*speed = hs_clv_to_speed[sp];
+			break;
+		case 2: /* ultra high speed */
+			*speed = us_clv_to_speed[sp];
+			break;
+		default:
+			printk("pktcdvd: Unknown disc sub-type %d\n",st);
+			return 1;
+	}
+	if (*speed) {
+		printk("pktcdvd: Max. media speed: %d\n",*speed);
+		return 0;
+	} else {
+		printk("pktcdvd: Unknown speed %d for sub-type %d\n",sp,st);
+		return 1;
+	}
+}
+
+static int pkt_perform_opc(struct pktcdvd_device *pd)
+{
+	struct packet_command cgc;
+	struct request_sense sense;
+	int ret;
+
+	VPRINTK("pktcdvd: Performing OPC\n");
+
+	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
+	cgc.sense = &sense;
+	cgc.timeout = 60*HZ;
+	cgc.cmd[0] = GPCMD_SEND_OPC;
+	cgc.cmd[1] = 1;
+	if ((ret = pkt_generic_packet(pd, &cgc)))
+		pkt_dump_sense(&cgc);
+	return ret;
+}
+
+static int pkt_open_write(struct pktcdvd_device *pd)
+{
+	int ret;
+	unsigned int write_speed, media_write_speed, read_speed;
+
+	if ((ret = pkt_probe_settings(pd))) {
+		DPRINTK("pktcdvd: %s failed probe\n", pd->name);
+		return -EIO;
+	}
+
+	if ((ret = pkt_set_write_settings(pd))) {
+		DPRINTK("pktcdvd: %s failed saving write settings\n", pd->name);
+		return -EIO;
+	}
+
+	pkt_write_caching(pd, USE_WCACHING);
+
+	if ((ret = pkt_get_max_speed(pd, &write_speed)))
+		write_speed = 16 * 177;
+	switch (pd->mmc3_profile) {
+		case 0x13: /* DVD-RW */
+		case 0x1a: /* DVD+RW */
+		case 0x12: /* DVD-RAM */
+			DPRINTK("pktcdvd: write speed %ukB/s\n", write_speed);
+			break;
+		default:
+			if ((ret = pkt_media_speed(pd, &media_write_speed)))
+				media_write_speed = 16;
+			write_speed = min(write_speed, media_write_speed * 177);
+			DPRINTK("pktcdvd: write speed %ux\n", write_speed / 176);
+			break;
+	}
+	read_speed = write_speed;
+
+	if ((ret = pkt_set_speed(pd, write_speed, read_speed))) {
+		DPRINTK("pktcdvd: %s couldn't set write speed\n", pd->name);
+		return -EIO;
+	}
+	pd->write_speed = write_speed;
+	pd->read_speed = read_speed;
+
+	if ((ret = pkt_perform_opc(pd))) {
+		DPRINTK("pktcdvd: %s Optimum Power Calibration failed\n", pd->name);
+	}
+
+	return 0;
+}
+
+/*
+ * called at open time.
+ */
+static int pkt_open_dev(struct pktcdvd_device *pd, int write)
+{
+	int ret;
+	long lba;
+	request_queue_t *q;
+
+	/*
+	 * We need to re-open the cdrom device without O_NONBLOCK to be able
+	 * to read/write from/to it. It is already opened in O_NONBLOCK mode
+	 * so bdget() can't fail.
+	 */
+	bdget(pd->bdev->bd_dev);
+	if ((ret = blkdev_get(pd->bdev, FMODE_READ, O_RDONLY)))
+		goto out;
+
+	if ((ret = pkt_get_last_written(pd, &lba))) {
+		printk("pktcdvd: pkt_get_last_written failed\n");
+		goto out_putdev;
+	}
+
+	set_capacity(pd->disk, lba << 2);
+	set_capacity(pd->bdev->bd_disk, lba << 2);
+	bd_set_size(pd->bdev, (loff_t)lba << 11);
+
+	q = bdev_get_queue(pd->bdev);
+	if (write) {
+		if ((ret = pkt_open_write(pd)))
+			goto out_putdev;
+		/*
+		 * Some CDRW drives can not handle writes larger than one packet,
+		 * even if the size is a multiple of the packet size.
+		 */
+		spin_lock_irq(q->queue_lock);
+		blk_queue_max_sectors(q, pd->settings.size);
+		spin_unlock_irq(q->queue_lock);
+		set_bit(PACKET_WRITABLE, &pd->flags);
+	} else {
+		pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
+		clear_bit(PACKET_WRITABLE, &pd->flags);
+	}
+
+	if ((ret = pkt_set_segment_merging(pd, q)))
+		goto out_putdev;
+
+	if (write)
+		printk("pktcdvd: %lukB available on disc\n", lba << 1);
+
+	return 0;
+
+out_putdev:
+	blkdev_put(pd->bdev);
+out:
+	return ret;
+}
+
+/*
+ * called when the device is closed. makes sure that the device flushes
+ * the internal cache before we close.
+ */
+static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
+{
+	if (flush && pkt_flush_cache(pd))
+		DPRINTK("pktcdvd: %s not flushing cache\n", pd->name);
+
+	pkt_lock_door(pd, 0);
+
+	pkt_set_speed(pd, MAX_SPEED, MAX_SPEED);
+	blkdev_put(pd->bdev);
+}
+
+static struct pktcdvd_device *pkt_find_dev_from_minor(int dev_minor)
+{
+	if (dev_minor >= MAX_WRITERS)
+		return NULL;
+	return pkt_devs[dev_minor];
+}
+
+static int pkt_open(struct inode *inode, struct file *file)
+{
+	struct pktcdvd_device *pd = NULL;
+	int ret;
+
+	VPRINTK("pktcdvd: entering open\n");
+
+	down(&ctl_mutex);
+	pd = pkt_find_dev_from_minor(iminor(inode));
+	if (!pd) {
+		ret = -ENODEV;
+		goto out;
+	}
+	BUG_ON(pd->refcnt < 0);
+
+	pd->refcnt++;
+	if (pd->refcnt == 1) {
+		if (pkt_open_dev(pd, file->f_mode & FMODE_WRITE)) {
+			ret = -EIO;
+			goto out_dec;
+		}
+		/*
+		 * needed here as well, since ext2 (among others) may change
+		 * the blocksize at mount time
+		 */
+		set_blocksize(inode->i_bdev, CD_FRAMESIZE);
+	}
+
+	up(&ctl_mutex);
+	return 0;
+
+out_dec:
+	pd->refcnt--;
+out:
+	VPRINTK("pktcdvd: failed open (%d)\n", ret);
+	up(&ctl_mutex);
+	return ret;
+}
+
+static int pkt_close(struct inode *inode, struct file *file)
+{
+	struct pktcdvd_device *pd = inode->i_bdev->bd_disk->private_data;
+	int ret = 0;
+
+	down(&ctl_mutex);
+	pd->refcnt--;
+	BUG_ON(pd->refcnt < 0);
+	if (pd->refcnt == 0) {
+		int flush = test_bit(PACKET_WRITABLE, &pd->flags);
+		pkt_release_dev(pd, flush);
+	}
+	up(&ctl_mutex);
+	return ret;
+}
+
+
+static void *psd_pool_alloc(unsigned int __nocast gfp_mask, void *data)
+{
+	return kmalloc(sizeof(struct packet_stacked_data), gfp_mask);
+}
+
+static void psd_pool_free(void *ptr, void *data)
+{
+	kfree(ptr);
+}
+
+static int pkt_end_io_read_cloned(struct bio *bio, unsigned int bytes_done, int err)
+{
+	struct packet_stacked_data *psd = bio->bi_private;
+	struct pktcdvd_device *pd = psd->pd;
+
+	if (bio->bi_size)
+		return 1;
+
+	bio_put(bio);
+	bio_endio(psd->bio, psd->bio->bi_size, err);
+	mempool_free(psd, psd_pool);
+	pkt_bio_finished(pd);
+	return 0;
+}
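+
+/*
+ * A note on the completion convention above: in this bio model,
+ * bi_end_io may be called for partial completions.  A non-zero
+ * bio->bi_size means more I/O is still outstanding, so we return 1
+ * and only complete the original bio once the clone has fully
+ * finished.
+ */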
+
+static int pkt_make_request(request_queue_t *q, struct bio *bio)
+{
+	struct pktcdvd_device *pd;
+	char b[BDEVNAME_SIZE];
+	sector_t zone;
+	struct packet_data *pkt;
+	int was_empty, blocked_bio;
+	struct pkt_rb_node *node;
+
+	pd = q->queuedata;
+	if (!pd) {
+		printk("pktcdvd: %s incorrect request queue\n", bdevname(bio->bi_bdev, b));
+		goto end_io;
+	}
+
+	/*
+	 * Clone READ bios so we can have our own bi_end_io callback.
+	 */
+	if (bio_data_dir(bio) == READ) {
+		struct bio *cloned_bio = bio_clone(bio, GFP_NOIO);
+		struct packet_stacked_data *psd = mempool_alloc(psd_pool, GFP_NOIO);
+
+		psd->pd = pd;
+		psd->bio = bio;
+		cloned_bio->bi_bdev = pd->bdev;
+		cloned_bio->bi_private = psd;
+		cloned_bio->bi_end_io = pkt_end_io_read_cloned;
+		pd->stats.secs_r += bio->bi_size >> 9;
+		pkt_queue_bio(pd, cloned_bio, 1);
+		return 0;
+	}
+
+	if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
+		printk("pktcdvd: WRITE for ro device %s (%llu)\n",
+			pd->name, (unsigned long long)bio->bi_sector);
+		goto end_io;
+	}
+
+	if (!bio->bi_size || (bio->bi_size % CD_FRAMESIZE)) {
+		printk("pktcdvd: wrong bio size\n");
+		goto end_io;
+	}
+
+	blk_queue_bounce(q, &bio);
+
+	zone = ZONE(bio->bi_sector, pd);
+	VPRINTK("pkt_make_request: start = %6llx stop = %6llx\n",
+		(unsigned long long)bio->bi_sector,
+		(unsigned long long)(bio->bi_sector + bio_sectors(bio)));
+
+	/* Check if we have to split the bio */
+	{
+		struct bio_pair *bp;
+		sector_t last_zone;
+		int first_sectors;
+
+		last_zone = ZONE(bio->bi_sector + bio_sectors(bio) - 1, pd);
+		if (last_zone != zone) {
+			BUG_ON(last_zone != zone + pd->settings.size);
+			first_sectors = last_zone - bio->bi_sector;
+			bp = bio_split(bio, bio_split_pool, first_sectors);
+			BUG_ON(!bp);
+			pkt_make_request(q, &bp->bio1);
+			pkt_make_request(q, &bp->bio2);
+			bio_pair_release(bp);
+			return 0;
+		}
+	}
+
+	/*
+	 * If we find a matching packet in state WAITING or READ_WAIT, we can
+	 * just append this bio to that packet.
+	 */
+	spin_lock(&pd->cdrw.active_list_lock);
+	blocked_bio = 0;
+	list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) {
+		if (pkt->sector == zone) {
+			spin_lock(&pkt->lock);
+			if ((pkt->state == PACKET_WAITING_STATE) ||
+			    (pkt->state == PACKET_READ_WAIT_STATE)) {
+				pkt_add_list_last(bio, &pkt->orig_bios,
+						  &pkt->orig_bios_tail);
+				pkt->write_size += bio->bi_size / CD_FRAMESIZE;
+				if ((pkt->write_size >= pkt->frames) &&
+				    (pkt->state == PACKET_WAITING_STATE)) {
+					atomic_inc(&pkt->run_sm);
+					wake_up(&pd->wqueue);
+				}
+				spin_unlock(&pkt->lock);
+				spin_unlock(&pd->cdrw.active_list_lock);
+				return 0;
+			} else {
+				blocked_bio = 1;
+			}
+			spin_unlock(&pkt->lock);
+		}
+	}
+	spin_unlock(&pd->cdrw.active_list_lock);
+
+	/*
+	 * No matching packet found. Store the bio in the work queue.
+	 */
+	node = mempool_alloc(pd->rb_pool, GFP_NOIO);
+	BUG_ON(!node);
+	node->bio = bio;
+	spin_lock(&pd->lock);
+	BUG_ON(pd->bio_queue_size < 0);
+	was_empty = (pd->bio_queue_size == 0);
+	pkt_rbtree_insert(pd, node);
+	spin_unlock(&pd->lock);
+
+	/*
+	 * Wake up the worker thread.
+	 */
+	atomic_set(&pd->scan_queue, 1);
+	if (was_empty) {
+		/* This wake_up is required for correct operation */
+		wake_up(&pd->wqueue);
+	} else if (!list_empty(&pd->cdrw.pkt_free_list) && !blocked_bio) {
+		/*
+		 * This wake up is not required for correct operation,
+		 * but improves performance in some cases.
+		 */
+		wake_up(&pd->wqueue);
+	}
+	return 0;
+end_io:
+	bio_io_error(bio, bio->bi_size);
+	return 0;
+}
+
+
+
+static int pkt_merge_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *bvec)
+{
+	struct pktcdvd_device *pd = q->queuedata;
+	sector_t zone = ZONE(bio->bi_sector, pd);
+	int used = ((bio->bi_sector - zone) << 9) + bio->bi_size;
+	int remaining = (pd->settings.size << 9) - used;
+	int remaining2;
+
+	/*
+	 * A bio <= PAGE_SIZE must be allowed. If it crosses a packet
+	 * boundary, pkt_make_request() will split the bio.
+	 */
+	remaining2 = PAGE_SIZE - bio->bi_size;
+	remaining = max(remaining, remaining2);
+
+	BUG_ON(remaining < 0);
+	return remaining;
+}
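+
+/*
+ * A worked example of the merge arithmetic above, assuming a 32-frame
+ * (64kB) packet, i.e. pd->settings.size == 128 sectors: a bio starting
+ * 16 sectors into its zone with bi_size == 8kB has used
+ * (16 << 9) + 8192 = 16384 bytes, leaving 65536 - 16384 = 49152 bytes
+ * before the packet boundary.  The PAGE_SIZE floor only matters for
+ * small bios near the boundary, which pkt_make_request() splits.
+ */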
+
+static void pkt_init_queue(struct pktcdvd_device *pd)
+{
+	request_queue_t *q = pd->disk->queue;
+
+	blk_queue_make_request(q, pkt_make_request);
+	blk_queue_hardsect_size(q, CD_FRAMESIZE);
+	blk_queue_max_sectors(q, PACKET_MAX_SECTORS);
+	blk_queue_merge_bvec(q, pkt_merge_bvec);
+	q->queuedata = pd;
+}
+
+static int pkt_seq_show(struct seq_file *m, void *p)
+{
+	struct pktcdvd_device *pd = m->private;
+	char *msg;
+	char bdev_buf[BDEVNAME_SIZE];
+	int states[PACKET_NUM_STATES];
+
+	seq_printf(m, "Writer %s mapped to %s:\n", pd->name,
+		   bdevname(pd->bdev, bdev_buf));
+
+	seq_printf(m, "\nSettings:\n");
+	seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2);
+
+	if (pd->settings.write_type == 0)
+		msg = "Packet";
+	else
+		msg = "Unknown";
+	seq_printf(m, "\twrite type:\t\t%s\n", msg);
+
+	seq_printf(m, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable");
+	seq_printf(m, "\tlink loss:\t\t%d\n", pd->settings.link_loss);
+
+	seq_printf(m, "\ttrack mode:\t\t%d\n", pd->settings.track_mode);
+
+	if (pd->settings.block_mode == PACKET_BLOCK_MODE1)
+		msg = "Mode 1";
+	else if (pd->settings.block_mode == PACKET_BLOCK_MODE2)
+		msg = "Mode 2";
+	else
+		msg = "Unknown";
+	seq_printf(m, "\tblock mode:\t\t%s\n", msg);
+
+	seq_printf(m, "\nStatistics:\n");
+	seq_printf(m, "\tpackets started:\t%lu\n", pd->stats.pkt_started);
+	seq_printf(m, "\tpackets ended:\t\t%lu\n", pd->stats.pkt_ended);
+	seq_printf(m, "\twritten:\t\t%lukB\n", pd->stats.secs_w >> 1);
+	seq_printf(m, "\tread gather:\t\t%lukB\n", pd->stats.secs_rg >> 1);
+	seq_printf(m, "\tread:\t\t\t%lukB\n", pd->stats.secs_r >> 1);
+
+	seq_printf(m, "\nMisc:\n");
+	seq_printf(m, "\treference count:\t%d\n", pd->refcnt);
+	seq_printf(m, "\tflags:\t\t\t0x%lx\n", pd->flags);
+	seq_printf(m, "\tread speed:\t\t%ukB/s\n", pd->read_speed);
+	seq_printf(m, "\twrite speed:\t\t%ukB/s\n", pd->write_speed);
+	seq_printf(m, "\tstart offset:\t\t%lu\n", pd->offset);
+	seq_printf(m, "\tmode page offset:\t%u\n", pd->mode_offset);
+
+	seq_printf(m, "\nQueue state:\n");
+	seq_printf(m, "\tbios queued:\t\t%d\n", pd->bio_queue_size);
+	seq_printf(m, "\tbios pending:\t\t%d\n", atomic_read(&pd->cdrw.pending_bios));
+	seq_printf(m, "\tcurrent sector:\t\t0x%llx\n", (unsigned long long)pd->current_sector);
+
+	pkt_count_states(pd, states);
+	seq_printf(m, "\tstate:\t\t\ti:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n",
+		   states[0], states[1], states[2], states[3], states[4], states[5]);
+
+	return 0;
+}
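+
+/*
+ * pkt_seq_show() renders, under the pktcdvd directory that pkt_init()
+ * creates in /proc (via proc_root_driver), output along these lines
+ * (values illustrative):
+ *
+ *	Writer pktcdvd0 mapped to hdc:
+ *
+ *	Settings:
+ *		packet size:		64kB
+ *		write type:		Packet
+ *		packet type:		Fixed
+ *		...
+ */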
+
+static int pkt_seq_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, pkt_seq_show, PDE(inode)->data);
+}
+
+static struct file_operations pkt_proc_fops = {
+	.open	= pkt_seq_open,
+	.read	= seq_read,
+	.llseek	= seq_lseek,
+	.release = single_release
+};
+
+static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev)
+{
+	int i;
+	int ret = 0;
+	char b[BDEVNAME_SIZE];
+	struct proc_dir_entry *proc;
+	struct block_device *bdev;
+
+	if (pd->pkt_dev == dev) {
+		printk("pktcdvd: Recursive setup not allowed\n");
+		return -EBUSY;
+	}
+	for (i = 0; i < MAX_WRITERS; i++) {
+		struct pktcdvd_device *pd2 = pkt_devs[i];
+		if (!pd2)
+			continue;
+		if (pd2->bdev->bd_dev == dev) {
+			printk("pktcdvd: %s already setup\n", bdevname(pd2->bdev, b));
+			return -EBUSY;
+		}
+		if (pd2->pkt_dev == dev) {
+			printk("pktcdvd: Can't chain pktcdvd devices\n");
+			return -EBUSY;
+		}
+	}
+
+	bdev = bdget(dev);
+	if (!bdev)
+		return -ENOMEM;
+	ret = blkdev_get(bdev, FMODE_READ, O_RDONLY | O_NONBLOCK);
+	if (ret)
+		return ret;
+
+	/* This is safe, since we have a reference from open(). */
+	__module_get(THIS_MODULE);
+
+	if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) {
+		printk("pktcdvd: not enough memory for buffers\n");
+		ret = -ENOMEM;
+		goto out_mem;
+	}
+
+	pd->bdev = bdev;
+	set_blocksize(bdev, CD_FRAMESIZE);
+
+	pkt_init_queue(pd);
+
+	atomic_set(&pd->cdrw.pending_bios, 0);
+	pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->name);
+	if (IS_ERR(pd->cdrw.thread)) {
+		printk("pktcdvd: can't start kernel thread\n");
+		ret = -ENOMEM;
+		goto out_thread;
+	}
+
+	proc = create_proc_entry(pd->name, 0, pkt_proc);
+	if (proc) {
+		proc->data = pd;
+		proc->proc_fops = &pkt_proc_fops;
+	}
+	DPRINTK("pktcdvd: writer %s mapped to %s\n", pd->name, bdevname(bdev, b));
+	return 0;
+
+out_thread:
+	pkt_shrink_pktlist(pd);
+out_mem:
+	blkdev_put(bdev);
+	/* This is safe: open() is still holding a reference. */
+	module_put(THIS_MODULE);
+	return ret;
+}
+
+static int pkt_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct pktcdvd_device *pd = inode->i_bdev->bd_disk->private_data;
+
+	VPRINTK("pkt_ioctl: cmd %x, dev %d:%d\n", cmd, imajor(inode), iminor(inode));
+	BUG_ON(!pd);
+
+	switch (cmd) {
+	/*
+	 * forward selected CDROM ioctls to CD-ROM, for UDF
+	 */
+	case CDROMMULTISESSION:
+	case CDROMREADTOCENTRY:
+	case CDROM_LAST_WRITTEN:
+	case CDROM_SEND_PACKET:
+	case SCSI_IOCTL_SEND_COMMAND:
+		return ioctl_by_bdev(pd->bdev, cmd, arg);
+
+	case CDROMEJECT:
+		/*
+		 * The door gets locked when the device is opened, so we
+		 * have to unlock it or else the eject command fails.
+		 */
+		pkt_lock_door(pd, 0);
+		return ioctl_by_bdev(pd->bdev, cmd, arg);
+
+	default:
+		printk("pktcdvd: Unknown ioctl for %s (%x)\n", pd->name, cmd);
+		return -ENOTTY;
+	}
+
+	return 0;
+}
+
+static int pkt_media_changed(struct gendisk *disk)
+{
+	struct pktcdvd_device *pd = disk->private_data;
+	struct gendisk *attached_disk;
+
+	if (!pd)
+		return 0;
+	if (!pd->bdev)
+		return 0;
+	attached_disk = pd->bdev->bd_disk;
+	if (!attached_disk)
+		return 0;
+	return attached_disk->fops->media_changed(attached_disk);
+}
+
+static struct block_device_operations pktcdvd_ops = {
+	.owner =		THIS_MODULE,
+	.open =			pkt_open,
+	.release =		pkt_close,
+	.ioctl =		pkt_ioctl,
+	.media_changed =	pkt_media_changed,
+};
+
+/*
+ * Set up mapping from pktcdvd device to CD-ROM device.
+ */
+static int pkt_setup_dev(struct pkt_ctrl_command *ctrl_cmd)
+{
+	int idx;
+	int ret = -ENOMEM;
+	struct pktcdvd_device *pd;
+	struct gendisk *disk;
+	dev_t dev = new_decode_dev(ctrl_cmd->dev);
+
+	for (idx = 0; idx < MAX_WRITERS; idx++)
+		if (!pkt_devs[idx])
+			break;
+	if (idx == MAX_WRITERS) {
+		printk("pktcdvd: max %d writers supported\n", MAX_WRITERS);
+		return -EBUSY;
+	}
+
+	pd = kmalloc(sizeof(struct pktcdvd_device), GFP_KERNEL);
+	if (!pd)
+		return ret;
+	memset(pd, 0, sizeof(struct pktcdvd_device));
+
+	pd->rb_pool = mempool_create(PKT_RB_POOL_SIZE, pkt_rb_alloc, pkt_rb_free, NULL);
+	if (!pd->rb_pool)
+		goto out_mem;
+
+	disk = alloc_disk(1);
+	if (!disk)
+		goto out_mem;
+	pd->disk = disk;
+
+	spin_lock_init(&pd->lock);
+	spin_lock_init(&pd->iosched.lock);
+	sprintf(pd->name, "pktcdvd%d", idx);
+	init_waitqueue_head(&pd->wqueue);
+	pd->bio_queue = RB_ROOT;
+
+	disk->major = pkt_major;
+	disk->first_minor = idx;
+	disk->fops = &pktcdvd_ops;
+	disk->flags = GENHD_FL_REMOVABLE;
+	sprintf(disk->disk_name, "pktcdvd%d", idx);
+	disk->private_data = pd;
+	disk->queue = blk_alloc_queue(GFP_KERNEL);
+	if (!disk->queue)
+		goto out_mem2;
+
+	pd->pkt_dev = MKDEV(disk->major, disk->first_minor);
+	ret = pkt_new_dev(pd, dev);
+	if (ret)
+		goto out_new_dev;
+
+	add_disk(disk);
+	pkt_devs[idx] = pd;
+	ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev);
+	return 0;
+
+out_new_dev:
+	blk_put_queue(disk->queue);
+out_mem2:
+	put_disk(disk);
+out_mem:
+	if (pd->rb_pool)
+		mempool_destroy(pd->rb_pool);
+	kfree(pd);
+	return ret;
+}
+
+/*
+ * Tear down mapping from pktcdvd device to CD-ROM device.
+ */
+static int pkt_remove_dev(struct pkt_ctrl_command *ctrl_cmd)
+{
+	struct pktcdvd_device *pd;
+	int idx;
+	dev_t pkt_dev = new_decode_dev(ctrl_cmd->pkt_dev);
+
+	for (idx = 0; idx < MAX_WRITERS; idx++) {
+		pd = pkt_devs[idx];
+		if (pd && (pd->pkt_dev == pkt_dev))
+			break;
+	}
+	if (idx == MAX_WRITERS) {
+		DPRINTK("pktcdvd: dev not setup\n");
+		return -ENXIO;
+	}
+
+	if (pd->refcnt > 0)
+		return -EBUSY;
+
+	if (!IS_ERR(pd->cdrw.thread))
+		kthread_stop(pd->cdrw.thread);
+
+	blkdev_put(pd->bdev);
+
+	pkt_shrink_pktlist(pd);
+
+	remove_proc_entry(pd->name, pkt_proc);
+	DPRINTK("pktcdvd: writer %s unmapped\n", pd->name);
+
+	del_gendisk(pd->disk);
+	blk_put_queue(pd->disk->queue);
+	put_disk(pd->disk);
+
+	pkt_devs[idx] = NULL;
+	mempool_destroy(pd->rb_pool);
+	kfree(pd);
+
+	/* This is safe: open() is still holding a reference. */
+	module_put(THIS_MODULE);
+	return 0;
+}
+
+static void pkt_get_status(struct pkt_ctrl_command *ctrl_cmd)
+{
+	struct pktcdvd_device *pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index);
+	if (pd) {
+		ctrl_cmd->dev = new_encode_dev(pd->bdev->bd_dev);
+		ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev);
+	} else {
+		ctrl_cmd->dev = 0;
+		ctrl_cmd->pkt_dev = 0;
+	}
+	ctrl_cmd->num_devices = MAX_WRITERS;
+}
+
+static int pkt_ctl_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned long arg)
+{
+	void __user *argp = (void __user *)arg;
+	struct pkt_ctrl_command ctrl_cmd;
+	int ret = 0;
+
+	if (cmd != PACKET_CTRL_CMD)
+		return -ENOTTY;
+
+	if (copy_from_user(&ctrl_cmd, argp, sizeof(struct pkt_ctrl_command)))
+		return -EFAULT;
+
+	switch (ctrl_cmd.command) {
+	case PKT_CTRL_CMD_SETUP:
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+		down(&ctl_mutex);
+		ret = pkt_setup_dev(&ctrl_cmd);
+		up(&ctl_mutex);
+		break;
+	case PKT_CTRL_CMD_TEARDOWN:
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+		down(&ctl_mutex);
+		ret = pkt_remove_dev(&ctrl_cmd);
+		up(&ctl_mutex);
+		break;
+	case PKT_CTRL_CMD_STATUS:
+		down(&ctl_mutex);
+		pkt_get_status(&ctrl_cmd);
+		up(&ctl_mutex);
+		break;
+	default:
+		return -ENOTTY;
+	}
+
+	if (copy_to_user(argp, &ctrl_cmd, sizeof(struct pkt_ctrl_command)))
+		return -EFAULT;
+	return ret;
+}
+
+
+static struct file_operations pkt_ctl_fops = {
+	.ioctl	 = pkt_ctl_ioctl,
+	.owner	 = THIS_MODULE,
+};
+
+static struct miscdevice pkt_misc = {
+	.minor 		= MISC_DYNAMIC_MINOR,
+	.name  		= "pktcdvd",
+	.devfs_name 	= "pktcdvd/control",
+	.fops  		= &pkt_ctl_fops
+};
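+
+/*
+ * A minimal userspace sketch of driving the control device above
+ * (illustrative only; a real setup tool would resolve device names and
+ * check errors more carefully).  "encoded_dev" stands for the
+ * new_encode_dev()-style encoding of the CD writer's dev_t:
+ *
+ *	struct pkt_ctrl_command c = { 0 };
+ *	int fd = open("/dev/pktcdvd/control", O_RDONLY);
+ *
+ *	c.command = PKT_CTRL_CMD_SETUP;
+ *	c.dev = encoded_dev;
+ *	if (fd >= 0 && ioctl(fd, PACKET_CTRL_CMD, &c) == 0)
+ *		printf("created pkt_dev %u\n", c.pkt_dev);
+ */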
+
+static int __init pkt_init(void)
+{
+	int ret;
+
+	psd_pool = mempool_create(PSD_POOL_SIZE, psd_pool_alloc, psd_pool_free, NULL);
+	if (!psd_pool)
+		return -ENOMEM;
+
+	ret = register_blkdev(pkt_major, "pktcdvd");
+	if (ret < 0) {
+		printk("pktcdvd: Unable to register block device\n");
+		goto out2;
+	}
+	if (!pkt_major)
+		pkt_major = ret;
+
+	ret = misc_register(&pkt_misc);
+	if (ret) {
+		printk("pktcdvd: Unable to register misc device\n");
+		goto out;
+	}
+
+	init_MUTEX(&ctl_mutex);
+
+	pkt_proc = proc_mkdir("pktcdvd", proc_root_driver);
+
+	DPRINTK("pktcdvd: %s\n", VERSION_CODE);
+	return 0;
+
+out:
+	unregister_blkdev(pkt_major, "pktcdvd");
+out2:
+	mempool_destroy(psd_pool);
+	return ret;
+}
+
+static void __exit pkt_exit(void)
+{
+	remove_proc_entry("pktcdvd", proc_root_driver);
+	misc_deregister(&pkt_misc);
+	unregister_blkdev(pkt_major, "pktcdvd");
+	mempool_destroy(psd_pool);
+}
+
+MODULE_DESCRIPTION("Packet writing layer for CD/DVD drives");
+MODULE_AUTHOR("Jens Axboe <axboe@suse.de>");
+MODULE_LICENSE("GPL");
+
+module_init(pkt_init);
+module_exit(pkt_exit);
diff --git a/drivers/block/ps2esdi.c b/drivers/block/ps2esdi.c
new file mode 100644
index 0000000..2954878
--- /dev/null
+++ b/drivers/block/ps2esdi.c
@@ -0,0 +1,1092 @@
+/* ps2esdi driver based on assembler code by Arindam Banerji,
+   written by Peter De Schrijver */
+/* Reassuring note to IBM : This driver was NOT developed by reverse
+   engineering the PS/2's BIOS */
+/* Dedicated to Wannes, Tofke, Ykke, Godot, Killroy and all those 
+   other lovely fish out there... */
+/* This code was written during the long and boring WINA 
+   elections 1994 */
+/* Thanks to Arindam Banerji for giving me the source of his driver */
+/* This code may be freely distributed and modified in any way, 
+   as long as these notes remain intact */
+
+/*  Revised: 05/07/94 by Arindam Banerji (axb@cse.nd.edu) */
+/*  Revised: 09/08/94 by Peter De Schrijver (stud11@cc4.kuleuven.ac.be)
+   Thanks to Arindam Banerji for sending me the docs of the adapter */
+
+/* BA Modified for ThinkPad 720 by Boris Ashkinazi */
+/*                    (bash@vnet.ibm.com) 08/08/95 */
+
+/* Modified further for ThinkPad-720C by Uri Blumenthal */
+/*                    (uri@watson.ibm.com) Sep 11, 1995 */
+
+/* TODO : 
+   + Timeouts
+   + Get disk parameters
+   + DMA above 16MB
+   + reset after read/write error
+ */
+
+#define DEVICE_NAME "PS/2 ESDI"
+
+#include <linux/config.h>
+#include <linux/major.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/interrupt.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/genhd.h>
+#include <linux/ps2esdi.h>
+#include <linux/blkdev.h>
+#include <linux/mca-legacy.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <asm/mca_dma.h>
+#include <asm/uaccess.h>
+
+#define PS2ESDI_IRQ 14
+#define MAX_HD 2
+#define MAX_RETRIES 5
+#define MAX_16BIT 65536
+#define ESDI_TIMEOUT   0xf000
+#define ESDI_STAT_TIMEOUT 4
+
+#define TYPE_0_CMD_BLK_LENGTH 2
+#define TYPE_1_CMD_BLK_LENGTH 4
+
+static void reset_ctrl(void);
+
+static int ps2esdi_geninit(void);
+
+static void do_ps2esdi_request(request_queue_t * q);
+
+static void ps2esdi_readwrite(int cmd, struct request *req);
+
+static void ps2esdi_fill_cmd_block(u_short * cmd_blk, u_short cmd,
+u_short cyl, u_short head, u_short sector, u_short length, u_char drive);
+
+static int ps2esdi_out_cmd_blk(u_short * cmd_blk);
+
+static void ps2esdi_prep_dma(char *buffer, u_short length, u_char dma_xmode);
+
+static irqreturn_t ps2esdi_interrupt_handler(int irq, void *dev_id,
+				      struct pt_regs *regs);
+static void (*current_int_handler) (u_int) = NULL;
+static void ps2esdi_normal_interrupt_handler(u_int);
+static void ps2esdi_initial_reset_int_handler(u_int);
+static void ps2esdi_geometry_int_handler(u_int);
+static int ps2esdi_ioctl(struct inode *inode, struct file *file,
+			 u_int cmd, u_long arg);
+
+static int ps2esdi_read_status_words(int num_words, int max_words, u_short * buffer);
+
+static void dump_cmd_complete_status(u_int int_ret_code);
+
+static void ps2esdi_get_device_cfg(void);
+
+static void ps2esdi_reset_timer(unsigned long unused);
+
+static u_int dma_arb_level;		/* DMA arbitration level */
+
+static DECLARE_WAIT_QUEUE_HEAD(ps2esdi_int);
+
+static int no_int_yet;
+static int ps2esdi_drives;
+static u_short io_base;
+static struct timer_list esdi_timer =
+		TIMER_INITIALIZER(ps2esdi_reset_timer, 0, 0);
+static int reset_status;
+static int ps2esdi_slot = -1;
+static int tp720esdi = 0;	/* Is it Integrated ESDI of ThinkPad-720? */
+static int intg_esdi = 0;       /* If integrated adapter */
+struct ps2esdi_i_struct {
+	unsigned int head, sect, cyl, wpcom, lzone, ctl;
+};
+static DEFINE_SPINLOCK(ps2esdi_lock);
+static struct request_queue *ps2esdi_queue;
+static struct request *current_req;
+
+#if 0
+#if 0				/* try both - I don't know which one is better... UB */
+static struct ps2esdi_i_struct ps2esdi_info[MAX_HD] =
+{
+	{4, 48, 1553, 0, 0, 0},
+	{0, 0, 0, 0, 0, 0}};
+#else
+static struct ps2esdi_i_struct ps2esdi_info[MAX_HD] =
+{
+	{64, 32, 161, 0, 0, 0},
+	{0, 0, 0, 0, 0, 0}};
+#endif
+#endif
+static struct ps2esdi_i_struct ps2esdi_info[MAX_HD] =
+{
+	{0, 0, 0, 0, 0, 0},
+	{0, 0, 0, 0, 0, 0}};
+
+static struct block_device_operations ps2esdi_fops =
+{
+	.owner		= THIS_MODULE,
+	.ioctl		= ps2esdi_ioctl,
+};
+
+static struct gendisk *ps2esdi_gendisk[2];
+
+/* initialization routine called by ll_rw_blk.c   */
+static int __init ps2esdi_init(void)
+{
+
+	int error = 0;
+
+	/* register the device - pass the name and major number */
+	if (register_blkdev(PS2ESDI_MAJOR, "ed"))
+		return -EBUSY;
+
+	/* set up some global information - indicating device specific info */
+	ps2esdi_queue = blk_init_queue(do_ps2esdi_request, &ps2esdi_lock);
+	if (!ps2esdi_queue) {
+		unregister_blkdev(PS2ESDI_MAJOR, "ed");
+		return -ENOMEM;
+	}
+
+	/* some minor housekeeping - setup the global gendisk structure */
+	error = ps2esdi_geninit();
+	if (error) {
+		printk(KERN_WARNING "PS2ESDI: error initialising"
+			" device, releasing resources\n");
+		unregister_blkdev(PS2ESDI_MAJOR, "ed");
+		blk_cleanup_queue(ps2esdi_queue);
+		return error;
+	}
+	return 0;
+}				/* ps2esdi_init */
+
+#ifndef MODULE
+
+module_init(ps2esdi_init);
+
+#else
+
+static int cyl[MAX_HD] = {-1,-1};
+static int head[MAX_HD] = {-1, -1};
+static int sect[MAX_HD] = {-1, -1};
+
+module_param(tp720esdi, bool, 0);
+module_param_array(cyl, int, NULL, 0);
+module_param_array(head, int, NULL, 0);
+module_param_array(sect, int, NULL, 0);
+MODULE_LICENSE("GPL");
+
+int init_module(void) {
+	int drive;
+
+	for(drive = 0; drive < MAX_HD; drive++) {
+	        struct ps2esdi_i_struct *info = &ps2esdi_info[drive];
+
+        	if (cyl[drive] != -1) {
+		  	info->cyl = info->lzone = cyl[drive];
+			info->wpcom = 0;
+		}
+        	if (head[drive] != -1) {
+			info->head = head[drive];
+			info->ctl = (head[drive] > 8 ? 8 : 0);
+		}
+        	if (sect[drive] != -1) info->sect = sect[drive];
+	}
+	return ps2esdi_init();
+}
+
+void
+cleanup_module(void) {
+	int i;
+	if (ps2esdi_slot >= 0) {
+		mca_mark_as_unused(ps2esdi_slot);
+		mca_set_adapter_procfn(ps2esdi_slot, NULL, NULL);
+	}
+	release_region(io_base, 4);
+	free_dma(dma_arb_level);
+	free_irq(PS2ESDI_IRQ, &ps2esdi_gendisk);
+	unregister_blkdev(PS2ESDI_MAJOR, "ed");
+	blk_cleanup_queue(ps2esdi_queue);
+	for (i = 0; i < ps2esdi_drives; i++) {
+		del_gendisk(ps2esdi_gendisk[i]);
+		put_disk(ps2esdi_gendisk[i]);
+	}
+}
+#endif /* MODULE */
+
+/* handles boot time command line parameters */
+void __init tp720_setup(char *str, int *ints)
+{
+	/* no params, just sets the tp720esdi flag if it exists */
+
+	printk("%s: TP 720 ESDI flag set\n", DEVICE_NAME);
+	tp720esdi = 1;
+}
+
+void __init ed_setup(char *str, int *ints)
+{
+	int hdind = 0;
+
+	/* handles 3 parameters only - corresponding to
+	   1. Number of cylinders
+	   2. Number of heads
+	   3. Sectors/track
+	 */
+
+	if (ints[0] != 3)
+		return;
+
+	/* print out the information - seen at boot time */
+	printk("%s: ints[0]=%d ints[1]=%d ints[2]=%d ints[3]=%d\n",
+	       DEVICE_NAME, ints[0], ints[1], ints[2], ints[3]);
+
+	/* set the index into device specific information table */
+	if (ps2esdi_info[0].head != 0)
+		hdind = 1;
+
+	/* set up all the device information */
+	ps2esdi_info[hdind].head = ints[2];
+	ps2esdi_info[hdind].sect = ints[3];
+	ps2esdi_info[hdind].cyl = ints[1];
+	ps2esdi_info[hdind].wpcom = 0;
+	ps2esdi_info[hdind].lzone = ints[1];
+	ps2esdi_info[hdind].ctl = (ints[2] > 8 ? 8 : 0);
+#if 0				/* this may be needed for PS2/Mod.80, but it hurts ThinkPad! */
+	ps2esdi_drives = hdind + 1;	/* increment index for the next time */
+#endif
+}				/* ed_setup */
+
+static int ps2esdi_getinfo(char *buf, int slot, void *d)
+{
+	int len = 0;
+
+	len += sprintf(buf + len, "DMA Arbitration Level: %d\n",
+		       dma_arb_level);
+	len += sprintf(buf + len, "IO Port: %x\n", io_base);
+	len += sprintf(buf + len, "IRQ: 14\n");
+	len += sprintf(buf + len, "Drives: %d\n", ps2esdi_drives);
+
+	return len;
+}
+
+/* ps2 esdi specific initialization - called thru the gendisk chain */
+static int __init ps2esdi_geninit(void)
+{
+	/*
+	   The first part contains the initialization code
+	   for the ESDI disk subsystem.  All we really do
+	   is search for the POS registers of the controller
+	   to do some simple setup operations.  First, we
+	   must ensure that the controller is installed,
+	   enabled, and configured as PRIMARY.  Then we must
+	   determine the DMA arbitration level being used by
+	   the controller so we can handle data transfer
+	   operations properly.  If all of this works, then
+	   we will set the INIT_FLAG to a non-zero value.
+	 */
+
+	int slot = 0, i, reset_start, reset_end;
+	u_char status;
+	unsigned short adapterID;
+	int error = 0;
+
+	if ((slot = mca_find_adapter(INTG_ESDI_ID, 0)) != MCA_NOTFOUND) {
+		adapterID = INTG_ESDI_ID;
+		printk("%s: integrated ESDI adapter found in slot %d\n",
+		       DEVICE_NAME, slot+1);
+#ifndef MODULE
+		mca_set_adapter_name(slot, "PS/2 Integrated ESDI");
+#endif
+	} else if ((slot = mca_find_adapter(NRML_ESDI_ID, 0)) != MCA_NOTFOUND) {
+		adapterID = NRML_ESDI_ID;
+		printk("%s: normal ESDI adapter found in slot %d\n",
+		       DEVICE_NAME, slot+1);
+		mca_set_adapter_name(slot, "PS/2 ESDI");
+	} else {
+		return -ENODEV;
+	}
+
+	ps2esdi_slot = slot;
+	mca_mark_as_used(slot);
+	mca_set_adapter_procfn(slot, (MCA_ProcFn) ps2esdi_getinfo, NULL);
+
+	/* Found the slot - read the POS register 2 to get the necessary
+	   configuration and status information.  POS register 2 has the
+	   following information :
+	   Bit           Function
+	   7             reserved = 0
+	   6             arbitration method
+	                 0 - fairness enabled
+	                 1 - fairness disabled, linear priority assignment
+	   5-2           arbitration level
+	   1             alternate address
+	                 1 - use the alternate address
+	                 0 - use addresses 0x3510 - 0x3517
+	   0             adapter enable
+	 */
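+
+	/*
+	 * For example (illustrative), a stored POS value of 0x29
+	 * (binary 00101001) decodes as: fairness enabled (bit 6 = 0),
+	 * arbitration level (0x29 >> 2) & 0xf = 0xa, primary addresses
+	 * (bit 1 = 0), adapter enabled (bit 0 = 1).
+	 */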
+
+	status = mca_read_stored_pos(slot, 2);
+	/* is it enabled ? */
+	if (!(status & STATUS_ENABLED)) {
+		printk("%s: ESDI adapter disabled\n", DEVICE_NAME);
+		error = -ENODEV;
+		goto err_out1;
+	}
+	/* try to grab IRQ, and try to grab a slow IRQ if it fails, so we can
+	   share with the SCSI driver */
+	if (request_irq(PS2ESDI_IRQ, ps2esdi_interrupt_handler,
+		  SA_INTERRUPT | SA_SHIRQ, "PS/2 ESDI", &ps2esdi_gendisk)
+	    && request_irq(PS2ESDI_IRQ, ps2esdi_interrupt_handler,
+			   SA_SHIRQ, "PS/2 ESDI", &ps2esdi_gendisk)
+	    ) {
+		printk("%s: Unable to get IRQ %d\n", DEVICE_NAME, PS2ESDI_IRQ);
+		error = -EBUSY;
+		goto err_out1;
+	}
+	if (status & STATUS_ALTERNATE)
+		io_base = ALT_IO_BASE;
+	else
+		io_base = PRIMARY_IO_BASE;
+
+	if (!request_region(io_base, 4, "ed")) {
+		printk(KERN_WARNING"Unable to request region 0x%x\n", io_base);
+		error = -EBUSY;
+		goto err_out2;
+	}
+	/* get the dma arbitration level */
+	dma_arb_level = (status >> 2) & 0xf;
+
+	/* BA */
+	printk("%s: DMA arbitration level : %d\n",
+	       DEVICE_NAME, dma_arb_level);
+
+	LITE_ON;
+	current_int_handler = ps2esdi_initial_reset_int_handler;
+	reset_ctrl();
+	reset_status = 0;
+	reset_start = jiffies;
+	while (!reset_status) {
+		init_timer(&esdi_timer);
+		esdi_timer.expires = jiffies + HZ;
+		esdi_timer.data = 0;
+		add_timer(&esdi_timer);
+		sleep_on(&ps2esdi_int);
+	}
+	reset_end = jiffies;
+	LITE_OFF;
+	printk("%s: reset interrupt after %d jiffies,  %u.%02u secs\n",
+	       DEVICE_NAME, reset_end - reset_start, (reset_end - reset_start) / HZ,
+	       (reset_end - reset_start) % HZ);
+
+
+	/* Integrated ESDI Disk and Controller has only one drive! */
+	if (adapterID == INTG_ESDI_ID) {	/* if not "normal" PS2 ESDI adapter */
+		ps2esdi_drives = 1;	/* then we have only one physical disk! */
+		intg_esdi = 1;
+	}
+
+
+
+	/* finally this part sets up some global data structures etc. */
+
+	ps2esdi_get_device_cfg();
+
+	/* some annoyance in the above routine returns TWO drives?
+	 Is something else happening in the background?
+	 Regardless, we fix the # of drives again. AJK */
+	/* Integrated ESDI Disk and Controller has only one drive! */
+	if (adapterID == INTG_ESDI_ID)	/* if not "normal" PS2 ESDI adapter */
+		ps2esdi_drives = 1;	/* Not three or two, ONE DAMNIT! */
+
+	current_int_handler = ps2esdi_normal_interrupt_handler;
+
+	if (request_dma(dma_arb_level, "ed") !=0) {
+		printk(KERN_WARNING "PS2ESDI: Can't request dma-channel %d\n"
+			,(int) dma_arb_level);
+		error = -EBUSY;
+		goto err_out3;
+	}
+	blk_queue_max_sectors(ps2esdi_queue, 128);
+
+	error = -ENOMEM;
+	for (i = 0; i < ps2esdi_drives; i++) {
+		struct gendisk *disk = alloc_disk(64);
+		if (!disk)
+			goto err_out4;
+		disk->major = PS2ESDI_MAJOR;
+		disk->first_minor = i<<6;
+		sprintf(disk->disk_name, "ed%c", 'a'+i);
+		sprintf(disk->devfs_name, "ed/target%d", i);
+		disk->fops = &ps2esdi_fops;
+		ps2esdi_gendisk[i] = disk;
+	}
+
+	for (i = 0; i < ps2esdi_drives; i++) {
+		struct gendisk *disk = ps2esdi_gendisk[i];
+		set_capacity(disk, ps2esdi_info[i].head * ps2esdi_info[i].sect *
+				ps2esdi_info[i].cyl);
+		disk->queue = ps2esdi_queue;
+		disk->private_data = &ps2esdi_info[i];
+		add_disk(disk);
+	}
+	return 0;
+err_out4:
+	while (i--)
+		put_disk(ps2esdi_gendisk[i]);
+err_out3:
+	release_region(io_base, 4);
+err_out2:
+	free_irq(PS2ESDI_IRQ, &ps2esdi_gendisk);
+err_out1:
+	if (ps2esdi_slot >= 0) {
+		mca_mark_as_unused(ps2esdi_slot);
+		mca_set_adapter_procfn(ps2esdi_slot, NULL, NULL);
+	}
+	return error;
+}
+
+static void __init ps2esdi_get_device_cfg(void)
+{
+	u_short cmd_blk[TYPE_0_CMD_BLK_LENGTH];
+
+	/*BA */ printk("%s: Drive 0\n", DEVICE_NAME);
+	current_int_handler = ps2esdi_geometry_int_handler;
+	cmd_blk[0] = CMD_GET_DEV_CONFIG | 0x600;
+	cmd_blk[1] = 0;
+	no_int_yet = TRUE;
+	ps2esdi_out_cmd_blk(cmd_blk);
+	if (no_int_yet)
+		sleep_on(&ps2esdi_int);
+
+	if (ps2esdi_drives > 1) {
+		printk("%s: Drive 1\n", DEVICE_NAME);	/*BA */
+		cmd_blk[0] = CMD_GET_DEV_CONFIG | (1 << 5) | 0x600;
+		cmd_blk[1] = 0;
+		no_int_yet = TRUE;
+		ps2esdi_out_cmd_blk(cmd_blk);
+		if (no_int_yet)
+			sleep_on(&ps2esdi_int);
+	}			/* if second physical drive is present */
+	return;
+}
+
+/* strategy routine that handles most of the IO requests */
+static void do_ps2esdi_request(request_queue_t * q)
+{
+	struct request *req;
+	/* since this routine is called with interrupts disabled, they
+	   must be re-enabled before it finishes */
+
+	req = elv_next_request(q);
+	if (!req)
+		return;
+
+#if 0
+	printk("%s:got request. device : %s command : %d  sector : %ld count : %ld, buffer: %p\n",
+	       DEVICE_NAME,
+	       req->rq_disk->disk_name,
+	       req->cmd, req->sector,
+	       req->current_nr_sectors, req->buffer);
+#endif
+
+	/* check for above 16Mb dmas */
+	if (isa_virt_to_bus(req->buffer + req->current_nr_sectors * 512) > 16 * MB) {
+		printk("%s: DMA above 16MB not supported\n", DEVICE_NAME);
+		end_request(req, FAIL);
+		return;
+	}
+
+	if (req->sector+req->current_nr_sectors > get_capacity(req->rq_disk)) {
+		printk("Grrr. error. ps2esdi_drives: %d, %llu %llu\n",
+		    ps2esdi_drives, req->sector,
+		    (unsigned long long)get_capacity(req->rq_disk));
+		end_request(req, FAIL);
+		return;
+	}
+
+	switch (rq_data_dir(req)) {
+	case READ:
+		ps2esdi_readwrite(READ, req);
+		break;
+	case WRITE:
+		ps2esdi_readwrite(WRITE, req);
+		break;
+	default:
+		printk("%s: Unknown command\n", req->rq_disk->disk_name);
+		end_request(req, FAIL);
+		break;
+	}		/* handle different commands */
+}				/* main strategy routine */
+
+/* resets the ESDI adapter */
+static void reset_ctrl(void)
+{
+
+	u_long expire;
+	u_short status;
+
+	/* enable interrupts on the controller */
+	status = inb(ESDI_INTRPT);
+	outb((status & 0xe0) | ATT_EOI, ESDI_ATTN);	/* to be sure we don't have
+							   any interrupt pending... */
+	outb_p(CTRL_ENABLE_INTR, ESDI_CONTROL);
+
+	/* read the ESDI status port - if the controller is not busy,
+	   simply do a soft reset (fast) - otherwise we'll have to do a
+	   hard (slow) reset.  */
+	if (!(inb_p(ESDI_STATUS) & STATUS_BUSY)) {
+		/*BA */ printk("%s: soft reset...\n", DEVICE_NAME);
+		outb_p(CTRL_SOFT_RESET, ESDI_ATTN);
+	}
+	/* soft reset */ 
+	else {
+		/*BA */
+		printk("%s: hard reset...\n", DEVICE_NAME);
+		outb_p(CTRL_HARD_RESET, ESDI_CONTROL);
+		expire = jiffies + 2*HZ;
+		while (time_before(jiffies, expire));
+		outb_p(1, ESDI_CONTROL);
+	}			/* hard reset */
+
+
+}				/* reset the controller */
+
+/* called by the strategy routine to handle read and write requests */
+static void ps2esdi_readwrite(int cmd, struct request *req)
+{
+	struct ps2esdi_i_struct *p = req->rq_disk->private_data;
+	unsigned block = req->sector;
+	unsigned count = req->current_nr_sectors;
+	int drive = p - ps2esdi_info;
+	u_short track, head, cylinder, sector;
+	u_short cmd_blk[TYPE_1_CMD_BLK_LENGTH];
+
+	/* do some relevant arithmetic */
+	track = block / p->sect;
+	head = track % p->head;
+	cylinder = track / p->head;
+	sector = block % p->sect;
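+	/*
+	 * E.g. with an illustrative 64-head, 32-sectors/track geometry:
+	 * block 4711 is track 4711 / 32 = 147, i.e. head 147 % 64 = 19
+	 * on cylinder 147 / 64 = 2, at sector 4711 % 32 = 7.
+	 */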
+
+#if 0
+	printk("%s: cyl=%d head=%d sect=%d\n", DEVICE_NAME, cylinder, head, sector);
+#endif
+	/* call the routine that actually fills out a command block */
+	ps2esdi_fill_cmd_block
+	    (cmd_blk,
+	     (cmd == READ) ? CMD_READ : CMD_WRITE,
+	     cylinder, head, sector, count, drive);
+
+	/* send the command block to the controller */
+	current_req = req;
+	spin_unlock_irq(&ps2esdi_lock);
+	if (ps2esdi_out_cmd_blk(cmd_blk)) {
+		spin_lock_irq(&ps2esdi_lock);
+		printk("%s: Controller failed\n", DEVICE_NAME);
+		if ((++req->errors) >= MAX_RETRIES)
+			end_request(req, FAIL);
+	}
+	/* check for failure to put out the command block */ 
+	else {
+		spin_lock_irq(&ps2esdi_lock);
+#if 0
+		printk("%s: waiting for xfer\n", DEVICE_NAME);
+#endif
+		/* turn disk lights on */
+		LITE_ON;
+	}
+
+}				/* ps2esdi_readwrite */
+
+/* fill out the command block */
+static void ps2esdi_fill_cmd_block(u_short * cmd_blk, u_short cmd,
+ u_short cyl, u_short head, u_short sector, u_short length, u_char drive)
+{
+
+	cmd_blk[0] = (drive << 5) | cmd;
+	cmd_blk[1] = length;
+	cmd_blk[2] = ((cyl & 0x1f) << 11) | (head << 5) | sector;
+	cmd_blk[3] = (cyl & 0x3E0) >> 5;
+
+}				/* fill out the command block */
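+
+/*
+ * Worked example of the packing above: for cyl=100, head=3, sector=17,
+ * cmd_blk[2] = ((100 & 0x1f) << 11) | (3 << 5) | 17 = 0x2071 and
+ * cmd_blk[3] = (100 & 0x3E0) >> 5 = 3; the full cylinder number is
+ * recoverable as (cmd_blk[3] << 5) | (cmd_blk[2] >> 11) = 100.
+ */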
+
+/* write a command block to the controller */
+static int ps2esdi_out_cmd_blk(u_short * cmd_blk)
+{
+
+	int i;
+	unsigned long jif;
+	u_char status;
+
+	/* enable interrupts */
+	outb(CTRL_ENABLE_INTR, ESDI_CONTROL);
+
+	/* do not write to the controller, if it is busy */
+	for (jif = jiffies + ESDI_STAT_TIMEOUT;
+		time_after(jif, jiffies) &&
+			(inb(ESDI_STATUS) & STATUS_BUSY); )
+		;
+
+#if 0
+	printk("%s: i(1)=%ld\n", DEVICE_NAME, jif);
+#endif
+
+	/* if device is still busy - then just time out */
+	if (inb(ESDI_STATUS) & STATUS_BUSY) {
+		printk("%s: ps2esdi_out_cmd timed out (1)\n", DEVICE_NAME);
+		return ERROR;
+	}			/* timeout ??? */
+	/* Set up the attention register in the controller */
+	outb(((*cmd_blk) & 0xE0) | 1, ESDI_ATTN);
+
+#if 0
+	printk("%s: sending %d words to controller\n", DEVICE_NAME, (((*cmd_blk) >> 14) + 1) << 1);
+#endif
+
+	/* one by one send each word out */
+	for (i = (((*cmd_blk) >> 14) + 1) << 1; i; i--) {
+		status = inb(ESDI_STATUS);
+		for (jif = jiffies + ESDI_STAT_TIMEOUT;
+		     time_after(jif, jiffies) && (status & STATUS_BUSY) &&
+		   (status & STATUS_CMD_INF); status = inb(ESDI_STATUS));
+		if ((status & (STATUS_BUSY | STATUS_CMD_INF)) == STATUS_BUSY) {
+#if 0
+			printk("%s: sending %04X\n", DEVICE_NAME, *cmd_blk);
+#endif
+			outw(*cmd_blk++, ESDI_CMD_INT);
+		} else {
+			printk("%s: ps2esdi_out_cmd timed out while sending command (status=%02X)\n",
+			       DEVICE_NAME, status);
+			return ERROR;
+		}
+	}			/* send all words out */
+	return OK;
+}				/* send out the commands */
+
+
+/* prepare for dma - do all the necessary setup */
+static void ps2esdi_prep_dma(char *buffer, u_short length, u_char dma_xmode)
+{
+	unsigned long flags = claim_dma_lock();
+
+	mca_disable_dma(dma_arb_level);
+
+	mca_set_dma_addr(dma_arb_level, isa_virt_to_bus(buffer));
+
+	mca_set_dma_count(dma_arb_level, length * 512 / 2);
+
+	mca_set_dma_mode(dma_arb_level, dma_xmode);
+
+	mca_enable_dma(dma_arb_level);
+
+	release_dma_lock(flags);
+
+}				/* prepare for dma */
+
+
+
+static irqreturn_t ps2esdi_interrupt_handler(int irq, void *dev_id,
+				      struct pt_regs *regs)
+{
+	u_int int_ret_code;
+
+	if (inb(ESDI_STATUS) & STATUS_INTR) {
+		int_ret_code = inb(ESDI_INTRPT);
+		if (current_int_handler) {
+			/* Disable adapter interrupts till processing is finished */
+			outb(CTRL_DISABLE_INTR, ESDI_CONTROL);
+			current_int_handler(int_ret_code);
+		} else
+			printk("%s: help ! No interrupt handler.\n", DEVICE_NAME);
+	} else {
+		return IRQ_NONE;
+	}
+	return IRQ_HANDLED;
+}
+
+static void ps2esdi_initial_reset_int_handler(u_int int_ret_code)
+{
+
+	switch (int_ret_code & 0xf) {
+	case INT_RESET:
+		/*BA */
+		printk("%s: initial reset completed.\n", DEVICE_NAME);
+		outb((int_ret_code & 0xe0) | ATT_EOI, ESDI_ATTN);
+		wake_up(&ps2esdi_int);
+		break;
+	case INT_ATTN_ERROR:
+		printk("%s: Attention error. interrupt status : %02X\n", DEVICE_NAME,
+		       int_ret_code);
+		printk("%s: status: %02x\n", DEVICE_NAME, inb(ESDI_STATUS));
+		break;
+	default:
+		printk("%s: initial reset handler received interrupt: %02X\n",
+		       DEVICE_NAME, int_ret_code);
+		outb((int_ret_code & 0xe0) | ATT_EOI, ESDI_ATTN);
+		break;
+	}
+	outb(CTRL_ENABLE_INTR, ESDI_CONTROL);
+}
+
+
+static void ps2esdi_geometry_int_handler(u_int int_ret_code)
+{
+	u_int status, drive_num;
+	unsigned long rba;
+	int i;
+
+	drive_num = int_ret_code >> 5;
+	switch (int_ret_code & 0xf) {
+	case INT_CMD_COMPLETE:
+		for (i = ESDI_TIMEOUT; i && !(inb(ESDI_STATUS) & STATUS_STAT_AVAIL); i--);
+		if (!(inb(ESDI_STATUS) & STATUS_STAT_AVAIL)) {
+			printk("%s: timeout reading status word\n", DEVICE_NAME);
+			outb((int_ret_code & 0xe0) | ATT_EOI, ESDI_ATTN);
+			break;
+		}
+		status = inw(ESDI_STT_INT);
+		if ((status & 0x1F) == CMD_GET_DEV_CONFIG) {
+#define REPLY_WORDS 5		/* we already read word 0 */
+			u_short reply[REPLY_WORDS];
+
+			if (ps2esdi_read_status_words((status >> 8) - 1, REPLY_WORDS, reply)) {
+				/*BA */
+				printk("%s: Device Configuration Status for drive %u\n",
+				       DEVICE_NAME, drive_num);
+
+				printk("%s: Spares/cyls: %u", DEVICE_NAME, reply[0] >> 8);
+
+				printk
+				    ("Config bits: %s%s%s%s%s\n",
+				     (reply[0] & CONFIG_IS) ? "Invalid Secondary, " : "",
+				     ((reply[0] & CONFIG_ZD) && !(reply[0] & CONFIG_IS))
+				 ? "Zero Defect, " : "Defects Present, ",
+				     (reply[0] & CONFIG_SF) ? "Skewed Format, " : "",
+				     (reply[0] & CONFIG_FR) ? "Removable, " : "Non-Removable, ",
+				     (reply[0] & CONFIG_RT) ? "No Retries" : "Retries");
+
+				rba = reply[1] | ((unsigned long) reply[2] << 16);
+				printk("%s: Number of RBA's: %lu\n", DEVICE_NAME, rba);
+
+				printk("%s: Physical number of cylinders: %u, Sectors/Track: %u, Heads: %u\n",
+				       DEVICE_NAME, reply[3], reply[4] >> 8, reply[4] & 0xff);
+
+				if (!ps2esdi_info[drive_num].head) {
+					ps2esdi_info[drive_num].head = 64;
+					ps2esdi_info[drive_num].sect = 32;
+					ps2esdi_info[drive_num].cyl = rba / (64 * 32);
+					ps2esdi_info[drive_num].wpcom = 0;
+					ps2esdi_info[drive_num].lzone = ps2esdi_info[drive_num].cyl;
+					ps2esdi_info[drive_num].ctl = 8;
+					if (tp720esdi) {	/* store the retrieved parameters */
+						ps2esdi_info[0].head = reply[4] & 0Xff;
+						ps2esdi_info[0].sect = reply[4] >> 8;
+						ps2esdi_info[0].cyl = reply[3];
+						ps2esdi_info[0].wpcom = 0;
+						ps2esdi_info[0].lzone = reply[3];
+					} else {
+						if (!intg_esdi)
+							ps2esdi_drives++;
+					}
+				}
+#ifdef OBSOLETE
+				if (!ps2esdi_info[drive_num].head) {
+					ps2esdi_info[drive_num].head = reply[4] & 0Xff;
+					ps2esdi_info[drive_num].sect = reply[4] >> 8;
+					ps2esdi_info[drive_num].cyl = reply[3];
+					ps2esdi_info[drive_num].wpcom = 0;
+					ps2esdi_info[drive_num].lzone = reply[3];
+					if (tp720esdi) {	/* store the retrieved parameters */
+						ps2esdi_info[0].head = reply[4] & 0Xff;
+						ps2esdi_info[0].sect = reply[4] >> 8;
+						ps2esdi_info[0].cyl = reply[3];
+						ps2esdi_info[0].wpcom = 0;
+						ps2esdi_info[0].lzone = reply[3];
+					} else {
+						ps2esdi_drives++;
+					}
+				}
+#endif
+
+			} else
+				printk("%s: failed while getting device config\n", DEVICE_NAME);
+#undef REPLY_WORDS
+		} else
+			printk("%s: command %02X unknown by geometry handler\n",
+			       DEVICE_NAME, status & 0x1f);
+
+		outb((int_ret_code & 0xe0) | ATT_EOI, ESDI_ATTN);
+		break;
+
+	case INT_ATTN_ERROR:
+		printk("%s: Attention error. interrupt status : %02X\n", DEVICE_NAME,
+		       int_ret_code);
+		printk("%s: Device not available\n", DEVICE_NAME);
+		break;
+	case INT_CMD_ECC:
+	case INT_CMD_RETRY:
+	case INT_CMD_ECC_RETRY:
+	case INT_CMD_WARNING:
+	case INT_CMD_ABORT:
+	case INT_CMD_FAILED:
+	case INT_DMA_ERR:
+	case INT_CMD_BLK_ERR:
+		/*BA */ printk("%s: Whaa. Error occurred...\n", DEVICE_NAME);
+		dump_cmd_complete_status(int_ret_code);
+		outb((int_ret_code & 0xe0) | ATT_EOI, ESDI_ATTN);
+		break;
+	default:
+		printk("%s: Unknown interrupt reason: %02X\n",
+		       DEVICE_NAME, int_ret_code & 0xf);
+		outb((int_ret_code & 0xe0) | ATT_EOI, ESDI_ATTN);
+		break;
+	}
+
+	wake_up(&ps2esdi_int);
+	no_int_yet = FALSE;
+	outb(CTRL_ENABLE_INTR, ESDI_CONTROL);
+
+}
+
+static void ps2esdi_normal_interrupt_handler(u_int int_ret_code)
+{
+	unsigned long flags;
+	u_int status;
+	u_int ending;
+	int i;
+
+	switch (int_ret_code & 0x0f) {
+	case INT_TRANSFER_REQ:
+		ps2esdi_prep_dma(current_req->buffer,
+				 current_req->current_nr_sectors,
+		    (rq_data_dir(current_req) == READ)
+		    ? MCA_DMA_MODE_16 | MCA_DMA_MODE_WRITE | MCA_DMA_MODE_XFER
+		    : MCA_DMA_MODE_16 | MCA_DMA_MODE_READ);
+		outb(CTRL_ENABLE_DMA | CTRL_ENABLE_INTR, ESDI_CONTROL);
+		ending = -1;
+		break;
+
+	case INT_ATTN_ERROR:
+		printk("%s: Attention error. interrupt status : %02X\n", DEVICE_NAME,
+		       int_ret_code);
+		outb(CTRL_ENABLE_INTR, ESDI_CONTROL);
+		ending = FAIL;
+		break;
+
+	case INT_CMD_COMPLETE:
+		for (i = ESDI_TIMEOUT; i && !(inb(ESDI_STATUS) & STATUS_STAT_AVAIL); i--);
+		if (!(inb(ESDI_STATUS) & STATUS_STAT_AVAIL)) {
+			printk("%s: timeout reading status word\n", DEVICE_NAME);
+			outb((int_ret_code & 0xe0) | ATT_EOI, ESDI_ATTN);
+			outb(CTRL_ENABLE_INTR, ESDI_CONTROL);
+			if ((++current_req->errors) >= MAX_RETRIES)
+				ending = FAIL;
+			else
+				ending = -1;
+			break;
+		}
+		status = inw(ESDI_STT_INT);
+		switch (status & 0x1F) {
+		case (CMD_READ & 0xff):
+		case (CMD_WRITE & 0xff):
+			LITE_OFF;
+			outb((int_ret_code & 0xe0) | ATT_EOI, ESDI_ATTN);
+			outb(CTRL_ENABLE_INTR, ESDI_CONTROL);
+			ending = SUCCES;
+			break;
+		default:
+			printk("%s: interrupt for unknown command %02X\n",
+			       DEVICE_NAME, status & 0x1f);
+			outb((int_ret_code & 0xe0) | ATT_EOI, ESDI_ATTN);
+			outb(CTRL_ENABLE_INTR, ESDI_CONTROL);
+			ending = -1;
+			break;
+		}
+		break;
+	case INT_CMD_ECC:
+	case INT_CMD_RETRY:
+	case INT_CMD_ECC_RETRY:
+		LITE_OFF;
+		dump_cmd_complete_status(int_ret_code);
+		outb((int_ret_code & 0xe0) | ATT_EOI, ESDI_ATTN);
+		outb(CTRL_ENABLE_INTR, ESDI_CONTROL);
+		ending = SUCCES;
+		break;
+	case INT_CMD_WARNING:
+	case INT_CMD_ABORT:
+	case INT_CMD_FAILED:
+	case INT_DMA_ERR:
+		LITE_OFF;
+		dump_cmd_complete_status(int_ret_code);
+		outb((int_ret_code & 0xe0) | ATT_EOI, ESDI_ATTN);
+		outb(CTRL_ENABLE_INTR, ESDI_CONTROL);
+		if ((++current_req->errors) >= MAX_RETRIES)
+			ending = FAIL;
+		else
+			ending = -1;
+		break;
+
+	case INT_CMD_BLK_ERR:
+		dump_cmd_complete_status(int_ret_code);
+		outb((int_ret_code & 0xe0) | ATT_EOI, ESDI_ATTN);
+		outb(CTRL_ENABLE_INTR, ESDI_CONTROL);
+		ending = FAIL;
+		break;
+
+	case INT_CMD_FORMAT:
+		printk("%s: huh ? Who issued this format command ?\n"
+		       ,DEVICE_NAME);
+		outb((int_ret_code & 0xe0) | ATT_EOI, ESDI_ATTN);
+		outb(CTRL_ENABLE_INTR, ESDI_CONTROL);
+		ending = -1;
+		break;
+
+	case INT_RESET:
+		/* BA printk("%s: reset completed.\n", DEVICE_NAME) */ ;
+		outb((int_ret_code & 0xe0) | ATT_EOI, ESDI_ATTN);
+		outb(CTRL_ENABLE_INTR, ESDI_CONTROL);
+		ending = -1;
+		break;
+
+	default:
+		printk("%s: Unknown interrupt reason: %02X\n",
+		       DEVICE_NAME, int_ret_code & 0xf);
+		outb((int_ret_code & 0xe0) | ATT_EOI, ESDI_ATTN);
+		outb(CTRL_ENABLE_INTR, ESDI_CONTROL);
+		ending = -1;
+		break;
+	}
+	if(ending != -1) {
+		spin_lock_irqsave(&ps2esdi_lock, flags);
+		end_request(current_req, ending);
+		current_req = NULL;
+		do_ps2esdi_request(ps2esdi_queue);
+		spin_unlock_irqrestore(&ps2esdi_lock, flags);
+	}
+}				/* handle interrupts */
+
+
+
+static int ps2esdi_read_status_words(int num_words,
+				     int max_words,
+				     u_short * buffer)
+{
+	int i;
+
+	for (; max_words && num_words; max_words--, num_words--, buffer++) {
+		for (i = ESDI_TIMEOUT; i && !(inb(ESDI_STATUS) & STATUS_STAT_AVAIL); i--);
+		if (!(inb(ESDI_STATUS) & STATUS_STAT_AVAIL)) {
+			printk("%s: timeout reading status word\n", DEVICE_NAME);
+			return FAIL;
+		}
+		*buffer = inw(ESDI_STT_INT);
+	}
+	return SUCCES;
+}
+
+
+
+
+static void dump_cmd_complete_status(u_int int_ret_code)
+{
+#define WAIT_FOR_STATUS \
+  for(i=ESDI_TIMEOUT;i && !(inb(ESDI_STATUS) & STATUS_STAT_AVAIL);i--); \
+    if(!(inb(ESDI_STATUS) & STATUS_STAT_AVAIL)) { \
+    printk("%s: timeout reading status word\n",DEVICE_NAME); \
+    return; \
+    }
+
+	int i, word_count;
+	u_short stat_word;
+	u_long rba;
+
+	printk("%s: Device: %u, interrupt ID: %02X\n",
+	       DEVICE_NAME, int_ret_code >> 5,
+	       int_ret_code & 0xf);
+
+	WAIT_FOR_STATUS;
+	stat_word = inw(ESDI_STT_INT);
+	word_count = (stat_word >> 8) - 1;
+	printk("%s: %u status words, command: %02X\n", DEVICE_NAME, word_count,
+	       stat_word & 0xff);
+
+	if (word_count--) {
+		WAIT_FOR_STATUS;
+		stat_word = inw(ESDI_STT_INT);
+		printk("%s: command status code: %02X, command error code: %02X\n",
+		       DEVICE_NAME, stat_word >> 8, stat_word & 0xff);
+	}
+	if (word_count--) {
+		WAIT_FOR_STATUS;
+		stat_word = inw(ESDI_STT_INT);
+		printk("%s: device error code: %s%s%s%s%s,%02X\n", DEVICE_NAME,
+		       (stat_word & 0x1000) ? "Ready, " : "Not Ready, ",
+		  (stat_word & 0x0800) ? "Selected, " : "Not Selected, ",
+		       (stat_word & 0x0400) ? "Write Fault, " : "",
+		       (stat_word & 0x0200) ? "Track 0, " : "",
+		(stat_word & 0x0100) ? "Seek or command complete, " : "",
+		       stat_word >> 8);
+	}
+	if (word_count--) {
+		WAIT_FOR_STATUS;
+		stat_word = inw(ESDI_STT_INT);
+		printk("%s: Blocks to do: %u", DEVICE_NAME, stat_word);
+	}
+	if (word_count -= 2) {
+		WAIT_FOR_STATUS;
+		rba = inw(ESDI_STT_INT);
+		WAIT_FOR_STATUS;
+		rba |= inw(ESDI_STT_INT) << 16;
+		printk(", Last Cyl: %u Head: %u Sector: %u\n",
+		       (u_short) ((rba & 0x1ff80000) >> 11),
+		 (u_short) ((rba & 0x7E0) >> 5), (u_short) (rba & 0x1f));
+	} else
+		printk("\n");
+
+	if (word_count--) {
+		WAIT_FOR_STATUS;
+		stat_word = inw(ESDI_STT_INT);
+		printk("%s: Blocks required ECC: %u", DEVICE_NAME, stat_word);
+	}
+	printk("\n");
+
+#undef WAIT_FOR_STATUS
+
+}
+
+static int ps2esdi_ioctl(struct inode *inode,
+			 struct file *file, u_int cmd, u_long arg)
+{
+	struct ps2esdi_i_struct *p = inode->i_bdev->bd_disk->private_data;
+	struct ps2esdi_geometry geom;
+
+	if (cmd != HDIO_GETGEO)
+		return -EINVAL;
+	memset(&geom, 0, sizeof(geom));
+	geom.heads = p->head;
+	geom.sectors = p->sect;
+	geom.cylinders = p->cyl;
+	geom.start = get_start_sect(inode->i_bdev);
+	if (copy_to_user((void __user *)arg, &geom, sizeof(geom)))
+		return -EFAULT;
+	return 0;
+}
+
+static void ps2esdi_reset_timer(unsigned long unused)
+{
+
+	int status;
+
+	status = inb(ESDI_INTRPT);
+	if ((status & 0xf) == INT_RESET) {
+		outb((status & 0xe0) | ATT_EOI, ESDI_ATTN);
+		outb(CTRL_ENABLE_INTR, ESDI_CONTROL);
+		reset_status = 1;
+	}
+	wake_up(&ps2esdi_int);
+}
diff --git a/drivers/block/rd.c b/drivers/block/rd.c
new file mode 100644
index 0000000..145c1fb
--- /dev/null
+++ b/drivers/block/rd.c
@@ -0,0 +1,515 @@
+/*
+ * ramdisk.c - Multiple RAM disk driver - gzip-loading version - v. 0.8 beta.
+ *
+ * (C) Chad Page, Theodore Ts'o, et. al, 1995.
+ *
+ * This RAM disk is designed to have filesystems created on it and mounted
+ * just like a regular floppy disk.
+ *
+ * It also does something suggested by Linus: use the buffer cache as the
+ * RAM disk data.  This makes it possible to dynamically allocate the RAM disk
+ * buffer - with some consequences I have to deal with as I write this.
+ *
+ * This code is based on the original ramdisk.c, written mostly by
+ * Theodore Ts'o (TYT) in 1991.  The code was largely rewritten by
+ * Chad Page to use the buffer cache to store the RAM disk data in
+ * 1995; Theodore then took over the driver again, and cleaned it up
+ * for inclusion in the mainline kernel.
+ *
+ * The original CRAMDISK code was written by Richard Lyons, and
+ * adapted by Chad Page to use the new RAM disk interface.  Theodore
+ * Ts'o rewrote it so that both the compressed RAM disk loader and the
+ * kernel decompressor use the same inflate.c codebase.  The RAM disk
+ * loader now also loads into a dynamic (buffer cache based) RAM disk,
+ * not the old static RAM disk.  Support for the old static RAM disk has
+ * been completely removed.
+ *
+ * Loadable module support added by Tom Dyas.
+ *
+ * Further cleanups by Chad Page (page0588@sundance.sjsu.edu):
+ *	Cosmetic changes in #ifdef MODULE, code movement, etc.
+ * 	When the RAM disk module is removed, free the protected buffers
+ * 	Default RAM disk size changed to 2.88 MB
+ *
+ *  Added initrd: Werner Almesberger & Hans Lermen, Feb '96
+ *
+ * 4/25/96 : Made RAM disk size a parameter (default is now 4 MB)
+ *		- Chad Page
+ *
+ * Add support for fs images split across >1 disk, Paul Gortmaker, Mar '98
+ *
+ * Make block size and block size shift for RAM disks a global macro
+ * and set blk_size for -ENOSPC,     Werner Fink <werner@suse.de>, Apr '99
+ */
+
+#include <linux/config.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <asm/atomic.h>
+#include <linux/bio.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/devfs_fs_kernel.h>
+#include <linux/pagemap.h>
+#include <linux/blkdev.h>
+#include <linux/genhd.h>
+#include <linux/buffer_head.h>		/* for invalidate_bdev() */
+#include <linux/backing-dev.h>
+#include <linux/blkpg.h>
+#include <linux/writeback.h>
+
+#include <asm/uaccess.h>
+
+/* Various static variables go here.  Most are used only in the RAM disk code.
+ */
+
+static struct gendisk *rd_disks[CONFIG_BLK_DEV_RAM_COUNT];
+static struct block_device *rd_bdev[CONFIG_BLK_DEV_RAM_COUNT];/* Protected device data */
+static struct request_queue *rd_queue[CONFIG_BLK_DEV_RAM_COUNT];
+
+/*
+ * Parameters for the boot-loading of the RAM disk.  These are set by
+ * init/main.c (from arguments to the kernel command line) or from the
+ * architecture-specific setup routine (from the stored boot sector
+ * information).
+ */
+int rd_size = CONFIG_BLK_DEV_RAM_SIZE;		/* Size of the RAM disks */
+/*
+ * It would be very desirable to have a soft-blocksize (which, for the
+ * ramdisk driver, is also the hardblocksize ;) of PAGE_SIZE, because that
+ * way we'd achieve a far better MM footprint.  With a rd_blocksize of
+ * BLOCK_SIZE we make, in the worst case, PAGE_SIZE/BLOCK_SIZE buffer-pages
+ * unfreeable.  With a rd_blocksize of PAGE_SIZE instead we are sure that
+ * only 1 page will be protected.  Depending on the size of the ramdisk you
+ * may want to change the ramdisk blocksize to achieve a better or worse MM
+ * behaviour.  The default is still BLOCK_SIZE (needed by rd_load_image,
+ * which assumes the filesystem in the image uses a BLOCK_SIZE blocksize).
+ */
+static int rd_blocksize = BLOCK_SIZE;		/* blocksize of the RAM disks */
+
+/*
+ * Copyright (C) 2000 Linus Torvalds.
+ *               2000 Transmeta Corp.
+ * aops copied from ramfs.
+ */
+
+/*
+ * If a ramdisk page has buffers, some may be uptodate and some may be not.
+ * To bring the page uptodate we zero out the non-uptodate buffers.  The
+ * page must be locked.
+ */
+static void make_page_uptodate(struct page *page)
+{
+	if (page_has_buffers(page)) {
+		struct buffer_head *bh = page_buffers(page);
+		struct buffer_head *head = bh;
+
+		do {
+			if (!buffer_uptodate(bh)) {
+				memset(bh->b_data, 0, bh->b_size);
+				/*
+				 * akpm: I'm totally undecided about this.  The
+				 * buffer has just been magically brought "up to
+				 * date", but nobody should want to be reading
+				 * it anyway, because it hasn't been used for
+				 * anything yet.  It is still in a "not read
+				 * from disk yet" state.
+				 *
+				 * But non-uptodate buffers against an uptodate
+				 * page are against the rules.  So do it anyway.
+				 */
+				 set_buffer_uptodate(bh);
+			}
+		} while ((bh = bh->b_this_page) != head);
+	} else {
+		memset(page_address(page), 0, PAGE_CACHE_SIZE);
+	}
+	flush_dcache_page(page);
+	SetPageUptodate(page);
+}
+
+static int ramdisk_readpage(struct file *file, struct page *page)
+{
+	if (!PageUptodate(page))
+		make_page_uptodate(page);
+	unlock_page(page);
+	return 0;
+}
+
+static int ramdisk_prepare_write(struct file *file, struct page *page,
+				unsigned offset, unsigned to)
+{
+	if (!PageUptodate(page))
+		make_page_uptodate(page);
+	return 0;
+}
+
+static int ramdisk_commit_write(struct file *file, struct page *page,
+				unsigned offset, unsigned to)
+{
+	set_page_dirty(page);
+	return 0;
+}
+
+/*
+ * ->writepage to the blockdev's mapping has to redirty the page so that the
+ * VM doesn't go and steal it.  We return WRITEPAGE_ACTIVATE so that the VM
+ * won't try to (pointlessly) write the page again for a while.
+ *
+ * Really, these pages should not be on the LRU at all.
+ */
+static int ramdisk_writepage(struct page *page, struct writeback_control *wbc)
+{
+	if (!PageUptodate(page))
+		make_page_uptodate(page);
+	SetPageDirty(page);
+	if (wbc->for_reclaim)
+		return WRITEPAGE_ACTIVATE;
+	unlock_page(page);
+	return 0;
+}
+
+/*
+ * This is a little speedup thing: short-circuit attempts to write back the
+ * ramdisk blockdev inode to its non-existent backing store.
+ */
+static int ramdisk_writepages(struct address_space *mapping,
+				struct writeback_control *wbc)
+{
+	return 0;
+}
+
+/*
+ * ramdisk blockdev pages have their own ->set_page_dirty() because we don't
+ * want them to contribute to dirty memory accounting.
+ */
+static int ramdisk_set_page_dirty(struct page *page)
+{
+	SetPageDirty(page);
+	return 0;
+}
+
+static struct address_space_operations ramdisk_aops = {
+	.readpage	= ramdisk_readpage,
+	.prepare_write	= ramdisk_prepare_write,
+	.commit_write	= ramdisk_commit_write,
+	.writepage	= ramdisk_writepage,
+	.set_page_dirty	= ramdisk_set_page_dirty,
+	.writepages	= ramdisk_writepages,
+};
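+
+/*
+ * The sector-to-pagecache arithmetic below, worked through for 4kB
+ * pages (PAGE_CACHE_SHIFT == 12): sector 9 gives
+ * index = 9 >> (12 - 9) = 1 and offset = (9 << 9) & ~PAGE_CACHE_MASK
+ * = 4608 & 0xfff = 512, i.e. the second 512-byte block of the second
+ * pagecache page.
+ */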
+
+static int rd_blkdev_pagecache_IO(int rw, struct bio_vec *vec, sector_t sector,
+				struct address_space *mapping)
+{
+	pgoff_t index = sector >> (PAGE_CACHE_SHIFT - 9);
+	unsigned int vec_offset = vec->bv_offset;
+	int offset = (sector << 9) & ~PAGE_CACHE_MASK;
+	int size = vec->bv_len;
+	int err = 0;
+
+	do {
+		int count;
+		struct page *page;
+		char *src;
+		char *dst;
+
+		count = PAGE_CACHE_SIZE - offset;
+		if (count > size)
+			count = size;
+		size -= count;
+
+		page = grab_cache_page(mapping, index);
+		if (!page) {
+			err = -ENOMEM;
+			goto out;
+		}
+
+		if (!PageUptodate(page))
+			make_page_uptodate(page);
+
+		index++;
+
+		if (rw == READ) {
+			src = kmap_atomic(page, KM_USER0) + offset;
+			dst = kmap_atomic(vec->bv_page, KM_USER1) + vec_offset;
+		} else {
+			src = kmap_atomic(vec->bv_page, KM_USER0) + vec_offset;
+			dst = kmap_atomic(page, KM_USER1) + offset;
+		}
+		offset = 0;
+		vec_offset += count;
+
+		memcpy(dst, src, count);
+
+		kunmap_atomic(src, KM_USER0);
+		kunmap_atomic(dst, KM_USER1);
+
+		if (rw == READ)
+			flush_dcache_page(vec->bv_page);
+		else
+			set_page_dirty(page);
+		unlock_page(page);
+		put_page(page);
+	} while (size);
+
+ out:
+	return err;
+}
+
+/*
+ *  Basically, my strategy here is to set up a buffer-head which can't be
+ *  deleted, and make that my Ramdisk.  If the request is outside of the
+ *  allocated size, we must get rid of it...
+ *
+ * 19-JAN-1998  Richard Gooch <rgooch@atnf.csiro.au>  Added devfs support
+ *
+ */
+static int rd_make_request(request_queue_t *q, struct bio *bio)
+{
+	struct block_device *bdev = bio->bi_bdev;
+	struct address_space * mapping = bdev->bd_inode->i_mapping;
+	sector_t sector = bio->bi_sector;
+	unsigned long len = bio->bi_size >> 9;
+	int rw = bio_data_dir(bio);
+	struct bio_vec *bvec;
+	int ret = 0, i;
+
+	if (sector + len > get_capacity(bdev->bd_disk))
+		goto fail;
+
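+	/* Readahead gains nothing on a ramdisk, so treat it as a plain read */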
+	if (rw == READA)
+		rw = READ;
+
+	bio_for_each_segment(bvec, bio, i) {
+		ret |= rd_blkdev_pagecache_IO(rw, bvec, sector, mapping);
+		sector += bvec->bv_len >> 9;
+	}
+	if (ret)
+		goto fail;
+
+	bio_endio(bio, bio->bi_size, 0);
+	return 0;
+fail:
+	bio_io_error(bio, bio->bi_size);
+	return 0;
+}
+
+static int rd_ioctl(struct inode *inode, struct file *file,
+			unsigned int cmd, unsigned long arg)
+{
+	int error;
+	struct block_device *bdev = inode->i_bdev;
+
+	if (cmd != BLKFLSBUF)
+		return -ENOTTY;
+
+	/*
+	 * Special: we want to release the ramdisk memory; this is unlike the
+	 * other blockdevices, where this ioctl only flushes away the buffer
+	 * cache.
+	 */
+	error = -EBUSY;
+	down(&bdev->bd_sem);
+	if (bdev->bd_openers <= 2) {
+		truncate_inode_pages(bdev->bd_inode->i_mapping, 0);
+		error = 0;
+	}
+	up(&bdev->bd_sem);
+	return error;
+}
+
+/*
+ * This is the backing_dev_info for the blockdev inode itself.  It doesn't need
+ * writeback and it does not contribute to dirty memory accounting.
+ */
+static struct backing_dev_info rd_backing_dev_info = {
+	.ra_pages	= 0,	/* No readahead */
+	.capabilities	= BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK | BDI_CAP_MAP_COPY,
+	.unplug_io_fn	= default_unplug_io_fn,
+};
+
+/*
+ * This is the backing_dev_info for the files which live atop the ramdisk
+ * "device".  These files do need writeback and they do contribute to dirty
+ * memory accounting.
+ */
+static struct backing_dev_info rd_file_backing_dev_info = {
+	.ra_pages	= 0,	/* No readahead */
+	.capabilities	= BDI_CAP_MAP_COPY,	/* Does contribute to dirty memory */
+	.unplug_io_fn	= default_unplug_io_fn,
+};
+
+static int rd_open(struct inode *inode, struct file *filp)
+{
+	unsigned unit = iminor(inode);
+
+	if (rd_bdev[unit] == NULL) {
+		struct block_device *bdev = inode->i_bdev;
+		struct address_space *mapping;
+		unsigned bsize;
+		int gfp_mask;
+
+		inode = igrab(bdev->bd_inode);
+		rd_bdev[unit] = bdev;
+		bdev->bd_openers++;
+		bsize = bdev_hardsect_size(bdev);
+		bdev->bd_block_size = bsize;
+		inode->i_blkbits = blksize_bits(bsize);
+		inode->i_size = get_capacity(bdev->bd_disk)<<9;
+
+		mapping = inode->i_mapping;
+		mapping->a_ops = &ramdisk_aops;
+		mapping->backing_dev_info = &rd_backing_dev_info;
+		bdev->bd_inode_backing_dev_info = &rd_file_backing_dev_info;
+
+		/*
+		 * Deep badness.  rd_blkdev_pagecache_IO() needs to allocate
+		 * pagecache pages within a request_fn.  We cannot recur back
+		 * into the filesystem which is mounted atop the ramdisk, because
+		 * that would deadlock on fs locks.  And we really don't want
+		 * to reenter rd_blkdev_pagecache_IO when we're already within
+		 * that function.
+		 *
+		 * So we turn off __GFP_FS and __GFP_IO.
+		 *
+		 * And to give this thing a hope of working, turn on __GFP_HIGH.
+		 * Hopefully, there's enough regular memory allocation going on
+		 * for the page allocator emergency pools to keep the ramdisk
+		 * driver happy.
+		 */
+		gfp_mask = mapping_gfp_mask(mapping);
+		gfp_mask &= ~(__GFP_FS|__GFP_IO);
+		gfp_mask |= __GFP_HIGH;
+		mapping_set_gfp_mask(mapping, gfp_mask);
+	}
+
+	return 0;
+}
+
+static struct block_device_operations rd_bd_op = {
+	.owner =	THIS_MODULE,
+	.open =		rd_open,
+	.ioctl =	rd_ioctl,
+};
+
+/*
+ * Before freeing the module, invalidate all of the protected buffers!
+ */
+static void __exit rd_cleanup(void)
+{
+	int i;
+
+	for (i = 0; i < CONFIG_BLK_DEV_RAM_COUNT; i++) {
+		struct block_device *bdev = rd_bdev[i];
+		rd_bdev[i] = NULL;
+		if (bdev) {
+			invalidate_bdev(bdev, 1);
+			blkdev_put(bdev);
+		}
+		del_gendisk(rd_disks[i]);
+		put_disk(rd_disks[i]);
+		blk_cleanup_queue(rd_queue[i]);
+	}
+	devfs_remove("rd");
+	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
+}
+
+/*
+ * This is the registration and initialization section of the RAM disk driver
+ */
+static int __init rd_init(void)
+{
+	int i;
+	int err = -ENOMEM;
+
+	if (rd_blocksize > PAGE_SIZE || rd_blocksize < 512 ||
+			(rd_blocksize & (rd_blocksize-1))) {
+		printk("RAMDISK: wrong blocksize %d, reverting to defaults\n",
+		       rd_blocksize);
+		rd_blocksize = BLOCK_SIZE;
+	}
+
+	for (i = 0; i < CONFIG_BLK_DEV_RAM_COUNT; i++) {
+		rd_disks[i] = alloc_disk(1);
+		if (!rd_disks[i])
+			goto out;
+	}
+
+	if (register_blkdev(RAMDISK_MAJOR, "ramdisk")) {
+		err = -EIO;
+		goto out;
+	}
+
+	devfs_mk_dir("rd");
+
+	for (i = 0; i < CONFIG_BLK_DEV_RAM_COUNT; i++) {
+		struct gendisk *disk = rd_disks[i];
+
+		rd_queue[i] = blk_alloc_queue(GFP_KERNEL);
+		if (!rd_queue[i])
+			goto out_queue;
+
+		blk_queue_make_request(rd_queue[i], &rd_make_request);
+		blk_queue_hardsect_size(rd_queue[i], rd_blocksize);
+
+		/* rd_size is given in kB */
+		disk->major = RAMDISK_MAJOR;
+		disk->first_minor = i;
+		disk->fops = &rd_bd_op;
+		disk->queue = rd_queue[i];
+		disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
+		sprintf(disk->disk_name, "ram%d", i);
+		sprintf(disk->devfs_name, "rd/%d", i);
+		set_capacity(disk, rd_size * 2);
+		add_disk(rd_disks[i]);
+	}
+
+	/* rd_size is given in kB */
+	printk("RAMDISK driver initialized: "
+		"%d RAM disks of %dK size %d blocksize\n",
+		CONFIG_BLK_DEV_RAM_COUNT, rd_size, rd_blocksize);
+
+	return 0;
+out_queue:
+	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
+out:
+	while (i--) {
+		put_disk(rd_disks[i]);
+		blk_cleanup_queue(rd_queue[i]);
+	}
+	return err;
+}
+
+module_init(rd_init);
+module_exit(rd_cleanup);
+
+/* options - nonmodular */
+#ifndef MODULE
+static int __init ramdisk_size(char *str)
+{
+	rd_size = simple_strtol(str,NULL,0);
+	return 1;
+}
+static int __init ramdisk_size2(char *str)	/* kludge */
+{
+	return ramdisk_size(str);
+}
+static int __init ramdisk_blocksize(char *str)
+{
+	rd_blocksize = simple_strtol(str,NULL,0);
+	return 1;
+}
+__setup("ramdisk=", ramdisk_size);
+__setup("ramdisk_size=", ramdisk_size2);
+__setup("ramdisk_blocksize=", ramdisk_blocksize);
+#endif
+
+/* options - modular */
+module_param(rd_size, int, 0);
+MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes.");
+module_param(rd_blocksize, int, 0);
+MODULE_PARM_DESC(rd_blocksize, "Blocksize of each RAM disk in bytes.");
+MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/block/scsi_ioctl.c b/drivers/block/scsi_ioctl.c
new file mode 100644
index 0000000..689527a
--- /dev/null
+++ b/drivers/block/scsi_ioctl.c
@@ -0,0 +1,580 @@
+/*
+ * Copyright (C) 2001 Jens Axboe <axboe@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/completion.h>
+#include <linux/cdrom.h>
+#include <linux/slab.h>
+#include <linux/times.h>
+#include <asm/uaccess.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_ioctl.h>
+#include <scsi/scsi_cmnd.h>
+
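+/*
+ * The top three bits of a SCSI opcode select its command group, and each
+ * group implies a fixed CDB length; COMMAND_SIZE() indexes this table
+ * with (opcode >> 5) & 7.
+ */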
+/* Command group 3 is reserved and should never be used.  */
+const unsigned char scsi_command_size[8] =
+{
+	6, 10, 10, 12,
+	16, 12, 10, 10
+};
+
+EXPORT_SYMBOL(scsi_command_size);
+
+#define BLK_DEFAULT_TIMEOUT	(60 * HZ)
+
+#include <scsi/sg.h>
+
+static int sg_get_version(int __user *p)
+{
+	static int sg_version_num = 30527;
+	return put_user(sg_version_num, p);
+}
+
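+/*
+ * At the block layer we know nothing about host/bus/target/lun numbering,
+ * so these two ioctls simply report zeros.
+ */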
+static int scsi_get_idlun(request_queue_t *q, int __user *p)
+{
+	return put_user(0, p);
+}
+
+static int scsi_get_bus(request_queue_t *q, int __user *p)
+{
+	return put_user(0, p);
+}
+
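+/* SG timeouts are exchanged with userspace in USER_HZ ticks but kept in HZ */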
+static int sg_get_timeout(request_queue_t *q)
+{
+	return q->sg_timeout / (HZ / USER_HZ);
+}
+
+static int sg_set_timeout(request_queue_t *q, int __user *p)
+{
+	int timeout, err = get_user(timeout, p);
+
+	if (!err)
+		q->sg_timeout = timeout * (HZ / USER_HZ);
+
+	return err;
+}
+
+static int sg_get_reserved_size(request_queue_t *q, int __user *p)
+{
+	return put_user(q->sg_reserved_size, p);
+}
+
+static int sg_set_reserved_size(request_queue_t *q, int __user *p)
+{
+	int size, err = get_user(size, p);
+
+	if (err)
+		return err;
+
+	if (size < 0)
+		return -EINVAL;
+	if (size > (q->max_sectors << 9))
+		size = q->max_sectors << 9;
+
+	q->sg_reserved_size = size;
+	return 0;
+}
+
+/*
+ * This will always report that we are an emulated (ATAPI) host, even for a
+ * real SCSI drive; it's not clear that's worth doing anything about (why
+ * would you care??)
+ */
+static int sg_emulated_host(request_queue_t *q, int __user *p)
+{
+	return put_user(1, p);
+}
+
+#define CMD_READ_SAFE	0x01
+#define CMD_WRITE_SAFE	0x02
+#define CMD_WARNED	0x04
+#define safe_for_read(cmd)	[cmd] = CMD_READ_SAFE
+#define safe_for_write(cmd)	[cmd] = CMD_WRITE_SAFE
+
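+/*
+ * Decide whether the caller may issue this SCSI command.  Opcodes marked
+ * read-safe are allowed to anyone who can open the device, write-safe
+ * opcodes additionally require a writable open, and anything else needs
+ * CAP_SYS_RAWIO.  The safe_for_read()/safe_for_write() designated
+ * initializers build the 256-entry policy table indexed by opcode.
+ */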
+static int verify_command(struct file *file, unsigned char *cmd)
+{
+	static unsigned char cmd_type[256] = {
+
+		/* Basic read-only commands */
+		safe_for_read(TEST_UNIT_READY),
+		safe_for_read(REQUEST_SENSE),
+		safe_for_read(READ_6),
+		safe_for_read(READ_10),
+		safe_for_read(READ_12),
+		safe_for_read(READ_16),
+		safe_for_read(READ_BUFFER),
+		safe_for_read(READ_LONG),
+		safe_for_read(INQUIRY),
+		safe_for_read(MODE_SENSE),
+		safe_for_read(MODE_SENSE_10),
+		safe_for_read(LOG_SENSE),
+		safe_for_read(START_STOP),
+		safe_for_read(GPCMD_VERIFY_10),
+		safe_for_read(VERIFY_16),
+
+		/* Audio CD commands */
+		safe_for_read(GPCMD_PLAY_CD),
+		safe_for_read(GPCMD_PLAY_AUDIO_10),
+		safe_for_read(GPCMD_PLAY_AUDIO_MSF),
+		safe_for_read(GPCMD_PLAY_AUDIO_TI),
+		safe_for_read(GPCMD_PAUSE_RESUME),
+
+		/* CD/DVD data reading */
+		safe_for_read(GPCMD_READ_BUFFER_CAPACITY),
+		safe_for_read(GPCMD_READ_CD),
+		safe_for_read(GPCMD_READ_CD_MSF),
+		safe_for_read(GPCMD_READ_DISC_INFO),
+		safe_for_read(GPCMD_READ_CDVD_CAPACITY),
+		safe_for_read(GPCMD_READ_DVD_STRUCTURE),
+		safe_for_read(GPCMD_READ_HEADER),
+		safe_for_read(GPCMD_READ_TRACK_RZONE_INFO),
+		safe_for_read(GPCMD_READ_SUBCHANNEL),
+		safe_for_read(GPCMD_READ_TOC_PMA_ATIP),
+		safe_for_read(GPCMD_REPORT_KEY),
+		safe_for_read(GPCMD_SCAN),
+		safe_for_read(GPCMD_GET_CONFIGURATION),
+		safe_for_read(GPCMD_READ_FORMAT_CAPACITIES),
+		safe_for_read(GPCMD_GET_EVENT_STATUS_NOTIFICATION),
+		safe_for_read(GPCMD_GET_PERFORMANCE),
+		safe_for_read(GPCMD_SEEK),
+		safe_for_read(GPCMD_STOP_PLAY_SCAN),
+
+		/* Basic writing commands */
+		safe_for_write(WRITE_6),
+		safe_for_write(WRITE_10),
+		safe_for_write(WRITE_VERIFY),
+		safe_for_write(WRITE_12),
+		safe_for_write(WRITE_VERIFY_12),
+		safe_for_write(WRITE_16),
+		safe_for_write(WRITE_LONG),
+		safe_for_write(ERASE),
+		safe_for_write(GPCMD_MODE_SELECT_10),
+		safe_for_write(MODE_SELECT),
+		safe_for_write(LOG_SELECT),
+		safe_for_write(GPCMD_BLANK),
+		safe_for_write(GPCMD_CLOSE_TRACK),
+		safe_for_write(GPCMD_FLUSH_CACHE),
+		safe_for_write(GPCMD_FORMAT_UNIT),
+		safe_for_write(GPCMD_REPAIR_RZONE_TRACK),
+		safe_for_write(GPCMD_RESERVE_RZONE_TRACK),
+		safe_for_write(GPCMD_SEND_DVD_STRUCTURE),
+		safe_for_write(GPCMD_SEND_EVENT),
+		safe_for_write(GPCMD_SEND_KEY),
+		safe_for_write(GPCMD_SEND_OPC),
+		safe_for_write(GPCMD_SEND_CUE_SHEET),
+		safe_for_write(GPCMD_SET_SPEED),
+		safe_for_write(GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL),
+		safe_for_write(GPCMD_LOAD_UNLOAD),
+		safe_for_write(GPCMD_SET_STREAMING),
+	};
+	unsigned char type = cmd_type[cmd[0]];
+
+	/* Anybody who can open the device can do a read-safe command */
+	if (type & CMD_READ_SAFE)
+		return 0;
+
+	/* Write-safe commands just require a writable open.. */
+	if (type & CMD_WRITE_SAFE) {
+		if (file->f_mode & FMODE_WRITE)
+			return 0;
+	}
+
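+	/* Remember unknown opcodes so each one is only warned about once */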
+	if (!type) {
+		cmd_type[cmd[0]] = CMD_WARNED;
+		printk(KERN_WARNING "scsi: unknown opcode 0x%02x\n", cmd[0]);
+	}
+
+	/* And root can do any command.. */
+	if (capable(CAP_SYS_RAWIO))
+		return 0;
+
+	/* Otherwise fail it with an "Operation not permitted" */
+	return -EPERM;
+}
+
+static int sg_io(struct file *file, request_queue_t *q,
+		struct gendisk *bd_disk, struct sg_io_hdr *hdr)
+{
+	unsigned long start_time;
+	int reading, writing;
+	struct request *rq;
+	struct bio *bio;
+	char sense[SCSI_SENSE_BUFFERSIZE];
+	unsigned char cmd[BLK_MAX_CDB];
+
+	if (hdr->interface_id != 'S')
+		return -EINVAL;
+	if (hdr->cmd_len > BLK_MAX_CDB)
+		return -EINVAL;
+	if (copy_from_user(cmd, hdr->cmdp, hdr->cmd_len))
+		return -EFAULT;
+	if (verify_command(file, cmd))
+		return -EPERM;
+
+	/*
+	 * scatter/gather through an iovec isn't supported yet; we'll do
+	 * that later
+	 */
+	if (hdr->iovec_count)
+		return -EOPNOTSUPP;
+
+	if (hdr->dxfer_len > (q->max_sectors << 9))
+		return -EIO;
+
+	reading = writing = 0;
+	if (hdr->dxfer_len) {
+		switch (hdr->dxfer_direction) {
+		default:
+			return -EINVAL;
+		case SG_DXFER_TO_FROM_DEV:
+			reading = 1;
+			/* fall through */
+		case SG_DXFER_TO_DEV:
+			writing = 1;
+			break;
+		case SG_DXFER_FROM_DEV:
+			reading = 1;
+			break;
+		}
+
+		rq = blk_rq_map_user(q, writing ? WRITE : READ, hdr->dxferp,
+				     hdr->dxfer_len);
+
+		if (IS_ERR(rq))
+			return PTR_ERR(rq);
+	} else
+		rq = blk_get_request(q, READ, __GFP_WAIT);
+
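+	/*
+	 * A transfer with data has had the user buffer mapped into the
+	 * request above; a zero-length command just needs a bare request.
+	 */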
+	/*
+	 * fill in request structure
+	 */
+	rq->cmd_len = hdr->cmd_len;
+	memcpy(rq->cmd, cmd, hdr->cmd_len);
+	if (sizeof(rq->cmd) != hdr->cmd_len)
+		memset(rq->cmd + hdr->cmd_len, 0, sizeof(rq->cmd) - hdr->cmd_len);
+
+	memset(sense, 0, sizeof(sense));
+	rq->sense = sense;
+	rq->sense_len = 0;
+
+	rq->flags |= REQ_BLOCK_PC;
+	bio = rq->bio;
+
+	/*
+	 * bounce this after holding a reference to the original bio, it's
+	 * needed for proper unmapping
+	 */
+	if (rq->bio)
+		blk_queue_bounce(q, &rq->bio);
+
+	rq->timeout = (hdr->timeout * HZ) / 1000;
+	if (!rq->timeout)
+		rq->timeout = q->sg_timeout;
+	if (!rq->timeout)
+		rq->timeout = BLK_DEFAULT_TIMEOUT;
+
+	start_time = jiffies;
+
+	/* Ignore the return value.  All information is passed back to the
+	 * caller (if the caller doesn't check it, that is his problem).
+	 * N.B. a non-zero SCSI status is _not_ necessarily an error.
+	 */
+	blk_execute_rq(q, bd_disk, rq);
+
+	/* write to all output members */
+	hdr->status = 0xff & rq->errors;
+	hdr->masked_status = status_byte(rq->errors);
+	hdr->msg_status = msg_byte(rq->errors);
+	hdr->host_status = host_byte(rq->errors);
+	hdr->driver_status = driver_byte(rq->errors);
+	hdr->info = 0;
+	if (hdr->masked_status || hdr->host_status || hdr->driver_status)
+		hdr->info |= SG_INFO_CHECK;
+	hdr->resid = rq->data_len;
+	hdr->duration = ((jiffies - start_time) * 1000) / HZ;
+	hdr->sb_len_wr = 0;
+
+	if (rq->sense_len && hdr->sbp) {
+		int len = min((unsigned int) hdr->mx_sb_len, rq->sense_len);
+
+		if (!copy_to_user(hdr->sbp, rq->sense, len))
+			hdr->sb_len_wr = len;
+	}
+
+	if (blk_rq_unmap_user(rq, bio, hdr->dxfer_len))
+		return -EFAULT;
+
+	/* may not have succeeded, but output values written to control
+	 * structure (struct sg_io_hdr).  */
+	return 0;
+}
+
+#define FORMAT_UNIT_TIMEOUT		(2 * 60 * 60 * HZ)
+#define START_STOP_TIMEOUT		(60 * HZ)
+#define MOVE_MEDIUM_TIMEOUT		(5 * 60 * HZ)
+#define READ_ELEMENT_STATUS_TIMEOUT	(5 * 60 * HZ)
+#define READ_DEFECT_DATA_TIMEOUT	(60 * HZ )
+#define OMAX_SB_LEN 16          /* For backward compatibility */
+
+static int sg_scsi_ioctl(struct file *file, request_queue_t *q,
+			 struct gendisk *bd_disk, Scsi_Ioctl_Command __user *sic)
+{
+	struct request *rq;
+	int err;
+	unsigned int in_len, out_len, bytes, opcode, cmdlen;
+	char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
+
+	/*
+	 * get the in and out lengths; verify they don't exceed a page worth of data
+	 */
+	if (get_user(in_len, &sic->inlen))
+		return -EFAULT;
+	if (get_user(out_len, &sic->outlen))
+		return -EFAULT;
+	if (in_len > PAGE_SIZE || out_len > PAGE_SIZE)
+		return -EINVAL;
+	if (get_user(opcode, sic->data))
+		return -EFAULT;
+
+	bytes = max(in_len, out_len);
+	if (bytes) {
+		buffer = kmalloc(bytes, q->bounce_gfp | GFP_USER| __GFP_NOWARN);
+		if (!buffer)
+			return -ENOMEM;
+
+		memset(buffer, 0, bytes);
+	}
+
+	rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT);
+
+	cmdlen = COMMAND_SIZE(opcode);
+
+	/*
+	 * get command and data to send to device, if any
+	 */
+	err = -EFAULT;
+	rq->cmd_len = cmdlen;
+	if (copy_from_user(rq->cmd, sic->data, cmdlen))
+		goto error;
+
+	if (copy_from_user(buffer, sic->data + cmdlen, in_len))
+		goto error;
+
+	err = verify_command(file, rq->cmd);
+	if (err)
+		goto error;
+
+	switch (opcode) {
+		case SEND_DIAGNOSTIC:
+		case FORMAT_UNIT:
+			rq->timeout = FORMAT_UNIT_TIMEOUT;
+			break;
+		case START_STOP:
+			rq->timeout = START_STOP_TIMEOUT;
+			break;
+		case MOVE_MEDIUM:
+			rq->timeout = MOVE_MEDIUM_TIMEOUT;
+			break;
+		case READ_ELEMENT_STATUS:
+			rq->timeout = READ_ELEMENT_STATUS_TIMEOUT;
+			break;
+		case READ_DEFECT_DATA:
+			rq->timeout = READ_DEFECT_DATA_TIMEOUT;
+			break;
+		default:
+			rq->timeout = BLK_DEFAULT_TIMEOUT;
+			break;
+	}
+
+	memset(sense, 0, sizeof(sense));
+	rq->sense = sense;
+	rq->sense_len = 0;
+
+	rq->data = buffer;
+	rq->data_len = bytes;
+	rq->flags |= REQ_BLOCK_PC;
+
+	blk_execute_rq(q, bd_disk, rq);
+	err = rq->errors & 0xff;	/* only 8 bit SCSI status */
+	if (err) {
+		if (rq->sense_len && rq->sense) {
+			bytes = (OMAX_SB_LEN > rq->sense_len) ?
+				rq->sense_len : OMAX_SB_LEN;
+			if (copy_to_user(sic->data, rq->sense, bytes))
+				err = -EFAULT;
+		}
+	} else {
+		if (copy_to_user(sic->data, buffer, out_len))
+			err = -EFAULT;
+	}
+	
+error:
+	kfree(buffer);
+	blk_put_request(rq);
+	return err;
+}
+
+int scsi_cmd_ioctl(struct file *file, struct gendisk *bd_disk, unsigned int cmd, void __user *arg)
+{
+	request_queue_t *q;
+	struct request *rq;
+	int close = 0, err;
+
+	q = bd_disk->queue;
+	if (!q)
+		return -ENXIO;
+
+	if (blk_get_queue(q))
+		return -ENXIO;
+
+	switch (cmd) {
+		/*
+		 * new sgv3 interface
+		 */
+		case SG_GET_VERSION_NUM:
+			err = sg_get_version(arg);
+			break;
+		case SCSI_IOCTL_GET_IDLUN:
+			err = scsi_get_idlun(q, arg);
+			break;
+		case SCSI_IOCTL_GET_BUS_NUMBER:
+			err = scsi_get_bus(q, arg);
+			break;
+		case SG_SET_TIMEOUT:
+			err = sg_set_timeout(q, arg);
+			break;
+		case SG_GET_TIMEOUT:
+			err = sg_get_timeout(q);
+			break;
+		case SG_GET_RESERVED_SIZE:
+			err = sg_get_reserved_size(q, arg);
+			break;
+		case SG_SET_RESERVED_SIZE:
+			err = sg_set_reserved_size(q, arg);
+			break;
+		case SG_EMULATED_HOST:
+			err = sg_emulated_host(q, arg);
+			break;
+		case SG_IO: {
+			struct sg_io_hdr hdr;
+
+			err = -EFAULT;
+			if (copy_from_user(&hdr, arg, sizeof(hdr)))
+				break;
+			err = sg_io(file, q, bd_disk, &hdr);
+			if (err == -EFAULT)
+				break;
+
+			if (copy_to_user(arg, &hdr, sizeof(hdr)))
+				err = -EFAULT;
+			break;
+		}
+		case CDROM_SEND_PACKET: {
+			struct cdrom_generic_command cgc;
+			struct sg_io_hdr hdr;
+
+			err = -EFAULT;
+			if (copy_from_user(&cgc, arg, sizeof(cgc)))
+				break;
+			cgc.timeout = clock_t_to_jiffies(cgc.timeout);
+			memset(&hdr, 0, sizeof(hdr));
+			hdr.interface_id = 'S';
+			hdr.cmd_len = sizeof(cgc.cmd);
+			hdr.dxfer_len = cgc.buflen;
+			err = 0;
+			switch (cgc.data_direction) {
+				case CGC_DATA_UNKNOWN:
+					hdr.dxfer_direction = SG_DXFER_UNKNOWN;
+					break;
+				case CGC_DATA_WRITE:
+					hdr.dxfer_direction = SG_DXFER_TO_DEV;
+					break;
+				case CGC_DATA_READ:
+					hdr.dxfer_direction = SG_DXFER_FROM_DEV;
+					break;
+				case CGC_DATA_NONE:
+					hdr.dxfer_direction = SG_DXFER_NONE;
+					break;
+				default:
+					err = -EINVAL;
+			}
+			if (err)
+				break;
+
+			hdr.dxferp = cgc.buffer;
+			hdr.sbp = cgc.sense;
+			if (hdr.sbp)
+				hdr.mx_sb_len = sizeof(struct request_sense);
+			hdr.timeout = cgc.timeout;
+			hdr.cmdp = ((struct cdrom_generic_command __user*) arg)->cmd;
+			hdr.cmd_len = sizeof(cgc.cmd);
+
+			err = sg_io(file, q, bd_disk, &hdr);
+			if (err == -EFAULT)
+				break;
+
+			if (hdr.status)
+				err = -EIO;
+
+			cgc.stat = err;
+			cgc.buflen = hdr.resid;
+			if (copy_to_user(arg, &cgc, sizeof(cgc)))
+				err = -EFAULT;
+
+			break;
+		}
+
+		/*
+		 * old junk scsi send command ioctl
+		 */
+		case SCSI_IOCTL_SEND_COMMAND:
+			printk(KERN_WARNING "program %s is using a deprecated SCSI ioctl, please convert it to SG_IO\n", current->comm);
+			err = -EINVAL;
+			if (!arg)
+				break;
+
+			err = sg_scsi_ioctl(file, q, bd_disk, arg);
+			break;
+		case CDROMCLOSETRAY:
+			close = 1;
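+			/* fall through */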
+		case CDROMEJECT:
+			rq = blk_get_request(q, WRITE, __GFP_WAIT);
+			rq->flags |= REQ_BLOCK_PC;
+			rq->data = NULL;
+			rq->data_len = 0;
+			rq->timeout = BLK_DEFAULT_TIMEOUT;
+			memset(rq->cmd, 0, sizeof(rq->cmd));
+			rq->cmd[0] = GPCMD_START_STOP_UNIT;
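+			/*
+			 * Byte 4 of START STOP UNIT: the LoEj bit (0x02)
+			 * ejects the medium; adding the Start bit (0x01)
+			 * closes the tray instead.
+			 */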
+			rq->cmd[4] = 0x02 + (close != 0);
+			rq->cmd_len = 6;
+			err = blk_execute_rq(q, bd_disk, rq);
+			blk_put_request(rq);
+			break;
+		default:
+			err = -ENOTTY;
+	}
+
+	blk_put_queue(q);
+	return err;
+}
+
+EXPORT_SYMBOL(scsi_cmd_ioctl);
diff --git a/drivers/block/smart1,2.h b/drivers/block/smart1,2.h
new file mode 100644
index 0000000..a0b403a
--- /dev/null
+++ b/drivers/block/smart1,2.h
@@ -0,0 +1,278 @@
+/*
+ *    Disk Array driver for Compaq SMART2 Controllers
+ *    Copyright 1998 Compaq Computer Corporation
+ *
+ *    This program is free software; you can redistribute it and/or modify
+ *    it under the terms of the GNU General Public License as published by
+ *    the Free Software Foundation; either version 2 of the License, or
+ *    (at your option) any later version.
+ *
+ *    This program is distributed in the hope that it will be useful,
+ *    but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ *    NON INFRINGEMENT.  See the GNU General Public License for more details.
+ *
+ *    You should have received a copy of the GNU General Public License
+ *    along with this program; if not, write to the Free Software
+ *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
+ *
+ *    If you want to make changes, improve or add functionality to this
+ *    driver, you'll probably need the Compaq Array Controller Interface
+ *    Specification (Document number ECG086/1198)
+ */
+
+/*
+ * This file contains the controller communication implementation for
+ * Compaq SMART-1 and SMART-2 controllers.  To the best of my knowledge,
+ * this should support:
+ *
+ *  PCI:
+ *  SMART-2/P, SMART-2DH, SMART-2SL, SMART-221, SMART-3100ES, SMART-3200
+ *  Integrated SMART Array Controller, SMART-4200, SMART-4250ES
+ *
+ *  EISA:
+ *  SMART-2/E, SMART, IAES, IDA-2, IDA
+ */
+
+/*
+ * Memory mapped FIFO interface (SMART 42xx cards)
+ */
+static void smart4_submit_command(ctlr_info_t *h, cmdlist_t *c)
+{
+	writel(c->busaddr, h->vaddr + S42XX_REQUEST_PORT_OFFSET);
+}
+
+/*
+ * This card is the opposite of the other cards:
+ *   0 turns interrupts on...
+ *   0x08 turns them off...
+ */
+static void smart4_intr_mask(ctlr_info_t *h, unsigned long val)
+{
+	if (val) {	/* Turn interrupts on */
+		writel(0, h->vaddr + S42XX_REPLY_INTR_MASK_OFFSET);
+	} else {	/* Turn them off */
+		writel(S42XX_INTR_OFF,
+		       h->vaddr + S42XX_REPLY_INTR_MASK_OFFSET);
+	}
+}
+
+/*
+ * For the older cards FIFO Full = 0.
+ * On this card 0 means there is room, anything else means FIFO Full.
+ */
+static unsigned long smart4_fifo_full(ctlr_info_t *h)
+{
+	return !readl(h->vaddr + S42XX_REQUEST_PORT_OFFSET);
+}
+
+/*
+ * This type of controller returns -1 if the fifo is empty,
+ * not 0 like the others, and we need to let it know that we
+ * have read a value out.
+ */
+static unsigned long smart4_completed(ctlr_info_t *h)
+{
+	long register_value =
+		readl(h->vaddr + S42XX_REPLY_PORT_OFFSET);
+
+	/* Fifo is empty */
+	if (register_value == 0xffffffff)
+		return 0;
+
+	/* Acknowledge the reply by writing a 0 to the port we just read from */
+	writel(0, h->vaddr + S42XX_REPLY_PORT_OFFSET);
+
+	return (unsigned long) register_value;
+}
+
+/*
+ * This hardware returns interrupt pending at a different place, and
+ * it does not tell us if the fifo is empty; we have to check for
+ * that by getting a 0 back from the command_completed call.
+ */
+static unsigned long smart4_intr_pending(ctlr_info_t *h)
+{
+	unsigned long register_value =
+		readl(h->vaddr + S42XX_INTR_STATUS);
+
+	if (register_value & S42XX_INTR_PENDING)
+		return FIFO_NOT_EMPTY;
+	return 0;
+}
+
+static struct access_method smart4_access = {
+	smart4_submit_command,
+	smart4_intr_mask,
+	smart4_fifo_full,
+	smart4_intr_pending,
+	smart4_completed,
+};
+
+/*
+ * Memory mapped FIFO interface (PCI SMART2 and SMART 3xxx cards)
+ */
+static void smart2_submit_command(ctlr_info_t *h, cmdlist_t *c)
+{
+	writel(c->busaddr, h->vaddr + COMMAND_FIFO);
+}
+
+static void smart2_intr_mask(ctlr_info_t *h, unsigned long val)
+{
+	writel(val, h->vaddr + INTR_MASK);
+}
+
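+/*
+ * Per the note above, on these cards a read of COMMAND_FIFO returns 0 when
+ * the FIFO is full, so a nonzero return means there is room to submit
+ * another command.
+ */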
+static unsigned long smart2_fifo_full(ctlr_info_t *h)
+{
+	return readl(h->vaddr + COMMAND_FIFO);
+}
+
+static unsigned long smart2_completed(ctlr_info_t *h)
+{
+	return readl(h->vaddr + COMMAND_COMPLETE_FIFO);
+}
+
+static unsigned long smart2_intr_pending(ctlr_info_t *h)
+{
+	return readl(h->vaddr + INTR_PENDING);
+}
+
+static struct access_method smart2_access = {
+	smart2_submit_command,
+	smart2_intr_mask,
+	smart2_fifo_full,
+	smart2_intr_pending,
+	smart2_completed,
+};
+
+/*
+ *  IO access for SMART-2/E cards
+ */
+static void smart2e_submit_command(ctlr_info_t *h, cmdlist_t *c)
+{
+	outl(c->busaddr, h->io_mem_addr + COMMAND_FIFO);
+}
+
+static void smart2e_intr_mask(ctlr_info_t *h, unsigned long val)
+{
+	outl(val, h->io_mem_addr + INTR_MASK);
+}
+
+static unsigned long smart2e_fifo_full(ctlr_info_t *h)
+{
+	return inl(h->io_mem_addr + COMMAND_FIFO);
+}
+
+static unsigned long smart2e_completed(ctlr_info_t *h)
+{
+	return inl(h->io_mem_addr + COMMAND_COMPLETE_FIFO);
+}
+
+static unsigned long smart2e_intr_pending(ctlr_info_t *h)
+{
+	return inl(h->io_mem_addr + INTR_PENDING);
+}
+
+static struct access_method smart2e_access = {
+	smart2e_submit_command,
+	smart2e_intr_mask,
+	smart2e_fifo_full,
+	smart2e_intr_pending,
+	smart2e_completed,
+};
+
+/*
+ *  IO access for older SMART-1 type cards
+ */
+#define SMART1_SYSTEM_MASK		0xC8E
+#define SMART1_SYSTEM_DOORBELL		0xC8F
+#define SMART1_LOCAL_MASK		0xC8C
+#define SMART1_LOCAL_DOORBELL		0xC8D
+#define SMART1_INTR_MASK		0xC89
+#define SMART1_LISTADDR			0xC90
+#define SMART1_LISTLEN			0xC94
+#define SMART1_TAG			0xC97
+#define SMART1_COMPLETE_ADDR		0xC98
+#define SMART1_LISTSTATUS		0xC9E
+
+#define CHANNEL_BUSY			0x01
+#define CHANNEL_CLEAR			0x02
+
+static void smart1_submit_command(ctlr_info_t *h, cmdlist_t *c)
+{
+	/*
+	 * This __u16 is actually a bunch of control flags on SMART
+	 * and below.  We want them all to be zero.
+	 */
+	c->hdr.size = 0;
+
+	outb(CHANNEL_CLEAR, h->io_mem_addr + SMART1_SYSTEM_DOORBELL);
+
+	outl(c->busaddr, h->io_mem_addr + SMART1_LISTADDR);
+	outw(c->size, h->io_mem_addr + SMART1_LISTLEN);
+
+	outb(CHANNEL_BUSY, h->io_mem_addr + SMART1_LOCAL_DOORBELL);
+}
+
+static void smart1_intr_mask(ctlr_info_t *h, unsigned long val)
+{
+	if (val == 1) {
+		outb(0xFD, h->io_mem_addr + SMART1_SYSTEM_DOORBELL);
+		outb(CHANNEL_BUSY, h->io_mem_addr + SMART1_LOCAL_DOORBELL);
+		outb(0x01, h->io_mem_addr + SMART1_INTR_MASK);
+		outb(0x01, h->io_mem_addr + SMART1_SYSTEM_MASK);
+	} else {
+		outb(0, h->io_mem_addr + SMART1_SYSTEM_MASK);
+	}
+}
+
+static unsigned long smart1_fifo_full(ctlr_info_t *h)
+{
+	unsigned char chan;
+	chan = inb(h->io_mem_addr + SMART1_SYSTEM_DOORBELL) & CHANNEL_CLEAR;
+	return chan;
+}
+
+static unsigned long smart1_completed(ctlr_info_t *h)
+{
+	unsigned char status;
+	unsigned long cmd;
+
+	if (inb(h->io_mem_addr + SMART1_SYSTEM_DOORBELL) & CHANNEL_BUSY) {
+		outb(CHANNEL_BUSY, h->io_mem_addr + SMART1_SYSTEM_DOORBELL);
+
+		cmd = inl(h->io_mem_addr + SMART1_COMPLETE_ADDR);
+		status = inb(h->io_mem_addr + SMART1_LISTSTATUS);
+
+		outb(CHANNEL_CLEAR, h->io_mem_addr + SMART1_LOCAL_DOORBELL);
+
+		/*
+		 * bus_to_virt() is x86 (actually Compaq x86) only, so it's ok
+		 */
+		if (cmd)
+			((cmdlist_t *)bus_to_virt(cmd))->req.hdr.rcode = status;
+	} else {
+		cmd = 0;
+	}
+	return cmd;
+}
+
+static unsigned long smart1_intr_pending(ctlr_info_t *h)
+{
+	unsigned char chan;
+	chan = inb(h->io_mem_addr + SMART1_SYSTEM_DOORBELL) & CHANNEL_BUSY;
+	return chan;
+}
+
+static struct access_method smart1_access = {
+	smart1_submit_command,
+	smart1_intr_mask,
+	smart1_fifo_full,
+	smart1_intr_pending,
+	smart1_completed,
+};
diff --git a/drivers/block/swim3.c b/drivers/block/swim3.c
new file mode 100644
index 0000000..5b09cf1
--- /dev/null
+++ b/drivers/block/swim3.c
@@ -0,0 +1,1154 @@
+/*
+ * Driver for the SWIM3 (Super Woz Integrated Machine 3)
+ * floppy controller found on Power Macintoshes.
+ *
+ * Copyright (C) 1996 Paul Mackerras.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/*
+ * TODO:
+ * handle 2 drives
+ * handle GCR disks
+ */
+
+#include <linux/config.h>
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/fd.h>
+#include <linux/ioctl.h>
+#include <linux/blkdev.h>
+#include <linux/devfs_fs_kernel.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <asm/io.h>
+#include <asm/dbdma.h>
+#include <asm/prom.h>
+#include <asm/uaccess.h>
+#include <asm/mediabay.h>
+#include <asm/machdep.h>
+#include <asm/pmac_feature.h>
+
+static struct request_queue *swim3_queue;
+static struct gendisk *disks[2];
+static struct request *fd_req;
+
+#define MAX_FLOPPIES	2
+
+enum swim_state {
+	idle,
+	locating,
+	seeking,
+	settling,
+	do_transfer,
+	jogging,
+	available,
+	revalidating,
+	ejecting
+};
+
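+/* Each register is a single byte, padded so registers sit 16 bytes apart */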
+#define REG(x)	unsigned char x; char x ## _pad[15];
+
+/*
+ * The names for these registers mostly represent speculation on my part.
+ * It will be interesting to see how close they are to the names Apple uses.
+ */
+struct swim3 {
+	REG(data);
+	REG(timer);		/* counts down at 1MHz */
+	REG(error);
+	REG(mode);
+	REG(select);		/* controls CA0, CA1, CA2 and LSTRB signals */
+	REG(setup);
+	REG(control);		/* writing bits clears them */
+	REG(status);		/* writing bits sets them in control */
+	REG(intr);
+	REG(nseek);		/* # tracks to seek */
+	REG(ctrack);		/* current track number */
+	REG(csect);		/* current sector number */
+	REG(gap3);		/* size of gap 3 in track format */
+	REG(sector);		/* sector # to read or write */
+	REG(nsect);		/* # sectors to read or write */
+	REG(intr_enable);
+};
+
+#define control_bic	control
+#define control_bis	status
+
+/* Bits in select register */
+#define CA_MASK		7
+#define LSTRB		8
+
+/* Bits in control register */
+#define DO_SEEK		0x80
+#define FORMAT		0x40
+#define SELECT		0x20
+#define WRITE_SECTORS	0x10
+#define DO_ACTION	0x08
+#define DRIVE2_ENABLE	0x04
+#define DRIVE_ENABLE	0x02
+#define INTR_ENABLE	0x01
+
+/* Bits in status register */
+#define FIFO_1BYTE	0x80
+#define FIFO_2BYTE	0x40
+#define ERROR		0x20
+#define DATA		0x08
+#define RDDATA		0x04
+#define INTR_PENDING	0x02
+#define MARK_BYTE	0x01
+
+/* Bits in intr and intr_enable registers */
+#define ERROR_INTR	0x20
+#define DATA_CHANGED	0x10
+#define TRANSFER_DONE	0x08
+#define SEEN_SECTOR	0x04
+#define SEEK_DONE	0x02
+#define TIMER_DONE	0x01
+
+/* Bits in error register */
+#define ERR_DATA_CRC	0x80
+#define ERR_ADDR_CRC	0x40
+#define ERR_OVERRUN	0x04
+#define ERR_UNDERRUN	0x01
+
+/* Bits in setup register */
+#define S_SW_RESET	0x80
+#define S_GCR_WRITE	0x40
+#define S_IBM_DRIVE	0x20
+#define S_TEST_MODE	0x10
+#define S_FCLK_DIV2	0x08
+#define S_GCR		0x04
+#define S_COPY_PROT	0x02
+#define S_INV_WDATA	0x01
+
+/* Select values for swim3_action */
+#define SEEK_POSITIVE	0
+#define SEEK_NEGATIVE	4
+#define STEP		1
+#define MOTOR_ON	2
+#define MOTOR_OFF	6
+#define INDEX		3
+#define EJECT		7
+#define SETMFM		9
+#define SETGCR		13
+
+/* Select values for swim3_select and swim3_readbit */
+#define STEP_DIR	0
+#define STEPPING	1
+#define MOTOR_ON	2
+#define RELAX		3	/* also eject in progress */
+#define READ_DATA_0	4
+#define TWOMEG_DRIVE	5
+#define SINGLE_SIDED	6	/* drive or diskette is 4MB type? */
+#define DRIVE_PRESENT	7
+#define DISK_IN		8
+#define WRITE_PROT	9
+#define TRACK_ZERO	10
+#define TACHO		11
+#define READ_DATA_1	12
+#define MFM_MODE	13
+#define SEEK_COMPLETE	14
+#define ONEMEG_MEDIA	15
+
+/* Definitions of values used in writing and formatting */
+#define DATA_ESCAPE	0x99
+#define GCR_SYNC_EXC	0x3f
+#define GCR_SYNC_CONV	0x80
+#define GCR_FIRST_MARK	0xd5
+#define GCR_SECOND_MARK	0xaa
+#define GCR_ADDR_MARK	"\xd5\xaa\x00"
+#define GCR_DATA_MARK	"\xd5\xaa\x0b"
+#define GCR_SLIP_BYTE	"\x27\xaa"
+#define GCR_SELF_SYNC	"\x3f\xbf\x1e\x34\x3c\x3f"
+
+#define DATA_99		"\x99\x99"
+#define MFM_ADDR_MARK	"\x99\xa1\x99\xa1\x99\xa1\x99\xfe"
+#define MFM_INDEX_MARK	"\x99\xc2\x99\xc2\x99\xc2\x99\xfc"
+#define MFM_GAP_LEN	12
+
+struct floppy_state {
+	enum swim_state	state;
+	struct swim3 __iomem *swim3;	/* hardware registers */
+	struct dbdma_regs __iomem *dma;	/* DMA controller registers */
+	int	swim3_intr;	/* interrupt number for SWIM3 */
+	int	dma_intr;	/* interrupt number for DMA channel */
+	int	cur_cyl;	/* cylinder head is on, or -1 */
+	int	cur_sector;	/* last sector we saw go past */
+	int	req_cyl;	/* the cylinder for the current r/w request */
+	int	head;		/* head number ditto */
+	int	req_sector;	/* sector number ditto */
+	int	scount;		/* # sectors we're transferring at present */
+	int	retries;
+	int	settle_time;
+	int	secpercyl;	/* disk geometry information */
+	int	secpertrack;
+	int	total_secs;
+	int	write_prot;	/* 1 if write-protected, 0 if not, -1 dunno */
+	struct dbdma_cmd *dma_cmd;
+	int	ref_count;
+	int	expect_cyl;
+	struct timer_list timeout;
+	int	timeout_pending;
+	int	ejected;
+	wait_queue_head_t wait;
+	int	wanted;
+	struct device_node*	media_bay; /* NULL when not in bay */
+	char	dbdma_cmd_space[5 * sizeof(struct dbdma_cmd)];
+};
+
+static struct floppy_state floppy_states[MAX_FLOPPIES];
+static int floppy_count = 0;
+static DEFINE_SPINLOCK(swim3_lock);
+
+static unsigned short write_preamble[] = {
+	0x4e4e, 0x4e4e, 0x4e4e, 0x4e4e, 0x4e4e,	/* gap field */
+	0, 0, 0, 0, 0, 0,			/* sync field */
+	0x99a1, 0x99a1, 0x99a1, 0x99fb,		/* data address mark */
+	0x990f					/* no escape for 512 bytes */
+};
+
+static unsigned short write_postamble[] = {
+	0x9904,					/* insert CRC */
+	0x4e4e, 0x4e4e,
+	0x9908,					/* stop writing */
+	0, 0, 0, 0, 0, 0
+};
+
+static void swim3_select(struct floppy_state *fs, int sel);
+static void swim3_action(struct floppy_state *fs, int action);
+static int swim3_readbit(struct floppy_state *fs, int bit);
+static void do_fd_request(request_queue_t * q);
+static void start_request(struct floppy_state *fs);
+static void set_timeout(struct floppy_state *fs, int nticks,
+			void (*proc)(unsigned long));
+static void scan_track(struct floppy_state *fs);
+static void seek_track(struct floppy_state *fs, int n);
+static void init_dma(struct dbdma_cmd *cp, int cmd, void *buf, int count);
+static void setup_transfer(struct floppy_state *fs);
+static void act(struct floppy_state *fs);
+static void scan_timeout(unsigned long data);
+static void seek_timeout(unsigned long data);
+static void settle_timeout(unsigned long data);
+static void xfer_timeout(unsigned long data);
+static irqreturn_t swim3_interrupt(int irq, void *dev_id, struct pt_regs *regs);
+/*static void fd_dma_interrupt(int irq, void *dev_id, struct pt_regs *regs);*/
+static int grab_drive(struct floppy_state *fs, enum swim_state state,
+		      int interruptible);
+static void release_drive(struct floppy_state *fs);
+static int fd_eject(struct floppy_state *fs);
+static int floppy_ioctl(struct inode *inode, struct file *filp,
+			unsigned int cmd, unsigned long param);
+static int floppy_open(struct inode *inode, struct file *filp);
+static int floppy_release(struct inode *inode, struct file *filp);
+static int floppy_check_change(struct gendisk *disk);
+static int floppy_revalidate(struct gendisk *disk);
+static int swim3_add_device(struct device_node *swims);
+int swim3_init(void);
+
+#ifndef CONFIG_PMAC_PBOOK
+#define check_media_bay(which, what)	1
+#endif
+
+static void swim3_select(struct floppy_state *fs, int sel)
+{
+	struct swim3 __iomem *sw = fs->swim3;
+
+	out_8(&sw->select, RELAX);
+	if (sel & 8)
+		out_8(&sw->control_bis, SELECT);
+	else
+		out_8(&sw->control_bic, SELECT);
+	out_8(&sw->select, sel & CA_MASK);
+}
+
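+/*
+ * Make the drive perform an action: put the action code on the CA lines
+ * via swim3_select(), then pulse LSTRB so the drive latches it.
+ */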
+static void swim3_action(struct floppy_state *fs, int action)
+{
+	struct swim3 __iomem *sw = fs->swim3;
+
+	swim3_select(fs, action);
+	udelay(1);
+	out_8(&sw->select, sw->select | LSTRB);
+	udelay(2);
+	out_8(&sw->select, sw->select & ~LSTRB);
+	udelay(1);
+}
+
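+/* Read one drive status bit; the DATA line is active-low, hence the test */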
+static int swim3_readbit(struct floppy_state *fs, int bit)
+{
+	struct swim3 __iomem *sw = fs->swim3;
+	int stat;
+
+	swim3_select(fs, bit);
+	udelay(1);
+	stat = in_8(&sw->status);
+	return (stat & DATA) == 0;
+}
+
+static void do_fd_request(request_queue_t *q)
+{
+	int i;
+
+	for (i = 0; i < floppy_count; i++) {
+		if (floppy_states[i].media_bay &&
+			check_media_bay(floppy_states[i].media_bay, MB_FD))
+			continue;
+		start_request(&floppy_states[i]);
+	}
+	sti();
+}
+
+static void start_request(struct floppy_state *fs)
+{
+	struct request *req;
+	unsigned long x;
+
+	if (fs->state == idle && fs->wanted) {
+		fs->state = available;
+		wake_up(&fs->wait);
+		return;
+	}
+	while (fs->state == idle && (req = elv_next_request(swim3_queue))) {
+#if 0
+		printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%ld buf=%p\n",
+		       req->rq_disk->disk_name, req->cmd,
+		       (long)req->sector, req->nr_sectors, req->buffer);
+		printk("           rq_status=%d errors=%d current_nr_sectors=%ld\n",
+		       req->rq_status, req->errors, req->current_nr_sectors);
+#endif
+
+		if (req->sector < 0 || req->sector >= fs->total_secs) {
+			end_request(req, 0);
+			continue;
+		}
+		if (req->current_nr_sectors == 0) {
+			end_request(req, 1);
+			continue;
+		}
+		if (fs->ejected) {
+			end_request(req, 0);
+			continue;
+		}
+
+		if (rq_data_dir(req) == WRITE) {
+			if (fs->write_prot < 0)
+				fs->write_prot = swim3_readbit(fs, WRITE_PROT);
+			if (fs->write_prot) {
+				end_request(req, 0);
+				continue;
+			}
+		}
+
+		/* Do not remove the cast. req->sector is now a sector_t and
+		 * can be 64 bits, but it will never go past 32 bits for this
+		 * driver anyway, so we can safely cast it down and not have
+		 * to do a 64/32 division
+		 */
+		fs->req_cyl = ((long)req->sector) / fs->secpercyl;
+		x = ((long)req->sector) % fs->secpercyl;
+		fs->head = x / fs->secpertrack;
+		fs->req_sector = x % fs->secpertrack + 1;
+		fd_req = req;
+		fs->state = do_transfer;
+		fs->retries = 0;
+
+		act(fs);
+	}
+}
+
+static void set_timeout(struct floppy_state *fs, int nticks,
+			void (*proc)(unsigned long))
+{
+	unsigned long flags;
+
+	save_flags(flags); cli();
+	if (fs->timeout_pending)
+		del_timer(&fs->timeout);
+	fs->timeout.expires = jiffies + nticks;
+	fs->timeout.function = proc;
+	fs->timeout.data = (unsigned long) fs;
+	add_timer(&fs->timeout);
+	fs->timeout_pending = 1;
+	restore_flags(flags);
+}
+
+static inline void scan_track(struct floppy_state *fs)
+{
+	struct swim3 __iomem *sw = fs->swim3;
+
+	swim3_select(fs, READ_DATA_0);
+	in_8(&sw->intr);		/* clear SEEN_SECTOR bit */
+	in_8(&sw->error);
+	out_8(&sw->intr_enable, SEEN_SECTOR);
+	out_8(&sw->control_bis, DO_ACTION);
+	/* enable intr when track found */
+	set_timeout(fs, HZ, scan_timeout);	/* enable timeout */
+}
+
+static inline void seek_track(struct floppy_state *fs, int n)
+{
+	struct swim3 __iomem *sw = fs->swim3;
+
+	if (n >= 0) {
+		swim3_action(fs, SEEK_POSITIVE);
+		sw->nseek = n;
+	} else {
+		swim3_action(fs, SEEK_NEGATIVE);
+		sw->nseek = -n;
+	}
+	fs->expect_cyl = (fs->cur_cyl >= 0)? fs->cur_cyl + n: -1;
+	swim3_select(fs, STEP);
+	in_8(&sw->error);
+	/* enable intr when seek finished */
+	out_8(&sw->intr_enable, SEEK_DONE);
+	out_8(&sw->control_bis, DO_SEEK);
+	set_timeout(fs, 3*HZ, seek_timeout);	/* enable timeout */
+	fs->settle_time = 0;
+}
+
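+/*
+ * Fill in one DBDMA descriptor.  Descriptor fields are little-endian, so
+ * they are written with the little-endian st_le16/st_le32 stores.
+ */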
+static inline void init_dma(struct dbdma_cmd *cp, int cmd,
+			    void *buf, int count)
+{
+	st_le16(&cp->req_count, count);
+	st_le16(&cp->command, cmd);
+	st_le32(&cp->phy_addr, virt_to_bus(buf));
+	cp->xfer_status = 0;
+}
+
+static inline void setup_transfer(struct floppy_state *fs)
+{
+	int n;
+	struct swim3 __iomem *sw = fs->swim3;
+	struct dbdma_cmd *cp = fs->dma_cmd;
+	struct dbdma_regs __iomem *dr = fs->dma;
+
+	if (fd_req->current_nr_sectors <= 0) {
+		printk(KERN_ERR "swim3: transfer 0 sectors?\n");
+		return;
+	}
+	if (rq_data_dir(fd_req) == WRITE)
+		n = 1;
+	else {
+		n = fs->secpertrack - fs->req_sector + 1;
+		if (n > fd_req->current_nr_sectors)
+			n = fd_req->current_nr_sectors;
+	}
+	fs->scount = n;
+	swim3_select(fs, fs->head? READ_DATA_1: READ_DATA_0);
+	out_8(&sw->sector, fs->req_sector);
+	out_8(&sw->nsect, n);
+	out_8(&sw->gap3, 0);
+	out_le32(&dr->cmdptr, virt_to_bus(cp));
+	if (rq_data_dir(fd_req) == WRITE) {
+		/* Set up 3 dma commands: write preamble, data, postamble */
+		init_dma(cp, OUTPUT_MORE, write_preamble, sizeof(write_preamble));
+		++cp;
+		init_dma(cp, OUTPUT_MORE, fd_req->buffer, 512);
+		++cp;
+		init_dma(cp, OUTPUT_LAST, write_postamble, sizeof(write_postamble));
+	} else {
+		init_dma(cp, INPUT_LAST, fd_req->buffer, n * 512);
+	}
+	++cp;
+	out_le16(&cp->command, DBDMA_STOP);
+	out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
+	in_8(&sw->error);
+	out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
+	if (rq_data_dir(fd_req) == WRITE)
+		out_8(&sw->control_bis, WRITE_SECTORS);
+	in_8(&sw->intr);
+	out_le32(&dr->control, (RUN << 16) | RUN);
+	/* enable intr when transfer complete */
+	out_8(&sw->intr_enable, TRANSFER_DONE);
+	out_8(&sw->control_bis, DO_ACTION);
+	set_timeout(fs, 2*HZ, xfer_timeout);	/* enable timeout */
+}
+
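+/*
+ * Advance the floppy state machine.  Each case either starts an operation
+ * that completes later via interrupt or timeout (and returns), or moves
+ * to the next state and loops again.
+ */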
+static void act(struct floppy_state *fs)
+{
+	for (;;) {
+		switch (fs->state) {
+		case idle:
+			return;		/* XXX shouldn't get here */
+
+		case locating:
+			if (swim3_readbit(fs, TRACK_ZERO)) {
+				fs->cur_cyl = 0;
+				if (fs->req_cyl == 0)
+					fs->state = do_transfer;
+				else
+					fs->state = seeking;
+				break;
+			}
+			scan_track(fs);
+			return;
+
+		case seeking:
+			if (fs->cur_cyl < 0) {
+				fs->expect_cyl = -1;
+				fs->state = locating;
+				break;
+			}
+			if (fs->req_cyl == fs->cur_cyl) {
+				printk("whoops, seeking 0\n");
+				fs->state = do_transfer;
+				break;
+			}
+			seek_track(fs, fs->req_cyl - fs->cur_cyl);
+			return;
+
+		case settling:
+			/* check for SEEK_COMPLETE after 30ms */
+			fs->settle_time = (HZ + 32) / 33;
+			set_timeout(fs, fs->settle_time, settle_timeout);
+			return;
+
+		case do_transfer:
+			if (fs->cur_cyl != fs->req_cyl) {
+				if (fs->retries > 5) {
+					end_request(fd_req, 0);
+					fs->state = idle;
+					return;
+				}
+				fs->state = seeking;
+				break;
+			}
+			setup_transfer(fs);
+			return;
+
+		case jogging:
+			seek_track(fs, -5);
+			return;
+
+		default:
+			printk(KERN_ERR "swim3: unknown state %d\n", fs->state);
+			return;
+		}
+	}
+}
+
+static void scan_timeout(unsigned long data)
+{
+	struct floppy_state *fs = (struct floppy_state *) data;
+	struct swim3 __iomem *sw = fs->swim3;
+
+	fs->timeout_pending = 0;
+	out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
+	out_8(&sw->select, RELAX);
+	out_8(&sw->intr_enable, 0);
+	fs->cur_cyl = -1;
+	if (fs->retries > 5) {
+		end_request(fd_req, 0);
+		fs->state = idle;
+		start_request(fs);
+	} else {
+		fs->state = jogging;
+		act(fs);
+	}
+}
+
+static void seek_timeout(unsigned long data)
+{
+	struct floppy_state *fs = (struct floppy_state *) data;
+	struct swim3 __iomem *sw = fs->swim3;
+
+	fs->timeout_pending = 0;
+	out_8(&sw->control_bic, DO_SEEK);
+	out_8(&sw->select, RELAX);
+	out_8(&sw->intr_enable, 0);
+	printk(KERN_ERR "swim3: seek timeout\n");
+	end_request(fd_req, 0);
+	fs->state = idle;
+	start_request(fs);
+}
+
+static void settle_timeout(unsigned long data)
+{
+	struct floppy_state *fs = (struct floppy_state *) data;
+	struct swim3 __iomem *sw = fs->swim3;
+
+	fs->timeout_pending = 0;
+	if (swim3_readbit(fs, SEEK_COMPLETE)) {
+		out_8(&sw->select, RELAX);
+		fs->state = locating;
+		act(fs);
+		return;
+	}
+	out_8(&sw->select, RELAX);
+	if (fs->settle_time < 2*HZ) {
+		++fs->settle_time;
+		set_timeout(fs, 1, settle_timeout);
+		return;
+	}
+	printk(KERN_ERR "swim3: seek settle timeout\n");
+	end_request(fd_req, 0);
+	fs->state = idle;
+	start_request(fs);
+}
+
+static void xfer_timeout(unsigned long data)
+{
+	struct floppy_state *fs = (struct floppy_state *) data;
+	struct swim3 __iomem *sw = fs->swim3;
+	struct dbdma_regs __iomem *dr = fs->dma;
+	struct dbdma_cmd *cp = fs->dma_cmd;
+	unsigned long s;
+	int n;
+
+	fs->timeout_pending = 0;
+	out_le32(&dr->control, RUN << 16);
+	/* We must wait a bit for dbdma to stop */
+	for (n = 0; (in_le32(&dr->status) & ACTIVE) && n < 1000; n++)
+		udelay(1);
+	out_8(&sw->intr_enable, 0);
+	out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
+	out_8(&sw->select, RELAX);
+	if (rq_data_dir(fd_req) == WRITE)
+		++cp;
+	if (ld_le16(&cp->xfer_status) != 0)
+		s = fs->scount - ((ld_le16(&cp->res_count) + 511) >> 9);
+	else
+		s = 0;
+	fd_req->sector += s;
+	fd_req->current_nr_sectors -= s;
+	printk(KERN_ERR "swim3: timeout %sing sector %ld\n",
+	       (rq_data_dir(fd_req)==WRITE? "writ": "read"), (long)fd_req->sector);
+	end_request(fd_req, 0);
+	fs->state = idle;
+	start_request(fs);
+}
+
+static irqreturn_t swim3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+	struct floppy_state *fs = (struct floppy_state *) dev_id;
+	struct swim3 __iomem *sw = fs->swim3;
+	int intr, err, n;
+	int stat, resid;
+	struct dbdma_regs __iomem *dr;
+	struct dbdma_cmd *cp;
+
+	intr = in_8(&sw->intr);
+	err = (intr & ERROR_INTR)? in_8(&sw->error): 0;
+	if ((intr & ERROR_INTR) && fs->state != do_transfer)
+		printk(KERN_ERR "swim3_interrupt, state=%d, dir=%lx, intr=%x, err=%x\n",
+		       fs->state, rq_data_dir(fd_req), intr, err);
+	switch (fs->state) {
+	case locating:
+		if (intr & SEEN_SECTOR) {
+			out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
+			out_8(&sw->select, RELAX);
+			out_8(&sw->intr_enable, 0);
+			del_timer(&fs->timeout);
+			fs->timeout_pending = 0;
+			if (sw->ctrack == 0xff) {
+				printk(KERN_ERR "swim3: seen sector but cyl=ff?\n");
+				fs->cur_cyl = -1;
+				if (fs->retries > 5) {
+					end_request(fd_req, 0);
+					fs->state = idle;
+					start_request(fs);
+				} else {
+					fs->state = jogging;
+					act(fs);
+				}
+				break;
+			}
+			fs->cur_cyl = sw->ctrack;
+			fs->cur_sector = sw->csect;
+			if (fs->expect_cyl != -1 && fs->expect_cyl != fs->cur_cyl)
+				printk(KERN_ERR "swim3: expected cyl %d, got %d\n",
+				       fs->expect_cyl, fs->cur_cyl);
+			fs->state = do_transfer;
+			act(fs);
+		}
+		break;
+	case seeking:
+	case jogging:
+		if (sw->nseek == 0) {
+			out_8(&sw->control_bic, DO_SEEK);
+			out_8(&sw->select, RELAX);
+			out_8(&sw->intr_enable, 0);
+			del_timer(&fs->timeout);
+			fs->timeout_pending = 0;
+			if (fs->state == seeking)
+				++fs->retries;
+			fs->state = settling;
+			act(fs);
+		}
+		break;
+	case settling:
+		out_8(&sw->intr_enable, 0);
+		del_timer(&fs->timeout);
+		fs->timeout_pending = 0;
+		act(fs);
+		break;
+	case do_transfer:
+		if ((intr & (ERROR_INTR | TRANSFER_DONE)) == 0)
+			break;
+		out_8(&sw->intr_enable, 0);
+		out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
+		out_8(&sw->select, RELAX);
+		del_timer(&fs->timeout);
+		fs->timeout_pending = 0;
+		dr = fs->dma;
+		cp = fs->dma_cmd;
+		if (rq_data_dir(fd_req) == WRITE)
+			++cp;
+		/*
+		 * Check that the main data transfer has finished.
+		 * On writing, the swim3 sometimes doesn't use
+		 * up all the bytes of the postamble, so we can still
+		 * see DMA active here.  That doesn't matter as long
+		 * as all the sector data has been transferred.
+		 */
+		if ((intr & ERROR_INTR) == 0 && cp->xfer_status == 0) {
+			/* wait a little while for DMA to complete */
+			for (n = 0; n < 100; ++n) {
+				if (cp->xfer_status != 0)
+					break;
+				udelay(1);
+				barrier();
+			}
+		}
+		/* turn off DMA */
+		out_le32(&dr->control, (RUN | PAUSE) << 16);
+		stat = ld_le16(&cp->xfer_status);
+		resid = ld_le16(&cp->res_count);
+		if (intr & ERROR_INTR) {
+			n = fs->scount - 1 - resid / 512;
+			if (n > 0) {
+				fd_req->sector += n;
+				fd_req->current_nr_sectors -= n;
+				fd_req->buffer += n * 512;
+				fs->req_sector += n;
+			}
+			if (fs->retries < 5) {
+				++fs->retries;
+				act(fs);
+			} else {
+				printk("swim3: error %sing block %ld (err=%x)\n",
+				       rq_data_dir(fd_req) == WRITE? "writ": "read",
+				       (long)fd_req->sector, err);
+				end_request(fd_req, 0);
+				fs->state = idle;
+			}
+		} else {
+			if ((stat & ACTIVE) == 0 || resid != 0) {
+				/* must have been an error */
+				printk(KERN_ERR "swim3: fd dma: stat=%x resid=%d\n", stat, resid);
+				printk(KERN_ERR "  state=%d, dir=%lx, intr=%x, err=%x\n",
+				       fs->state, rq_data_dir(fd_req), intr, err);
+				end_request(fd_req, 0);
+				fs->state = idle;
+				start_request(fs);
+				break;
+			}
+			fd_req->sector += fs->scount;
+			fd_req->current_nr_sectors -= fs->scount;
+			fd_req->buffer += fs->scount * 512;
+			if (fd_req->current_nr_sectors <= 0) {
+				end_request(fd_req, 1);
+				fs->state = idle;
+			} else {
+				fs->req_sector += fs->scount;
+				if (fs->req_sector > fs->secpertrack) {
+					fs->req_sector -= fs->secpertrack;
+					if (++fs->head > 1) {
+						fs->head = 0;
+						++fs->req_cyl;
+					}
+				}
+				act(fs);
+			}
+		}
+		if (fs->state == idle)
+			start_request(fs);
+		break;
+	default:
+		printk(KERN_ERR "swim3: don't know what to do in state %d\n", fs->state);
+	}
+	return IRQ_HANDLED;
+}
+
+/*
+static void fd_dma_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+}
+*/
+
+static int grab_drive(struct floppy_state *fs, enum swim_state state,
+		      int interruptible)
+{
+	unsigned long flags;
+
+	save_flags(flags);
+	cli();
+	if (fs->state != idle) {
+		++fs->wanted;
+		while (fs->state != available) {
+			if (interruptible && signal_pending(current)) {
+				--fs->wanted;
+				restore_flags(flags);
+				return -EINTR;
+			}
+			interruptible_sleep_on(&fs->wait);
+		}
+		--fs->wanted;
+	}
+	fs->state = state;
+	restore_flags(flags);
+	return 0;
+}
+
+static void release_drive(struct floppy_state *fs)
+{
+	unsigned long flags;
+
+	save_flags(flags);
+	cli();
+	fs->state = idle;
+	start_request(fs);
+	restore_flags(flags);
+}
+
+static int fd_eject(struct floppy_state *fs)
+{
+	int err, n;
+
+	err = grab_drive(fs, ejecting, 1);
+	if (err)
+		return err;
+	swim3_action(fs, EJECT);
+	for (n = 20; n > 0; --n) {
+		if (signal_pending(current)) {
+			err = -EINTR;
+			break;
+		}
+		swim3_select(fs, RELAX);
+		current->state = TASK_INTERRUPTIBLE;
+		schedule_timeout(1);
+		if (swim3_readbit(fs, DISK_IN) == 0)
+			break;
+	}
+	swim3_select(fs, RELAX);
+	udelay(150);
+	fs->ejected = 1;
+	release_drive(fs);
+	return err;
+}
+
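+/* 2880 sectors in total, 18 sectors/track, 2 heads, 80 tracks: 1.44MB 3.5" */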
+static struct floppy_struct floppy_type =
+	{ 2880,18,2,80,0,0x1B,0x00,0xCF,0x6C,NULL };	/*  7 1.44MB 3.5"   */
+
+static int floppy_ioctl(struct inode *inode, struct file *filp,
+			unsigned int cmd, unsigned long param)
+{
+	struct floppy_state *fs = inode->i_bdev->bd_disk->private_data;
+	int err;
+		
+	if ((cmd & 0x80) && !capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	if (fs->media_bay && check_media_bay(fs->media_bay, MB_FD))
+		return -ENXIO;
+
+	switch (cmd) {
+	case FDEJECT:
+		if (fs->ref_count != 1)
+			return -EBUSY;
+		err = fd_eject(fs);
+		return err;
+	case FDGETPRM:
+	        if (copy_to_user((void __user *) param, &floppy_type,
+				 sizeof(struct floppy_struct)))
+			return -EFAULT;
+		return 0;
+	}
+	return -ENOTTY;
+}
+
+static int floppy_open(struct inode *inode, struct file *filp)
+{
+	struct floppy_state *fs = inode->i_bdev->bd_disk->private_data;
+	struct swim3 __iomem *sw = fs->swim3;
+	int n, err = 0;
+
+	if (fs->ref_count == 0) {
+		if (fs->media_bay && check_media_bay(fs->media_bay, MB_FD))
+			return -ENXIO;
+		out_8(&sw->setup, S_IBM_DRIVE | S_FCLK_DIV2);
+		out_8(&sw->control_bic, 0xff);
+		out_8(&sw->mode, 0x95);
+		udelay(10);
+		out_8(&sw->intr_enable, 0);
+		out_8(&sw->control_bis, DRIVE_ENABLE | INTR_ENABLE);
+		swim3_action(fs, MOTOR_ON);
+		fs->write_prot = -1;
+		fs->cur_cyl = -1;
+		for (n = 0; n < 2 * HZ; ++n) {
+			if (n >= HZ/30 && swim3_readbit(fs, SEEK_COMPLETE))
+				break;
+			if (signal_pending(current)) {
+				err = -EINTR;
+				break;
+			}
+			swim3_select(fs, RELAX);
+			current->state = TASK_INTERRUPTIBLE;
+			schedule_timeout(1);
+		}
+		if (err == 0 && (swim3_readbit(fs, SEEK_COMPLETE) == 0
+				 || swim3_readbit(fs, DISK_IN) == 0))
+			err = -ENXIO;
+		swim3_action(fs, SETMFM);
+		swim3_select(fs, RELAX);
+
+	} else if (fs->ref_count == -1 || filp->f_flags & O_EXCL)
+		return -EBUSY;
+
+	if (err == 0 && (filp->f_flags & O_NDELAY) == 0
+	    && (filp->f_mode & 3)) {
+		check_disk_change(inode->i_bdev);
+		if (fs->ejected)
+			err = -ENXIO;
+	}
+
+	if (err == 0 && (filp->f_mode & 2)) {
+		if (fs->write_prot < 0)
+			fs->write_prot = swim3_readbit(fs, WRITE_PROT);
+		if (fs->write_prot)
+			err = -EROFS;
+	}
+
+	if (err) {
+		if (fs->ref_count == 0) {
+			swim3_action(fs, MOTOR_OFF);
+			out_8(&sw->control_bic, DRIVE_ENABLE | INTR_ENABLE);
+			swim3_select(fs, RELAX);
+		}
+		return err;
+	}
+
+	if (filp->f_flags & O_EXCL)
+		fs->ref_count = -1;
+	else
+		++fs->ref_count;
+
+	return 0;
+}
+
+static int floppy_release(struct inode *inode, struct file *filp)
+{
+	struct floppy_state *fs = inode->i_bdev->bd_disk->private_data;
+	struct swim3 __iomem *sw = fs->swim3;
+	if (fs->ref_count > 0 && --fs->ref_count == 0) {
+		swim3_action(fs, MOTOR_OFF);
+		out_8(&sw->control_bic, 0xff);
+		swim3_select(fs, RELAX);
+	}
+	return 0;
+}
+
+static int floppy_check_change(struct gendisk *disk)
+{
+	struct floppy_state *fs = disk->private_data;
+	return fs->ejected;
+}
+
+static int floppy_revalidate(struct gendisk *disk)
+{
+	struct floppy_state *fs = disk->private_data;
+	struct swim3 __iomem *sw;
+	int ret, n;
+
+	if (fs->media_bay && check_media_bay(fs->media_bay, MB_FD))
+		return -ENXIO;
+
+	sw = fs->swim3;
+	grab_drive(fs, revalidating, 0);
+	out_8(&sw->intr_enable, 0);
+	out_8(&sw->control_bis, DRIVE_ENABLE);
+	swim3_action(fs, MOTOR_ON);	/* necessary? */
+	fs->write_prot = -1;
+	fs->cur_cyl = -1;
+	mdelay(1);
+	for (n = HZ; n > 0; --n) {
+		if (swim3_readbit(fs, SEEK_COMPLETE))
+			break;
+		if (signal_pending(current))
+			break;
+		swim3_select(fs, RELAX);
+		current->state = TASK_INTERRUPTIBLE;
+		schedule_timeout(1);
+	}
+	ret = swim3_readbit(fs, SEEK_COMPLETE) == 0
+		|| swim3_readbit(fs, DISK_IN) == 0;
+	if (ret)
+		swim3_action(fs, MOTOR_OFF);
+	else {
+		fs->ejected = 0;
+		swim3_action(fs, SETMFM);
+	}
+	swim3_select(fs, RELAX);
+
+	release_drive(fs);
+	return ret;
+}
+
+static struct block_device_operations floppy_fops = {
+	.open		= floppy_open,
+	.release	= floppy_release,
+	.ioctl		= floppy_ioctl,
+	.media_changed	= floppy_check_change,
+	.revalidate_disk= floppy_revalidate,
+};
+
+int swim3_init(void)
+{
+	struct device_node *swim;
+	int err = -ENOMEM;
+	int i;
+
+	devfs_mk_dir("floppy");
+
+	swim = find_devices("floppy");
+	while (swim && (floppy_count < MAX_FLOPPIES)) {
+		swim3_add_device(swim);
+		swim = swim->next;
+	}
+
+	swim = find_devices("swim3");
+	while (swim && (floppy_count < MAX_FLOPPIES)) {
+		swim3_add_device(swim);
+		swim = swim->next;
+	}
+
+	if (!floppy_count)
+		return -ENODEV;
+
+	for (i = 0; i < floppy_count; i++) {
+		disks[i] = alloc_disk(1);
+		if (!disks[i])
+			goto out;
+	}
+
+	if (register_blkdev(FLOPPY_MAJOR, "fd")) {
+		err = -EBUSY;
+		goto out;
+	}
+
+	swim3_queue = blk_init_queue(do_fd_request, &swim3_lock);
+	if (!swim3_queue) {
+		err = -ENOMEM;
+		goto out_queue;
+	}
+
+	for (i = 0; i < floppy_count; i++) {
+		struct gendisk *disk = disks[i];
+		disk->major = FLOPPY_MAJOR;
+		disk->first_minor = i;
+		disk->fops = &floppy_fops;
+		disk->private_data = &floppy_states[i];
+		disk->queue = swim3_queue;
+		disk->flags |= GENHD_FL_REMOVABLE;
+		sprintf(disk->disk_name, "fd%d", i);
+		sprintf(disk->devfs_name, "floppy/%d", i);
+		set_capacity(disk, 2880);
+		add_disk(disk);
+	}
+	return 0;
+
+out_queue:
+	unregister_blkdev(FLOPPY_MAJOR, "fd");
+out:
+	while (i--)
+		put_disk(disks[i]);
+	/* shouldn't we do something with the results of swim3_add_device()? */
+	return err;
+}
+
+static int swim3_add_device(struct device_node *swim)
+{
+	struct device_node *mediabay;
+	struct floppy_state *fs = &floppy_states[floppy_count];
+
+	if (swim->n_addrs < 2) {
+		printk(KERN_INFO "swim3: expecting 2 addrs (n_addrs:%d, n_intrs:%d)\n",
+		       swim->n_addrs, swim->n_intrs);
+		return -EINVAL;
+	}
+
+	if (swim->n_intrs < 2) {
+		printk(KERN_INFO "swim3: expecting 2 intrs (n_addrs:%d, n_intrs:%d)\n",
+		       swim->n_addrs, swim->n_intrs);
+		return -EINVAL;
+	}
+
+	if (!request_OF_resource(swim, 0, NULL)) {
+		printk(KERN_INFO "swim3: can't request IO resource!\n");
+		return -EINVAL;
+	}
+
+	mediabay = (strcasecmp(swim->parent->type, "media-bay") == 0) ? swim->parent : NULL;
+	if (mediabay == NULL)
+		pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 1);
+	
+	memset(fs, 0, sizeof(*fs));
+	fs->state = idle;
+	fs->swim3 = (struct swim3 __iomem *)
+		ioremap(swim->addrs[0].address, 0x200);
+	fs->dma = (struct dbdma_regs __iomem *)
+		ioremap(swim->addrs[1].address, 0x200);
+	fs->swim3_intr = swim->intrs[0].line;
+	fs->dma_intr = swim->intrs[1].line;
+	fs->cur_cyl = -1;
+	fs->cur_sector = -1;
+	fs->secpercyl = 36;
+	fs->secpertrack = 18;
+	fs->total_secs = 2880;
+	fs->media_bay = mediabay;
+	init_waitqueue_head(&fs->wait);
+
+	fs->dma_cmd = (struct dbdma_cmd *) DBDMA_ALIGN(fs->dbdma_cmd_space);
+	memset(fs->dma_cmd, 0, 2 * sizeof(struct dbdma_cmd));
+	st_le16(&fs->dma_cmd[1].command, DBDMA_STOP);
+
+	if (request_irq(fs->swim3_intr, swim3_interrupt, 0, "SWIM3", fs)) {
+		printk(KERN_ERR "Couldn't get irq %d for SWIM3\n", fs->swim3_intr);
+		pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 0);
+		return -EBUSY;
+	}
+/*
+	if (request_irq(fs->dma_intr, fd_dma_interrupt, 0, "SWIM3-dma", fs)) {
+		printk(KERN_ERR "Couldn't get irq %d for SWIM3 DMA",
+		       fs->dma_intr);
+		pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 0);
+		return -EBUSY;
+	}
+*/
+
+	init_timer(&fs->timeout);
+
+	printk(KERN_INFO "fd%d: SWIM3 floppy controller %s\n", floppy_count,
+		mediabay ? "in media bay" : "");
+
+	floppy_count++;
+	
+	return 0;
+}
+
+module_init(swim3_init)
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Paul Mackerras");
+MODULE_ALIAS_BLOCKDEV_MAJOR(FLOPPY_MAJOR);
diff --git a/drivers/block/swim_iop.c b/drivers/block/swim_iop.c
new file mode 100644
index 0000000..a1283f6
--- /dev/null
+++ b/drivers/block/swim_iop.c
@@ -0,0 +1,579 @@
+/*
+ * Driver for the SWIM (Super Woz Integrated Machine) IOP
+ * floppy controller on the Macintosh IIfx and Quadra 900/950
+ *
+ * Written by Joshua M. Thompson (funaho@jurai.org)
+ * based on the SWIM3 driver (c) 1996 by Paul Mackerras.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * 1999-06-12 (jmt) - Initial implementation.
+ */
+
+/*
+ * -------------------
+ * Theory of Operation
+ * -------------------
+ *
+ * Since the SWIM IOP is message-driven we implement a simple request queue
+ * system.  One outstanding request may be queued at any given time (this is
+ * an IOP limitation); only when that request has completed can a new request
+ * be sent.
+ */
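+
+/*
+ * In outline, a request therefore flows through the helpers below
+ * like this:
+ *
+ *	swimiop_init_request(&req);	   mark it not sent / not complete
+ *	req.done = handler;		   optional completion callback
+ *	swimiop_send_request(&req);	   req becomes current_req
+ *	... swimiop_receive() copies the IOP's reply back into
+ *	    req.command, sets req.complete and invokes req.done ...
+ */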
+
+#include <linux/stddef.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/fd.h>
+#include <linux/ioctl.h>
+#include <linux/blkdev.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/mac_iop.h>
+#include <asm/swim_iop.h>
+
+#define DRIVER_VERSION "Version 0.1 (1999-06-12)"
+
+#define MAX_FLOPPIES	4
+
+enum swim_state {
+	idle,
+	available,
+	revalidating,
+	transferring,
+	ejecting
+};
+
+struct floppy_state {
+	enum swim_state state;
+	int	drive_num;	/* device number */
+	int	secpercyl;	/* disk geometry information */
+	int	secpertrack;
+	int	total_secs;
+	int	write_prot;	/* 1 if write-protected, 0 if not, -1 dunno */
+	int	ref_count;
+	struct timer_list timeout;
+	int	ejected;
+	wait_queue_head_t wait;
+	int	wanted;
+	int	timeout_pending;
+};
+
+struct swim_iop_req {
+	int	sent;
+	int	complete;
+	__u8	command[32];
+	struct floppy_state *fs;
+	void	(*done)(struct swim_iop_req *);
+};
+
+static struct swim_iop_req *current_req;
+static int floppy_count;
+
+static struct floppy_state floppy_states[MAX_FLOPPIES];
+static DEFINE_SPINLOCK(swim_iop_lock);
+
+#define CURRENT elv_next_request(swim_queue)
+
+static char *drive_names[8] = {
+	"not installed",	/* DRV_NONE    */
+	"unknown (1)",		/* DRV_UNKNOWN */
+	"a 400K drive",		/* DRV_400K    */
+	"an 800K drive",	/* DRV_800K    */
+	"unknown (4)",		/* ????        */
+	"an FDHD",		/* DRV_FDHD    */
+	"unknown (6)",		/* ????        */
+	"an Apple HD20"		/* DRV_HD20    */
+};
+
+int swimiop_init(void);
+static void swimiop_init_request(struct swim_iop_req *);
+static int swimiop_send_request(struct swim_iop_req *);
+static void swimiop_receive(struct iop_msg *, struct pt_regs *);
+static void swimiop_status_update(int, struct swim_drvstatus *);
+static int swimiop_eject(struct floppy_state *fs);
+
+static int floppy_ioctl(struct inode *inode, struct file *filp,
+			unsigned int cmd, unsigned long param);
+static int floppy_open(struct inode *inode, struct file *filp);
+static int floppy_release(struct inode *inode, struct file *filp);
+static int floppy_check_change(struct gendisk *disk);
+static int floppy_revalidate(struct gendisk *disk);
+static int grab_drive(struct floppy_state *fs, enum swim_state state,
+		      int interruptible);
+static void release_drive(struct floppy_state *fs);
+static void set_timeout(struct floppy_state *fs, int nticks,
+			void (*proc)(unsigned long));
+static void fd_request_timeout(unsigned long);
+static void do_fd_request(request_queue_t * q);
+static void start_request(struct floppy_state *fs);
+
+static struct block_device_operations floppy_fops = {
+	.open		= floppy_open,
+	.release	= floppy_release,
+	.ioctl		= floppy_ioctl,
+	.media_changed	= floppy_check_change,
+	.revalidate_disk = floppy_revalidate,
+};
+
+static struct request_queue *swim_queue;
+/*
+ * SWIM IOP initialization
+ */
+
+int swimiop_init(void)
+{
+	volatile struct swim_iop_req req;
+	struct swimcmd_status *cmd = (struct swimcmd_status *) &req.command[0];
+	struct swim_drvstatus *ds = &cmd->status;
+	struct floppy_state *fs;
+	int i;
+
+	current_req = NULL;
+	floppy_count = 0;
+
+	if (!iop_ism_present)
+		return -ENODEV;
+
+	if (register_blkdev(FLOPPY_MAJOR, "fd"))
+		return -EBUSY;
+
+	swim_queue = blk_init_queue(do_fd_request, &swim_iop_lock);
+	if (!swim_queue) {
+		unregister_blkdev(FLOPPY_MAJOR, "fd");
+		return -ENOMEM;
+	}
+
+	printk(KERN_INFO "SWIM-IOP: %s by Joshua M. Thompson (funaho@jurai.org)\n",
+		DRIVER_VERSION);
+
+	if (iop_listen(SWIM_IOP, SWIM_CHAN, swimiop_receive, "SWIM") != 0) {
+		printk(KERN_ERR "SWIM-IOP: IOP channel already in use; can't initialize.\n");
+		unregister_blkdev(FLOPPY_MAJOR, "fd");
+		blk_cleanup_queue(swim_queue);
+		return -EBUSY;
+	}
+
+	printk(KERN_INFO "SWIM-IOP: probing for installed drives.\n");
+
+	for (i = 0 ; i < MAX_FLOPPIES ; i++) {
+		fs = &floppy_states[floppy_count];
+		memset(fs, 0, sizeof(struct floppy_state));
+
+		swimiop_init_request(&req);
+		cmd->code = CMD_STATUS;
+		cmd->drive_num = i + 1;
+		if (swimiop_send_request(&req) != 0)
+			continue;
+		while (!req.complete)
+			;	/* busy-wait; swimiop_receive() sets req.complete */
+		if (cmd->error != 0) {
+			printk(KERN_ERR "SWIM-IOP: probe on drive %d returned error %d\n", i, (uint) cmd->error);
+			continue;
+		}
+		if (ds->installed != 0x01) continue;
+		printk(KERN_INFO "SWIM-IOP: drive %d is %s (%s, %s, %s, %s)\n", i,
+			drive_names[ds->info.type],
+			ds->info.external? "ext" : "int",
+			ds->info.scsi? "scsi" : "floppy",
+			ds->info.fixed? "fixed" : "removable",
+			ds->info.secondary? "secondary" : "primary");
+		swimiop_status_update(floppy_count, ds);
+		fs->state = idle;
+		fs->drive_num = i + 1;	/* drive numbers are 1-based on the
+					   wire, as in the probe above */
+		init_waitqueue_head(&fs->wait);
+
+		init_timer(&fs->timeout);
+		floppy_count++;
+	}
+	printk(KERN_INFO "SWIM-IOP: detected %d installed drives.\n", floppy_count);
+
+	for (i = 0; i < floppy_count; i++) {
+		struct gendisk *disk = alloc_disk(1);
+		if (!disk)
+			continue;
+		disk->major = FLOPPY_MAJOR;
+		disk->first_minor = i;
+		disk->fops = &floppy_fops;
+		sprintf(disk->disk_name, "fd%d", i);
+		disk->private_data = &floppy_states[i];
+		disk->queue = swim_queue;
+		set_capacity(disk, 2880 * 2);
+		add_disk(disk);
+	}
+
+	return 0;
+}
+
+static void swimiop_init_request(struct swim_iop_req *req)
+{
+	req->sent = 0;
+	req->complete = 0;
+	req->done = NULL;
+}
+
+static int swimiop_send_request(struct swim_iop_req *req)
+{
+	unsigned long flags;
+	int err;
+
+	/* It's doubtful an interrupt routine would try to send */
+	/* a SWIM request, but I'd rather play it safe here.    */
+
+	local_irq_save(flags);
+
+	if (current_req != NULL) {
+		local_irq_restore(flags);
+		return -ENOMEM;
+	}
+
+	current_req = req;
+
+	/* Interrupts should be back on for iop_send_message() */
+
+	local_irq_restore(flags);
+
+	err = iop_send_message(SWIM_IOP, SWIM_CHAN, (void *) req,
+				sizeof(req->command), (__u8 *) &req->command[0],
+				swimiop_receive);
+
+	/* No race condition here; we own current_req at this point */
+
+	if (err) {
+		current_req = NULL;
+	} else {
+		req->sent = 1;
+	}
+	return err;
+}
+
+/*
+ * Receive a SWIM message from the IOP.
+ *
+ * This will be called in two cases:
+ *
+ * 1. A message has been successfully sent to the IOP.
+ * 2. An unsolicited message was received from the IOP.
+ */
+
+static void swimiop_receive(struct iop_msg *msg, struct pt_regs *regs)
+{
+	struct swim_iop_req *req;
+	struct swimmsg_status *sm;
+	struct swim_drvstatus *ds;
+
+	req = current_req;
+
+	switch(msg->status) {
+		case IOP_MSGSTATUS_COMPLETE:
+			memcpy(&req->command[0], &msg->reply[0], sizeof(req->command));
+			req->complete = 1;
+			if (req->done) (*req->done)(req);
+			current_req = NULL;
+			break;
+		case IOP_MSGSTATUS_UNSOL:
+			sm = (struct swimmsg_status *) &msg->message[0];
+			ds = &sm->status;
+			swimiop_status_update(sm->drive_num, ds);
+			iop_complete_message(msg);
+			break;
+	}
+}
+
+static void swimiop_status_update(int drive_num, struct swim_drvstatus *ds)
+{
+	struct floppy_state *fs = &floppy_states[drive_num];
+
+	fs->write_prot = (ds->write_prot == 0x80);
+	if ((ds->disk_in_drive != 0x01) && (ds->disk_in_drive != 0x02)) {
+		fs->ejected = 1;
+	} else {
+		fs->ejected = 0;
+	}
+	switch(ds->info.type) {
+		case DRV_400K:
+			fs->secpercyl = 10;
+			fs->secpertrack = 10;
+			fs->total_secs = 800;
+			break;
+		case DRV_800K:
+			fs->secpercyl = 20;
+			fs->secpertrack = 10;
+			fs->total_secs = 1600;
+			break;
+		case DRV_FDHD:
+			fs->secpercyl = 36;
+			fs->secpertrack = 18;
+			fs->total_secs = 2880;
+			break;
+		default:
+			fs->secpercyl = 0;
+			fs->secpertrack = 0;
+			fs->total_secs = 0;
+			break;
+	}
+}
+
+static int swimiop_eject(struct floppy_state *fs)
+{
+	int err, n;
+	struct swim_iop_req req;
+	struct swimcmd_eject *cmd = (struct swimcmd_eject *) &req.command[0];
+
+	err = grab_drive(fs, ejecting, 1);
+	if (err) return err;
+
+	swimiop_init_request(&req);
+	cmd->code = CMD_EJECT;
+	cmd->drive_num = fs->drive_num;
+	err = swimiop_send_request(&req);
+	if (err) {
+		release_drive(fs);
+		return err;
+	}
+	for (n = 2*HZ; n > 0; --n) {
+		if (req.complete) break;
+		if (signal_pending(current)) {
+			err = -EINTR;
+			break;
+		}
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule_timeout(1);
+	}
+	release_drive(fs);
+	return err ? err : cmd->error;
+}
+
+static struct floppy_struct floppy_type =
+	{ 2880, 18, 2, 80, 0, 0x1B, 0x00, 0xCF, 0x6C, NULL };	/*  7 1.44MB 3.5"   */
+
+static int floppy_ioctl(struct inode *inode, struct file *filp,
+			unsigned int cmd, unsigned long param)
+{
+	struct floppy_state *fs = inode->i_bdev->bd_disk->private_data;
+	int err;
+
+	if ((cmd & 0x80) && !capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	switch (cmd) {
+	case FDEJECT:
+		if (fs->ref_count != 1)
+			return -EBUSY;
+		err = swimiop_eject(fs);
+		return err;
+	case FDGETPRM:
+		if (copy_to_user((void __user *) param, &floppy_type,
+				 sizeof(struct floppy_struct)))
+			return -EFAULT;
+		return 0;
+	}
+	return -ENOTTY;
+}
+
+static int floppy_open(struct inode *inode, struct file *filp)
+{
+	struct floppy_state *fs = inode->i_bdev->bd_disk->private_data;
+
+	if (fs->ref_count == -1 || filp->f_flags & O_EXCL)
+		return -EBUSY;
+
+	if ((filp->f_flags & O_NDELAY) == 0
+	    && (filp->f_mode & (FMODE_READ | FMODE_WRITE))) {
+		check_disk_change(inode->i_bdev);
+		if (fs->ejected)
+			return -ENXIO;
+	}
+
+	if ((filp->f_mode & FMODE_WRITE) && fs->write_prot)
+		return -EROFS;
+
+	if (filp->f_flags & O_EXCL)
+		fs->ref_count = -1;
+	else
+		++fs->ref_count;
+
+	return 0;
+}
+
+static int floppy_release(struct inode *inode, struct file *filp)
+{
+	struct floppy_state *fs = inode->i_bdev->bd_disk->private_data;
+	if (fs->ref_count < 0)
+		fs->ref_count = 0;	/* drop an O_EXCL claim */
+	else if (fs->ref_count > 0)
+		fs->ref_count--;
+	return 0;
+}
+
+static int floppy_check_change(struct gendisk *disk)
+{
+	struct floppy_state *fs = disk->private_data;
+	return fs->ejected;
+}
+
+static int floppy_revalidate(struct gendisk *disk)
+{
+	struct floppy_state *fs = disk->private_data;
+	grab_drive(fs, revalidating, 0);
+	/* yadda, yadda */
+	release_drive(fs);
+	return 0;
+}
+
+static void floppy_off(unsigned int nr)
+{
+}
+
+static int grab_drive(struct floppy_state *fs, enum swim_state state,
+		      int interruptible)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	if (fs->state != idle) {
+		++fs->wanted;
+		while (fs->state != available) {
+			if (interruptible && signal_pending(current)) {
+				--fs->wanted;
+				local_irq_restore(flags);
+				return -EINTR;
+			}
+			interruptible_sleep_on(&fs->wait);
+		}
+		--fs->wanted;
+	}
+	fs->state = state;
+	local_irq_restore(flags);
+	return 0;
+}
+
+static void release_drive(struct floppy_state *fs)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	fs->state = idle;
+	start_request(fs);
+	local_irq_restore(flags);
+}
+
+static void set_timeout(struct floppy_state *fs, int nticks,
+			void (*proc)(unsigned long))
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	if (fs->timeout_pending)
+		del_timer(&fs->timeout);
+	init_timer(&fs->timeout);
+	fs->timeout.expires = jiffies + nticks;
+	fs->timeout.function = proc;
+	fs->timeout.data = (unsigned long) fs;
+	add_timer(&fs->timeout);
+	fs->timeout_pending = 1;
+	local_irq_restore(flags);
+}
+
+static void do_fd_request(request_queue_t * q)
+{
+	int i;
+
+	for (i = 0 ; i < floppy_count ; i++) {
+		start_request(&floppy_states[i]);
+	}
+}
+
+static void fd_request_complete(struct swim_iop_req *req)
+{
+	struct floppy_state *fs = req->fs;
+	struct swimcmd_rw *cmd = (struct swimcmd_rw *) &req->command[0];
+
+	del_timer(&fs->timeout);
+	fs->timeout_pending = 0;
+	fs->state = idle;
+	if (cmd->error) {
+		printk(KERN_ERR "SWIM-IOP: error %d on read/write request.\n", cmd->error);
+		end_request(CURRENT, 0);
+	} else {
+		CURRENT->sector += cmd->num_blocks;
+		CURRENT->current_nr_sectors -= cmd->num_blocks;
+		if (CURRENT->current_nr_sectors <= 0) {
+			end_request(CURRENT, 1);
+			return;
+		}
+	}
+	start_request(fs);
+}
+
+static void fd_request_timeout(unsigned long data)
+{
+	struct floppy_state *fs = (struct floppy_state *) data;
+
+	fs->timeout_pending = 0;
+	end_request(CURRENT, 0);
+	fs->state = idle;
+}
+
+static void start_request(struct floppy_state *fs)
+{
+	/* Must outlive this function: the IOP completes the request
+	   asynchronously, and only one request is ever outstanding. */
+	static struct swim_iop_req req;
+	struct swimcmd_rw *cmd = (struct swimcmd_rw *) &req.command[0];
+
+	if (fs->state == idle && fs->wanted) {
+		fs->state = available;
+		wake_up(&fs->wait);
+		return;
+	}
+	while (CURRENT && fs->state == idle) {
+#if 0
+		/* 2.4-era sanity check: struct request no longer has ->bh */
+		if (CURRENT->bh && !buffer_locked(CURRENT->bh))
+			panic("floppy: block not locked");
+
+		printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%ld buf=%p\n",
+		       CURRENT->rq_disk->disk_name, CURRENT->cmd,
+		       CURRENT->sector, CURRENT->nr_sectors, CURRENT->buffer);
+		printk("           rq_status=%d errors=%d current_nr_sectors=%ld\n",
+		       CURRENT->rq_status, CURRENT->errors, CURRENT->current_nr_sectors);
+#endif
+
+		if (CURRENT->sector < 0 || CURRENT->sector >= fs->total_secs) {
+			end_request(CURRENT, 0);
+			continue;
+		}
+		if (CURRENT->current_nr_sectors == 0) {
+			end_request(CURRENT, 1);
+			continue;
+		}
+		if (fs->ejected) {
+			end_request(CURRENT, 0);
+			continue;
+		}
+
+		swimiop_init_request(&req);
+		req.fs = fs;
+		req.done = fd_request_complete;
+
+		if (rq_data_dir(CURRENT) == WRITE) {
+			if (fs->write_prot) {
+				end_request(CURRENT, 0);
+				continue;
+			}
+			cmd->code = CMD_WRITE;
+		} else {
+			cmd->code = CMD_READ;
+		}
+		cmd->drive_num = fs->drive_num;
+		cmd->buffer = CURRENT->buffer;
+		cmd->first_block = CURRENT->sector;
+		cmd->num_blocks = CURRENT->current_nr_sectors;
+
+		if (swimiop_send_request(&req)) {
+			end_request(CURRENT, 0);
+			continue;
+		}
+
+		set_timeout(fs, HZ*CURRENT->current_nr_sectors,
+				fd_request_timeout);
+
+		fs->state = transferring;
+	}
+}
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c
new file mode 100644
index 0000000..797f598
--- /dev/null
+++ b/drivers/block/sx8.c
@@ -0,0 +1,1764 @@
+/*
+ *  sx8.c: Driver for Promise SATA SX8 looks-like-I2O hardware
+ *
+ *  Copyright 2004 Red Hat, Inc.
+ *
+ *  Author/maintainer:  Jeff Garzik <jgarzik@pobox.com>
+ *
+ *  This file is subject to the terms and conditions of the GNU General Public
+ *  License.  See the file "COPYING" in the main directory of this archive
+ *  for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/blkdev.h>
+#include <linux/sched.h>
+#include <linux/devfs_fs_kernel.h>
+#include <linux/interrupt.h>
+#include <linux/compiler.h>
+#include <linux/workqueue.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/time.h>
+#include <linux/hdreg.h>
+#include <asm/io.h>
+#include <asm/semaphore.h>
+#include <asm/uaccess.h>
+
+MODULE_AUTHOR("Jeff Garzik");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Promise SATA SX8 block driver");
+
+#if 0
+#define CARM_DEBUG
+#define CARM_VERBOSE_DEBUG
+#else
+#undef CARM_DEBUG
+#undef CARM_VERBOSE_DEBUG
+#endif
+#undef CARM_NDEBUG
+
+#define DRV_NAME "sx8"
+#define DRV_VERSION "0.8"
+#define PFX DRV_NAME ": "
+
+#define NEXT_RESP(idx)	((idx + 1) % RMSG_Q_LEN)
+
+/* 0xf is just arbitrary, non-zero noise; this is sorta like poisoning */
+#define TAG_ENCODE(tag)	(((tag) << 16) | 0xf)
+#define TAG_DECODE(tag)	(((tag) >> 16) & 0x1f)
+#define TAG_VALID(tag)	((((tag) & 0xf) == 0xf) && (TAG_DECODE(tag) < 32))
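+/*
+ * Worked example: TAG_ENCODE(5) == (5 << 16) | 0xf == 0x0005000f;
+ * TAG_DECODE(0x0005000f) == 5; and TAG_VALID holds, since the low
+ * nibble is 0xf and the decoded tag (5) is below 32.
+ */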
+
+/* note: prints function name for you */
+#ifdef CARM_DEBUG
+#define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
+#ifdef CARM_VERBOSE_DEBUG
+#define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
+#else
+#define VPRINTK(fmt, args...)
+#endif	/* CARM_VERBOSE_DEBUG */
+#else
+#define DPRINTK(fmt, args...)
+#define VPRINTK(fmt, args...)
+#endif	/* CARM_DEBUG */
+
+#ifdef CARM_NDEBUG
+#define assert(expr)
+#else
+#define assert(expr) \
+	do { \
+		if (unlikely(!(expr))) \
+			printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \
+			       #expr, __FILE__, __FUNCTION__, __LINE__); \
+	} while (0)
+#endif
+
+/* defines only for the constants which don't work well as enums */
+struct carm_host;
+
+enum {
+	/* adapter-wide limits */
+	CARM_MAX_PORTS		= 8,
+	CARM_SHM_SIZE		= (4096 << 7),
+	CARM_MINORS_PER_MAJOR	= 256 / CARM_MAX_PORTS,
+	CARM_MAX_WAIT_Q		= CARM_MAX_PORTS + 1,
+
+	/* command message queue limits */
+	CARM_MAX_REQ		= 64,	       /* max command msgs per host */
+	CARM_MAX_Q		= 1,		   /* one command at a time */
+	CARM_MSG_LOW_WATER	= (CARM_MAX_REQ / 4),	     /* refill mark */
+
+	/* S/G limits, host-wide and per-request */
+	CARM_MAX_REQ_SG		= 32,	     /* max s/g entries per request */
+	CARM_SG_BOUNDARY	= 0xffffUL,	    /* s/g segment boundary */
+	CARM_MAX_HOST_SG	= 600,		/* max s/g entries per host */
+	CARM_SG_LOW_WATER	= (CARM_MAX_HOST_SG / 4),   /* re-fill mark */
+
+	/* hardware registers */
+	CARM_IHQP		= 0x1c,
+	CARM_INT_STAT		= 0x10, /* interrupt status */
+	CARM_INT_MASK		= 0x14, /* interrupt mask */
+	CARM_HMUC		= 0x18, /* host message unit control */
+	RBUF_ADDR_LO		= 0x20, /* response msg DMA buf low 32 bits */
+	RBUF_ADDR_HI		= 0x24, /* response msg DMA buf high 32 bits */
+	RBUF_BYTE_SZ		= 0x28,
+	CARM_RESP_IDX		= 0x2c,
+	CARM_CMS0		= 0x30, /* command message size reg 0 */
+	CARM_LMUC		= 0x48,
+	CARM_HMPHA		= 0x6c,
+	CARM_INITC		= 0xb5,
+
+	/* bits in CARM_INT_{STAT,MASK} */
+	INT_RESERVED		= 0xfffffff0,
+	INT_WATCHDOG		= (1 << 3),	/* watchdog timer */
+	INT_Q_OVERFLOW		= (1 << 2),	/* cmd msg q overflow */
+	INT_Q_AVAILABLE		= (1 << 1),	/* cmd msg q has free space */
+	INT_RESPONSE		= (1 << 0),	/* response msg available */
+	INT_ACK_MASK		= INT_WATCHDOG | INT_Q_OVERFLOW,
+	INT_DEF_MASK		= INT_RESERVED | INT_Q_OVERFLOW |
+				  INT_RESPONSE,
+
+	/* command messages, and related register bits */
+	CARM_HAVE_RESP		= 0x01,
+	CARM_MSG_READ		= 1,
+	CARM_MSG_WRITE		= 2,
+	CARM_MSG_VERIFY		= 3,
+	CARM_MSG_GET_CAPACITY	= 4,
+	CARM_MSG_FLUSH		= 5,
+	CARM_MSG_IOCTL		= 6,
+	CARM_MSG_ARRAY		= 8,
+	CARM_MSG_MISC		= 9,
+	CARM_CME		= (1 << 2),
+	CARM_RME		= (1 << 1),
+	CARM_WZBC		= (1 << 0),
+	CARM_RMI		= (1 << 0),
+	CARM_Q_FULL		= (1 << 3),
+	CARM_MSG_SIZE		= 288,
+	CARM_Q_LEN		= 48,
+
+	/* CARM_MSG_IOCTL messages */
+	CARM_IOC_SCAN_CHAN	= 5,	/* scan channels for devices */
+	CARM_IOC_GET_TCQ	= 13,	/* get tcq/ncq depth */
+	CARM_IOC_SET_TCQ	= 14,	/* set tcq/ncq depth */
+
+	IOC_SCAN_CHAN_NODEV	= 0x1f,
+	IOC_SCAN_CHAN_OFFSET	= 0x40,
+
+	/* CARM_MSG_ARRAY messages */
+	CARM_ARRAY_INFO		= 0,
+
+	ARRAY_NO_EXIST		= (1 << 31),
+
+	/* response messages */
+	RMSG_SZ			= 8,	/* sizeof(struct carm_response) */
+	RMSG_Q_LEN		= 48,	/* resp. msg list length */
+	RMSG_OK			= 1,	/* bit indicating msg was successful */
+					/* length of entire resp. msg buffer */
+	RBUF_LEN		= RMSG_SZ * RMSG_Q_LEN,
+
+	PDC_SHM_SIZE		= (4096 << 7), /* length of entire h/w buffer */
+
+	/* CARM_MSG_MISC messages */
+	MISC_GET_FW_VER		= 2,
+	MISC_ALLOC_MEM		= 3,
+	MISC_SET_TIME		= 5,
+
+	/* MISC_GET_FW_VER feature bits */
+	FW_VER_4PORT		= (1 << 2), /* 1=4 ports, 0=8 ports */
+	FW_VER_NON_RAID		= (1 << 1), /* 1=non-RAID firmware, 0=RAID */
+	FW_VER_ZCR		= (1 << 0), /* zero channel RAID (whatever that is) */
+
+	/* carm_host flags */
+	FL_NON_RAID		= FW_VER_NON_RAID,
+	FL_4PORT		= FW_VER_4PORT,
+	FL_FW_VER_MASK		= (FW_VER_NON_RAID | FW_VER_4PORT),
+	FL_DAC			= (1 << 16),
+	FL_DYN_MAJOR		= (1 << 17),
+};
+
+enum scatter_gather_types {
+	SGT_32BIT		= 0,
+	SGT_64BIT		= 1,
+};
+
+enum host_states {
+	HST_INVALID,		/* invalid state; never used */
+	HST_ALLOC_BUF,		/* setting up master SHM area */
+	HST_ERROR,		/* we never leave here */
+	HST_PORT_SCAN,		/* start dev scan */
+	HST_DEV_SCAN_START,	/* start per-device probe */
+	HST_DEV_SCAN,		/* continue per-device probe */
+	HST_DEV_ACTIVATE,	/* activate devices we found */
+	HST_PROBE_FINISHED,	/* probe is complete */
+	HST_PROBE_START,	/* initiate probe */
+	HST_SYNC_TIME,		/* tell firmware what time it is */
+	HST_GET_FW_VER,		/* get firmware version, adapter port cnt */
+};
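+
+/*
+ * Normal probe-time progression of host->state, as driven by
+ * carm_fsm_task() and the response handlers below:
+ *
+ *   HST_PROBE_START -> HST_ALLOC_BUF -> HST_SYNC_TIME -> HST_GET_FW_VER
+ *     -> HST_PORT_SCAN -> HST_DEV_SCAN_START -> HST_DEV_SCAN (once per
+ *     discovered device) -> HST_DEV_ACTIVATE -> HST_PROBE_FINISHED
+ *
+ * Any failure along the way drops the machine into HST_ERROR.
+ */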
+
+#ifdef CARM_DEBUG
+static const char *state_name[] = {
+	"HST_INVALID",
+	"HST_ALLOC_BUF",
+	"HST_ERROR",
+	"HST_PORT_SCAN",
+	"HST_DEV_SCAN_START",
+	"HST_DEV_SCAN",
+	"HST_DEV_ACTIVATE",
+	"HST_PROBE_FINISHED",
+	"HST_PROBE_START",
+	"HST_SYNC_TIME",
+	"HST_GET_FW_VER",
+};
+#endif
+
+struct carm_port {
+	unsigned int			port_no;
+	unsigned int			n_queued;
+	struct gendisk			*disk;
+	struct carm_host		*host;
+
+	/* attached device characteristics */
+	u64				capacity;
+	char				name[41];
+	u16				dev_geom_head;
+	u16				dev_geom_sect;
+	u16				dev_geom_cyl;
+};
+
+struct carm_request {
+	unsigned int			tag;
+	int				n_elem;
+	unsigned int			msg_type;
+	unsigned int			msg_subtype;
+	unsigned int			msg_bucket;
+	struct request			*rq;
+	struct carm_port		*port;
+	struct scatterlist		sg[CARM_MAX_REQ_SG];
+};
+
+struct carm_host {
+	unsigned long			flags;
+	void				__iomem *mmio;
+	void				*shm;
+	dma_addr_t			shm_dma;
+
+	int				major;
+	int				id;
+	char				name[32];
+
+	spinlock_t			lock;
+	struct pci_dev			*pdev;
+	unsigned int			state;
+	u32				fw_ver;
+
+	request_queue_t			*oob_q;
+	unsigned int			n_oob;
+
+	unsigned int			hw_sg_used;
+
+	unsigned int			resp_idx;
+
+	unsigned int			wait_q_prod;
+	unsigned int			wait_q_cons;
+	request_queue_t			*wait_q[CARM_MAX_WAIT_Q];
+
+	unsigned int			n_msgs;
+	u64				msg_alloc;
+	struct carm_request		req[CARM_MAX_REQ];
+	void				*msg_base;
+	dma_addr_t			msg_dma;
+
+	int				cur_scan_dev;
+	unsigned long			dev_active;
+	unsigned long			dev_present;
+	struct carm_port		port[CARM_MAX_PORTS];
+
+	struct work_struct		fsm_task;
+
+	struct semaphore		probe_sem;
+};
+
+struct carm_response {
+	__le32 ret_handle;
+	__le32 status;
+}  __attribute__((packed));
+
+struct carm_msg_sg {
+	__le32 start;
+	__le32 len;
+}  __attribute__((packed));
+
+struct carm_msg_rw {
+	u8 type;
+	u8 id;
+	u8 sg_count;
+	u8 sg_type;
+	__le32 handle;
+	__le32 lba;
+	__le16 lba_count;
+	__le16 lba_high;
+	struct carm_msg_sg sg[32];
+}  __attribute__((packed));
+
+struct carm_msg_allocbuf {
+	u8 type;
+	u8 subtype;
+	u8 n_sg;
+	u8 sg_type;
+	__le32 handle;
+	__le32 addr;
+	__le32 len;
+	__le32 evt_pool;
+	__le32 n_evt;
+	__le32 rbuf_pool;
+	__le32 n_rbuf;
+	__le32 msg_pool;
+	__le32 n_msg;
+	struct carm_msg_sg sg[8];
+}  __attribute__((packed));
+
+struct carm_msg_ioctl {
+	u8 type;
+	u8 subtype;
+	u8 array_id;
+	u8 reserved1;
+	__le32 handle;
+	__le32 data_addr;
+	u32 reserved2;
+}  __attribute__((packed));
+
+struct carm_msg_sync_time {
+	u8 type;
+	u8 subtype;
+	u16 reserved1;
+	__le32 handle;
+	u32 reserved2;
+	__le32 timestamp;
+}  __attribute__((packed));
+
+struct carm_msg_get_fw_ver {
+	u8 type;
+	u8 subtype;
+	u16 reserved1;
+	__le32 handle;
+	__le32 data_addr;
+	u32 reserved2;
+}  __attribute__((packed));
+
+struct carm_fw_ver {
+	__le32 version;
+	u8 features;
+	u8 reserved1;
+	u16 reserved2;
+}  __attribute__((packed));
+
+struct carm_array_info {
+	__le32 size;
+
+	__le16 size_hi;
+	__le16 stripe_size;
+
+	__le32 mode;
+
+	__le16 stripe_blk_sz;
+	__le16 reserved1;
+
+	__le16 cyl;
+	__le16 head;
+
+	__le16 sect;
+	u8 array_id;
+	u8 reserved2;
+
+	char name[40];
+
+	__le32 array_status;
+
+	/* device list continues beyond this point? */
+}  __attribute__((packed));
+
+static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent);
+static void carm_remove_one (struct pci_dev *pdev);
+static int carm_bdev_ioctl(struct inode *ino, struct file *fil,
+			   unsigned int cmd, unsigned long arg);
+
+static struct pci_device_id carm_pci_tbl[] = {
+	{ PCI_VENDOR_ID_PROMISE, 0x8000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+	{ PCI_VENDOR_ID_PROMISE, 0x8002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
+	{ }	/* terminate list */
+};
+MODULE_DEVICE_TABLE(pci, carm_pci_tbl);
+
+static struct pci_driver carm_driver = {
+	.name		= DRV_NAME,
+	.id_table	= carm_pci_tbl,
+	.probe		= carm_init_one,
+	.remove		= carm_remove_one,
+};
+
+static struct block_device_operations carm_bd_ops = {
+	.owner		= THIS_MODULE,
+	.ioctl		= carm_bdev_ioctl,
+};
+
+static unsigned int carm_host_id;
+static unsigned long carm_major_alloc;
+
+
+
+static int carm_bdev_ioctl(struct inode *ino, struct file *fil,
+			   unsigned int cmd, unsigned long arg)
+{
+	void __user *usermem = (void __user *) arg;
+	struct carm_port *port = ino->i_bdev->bd_disk->private_data;
+	struct hd_geometry geom;
+
+	switch (cmd) {
+	case HDIO_GETGEO:
+		if (!usermem)
+			return -EINVAL;
+
+		geom.heads = (u8) port->dev_geom_head;
+		geom.sectors = (u8) port->dev_geom_sect;
+		geom.cylinders = port->dev_geom_cyl;
+		geom.start = get_start_sect(ino->i_bdev);
+
+		if (copy_to_user(usermem, &geom, sizeof(geom)))
+			return -EFAULT;
+		return 0;
+
+	default:
+		break;
+	}
+
+	return -EOPNOTSUPP;
+}
+
+static const u32 msg_sizes[] = { 32, 64, 128, CARM_MSG_SIZE };
+
+static inline int carm_lookup_bucket(u32 msg_size)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(msg_sizes); i++)
+		if (msg_size <= msg_sizes[i])
+			return i;
+	
+	return -ENOENT;
+}
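+
+/*
+ * Example: carm_lookup_bucket(40) returns 1, since 40 bytes does not
+ * fit the 32-byte bucket but does fit the 64-byte one.  The bucket
+ * index is what gets encoded next to the message address in the
+ * CARM_IHQP doorbell write in carm_send_msg().
+ */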
+
+static void carm_init_buckets(void __iomem *mmio)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(msg_sizes); i++)
+		writel(msg_sizes[i], mmio + CARM_CMS0 + (4 * i));
+}
+
+static inline void *carm_ref_msg(struct carm_host *host,
+				 unsigned int msg_idx)
+{
+	return host->msg_base + (msg_idx * CARM_MSG_SIZE);
+}
+
+static inline dma_addr_t carm_ref_msg_dma(struct carm_host *host,
+					  unsigned int msg_idx)
+{
+	return host->msg_dma + (msg_idx * CARM_MSG_SIZE);
+}
+
+static int carm_send_msg(struct carm_host *host,
+			 struct carm_request *crq)
+{
+	void __iomem *mmio = host->mmio;
+	u32 msg = (u32) carm_ref_msg_dma(host, crq->tag);
+	u32 cm_bucket = crq->msg_bucket;
+	u32 tmp;
+	int rc = 0;
+
+	VPRINTK("ENTER\n");
+
+	tmp = readl(mmio + CARM_HMUC);
+	if (tmp & CARM_Q_FULL) {
+#if 0
+		tmp = readl(mmio + CARM_INT_MASK);
+		tmp |= INT_Q_AVAILABLE;
+		writel(tmp, mmio + CARM_INT_MASK);
+		readl(mmio + CARM_INT_MASK);	/* flush */
+#endif
+		DPRINTK("host msg queue full\n");
+		rc = -EBUSY;
+	} else {
+		writel(msg | (cm_bucket << 1), mmio + CARM_IHQP);
+		readl(mmio + CARM_IHQP);	/* flush */
+	}
+
+	return rc;
+}
+
+static struct carm_request *carm_get_request(struct carm_host *host)
+{
+	unsigned int i;
+
+	/* obey global hardware limit on S/G entries */
+	if (host->hw_sg_used >= (CARM_MAX_HOST_SG - CARM_MAX_REQ_SG))
+		return NULL;
+
+	for (i = 0; i < CARM_MAX_Q; i++)
+		if ((host->msg_alloc & (1ULL << i)) == 0) {
+			struct carm_request *crq = &host->req[i];
+			crq->port = NULL;
+			crq->n_elem = 0;
+
+			host->msg_alloc |= (1ULL << i);
+			host->n_msgs++;
+
+			assert(host->n_msgs <= CARM_MAX_REQ);
+			return crq;
+		}
+	
+	DPRINTK("no request available, returning NULL\n");
+	return NULL;
+}
+
+static int carm_put_request(struct carm_host *host, struct carm_request *crq)
+{
+	assert(crq->tag < CARM_MAX_Q);
+
+	if (unlikely((host->msg_alloc & (1ULL << crq->tag)) == 0))
+		return -EINVAL; /* tried to clear a tag that was not active */
+
+	assert(host->hw_sg_used >= crq->n_elem);
+
+	host->msg_alloc &= ~(1ULL << crq->tag);
+	host->hw_sg_used -= crq->n_elem;
+	host->n_msgs--;
+
+	return 0;
+}
+
+static struct carm_request *carm_get_special(struct carm_host *host)
+{
+	unsigned long flags;
+	struct carm_request *crq = NULL;
+	struct request *rq;
+	int tries = 5000;
+
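+	/* poll for a free request slot: up to 5000 tries x 10 ms, ~50 s */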
+	while (tries-- > 0) {
+		spin_lock_irqsave(&host->lock, flags);
+		crq = carm_get_request(host);
+		spin_unlock_irqrestore(&host->lock, flags);
+
+		if (crq)
+			break;
+		msleep(10);
+	}
+
+	if (!crq)
+		return NULL;
+
+	rq = blk_get_request(host->oob_q, WRITE /* bogus */, GFP_KERNEL);
+	if (!rq) {
+		spin_lock_irqsave(&host->lock, flags);
+		carm_put_request(host, crq);
+		spin_unlock_irqrestore(&host->lock, flags);
+		return NULL;
+	}
+
+	crq->rq = rq;
+	return crq;
+}
+
+static int carm_array_info (struct carm_host *host, unsigned int array_idx)
+{
+	struct carm_msg_ioctl *ioc;
+	unsigned int idx;
+	u32 msg_data;
+	dma_addr_t msg_dma;
+	struct carm_request *crq;
+	int rc;
+
+	crq = carm_get_special(host);
+	if (!crq) {
+		rc = -ENOMEM;
+		goto err_out;
+	}
+
+	idx = crq->tag;
+
+	ioc = carm_ref_msg(host, idx);
+	msg_dma = carm_ref_msg_dma(host, idx);
+	msg_data = (u32) (msg_dma + sizeof(struct carm_array_info));
+
+	crq->msg_type = CARM_MSG_ARRAY;
+	crq->msg_subtype = CARM_ARRAY_INFO;
+	rc = carm_lookup_bucket(sizeof(struct carm_msg_ioctl) +
+				sizeof(struct carm_array_info));
+	BUG_ON(rc < 0);
+	crq->msg_bucket = (u32) rc;
+
+	memset(ioc, 0, sizeof(*ioc));
+	ioc->type	= CARM_MSG_ARRAY;
+	ioc->subtype	= CARM_ARRAY_INFO;
+	ioc->array_id	= (u8) array_idx;
+	ioc->handle	= cpu_to_le32(TAG_ENCODE(idx));
+	ioc->data_addr	= cpu_to_le32(msg_data);
+
+	spin_lock_irq(&host->lock);
+	assert(host->state == HST_DEV_SCAN_START ||
+	       host->state == HST_DEV_SCAN);
+	spin_unlock_irq(&host->lock);
+
+	DPRINTK("blk_insert_request, tag == %u\n", idx);
+	blk_insert_request(host->oob_q, crq->rq, 1, crq, 0);
+
+	return 0;
+
+err_out:
+	spin_lock_irq(&host->lock);
+	host->state = HST_ERROR;
+	spin_unlock_irq(&host->lock);
+	return rc;
+}
+
+typedef unsigned int (*carm_sspc_t)(struct carm_host *, unsigned int, void *);
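+
+/*
+ * A carm_sspc_t callback fills a special (non-read/write) message into
+ * the slot at `mem' for tag `idx' and returns the message size in
+ * bytes, which carm_send_special() maps to a bucket before queueing
+ * the request on the out-of-band queue.
+ */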
+
+static int carm_send_special (struct carm_host *host, carm_sspc_t func)
+{
+	struct carm_request *crq;
+	struct carm_msg_ioctl *ioc;
+	void *mem;
+	unsigned int idx, msg_size;
+	int rc;
+
+	crq = carm_get_special(host);
+	if (!crq)
+		return -ENOMEM;
+
+	idx = crq->tag;
+
+	mem = carm_ref_msg(host, idx);
+
+	msg_size = func(host, idx, mem);
+
+	ioc = mem;
+	crq->msg_type = ioc->type;
+	crq->msg_subtype = ioc->subtype;
+	rc = carm_lookup_bucket(msg_size);
+	BUG_ON(rc < 0);
+	crq->msg_bucket = (u32) rc;
+
+	DPRINTK("blk_insert_request, tag == %u\n", idx);
+	blk_insert_request(host->oob_q, crq->rq, 1, crq, 0);
+
+	return 0;
+}
+
+static unsigned int carm_fill_sync_time(struct carm_host *host,
+					unsigned int idx, void *mem)
+{
+	struct timeval tv;
+	struct carm_msg_sync_time *st = mem;
+
+	do_gettimeofday(&tv);
+
+	memset(st, 0, sizeof(*st));
+	st->type	= CARM_MSG_MISC;
+	st->subtype	= MISC_SET_TIME;
+	st->handle	= cpu_to_le32(TAG_ENCODE(idx));
+	st->timestamp	= cpu_to_le32(tv.tv_sec);
+
+	return sizeof(struct carm_msg_sync_time);
+}
+
+static unsigned int carm_fill_alloc_buf(struct carm_host *host,
+					unsigned int idx, void *mem)
+{
+	struct carm_msg_allocbuf *ab = mem;
+
+	memset(ab, 0, sizeof(*ab));
+	ab->type	= CARM_MSG_MISC;
+	ab->subtype	= MISC_ALLOC_MEM;
+	ab->handle	= cpu_to_le32(TAG_ENCODE(idx));
+	ab->n_sg	= 1;
+	ab->sg_type	= SGT_32BIT;
+	ab->addr	= cpu_to_le32(host->shm_dma + (PDC_SHM_SIZE >> 1));
+	ab->len		= cpu_to_le32(PDC_SHM_SIZE >> 1);
+	ab->evt_pool	= cpu_to_le32(host->shm_dma + (16 * 1024));
+	ab->n_evt	= cpu_to_le32(1024);
+	ab->rbuf_pool	= cpu_to_le32(host->shm_dma);
+	ab->n_rbuf	= cpu_to_le32(RMSG_Q_LEN);
+	ab->msg_pool	= cpu_to_le32(host->shm_dma + RBUF_LEN);
+	ab->n_msg	= cpu_to_le32(CARM_Q_LEN);
+	ab->sg[0].start	= cpu_to_le32(host->shm_dma + (PDC_SHM_SIZE >> 1));
+	ab->sg[0].len	= cpu_to_le32(65536);
+
+	return sizeof(struct carm_msg_allocbuf);
+}
+
+static unsigned int carm_fill_scan_channels(struct carm_host *host,
+					    unsigned int idx, void *mem)
+{
+	struct carm_msg_ioctl *ioc = mem;
+	u32 msg_data = (u32) (carm_ref_msg_dma(host, idx) +
+			      IOC_SCAN_CHAN_OFFSET);
+
+	memset(ioc, 0, sizeof(*ioc));
+	ioc->type	= CARM_MSG_IOCTL;
+	ioc->subtype	= CARM_IOC_SCAN_CHAN;
+	ioc->handle	= cpu_to_le32(TAG_ENCODE(idx));
+	ioc->data_addr	= cpu_to_le32(msg_data);
+
+	/* fill output data area with "no device" default values */
+	mem += IOC_SCAN_CHAN_OFFSET;
+	memset(mem, IOC_SCAN_CHAN_NODEV, CARM_MAX_PORTS);
+
+	return IOC_SCAN_CHAN_OFFSET + CARM_MAX_PORTS;
+}
+
+static unsigned int carm_fill_get_fw_ver(struct carm_host *host,
+					 unsigned int idx, void *mem)
+{
+	struct carm_msg_get_fw_ver *ioc = mem;
+	u32 msg_data = (u32) (carm_ref_msg_dma(host, idx) + sizeof(*ioc));
+
+	memset(ioc, 0, sizeof(*ioc));
+	ioc->type	= CARM_MSG_MISC;
+	ioc->subtype	= MISC_GET_FW_VER;
+	ioc->handle	= cpu_to_le32(TAG_ENCODE(idx));
+	ioc->data_addr	= cpu_to_le32(msg_data);
+
+	return sizeof(struct carm_msg_get_fw_ver) +
+	       sizeof(struct carm_fw_ver);
+}
+
+static inline void carm_end_request_queued(struct carm_host *host,
+					   struct carm_request *crq,
+					   int uptodate)
+{
+	struct request *req = crq->rq;
+	int rc;
+
+	rc = end_that_request_first(req, uptodate, req->hard_nr_sectors);
+	assert(rc == 0);
+
+	end_that_request_last(req);
+
+	rc = carm_put_request(host, crq);
+	assert(rc == 0);
+}
+
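+/*
+ * wait_q is a small FIFO ring (CARM_MAX_WAIT_Q entries) of request
+ * queues that were stopped because host resources (message slots or
+ * S/G entries) ran out.  carm_push_q() stops a queue and records it;
+ * carm_pop_q()/carm_round_robin() restart one stopped queue at a time
+ * as resources are freed in carm_end_rq().
+ */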
+static inline void carm_push_q (struct carm_host *host, request_queue_t *q)
+{
+	unsigned int idx = host->wait_q_prod % CARM_MAX_WAIT_Q;
+
+	blk_stop_queue(q);
+	VPRINTK("STOPPED QUEUE %p\n", q);
+
+	host->wait_q[idx] = q;
+	host->wait_q_prod++;
+	BUG_ON(host->wait_q_prod == host->wait_q_cons); /* overrun */
+}
+
+static inline request_queue_t *carm_pop_q(struct carm_host *host)
+{
+	unsigned int idx;
+
+	if (host->wait_q_prod == host->wait_q_cons)
+		return NULL;
+
+	idx = host->wait_q_cons % CARM_MAX_WAIT_Q;
+	host->wait_q_cons++;
+
+	return host->wait_q[idx];
+}
+
+static inline void carm_round_robin(struct carm_host *host)
+{
+	request_queue_t *q = carm_pop_q(host);
+	if (q) {
+		blk_start_queue(q);
+		VPRINTK("STARTED QUEUE %p\n", q);
+	}
+}
+
+static inline void carm_end_rq(struct carm_host *host, struct carm_request *crq,
+			int is_ok)
+{
+	carm_end_request_queued(host, crq, is_ok);
+	if (CARM_MAX_Q == 1)
+		carm_round_robin(host);
+	else if ((host->n_msgs <= CARM_MSG_LOW_WATER) &&
+		 (host->hw_sg_used <= CARM_SG_LOW_WATER)) {
+		carm_round_robin(host);
+	}
+}
+
+static void carm_oob_rq_fn(request_queue_t *q)
+{
+	struct carm_host *host = q->queuedata;
+	struct carm_request *crq;
+	struct request *rq;
+	int rc;
+
+	while (1) {
+		DPRINTK("get req\n");
+		rq = elv_next_request(q);
+		if (!rq)
+			break;
+
+		blkdev_dequeue_request(rq);
+
+		crq = rq->special;
+		assert(crq != NULL);
+		assert(crq->rq == rq);
+
+		crq->n_elem = 0;
+
+		DPRINTK("send req\n");
+		rc = carm_send_msg(host, crq);
+		if (rc) {
+			blk_requeue_request(q, rq);
+			carm_push_q(host, q);
+			return;		/* call us again later, eventually */
+		}
+	}
+}
+
+static void carm_rq_fn(request_queue_t *q)
+{
+	struct carm_port *port = q->queuedata;
+	struct carm_host *host = port->host;
+	struct carm_msg_rw *msg;
+	struct carm_request *crq;
+	struct request *rq;
+	struct scatterlist *sg;
+	int writing = 0, pci_dir, i, n_elem, rc;
+	u32 tmp;
+	unsigned int msg_size;
+
+queue_one_request:
+	VPRINTK("get req\n");
+	rq = elv_next_request(q);
+	if (!rq)
+		return;
+
+	crq = carm_get_request(host);
+	if (!crq) {
+		carm_push_q(host, q);
+		return;		/* call us again later, eventually */
+	}
+	crq->rq = rq;
+
+	blkdev_dequeue_request(rq);
+
+	if (rq_data_dir(rq) == WRITE) {
+		writing = 1;
+		pci_dir = PCI_DMA_TODEVICE;
+	} else {
+		pci_dir = PCI_DMA_FROMDEVICE;
+	}
+
+	/* get scatterlist from block layer */
+	sg = &crq->sg[0];
+	n_elem = blk_rq_map_sg(q, rq, sg);
+	if (n_elem <= 0) {
+		carm_end_rq(host, crq, 0);
+		return;		/* request with no s/g entries? */
+	}
+
+	/* map scatterlist to PCI bus addresses */
+	n_elem = pci_map_sg(host->pdev, sg, n_elem, pci_dir);
+	if (n_elem <= 0) {
+		carm_end_rq(host, crq, 0);
+		return;		/* request with no s/g entries? */
+	}
+	crq->n_elem = n_elem;
+	crq->port = port;
+	host->hw_sg_used += n_elem;
+
+	/*
+	 * build read/write message
+	 */
+
+	VPRINTK("build msg\n");
+	msg = (struct carm_msg_rw *) carm_ref_msg(host, crq->tag);
+
+	if (writing) {
+		msg->type = CARM_MSG_WRITE;
+		crq->msg_type = CARM_MSG_WRITE;
+	} else {
+		msg->type = CARM_MSG_READ;
+		crq->msg_type = CARM_MSG_READ;
+	}
+
+	msg->id		= port->port_no;
+	msg->sg_count	= n_elem;
+	msg->sg_type	= SGT_32BIT;
+	msg->handle	= cpu_to_le32(TAG_ENCODE(crq->tag));
+	msg->lba	= cpu_to_le32(rq->sector & 0xffffffff);
+	tmp		= (rq->sector >> 16) >> 16;
+	msg->lba_high	= cpu_to_le16( (u16) tmp );
+	msg->lba_count	= cpu_to_le16(rq->nr_sectors);
+
+	msg_size = sizeof(struct carm_msg_rw) - sizeof(msg->sg);
+	for (i = 0; i < n_elem; i++) {
+		struct carm_msg_sg *carm_sg = &msg->sg[i];
+		carm_sg->start = cpu_to_le32(sg_dma_address(&crq->sg[i]));
+		carm_sg->len = cpu_to_le32(sg_dma_len(&crq->sg[i]));
+		msg_size += sizeof(struct carm_msg_sg);
+	}
+
+	rc = carm_lookup_bucket(msg_size);
+	BUG_ON(rc < 0);
+	crq->msg_bucket = (u32) rc;
+
+	/*
+	 * queue read/write message to hardware
+	 */
+
+	VPRINTK("send msg, tag == %u\n", crq->tag);
+	rc = carm_send_msg(host, crq);
+	if (rc) {
+		carm_put_request(host, crq);
+		blk_requeue_request(q, rq);
+		carm_push_q(host, q);
+		return;		/* call us again later, eventually */
+	}
+
+	goto queue_one_request;
+}
+
+static void carm_handle_array_info(struct carm_host *host,
+				   struct carm_request *crq, u8 *mem,
+				   int is_ok)
+{
+	struct carm_port *port;
+	u8 *msg_data = mem + sizeof(struct carm_array_info);
+	struct carm_array_info *desc = (struct carm_array_info *) msg_data;
+	u64 lo, hi;
+	int cur_port;
+	size_t slen;
+
+	DPRINTK("ENTER\n");
+
+	carm_end_rq(host, crq, is_ok);
+
+	if (!is_ok)
+		goto out;
+	if (le32_to_cpu(desc->array_status) & ARRAY_NO_EXIST)
+		goto out;
+
+	cur_port = host->cur_scan_dev;
+
+	/* should never occur */
+	if ((cur_port < 0) || (cur_port >= CARM_MAX_PORTS)) {
+		printk(KERN_ERR PFX "BUG: cur_scan_dev==%d, array_id==%d\n",
+		       cur_port, (int) desc->array_id);
+		goto out;
+	}
+
+	port = &host->port[cur_port];
+
+	lo = (u64) le32_to_cpu(desc->size);
+	hi = (u64) le16_to_cpu(desc->size_hi);
+
+	port->capacity = lo | (hi << 32);
+	port->dev_geom_head = le16_to_cpu(desc->head);
+	port->dev_geom_sect = le16_to_cpu(desc->sect);
+	port->dev_geom_cyl = le16_to_cpu(desc->cyl);
+
+	host->dev_active |= (1 << cur_port);
+
+	strncpy(port->name, desc->name, sizeof(port->name));
+	port->name[sizeof(port->name) - 1] = 0;
+	slen = strlen(port->name);
+	while (slen && (port->name[slen - 1] == ' ')) {
+		port->name[slen - 1] = 0;
+		slen--;
+	}
+
+	printk(KERN_INFO DRV_NAME "(%s): port %u device %Lu sectors\n",
+	       pci_name(host->pdev), port->port_no,
+	       (unsigned long long) port->capacity);
+	printk(KERN_INFO DRV_NAME "(%s): port %u device \"%s\"\n",
+	       pci_name(host->pdev), port->port_no, port->name);
+
+out:
+	assert(host->state == HST_DEV_SCAN);
+	schedule_work(&host->fsm_task);
+}
+
+static void carm_handle_scan_chan(struct carm_host *host,
+				  struct carm_request *crq, u8 *mem,
+				  int is_ok)
+{
+	u8 *msg_data = mem + IOC_SCAN_CHAN_OFFSET;
+	unsigned int i, dev_count = 0;
+	int new_state = HST_DEV_SCAN_START;
+
+	DPRINTK("ENTER\n");
+
+	carm_end_rq(host, crq, is_ok);
+
+	if (!is_ok) {
+		new_state = HST_ERROR;
+		goto out;
+	}
+
+	/* TODO: scan and support non-disk devices */
+	for (i = 0; i < CARM_MAX_PORTS; i++)
+		if (msg_data[i] == 0) { /* direct-access device (disk) */
+			host->dev_present |= (1 << i);
+			dev_count++;
+		}
+
+	printk(KERN_INFO DRV_NAME "(%s): found %u interesting devices\n",
+	       pci_name(host->pdev), dev_count);
+
+out:
+	assert(host->state == HST_PORT_SCAN);
+	host->state = new_state;
+	schedule_work(&host->fsm_task);
+}
+
+static void carm_handle_generic(struct carm_host *host,
+				struct carm_request *crq, int is_ok,
+				int cur_state, int next_state)
+{
+	DPRINTK("ENTER\n");
+
+	carm_end_rq(host, crq, is_ok);
+
+	assert(host->state == cur_state);
+	if (is_ok)
+		host->state = next_state;
+	else
+		host->state = HST_ERROR;
+	schedule_work(&host->fsm_task);
+}
+
+static inline void carm_handle_rw(struct carm_host *host,
+				  struct carm_request *crq, int is_ok)
+{
+	int pci_dir;
+
+	VPRINTK("ENTER\n");
+
+	if (rq_data_dir(crq->rq) == WRITE)
+		pci_dir = PCI_DMA_TODEVICE;
+	else
+		pci_dir = PCI_DMA_FROMDEVICE;
+
+	pci_unmap_sg(host->pdev, &crq->sg[0], crq->n_elem, pci_dir);
+
+	carm_end_rq(host, crq, is_ok);
+}
+
+static inline void carm_handle_resp(struct carm_host *host,
+				    __le32 ret_handle_le, u32 status)
+{
+	u32 handle = le32_to_cpu(ret_handle_le);
+	unsigned int msg_idx;
+	struct carm_request *crq;
+	int is_ok = (status == RMSG_OK);
+	u8 *mem;
+
+	VPRINTK("ENTER, handle == 0x%x\n", handle);
+
+	if (unlikely(!TAG_VALID(handle))) {
+		printk(KERN_ERR DRV_NAME "(%s): BUG: invalid tag 0x%x\n",
+		       pci_name(host->pdev), handle);
+		return;
+	}
+
+	msg_idx = TAG_DECODE(handle);
+	VPRINTK("tag == %u\n", msg_idx);
+
+	crq = &host->req[msg_idx];
+
+	/* fast path */
+	if (likely(crq->msg_type == CARM_MSG_READ ||
+		   crq->msg_type == CARM_MSG_WRITE)) {
+		carm_handle_rw(host, crq, is_ok);
+		return;
+	}
+
+	mem = carm_ref_msg(host, msg_idx);
+
+	switch (crq->msg_type) {
+	case CARM_MSG_IOCTL: {
+		switch (crq->msg_subtype) {
+		case CARM_IOC_SCAN_CHAN:
+			carm_handle_scan_chan(host, crq, mem, is_ok);
+			break;
+		default:
+			/* unknown / invalid response */
+			goto err_out;
+		}
+		break;
+	}
+
+	case CARM_MSG_MISC: {
+		switch (crq->msg_subtype) {
+		case MISC_ALLOC_MEM:
+			carm_handle_generic(host, crq, is_ok,
+					    HST_ALLOC_BUF, HST_SYNC_TIME);
+			break;
+		case MISC_SET_TIME:
+			carm_handle_generic(host, crq, is_ok,
+					    HST_SYNC_TIME, HST_GET_FW_VER);
+			break;
+		case MISC_GET_FW_VER: {
+			struct carm_fw_ver *ver = (struct carm_fw_ver *)
+				(mem + sizeof(struct carm_msg_get_fw_ver));
+			if (is_ok) {
+				host->fw_ver = le32_to_cpu(ver->version);
+				host->flags |= (ver->features & FL_FW_VER_MASK);
+			}
+			carm_handle_generic(host, crq, is_ok,
+					    HST_GET_FW_VER, HST_PORT_SCAN);
+			break;
+		}
+		default:
+			/* unknown / invalid response */
+			goto err_out;
+		}
+		break;
+	}
+
+	case CARM_MSG_ARRAY: {
+		switch (crq->msg_subtype) {
+		case CARM_ARRAY_INFO:
+			carm_handle_array_info(host, crq, mem, is_ok);
+			break;
+		default:
+			/* unknown / invalid response */
+			goto err_out;
+		}
+		break;
+	}
+
+	default:
+		/* unknown / invalid response */
+		goto err_out;
+	}
+
+	return;
+
+err_out:
+	printk(KERN_WARNING DRV_NAME "(%s): BUG: unhandled message type %d/%d\n",
+	       pci_name(host->pdev), crq->msg_type, crq->msg_subtype);
+	carm_end_rq(host, crq, 0);
+}
+
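+/*
+ * The response area in SHM is a ring of RMSG_Q_LEN entries.  The
+ * driver pre-fills every status word with 0xffffffff (see
+ * carm_init_responses()), and that value doubles as the "empty slot"
+ * sentinel: we consume entries until we read it back, restore the
+ * sentinel behind us, and report how far we got by writing the byte
+ * offset (idx * RMSG_SZ) to CARM_RESP_IDX.  Bit 31 of a live status
+ * word distinguishes asynchronous events from replies to our own
+ * messages.
+ */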
+static inline void carm_handle_responses(struct carm_host *host)
+{
+	void __iomem *mmio = host->mmio;
+	struct carm_response *resp = (struct carm_response *) host->shm;
+	unsigned int work = 0;
+	unsigned int idx = host->resp_idx % RMSG_Q_LEN;
+
+	while (1) {
+		u32 status = le32_to_cpu(resp[idx].status);
+
+		if (status == 0xffffffff) {
+			VPRINTK("ending response on index %u\n", idx);
+			writel(idx << 3, mmio + CARM_RESP_IDX);
+			break;
+		}
+
+		/* response to a message we sent */
+		else if ((status & (1 << 31)) == 0) {
+			VPRINTK("handling msg response on index %u\n", idx);
+			carm_handle_resp(host, resp[idx].ret_handle, status);
+			resp[idx].status = cpu_to_le32(0xffffffff);
+		}
+
+		/* asynchronous events the hardware throws our way */
+		else if ((status & 0xff000000) == (1 << 31)) {
+			u8 *evt_type_ptr = (u8 *) &resp[idx];
+			u8 evt_type = *evt_type_ptr;
+			printk(KERN_WARNING DRV_NAME "(%s): unhandled event type %d\n",
+			       pci_name(host->pdev), (int) evt_type);
+			resp[idx].status = cpu_to_le32(0xffffffff);
+		}
+
+		idx = NEXT_RESP(idx);
+		work++;
+	}
+
+	VPRINTK("EXIT, work==%u\n", work);
+	host->resp_idx += work;
+}
+
+static irqreturn_t carm_interrupt(int irq, void *__host, struct pt_regs *regs)
+{
+	struct carm_host *host = __host;
+	void __iomem *mmio;
+	u32 mask;
+	int handled = 0;
+	unsigned long flags;
+
+	if (!host) {
+		VPRINTK("no host\n");
+		return IRQ_NONE;
+	}
+
+	spin_lock_irqsave(&host->lock, flags);
+
+	mmio = host->mmio;
+
+	/* reading should also clear interrupts */
+	mask = readl(mmio + CARM_INT_STAT);
+
+	if (mask == 0 || mask == 0xffffffff) {
+		VPRINTK("no work, mask == 0x%x\n", mask);
+		goto out;
+	}
+
+	if (mask & INT_ACK_MASK)
+		writel(mask, mmio + CARM_INT_STAT);
+
+	if (unlikely(host->state == HST_INVALID)) {
+		VPRINTK("not initialized yet, mask = 0x%x\n", mask);
+		goto out;
+	}
+
+	if (mask & CARM_HAVE_RESP) {
+		handled = 1;
+		carm_handle_responses(host);
+	}
+
+out:
+	spin_unlock_irqrestore(&host->lock, flags);
+	VPRINTK("EXIT\n");
+	return IRQ_RETVAL(handled);
+}
+
+static void carm_fsm_task (void *_data)
+{
+	struct carm_host *host = _data;
+	unsigned long flags;
+	unsigned int state;
+	int rc, i, next_dev;
+	int reschedule = 0;
+	int new_state = HST_INVALID;
+
+	spin_lock_irqsave(&host->lock, flags);
+	state = host->state;
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	DPRINTK("ENTER, state == %s\n", state_name[state]);
+
+	switch (state) {
+	case HST_PROBE_START:
+		new_state = HST_ALLOC_BUF;
+		reschedule = 1;
+		break;
+
+	case HST_ALLOC_BUF:
+		rc = carm_send_special(host, carm_fill_alloc_buf);
+		if (rc) {
+			new_state = HST_ERROR;
+			reschedule = 1;
+		}
+		break;
+
+	case HST_SYNC_TIME:
+		rc = carm_send_special(host, carm_fill_sync_time);
+		if (rc) {
+			new_state = HST_ERROR;
+			reschedule = 1;
+		}
+		break;
+
+	case HST_GET_FW_VER:
+		rc = carm_send_special(host, carm_fill_get_fw_ver);
+		if (rc) {
+			new_state = HST_ERROR;
+			reschedule = 1;
+		}
+		break;
+
+	case HST_PORT_SCAN:
+		rc = carm_send_special(host, carm_fill_scan_channels);
+		if (rc) {
+			new_state = HST_ERROR;
+			reschedule = 1;
+		}
+		break;
+
+	case HST_DEV_SCAN_START:
+		host->cur_scan_dev = -1;
+		new_state = HST_DEV_SCAN;
+		reschedule = 1;
+		break;
+
+	case HST_DEV_SCAN:
+		next_dev = -1;
+		for (i = host->cur_scan_dev + 1; i < CARM_MAX_PORTS; i++)
+			if (host->dev_present & (1 << i)) {
+				next_dev = i;
+				break;
+			}
+
+		if (next_dev >= 0) {
+			host->cur_scan_dev = next_dev;
+			rc = carm_array_info(host, next_dev);
+			if (rc) {
+				new_state = HST_ERROR;
+				reschedule = 1;
+			}
+		} else {
+			new_state = HST_DEV_ACTIVATE;
+			reschedule = 1;
+		}
+		break;
+
+	case HST_DEV_ACTIVATE: {
+		int activated = 0;
+		for (i = 0; i < CARM_MAX_PORTS; i++)
+			if (host->dev_active & (1 << i)) {
+				struct carm_port *port = &host->port[i];
+				struct gendisk *disk = port->disk;
+
+				set_capacity(disk, port->capacity);
+				add_disk(disk);
+				activated++;
+			}
+
+		printk(KERN_INFO DRV_NAME "(%s): %d ports activated\n",
+		       pci_name(host->pdev), activated);
+
+		new_state = HST_PROBE_FINISHED;
+		reschedule = 1;
+		break;
+	}
+
+	case HST_PROBE_FINISHED:
+		up(&host->probe_sem);
+		break;
+
+	case HST_ERROR:
+		/* FIXME: TODO */
+		break;
+
+	default:
+		/* should never occur */
+		printk(KERN_ERR PFX "BUG: unknown state %d\n", state);
+		assert(0);
+		break;
+	}
+
+	if (new_state != HST_INVALID) {
+		spin_lock_irqsave(&host->lock, flags);
+		host->state = new_state;
+		spin_unlock_irqrestore(&host->lock, flags);
+	}
+	if (reschedule)
+		schedule_work(&host->fsm_task);
+}
+
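+/*
+ * Poll CARM_LMUC until the given bits are all set (test_bit != 0) or
+ * all clear (test_bit == 0); 50000 iterations at 100 us each bounds
+ * the wait at roughly five seconds.
+ */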
+static int carm_init_wait(void __iomem *mmio, u32 bits, unsigned int test_bit)
+{
+	unsigned int i;
+
+	for (i = 0; i < 50000; i++) {
+		u32 tmp = readl(mmio + CARM_LMUC);
+		udelay(100);
+
+		if (test_bit) {
+			if ((tmp & bits) == bits)
+				return 0;
+		} else {
+			if ((tmp & bits) == 0)
+				return 0;
+		}
+
+		cond_resched();
+	}
+
+	printk(KERN_ERR PFX "carm_init_wait timeout, bits == 0x%x, test_bit == %s\n",
+	       bits, test_bit ? "yes" : "no");
+	return -EBUSY;
+}
+
+static void carm_init_responses(struct carm_host *host)
+{
+	void __iomem *mmio = host->mmio;
+	unsigned int i;
+	struct carm_response *resp = (struct carm_response *) host->shm;
+
+	for (i = 0; i < RMSG_Q_LEN; i++)
+		resp[i].status = cpu_to_le32(0xffffffff);
+
+	writel(0, mmio + CARM_RESP_IDX);
+}
+
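+/*
+ * Bring-up handshake, in the order implemented below: acknowledge any
+ * pending firmware init (CARM_INITC), wait for the message units to
+ * quiesce (clear CME/RME in CARM_HMUC), program the message-size
+ * buckets and the response buffer address/size, re-enable the units
+ * (CME/RME/WZBC), unmask interrupts, and finally kick the probe state
+ * machine at HST_PROBE_START.
+ */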
+static int carm_init_host(struct carm_host *host)
+{
+	void __iomem *mmio = host->mmio;
+	u32 tmp;
+	u8 tmp8;
+	int rc;
+
+	DPRINTK("ENTER\n");
+
+	writel(0, mmio + CARM_INT_MASK);
+
+	tmp8 = readb(mmio + CARM_INITC);
+	if (tmp8 & 0x01) {
+		tmp8 &= ~0x01;
+		writeb(tmp8, mmio + CARM_INITC);
+		readb(mmio + CARM_INITC);	/* flush */
+
+		DPRINTK("snooze...\n");
+		msleep(5000);
+	}
+
+	tmp = readl(mmio + CARM_HMUC);
+	if (tmp & CARM_CME) {
+		DPRINTK("CME bit present, waiting\n");
+		rc = carm_init_wait(mmio, CARM_CME, 1);
+		if (rc) {
+			DPRINTK("EXIT, carm_init_wait 1 failed\n");
+			return rc;
+		}
+	}
+	if (tmp & CARM_RME) {
+		DPRINTK("RME bit present, waiting\n");
+		rc = carm_init_wait(mmio, CARM_RME, 1);
+		if (rc) {
+			DPRINTK("EXIT, carm_init_wait 2 failed\n");
+			return rc;
+		}
+	}
+
+	tmp &= ~(CARM_RME | CARM_CME);
+	writel(tmp, mmio + CARM_HMUC);
+	readl(mmio + CARM_HMUC);	/* flush */
+
+	rc = carm_init_wait(mmio, CARM_RME | CARM_CME, 0);
+	if (rc) {
+		DPRINTK("EXIT, carm_init_wait 3 failed\n");
+		return rc;
+	}
+
+	carm_init_buckets(mmio);
+
+	writel(host->shm_dma & 0xffffffff, mmio + RBUF_ADDR_LO);
+	writel((host->shm_dma >> 16) >> 16, mmio + RBUF_ADDR_HI);
+	writel(RBUF_LEN, mmio + RBUF_BYTE_SZ);
+
+	tmp = readl(mmio + CARM_HMUC);
+	tmp |= (CARM_RME | CARM_CME | CARM_WZBC);
+	writel(tmp, mmio + CARM_HMUC);
+	readl(mmio + CARM_HMUC);	/* flush */
+
+	rc = carm_init_wait(mmio, CARM_RME | CARM_CME, 1);
+	if (rc) {
+		DPRINTK("EXIT, carm_init_wait 4 failed\n");
+		return rc;
+	}
+
+	writel(0, mmio + CARM_HMPHA);
+	writel(INT_DEF_MASK, mmio + CARM_INT_MASK);
+
+	carm_init_responses(host);
+
+	/* start initialization, probing state machine */
+	spin_lock_irq(&host->lock);
+	assert(host->state == HST_INVALID);
+	host->state = HST_PROBE_START;
+	spin_unlock_irq(&host->lock);
+	schedule_work(&host->fsm_task);
+
+	DPRINTK("EXIT\n");
+	return 0;
+}
+
+static int carm_init_disks(struct carm_host *host)
+{
+	unsigned int i;
+	int rc = 0;
+
+	for (i = 0; i < CARM_MAX_PORTS; i++) {
+		struct gendisk *disk;
+		request_queue_t *q;
+		struct carm_port *port;
+
+		port = &host->port[i];
+		port->host = host;
+		port->port_no = i;
+
+		disk = alloc_disk(CARM_MINORS_PER_MAJOR);
+		if (!disk) {
+			rc = -ENOMEM;
+			break;
+		}
+
+		port->disk = disk;
+		sprintf(disk->disk_name, DRV_NAME "/%u",
+			(unsigned int) (host->id * CARM_MAX_PORTS) + i);
+		sprintf(disk->devfs_name, DRV_NAME "/%u_%u", host->id, i);
+		disk->major = host->major;
+		disk->first_minor = i * CARM_MINORS_PER_MAJOR;
+		disk->fops = &carm_bd_ops;
+		disk->private_data = port;
+
+		q = blk_init_queue(carm_rq_fn, &host->lock);
+		if (!q) {
+			rc = -ENOMEM;
+			break;
+		}
+		disk->queue = q;
+		blk_queue_max_hw_segments(q, CARM_MAX_REQ_SG);
+		blk_queue_max_phys_segments(q, CARM_MAX_REQ_SG);
+		blk_queue_segment_boundary(q, CARM_SG_BOUNDARY);
+
+		q->queuedata = port;
+	}
+
+	return rc;
+}
+
+static void carm_free_disks(struct carm_host *host)
+{
+	unsigned int i;
+
+	for (i = 0; i < CARM_MAX_PORTS; i++) {
+		struct gendisk *disk = host->port[i].disk;
+		if (disk) {
+			request_queue_t *q = disk->queue;
+
+			if (disk->flags & GENHD_FL_UP)
+				del_gendisk(disk);
+			if (q)
+				blk_cleanup_queue(q);
+			put_disk(disk);
+		}
+	}
+}
+
+static int carm_init_shm(struct carm_host *host)
+{
+	host->shm = pci_alloc_consistent(host->pdev, CARM_SHM_SIZE,
+					 &host->shm_dma);
+	if (!host->shm)
+		return -ENOMEM;
+
+	host->msg_base = host->shm + RBUF_LEN;
+	host->msg_dma = host->shm_dma + RBUF_LEN;
+
+	memset(host->shm, 0xff, RBUF_LEN);
+	memset(host->msg_base, 0, PDC_SHM_SIZE - RBUF_LEN);
+
+	return 0;
+}
+
+static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	static unsigned int printed_version;
+	struct carm_host *host;
+	unsigned int pci_dac;
+	int rc;
+	request_queue_t *q;
+	unsigned int i;
+
+	if (!printed_version++)
+		printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n");
+
+	rc = pci_enable_device(pdev);
+	if (rc)
+		return rc;
+
+	rc = pci_request_regions(pdev, DRV_NAME);
+	if (rc)
+		goto err_out;
+
+#if IF_64BIT_DMA_IS_POSSIBLE /* grrrr... */
+	rc = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
+	if (!rc) {
+		rc = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
+		if (rc) {
+			printk(KERN_ERR DRV_NAME "(%s): consistent DMA mask failure\n",
+				pci_name(pdev));
+			goto err_out_regions;
+		}
+		pci_dac = 1;
+	} else {
+#endif
+		rc = pci_set_dma_mask(pdev, 0xffffffffULL);
+		if (rc) {
+			printk(KERN_ERR DRV_NAME "(%s): DMA mask failure\n",
+				pci_name(pdev));
+			goto err_out_regions;
+		}
+		pci_dac = 0;
+#if IF_64BIT_DMA_IS_POSSIBLE /* grrrr... */
+	}
+#endif
+
+	host = kmalloc(sizeof(*host), GFP_KERNEL);
+	if (!host) {
+		printk(KERN_ERR DRV_NAME "(%s): memory alloc failure\n",
+		       pci_name(pdev));
+		rc = -ENOMEM;
+		goto err_out_regions;
+	}
+
+	memset(host, 0, sizeof(*host));
+	host->pdev = pdev;
+	host->flags = pci_dac ? FL_DAC : 0;
+	spin_lock_init(&host->lock);
+	INIT_WORK(&host->fsm_task, carm_fsm_task, host);
+	init_MUTEX_LOCKED(&host->probe_sem);
+
+	for (i = 0; i < ARRAY_SIZE(host->req); i++)
+		host->req[i].tag = i;
+
+	host->mmio = ioremap(pci_resource_start(pdev, 0),
+			     pci_resource_len(pdev, 0));
+	if (!host->mmio) {
+		printk(KERN_ERR DRV_NAME "(%s): MMIO alloc failure\n",
+		       pci_name(pdev));
+		rc = -ENOMEM;
+		goto err_out_kfree;
+	}
+
+	rc = carm_init_shm(host);
+	if (rc) {
+		printk(KERN_ERR DRV_NAME "(%s): DMA SHM alloc failure\n",
+		       pci_name(pdev));
+		goto err_out_iounmap;
+	}
+
+	q = blk_init_queue(carm_oob_rq_fn, &host->lock);
+	if (!q) {
+		printk(KERN_ERR DRV_NAME "(%s): OOB queue alloc failure\n",
+		       pci_name(pdev));
+		rc = -ENOMEM;
+		goto err_out_pci_free;
+	}
+	host->oob_q = q;
+	q->queuedata = host;
+
+	/*
+	 * Figure out which major to use: 160, 161, or dynamic
+	 */
+	if (!test_and_set_bit(0, &carm_major_alloc))
+		host->major = 160;
+	else if (!test_and_set_bit(1, &carm_major_alloc))
+		host->major = 161;
+	else
+		host->flags |= FL_DYN_MAJOR;
+
+	host->id = carm_host_id;
+	sprintf(host->name, DRV_NAME "%d", carm_host_id);
+
+	rc = register_blkdev(host->major, host->name);
+	if (rc < 0)
+		goto err_out_free_majors;
+	if (host->flags & FL_DYN_MAJOR)
+		host->major = rc;
+
+	devfs_mk_dir(DRV_NAME);
+
+	rc = carm_init_disks(host);
+	if (rc)
+		goto err_out_blkdev_disks;
+
+	pci_set_master(pdev);
+
+	rc = request_irq(pdev->irq, carm_interrupt, SA_SHIRQ, DRV_NAME, host);
+	if (rc) {
+		printk(KERN_ERR DRV_NAME "(%s): irq alloc failure\n",
+		       pci_name(pdev));
+		goto err_out_blkdev_disks;
+	}
+
+	rc = carm_init_host(host);
+	if (rc)
+		goto err_out_free_irq;
+
+	DPRINTK("waiting for probe_sem\n");
+	down(&host->probe_sem);
+
+	printk(KERN_INFO "%s: pci %s, ports %d, io %lx, irq %u, major %d\n",
+	       host->name, pci_name(pdev), (int) CARM_MAX_PORTS,
+	       pci_resource_start(pdev, 0), pdev->irq, host->major);
+
+	carm_host_id++;
+	pci_set_drvdata(pdev, host);
+	return 0;
+
+err_out_free_irq:
+	free_irq(pdev->irq, host);
+err_out_blkdev_disks:
+	carm_free_disks(host);
+	unregister_blkdev(host->major, host->name);
+err_out_free_majors:
+	if (host->major == 160)
+		clear_bit(0, &carm_major_alloc);
+	else if (host->major == 161)
+		clear_bit(1, &carm_major_alloc);
+	blk_cleanup_queue(host->oob_q);
+err_out_pci_free:
+	pci_free_consistent(pdev, CARM_SHM_SIZE, host->shm, host->shm_dma);
+err_out_iounmap:
+	iounmap(host->mmio);
+err_out_kfree:
+	kfree(host);
+err_out_regions:
+	pci_release_regions(pdev);
+err_out:
+	pci_disable_device(pdev);
+	return rc;
+}
+
+static void carm_remove_one (struct pci_dev *pdev)
+{
+	struct carm_host *host = pci_get_drvdata(pdev);
+
+	if (!host) {
+		printk(KERN_ERR PFX "BUG: no host data for PCI(%s)\n",
+		       pci_name(pdev));
+		return;
+	}
+
+	free_irq(pdev->irq, host);
+	carm_free_disks(host);
+	devfs_remove(DRV_NAME);
+	unregister_blkdev(host->major, host->name);
+	if (host->major == 160)
+		clear_bit(0, &carm_major_alloc);
+	else if (host->major == 161)
+		clear_bit(1, &carm_major_alloc);
+	blk_cleanup_queue(host->oob_q);
+	pci_free_consistent(pdev, CARM_SHM_SIZE, host->shm, host->shm_dma);
+	iounmap(host->mmio);
+	kfree(host);
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+}
+
+static int __init carm_init(void)
+{
+	return pci_module_init(&carm_driver);
+}
+
+static void __exit carm_exit(void)
+{
+	pci_unregister_driver(&carm_driver);
+}
+
+module_init(carm_init);
+module_exit(carm_exit);
+
+
diff --git a/drivers/block/ub.c b/drivers/block/ub.c
new file mode 100644
index 0000000..ce42889
--- /dev/null
+++ b/drivers/block/ub.c
@@ -0,0 +1,2215 @@
+/*
+ * The low performance USB storage driver (ub).
+ *
+ * Copyright (c) 1999, 2000 Matthew Dharm (mdharm-usb@one-eyed-alien.net)
+ * Copyright (C) 2004 Pete Zaitcev (zaitcev@yahoo.com)
+ *
+ * This work is a part of the Linux kernel, is derived from it,
+ * and is not licensed separately. See file COPYING for details.
+ *
+ * TODO (sorted by decreasing priority)
+ *  -- Do resets with usb_device_reset (needs a thread context, use khubd)
+ *  -- set readonly flag for CDs, set removable flag for CF readers
+ *  -- do inquiry and verify we got a disk and not a tape (for LUN mismatch)
+ *  -- support pphaneuf's SDDR-75 with two LUNs (also broken capacity...)
+ *  -- special case some senses, e.g. 3a/0 -> no media present, reduce retries
+ *  -- verify the 13 conditions and do bulk resets
+ *  -- normal pool of commands instead of cmdv[]?
+ *  -- kill last_pipe and simply do two-state clearing on both pipes
+ *  -- verify protocol (bulk) from USB descriptors (maybe...)
+ *  -- highmem and sg
+ *  -- move top_sense and work_bcs into separate allocations (if they survive)
+ *     for cache purists and esoteric architectures.
+ *  -- prune comments, they are too voluminous
+ *  -- Exterminate P3 printks
+ *  -- Resolve XXX's
+ *  -- Redo "benh's retries", perhaps have spin-up code to handle them. V:D=?
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/blkdev.h>
+#include <linux/devfs_fs_kernel.h>
+#include <linux/timer.h>
+#include <scsi/scsi.h>
+
+#define DRV_NAME "ub"
+#define DEVFS_NAME DRV_NAME
+
+#define UB_MAJOR 180
+
+/*
+ * Definitions which have to be scattered once we understand the layout better.
+ */
+
+/* Transport (despite PR in the name) */
+#define US_PR_BULK	0x50		/* bulk only */
+
+/* Protocol */
+#define US_SC_SCSI	0x06		/* Transparent */
+
+/*
+ */
+#define UB_MINORS_PER_MAJOR	8
+
+#define UB_MAX_CDB_SIZE      16		/* Corresponds to Bulk */
+
+#define UB_SENSE_SIZE  18
+
+/*
+ */
+
+/* command block wrapper */
+struct bulk_cb_wrap {
+	__le32	Signature;		/* contains 'USBC' */
+	u32	Tag;			/* unique per command id */
+	__le32	DataTransferLength;	/* size of data */
+	u8	Flags;			/* direction in bit 7 */
+	u8	Lun;			/* LUN normally 0 */
+	u8	Length;			/* length of the CDB */
+	u8	CDB[UB_MAX_CDB_SIZE];	/* max command */
+};
+
+#define US_BULK_CB_WRAP_LEN	31
+#define US_BULK_CB_SIGN		0x43425355	/* spells out 'USBC' */
+#define US_BULK_FLAG_IN		1
+#define US_BULK_FLAG_OUT	0
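+
+/*
+ * An illustrative filling of the wrapper for a one-sector READ(10) of
+ * LBA 0 (values assumed for the example; the real filling is done in
+ * ub_scsi_cmd_start below):
+ *
+ *	struct bulk_cb_wrap cb;
+ *	cb.Signature = cpu_to_le32(US_BULK_CB_SIGN);
+ *	cb.Tag = 1;				/* echoed back in the CSW */
+ *	cb.DataTransferLength = cpu_to_le32(512);
+ *	cb.Flags = 0x80;			/* bit 7 set: device-to-host */
+ *	cb.Lun = 0;
+ *	cb.Length = 10;				/* READ(10) CDB length */
+ */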
+
+/* command status wrapper */
+struct bulk_cs_wrap {
+	__le32	Signature;		/* should = 'USBS' */
+	u32	Tag;			/* same as original command */
+	__le32	Residue;		/* amount not transferred */
+	u8	Status;			/* see below */
+};
+
+#define US_BULK_CS_WRAP_LEN	13
+#define US_BULK_CS_SIGN		0x53425355	/* spells out 'USBS' */
+/* This is for Olympus Camedia digital cameras */
+#define US_BULK_CS_OLYMPUS_SIGN	0x55425355	/* spells out 'USBU' */
+#define US_BULK_STAT_OK		0
+#define US_BULK_STAT_FAIL	1
+#define US_BULK_STAT_PHASE	2
+
+/* bulk-only class specific requests */
+#define US_BULK_RESET_REQUEST	0xff
+#define US_BULK_GET_MAX_LUN	0xfe
+
+/*
+ */
+struct ub_dev;
+
+#define UB_MAX_REQ_SG	1
+#define UB_MAX_SECTORS 64
+
+/*
+ * A second is more than enough for a 32K transfer (UB_MAX_SECTORS)
+ * even if a webcam hogs the bus, but some devices need time to spin up.
+ */
+#define UB_URB_TIMEOUT	(HZ*2)
+#define UB_DATA_TIMEOUT	(HZ*5)	/* ZIP does spin-ups in the data phase */
+#define UB_STAT_TIMEOUT	(HZ*5)	/* Same spinups and eject for a dataless cmd. */
+#define UB_CTRL_TIMEOUT	(HZ/2)	/* 500ms ought to be enough to clear a stall */
+
+/*
+ * An instance of a SCSI command in transit.
+ */
+#define UB_DIR_NONE	0
+#define UB_DIR_READ	1
+#define UB_DIR_ILLEGAL2	2
+#define UB_DIR_WRITE	3
+
+#define UB_DIR_CHAR(c)  (((c)==UB_DIR_WRITE)? 'w': \
+			 (((c)==UB_DIR_READ)? 'r': 'n'))
+
+enum ub_scsi_cmd_state {
+	UB_CMDST_INIT,			/* Initial state */
+	UB_CMDST_CMD,			/* Command submitted */
+	UB_CMDST_DATA,			/* Data phase */
+	UB_CMDST_CLR2STS,		/* Clearing before requesting status */
+	UB_CMDST_STAT,			/* Status phase */
+	UB_CMDST_CLEAR,			/* Clearing a stall (halt, actually) */
+	UB_CMDST_SENSE,			/* Sending Request Sense */
+	UB_CMDST_DONE			/* Final state */
+};
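+
+/*
+ * Typical progressions through these states (a sketch only; the
+ * authoritative transitions are in ub_scsi_urb_compl below):
+ *
+ *	INIT -> CMD -> DATA -> STAT -> DONE		happy path
+ *	INIT -> CMD -> STAT -> DONE			dataless command
+ *	... DATA -> CLR2STS -> STAT ...			stall cleared before CSW
+ *	... STAT -> SENSE -> DONE			CSW reported a failure
+ */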
+
+static char *ub_scsi_cmd_stname[] = {
+	".  ",
+	"Cmd",
+	"dat",
+	"c2s",
+	"sts",
+	"clr",
+	"Sen",
+	"fin"
+};
+
+struct ub_scsi_cmd {
+	unsigned char cdb[UB_MAX_CDB_SIZE];
+	unsigned char cdb_len;
+
+	unsigned char dir;		/* 0 - none, 1 - read, 3 - write. */
+	unsigned char trace_index;
+	enum ub_scsi_cmd_state state;
+	unsigned int tag;
+	struct ub_scsi_cmd *next;
+
+	int error;			/* Return code - valid upon done */
+	unsigned int act_len;		/* Return size */
+	unsigned char key, asc, ascq;	/* May be valid if error==-EIO */
+
+	int stat_count;			/* Retries getting status. */
+
+	/*
+	 * We do not support transfers from highmem pages
+	 * because the underlying USB framework does not do what we need.
+	 */
+	char *data;			/* Requested buffer */
+	unsigned int len;		/* Requested length */
+	// struct scatterlist sgv[UB_MAX_REQ_SG];
+
+	void (*done)(struct ub_dev *, struct ub_scsi_cmd *);
+	void *back;
+};
+
+/*
+ */
+struct ub_capacity {
+	unsigned long nsec;		/* Linux size - 512 byte sectors */
+	unsigned int bsize;		/* Linux hardsect_size */
+	unsigned int bshift;		/* Shift between 512 and hard sects */
+};
+
+/*
+ * The SCSI command tracing structure.
+ */
+
+#define SCMD_ST_HIST_SZ   8
+#define SCMD_TRACE_SZ    63		/* Less than 4KB of 61-byte lines */
+
+struct ub_scsi_cmd_trace {
+	int hcur;
+	unsigned int tag;
+	unsigned int req_size, act_size;
+	unsigned char op;
+	unsigned char dir;
+	unsigned char key, asc, ascq;
+	char st_hst[SCMD_ST_HIST_SZ];	
+};
+
+struct ub_scsi_trace {
+	int cur;
+	struct ub_scsi_cmd_trace vec[SCMD_TRACE_SZ];
+};
+
+/*
+ * This is a direct take-off from linux/include/completion.h
+ * The difference is that I do not wait on this thing, just poll.
+ * When I want to wait (ub_probe), I just use the stock completion.
+ *
+ * Note that INIT_COMPLETION takes no lock. It is correct. But why
+ * in the bloody hell that thing takes struct instead of pointer to struct
+ * is quite beyond me. I just copied it from the stock completion.
+ */
+struct ub_completion {
+	unsigned int done;
+	spinlock_t lock;
+};
+
+static inline void ub_init_completion(struct ub_completion *x)
+{
+	x->done = 0;
+	spin_lock_init(&x->lock);
+}
+
+#define UB_INIT_COMPLETION(x)	((x).done = 0)
+
+static void ub_complete(struct ub_completion *x)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&x->lock, flags);
+	x->done++;
+	spin_unlock_irqrestore(&x->lock, flags);
+}
+
+static int ub_is_completed(struct ub_completion *x)
+{
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&x->lock, flags);
+	ret = x->done;
+	spin_unlock_irqrestore(&x->lock, flags);
+	return ret;
+}
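+
+/*
+ * Usage sketch, mirroring how the dispatch code below uses this
+ * poll-only completion: the submitter arms it, the URB callback bumps
+ * it, and the tasklet polls instead of sleeping:
+ *
+ *	UB_INIT_COMPLETION(sc->work_done);	/* before usb_submit_urb */
+ *	...
+ *	if (!ub_is_completed(&sc->work_done))
+ *		return;				/* URB still in flight */
+ */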
+
+/*
+ */
+struct ub_scsi_cmd_queue {
+	int qlen, qmax;
+	struct ub_scsi_cmd *head, *tail;
+};
+
+/*
+ * The UB device instance.
+ */
+struct ub_dev {
+	spinlock_t lock;
+	int id;				/* Number among ub's */
+	atomic_t poison;		/* The USB device is disconnected */
+	int openc;			/* protected by ub_lock! */
+					/* kref is too implicit for our taste */
+	unsigned int tagcnt;
+	int changed;			/* Media was changed */
+	int removable;
+	int readonly;
+	int first_open;			/* Kludge. See ub_bd_open. */
+	char name[8];
+	struct usb_device *dev;
+	struct usb_interface *intf;
+
+	struct ub_capacity capacity; 
+	struct gendisk *disk;
+
+	unsigned int send_bulk_pipe;	/* cached pipe values */
+	unsigned int recv_bulk_pipe;
+	unsigned int send_ctrl_pipe;
+	unsigned int recv_ctrl_pipe;
+
+	struct tasklet_struct tasklet;
+
+	/* XXX Use Ingo's mempool (once we have more than one) */
+	int cmda[1];
+	struct ub_scsi_cmd cmdv[1];
+
+	struct ub_scsi_cmd_queue cmd_queue;
+	struct ub_scsi_cmd top_rqs_cmd;	/* REQUEST SENSE */
+	unsigned char top_sense[UB_SENSE_SIZE];
+
+	struct ub_completion work_done;
+	struct urb work_urb;
+	struct timer_list work_timer;
+	int last_pipe;			/* What might need clearing */
+	struct bulk_cb_wrap work_bcb;
+	struct bulk_cs_wrap work_bcs;
+	struct usb_ctrlrequest work_cr;
+
+	struct ub_scsi_trace tr;
+};
+
+/*
+ */
+static void ub_cleanup(struct ub_dev *sc);
+static int ub_bd_rq_fn_1(struct ub_dev *sc, struct request *rq);
+static int ub_cmd_build_block(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
+    struct request *rq);
+static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
+    struct request *rq);
+static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
+static void ub_end_rq(struct request *rq, int uptodate);
+static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
+static void ub_urb_complete(struct urb *urb, struct pt_regs *pt);
+static void ub_scsi_action(unsigned long _dev);
+static void ub_scsi_dispatch(struct ub_dev *sc);
+static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
+static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc);
+static void __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
+static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
+static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd);
+static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
+    int stalled_pipe);
+static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd);
+static int ub_sync_tur(struct ub_dev *sc);
+static int ub_sync_read_cap(struct ub_dev *sc, struct ub_capacity *ret);
+
+/*
+ */
+static struct usb_device_id ub_usb_ids[] = {
+	// { USB_DEVICE_VER(0x0781, 0x0002, 0x0009, 0x0009) },	/* SDDR-31 */
+	{ USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_SCSI, US_PR_BULK) },
+	{ }
+};
+
+MODULE_DEVICE_TABLE(usb, ub_usb_ids);
+
+/*
+ * Find me a way to identify "next free minor" for add_disk(),
+ * and the array disappears the next day. However, the number of
+ * hosts has something to do with the naming and /proc/partitions.
+ * This has to be thought out in detail before changing.
+ * If UB_MAX_HOSTS was 1000, we'd use a bitmap. Or a better data structure.
+ */
+#define UB_MAX_HOSTS  26
+static char ub_hostv[UB_MAX_HOSTS];
+static DEFINE_SPINLOCK(ub_lock);	/* Locks globals and ->openc */
+
+/*
+ * The SCSI command tracing procedures.
+ */
+
+static void ub_cmdtr_new(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
+{
+	int n;
+	struct ub_scsi_cmd_trace *t;
+
+	if ((n = sc->tr.cur + 1) == SCMD_TRACE_SZ) n = 0;
+	t = &sc->tr.vec[n];
+
+	memset(t, 0, sizeof(struct ub_scsi_cmd_trace));
+	t->tag = cmd->tag;
+	t->op = cmd->cdb[0];
+	t->dir = cmd->dir;
+	t->req_size = cmd->len;
+	t->st_hst[0] = cmd->state;
+
+	sc->tr.cur = n;
+	cmd->trace_index = n;
+}
+
+static void ub_cmdtr_state(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
+{
+	int n;
+	struct ub_scsi_cmd_trace *t;
+
+	t = &sc->tr.vec[cmd->trace_index];
+	if (t->tag == cmd->tag) {
+		if ((n = t->hcur + 1) == SCMD_ST_HIST_SZ) n = 0;
+		t->st_hst[n] = cmd->state;
+		t->hcur = n;
+	}
+}
+
+static void ub_cmdtr_act_len(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
+{
+	struct ub_scsi_cmd_trace *t;
+
+	t = &sc->tr.vec[cmd->trace_index];
+	if (t->tag == cmd->tag)
+		t->act_size = cmd->act_len;
+}
+
+static void ub_cmdtr_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
+    unsigned char *sense)
+{
+	struct ub_scsi_cmd_trace *t;
+
+	t = &sc->tr.vec[cmd->trace_index];
+	if (t->tag == cmd->tag) {
+		t->key = sense[2] & 0x0F;
+		t->asc = sense[12];
+		t->ascq = sense[13];
+	}
+}
+
+static ssize_t ub_diag_show(struct device *dev, char *page)
+{
+	struct usb_interface *intf;
+	struct ub_dev *sc;
+	int cnt;
+	unsigned long flags;
+	int nc, nh;
+	int i, j;
+	struct ub_scsi_cmd_trace *t;
+
+	intf = to_usb_interface(dev);
+	sc = usb_get_intfdata(intf);
+	if (sc == NULL)
+		return 0;
+
+	cnt = 0;
+	spin_lock_irqsave(&sc->lock, flags);
+
+	cnt += sprintf(page + cnt,
+	    "qlen %d qmax %d changed %d removable %d readonly %d\n",
+	    sc->cmd_queue.qlen, sc->cmd_queue.qmax,
+	    sc->changed, sc->removable, sc->readonly);
+
+	if ((nc = sc->tr.cur + 1) == SCMD_TRACE_SZ) nc = 0;
+	for (j = 0; j < SCMD_TRACE_SZ; j++) {
+		t = &sc->tr.vec[nc];
+
+		cnt += sprintf(page + cnt, "%08x %02x", t->tag, t->op);
+		if (t->op == REQUEST_SENSE) {
+			cnt += sprintf(page + cnt, " [sense %x %02x %02x]",
+					t->key, t->asc, t->ascq);
+		} else {
+			cnt += sprintf(page + cnt, " %c", UB_DIR_CHAR(t->dir));
+			cnt += sprintf(page + cnt, " [%5d %5d]",
+					t->req_size, t->act_size);
+		}
+		if ((nh = t->hcur + 1) == SCMD_ST_HIST_SZ) nh = 0;
+		for (i = 0; i < SCMD_ST_HIST_SZ; i++) {
+			cnt += sprintf(page + cnt, " %s",
+					ub_scsi_cmd_stname[(int)t->st_hst[nh]]);
+			if (++nh == SCMD_ST_HIST_SZ) nh = 0;
+		}
+		cnt += sprintf(page + cnt, "\n");
+
+		if (++nc == SCMD_TRACE_SZ) nc = 0;
+	}
+
+	spin_unlock_irqrestore(&sc->lock, flags);
+	return cnt;
+}
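+
+/*
+ * A line of the diag output produced above might look like this
+ * (illustrative values):
+ *
+ *	00000003 28 r [ 4096  4096] .   Cmd dat sts fin .   .   .
+ *
+ * that is: tag, opcode, direction, requested/actual sizes, then the
+ * state history rendered through ub_scsi_cmd_stname[].
+ */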
+
+static DEVICE_ATTR(diag, S_IRUGO, ub_diag_show, NULL); /* N.B. World readable */
+
+/*
+ * The id allocator.
+ *
+ * This also stores the host for indexing by minor, which is somewhat dirty.
+ */
+static int ub_id_get(void)
+{
+	unsigned long flags;
+	int i;
+
+	spin_lock_irqsave(&ub_lock, flags);
+	for (i = 0; i < UB_MAX_HOSTS; i++) {
+		if (ub_hostv[i] == 0) {
+			ub_hostv[i] = 1;
+			spin_unlock_irqrestore(&ub_lock, flags);
+			return i;
+		}
+	}
+	spin_unlock_irqrestore(&ub_lock, flags);
+	return -1;
+}
+
+static void ub_id_put(int id)
+{
+	unsigned long flags;
+
+	if (id < 0 || id >= UB_MAX_HOSTS) {
+		printk(KERN_ERR DRV_NAME ": bad host ID %d\n", id);
+		return;
+	}
+
+	spin_lock_irqsave(&ub_lock, flags);
+	if (ub_hostv[id] == 0) {
+		spin_unlock_irqrestore(&ub_lock, flags);
+		printk(KERN_ERR DRV_NAME ": freeing free host ID %d\n", id);
+		return;
+	}
+	ub_hostv[id] = 0;
+	spin_unlock_irqrestore(&ub_lock, flags);
+}
+
+/*
+ * Downcount for deallocation. This rides on two assumptions:
+ *  - once something is poisoned, its refcount cannot grow
+ *  - opens cannot happen at this time (del_gendisk was done)
+ * If the above is true, we can drop the lock, which we need for
+ * blk_cleanup_queue(): the silly thing may attempt to sleep.
+ * [Actually, it never needs to sleep for us, but it calls might_sleep()]
+ */
+static void ub_put(struct ub_dev *sc)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&ub_lock, flags);
+	--sc->openc;
+	if (sc->openc == 0 && atomic_read(&sc->poison)) {
+		spin_unlock_irqrestore(&ub_lock, flags);
+		ub_cleanup(sc);
+	} else {
+		spin_unlock_irqrestore(&ub_lock, flags);
+	}
+}
+
+/*
+ * Final cleanup and deallocation.
+ */
+static void ub_cleanup(struct ub_dev *sc)
+{
+	request_queue_t *q;
+
+	/* I don't think queue can be NULL. But... Stolen from sx8.c */
+	if ((q = sc->disk->queue) != NULL)
+		blk_cleanup_queue(q);
+
+	/*
+	 * If we zero disk->private_data BEFORE put_disk, we have to check
+	 * for NULL all over the place in open, release, check_media and
+	 * revalidate, because the block level semaphore is well inside the
+	 * put_disk. But we cannot zero after the call, because *disk is gone.
+	 * The sd.c is blatantly racy in this area.
+	 */
+	/* disk->private_data = NULL; */
+	put_disk(sc->disk);
+	sc->disk = NULL;
+
+	ub_id_put(sc->id);
+	kfree(sc);
+}
+
+/*
+ * The "command allocator".
+ */
+static struct ub_scsi_cmd *ub_get_cmd(struct ub_dev *sc)
+{
+	struct ub_scsi_cmd *ret;
+
+	if (sc->cmda[0])
+		return NULL;
+	ret = &sc->cmdv[0];
+	sc->cmda[0] = 1;
+	return ret;
+}
+
+static void ub_put_cmd(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
+{
+	if (cmd != &sc->cmdv[0]) {
+		printk(KERN_WARNING "%s: releasing a foreign cmd %p\n",
+		    sc->name, cmd);
+		return;
+	}
+	if (!sc->cmda[0]) {
+		printk(KERN_WARNING "%s: releasing a free cmd\n", sc->name);
+		return;
+	}
+	sc->cmda[0] = 0;
+}
+
+/*
+ * The command queue.
+ */
+static void ub_cmdq_add(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
+{
+	struct ub_scsi_cmd_queue *t = &sc->cmd_queue;
+
+	if (t->qlen++ == 0) {
+		t->head = cmd;
+		t->tail = cmd;
+	} else {
+		t->tail->next = cmd;
+		t->tail = cmd;
+	}
+
+	if (t->qlen > t->qmax)
+		t->qmax = t->qlen;
+}
+
+static void ub_cmdq_insert(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
+{
+	struct ub_scsi_cmd_queue *t = &sc->cmd_queue;
+
+	if (t->qlen++ == 0) {
+		t->head = cmd;
+		t->tail = cmd;
+	} else {
+		cmd->next = t->head;
+		t->head = cmd;
+	}
+
+	if (t->qlen > t->qmax)
+		t->qmax = t->qlen;
+}
+
+static struct ub_scsi_cmd *ub_cmdq_pop(struct ub_dev *sc)
+{
+	struct ub_scsi_cmd_queue *t = &sc->cmd_queue;
+	struct ub_scsi_cmd *cmd;
+
+	if (t->qlen == 0)
+		return NULL;
+	if (--t->qlen == 0)
+		t->tail = NULL;
+	cmd = t->head;
+	t->head = cmd->next;
+	cmd->next = NULL;
+	return cmd;
+}
+
+#define ub_cmdq_peek(sc)  ((sc)->cmd_queue.head)
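+
+/*
+ * The queue discipline in brief: regular commands are appended with
+ * ub_cmdq_add, while the auto-generated REQUEST SENSE is pushed to the
+ * head with ub_cmdq_insert so it executes before the command it serves
+ * (see ub_submit_scsi and ub_state_sense below):
+ *
+ *	ub_cmdq_add(sc, cmd);		/* regular I/O, FIFO order */
+ *	ub_cmdq_insert(sc, scmd);	/* REQUEST SENSE jumps the queue */
+ */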
+
+/*
+ * The request function is our main entry point
+ */
+
+static void ub_bd_rq_fn(request_queue_t *q)
+{
+	struct ub_dev *sc = q->queuedata;
+	struct request *rq;
+
+	while ((rq = elv_next_request(q)) != NULL) {
+		if (ub_bd_rq_fn_1(sc, rq) != 0) {
+			blk_stop_queue(q);
+			break;
+		}
+	}
+}
+
+static int ub_bd_rq_fn_1(struct ub_dev *sc, struct request *rq)
+{
+	struct ub_scsi_cmd *cmd;
+	int rc;
+
+	if (atomic_read(&sc->poison) || sc->changed) {
+		blkdev_dequeue_request(rq);
+		ub_end_rq(rq, 0);
+		return 0;
+	}
+
+	if ((cmd = ub_get_cmd(sc)) == NULL)
+		return -1;
+	memset(cmd, 0, sizeof(struct ub_scsi_cmd));
+
+	blkdev_dequeue_request(rq);
+
+	if (blk_pc_request(rq)) {
+		rc = ub_cmd_build_packet(sc, cmd, rq);
+	} else {
+		rc = ub_cmd_build_block(sc, cmd, rq);
+	}
+	if (rc != 0) {
+		ub_put_cmd(sc, cmd);
+		ub_end_rq(rq, 0);
+		blk_start_queue(sc->disk->queue);
+		return 0;
+	}
+
+	cmd->state = UB_CMDST_INIT;
+	cmd->done = ub_rw_cmd_done;
+	cmd->back = rq;
+
+	cmd->tag = sc->tagcnt++;
+	if ((rc = ub_submit_scsi(sc, cmd)) != 0) {
+		ub_put_cmd(sc, cmd);
+		ub_end_rq(rq, 0);
+		blk_start_queue(sc->disk->queue);
+		return 0;
+	}
+
+	return 0;
+}
+
+static int ub_cmd_build_block(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
+    struct request *rq)
+{
+	int ub_dir;
+#if 0 /* We use rq->buffer for now */
+	struct scatterlist *sg;
+	int n_elem;
+#endif
+	unsigned int block, nblks;
+
+	if (rq_data_dir(rq) == WRITE)
+		ub_dir = UB_DIR_WRITE;
+	else
+		ub_dir = UB_DIR_READ;
+
+	/*
+	 * get scatterlist from block layer
+	 */
+#if 0 /* We use rq->buffer for now */
+	sg = &cmd->sgv[0];
+	n_elem = blk_rq_map_sg(q, rq, sg);
+	if (n_elem <= 0) {
+		ub_put_cmd(sc, cmd);
+		ub_end_rq(rq, 0);
+		blk_start_queue(q);
+		return 0;		/* request with no s/g entries? */
+	}
+
+	if (n_elem != 1) {		/* Paranoia */
+		printk(KERN_WARNING "%s: request with %d segments\n",
+		    sc->name, n_elem);
+		ub_put_cmd(sc, cmd);
+		ub_end_rq(rq, 0);
+		blk_start_queue(q);
+		return 0;
+	}
+#endif
+
+	/*
+	 * XXX Unfortunately, this check does not work. It is quite possible
+	 * to get bogus non-null rq->buffer if you allow sg by mistake.
+	 */
+	if (rq->buffer == NULL) {
+		/*
+		 * This must not happen if we set the queue right.
+		 * The block level must create bounce buffers for us.
+		 */
+		static int do_print = 1;
+		if (do_print) {
+			printk(KERN_WARNING "%s: unmapped block request"
+			    " flags 0x%lx sectors %lu\n",
+			    sc->name, rq->flags, rq->nr_sectors);
+			do_print = 0;
+		}
+		return -1;
+	}
+
+	/*
+	 * build the command
+	 *
+	 * The call to blk_queue_hardsect_size() guarantees that the request
+	 * is aligned, but it is given in terms of 512 byte units, always.
+	 */
+	block = rq->sector >> sc->capacity.bshift;
+	nblks = rq->nr_sectors >> sc->capacity.bshift;
+
+	cmd->cdb[0] = (ub_dir == UB_DIR_READ)? READ_10: WRITE_10;
+	/* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */
+	cmd->cdb[2] = block >> 24;
+	cmd->cdb[3] = block >> 16;
+	cmd->cdb[4] = block >> 8;
+	cmd->cdb[5] = block;
+	cmd->cdb[7] = nblks >> 8;
+	cmd->cdb[8] = nblks;
+	cmd->cdb_len = 10;
+
+	cmd->dir = ub_dir;
+	cmd->data = rq->buffer;
+	cmd->len = rq->nr_sectors * 512;
+
+	return 0;
+}
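+
+/*
+ * A worked example of the CDB built above (values assumed): reading
+ * 8 sectors at 512-byte sector 0x00012345 with bshift 0 yields
+ *
+ *	28 00 00 01 23 45 00 00 08 00
+ *
+ * i.e. READ(10), big-endian LBA in cdb[2..5], transfer length in
+ * cdb[7..8], the rest zeroed by the earlier memset.
+ */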
+
+static int ub_cmd_build_packet(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
+    struct request *rq)
+{
+
+	if (rq->data_len != 0 && rq->data == NULL) {
+		static int do_print = 1;
+		if (do_print) {
+			printk(KERN_WARNING "%s: unmapped packet request"
+			    " flags 0x%lx length %d\n",
+			    sc->name, rq->flags, rq->data_len);
+			do_print = 0;
+		}
+		return -1;
+	}
+
+	memcpy(&cmd->cdb, rq->cmd, rq->cmd_len);
+	cmd->cdb_len = rq->cmd_len;
+
+	if (rq->data_len == 0) {
+		cmd->dir = UB_DIR_NONE;
+	} else {
+		if (rq_data_dir(rq) == WRITE)
+			cmd->dir = UB_DIR_WRITE;
+		else
+			cmd->dir = UB_DIR_READ;
+	}
+	cmd->data = rq->data;
+	cmd->len = rq->data_len;
+
+	return 0;
+}
+
+static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
+{
+	struct request *rq = cmd->back;
+	struct gendisk *disk = sc->disk;
+	request_queue_t *q = disk->queue;
+	int uptodate;
+
+	if (blk_pc_request(rq)) {
+		/* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */
+		memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE);
+		rq->sense_len = UB_SENSE_SIZE;
+	}
+
+	if (cmd->error == 0)
+		uptodate = 1;
+	else
+		uptodate = 0;
+
+	ub_put_cmd(sc, cmd);
+	ub_end_rq(rq, uptodate);
+	blk_start_queue(q);
+}
+
+static void ub_end_rq(struct request *rq, int uptodate)
+{
+	int rc;
+
+	rc = end_that_request_first(rq, uptodate, rq->hard_nr_sectors);
+	// assert(rc == 0);
+	end_that_request_last(rq);
+}
+
+/*
+ * Submit a regular SCSI operation (not an auto-sense).
+ *
+ * The Iron Law of Good Submit Routine is:
+ * Zero return - callback is done, Nonzero return - callback is not done.
+ * No exceptions.
+ *
+ * Host is assumed locked.
+ *
+ * XXX We only support Bulk for the moment.
+ */
+static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
+{
+
+	if (cmd->state != UB_CMDST_INIT ||
+	    (cmd->dir != UB_DIR_NONE && cmd->len == 0)) {
+		return -EINVAL;
+	}
+
+	ub_cmdq_add(sc, cmd);
+	/*
+	 * We can call ub_scsi_dispatch(sc) right away here, but it's a little
+	 * safer to jump to a tasklet, in case upper layers do something silly.
+	 */
+	tasklet_schedule(&sc->tasklet);
+	return 0;
+}
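+
+/*
+ * Caller-side sketch of the Iron Law (cf. ub_bd_rq_fn_1 above):
+ *
+ *	if ((rc = ub_submit_scsi(sc, cmd)) != 0) {
+ *		ub_put_cmd(sc, cmd);	/* nonzero: callback will not run */
+ *		ub_end_rq(rq, 0);
+ *	}
+ *	... rc == 0: cmd->done() now owns the command ...
+ */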
+
+/*
+ * Submit the first URB for the queued command.
+ * This function does not deal with queueing in any way.
+ */
+static int ub_scsi_cmd_start(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
+{
+	struct bulk_cb_wrap *bcb;
+	int rc;
+
+	bcb = &sc->work_bcb;
+
+	/*
+	 * ``If the allocation length is eighteen or greater, and a device
+	 * server returns less than eighteen bytes of data, the application
+	 * client should assume that the bytes not transferred would have been
+	 * zeroes had the device server returned those bytes.''
+	 *
+	 * We zero sense for all commands so that when a packet request
+	 * fails it does not return a stale sense.
+	 */
+	memset(&sc->top_sense, 0, UB_SENSE_SIZE);
+
+	/* set up the command wrapper */
+	bcb->Signature = cpu_to_le32(US_BULK_CB_SIGN);
+	bcb->Tag = cmd->tag;		/* Endianness is not important */
+	bcb->DataTransferLength = cpu_to_le32(cmd->len);
+	bcb->Flags = (cmd->dir == UB_DIR_READ) ? 0x80 : 0;
+	bcb->Lun = 0;			/* No multi-LUN yet */
+	bcb->Length = cmd->cdb_len;
+
+	/* copy the command payload */
+	memcpy(bcb->CDB, cmd->cdb, UB_MAX_CDB_SIZE);
+
+	UB_INIT_COMPLETION(sc->work_done);
+
+	sc->last_pipe = sc->send_bulk_pipe;
+	usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->send_bulk_pipe,
+	    bcb, US_BULK_CB_WRAP_LEN, ub_urb_complete, sc);
+	sc->work_urb.transfer_flags = URB_ASYNC_UNLINK;
+
+	/* Fill what we shouldn't be filling, because usb-storage did so. */
+	sc->work_urb.actual_length = 0;
+	sc->work_urb.error_count = 0;
+	sc->work_urb.status = 0;
+
+	if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
+		/* XXX Clear stalls */
+		printk("ub: cmd #%d start failed (%d)\n", cmd->tag, rc); /* P3 */
+		ub_complete(&sc->work_done);
+		return rc;
+	}
+
+	sc->work_timer.expires = jiffies + UB_URB_TIMEOUT;
+	add_timer(&sc->work_timer);
+
+	cmd->state = UB_CMDST_CMD;
+	ub_cmdtr_state(sc, cmd);
+	return 0;
+}
+
+/*
+ * Timeout handler.
+ */
+static void ub_urb_timeout(unsigned long arg)
+{
+	struct ub_dev *sc = (struct ub_dev *) arg;
+	unsigned long flags;
+
+	spin_lock_irqsave(&sc->lock, flags);
+	usb_unlink_urb(&sc->work_urb);
+	spin_unlock_irqrestore(&sc->lock, flags);
+}
+
+/*
+ * Completion routine for the work URB.
+ *
+ * This can be called directly from usb_submit_urb (while we have
+ * the sc->lock taken) and from an interrupt (while we do NOT have
+ * the sc->lock taken). Therefore, bounce this off to a tasklet.
+ */
+static void ub_urb_complete(struct urb *urb, struct pt_regs *pt)
+{
+	struct ub_dev *sc = urb->context;
+
+	ub_complete(&sc->work_done);
+	tasklet_schedule(&sc->tasklet);
+}
+
+static void ub_scsi_action(unsigned long _dev)
+{
+	struct ub_dev *sc = (struct ub_dev *) _dev;
+	unsigned long flags;
+
+	spin_lock_irqsave(&sc->lock, flags);
+	del_timer(&sc->work_timer);
+	ub_scsi_dispatch(sc);
+	spin_unlock_irqrestore(&sc->lock, flags);
+}
+
+static void ub_scsi_dispatch(struct ub_dev *sc)
+{
+	struct ub_scsi_cmd *cmd;
+	int rc;
+
+	while ((cmd = ub_cmdq_peek(sc)) != NULL) {
+		if (cmd->state == UB_CMDST_DONE) {
+			ub_cmdq_pop(sc);
+			(*cmd->done)(sc, cmd);
+		} else if (cmd->state == UB_CMDST_INIT) {
+			ub_cmdtr_new(sc, cmd);
+			if ((rc = ub_scsi_cmd_start(sc, cmd)) == 0)
+				break;
+			cmd->error = rc;
+			cmd->state = UB_CMDST_DONE;
+			ub_cmdtr_state(sc, cmd);
+		} else {
+			if (!ub_is_completed(&sc->work_done))
+				break;
+			ub_scsi_urb_compl(sc, cmd);
+		}
+	}
+}
+
+static void ub_scsi_urb_compl(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
+{
+	struct urb *urb = &sc->work_urb;
+	struct bulk_cs_wrap *bcs;
+	int pipe;
+	int rc;
+
+	if (atomic_read(&sc->poison)) {
+		/* A little too simplistic, I feel... */
+		goto Bad_End;
+	}
+
+	if (cmd->state == UB_CMDST_CLEAR) {
+		if (urb->status == -EPIPE) {
+			/*
+			 * STALL while clearing STALL.
+			 * The control pipe clears itself - nothing to do.
+			 * XXX Might try to reset the device here and retry.
+			 */
+			printk(KERN_NOTICE "%s: "
+			    "stall on control pipe for device %u\n",
+			    sc->name, sc->dev->devnum);
+			goto Bad_End;
+		}
+
+		/*
+		 * We ignore the result for the halt clear.
+		 */
+
+		/* reset the endpoint toggle */
+		usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe),
+			usb_pipeout(sc->last_pipe), 0);
+
+		ub_state_sense(sc, cmd);
+
+	} else if (cmd->state == UB_CMDST_CLR2STS) {
+		if (urb->status == -EPIPE) {
+			/*
+			 * STALL while clearing STALL.
+			 * The control pipe clears itself - nothing to do.
+			 * XXX Might try to reset the device here and retry.
+			 */
+			printk(KERN_NOTICE "%s: "
+			    "stall on control pipe for device %u\n",
+			    sc->name, sc->dev->devnum);
+			goto Bad_End;
+		}
+
+		/*
+		 * We ignore the result for the halt clear.
+		 */
+
+		/* reset the endpoint toggle */
+		usb_settoggle(sc->dev, usb_pipeendpoint(sc->last_pipe),
+			usb_pipeout(sc->last_pipe), 0);
+
+		ub_state_stat(sc, cmd);
+
+	} else if (cmd->state == UB_CMDST_CMD) {
+		if (urb->status == -EPIPE) {
+			rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
+			if (rc != 0) {
+				printk(KERN_NOTICE "%s: "
+				    "unable to submit clear for device %u"
+				    " (code %d)\n",
+				    sc->name, sc->dev->devnum, rc);
+				/*
+				 * This is typically ENOMEM or some other such shit.
+				 * Retrying is pointless. Just do Bad End on it...
+				 */
+				goto Bad_End;
+			}
+			cmd->state = UB_CMDST_CLEAR;
+			ub_cmdtr_state(sc, cmd);
+			return;
+		}
+		if (urb->status != 0) {
+			printk("ub: cmd #%d cmd status (%d)\n", cmd->tag, urb->status); /* P3 */
+			goto Bad_End;
+		}
+		if (urb->actual_length != US_BULK_CB_WRAP_LEN) {
+			printk("ub: cmd #%d xferred %d\n", cmd->tag, urb->actual_length); /* P3 */
+			/* XXX Must do reset here to unconfuse the device */
+			goto Bad_End;
+		}
+
+		if (cmd->dir == UB_DIR_NONE) {
+			ub_state_stat(sc, cmd);
+			return;
+		}
+
+		UB_INIT_COMPLETION(sc->work_done);
+
+		if (cmd->dir == UB_DIR_READ)
+			pipe = sc->recv_bulk_pipe;
+		else
+			pipe = sc->send_bulk_pipe;
+		sc->last_pipe = pipe;
+		usb_fill_bulk_urb(&sc->work_urb, sc->dev, pipe,
+		    cmd->data, cmd->len, ub_urb_complete, sc);
+		sc->work_urb.transfer_flags = URB_ASYNC_UNLINK;
+		sc->work_urb.actual_length = 0;
+		sc->work_urb.error_count = 0;
+		sc->work_urb.status = 0;
+
+		if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
+			/* XXX Clear stalls */
+			printk("ub: data #%d submit failed (%d)\n", cmd->tag, rc); /* P3 */
+			ub_complete(&sc->work_done);
+			ub_state_done(sc, cmd, rc);
+			return;
+		}
+
+		sc->work_timer.expires = jiffies + UB_DATA_TIMEOUT;
+		add_timer(&sc->work_timer);
+
+		cmd->state = UB_CMDST_DATA;
+		ub_cmdtr_state(sc, cmd);
+
+	} else if (cmd->state == UB_CMDST_DATA) {
+		if (urb->status == -EPIPE) {
+			rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
+			if (rc != 0) {
+				printk(KERN_NOTICE "%s: "
+				    "unable to submit clear for device %u"
+				    " (code %d)\n",
+				    sc->name, sc->dev->devnum, rc);
+				/*
+				 * This is typically ENOMEM or some other such shit.
+				 * Retrying is pointless. Just do Bad End on it...
+				 */
+				goto Bad_End;
+			}
+			cmd->state = UB_CMDST_CLR2STS;
+			ub_cmdtr_state(sc, cmd);
+			return;
+		}
+		if (urb->status == -EOVERFLOW) {
+			/*
+			 * A babble? Failure, but we must transfer CSW now.
+			 */
+			cmd->error = -EOVERFLOW;	/* A cheap trick... */
+		} else {
+			if (urb->status != 0)
+				goto Bad_End;
+		}
+
+		cmd->act_len = urb->actual_length;
+		ub_cmdtr_act_len(sc, cmd);
+
+		ub_state_stat(sc, cmd);
+
+	} else if (cmd->state == UB_CMDST_STAT) {
+		if (urb->status == -EPIPE) {
+			rc = ub_submit_clear_stall(sc, cmd, sc->last_pipe);
+			if (rc != 0) {
+				printk(KERN_NOTICE "%s: "
+				    "unable to submit clear for device %u"
+				    " (code %d)\n",
+				    sc->name, sc->dev->devnum, rc);
+				/*
+				 * This is typically ENOMEM or some other such shit.
+				 * Retrying is pointless. Just do Bad End on it...
+				 */
+				goto Bad_End;
+			}
+			cmd->state = UB_CMDST_CLEAR;
+			ub_cmdtr_state(sc, cmd);
+			return;
+		}
+		if (urb->status != 0)
+			goto Bad_End;
+
+		if (urb->actual_length == 0) {
+			/*
+			 * Some broken devices add unnecessary zero-length
+			 * packets to the end of their data transfers.
+			 * Such packets show up as 0-length CSWs. If we
+			 * encounter such a thing, try to read the CSW again.
+			 */
+			if (++cmd->stat_count >= 4) {
+				printk(KERN_NOTICE "%s: "
+				    "unable to get CSW on device %u\n",
+				    sc->name, sc->dev->devnum);
+				goto Bad_End;
+			}
+			__ub_state_stat(sc, cmd);
+			return;
+		}
+
+		/*
+		 * Check the returned Bulk protocol status.
+		 */
+
+		bcs = &sc->work_bcs;
+		rc = le32_to_cpu(bcs->Residue);
+		if (rc != cmd->len - cmd->act_len) {
+			/*
+			 * It is all right to transfer less, the caller has
+			 * to check. But it's not all right if the device
+			 * counts disagree with our counts.
+			 */
+			/* P3 */ printk("%s: resid %d len %d act %d\n",
+			    sc->name, rc, cmd->len, cmd->act_len);
+			goto Bad_End;
+		}
+
+#if 0
+		if (bcs->Signature != cpu_to_le32(US_BULK_CS_SIGN) &&
+		    bcs->Signature != cpu_to_le32(US_BULK_CS_OLYMPUS_SIGN)) {
+			/* Windows ignores signatures, so do we. */
+		}
+#endif
+
+		if (bcs->Tag != cmd->tag) {
+			/*
+			 * This usually happens when we disagree with the
+			 * device's microcode about something. For instance,
+			 * a few of them throw this after timeouts. They buffer
+			 * commands and reply to commands we timed out earlier.
+			 * Without flushing these replies we loop forever.
+			 */
+			if (++cmd->stat_count >= 4) {
+				printk(KERN_NOTICE "%s: "
+				    "tag mismatch orig 0x%x reply 0x%x "
+				    "on device %u\n",
+				    sc->name, cmd->tag, bcs->Tag,
+				    sc->dev->devnum);
+				goto Bad_End;
+			}
+			__ub_state_stat(sc, cmd);
+			return;
+		}
+
+		switch (bcs->Status) {
+		case US_BULK_STAT_OK:
+			break;
+		case US_BULK_STAT_FAIL:
+			ub_state_sense(sc, cmd);
+			return;
+		case US_BULK_STAT_PHASE:
+			/* XXX We must reset the transport here */
+			/* P3 */ printk("%s: status PHASE\n", sc->name);
+			goto Bad_End;
+		default:
+			printk(KERN_INFO "%s: unknown CSW status 0x%x\n",
+			    sc->name, bcs->Status);
+			goto Bad_End;
+		}
+
+		/* Not zeroing error to preserve a babble indicator */
+		cmd->state = UB_CMDST_DONE;
+		ub_cmdtr_state(sc, cmd);
+		ub_cmdq_pop(sc);
+		(*cmd->done)(sc, cmd);
+
+	} else if (cmd->state == UB_CMDST_SENSE) {
+		ub_state_done(sc, cmd, -EIO);
+
+	} else {
+		printk(KERN_WARNING "%s: "
+		    "wrong command state %d on device %u\n",
+		    sc->name, cmd->state, sc->dev->devnum);
+		goto Bad_End;
+	}
+	return;
+
+Bad_End: /* Little Excel is dead */
+	ub_state_done(sc, cmd, -EIO);
+}
+
+/*
+ * Factorization helper for the command state machine:
+ * Finish the command.
+ */
+static void ub_state_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd, int rc)
+{
+
+	cmd->error = rc;
+	cmd->state = UB_CMDST_DONE;
+	ub_cmdtr_state(sc, cmd);
+	ub_cmdq_pop(sc);
+	(*cmd->done)(sc, cmd);
+}
+
+/*
+ * Factorization helper for the command state machine:
+ * Submit a CSW read.
+ */
+static void __ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
+{
+	int rc;
+
+	UB_INIT_COMPLETION(sc->work_done);
+
+	sc->last_pipe = sc->recv_bulk_pipe;
+	usb_fill_bulk_urb(&sc->work_urb, sc->dev, sc->recv_bulk_pipe,
+	    &sc->work_bcs, US_BULK_CS_WRAP_LEN, ub_urb_complete, sc);
+	sc->work_urb.transfer_flags = URB_ASYNC_UNLINK;
+	sc->work_urb.actual_length = 0;
+	sc->work_urb.error_count = 0;
+	sc->work_urb.status = 0;
+
+	if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
+		/* XXX Clear stalls */
+		printk("%s: CSW #%d submit failed (%d)\n", sc->name, cmd->tag, rc); /* P3 */
+		ub_complete(&sc->work_done);
+		ub_state_done(sc, cmd, rc);
+		return;
+	}
+
+	sc->work_timer.expires = jiffies + UB_STAT_TIMEOUT;
+	add_timer(&sc->work_timer);
+}
+
+/*
+ * Factorization helper for the command state machine:
+ * Submit a CSW read and go to STAT state.
+ */
+static void ub_state_stat(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
+{
+	__ub_state_stat(sc, cmd);
+
+	cmd->stat_count = 0;
+	cmd->state = UB_CMDST_STAT;
+	ub_cmdtr_state(sc, cmd);
+}
+
+/*
+ * Factorization helper for the command state machine:
+ * Submit a REQUEST SENSE and go to SENSE state.
+ */
+static void ub_state_sense(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
+{
+	struct ub_scsi_cmd *scmd;
+	int rc;
+
+	if (cmd->cdb[0] == REQUEST_SENSE) {
+		rc = -EPIPE;
+		goto error;
+	}
+
+	scmd = &sc->top_rqs_cmd;
+	scmd->cdb[0] = REQUEST_SENSE;
+	scmd->cdb[4] = UB_SENSE_SIZE;
+	scmd->cdb_len = 6;
+	scmd->dir = UB_DIR_READ;
+	scmd->state = UB_CMDST_INIT;
+	scmd->data = sc->top_sense;
+	scmd->len = UB_SENSE_SIZE;
+	scmd->done = ub_top_sense_done;
+	scmd->back = cmd;
+
+	scmd->tag = sc->tagcnt++;
+
+	cmd->state = UB_CMDST_SENSE;
+	ub_cmdtr_state(sc, cmd);
+
+	ub_cmdq_insert(sc, scmd);
+	return;
+
+error:
+	ub_state_done(sc, cmd, rc);
+}
+
+/*
+ * A helper for the command's state machine:
+ * Submit a stall clear.
+ */
+static int ub_submit_clear_stall(struct ub_dev *sc, struct ub_scsi_cmd *cmd,
+    int stalled_pipe)
+{
+	int endp;
+	struct usb_ctrlrequest *cr;
+	int rc;
+
+	endp = usb_pipeendpoint(stalled_pipe);
+	if (usb_pipein (stalled_pipe))
+		endp |= USB_DIR_IN;
+
+	cr = &sc->work_cr;
+	cr->bRequestType = USB_RECIP_ENDPOINT;
+	cr->bRequest = USB_REQ_CLEAR_FEATURE;
+	cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT);
+	cr->wIndex = cpu_to_le16(endp);
+	cr->wLength = cpu_to_le16(0);
+
+	UB_INIT_COMPLETION(sc->work_done);
+
+	usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
+	    (unsigned char*) cr, NULL, 0, ub_urb_complete, sc);
+	sc->work_urb.transfer_flags = URB_ASYNC_UNLINK;
+	sc->work_urb.actual_length = 0;
+	sc->work_urb.error_count = 0;
+	sc->work_urb.status = 0;
+
+	if ((rc = usb_submit_urb(&sc->work_urb, GFP_ATOMIC)) != 0) {
+		ub_complete(&sc->work_done);
+		return rc;
+	}
+
+	sc->work_timer.expires = jiffies + UB_CTRL_TIMEOUT;
+	add_timer(&sc->work_timer);
+	return 0;
+}
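+
+/*
+ * For reference, the setup packet built above is the standard
+ * CLEAR_FEATURE(ENDPOINT_HALT) request; e.g. for a stalled bulk-in
+ * endpoint 2 (illustrative values):
+ *
+ *	bmRequestType	0x02	USB_RECIP_ENDPOINT
+ *	bRequest	0x01	USB_REQ_CLEAR_FEATURE
+ *	wValue		0x0000	USB_ENDPOINT_HALT
+ *	wIndex		0x0082	endpoint address, IN bit set
+ *	wLength		0x0000
+ */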
+
+/*
+ */
+static void ub_top_sense_done(struct ub_dev *sc, struct ub_scsi_cmd *scmd)
+{
+	unsigned char *sense = scmd->data;
+	struct ub_scsi_cmd *cmd;
+
+	/*
+	 * Ignoring scmd->act_len, because the buffer was pre-zeroed.
+	 */
+	ub_cmdtr_sense(sc, scmd, sense);
+
+	/*
+	 * Find the command which triggered the unit attention or a check,
+	 * save the sense into it, and advance its state machine.
+	 */
+	if ((cmd = ub_cmdq_peek(sc)) == NULL) {
+		printk(KERN_WARNING "%s: sense done while idle\n", sc->name);
+		return;
+	}
+	if (cmd != scmd->back) {
+		printk(KERN_WARNING "%s: "
+		    "sense done for wrong command 0x%x on device %u\n",
+		    sc->name, cmd->tag, sc->dev->devnum);
+		return;
+	}
+	if (cmd->state != UB_CMDST_SENSE) {
+		printk(KERN_WARNING "%s: "
+		    "sense done with bad cmd state %d on device %u\n",
+		    sc->name, cmd->state, sc->dev->devnum);
+		return;
+	}
+
+	cmd->key = sense[2] & 0x0F;
+	cmd->asc = sense[12];
+	cmd->ascq = sense[13];
+
+	ub_scsi_urb_compl(sc, cmd);
+}
+
+#if 0
+/* Determine what the maximum LUN supported is */
+int usb_stor_Bulk_max_lun(struct us_data *us)
+{
+	int result;
+
+	/* issue the command */
+	result = usb_stor_control_msg(us, us->recv_ctrl_pipe,
+				 US_BULK_GET_MAX_LUN, 
+				 USB_DIR_IN | USB_TYPE_CLASS | 
+				 USB_RECIP_INTERFACE,
+				 0, us->ifnum, us->iobuf, 1, HZ);
+
+	/* 
+	 * Some devices (i.e. Iomega Zip100) need this -- apparently
+	 * the bulk pipes get STALLed when the GetMaxLUN request is
+	 * processed.   This is, in theory, harmless to all other devices
+	 * (regardless of if they stall or not).
+	 */
+	if (result < 0) {
+		usb_stor_clear_halt(us, us->recv_bulk_pipe);
+		usb_stor_clear_halt(us, us->send_bulk_pipe);
+	}
+
+	US_DEBUGP("GetMaxLUN command result is %d, data is %d\n", 
+		  result, us->iobuf[0]);
+
+	/* if we have a successful request, return the result */
+	if (result == 1)
+		return us->iobuf[0];
+
+	/* return the default -- no LUNs */
+	return 0;
+}
+#endif
+
+/*
+ * This is called from a process context.
+ */
+static void ub_revalidate(struct ub_dev *sc)
+{
+
+	sc->readonly = 0;	/* XXX Query this from the device */
+
+	sc->capacity.nsec = 0;
+	sc->capacity.bsize = 512;
+	sc->capacity.bshift = 0;
+
+	if (ub_sync_tur(sc) != 0)
+		return;			/* Not ready */
+	sc->changed = 0;
+
+	if (ub_sync_read_cap(sc, &sc->capacity) != 0) {
+		/*
+		 * The retry here means something is wrong, either with the
+		 * device, with the transport, or with our code.
+		 * We keep this because sd.c has retries for capacity.
+		 */
+		if (ub_sync_read_cap(sc, &sc->capacity) != 0) {
+			sc->capacity.nsec = 0;
+			sc->capacity.bsize = 512;
+			sc->capacity.bshift = 0;
+		}
+	}
+}
+
+/*
+ * The open function.
+ * This is mostly needed to keep refcounting, but also to support
+ * media checks on removable media drives.
+ */
+static int ub_bd_open(struct inode *inode, struct file *filp)
+{
+	struct gendisk *disk = inode->i_bdev->bd_disk;
+	struct ub_dev *sc;
+	unsigned long flags;
+	int rc;
+
+	if ((sc = disk->private_data) == NULL)
+		return -ENXIO;
+	spin_lock_irqsave(&ub_lock, flags);
+	if (atomic_read(&sc->poison)) {
+		spin_unlock_irqrestore(&ub_lock, flags);
+		return -ENXIO;
+	}
+	sc->openc++;
+	spin_unlock_irqrestore(&ub_lock, flags);
+
+	/*
+	 * This is a workaround for a specific problem in our block layer.
+	 * In 2.6.9, register_disk duplicates the code from rescan_partitions.
+	 * However, if we do add_disk with a device which persistently reports
+	 * a changed media, add_disk calls register_disk, which does do_open,
+	 * which will call rescan_partitions for changed media. After that,
+	 * register_disk attempts to do it all again, causing double kobject
+	 * registration and eventually an oops on module removal.
+	 *
+	 * The bottom line is, Al Viro says that we should not allow
+	 * bdev->bd_invalidated to be set when doing add_disk no matter what.
+	 */
+	if (sc->first_open) {
+		if (sc->changed) {
+			sc->first_open = 0;
+			rc = -ENOMEDIUM;
+			goto err_open;
+		}
+	}
+
+	if (sc->removable || sc->readonly)
+		check_disk_change(inode->i_bdev);
+
+	/*
+	 * The sd.c considers ->media_present and ->changed not equivalent,
+	 * under some pretty murky conditions (a failure of READ CAPACITY).
+	 * We may need it one day.
+	 */
+	if (sc->removable && sc->changed && !(filp->f_flags & O_NDELAY)) {
+		rc = -ENOMEDIUM;
+		goto err_open;
+	}
+
+	if (sc->readonly && (filp->f_mode & FMODE_WRITE)) {
+		rc = -EROFS;
+		goto err_open;
+	}
+
+	return 0;
+
+err_open:
+	ub_put(sc);
+	return rc;
+}
+
+/*
+ */
+static int ub_bd_release(struct inode *inode, struct file *filp)
+{
+	struct gendisk *disk = inode->i_bdev->bd_disk;
+	struct ub_dev *sc = disk->private_data;
+
+	ub_put(sc);
+	return 0;
+}
+
+/*
+ * The ioctl interface.
+ */
+static int ub_bd_ioctl(struct inode *inode, struct file *filp,
+    unsigned int cmd, unsigned long arg)
+{
+	struct gendisk *disk = inode->i_bdev->bd_disk;
+	void __user *usermem = (void __user *) arg;
+
+	return scsi_cmd_ioctl(filp, disk, cmd, usermem);
+}
+
+/*
+ * This is called once a new disk was seen by the block layer or by ub_probe().
+ * The main objective here is to discover the features of the media such as
+ * the capacity, read-only status, etc. USB storage generally does not
+ * need to be spun up, but if we needed it, this would be the place.
+ *
+ * This call can sleep.
+ *
+ * The return code is not used.
+ */
+static int ub_bd_revalidate(struct gendisk *disk)
+{
+	struct ub_dev *sc = disk->private_data;
+
+	ub_revalidate(sc);
+	/* This is pretty much a long term P3 */
+	if (!atomic_read(&sc->poison)) {		/* Cover sc->dev */
+		printk(KERN_INFO "%s: device %u capacity nsec %ld bsize %u\n",
+		    sc->name, sc->dev->devnum,
+		    sc->capacity.nsec, sc->capacity.bsize);
+	}
+
+	/* XXX Support sector size switching like in sr.c */
+	blk_queue_hardsect_size(disk->queue, sc->capacity.bsize);
+	set_capacity(disk, sc->capacity.nsec);
+	// set_disk_ro(sdkp->disk, sc->readonly);
+
+	return 0;
+}
+
+/*
+ * The check is called by the block layer to verify if the media
+ * is still available. It is supposed to be harmless, lightweight and
+ * non-intrusive in case the media was not changed.
+ *
+ * This call can sleep.
+ *
+ * The return code is bool!
+ */
+static int ub_bd_media_changed(struct gendisk *disk)
+{
+	struct ub_dev *sc = disk->private_data;
+
+	if (!sc->removable)
+		return 0;
+
+	/*
+	 * We always clear checks after every command, so this is not
+	 * as dangerous as it looks. If the TEST_UNIT_READY fails here,
+	 * the device is actually not ready, with operator or software
+	 * intervention required. One dangerous case might be a drive which
+	 * spins itself down; come the time to write dirty pages, the write
+	 * will fail and the block layer discards the data. Since we never
+	 * spin drives up, such devices simply cannot be used with ub anyway.
+	 */
+	if (ub_sync_tur(sc) != 0) {
+		sc->changed = 1;
+		return 1;
+	}
+
+	return sc->changed;
+}
+
+static struct block_device_operations ub_bd_fops = {
+	.owner		= THIS_MODULE,
+	.open		= ub_bd_open,
+	.release	= ub_bd_release,
+	.ioctl		= ub_bd_ioctl,
+	.media_changed	= ub_bd_media_changed,
+	.revalidate_disk = ub_bd_revalidate,
+};
+
+/*
+ * Common ->done routine for commands executed synchronously.
+ */
+static void ub_probe_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd)
+{
+	struct completion *cop = cmd->back;
+	complete(cop);
+}
+
+/*
+ * Test if the device has a check condition on it, synchronously.
+ */
+static int ub_sync_tur(struct ub_dev *sc)
+{
+	struct ub_scsi_cmd *cmd;
+	enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) };
+	unsigned long flags;
+	struct completion compl;
+	int rc;
+
+	init_completion(&compl);
+
+	rc = -ENOMEM;
+	if ((cmd = kmalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
+		goto err_alloc;
+	memset(cmd, 0, ALLOC_SIZE);
+
+	cmd->cdb[0] = TEST_UNIT_READY;
+	cmd->cdb_len = 6;
+	cmd->dir = UB_DIR_NONE;
+	cmd->state = UB_CMDST_INIT;
+	cmd->done = ub_probe_done;
+	cmd->back = &compl;
+
+	spin_lock_irqsave(&sc->lock, flags);
+	cmd->tag = sc->tagcnt++;
+
+	rc = ub_submit_scsi(sc, cmd);
+	spin_unlock_irqrestore(&sc->lock, flags);
+
+	if (rc != 0) {
+		printk("ub: testing ready: submit error (%d)\n", rc); /* P3 */
+		goto err_submit;
+	}
+
+	wait_for_completion(&compl);
+
+	rc = cmd->error;
+
+	if (rc == -EIO && cmd->key != 0)	/* Retries for benh's key */
+		rc = cmd->key;
+
+err_submit:
+	kfree(cmd);
+err_alloc:
+	return rc;
+}
+
+/*
+ * Read the SCSI capacity synchronously (for probing).
+ */
+static int ub_sync_read_cap(struct ub_dev *sc, struct ub_capacity *ret)
+{
+	struct ub_scsi_cmd *cmd;
+	char *p;
+	enum { ALLOC_SIZE = sizeof(struct ub_scsi_cmd) + 8 };
+	unsigned long flags;
+	unsigned int bsize, shift;
+	unsigned long nsec;
+	struct completion compl;
+	int rc;
+
+	init_completion(&compl);
+
+	rc = -ENOMEM;
+	if ((cmd = kmalloc(ALLOC_SIZE, GFP_KERNEL)) == NULL)
+		goto err_alloc;
+	memset(cmd, 0, ALLOC_SIZE);
+	p = (char *)cmd + sizeof(struct ub_scsi_cmd);
+
+	cmd->cdb[0] = 0x25;
+	cmd->cdb_len = 10;
+	cmd->dir = UB_DIR_READ;
+	cmd->state = UB_CMDST_INIT;
+	cmd->data = p;
+	cmd->len = 8;
+	cmd->done = ub_probe_done;
+	cmd->back = &compl;
+
+	spin_lock_irqsave(&sc->lock, flags);
+	cmd->tag = sc->tagcnt++;
+
+	rc = ub_submit_scsi(sc, cmd);
+	spin_unlock_irqrestore(&sc->lock, flags);
+
+	if (rc != 0) {
+		printk("ub: reading capacity: submit error (%d)\n", rc); /* P3 */
+		goto err_submit;
+	}
+
+	wait_for_completion(&compl);
+
+	if (cmd->error != 0) {
+		printk("ub: reading capacity: error %d\n", cmd->error); /* P3 */
+		rc = -EIO;
+		goto err_read;
+	}
+	if (cmd->act_len != 8) {
+		printk("ub: reading capacity: size %d\n", cmd->act_len); /* P3 */
+		rc = -EIO;
+		goto err_read;
+	}
+
+	/* sd.c special-cases sector size of 0 to mean 512. Needed? Safe? */
+	nsec = be32_to_cpu(*(__be32 *)p) + 1;
+	bsize = be32_to_cpu(*(__be32 *)(p + 4));
+	switch (bsize) {
+	case 512:	shift = 0;	break;
+	case 1024:	shift = 1;	break;
+	case 2048:	shift = 2;	break;
+	case 4096:	shift = 3;	break;
+	default:
+		printk("ub: Bad sector size %u\n", bsize); /* P3 */
+		rc = -EDOM;
+		goto err_inv_bsize;
+	}
+
+	ret->bsize = bsize;
+	ret->bshift = shift;
+	ret->nsec = nsec << shift;
+	rc = 0;
+
+err_inv_bsize:
+err_read:
+err_submit:
+	kfree(cmd);
+err_alloc:
+	return rc;
+}
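+
+/*
+ * A worked example of the parsing above (reply bytes assumed): if the
+ * device returns 00 0f 42 3f 00 00 08 00, the last LBA is 0xf423f, so
+ * the device has 1000000 blocks of bsize 2048 (shift 2) and ret->nsec
+ * becomes 4000000 in Linux 512-byte sectors.
+ */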
+
+/*
+ */
+static void ub_probe_urb_complete(struct urb *urb, struct pt_regs *pt)
+{
+	struct completion *cop = urb->context;
+	complete(cop);
+}
+
+static void ub_probe_timeout(unsigned long arg)
+{
+	struct completion *cop = (struct completion *) arg;
+	complete(cop);
+}
+
+/*
+ * Clear initial stalls.
+ */
+static int ub_probe_clear_stall(struct ub_dev *sc, int stalled_pipe)
+{
+	int endp;
+	struct usb_ctrlrequest *cr;
+	struct completion compl;
+	struct timer_list timer;
+	int rc;
+
+	init_completion(&compl);
+
+	endp = usb_pipeendpoint(stalled_pipe);
+	if (usb_pipein (stalled_pipe))
+		endp |= USB_DIR_IN;
+
+	cr = &sc->work_cr;
+	cr->bRequestType = USB_RECIP_ENDPOINT;
+	cr->bRequest = USB_REQ_CLEAR_FEATURE;
+	cr->wValue = cpu_to_le16(USB_ENDPOINT_HALT);
+	cr->wIndex = cpu_to_le16(endp);
+	cr->wLength = cpu_to_le16(0);
+
+	usb_fill_control_urb(&sc->work_urb, sc->dev, sc->send_ctrl_pipe,
+	    (unsigned char*) cr, NULL, 0, ub_probe_urb_complete, &compl);
+	sc->work_urb.transfer_flags = 0;
+	sc->work_urb.actual_length = 0;
+	sc->work_urb.error_count = 0;
+	sc->work_urb.status = 0;
+
+	if ((rc = usb_submit_urb(&sc->work_urb, GFP_KERNEL)) != 0) {
+		printk(KERN_WARNING
+		     "%s: Unable to submit a probe clear (%d)\n", sc->name, rc);
+		return rc;
+	}
+
+	init_timer(&timer);
+	timer.function = ub_probe_timeout;
+	timer.data = (unsigned long) &compl;
+	timer.expires = jiffies + UB_CTRL_TIMEOUT;
+	add_timer(&timer);
+
+	wait_for_completion(&compl);
+
+	del_timer_sync(&timer);
+	usb_kill_urb(&sc->work_urb);
+
+	/* reset the endpoint toggle */
+	usb_settoggle(sc->dev, usb_pipeendpoint(stalled_pipe),
+		usb_pipeout(stalled_pipe), 0);
+
+	return 0;
+}
+
+/*
+ * Get the pipe settings.
+ */
+static int ub_get_pipes(struct ub_dev *sc, struct usb_device *dev,
+    struct usb_interface *intf)
+{
+	struct usb_host_interface *altsetting = intf->cur_altsetting;
+	struct usb_endpoint_descriptor *ep_in = NULL;
+	struct usb_endpoint_descriptor *ep_out = NULL;
+	struct usb_endpoint_descriptor *ep;
+	int i;
+
+	/*
+	 * Find the endpoints we need.
+	 * We are expecting a minimum of 2 endpoints - in and out (bulk).
+	 * We will ignore any others.
+	 */
+	for (i = 0; i < altsetting->desc.bNumEndpoints; i++) {
+		ep = &altsetting->endpoint[i].desc;
+
+		/* Is it a BULK endpoint? */
+		if ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
+				== USB_ENDPOINT_XFER_BULK) {
+			/* BULK in or out? */
+			if (ep->bEndpointAddress & USB_DIR_IN)
+				ep_in = ep;
+			else
+				ep_out = ep;
+		}
+	}
+
+	if (ep_in == NULL || ep_out == NULL) {
+		printk(KERN_NOTICE "%s: device %u failed endpoint check\n",
+		    sc->name, sc->dev->devnum);
+		return -EIO;
+	}
+
+	/* Calculate and store the pipe values */
+	sc->send_ctrl_pipe = usb_sndctrlpipe(dev, 0);
+	sc->recv_ctrl_pipe = usb_rcvctrlpipe(dev, 0);
+	sc->send_bulk_pipe = usb_sndbulkpipe(dev,
+		ep_out->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
+	sc->recv_bulk_pipe = usb_rcvbulkpipe(dev, 
+		ep_in->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
+
+	return 0;
+}
+
+/*
+ * Probing is done in the process context, which allows us to cheat
+ * and not build a state machine for the discovery.
+ */
+static int ub_probe(struct usb_interface *intf,
+    const struct usb_device_id *dev_id)
+{
+	struct ub_dev *sc;
+	request_queue_t *q;
+	struct gendisk *disk;
+	int rc;
+	int i;
+
+	rc = -ENOMEM;
+	if ((sc = kmalloc(sizeof(struct ub_dev), GFP_KERNEL)) == NULL)
+		goto err_core;
+	memset(sc, 0, sizeof(struct ub_dev));
+	spin_lock_init(&sc->lock);
+	usb_init_urb(&sc->work_urb);
+	tasklet_init(&sc->tasklet, ub_scsi_action, (unsigned long)sc);
+	atomic_set(&sc->poison, 0);
+
+	init_timer(&sc->work_timer);
+	sc->work_timer.data = (unsigned long) sc;
+	sc->work_timer.function = ub_urb_timeout;
+
+	ub_init_completion(&sc->work_done);
+	sc->work_done.done = 1;		/* A little yuk, but oh well... */
+
+	rc = -ENOSR;
+	if ((sc->id = ub_id_get()) == -1)
+		goto err_id;
+	snprintf(sc->name, 8, DRV_NAME "%c", sc->id + 'a');
+
+	sc->dev = interface_to_usbdev(intf);
+	sc->intf = intf;
+	// sc->ifnum = intf->cur_altsetting->desc.bInterfaceNumber;
+
+	usb_set_intfdata(intf, sc);
+	usb_get_dev(sc->dev);
+	// usb_get_intf(sc->intf);	/* Do we need this? */
+
+	/* XXX Verify that we can handle the device (from descriptors) */
+
+	ub_get_pipes(sc, sc->dev, intf);
+
+	if (device_create_file(&sc->intf->dev, &dev_attr_diag) != 0)
+		goto err_diag;
+
+	/*
+	 * At this point, all USB initialization is done, do upper layer.
+	 * We really hate halfway initialized structures, so from the
+	 * invariants perspective, this ub_dev is fully constructed at
+	 * this point.
+	 */
+
+	/*
+	 * This is needed to clear toggles. It is a problem only if we do
+	 * `rmmod ub && modprobe ub` without disconnects, but we like that.
+	 */
+	ub_probe_clear_stall(sc, sc->recv_bulk_pipe);
+	ub_probe_clear_stall(sc, sc->send_bulk_pipe);
+
+	/*
+	 * The way this is used by the startup code is a little specific.
+	 * A SCSI check causes a USB stall. Our common case code sees it
+	 * and clears the check, after which the device is ready for use.
+	 * But if a check was not present, any command other than
+	 * TEST_UNIT_READY ends with a lockup (including REQUEST_SENSE).
+	 *
+	 * If we neglect to clear the SCSI check, the first real command fails
+	 * (which is the capacity readout). We clear that and retry, but why
+	 * cause spurious retries for no reason?
+	 *
+	 * Revalidation may start with its own TEST_UNIT_READY, but that one
+	 * has to succeed, so we clear checks with an additional one here.
+	 * In any case it's not our business how revalidation is implemented.
+	 */
+	for (i = 0; i < 3; i++) {	/* Retries for benh's key */
+		if ((rc = ub_sync_tur(sc)) <= 0) break;
+		if (rc != 0x6) break;
+		msleep(10);
+	}
+
+	sc->removable = 1;		/* XXX Query this from the device */
+	sc->changed = 1;		/* ub_revalidate clears only */
+	sc->first_open = 1;
+
+	ub_revalidate(sc);
+	/* This is pretty much a long term P3 */
+	printk(KERN_INFO "%s: device %u capacity nsec %ld bsize %u\n",
+	    sc->name, sc->dev->devnum, sc->capacity.nsec, sc->capacity.bsize);
+
+	/*
+	 * Just one disk per sc currently, but maybe more.
+	 */
+	rc = -ENOMEM;
+	if ((disk = alloc_disk(UB_MINORS_PER_MAJOR)) == NULL)
+		goto err_diskalloc;
+
+	sc->disk = disk;
+	sprintf(disk->disk_name, DRV_NAME "%c", sc->id + 'a');
+	sprintf(disk->devfs_name, DEVFS_NAME "/%c", sc->id + 'a');
+	disk->major = UB_MAJOR;
+	disk->first_minor = sc->id * UB_MINORS_PER_MAJOR;
+	disk->fops = &ub_bd_fops;
+	disk->private_data = sc;
+	disk->driverfs_dev = &intf->dev;
+
+	rc = -ENOMEM;
+	if ((q = blk_init_queue(ub_bd_rq_fn, &sc->lock)) == NULL)
+		goto err_blkqinit;
+
+	disk->queue = q;
+
+	// blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
+	blk_queue_max_hw_segments(q, UB_MAX_REQ_SG);
+	blk_queue_max_phys_segments(q, UB_MAX_REQ_SG);
+	// blk_queue_segment_boundary(q, CARM_SG_BOUNDARY);
+	blk_queue_max_sectors(q, UB_MAX_SECTORS);
+	blk_queue_hardsect_size(q, sc->capacity.bsize);
+
+	/*
+	 * This is a serious infraction, caused by a deficiency in the
+	 * USB sg interface (usb_sg_wait()). We plan to remove this once
+	 * we get mileage on the driver and can justify a change to USB API.
+	 * See blk_queue_bounce_limit() to understand this part.
+	 *
+	 * XXX And I still need to be aware of the DMA mask in the HC.
+	 */
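+	/*
+	 * A sketch: the two assignments below are roughly what
+	 * blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH) would arrange for a
+	 * highmem-incapable device, without relying on the helper's exact
+	 * bounce_gfp choice.
+	 */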
+	q->bounce_pfn = blk_max_low_pfn;
+	q->bounce_gfp = GFP_NOIO;
+
+	q->queuedata = sc;
+
+	set_capacity(disk, sc->capacity.nsec);
+	if (sc->removable)
+		disk->flags |= GENHD_FL_REMOVABLE;
+
+	add_disk(disk);
+
+	return 0;
+
+err_blkqinit:
+	put_disk(disk);
+err_diskalloc:
+	device_remove_file(&sc->intf->dev, &dev_attr_diag);
+err_diag:
+	usb_set_intfdata(intf, NULL);
+	// usb_put_intf(sc->intf);
+	usb_put_dev(sc->dev);
+	ub_id_put(sc->id);
+err_id:
+	kfree(sc);
+err_core:
+	return rc;
+}
+
+static void ub_disconnect(struct usb_interface *intf)
+{
+	struct ub_dev *sc = usb_get_intfdata(intf);
+	struct gendisk *disk = sc->disk;
+	unsigned long flags;
+
+	/*
+	 * Prevent ub_bd_release from pulling the rug from under us.
+	 * XXX This is starting to look like a kref.
+	 * XXX Why not to take this ref at probe time?
+	 */
+	spin_lock_irqsave(&ub_lock, flags);
+	sc->openc++;
+	spin_unlock_irqrestore(&ub_lock, flags);
+
+	/*
+	 * Fence stall clearings, operations triggered by unlinks, and so on.
+	 * We do not attempt to unlink any URBs, because we do not trust the
+	 * unlink paths in HC drivers. Also, we get -84 upon disconnect anyway.
+	 */
+	atomic_set(&sc->poison, 1);
+
+	/*
+	 * Blow away queued commands.
+	 *
+	 * Actually, this never works, because before we get here
+	 * the HCD terminates outstanding URB(s). It causes our
+	 * SCSI command queue to advance, commands fail to submit,
+	 * and the whole queue drains. So, we just use this code to
+	 * print warnings.
+	 */
+	spin_lock_irqsave(&sc->lock, flags);
+	{
+		struct ub_scsi_cmd *cmd;
+		int cnt = 0;
+		while ((cmd = ub_cmdq_peek(sc)) != NULL) {
+			cmd->error = -ENOTCONN;
+			cmd->state = UB_CMDST_DONE;
+			ub_cmdtr_state(sc, cmd);
+			ub_cmdq_pop(sc);
+			(*cmd->done)(sc, cmd);
+			cnt++;
+		}
+		if (cnt != 0) {
+			printk(KERN_WARNING "%s: "
+			    "%d was queued after shutdown\n", sc->name, cnt);
+		}
+	}
+	spin_unlock_irqrestore(&sc->lock, flags);
+
+	/*
+	 * Unregister the upper layer.
+	 */
+	if (disk->flags & GENHD_FL_UP)
+		del_gendisk(disk);
+	/*
+	 * I wish I could do:
+	 *    set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
+	 * As it is, we rely on our internal poisoning and let
+	 * the upper levels spin furiously, failing all the I/O.
+	 */
+
+	/*
+	 * Taking a lock on a structure which is about to be freed
+	 * is very nonsensical. Here it is largely a way to do a debug freeze,
+	 * and a bracket which shows where the nonsensical code segment ends.
+	 *
+	 * Testing for -EINPROGRESS is always a bug, so we are bending
+	 * the rules a little.
+	 */
+	spin_lock_irqsave(&sc->lock, flags);
+	if (sc->work_urb.status == -EINPROGRESS) {	/* janitors: ignore */
+		printk(KERN_WARNING "%s: "
+		    "URB is active after disconnect\n", sc->name);
+	}
+	spin_unlock_irqrestore(&sc->lock, flags);
+
+	/*
+	 * There is virtually no chance that another CPU is still running the
+	 * timer this long after ub_urb_complete should have called del_timer,
+	 * but that holds only if the HCD didn't forget to deliver a callback
+	 * on unlink.
+	 */
+	del_timer_sync(&sc->work_timer);
+
+	/*
+	 * At this point there must be no commands coming from anyone
+	 * and no URBs left in transit.
+	 */
+
+	device_remove_file(&sc->intf->dev, &dev_attr_diag);
+	usb_set_intfdata(intf, NULL);
+	// usb_put_intf(sc->intf);
+	sc->intf = NULL;
+	usb_put_dev(sc->dev);
+	sc->dev = NULL;
+
+	ub_put(sc);
+}
+
+static struct usb_driver ub_driver = {
+	.owner =	THIS_MODULE,
+	.name =		"ub",
+	.probe =	ub_probe,
+	.disconnect =	ub_disconnect,
+	.id_table =	ub_usb_ids,
+};
+
+static int __init ub_init(void)
+{
+	int rc;
+
+	/* P3 */ printk("ub: sizeof ub_scsi_cmd %zu ub_dev %zu\n",
+			sizeof(struct ub_scsi_cmd), sizeof(struct ub_dev));
+
+	if ((rc = register_blkdev(UB_MAJOR, DRV_NAME)) != 0)
+		goto err_regblkdev;
+	devfs_mk_dir(DEVFS_NAME);
+
+	if ((rc = usb_register(&ub_driver)) != 0)
+		goto err_register;
+
+	return 0;
+
+err_register:
+	devfs_remove(DEVFS_NAME);
+	unregister_blkdev(UB_MAJOR, DRV_NAME);
+err_regblkdev:
+	return rc;
+}
+
+static void __exit ub_exit(void)
+{
+	usb_deregister(&ub_driver);
+
+	devfs_remove(DEVFS_NAME);
+	unregister_blkdev(UB_MAJOR, DRV_NAME);
+}
+
+module_init(ub_init);
+module_exit(ub_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
new file mode 100644
index 0000000..0c4c121
--- /dev/null
+++ b/drivers/block/umem.c
@@ -0,0 +1,1256 @@
+/*
+ * mm.c - Micro Memory(tm) PCI memory board block device driver - v2.3
+ *
+ * (C) 2001 San Mehat <nettwerk@valinux.com>
+ * (C) 2001 Johannes Erdfelt <jerdfelt@valinux.com>
+ * (C) 2001 NeilBrown <neilb@cse.unsw.edu.au>
+ *
+ * This driver for the Micro Memory PCI Memory Module with Battery Backup
+ * is Copyright Micro Memory Inc 2001-2002.  All rights reserved.
+ *
+ * This driver is released to the public under the terms of the
+ *  GNU GENERAL PUBLIC LICENSE version 2
+ * See the file COPYING for details.
+ *
+ * This driver provides a standard block device interface for Micro Memory(tm)
+ * PCI based RAM boards.
+ * 10/05/01: Phap Nguyen - Rebuilt the driver
+ * 10/22/01: Phap Nguyen - v2.1 Added disk partitioning
+ * 29oct2001:NeilBrown   - Use make_request_fn instead of request_fn
+ *                       - use standard disk partitioning (so fdisk works).
+ * 08nov2001:NeilBrown	 - change driver name from "mm" to "umem"
+ *			 - incorporate into main kernel
+ * 08apr2002:NeilBrown   - Move some of the interrupt handling to a tasklet
+ *			 - use spin_lock_bh instead of _irq
+ *			 - Never block on make_request.  queue
+ *			   bh's instead.
+ *			 - unregister umem from devfs at mod unload
+ *			 - Change version to 2.3
+ * 07Nov2001:Phap Nguyen - Select pci read command: 06, 12, 15 (Decimal)
+ * 07Jan2002: P. Nguyen  - Used PCI Memory Write & Invalidate for DMA
+ * 15May2002:NeilBrown   - convert to bio for 2.5
+ * 17May2002:NeilBrown   - remove init_mem initialisation.  Instead detect
+ *			 - a sequence of writes that cover the card, and
+ *			 - set initialised bit then.
+ */
+
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/bio.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/ioctl.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/smp_lock.h>
+#include <linux/timer.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+
+#include <linux/fcntl.h>        /* O_ACCMODE */
+#include <linux/hdreg.h>  /* HDIO_GETGEO */
+
+#include <linux/umem.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+
+#define PRINTK(x...) do {} while (0)
+#define dprintk(x...) do {} while (0)
+/*#define dprintk(x...) printk(x) */
+
+#define MM_MAXCARDS 4
+#define MM_RAHEAD 2      /* two sectors */
+#define MM_BLKSIZE 1024  /* 1k blocks */
+#define MM_HARDSECT 512  /* 512-byte hardware sectors */
+#define MM_SHIFT 6       /* max 64 partitions on 4 cards  */
+
+/*
+ * Version Information
+ */
+
+#define DRIVER_VERSION "v2.3"
+#define DRIVER_AUTHOR "San Mehat, Johannes Erdfelt, NeilBrown"
+#define DRIVER_DESC "Micro Memory(tm) PCI memory board block driver"
+
+static int debug;
+/* #define HW_TRACE(x)     writeb(x,cards[0].csr_remap + MEMCTRLSTATUS_MAGIC) */
+#define HW_TRACE(x)
+
+#define DEBUG_LED_ON_TRANSFER	0x01
+#define DEBUG_BATTERY_POLLING	0x02
+
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Debug bitmask");
+
+static int pci_read_cmd = 0x0C;		/* Read Multiple */
+module_param(pci_read_cmd, int, 0);
+MODULE_PARM_DESC(pci_read_cmd, "PCI read command");
+
+static int pci_write_cmd = 0x0F;	/* Write and Invalidate */
+module_param(pci_write_cmd, int, 0);
+MODULE_PARM_DESC(pci_write_cmd, "PCI write command");
+
+static int pci_cmds;
+
+static int major_nr;
+
+#include <linux/blkdev.h>
+#include <linux/blkpg.h>
+
+struct cardinfo {
+	int		card_number;
+	struct pci_dev	*dev;
+
+	int		irq;
+
+	unsigned long	csr_base;
+	unsigned char	__iomem *csr_remap;
+	unsigned long	csr_len;
+#ifdef CONFIG_MM_MAP_MEMORY
+	unsigned long	mem_base;
+	unsigned char	__iomem *mem_remap;
+	unsigned long	mem_len;
+#endif
+
+	unsigned int	win_size; /* PCI window size */
+	unsigned int	mm_size;  /* size in kbytes */
+
+	unsigned int	init_size; /* initial segment, in sectors,
+				    * that we know to
+				    * have been written
+				    */
+	struct bio	*bio, *currentbio, **biotail;
+
+	request_queue_t *queue;
+
+	struct mm_page {
+		dma_addr_t		page_dma;
+		struct mm_dma_desc	*desc;
+		int	 		cnt, headcnt;
+		struct bio		*bio, **biotail;
+	} mm_pages[2];
+#define DESC_PER_PAGE ((PAGE_SIZE*2)/sizeof(struct mm_dma_desc))
+
+	int  Active, Ready;
+
+	struct tasklet_struct	tasklet;
+	unsigned int dma_status;
+
+	struct {
+		int		good;
+		int		warned;
+		unsigned long	last_change;
+	} battery[2];
+
+	spinlock_t 	lock;
+	int		check_batteries;
+
+	int		flags;
+};
+
+static struct cardinfo cards[MM_MAXCARDS];
+static struct block_device_operations mm_fops;
+static struct timer_list battery_timer;
+
+static int num_cards = 0;
+
+static struct gendisk *mm_gendisk[MM_MAXCARDS];
+
+static void check_batteries(struct cardinfo *card);
+
+/*
+-----------------------------------------------------------------------------------
+--                           get_userbit
+-----------------------------------------------------------------------------------
+*/
+static int get_userbit(struct cardinfo *card, int bit)
+{
+	unsigned char led;
+
+	led = readb(card->csr_remap + MEMCTRLCMD_LEDCTRL);
+	return led & bit;
+}
+/*
+-----------------------------------------------------------------------------------
+--                            set_userbit
+-----------------------------------------------------------------------------------
+*/
+static int set_userbit(struct cardinfo *card, int bit, unsigned char state)
+{
+	unsigned char led;
+
+	led = readb(card->csr_remap + MEMCTRLCMD_LEDCTRL);
+	if (state)
+		led |= bit;
+	else
+		led &= ~bit;
+	writeb(led, card->csr_remap + MEMCTRLCMD_LEDCTRL);
+
+	return 0;
+}
+/*
+-----------------------------------------------------------------------------------
+--                             set_led
+-----------------------------------------------------------------------------------
+*/
+/*
+ * NOTE: For the power LED, use the LED_POWER_* macros since they differ
+ */
+static void set_led(struct cardinfo *card, int shift, unsigned char state)
+{
+	unsigned char led;
+
+	led = readb(card->csr_remap + MEMCTRLCMD_LEDCTRL);
+	if (state == LED_FLIP)
+		led ^= (1<<shift);
+	else {
+		led &= ~(0x03 << shift);
+		led |= (state << shift);
+	}
+	writeb(led, card->csr_remap + MEMCTRLCMD_LEDCTRL);
+
+}
+
+#ifdef MM_DIAG
+/*
+-----------------------------------------------------------------------------------
+--                              dump_regs
+-----------------------------------------------------------------------------------
+*/
+static void dump_regs(struct cardinfo *card)
+{
+	unsigned char *p;
+	int i, i1;
+
+	p = card->csr_remap;
+	for (i = 0; i < 8; i++) {
+		printk(KERN_DEBUG "%p   ", p);
+
+		for (i1 = 0; i1 < 16; i1++)
+			printk("%02x ", *p++);
+
+		printk("\n");
+	}
+}
+#endif
+/*
+-----------------------------------------------------------------------------------
+--                            dump_dmastat
+-----------------------------------------------------------------------------------
+*/
+static void dump_dmastat(struct cardinfo *card, unsigned int dmastat)
+{
+	printk(KERN_DEBUG "MM%d*: DMAstat - ", card->card_number);
+	if (dmastat & DMASCR_ANY_ERR)
+		printk("ANY_ERR ");
+	if (dmastat & DMASCR_MBE_ERR)
+		printk("MBE_ERR ");
+	if (dmastat & DMASCR_PARITY_ERR_REP)
+		printk("PARITY_ERR_REP ");
+	if (dmastat & DMASCR_PARITY_ERR_DET)
+		printk("PARITY_ERR_DET ");
+	if (dmastat & DMASCR_SYSTEM_ERR_SIG)
+		printk("SYSTEM_ERR_SIG ");
+	if (dmastat & DMASCR_TARGET_ABT)
+		printk("TARGET_ABT ");
+	if (dmastat & DMASCR_MASTER_ABT)
+		printk("MASTER_ABT ");
+	if (dmastat & DMASCR_CHAIN_COMPLETE)
+		printk("CHAIN_COMPLETE ");
+	if (dmastat & DMASCR_DMA_COMPLETE)
+		printk("DMA_COMPLETE ");
+	printk("\n");
+}
+
+/*
+ * Theory of request handling
+ *
+ * Each bio is assigned to one mm_dma_desc - which may not be enough FIXME
+ * We have two pages of mm_dma_desc, holding about 64 descriptors
+ * each.  These are allocated at init time.
+ * One page is "Ready" and is either full, or can have request added.
+ * The other page might be "Active", which DMA is happening on it.
+ *
+ * Whenever IO on the active page completes, the Ready page is activated
+ * and the ex-Active page is cleaned out and made Ready.
+ * Otherwise the Ready page is only activated when it becomes full, or
+ * when mm_unplug_device is called via the unplug_io_fn.
+ *
+ * If a request arrives while both pages are full, it is queued, and b_rdev is
+ * overloaded to record whether it was a read or a write.
+ *
+ * The interrupt handler only polls the device to clear the interrupt.
+ * The processing of the result is done in a tasklet.
+ */
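+/*
+ * A sketch of that lifecycle (illustrative only):
+ *
+ *   add_bio():      fill the Ready page with descriptors until full
+ *   activate():     if Active == -1 and Ready is non-empty, swap the
+ *                   roles and call mm_start_io()
+ *   process_page(): on DMA completion, reset the ex-Active page, set
+ *                   Active = -1 and call activate() again
+ */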
+
+static void mm_start_io(struct cardinfo *card)
+{
+	/* we have the lock, we know there is
+	 * no IO active, and we know that card->Active
+	 * is set
+	 */
+	struct mm_dma_desc *desc;
+	struct mm_page *page;
+	int offset;
+
+	/* make the last descriptor end the chain */
+	page = &card->mm_pages[card->Active];
+	PRINTK("start_io: %d %d->%d\n", card->Active, page->headcnt, page->cnt-1);
+	desc = &page->desc[page->cnt-1];
+
+	desc->control_bits |= cpu_to_le32(DMASCR_CHAIN_COMP_EN);
+	desc->control_bits &= ~cpu_to_le32(DMASCR_CHAIN_EN);
+	desc->sem_control_bits = desc->control_bits;
+
+	if (debug & DEBUG_LED_ON_TRANSFER)
+		set_led(card, LED_REMOVE, LED_ON);
+
+	desc = &page->desc[page->headcnt];
+	writel(0, card->csr_remap + DMA_PCI_ADDR);
+	writel(0, card->csr_remap + DMA_PCI_ADDR + 4);
+
+	writel(0, card->csr_remap + DMA_LOCAL_ADDR);
+	writel(0, card->csr_remap + DMA_LOCAL_ADDR + 4);
+
+	writel(0, card->csr_remap + DMA_TRANSFER_SIZE);
+	writel(0, card->csr_remap + DMA_TRANSFER_SIZE + 4);
+
+	writel(0, card->csr_remap + DMA_SEMAPHORE_ADDR);
+	writel(0, card->csr_remap + DMA_SEMAPHORE_ADDR + 4);
+
+	offset = ((char*)desc) - ((char*)page->desc);
+	writel(cpu_to_le32((page->page_dma+offset)&0xffffffff),
+	       card->csr_remap + DMA_DESCRIPTOR_ADDR);
+	/* Force the value to u64 before shifting, otherwise >> 32 is
+	 * undefined in C and on some ports will do nothing! */
+	writel(cpu_to_le32(((u64)page->page_dma)>>32),
+	       card->csr_remap + DMA_DESCRIPTOR_ADDR + 4);
+
+	/* Go, go, go */
+	writel(cpu_to_le32(DMASCR_GO | DMASCR_CHAIN_EN | pci_cmds),
+	       card->csr_remap + DMA_STATUS_CTRL);
+}
+
+static int add_bio(struct cardinfo *card);
+
+static void activate(struct cardinfo *card)
+{
+	/* If no page is Active, and Ready is
+	 * not empty, then switch the Ready page
+	 * to Active and start IO.
+	 * Then add any bios that are available to Ready.
+	 */
+
+	do {
+		while (add_bio(card))
+			;
+
+		if (card->Active == -1 &&
+		    card->mm_pages[card->Ready].cnt > 0) {
+			card->Active = card->Ready;
+			card->Ready = 1-card->Ready;
+			mm_start_io(card);
+		}
+
+	} while (card->Active == -1 && add_bio(card));
+}
+
+static inline void reset_page(struct mm_page *page)
+{
+	page->cnt = 0;
+	page->headcnt = 0;
+	page->bio = NULL;
+	page->biotail = & page->bio;
+}
+
+static void mm_unplug_device(request_queue_t *q)
+{
+	struct cardinfo *card = q->queuedata;
+	unsigned long flags;
+
+	spin_lock_irqsave(&card->lock, flags);
+	if (blk_remove_plug(q))
+		activate(card);
+	spin_unlock_irqrestore(&card->lock, flags);
+}
+
+/*
+ * If there is room on the Ready page, take
+ * one bio off the list and add it.
+ * Return 1 if there was room, else 0.
+ */
+static int add_bio(struct cardinfo *card)
+{
+	struct mm_page *p;
+	struct mm_dma_desc *desc;
+	dma_addr_t dma_handle;
+	int offset;
+	struct bio *bio;
+	int rw;
+	int len;
+
+	bio = card->currentbio;
+	if (!bio && card->bio) {
+		card->currentbio = card->bio;
+		card->bio = card->bio->bi_next;
+		if (card->bio == NULL)
+			card->biotail = &card->bio;
+		card->currentbio->bi_next = NULL;
+		return 1;
+	}
+	if (!bio)
+		return 0;
+
+	rw = bio_rw(bio);
+	if (card->mm_pages[card->Ready].cnt >= DESC_PER_PAGE)
+		return 0;
+
+	len = bio_iovec(bio)->bv_len;
+	dma_handle = pci_map_page(card->dev, 
+				  bio_page(bio),
+				  bio_offset(bio),
+				  len,
+				  (rw==READ) ?
+				  PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
+
+	p = &card->mm_pages[card->Ready];
+	desc = &p->desc[p->cnt];
+	p->cnt++;
+	if ((p->biotail) != &bio->bi_next) {
+		*(p->biotail) = bio;
+		p->biotail = &(bio->bi_next);
+		bio->bi_next = NULL;
+	}
+
+	desc->data_dma_handle = dma_handle;
+
+	desc->pci_addr = cpu_to_le64((u64)desc->data_dma_handle);
+	desc->local_addr= cpu_to_le64(bio->bi_sector << 9);
+	desc->transfer_size = cpu_to_le32(len);
+	offset = ( ((char*)&desc->sem_control_bits) - ((char*)p->desc));
+	desc->sem_addr = cpu_to_le64((u64)(p->page_dma+offset));
+	desc->zero1 = desc->zero2 = 0;
+	offset = ( ((char*)(desc+1)) - ((char*)p->desc));
+	desc->next_desc_addr = cpu_to_le64(p->page_dma+offset);
+	desc->control_bits = cpu_to_le32(DMASCR_GO|DMASCR_ERR_INT_EN|
+					 DMASCR_PARITY_INT_EN|
+					 DMASCR_CHAIN_EN |
+					 DMASCR_SEM_EN |
+					 pci_cmds);
+	if (rw == WRITE)
+		desc->control_bits |= cpu_to_le32(DMASCR_TRANSFER_READ);
+	desc->sem_control_bits = desc->control_bits;
+
+	bio->bi_sector += (len>>9);
+	bio->bi_size -= len;
+	bio->bi_idx++;
+	if (bio->bi_idx >= bio->bi_vcnt) 
+		card->currentbio = NULL;
+
+	return 1;
+}
+
+static void process_page(unsigned long data)
+{
+	/* Check if any of the requests in the page are DMA_COMPLETE,
+	 * and deal with them appropriately.
+	 * If we find a descriptor without DMA_COMPLETE in the semaphore, then
+	 * the DMA must have hit an error on that descriptor, so use dma_status
+	 * instead and assume that all following descriptors must be retried.
+	 */
+	struct mm_page *page;
+	struct bio *return_bio=NULL;
+	struct cardinfo *card = (struct cardinfo *)data;
+	unsigned int dma_status = card->dma_status;
+
+	spin_lock_bh(&card->lock);
+	if (card->Active < 0)
+		goto out_unlock;
+	page = &card->mm_pages[card->Active];
+	
+	while (page->headcnt < page->cnt) {
+		struct bio *bio = page->bio;
+		struct mm_dma_desc *desc = &page->desc[page->headcnt];
+		int control = le32_to_cpu(desc->sem_control_bits);
+		int last=0;
+		int idx;
+
+		if (!(control & DMASCR_DMA_COMPLETE)) {
+			control = dma_status;
+			last=1; 
+		}
+		page->headcnt++;
+		idx = bio->bi_phys_segments;
+		bio->bi_phys_segments++;
+		if (bio->bi_phys_segments >= bio->bi_vcnt)
+			page->bio = bio->bi_next;
+
+		pci_unmap_page(card->dev, desc->data_dma_handle, 
+			       bio_iovec_idx(bio,idx)->bv_len,
+				 (control& DMASCR_TRANSFER_READ) ?
+				PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
+		if (control & DMASCR_HARD_ERROR) {
+			/* error */
+			clear_bit(BIO_UPTODATE, &bio->bi_flags);
+			printk(KERN_WARNING "MM%d: I/O error on sector %d/%d\n",
+			       card->card_number, 
+			       le32_to_cpu(desc->local_addr)>>9,
+			       le32_to_cpu(desc->transfer_size));
+			dump_dmastat(card, control);
+		} else if (test_bit(BIO_RW, &bio->bi_rw) &&
+			   le64_to_cpu(desc->local_addr)>>9 == card->init_size) {
+			card->init_size += le32_to_cpu(desc->transfer_size)>>9;
+			if (card->init_size>>1 >= card->mm_size) {
+				printk(KERN_INFO "MM%d: memory now initialised\n",
+				       card->card_number);
+				set_userbit(card, MEMORY_INITIALIZED, 1);
+			}
+		}
+		if (bio != page->bio) {
+			bio->bi_next = return_bio;
+			return_bio = bio;
+		}
+
+		if (last) break;
+	}
+
+	if (debug & DEBUG_LED_ON_TRANSFER)
+		set_led(card, LED_REMOVE, LED_OFF);
+
+	if (card->check_batteries) {
+		card->check_batteries = 0;
+		check_batteries(card);
+	}
+	if (page->headcnt >= page->cnt) {
+		reset_page(page);
+		card->Active = -1;
+		activate(card);
+	} else {
+		/* haven't finished with this one yet */
+		PRINTK("do some more\n");
+		mm_start_io(card);
+	}
+ out_unlock:
+	spin_unlock_bh(&card->lock);
+
+	while(return_bio) {
+		struct bio *bio = return_bio;
+
+		return_bio = bio->bi_next;
+		bio->bi_next = NULL;
+		bio_endio(bio, bio->bi_size, 0);
+	}
+}
+
+/*
+-----------------------------------------------------------------------------------
+--                              mm_make_request
+-----------------------------------------------------------------------------------
+*/
+static int mm_make_request(request_queue_t *q, struct bio *bio)
+{
+	struct cardinfo *card = q->queuedata;
+	PRINTK("mm_make_request %ld %d\n", bh->b_rsector, bh->b_size);
+
+	bio->bi_phys_segments = bio->bi_idx; /* count of completed segments*/
+	spin_lock_irq(&card->lock);
+	*card->biotail = bio;
+	bio->bi_next = NULL;
+	card->biotail = &bio->bi_next;
+	blk_plug_device(q);
+	spin_unlock_irq(&card->lock);
+
+	return 0;
+}
+
+/*
+-----------------------------------------------------------------------------------
+--                              mm_interrupt
+-----------------------------------------------------------------------------------
+*/
+static irqreturn_t mm_interrupt(int irq, void *__card, struct pt_regs *regs)
+{
+	struct cardinfo *card = (struct cardinfo *) __card;
+	unsigned int dma_status;
+	unsigned short cfg_status;
+
+HW_TRACE(0x30);
+
+	dma_status = le32_to_cpu(readl(card->csr_remap + DMA_STATUS_CTRL));
+
+	if (!(dma_status & (DMASCR_ERROR_MASK | DMASCR_CHAIN_COMPLETE))) {
+		/* interrupt wasn't for me ... */
+		return IRQ_NONE;
+	}
+
+	/* clear COMPLETION interrupts */
+	if (card->flags & UM_FLAG_NO_BYTE_STATUS)
+		writel(cpu_to_le32(DMASCR_DMA_COMPLETE|DMASCR_CHAIN_COMPLETE),
+		       card->csr_remap+ DMA_STATUS_CTRL);
+	else
+		writeb((DMASCR_DMA_COMPLETE|DMASCR_CHAIN_COMPLETE) >> 16,
+		       card->csr_remap+ DMA_STATUS_CTRL + 2);
+	
+	/* log errors and clear interrupt status */
+	if (dma_status & DMASCR_ANY_ERR) {
+		unsigned int	data_log1, data_log2;
+		unsigned int	addr_log1, addr_log2;
+		unsigned char	stat, count, syndrome, check;
+
+		stat = readb(card->csr_remap + MEMCTRLCMD_ERRSTATUS);
+
+		data_log1 = le32_to_cpu(readl(card->csr_remap + ERROR_DATA_LOG));
+		data_log2 = le32_to_cpu(readl(card->csr_remap + ERROR_DATA_LOG + 4));
+		addr_log1 = le32_to_cpu(readl(card->csr_remap + ERROR_ADDR_LOG));
+		addr_log2 = readb(card->csr_remap + ERROR_ADDR_LOG + 4);
+
+		count = readb(card->csr_remap + ERROR_COUNT);
+		syndrome = readb(card->csr_remap + ERROR_SYNDROME);
+		check = readb(card->csr_remap + ERROR_CHECK);
+
+		dump_dmastat(card, dma_status);
+
+		if (stat & 0x01)
+			printk(KERN_ERR "MM%d*: Memory access error detected (err count %d)\n",
+				card->card_number, count);
+		if (stat & 0x02)
+			printk(KERN_ERR "MM%d*: Multi-bit EDC error\n",
+				card->card_number);
+
+		printk(KERN_ERR "MM%d*: Fault Address 0x%02x%08x, Fault Data 0x%08x%08x\n",
+			card->card_number, addr_log2, addr_log1, data_log2, data_log1);
+		printk(KERN_ERR "MM%d*: Fault Check 0x%02x, Fault Syndrome 0x%02x\n",
+			card->card_number, check, syndrome);
+
+		writeb(0, card->csr_remap + ERROR_COUNT);
+	}
+
+	if (dma_status & DMASCR_PARITY_ERR_REP) {
+		printk(KERN_ERR "MM%d*: PARITY ERROR REPORTED\n", card->card_number);
+		pci_read_config_word(card->dev, PCI_STATUS, &cfg_status);
+		pci_write_config_word(card->dev, PCI_STATUS, cfg_status);
+	}
+
+	if (dma_status & DMASCR_PARITY_ERR_DET) {
+		printk(KERN_ERR "MM%d*: PARITY ERROR DETECTED\n", card->card_number); 
+		pci_read_config_word(card->dev, PCI_STATUS, &cfg_status);
+		pci_write_config_word(card->dev, PCI_STATUS, cfg_status);
+	}
+
+	if (dma_status & DMASCR_SYSTEM_ERR_SIG) {
+		printk(KERN_ERR "MM%d*: SYSTEM ERROR\n", card->card_number); 
+		pci_read_config_word(card->dev, PCI_STATUS, &cfg_status);
+		pci_write_config_word(card->dev, PCI_STATUS, cfg_status);
+	}
+
+	if (dma_status & DMASCR_TARGET_ABT) {
+		printk(KERN_ERR "MM%d*: TARGET ABORT\n", card->card_number); 
+		pci_read_config_word(card->dev, PCI_STATUS, &cfg_status);
+		pci_write_config_word(card->dev, PCI_STATUS, cfg_status);
+	}
+
+	if (dma_status & DMASCR_MASTER_ABT) {
+		printk(KERN_ERR "MM%d*: MASTER ABORT\n", card->card_number); 
+		pci_read_config_word(card->dev, PCI_STATUS, &cfg_status);
+		pci_write_config_word(card->dev, PCI_STATUS, cfg_status);
+	}
+
+	/* and process the DMA descriptors */
+	card->dma_status = dma_status;
+	tasklet_schedule(&card->tasklet);
+
+HW_TRACE(0x36);
+
+	return IRQ_HANDLED; 
+}
+/*
+-----------------------------------------------------------------------------------
+--                         set_fault_to_battery_status
+-----------------------------------------------------------------------------------
+*/
+/*
+ * If both batteries are good, no LED
+ * If either battery has been warned, solid LED
+ * If both batteries are bad, flash the LED quickly
+ * If either battery is bad, flash the LED semi quickly
+ */
+static void set_fault_to_battery_status(struct cardinfo *card)
+{
+	if (card->battery[0].good && card->battery[1].good)
+		set_led(card, LED_FAULT, LED_OFF);
+	else if (card->battery[0].warned || card->battery[1].warned)
+		set_led(card, LED_FAULT, LED_ON);
+	else if (!card->battery[0].good && !card->battery[1].good)
+		set_led(card, LED_FAULT, LED_FLASH_7_0);
+	else
+		set_led(card, LED_FAULT, LED_FLASH_3_5);
+}
+
+static void init_battery_timer(void);
+
+
+/*
+-----------------------------------------------------------------------------------
+--                            check_battery
+-----------------------------------------------------------------------------------
+*/
+static int check_battery(struct cardinfo *card, int battery, int status)
+{
+	if (status != card->battery[battery].good) {
+		card->battery[battery].good = !card->battery[battery].good;
+		card->battery[battery].last_change = jiffies;
+
+		if (card->battery[battery].good) {
+			printk(KERN_ERR "MM%d: Battery %d now good\n",
+				card->card_number, battery + 1);
+			card->battery[battery].warned = 0;
+		} else
+			printk(KERN_ERR "MM%d: Battery %d now FAILED\n",
+				card->card_number, battery + 1);
+
+		return 1;
+	} else if (!card->battery[battery].good &&
+		   !card->battery[battery].warned &&
+		   time_after_eq(jiffies, card->battery[battery].last_change +
+				 (HZ * 60 * 60 * 5))) {
+		printk(KERN_ERR "MM%d: Battery %d still FAILED after 5 hours\n",
+			card->card_number, battery + 1);
+		card->battery[battery].warned = 1;
+
+		return 1;
+	}
+
+	return 0;
+}
+/*
+-----------------------------------------------------------------------------------
+--                              check_batteries
+-----------------------------------------------------------------------------------
+*/
+static void check_batteries(struct cardinfo *card)
+{
+	/* NOTE: this must *never* be called while the card
+	 * is doing (bus-to-card) DMA, or you will need the
+	 * reset switch
+	 */
+	unsigned char status;
+	int ret1, ret2;
+
+	status = readb(card->csr_remap + MEMCTRLSTATUS_BATTERY);
+	if (debug & DEBUG_BATTERY_POLLING)
+		printk(KERN_DEBUG "MM%d: checking battery status, 1 = %s, 2 = %s\n",
+		       card->card_number,
+		       (status & BATTERY_1_FAILURE) ? "FAILURE" : "OK",
+		       (status & BATTERY_2_FAILURE) ? "FAILURE" : "OK");
+
+	ret1 = check_battery(card, 0, !(status & BATTERY_1_FAILURE));
+	ret2 = check_battery(card, 1, !(status & BATTERY_2_FAILURE));
+
+	if (ret1 || ret2)
+		set_fault_to_battery_status(card);
+}
+
+static void check_all_batteries(unsigned long ptr)
+{
+	int i;
+
+	for (i = 0; i < num_cards; i++) 
+		if (!(cards[i].flags & UM_FLAG_NO_BATT)) {
+			struct cardinfo *card = &cards[i];
+			spin_lock_bh(&card->lock);
+			if (card->Active >= 0)
+				card->check_batteries = 1;
+			else
+				check_batteries(card);
+			spin_unlock_bh(&card->lock);
+		}
+
+	init_battery_timer();
+}
+/*
+-----------------------------------------------------------------------------------
+--                            init_battery_timer
+-----------------------------------------------------------------------------------
+*/
+static void init_battery_timer(void)
+{
+	init_timer(&battery_timer);
+	battery_timer.function = check_all_batteries;
+	battery_timer.expires = jiffies + (HZ * 60);
+	add_timer(&battery_timer);
+}
+/*
+-----------------------------------------------------------------------------------
+--                              del_battery_timer
+-----------------------------------------------------------------------------------
+*/
+static void del_battery_timer(void)
+{
+	del_timer(&battery_timer);
+}
+/*
+-----------------------------------------------------------------------------------
+--                                mm_revalidate
+-----------------------------------------------------------------------------------
+*/
+/*
+ * Note no locks taken out here.  In a worst case scenario, we could drop
+ * a chunk of system memory.  But that should never happen, since validation
+ * happens at open or mount time, when locks are held.
+ *
+ *	That's crap, since doing that while some partitions are opened
+ * or mounted will give you really nasty results.
+ */
+static int mm_revalidate(struct gendisk *disk)
+{
+	struct cardinfo *card = disk->private_data;
+	set_capacity(disk, card->mm_size << 1);
+	return 0;
+}
+/*
+-----------------------------------------------------------------------------------
+--                            mm_ioctl
+-----------------------------------------------------------------------------------
+*/
+static int mm_ioctl(struct inode *i, struct file *f, unsigned int cmd, unsigned long arg)
+{
+	if (cmd == HDIO_GETGEO) {
+		struct cardinfo *card = i->i_bdev->bd_disk->private_data;
+		int size = card->mm_size * (1024 / MM_HARDSECT);
+		struct hd_geometry geo;
+		/*
+		 * get geometry: we have to fake one...  trim the size to a
+		 * multiple of 2048 (1M): say we have 32 sectors, 64 heads,
+		 * and however many cylinders that implies.
+		 */
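+		/*
+		 * Worked example: a 512 MB card has mm_size == 524288 KB,
+		 * so size == 1048576 sectors and cylinders == 1048576 /
+		 * (64 * 32) == 512.
+		 */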
+		geo.heads     = 64;
+		geo.sectors   = 32;
+		geo.start     = get_start_sect(i->i_bdev);
+		geo.cylinders = size / (geo.heads * geo.sectors);
+
+		if (copy_to_user((void __user *) arg, &geo, sizeof(geo)))
+			return -EFAULT;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+/*
+-----------------------------------------------------------------------------------
+--                                mm_check_change
+-----------------------------------------------------------------------------------
+  Future support for removable devices
+*/
+static int mm_check_change(struct gendisk *disk)
+{
+/*  struct cardinfo *dev = disk->private_data; */
+	return 0;
+}
+/*
+-----------------------------------------------------------------------------------
+--                             mm_fops
+-----------------------------------------------------------------------------------
+*/
+static struct block_device_operations mm_fops = {
+	.owner		= THIS_MODULE,
+	.ioctl		= mm_ioctl,
+	.revalidate_disk= mm_revalidate,
+	.media_changed	= mm_check_change,
+};
+/*
+-----------------------------------------------------------------------------------
+--                                mm_pci_probe
+-----------------------------------------------------------------------------------
+*/
+static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	int ret = -ENODEV;
+	struct cardinfo *card = &cards[num_cards];
+	unsigned char	mem_present;
+	unsigned char	batt_status;
+	unsigned int	saved_bar, data;
+	int		magic_number;
+
+	if (pci_enable_device(dev) < 0)
+		return -ENODEV;
+
+	pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0xF8);
+	pci_set_master(dev);
+
+	card->dev         = dev;
+	card->card_number = num_cards;
+
+	card->csr_base = pci_resource_start(dev, 0);
+	card->csr_len  = pci_resource_len(dev, 0);
+#ifdef CONFIG_MM_MAP_MEMORY
+	card->mem_base = pci_resource_start(dev, 1);
+	card->mem_len  = pci_resource_len(dev, 1);
+#endif
+
+	printk(KERN_INFO "Micro Memory(tm) controller #%d found at %02x:%02x (PCI Mem Module (Battery Backup))\n",
+	       card->card_number, dev->bus->number, dev->devfn);
+
+	if (pci_set_dma_mask(dev, 0xffffffffffffffffLL) &&
+	    pci_set_dma_mask(dev, 0xffffffffLL)) {
+		printk(KERN_WARNING "MM%d: NO suitable DMA found\n", num_cards);
+		return -ENOMEM;
+	}
+	if (!request_mem_region(card->csr_base, card->csr_len, "Micro Memory")) {
+		printk(KERN_ERR "MM%d: Unable to request memory region\n", card->card_number);
+		ret = -ENOMEM;
+
+		goto failed_req_csr;
+	}
+
+	card->csr_remap = ioremap_nocache(card->csr_base, card->csr_len);
+	if (!card->csr_remap) {
+		printk(KERN_ERR "MM%d: Unable to remap memory region\n", card->card_number);
+		ret = -ENOMEM;
+
+		goto failed_remap_csr;
+	}
+
+	printk(KERN_INFO "MM%d: CSR 0x%08lx -> 0x%p (0x%lx)\n", card->card_number,
+	       card->csr_base, card->csr_remap, card->csr_len);
+
+#ifdef CONFIG_MM_MAP_MEMORY
+	if (!request_mem_region(card->mem_base, card->mem_len, "Micro Memory")) {
+		printk(KERN_ERR "MM%d: Unable to request memory region\n", card->card_number);
+		ret = -ENOMEM;
+
+		goto failed_req_mem;
+	}
+
+	if (!(card->mem_remap = ioremap(card->mem_base, card->mem_len))) {
+		printk(KERN_ERR "MM%d: Unable to remap memory region\n", card->card_number);
+		ret = -ENOMEM;
+
+		goto failed_remap_mem;
+	}
+
+	printk(KERN_INFO "MM%d: MEM 0x%8lx -> 0x%8lx (0x%lx)\n", card->card_number,
+	       card->mem_base, card->mem_remap, card->mem_len);
+#else
+	printk(KERN_INFO "MM%d: MEM area not remapped (CONFIG_MM_MAP_MEMORY not set)\n",
+	       card->card_number);
+#endif
+	switch(card->dev->device) {
+	case 0x5415:
+		card->flags |= UM_FLAG_NO_BYTE_STATUS | UM_FLAG_NO_BATTREG;
+		magic_number = 0x59;
+		break;
+
+	case 0x5425:
+		card->flags |= UM_FLAG_NO_BYTE_STATUS;
+		magic_number = 0x5C;
+		break;
+
+	case 0x6155:
+		card->flags |= UM_FLAG_NO_BYTE_STATUS | UM_FLAG_NO_BATTREG | UM_FLAG_NO_BATT;
+		magic_number = 0x99;
+		break;
+
+	default:
+		magic_number = 0x100;
+		break;
+	}
+
+	if (readb(card->csr_remap + MEMCTRLSTATUS_MAGIC) != magic_number) {
+		printk(KERN_ERR "MM%d: Magic number invalid\n", card->card_number);
+		ret = -ENOMEM;
+		goto failed_magic;
+	}
+
+	card->mm_pages[0].desc = pci_alloc_consistent(card->dev,
+						      PAGE_SIZE*2,
+						      &card->mm_pages[0].page_dma);
+	card->mm_pages[1].desc = pci_alloc_consistent(card->dev,
+						      PAGE_SIZE*2,
+						      &card->mm_pages[1].page_dma);
+	if (card->mm_pages[0].desc == NULL ||
+	    card->mm_pages[1].desc == NULL) {
+		printk(KERN_ERR "MM%d: alloc failed\n", card->card_number);
+		goto failed_alloc;
+	}
+	reset_page(&card->mm_pages[0]);
+	reset_page(&card->mm_pages[1]);
+	card->Ready = 0;	/* page 0 is ready */
+	card->Active = -1;	/* no page is active */
+	card->bio = NULL;
+	card->biotail = &card->bio;
+
+	card->queue = blk_alloc_queue(GFP_KERNEL);
+	if (!card->queue)
+		goto failed_alloc;
+
+	blk_queue_make_request(card->queue, mm_make_request);
+	card->queue->queuedata = card;
+	card->queue->unplug_fn = mm_unplug_device;
+
+	tasklet_init(&card->tasklet, process_page, (unsigned long)card);
+
+	card->check_batteries = 0;
+	
+	mem_present = readb(card->csr_remap + MEMCTRLSTATUS_MEMORY);
+	switch (mem_present) {
+	case MEM_128_MB:
+		card->mm_size = 1024 * 128;
+		break;
+	case MEM_256_MB:
+		card->mm_size = 1024 * 256;
+		break;
+	case MEM_512_MB:
+		card->mm_size = 1024 * 512;
+		break;
+	case MEM_1_GB:
+		card->mm_size = 1024 * 1024;
+		break;
+	case MEM_2_GB:
+		card->mm_size = 1024 * 2048;
+		break;
+	default:
+		card->mm_size = 0;
+		break;
+	}
+
+	/* Clear the LED's we control */
+	set_led(card, LED_REMOVE, LED_OFF);
+	set_led(card, LED_FAULT, LED_OFF);
+
+	batt_status = readb(card->csr_remap + MEMCTRLSTATUS_BATTERY);
+
+	card->battery[0].good = !(batt_status & BATTERY_1_FAILURE);
+	card->battery[1].good = !(batt_status & BATTERY_2_FAILURE);
+	card->battery[0].last_change = card->battery[1].last_change = jiffies;
+
+	if (card->flags & UM_FLAG_NO_BATT) 
+		printk(KERN_INFO "MM%d: Size %d KB\n",
+		       card->card_number, card->mm_size);
+	else {
+		printk(KERN_INFO "MM%d: Size %d KB, Battery 1 %s (%s), Battery 2 %s (%s)\n",
+		       card->card_number, card->mm_size,
+		       (batt_status & BATTERY_1_DISABLED ? "Disabled" : "Enabled"),
+		       card->battery[0].good ? "OK" : "FAILURE",
+		       (batt_status & BATTERY_2_DISABLED ? "Disabled" : "Enabled"),
+		       card->battery[1].good ? "OK" : "FAILURE");
+
+		set_fault_to_battery_status(card);
+	}
+
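+	/*
+	 * Standard PCI BAR size probe: write all-ones to the BAR, read back
+	 * the decode mask, restore the original value, then size = ~mask + 1.
+	 * Worked example: a read-back of 0xfff00000 (low control bits masked
+	 * off) gives ~0xfff00000 + 1 == 0x00100000, i.e. a 1 MB window.
+	 */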
+	pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, &saved_bar);
+	data = 0xffffffff;
+	pci_write_config_dword(dev, PCI_BASE_ADDRESS_1, data);
+	pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, &data);
+	pci_write_config_dword(dev, PCI_BASE_ADDRESS_1, saved_bar);
+	data &= 0xfffffff0;
+	data = ~data;
+	data += 1;
+
+	card->win_size = data;
+
+
+	if (request_irq(dev->irq, mm_interrupt, SA_SHIRQ, "pci-umem", card)) {
+		printk(KERN_ERR "MM%d: Unable to allocate IRQ\n", card->card_number);
+		ret = -ENODEV;
+
+		goto failed_req_irq;
+	}
+
+	card->irq = dev->irq;
+	printk(KERN_INFO "MM%d: Window size %d bytes, IRQ %d\n", card->card_number,
+	       card->win_size, card->irq);
+
+	spin_lock_init(&card->lock);
+
+	pci_set_drvdata(dev, card);
+
+	if (pci_write_cmd != 0x0F) 	/* If not Memory Write & Invalidate */
+		pci_write_cmd = 0x07;	/* then Memory Write command */
+
+	if (pci_write_cmd & 0x08) { /* use Memory Write and Invalidate */
+		unsigned short cfg_command;
+		pci_read_config_word(dev, PCI_COMMAND, &cfg_command);
+		cfg_command |= 0x10; /* Memory Write & Invalidate Enable */
+		pci_write_config_word(dev, PCI_COMMAND, cfg_command);
+	}
+	pci_cmds = (pci_read_cmd << 28) | (pci_write_cmd << 24);
+
+	num_cards++;
+
+	if (!get_userbit(card, MEMORY_INITIALIZED)) {
+		printk(KERN_INFO "MM%d: memory NOT initialized. Consider over-writing whole device.\n", card->card_number);
+		card->init_size = 0;
+	} else {
+		printk(KERN_INFO "MM%d: memory already initialized\n", card->card_number);
+		card->init_size = card->mm_size;
+	}
+
+	/* Enable ECC */
+	writeb(EDC_STORE_CORRECT, card->csr_remap + MEMCTRLCMD_ERRCTRL);
+
+	return 0;
+
+ failed_req_irq:
+ failed_alloc:
+	if (card->mm_pages[0].desc)
+		pci_free_consistent(card->dev, PAGE_SIZE*2,
+				    card->mm_pages[0].desc,
+				    card->mm_pages[0].page_dma);
+	if (card->mm_pages[1].desc)
+		pci_free_consistent(card->dev, PAGE_SIZE*2,
+				    card->mm_pages[1].desc,
+				    card->mm_pages[1].page_dma);
+ failed_magic:
+#ifdef CONFIG_MM_MAP_MEMORY
+	iounmap(card->mem_remap);
+ failed_remap_mem:
+	release_mem_region(card->mem_base, card->mem_len);
+ failed_req_mem:
+#endif
+	iounmap(card->csr_remap);
+ failed_remap_csr:
+	release_mem_region(card->csr_base, card->csr_len);
+ failed_req_csr:
+
+	return ret;
+}
+/*
+-----------------------------------------------------------------------------------
+--                              mm_pci_remove
+-----------------------------------------------------------------------------------
+*/
+static void mm_pci_remove(struct pci_dev *dev)
+{
+	struct cardinfo *card = pci_get_drvdata(dev);
+
+	tasklet_kill(&card->tasklet);
+	iounmap(card->csr_remap);
+	release_mem_region(card->csr_base, card->csr_len);
+#ifdef CONFIG_MM_MAP_MEMORY
+	iounmap(card->mem_remap);
+	release_mem_region(card->mem_base, card->mem_len);
+#endif
+	free_irq(card->irq, card);
+
+	if (card->mm_pages[0].desc)
+		pci_free_consistent(card->dev, PAGE_SIZE*2,
+				    card->mm_pages[0].desc,
+				    card->mm_pages[0].page_dma);
+	if (card->mm_pages[1].desc)
+		pci_free_consistent(card->dev, PAGE_SIZE*2,
+				    card->mm_pages[1].desc,
+				    card->mm_pages[1].page_dma);
+	blk_put_queue(card->queue);
+}
+
+static const struct pci_device_id mm_pci_ids[] = { {
+	.vendor =	PCI_VENDOR_ID_MICRO_MEMORY,
+	.device =	PCI_DEVICE_ID_MICRO_MEMORY_5415CN,
+	}, {
+	.vendor =	PCI_VENDOR_ID_MICRO_MEMORY,
+	.device =	PCI_DEVICE_ID_MICRO_MEMORY_5425CN,
+	}, {
+	.vendor =	PCI_VENDOR_ID_MICRO_MEMORY,
+	.device =	PCI_DEVICE_ID_MICRO_MEMORY_6155,
+	}, {
+	.vendor	=	0x8086,
+	.device	=	0xB555,
+	.subvendor=	0x1332,
+	.subdevice=	0x5460,
+	.class	=	0x050000,
+	.class_mask=	0,
+	}, { /* end: all zeroes */ }
+};
+
+MODULE_DEVICE_TABLE(pci, mm_pci_ids);
+
+static struct pci_driver mm_pci_driver = {
+	.name =		"umem",
+	.id_table =	mm_pci_ids,
+	.probe =	mm_pci_probe,
+	.remove =	mm_pci_remove,
+};
+/*
+-----------------------------------------------------------------------------------
+--                               mm_init
+-----------------------------------------------------------------------------------
+*/
+
+static int __init mm_init(void)
+{
+	int retval, i;
+	int err;
+
+	printk(KERN_INFO DRIVER_VERSION " : " DRIVER_DESC "\n");
+
+	retval = pci_module_init(&mm_pci_driver);
+	if (retval)
+		return -ENOMEM;
+
+	err = major_nr = register_blkdev(0, "umem");
+	if (err < 0) {
+		pci_unregister_driver(&mm_pci_driver);
+		return -EIO;
+	}
+
+	for (i = 0; i < num_cards; i++) {
+		mm_gendisk[i] = alloc_disk(1 << MM_SHIFT);
+		if (!mm_gendisk[i])
+			goto out;
+	}
+
+	for (i = 0; i < num_cards; i++) {
+		struct gendisk *disk = mm_gendisk[i];
+		sprintf(disk->disk_name, "umem%c", 'a'+i);
+		sprintf(disk->devfs_name, "umem/card%d", i);
+		spin_lock_init(&cards[i].lock);
+		disk->major = major_nr;
+		disk->first_minor  = i << MM_SHIFT;
+		disk->fops = &mm_fops;
+		disk->private_data = &cards[i];
+		disk->queue = cards[i].queue;
+		set_capacity(disk, cards[i].mm_size << 1);
+		add_disk(disk);
+	}
+
+	init_battery_timer();
+	printk("MM: desc_per_page = %ld\n", DESC_PER_PAGE);
+/* printk("mm_init: Done. 10-19-01 9:00\n"); */
+	return 0;
+
+out:
+	unregister_blkdev(major_nr, "umem");
+	while (i--)
+		put_disk(mm_gendisk[i]);
+	return -ENOMEM;
+}
+/*
+-----------------------------------------------------------------------------------
+--                             mm_cleanup
+-----------------------------------------------------------------------------------
+*/
+static void __exit mm_cleanup(void)
+{
+	int i;
+
+	del_battery_timer();
+
+	for (i=0; i < num_cards ; i++) {
+		del_gendisk(mm_gendisk[i]);
+		put_disk(mm_gendisk[i]);
+	}
+
+	pci_unregister_driver(&mm_pci_driver);
+
+	unregister_blkdev(major_nr, "umem");
+}
+
+module_init(mm_init);
+module_exit(mm_cleanup);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL");
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c
new file mode 100644
index 0000000..46e56a2
--- /dev/null
+++ b/drivers/block/viodasd.c
@@ -0,0 +1,846 @@
+/* -*- linux-c -*-
+ * viodasd.c
+ *  Authors: Dave Boutcher <boutcher@us.ibm.com>
+ *           Ryan Arnold <ryanarn@us.ibm.com>
+ *           Colin Devilbiss <devilbis@us.ibm.com>
+ *           Stephen Rothwell <sfr@au1.ibm.com>
+ *
+ * (C) Copyright 2000-2004 IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * This routine provides access to disk space (termed "DASD" in historical
+ * IBM terms) owned and managed by an OS/400 partition running on the
+ * same box as this Linux partition.
+ *
+ * All disk operations are performed by sending messages back and forth to
+ * the OS/400 partition.
+ */
+#include <linux/major.h>
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/blkdev.h>
+#include <linux/genhd.h>
+#include <linux/hdreg.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/dma-mapping.h>
+#include <linux/completion.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+
+#include <asm/uaccess.h>
+#include <asm/vio.h>
+#include <asm/iSeries/HvTypes.h>
+#include <asm/iSeries/HvLpEvent.h>
+#include <asm/iSeries/HvLpConfig.h>
+#include <asm/iSeries/vio.h>
+
+MODULE_DESCRIPTION("iSeries Virtual DASD");
+MODULE_AUTHOR("Dave Boutcher");
+MODULE_LICENSE("GPL");
+
+/*
+ * We only support 7 partitions per physical disk, plus the whole-disk
+ * device: 1 << PARTITION_SHIFT == 8 minors per disk, so with minor
+ * numbers 0-255 we get a maximum of 256 / 8 == 32 disks.
+ */
+#define VIOD_GENHD_NAME		"iseries/vd"
+#define VIOD_GENHD_DEVFS_NAME	"iseries/disc"
+
+#define VIOD_VERS		"1.64"
+
+#define VIOD_KERN_WARNING	KERN_WARNING "viod: "
+#define VIOD_KERN_INFO		KERN_INFO "viod: "
+
+enum {
+	PARTITION_SHIFT = 3,
+	MAX_DISKNO = HVMAXARCHITECTEDVIRTUALDISKS,
+	MAX_DISK_NAME = sizeof(((struct gendisk *)0)->disk_name)
+};
+
+static DEFINE_SPINLOCK(viodasd_spinlock);
+
+#define VIOMAXREQ		16
+#define VIOMAXBLOCKDMA		12
+
+#define DEVICE_NO(cell)	((struct viodasd_device *)(cell) - &viodasd_devices[0])
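+/*
+ * DEVICE_NO() recovers a device's index by pointer arithmetic, e.g.
+ * DEVICE_NO(&viodasd_devices[5]) == 5.
+ */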
+
+struct open_data {
+	u64	disk_size;
+	u16	max_disk;
+	u16	cylinders;
+	u16	tracks;
+	u16	sectors;
+	u16	bytes_per_sector;
+};
+
+struct rw_data {
+	u64	offset;
+	struct {
+		u32	token;
+		u32	reserved;
+		u64	len;
+	} dma_info[VIOMAXBLOCKDMA];
+};
+
+struct vioblocklpevent {
+	struct HvLpEvent	event;
+	u32			reserved;
+	u16			version;
+	u16			sub_result;
+	u16			disk;
+	u16			flags;
+	union {
+		struct open_data	open_data;
+		struct rw_data		rw_data;
+		u64			changed;
+	} u;
+};
+
+#define vioblockflags_ro   0x0001
+
+enum vioblocksubtype {
+	vioblockopen = 0x0001,
+	vioblockclose = 0x0002,
+	vioblockread = 0x0003,
+	vioblockwrite = 0x0004,
+	vioblockflush = 0x0005,
+	vioblockcheck = 0x0007
+};
+
+struct viodasd_waitevent {
+	struct completion	com;
+	int			rc;
+	u16			sub_result;
+	int			max_disk;	/* open */
+};
+
+static const struct vio_error_entry viodasd_err_table[] = {
+	{ 0x0201, EINVAL, "Invalid Range" },
+	{ 0x0202, EINVAL, "Invalid Token" },
+	{ 0x0203, EIO, "DMA Error" },
+	{ 0x0204, EIO, "Use Error" },
+	{ 0x0205, EIO, "Release Error" },
+	{ 0x0206, EINVAL, "Invalid Disk" },
+	{ 0x0207, EBUSY, "Can't Lock" },
+	{ 0x0208, EIO, "Already Locked" },
+	{ 0x0209, EIO, "Already Unlocked" },
+	{ 0x020A, EIO, "Invalid Arg" },
+	{ 0x020B, EIO, "Bad IFS File" },
+	{ 0x020C, EROFS, "Read Only Device" },
+	{ 0x02FF, EIO, "Internal Error" },
+	{ 0x0000, 0, NULL },
+};
+
+/*
+ * Figure out the biggest I/O request (in sectors) we can accept
+ */
+#define VIODASD_MAXSECTORS (4096 / 512 * VIOMAXBLOCKDMA)
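+/* i.e. 4096 / 512 * 12 == 96 sectors, or at most 48 KB per request */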
+
+/*
+ * Number of disk I/O requests we've sent to OS/400
+ */
+static int num_req_outstanding;
+
+/*
+ * This is our internal structure for keeping track of disk devices
+ */
+struct viodasd_device {
+	u16		cylinders;
+	u16		tracks;
+	u16		sectors;
+	u16		bytes_per_sector;
+	u64		size;
+	int		read_only;
+	spinlock_t	q_lock;
+	struct gendisk	*disk;
+	struct device	*dev;
+} viodasd_devices[MAX_DISKNO];
+
+/*
+ * External open entry point.
+ */
+static int viodasd_open(struct inode *ino, struct file *fil)
+{
+	struct viodasd_device *d = ino->i_bdev->bd_disk->private_data;
+	HvLpEvent_Rc hvrc;
+	struct viodasd_waitevent we;
+	u16 flags = 0;
+
+	if (d->read_only) {
+		if ((fil != NULL) && (fil->f_mode & FMODE_WRITE))
+			return -EROFS;
+		flags = vioblockflags_ro;
+	}
+
+	init_completion(&we.com);
+
+	/* Send the open event to OS/400 */
+	hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
+			HvLpEvent_Type_VirtualIo,
+			viomajorsubtype_blockio | vioblockopen,
+			HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
+			viopath_sourceinst(viopath_hostLp),
+			viopath_targetinst(viopath_hostLp),
+			(u64)(unsigned long)&we, VIOVERSION << 16,
+			((u64)DEVICE_NO(d) << 48) | ((u64)flags << 32),
+			0, 0, 0);
+	if (hvrc != 0) {
+		printk(VIOD_KERN_WARNING "HV open failed %d\n", (int)hvrc);
+		return -EIO;
+	}
+
+	wait_for_completion(&we.com);
+
+	/* Check the return code */
+	if (we.rc != 0) {
+		const struct vio_error_entry *err =
+			vio_lookup_rc(viodasd_err_table, we.sub_result);
+
+		printk(VIOD_KERN_WARNING
+				"bad rc opening disk: %d:0x%04x (%s)\n",
+				(int)we.rc, we.sub_result, err->msg);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/*
+ * External release entry point.
+ */
+static int viodasd_release(struct inode *ino, struct file *fil)
+{
+	struct viodasd_device *d = ino->i_bdev->bd_disk->private_data;
+	HvLpEvent_Rc hvrc;
+
+	/* Send the event to OS/400.  We DON'T expect a response */
+	hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
+			HvLpEvent_Type_VirtualIo,
+			viomajorsubtype_blockio | vioblockclose,
+			HvLpEvent_AckInd_NoAck, HvLpEvent_AckType_ImmediateAck,
+			viopath_sourceinst(viopath_hostLp),
+			viopath_targetinst(viopath_hostLp),
+			0, VIOVERSION << 16,
+			((u64)DEVICE_NO(d) << 48) /* | ((u64)flags << 32) */,
+			0, 0, 0);
+	if (hvrc != 0)
+		printk(VIOD_KERN_WARNING "HV close call failed %d\n",
+				(int)hvrc);
+	return 0;
+}
+
+
+/*
+ * External ioctl entry point.
+ */
+static int viodasd_ioctl(struct inode *ino, struct file *fil,
+			 unsigned int cmd, unsigned long arg)
+{
+	unsigned char sectors;
+	unsigned char heads;
+	unsigned short cylinders;
+	struct hd_geometry *geo;
+	struct gendisk *gendisk;
+	struct viodasd_device *d;
+
+	switch (cmd) {
+	case HDIO_GETGEO:
+		geo = (struct hd_geometry *)arg;
+		if (geo == NULL)
+			return -EINVAL;
+		if (!access_ok(VERIFY_WRITE, geo, sizeof(*geo)))
+			return -EFAULT;
+		gendisk = ino->i_bdev->bd_disk;
+		d = gendisk->private_data;
+		sectors = d->sectors;
+		if (sectors == 0)
+			sectors = 32;
+		heads = d->tracks;
+		if (heads == 0)
+			heads = 64;
+		cylinders = d->cylinders;
+		if (cylinders == 0)
+			cylinders = get_capacity(gendisk) / (sectors * heads);
+		if (__put_user(sectors, &geo->sectors) ||
+		    __put_user(heads, &geo->heads) ||
+		    __put_user(cylinders, &geo->cylinders) ||
+		    __put_user(get_start_sect(ino->i_bdev), &geo->start))
+			return -EFAULT;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+/*
+ * Our file operations table
+ */
+static struct block_device_operations viodasd_fops = {
+	.owner = THIS_MODULE,
+	.open = viodasd_open,
+	.release = viodasd_release,
+	.ioctl = viodasd_ioctl,
+};
+
+/*
+ * End a request
+ */
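+/*
+ * end_that_request_first() returns nonzero while the request still has
+ * sectors outstanding; only once it returns zero may the request be
+ * completed with end_that_request_last().
+ */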
+static void viodasd_end_request(struct request *req, int uptodate,
+		int num_sectors)
+{
+	if (end_that_request_first(req, uptodate, num_sectors))
+		return;
+	add_disk_randomness(req->rq_disk);
+	end_that_request_last(req);
+}
+
+/*
+ * Send an actual I/O request to OS/400
+ */
+static int send_request(struct request *req)
+{
+	u64 start;
+	int direction;
+	int nsg;
+	u16 viocmd;
+	HvLpEvent_Rc hvrc;
+	struct vioblocklpevent *bevent;
+	struct scatterlist sg[VIOMAXBLOCKDMA];
+	int sgindex;
+	int statindex;
+	struct viodasd_device *d;
+	unsigned long flags;
+
+	start = (u64)req->sector << 9;
+
+	if (rq_data_dir(req) == READ) {
+		direction = DMA_FROM_DEVICE;
+		viocmd = viomajorsubtype_blockio | vioblockread;
+		statindex = 0;
+	} else {
+		direction = DMA_TO_DEVICE;
+		viocmd = viomajorsubtype_blockio | vioblockwrite;
+		statindex = 1;
+	}
+
+	d = req->rq_disk->private_data;
+
+	/* Now build the scatter-gather list */
+	nsg = blk_rq_map_sg(req->q, req, sg);
+	nsg = dma_map_sg(d->dev, sg, nsg, direction);
+
+	spin_lock_irqsave(&viodasd_spinlock, flags);
+	num_req_outstanding++;
+
+	/* This optimization handles a single DMA block */
+	if (nsg == 1)
+		hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
+				HvLpEvent_Type_VirtualIo, viocmd,
+				HvLpEvent_AckInd_DoAck,
+				HvLpEvent_AckType_ImmediateAck,
+				viopath_sourceinst(viopath_hostLp),
+				viopath_targetinst(viopath_hostLp),
+				(u64)(unsigned long)req, VIOVERSION << 16,
+				((u64)DEVICE_NO(d) << 48), start,
+				((u64)sg_dma_address(&sg[0])) << 32,
+				sg_dma_len(&sg[0]));
+	else {
+		bevent = (struct vioblocklpevent *)
+			vio_get_event_buffer(viomajorsubtype_blockio);
+		if (bevent == NULL) {
+			printk(VIOD_KERN_WARNING
+			       "error allocating disk event buffer\n");
+			goto error_ret;
+		}
+
+		/*
+		 * Now build up the actual request.  Note that we store
+		 * the pointer to the request in the correlation
+		 * token so we can match the response up later
+		 */
+		memset(bevent, 0, sizeof(struct vioblocklpevent));
+		bevent->event.xFlags.xValid = 1;
+		bevent->event.xFlags.xFunction = HvLpEvent_Function_Int;
+		bevent->event.xFlags.xAckInd = HvLpEvent_AckInd_DoAck;
+		bevent->event.xFlags.xAckType = HvLpEvent_AckType_ImmediateAck;
+		bevent->event.xType = HvLpEvent_Type_VirtualIo;
+		bevent->event.xSubtype = viocmd;
+		bevent->event.xSourceLp = HvLpConfig_getLpIndex();
+		bevent->event.xTargetLp = viopath_hostLp;
+		bevent->event.xSizeMinus1 =
+			offsetof(struct vioblocklpevent, u.rw_data.dma_info) +
+			(sizeof(bevent->u.rw_data.dma_info[0]) * nsg) - 1;
+		bevent->event.xSourceInstanceId =
+			viopath_sourceinst(viopath_hostLp);
+		bevent->event.xTargetInstanceId =
+			viopath_targetinst(viopath_hostLp);
+		bevent->event.xCorrelationToken = (u64)req;
+		bevent->version = VIOVERSION;
+		bevent->disk = DEVICE_NO(d);
+		bevent->u.rw_data.offset = start;
+
+		/*
+		 * Copy just the dma information from the sg list
+		 * into the request
+		 */
+		for (sgindex = 0; sgindex < nsg; sgindex++) {
+			bevent->u.rw_data.dma_info[sgindex].token =
+				sg_dma_address(&sg[sgindex]);
+			bevent->u.rw_data.dma_info[sgindex].len =
+				sg_dma_len(&sg[sgindex]);
+		}
+
+		/* Send the request */
+		hvrc = HvCallEvent_signalLpEvent(&bevent->event);
+		vio_free_event_buffer(viomajorsubtype_blockio, bevent);
+	}
+
+	if (hvrc != HvLpEvent_Rc_Good) {
+		printk(VIOD_KERN_WARNING
+		       "error sending disk event to OS/400 (rc %d)\n",
+		       (int)hvrc);
+		goto error_ret;
+	}
+	spin_unlock_irqrestore(&viodasd_spinlock, flags);
+	return 0;
+
+error_ret:
+	num_req_outstanding--;
+	spin_unlock_irqrestore(&viodasd_spinlock, flags);
+	dma_unmap_sg(d->dev, sg, nsg, direction);
+	return -1;
+}
+
+/*
+ * This is the external request processing routine
+ */
+static void do_viodasd_request(request_queue_t *q)
+{
+	struct request *req;
+
+	/*
+	 * If we already have the maximum number of requests
+	 * outstanding to OS/400 just bail out. We'll come
+	 * back later.
+	 */
+	while (num_req_outstanding < VIOMAXREQ) {
+		req = elv_next_request(q);
+		if (req == NULL)
+			return;
+		/* dequeue the current request from the queue */
+		blkdev_dequeue_request(req);
+		/* check that request contains a valid command */
+		if (!blk_fs_request(req)) {
+			viodasd_end_request(req, 0, req->hard_nr_sectors);
+			continue;
+		}
+		/* Try sending the request */
+		if (send_request(req) != 0)
+			viodasd_end_request(req, 0, req->hard_nr_sectors);
+	}
+}
+
+/*
+ * Probe a single disk and fill in the viodasd_device structure
+ * for it.
+ */
+static void probe_disk(struct viodasd_device *d)
+{
+	HvLpEvent_Rc hvrc;
+	struct viodasd_waitevent we;
+	int dev_no = DEVICE_NO(d);
+	struct gendisk *g;
+	struct request_queue *q;
+	u16 flags = 0;
+
+retry:
+	init_completion(&we.com);
+
+	/* Send the open event to OS/400 */
+	hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
+			HvLpEvent_Type_VirtualIo,
+			viomajorsubtype_blockio | vioblockopen,
+			HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
+			viopath_sourceinst(viopath_hostLp),
+			viopath_targetinst(viopath_hostLp),
+			(u64)(unsigned long)&we, VIOVERSION << 16,
+			((u64)dev_no << 48) | ((u64)flags<< 32),
+			0, 0, 0);
+	if (hvrc != 0) {
+		printk(VIOD_KERN_WARNING "bad rc on HV open %d\n", (int)hvrc);
+		return;
+	}
+
+	wait_for_completion(&we.com);
+
+	if (we.rc != 0) {
+		if (flags != 0)
+			return;
+		/* try again with read only flag set */
+		flags = vioblockflags_ro;
+		goto retry;
+	}
+	if (we.max_disk > (MAX_DISKNO - 1)) {
+		static int warned;
+
+		if (warned == 0) {
+			warned++;
+			printk(VIOD_KERN_INFO
+				"Only examining the first %d "
+				"of %d disks connected\n",
+				MAX_DISKNO, we.max_disk + 1);
+		}
+	}
+
+	/* Send the close event to OS/400.  We DON'T expect a response */
+	hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
+			HvLpEvent_Type_VirtualIo,
+			viomajorsubtype_blockio | vioblockclose,
+			HvLpEvent_AckInd_NoAck, HvLpEvent_AckType_ImmediateAck,
+			viopath_sourceinst(viopath_hostLp),
+			viopath_targetinst(viopath_hostLp),
+			0, VIOVERSION << 16,
+			((u64)dev_no << 48) | ((u64)flags << 32),
+			0, 0, 0);
+	if (hvrc != 0) {
+		printk(VIOD_KERN_WARNING
+		       "bad rc sending event to OS/400 %d\n", (int)hvrc);
+		return;
+	}
+	/* create the request queue for the disk */
+	spin_lock_init(&d->q_lock);
+	q = blk_init_queue(do_viodasd_request, &d->q_lock);
+	if (q == NULL) {
+		printk(VIOD_KERN_WARNING "cannot allocate queue for disk %d\n",
+				dev_no);
+		return;
+	}
+	g = alloc_disk(1 << PARTITION_SHIFT);
+	if (g == NULL) {
+		printk(VIOD_KERN_WARNING
+				"cannot allocate disk structure for disk %d\n",
+				dev_no);
+		blk_cleanup_queue(q);
+		return;
+	}
+
+	d->disk = g;
+	blk_queue_max_hw_segments(q, VIOMAXBLOCKDMA);
+	blk_queue_max_phys_segments(q, VIOMAXBLOCKDMA);
+	blk_queue_max_sectors(q, VIODASD_MAXSECTORS);
+	g->major = VIODASD_MAJOR;
+	g->first_minor = dev_no << PARTITION_SHIFT;
+	if (dev_no >= 26)
+		snprintf(g->disk_name, sizeof(g->disk_name),
+				VIOD_GENHD_NAME "%c%c",
+				'a' + (dev_no / 26) - 1, 'a' + (dev_no % 26));
+	else
+		snprintf(g->disk_name, sizeof(g->disk_name),
+				VIOD_GENHD_NAME "%c", 'a' + (dev_no % 26));
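+	/* e.g. dev_no 0 yields the "...a" name and dev_no 26 wraps to
+	   "...aa", following the usual disk-name lettering scheme */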
+	snprintf(g->devfs_name, sizeof(g->devfs_name),
+			"%s%d", VIOD_GENHD_DEVFS_NAME, dev_no);
+	g->fops = &viodasd_fops;
+	g->queue = q;
+	g->private_data = d;
+	g->driverfs_dev = d->dev;
+	set_capacity(g, d->size >> 9);
+
+	printk(VIOD_KERN_INFO "disk %d: %lu sectors (%lu MB) "
+			"CHS=%d/%d/%d sector size %d%s\n",
+			dev_no, (unsigned long)(d->size >> 9),
+			(unsigned long)(d->size >> 20),
+			(int)d->cylinders, (int)d->tracks,
+			(int)d->sectors, (int)d->bytes_per_sector,
+			d->read_only ? " (RO)" : "");
+
+	/* register us in the global list */
+	add_disk(g);
+}
+
+/* returns the total number of scatterlist elements converted */
+static int block_event_to_scatterlist(const struct vioblocklpevent *bevent,
+		struct scatterlist *sg, int *total_len)
+{
+	int i, numsg;
+	const struct rw_data *rw_data = &bevent->u.rw_data;
+	static const int offset =
+		offsetof(struct vioblocklpevent, u.rw_data.dma_info);
+	static const int element_size = sizeof(rw_data->dma_info[0]);
+
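+	/* The number of dma_info entries is implied by the event length:
+	 * payload bytes past the dma_info offset, divided by the size of
+	 * one entry (and capped at VIOMAXBLOCKDMA below).
+	 */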
+	numsg = ((bevent->event.xSizeMinus1 + 1) - offset) / element_size;
+	if (numsg > VIOMAXBLOCKDMA)
+		numsg = VIOMAXBLOCKDMA;
+
+	*total_len = 0;
+	memset(sg, 0, sizeof(sg[0]) * VIOMAXBLOCKDMA);
+
+	for (i = 0; (i < numsg) && (rw_data->dma_info[i].len > 0); ++i) {
+		sg_dma_address(&sg[i]) = rw_data->dma_info[i].token;
+		sg_dma_len(&sg[i]) = rw_data->dma_info[i].len;
+		*total_len += rw_data->dma_info[i].len;
+	}
+	return i;
+}
+
+/*
+ * Restart all queues, starting with the one _after_ the disk given,
+ * thus reducing the chance of starvation of higher numbered disks.
+ */
+static void viodasd_restart_all_queues_starting_from(int first_index)
+{
+	int i;
+
+	for (i = first_index + 1; i < MAX_DISKNO; ++i)
+		if (viodasd_devices[i].disk)
+			blk_run_queue(viodasd_devices[i].disk->queue);
+	for (i = 0; i <= first_index; ++i)
+		if (viodasd_devices[i].disk)
+			blk_run_queue(viodasd_devices[i].disk->queue);
+}
+
+/*
+ * For read and write requests, decrement the number of outstanding requests
+ * and free the DMA buffers we allocated.
+ */
+static int viodasd_handle_read_write(struct vioblocklpevent *bevent)
+{
+	int num_sg, num_sect, pci_direction, total_len;
+	struct request *req;
+	struct scatterlist sg[VIOMAXBLOCKDMA];
+	struct HvLpEvent *event = &bevent->event;
+	unsigned long irq_flags;
+	struct viodasd_device *d;
+	int error;
+	spinlock_t *qlock;
+
+	num_sg = block_event_to_scatterlist(bevent, sg, &total_len);
+	num_sect = total_len >> 9;
+	if (event->xSubtype == (viomajorsubtype_blockio | vioblockread))
+		pci_direction = DMA_FROM_DEVICE;
+	else
+		pci_direction = DMA_TO_DEVICE;
+	req = (struct request *)bevent->event.xCorrelationToken;
+	d = req->rq_disk->private_data;
+
+	dma_unmap_sg(d->dev, sg, num_sg, pci_direction);
+
+	/*
+	 * Since this is running in interrupt mode, we need to make sure
+	 * we're not stepping on any global I/O operations
+	 */
+	spin_lock_irqsave(&viodasd_spinlock, irq_flags);
+	num_req_outstanding--;
+	spin_unlock_irqrestore(&viodasd_spinlock, irq_flags);
+
+	error = event->xRc != HvLpEvent_Rc_Good;
+	if (error) {
+		const struct vio_error_entry *err;
+		err = vio_lookup_rc(viodasd_err_table, bevent->sub_result);
+		printk(VIOD_KERN_WARNING "read/write error %d:0x%04x (%s)\n",
+				event->xRc, bevent->sub_result, err->msg);
+		num_sect = req->hard_nr_sectors;
+	}
+	qlock = req->q->queue_lock;
+	spin_lock_irqsave(qlock, irq_flags);
+	viodasd_end_request(req, !error, num_sect);
+	spin_unlock_irqrestore(qlock, irq_flags);
+
+	/* Finally, try to get more requests off of this device's queue */
+	viodasd_restart_all_queues_starting_from(DEVICE_NO(d));
+
+	return 0;
+}
+
+/* This routine handles incoming block LP events */
+static void handle_block_event(struct HvLpEvent *event)
+{
+	struct vioblocklpevent *bevent = (struct vioblocklpevent *)event;
+	struct viodasd_waitevent *pwe;
+
+	if (event == NULL)
+		/* Notification that a partition went away! */
+		return;
+	/* First, we should NEVER get an int here...only acks */
+	if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
+		printk(VIOD_KERN_WARNING
+		       "Yikes! got an int in viodasd event handler!\n");
+		if (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck) {
+			event->xRc = HvLpEvent_Rc_InvalidSubtype;
+			HvCallEvent_ackLpEvent(event);
+		}
+	}
+
+	switch (event->xSubtype & VIOMINOR_SUBTYPE_MASK) {
+	case vioblockopen:
+		/*
+		 * Handle a response to an open request.  We get all the
+		 * disk information in the response, so update it.  The
+		 * correlation token contains a pointer to a waitevent
+		 * structure that has a completion in it.  Update the
+		 * return code in the waitevent structure and post the
+		 * completion to wake up the guy who sent the request
+		 */
+		pwe = (struct viodasd_waitevent *)event->xCorrelationToken;
+		pwe->rc = event->xRc;
+		pwe->sub_result = bevent->sub_result;
+		if (event->xRc == HvLpEvent_Rc_Good) {
+			const struct open_data *data = &bevent->u.open_data;
+			struct viodasd_device *device =
+				&viodasd_devices[bevent->disk];
+			device->read_only =
+				bevent->flags & vioblockflags_ro;
+			device->size = data->disk_size;
+			device->cylinders = data->cylinders;
+			device->tracks = data->tracks;
+			device->sectors = data->sectors;
+			device->bytes_per_sector = data->bytes_per_sector;
+			pwe->max_disk = data->max_disk;
+		}
+		complete(&pwe->com);
+		break;
+	case vioblockclose:
+		break;
+	case vioblockread:
+	case vioblockwrite:
+		viodasd_handle_read_write(bevent);
+		break;
+
+	default:
+		printk(VIOD_KERN_WARNING "invalid subtype!\n");
+		if (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck) {
+			event->xRc = HvLpEvent_Rc_InvalidSubtype;
+			HvCallEvent_ackLpEvent(event);
+		}
+	}
+}
+
+/*
+ * Get the driver to reprobe for more disks.
+ */
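+/* (Writing to the driver's sysfs "probe" attribute triggers this; with the
+ * usual vio bus layout that should appear as something like
+ * "echo 1 > /sys/bus/vio/drivers/viodasd/probe".)
+ */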
+static ssize_t probe_disks(struct device_driver *drv, const char *buf,
+		size_t count)
+{
+	struct viodasd_device *d;
+
+	for (d = viodasd_devices; d < &viodasd_devices[MAX_DISKNO]; d++) {
+		if (d->disk == NULL)
+			probe_disk(d);
+	}
+	return count;
+}
+static DRIVER_ATTR(probe, S_IWUSR, NULL, probe_disks);
+
+static int viodasd_probe(struct vio_dev *vdev, const struct vio_device_id *id)
+{
+	struct viodasd_device *d = &viodasd_devices[vdev->unit_address];
+
+	d->dev = &vdev->dev;
+	probe_disk(d);
+	if (d->disk == NULL)
+		return -ENODEV;
+	return 0;
+}
+
+static int viodasd_remove(struct vio_dev *vdev)
+{
+	struct viodasd_device *d;
+
+	d = &viodasd_devices[vdev->unit_address];
+	if (d->disk) {
+		del_gendisk(d->disk);
+		blk_cleanup_queue(d->disk->queue);
+		put_disk(d->disk);
+		d->disk = NULL;
+	}
+	d->dev = NULL;
+	return 0;
+}
+
+/**
+ * viodasd_device_table: Used by vio.c to match devices that we
+ * support.
+ */
+static struct vio_device_id viodasd_device_table[] __devinitdata = {
+	{ "viodasd", "" },
+	{ 0, }
+};
+
+MODULE_DEVICE_TABLE(vio, viodasd_device_table);
+static struct vio_driver viodasd_driver = {
+	.name = "viodasd",
+	.id_table = viodasd_device_table,
+	.probe = viodasd_probe,
+	.remove = viodasd_remove
+};
+
+/*
+ * Initialize the whole device driver.  Handle module and non-module
+ * versions
+ */
+static int __init viodasd_init(void)
+{
+	int rc;
+
+	/* Try to open to our host lp */
+	if (viopath_hostLp == HvLpIndexInvalid)
+		vio_set_hostlp();
+
+	if (viopath_hostLp == HvLpIndexInvalid) {
+		printk(VIOD_KERN_WARNING "invalid hosting partition\n");
+		return -EIO;
+	}
+
+	printk(VIOD_KERN_INFO "vers " VIOD_VERS ", hosting partition %d\n",
+			viopath_hostLp);
+
+	/* Register the block device */
+	if (register_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME)) {
+		printk(VIOD_KERN_WARNING
+				"Unable to get major number %d for %s\n",
+				VIODASD_MAJOR, VIOD_GENHD_NAME);
+		return -EIO;
+	}
+	/* Actually open the path to the hosting partition */
+	if (viopath_open(viopath_hostLp, viomajorsubtype_blockio,
+				VIOMAXREQ + 2)) {
+		printk(VIOD_KERN_WARNING
+		       "error opening path to host partition %d\n",
+		       viopath_hostLp);
+		unregister_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME);
+		return -EIO;
+	}
+
+	/* Initialize our request handler */
+	vio_setHandler(viomajorsubtype_blockio, handle_block_event);
+
+	rc = vio_register_driver(&viodasd_driver);
+	if (rc == 0)
+		driver_create_file(&viodasd_driver.driver, &driver_attr_probe);
+	return rc;
+}
+module_init(viodasd_init);
+
+void viodasd_exit(void)
+{
+	driver_remove_file(&viodasd_driver.driver, &driver_attr_probe);
+	vio_unregister_driver(&viodasd_driver);
+	vio_clearHandler(viomajorsubtype_blockio);
+	unregister_blkdev(VIODASD_MAJOR, VIOD_GENHD_NAME);
+	viopath_close(viopath_hostLp, viomajorsubtype_blockio, VIOMAXREQ + 2);
+}
+
+module_exit(viodasd_exit);
diff --git a/drivers/block/xd.c b/drivers/block/xd.c
new file mode 100644
index 0000000..1676033
--- /dev/null
+++ b/drivers/block/xd.c
@@ -0,0 +1,1112 @@
+/*
+ * This file contains the driver for an XT hard disk controller
+ * (at least the DTC 5150X) for Linux.
+ *
+ * Author: Pat Mackinlay, pat@it.com.au
+ * Date: 29/09/92
+ * 
+ * Revised: 01/01/93, ...
+ *
+ * Ref: DTC 5150X Controller Specification (thanks to Kevin Fowler,
+ *   kevinf@agora.rain.com)
+ * Also thanks to: Salvador Abreu, Dave Thaler, Risto Kankkunen and
+ *   Wim Van Dorst.
+ *
+ * Revised: 04/04/94 by Risto Kankkunen
+ *   Moved the detection code from xd_init() to xd_geninit() as it needed
+ *   interrupts enabled and Linus didn't want to enable them in that first
+ *   phase. xd_geninit() is the place to do these kinds of things anyway,
+ *   he says.
+ *
+ * Modularized: 04/10/96 by Todd Fries, tfries@umr.edu
+ *
+ * Revised: 13/12/97 by Andrzej Krzysztofowicz, ankry@mif.pg.gda.pl
+ *   Fixed some problems with disk initialization and module initiation.
+ *   Added support for manual geometry setting (except Seagate controllers)
+ *   in form:
+ *      xd_geo=<cyl_xda>,<head_xda>,<sec_xda>[,<cyl_xdb>,<head_xdb>,<sec_xdb>]
+ *   Recovered DMA access. Abridged messages. Added support for DTC5051CX,
+ *   WD1002-27X & XEBEC controllers. Driver uses now some jumper settings.
+ *   Extended ioctl() support.
+ *
+ * Bugfix: 15/02/01, Paul G. - inform queue layer of tiny xd_maxsect.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/genhd.h>
+#include <linux/hdreg.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/wait.h>
+#include <linux/blkdev.h>
+#include <linux/blkpg.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/dma.h>
+
+#include "xd.h"
+
+static void __init do_xd_setup (int *integers);
+#ifdef MODULE
+static int xd[5] = { -1,-1,-1,-1, };
+#endif
+
+#define XD_DONT_USE_DMA		0  /* Initial value; may be overridden using
+				      the "nodma" module option */
+#define XD_INIT_DISK_DELAY	(30*HZ/1000)  /* 30 ms delay during disk initialization */
+
+/* The above delay may need to be increased if problems appear with detection
+   of the 2nd drive (ST11M controller) or with resetting a controller (WD) */
+
+static XD_INFO xd_info[XD_MAXDRIVES];
+
+/* If you try this driver and find that your card is not detected by the driver at bootup, you need to add your BIOS
+   signature and details to the following list of signatures. A BIOS signature is a string embedded into the first
+   few bytes of your controller's on-board ROM BIOS. To find out what yours is, use something like MS-DOS's DEBUG
+   command. Run DEBUG, and then you can examine your BIOS signature with:
+
+	d xxxx:0000
+
+   where xxxx is the segment of your controller (like C800 or D000 or something). On the ASCII dump at the right, you should
+   be able to see a string mentioning the manufacturer's copyright etc. Add this string into the table below. The parameters
+   in the table are, in order:
+
+	offset			; this is the offset (in bytes) from the start of your ROM where the signature starts
+	signature		; this is the actual text of the signature
+	xd_?_init_controller	; this is the controller init routine used by your controller
+	xd_?_init_drive		; this is the drive init routine used by your controller
+
+   The controllers directly supported at the moment are: DTC 5150x, WD 1004A27X, ST11M/R and override. If your controller is
+   made by the same manufacturer as one of these, try using the same init routines as they do. If that doesn't work, your
+   best bet is to use the "override" routines. These routines use a "portable" method of getting the disk's geometry, and
+   may work with your card. If none of these seem to work, try sending me some email and I'll see what I can do <grin>.
+
+   NOTE: You can now specify your XT controller's parameters from the command line in the form xd=TYPE,IRQ,IO,DMA. The driver
+   should be able to detect your drive's geometry from this info. (eg: xd=0,5,0x320,3 is the "standard"). */
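+/* A hypothetical new entry (offset, signature text and name are invented
+   purely for illustration - take the real values from your own ROM dump)
+   might read:
+
+	{ 0x0010,"(C) 1989 Example Disk Corp.",xd_dtc_init_controller,xd_dtc_init_drive," Example XT" },
+*/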
+
+#include <asm/page.h>
+#define xd_dma_mem_alloc(size) __get_dma_pages(GFP_KERNEL,get_order(size))
+#define xd_dma_mem_free(addr, size) free_pages(addr, get_order(size))
+static char *xd_dma_buffer;
+
+static XD_SIGNATURE xd_sigs[] __initdata = {
+	{ 0x0000,"Override geometry handler",NULL,xd_override_init_drive,"n unknown" }, /* Pat Mackinlay, pat@it.com.au */
+	{ 0x0008,"[BXD06 (C) DTC 17-MAY-1985]",xd_dtc_init_controller,xd_dtc5150cx_init_drive," DTC 5150CX" }, /* Andrzej Krzysztofowicz, ankry@mif.pg.gda.pl */
+	{ 0x000B,"CRD18A   Not an IBM rom. (C) Copyright Data Technology Corp. 05/31/88",xd_dtc_init_controller,xd_dtc_init_drive," DTC 5150X" }, /* Todd Fries, tfries@umr.edu */
+	{ 0x000B,"CXD23A Not an IBM ROM (C)Copyright Data Technology Corp 12/03/88",xd_dtc_init_controller,xd_dtc_init_drive," DTC 5150X" }, /* Pat Mackinlay, pat@it.com.au */
+	{ 0x0008,"07/15/86(C) Copyright 1986 Western Digital Corp.",xd_wd_init_controller,xd_wd_init_drive," Western Dig. 1002-27X" }, /* Andrzej Krzysztofowicz, ankry@mif.pg.gda.pl */
+	{ 0x0008,"06/24/88(C) Copyright 1988 Western Digital Corp.",xd_wd_init_controller,xd_wd_init_drive," Western Dig. WDXT-GEN2" }, /* Dan Newcombe, newcombe@aa.csc.peachnet.edu */
+	{ 0x0015,"SEAGATE ST11 BIOS REVISION",xd_seagate_init_controller,xd_seagate_init_drive," Seagate ST11M/R" }, /* Salvador Abreu, spa@fct.unl.pt */
+	{ 0x0010,"ST11R BIOS",xd_seagate_init_controller,xd_seagate_init_drive," Seagate ST11M/R" }, /* Risto Kankkunen, risto.kankkunen@cs.helsinki.fi */
+	{ 0x0010,"ST11 BIOS v1.7",xd_seagate_init_controller,xd_seagate_init_drive," Seagate ST11R" }, /* Alan Hourihane, alanh@fairlite.demon.co.uk */
+	{ 0x1000,"(c)Copyright 1987 SMS",xd_omti_init_controller,xd_omti_init_drive,"n OMTI 5520" }, /* Dirk Melchers, dirk@merlin.nbg.sub.org */
+	{ 0x0006,"COPYRIGHT XEBEC (C) 1984",xd_xebec_init_controller,xd_xebec_init_drive," XEBEC" }, /* Andrzej Krzysztofowicz, ankry@mif.pg.gda.pl */
+	{ 0x0008,"(C) Copyright 1984 Western Digital Corp", xd_wd_init_controller, xd_wd_init_drive," Western Dig. 1002s-wx2" },
+	{ 0x0008,"(C) Copyright 1986 Western Digital Corporation", xd_wd_init_controller, xd_wd_init_drive," 1986 Western Digital" }, /* jfree@sovereign.org */
+};
+
+static unsigned int xd_bases[] __initdata =
+{
+	0xC8000, 0xCA000, 0xCC000,
+	0xCE000, 0xD0000, 0xD2000,
+	0xD4000, 0xD6000, 0xD8000,
+	0xDA000, 0xDC000, 0xDE000,
+	0xE0000
+};
+
+static DEFINE_SPINLOCK(xd_lock);
+
+static struct gendisk *xd_gendisk[2];
+
+static struct block_device_operations xd_fops = {
+	.owner	= THIS_MODULE,
+	.ioctl	= xd_ioctl,
+};
+static DECLARE_WAIT_QUEUE_HEAD(xd_wait_int);
+static u_char xd_drives, xd_irq = 5, xd_dma = 3, xd_maxsectors;
+static u_char xd_override __initdata = 0, xd_type __initdata = 0;
+static u_short xd_iobase = 0x320;
+static int xd_geo[XD_MAXDRIVES*3] __initdata = { 0, };
+
+static volatile int xdc_busy;
+static struct timer_list xd_watchdog_int;
+
+static volatile u_char xd_error;
+static int nodma = XD_DONT_USE_DMA;
+
+static struct request_queue *xd_queue;
+
+/* xd_init: register the block device number and set up pointer tables */
+static int __init xd_init(void)
+{
+	u_char i,controller;
+	unsigned int address;
+	int err;
+
+#ifdef MODULE
+	{
+		u_char count = 0;
+		for (i = 4; i > 0; i--)
+			if (((xd[i] = xd[i-1]) >= 0) && !count)
+				count = i;
+		if ((xd[0] = count))
+			do_xd_setup(xd);
+	}
+#endif
+
+	init_timer(&xd_watchdog_int);
+	xd_watchdog_int.function = xd_watchdog;
+
+	if (!xd_dma_buffer)
+		xd_dma_buffer = (char *)xd_dma_mem_alloc(xd_maxsectors * 0x200);
+	if (!xd_dma_buffer) {
+		printk(KERN_ERR "xd: Out of memory.\n");
+		return -ENOMEM;
+	}
+
+	err = -EBUSY;
+	if (register_blkdev(XT_DISK_MAJOR, "xd"))
+		goto out1;
+
+	err = -ENOMEM;
+	xd_queue = blk_init_queue(do_xd_request, &xd_lock);
+	if (!xd_queue)
+		goto out1a;
+
+	if (xd_detect(&controller,&address)) {
+
+		printk("Detected a%s controller (type %d) at address %06x\n",
+			xd_sigs[controller].name,controller,address);
+		if (!request_region(xd_iobase,4,"xd")) {
+			printk("xd: Ports at 0x%x are not available\n",
+				xd_iobase);
+			goto out2;
+		}
+		if (controller)
+			xd_sigs[controller].init_controller(address);
+		xd_drives = xd_initdrives(xd_sigs[controller].init_drive);
+		
+		printk("Detected %d hard drive%s (using IRQ%d & DMA%d)\n",
+			xd_drives,xd_drives == 1 ? "" : "s",xd_irq,xd_dma);
+	}
+
+	err = -ENODEV;
+	if (!xd_drives)
+		goto out3;
+
+	for (i = 0; i < xd_drives; i++) {
+		XD_INFO *p = &xd_info[i];
+		struct gendisk *disk = alloc_disk(64);
+		if (!disk)
+			goto Enomem;
+		p->unit = i;
+		disk->major = XT_DISK_MAJOR;
+		disk->first_minor = i<<6;
+		sprintf(disk->disk_name, "xd%c", i+'a');
+		sprintf(disk->devfs_name, "xd/target%d", i);
+		disk->fops = &xd_fops;
+		disk->private_data = p;
+		disk->queue = xd_queue;
+		set_capacity(disk, p->heads * p->cylinders * p->sectors);
+		printk(" %s: CHS=%d/%d/%d\n", disk->disk_name,
+			p->cylinders, p->heads, p->sectors);
+		xd_gendisk[i] = disk;
+	}
+
+	err = -EBUSY;
+	if (request_irq(xd_irq,xd_interrupt_handler, 0, "XT hard disk", NULL)) {
+		printk("xd: unable to get IRQ%d\n",xd_irq);
+		goto out4;
+	}
+
+	if (request_dma(xd_dma,"xd")) {
+		printk("xd: unable to get DMA%d\n",xd_dma);
+		goto out5;
+	}
+
+	/* xd_maxsectors depends on controller - so set after detection */
+	blk_queue_max_sectors(xd_queue, xd_maxsectors);
+
+	for (i = 0; i < xd_drives; i++)
+		add_disk(xd_gendisk[i]);
+
+	return 0;
+
+out5:
+	free_irq(xd_irq, NULL);
+out4:
+	for (i = 0; i < xd_drives; i++)
+		put_disk(xd_gendisk[i]);
+out3:
+	release_region(xd_iobase,4);
+out2:
+	blk_cleanup_queue(xd_queue);
+out1a:
+	unregister_blkdev(XT_DISK_MAJOR, "xd");
+out1:
+	if (xd_dma_buffer)
+		xd_dma_mem_free((unsigned long)xd_dma_buffer,
+				xd_maxsectors * 0x200);
+	return err;
+Enomem:
+	err = -ENOMEM;
+	while (i--)
+		put_disk(xd_gendisk[i]);
+	goto out3;
+}
+
+/* xd_detect: scan the possible BIOS ROM locations for the signature strings */
+static u_char __init xd_detect (u_char *controller, unsigned int *address)
+{
+	int i, j;
+
+	if (xd_override)
+	{
+		*controller = xd_type;
+		*address = 0;
+		return(1);
+	}
+
+	for (i = 0; i < (sizeof(xd_bases) / sizeof(xd_bases[0])); i++) {
+		void __iomem *p = ioremap(xd_bases[i], 0x2000);
+		if (!p)
+			continue;
+		for (j = 1; j < (sizeof(xd_sigs) / sizeof(xd_sigs[0])); j++) {
+			const char *s = xd_sigs[j].string;
+			if (check_signature(p + xd_sigs[j].offset, s, strlen(s))) {
+				*controller = j;
+				xd_type = j;
+				*address = xd_bases[i];
+				iounmap(p);
+				return 1;
+			}
+		}
+		iounmap(p);
+	}
+	return 0;
+}
+
+/* do_xd_request: handle an incoming request */
+static void do_xd_request (request_queue_t * q)
+{
+	struct request *req;
+
+	if (xdc_busy)
+		return;
+
+	while ((req = elv_next_request(q)) != NULL) {
+		unsigned block = req->sector;
+		unsigned count = req->nr_sectors;
+		int rw = rq_data_dir(req);
+		XD_INFO *disk = req->rq_disk->private_data;
+		int res = 0;
+		int retry;
+
+		if (!(req->flags & REQ_CMD)) {
+			end_request(req, 0);
+			continue;
+		}
+		if (block + count > get_capacity(req->rq_disk)) {
+			end_request(req, 0);
+			continue;
+		}
+		if (rw != READ && rw != WRITE) {
+			printk("do_xd_request: unknown request\n");
+			end_request(req, 0);
+			continue;
+		}
+		for (retry = 0; (retry < XD_RETRIES) && !res; retry++)
+			res = xd_readwrite(rw, disk, req->buffer, block, count);
+		end_request(req, res);	/* wrap up, 0 = fail, 1 = success */
+	}
+}
+
+/* xd_ioctl: handle device ioctl's */
+static int xd_ioctl (struct inode *inode,struct file *file,u_int cmd,u_long arg)
+{
+	XD_INFO *p = inode->i_bdev->bd_disk->private_data;
+
+	switch (cmd) {
+		case HDIO_GETGEO:
+		{
+			struct hd_geometry g;
+			struct hd_geometry __user *geom = (void __user *)arg;
+			g.heads = p->heads;
+			g.sectors = p->sectors;
+			g.cylinders = p->cylinders;
+			g.start = get_start_sect(inode->i_bdev);
+			return copy_to_user(geom, &g, sizeof(g)) ? -EFAULT : 0;
+		}
+		case HDIO_SET_DMA:
+			if (!capable(CAP_SYS_ADMIN)) return -EACCES;
+			if (xdc_busy) return -EBUSY;
+			nodma = !arg;
+			if (nodma && xd_dma_buffer) {
+				xd_dma_mem_free((unsigned long)xd_dma_buffer,
+						xd_maxsectors * 0x200);
+				xd_dma_buffer = NULL;
+			} else if (!nodma && !xd_dma_buffer) {
+				xd_dma_buffer = (char *)xd_dma_mem_alloc(xd_maxsectors * 0x200);
+				if (!xd_dma_buffer) {
+					nodma = XD_DONT_USE_DMA;
+					return -ENOMEM;
+				}
+			}
+			return 0;
+		case HDIO_GET_DMA:
+			return put_user(!nodma, (long __user *) arg);
+		case HDIO_GET_MULTCOUNT:
+			return put_user(xd_maxsectors, (long __user *) arg);
+		default:
+			return -EINVAL;
+	}
+}
+
+/* xd_readwrite: handle a read/write request */
+static int xd_readwrite (u_char operation,XD_INFO *p,char *buffer,u_int block,u_int count)
+{
+	int drive = p->unit;
+	u_char cmdblk[6],sense[4];
+	u_short track,cylinder;
+	u_char head,sector,control,mode = PIO_MODE,temp;
+	char **real_buffer;
+	register int i;
+	
+#ifdef DEBUG_READWRITE
+	printk("xd_readwrite: operation = %s, drive = %d, buffer = 0x%X, block = %d, count = %d\n",operation == READ ? "read" : "write",drive,buffer,block,count);
+#endif /* DEBUG_READWRITE */
+
+	spin_unlock_irq(&xd_lock);
+
+	control = p->control;
+	if (!xd_dma_buffer)
+		xd_dma_buffer = (char *)xd_dma_mem_alloc(xd_maxsectors * 0x200);
+	while (count) {
+		temp = count < xd_maxsectors ? count : xd_maxsectors;
+
+		track = block / p->sectors;
+		head = track % p->heads;
+		cylinder = track / p->heads;
+		sector = block % p->sectors;
+
+#ifdef DEBUG_READWRITE
+		printk("xd_readwrite: drive = %d, head = %d, cylinder = %d, sector = %d, count = %d\n",drive,head,cylinder,sector,temp);
+#endif /* DEBUG_READWRITE */
+
+		if (xd_dma_buffer) {
+			mode = xd_setup_dma(operation == READ ? DMA_MODE_READ : DMA_MODE_WRITE,(u_char *)(xd_dma_buffer),temp * 0x200);
+			real_buffer = &xd_dma_buffer;
+			for (i=0; i < (temp * 0x200); i++)
+				xd_dma_buffer[i] = buffer[i];
+		}
+		else
+			real_buffer = &buffer;
+
+		xd_build(cmdblk,operation == READ ? CMD_READ : CMD_WRITE,drive,head,cylinder,sector,temp & 0xFF,control);
+
+		switch (xd_command(cmdblk,mode,(u_char *)(*real_buffer),(u_char *)(*real_buffer),sense,XD_TIMEOUT)) {
+			case 1:
+				printk("xd%c: %s timeout, recalibrating drive\n",'a'+drive,(operation == READ ? "read" : "write"));
+				xd_recalibrate(drive);
+				spin_lock_irq(&xd_lock);
+				return (0);
+			case 2:
+				if (sense[0] & 0x30) {
+					printk("xd%c: %s - ",'a'+drive,(operation == READ ? "reading" : "writing"));
+					switch ((sense[0] & 0x30) >> 4) {
+					case 0: printk("drive error, code = 0x%X",sense[0] & 0x0F);
+						break;
+					case 1: printk("controller error, code = 0x%X",sense[0] & 0x0F);
+						break;
+					case 2: printk("command error, code = 0x%X",sense[0] & 0x0F);
+						break;
+					case 3: printk("miscellaneous error, code = 0x%X",sense[0] & 0x0F);
+						break;
+					}
+				}
+				/* reported drive number = (sense[1] & 0xE0) >> 5 */
+				if (sense[0] & 0x80)
+					printk(" - CHS = %d/%d/%d\n",((sense[2] & 0xC0) << 2) | sense[3],sense[1] & 0x1F,sense[2] & 0x3F);
+				else
+					printk(" - no valid disk address\n");
+				spin_lock_irq(&xd_lock);
+				return (0);
+		}
+		if (xd_dma_buffer)
+			for (i=0; i < (temp * 0x200); i++)
+				buffer[i] = xd_dma_buffer[i];
+
+		count -= temp, buffer += temp * 0x200, block += temp;
+	}
+	spin_lock_irq(&xd_lock);
+	return (1);
+}
+
+/* xd_recalibrate: recalibrate a given drive and reset controller if necessary */
+static void xd_recalibrate (u_char drive)
+{
+	u_char cmdblk[6];
+	
+	xd_build(cmdblk,CMD_RECALIBRATE,drive,0,0,0,0,0);
+	if (xd_command(cmdblk,PIO_MODE,NULL,NULL,NULL,XD_TIMEOUT * 8))
+		printk("xd%c: warning! error recalibrating, controller may be unstable\n", 'a'+drive);
+}
+
+/* xd_interrupt_handler: interrupt service routine */
+static irqreturn_t xd_interrupt_handler(int irq, void *dev_id,
+					struct pt_regs *regs)
+{
+	if (inb(XD_STATUS) & STAT_INTERRUPT) {							/* check if it was our device */
+#ifdef DEBUG_OTHER
+		printk("xd_interrupt_handler: interrupt detected\n");
+#endif /* DEBUG_OTHER */
+		outb(0,XD_CONTROL);								/* acknowledge interrupt */
+		wake_up(&xd_wait_int);	/* and wake up sleeping processes */
+		return IRQ_HANDLED;
+	}
+	else
+		printk("xd: unexpected interrupt\n");
+	return IRQ_NONE;
+}
+
+/* xd_setup_dma: set up the DMA controller for a data transfer */
+static u_char xd_setup_dma (u_char mode,u_char *buffer,u_int count)
+{
+	unsigned long f;
+	
+	if (nodma)
+		return (PIO_MODE);
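+	/* the ISA DMA controller cannot cross a 64k physical boundary, so
+	   fall back to PIO if this transfer would straddle one */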
+	if (((unsigned long) buffer & 0xFFFF0000) != (((unsigned long) buffer + count) & 0xFFFF0000)) {
+#ifdef DEBUG_OTHER
+		printk("xd_setup_dma: using PIO, transfer overlaps 64k boundary\n");
+#endif /* DEBUG_OTHER */
+		return (PIO_MODE);
+	}
+	
+	f=claim_dma_lock();
+	disable_dma(xd_dma);
+	clear_dma_ff(xd_dma);
+	set_dma_mode(xd_dma,mode);
+	set_dma_addr(xd_dma, (unsigned long) buffer);
+	set_dma_count(xd_dma,count);
+	
+	release_dma_lock(f);
+
+	return (DMA_MODE);			/* use DMA and INT */
+}
+
+/* xd_build: put stuff into an array in a format suitable for the controller */
+static u_char *xd_build (u_char *cmdblk,u_char command,u_char drive,u_char head,u_short cylinder,u_char sector,u_char count,u_char control)
+{
+	cmdblk[0] = command;
+	cmdblk[1] = ((drive & 0x07) << 5) | (head & 0x1F);
+	cmdblk[2] = ((cylinder & 0x300) >> 2) | (sector & 0x3F);
+	cmdblk[3] = cylinder & 0xFF;
+	cmdblk[4] = count;
+	cmdblk[5] = control;
+	
+	return (cmdblk);
+}
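+/* For example, CMD_READ of drive 0, head 2, cylinder 0x134, sector 5,
+   count 1, control 5 packs to 08 02 45 34 01 05 - the two high cylinder
+   bits share byte 2 with the sector number */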
+
+static void xd_watchdog (unsigned long unused)
+{
+	xd_error = 1;
+	wake_up(&xd_wait_int);
+}
+
+/* xd_waitport: waits until port & mask == flags or a timeout occurs. return 1 for a timeout */
+static inline u_char xd_waitport (u_short port,u_char flags,u_char mask,u_long timeout)
+{
+	u_long expiry = jiffies + timeout;
+	int success;
+
+	xdc_busy = 1;
+	while ((success = ((inb(port) & mask) != flags)) && time_before(jiffies, expiry)) {
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(1);
+	}
+	xdc_busy = 0;
+	return (success);
+}
+
+static inline u_int xd_wait_for_IRQ (void)
+{
+	unsigned long flags;
+	xd_watchdog_int.expires = jiffies + 8 * HZ;
+	add_timer(&xd_watchdog_int);
+	
+	flags=claim_dma_lock();
+	enable_dma(xd_dma);
+	release_dma_lock(flags);
+	
+	sleep_on(&xd_wait_int);
+	del_timer(&xd_watchdog_int);
+	xdc_busy = 0;
+	
+	flags=claim_dma_lock();
+	disable_dma(xd_dma);
+	release_dma_lock(flags);
+	
+	if (xd_error) {
+		printk("xd: missed IRQ - command aborted\n");
+		xd_error = 0;
+		return (1);
+	}
+	return (0);
+}
+
+/* xd_command: handle all data transfers necessary for a single command */
+static u_int xd_command (u_char *command,u_char mode,u_char *indata,u_char *outdata,u_char *sense,u_long timeout)
+{
+	u_char cmdblk[6],csb,complete = 0;
+
+#ifdef DEBUG_COMMAND
+	printk("xd_command: command = 0x%X, mode = 0x%X, indata = 0x%X, outdata = 0x%X, sense = 0x%X\n",command,mode,indata,outdata,sense);
+#endif /* DEBUG_COMMAND */
+
+	outb(0,XD_SELECT);
+	outb(mode,XD_CONTROL);
+
+	if (xd_waitport(XD_STATUS,STAT_SELECT,STAT_SELECT,timeout))
+		return (1);
+
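+	/* controller handshake: on each ready cycle the COMMAND/INPUT status
+	   bits select the phase - 0 = data out, INPUT = data in, COMMAND =
+	   command byte wanted, COMMAND|INPUT = completion/status phase */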
+	while (!complete) {
+		if (xd_waitport(XD_STATUS,STAT_READY,STAT_READY,timeout))
+			return (1);
+
+		switch (inb(XD_STATUS) & (STAT_COMMAND | STAT_INPUT)) {
+			case 0:
+				if (mode == DMA_MODE) {
+					if (xd_wait_for_IRQ())
+						return (1);
+				} else
+					outb(outdata ? *outdata++ : 0,XD_DATA);
+				break;
+			case STAT_INPUT:
+				if (mode == DMA_MODE) {
+					if (xd_wait_for_IRQ())
+						return (1);
+				} else
+					if (indata)
+						*indata++ = inb(XD_DATA);
+					else
+						inb(XD_DATA);
+				break;
+			case STAT_COMMAND:
+				outb(command ? *command++ : 0,XD_DATA);
+				break;
+			case STAT_COMMAND | STAT_INPUT:
+				complete = 1;
+				break;
+		}
+	}
+	csb = inb(XD_DATA);
+
+	if (xd_waitport(XD_STATUS,0,STAT_SELECT,timeout))					/* wait until deselected */
+		return (1);
+
+	if (csb & CSB_ERROR) {									/* read sense data if error */
+		xd_build(cmdblk,CMD_SENSE,(csb & CSB_LUN) >> 5,0,0,0,0,0);
+		if (xd_command(cmdblk,0,sense,NULL,NULL,XD_TIMEOUT))
+			printk("xd: warning! sense command failed!\n");
+	}
+
+#ifdef DEBUG_COMMAND
+	printk("xd_command: completed with csb = 0x%X\n",csb);
+#endif /* DEBUG_COMMAND */
+
+	return (csb & CSB_ERROR);
+}
+
+static u_char __init xd_initdrives (void (*init_drive)(u_char drive))
+{
+	u_char cmdblk[6],i,count = 0;
+
+	for (i = 0; i < XD_MAXDRIVES; i++) {
+		xd_build(cmdblk,CMD_TESTREADY,i,0,0,0,0,0);
+		if (!xd_command(cmdblk,PIO_MODE,NULL,NULL,NULL,XD_TIMEOUT*8)) {
+			set_current_state(TASK_INTERRUPTIBLE);
+			schedule_timeout(XD_INIT_DISK_DELAY);
+
+			init_drive(count);
+			count++;
+
+			set_current_state(TASK_INTERRUPTIBLE);
+			schedule_timeout(XD_INIT_DISK_DELAY);
+		}
+	}
+	return (count);
+}
+
+static void __init xd_manual_geo_set (u_char drive)
+{
+	xd_info[drive].heads = (u_char)(xd_geo[3 * drive + 1]);
+	xd_info[drive].cylinders = (u_short)(xd_geo[3 * drive]);
+	xd_info[drive].sectors = (u_char)(xd_geo[3 * drive + 2]);
+}
+
+static void __init xd_dtc_init_controller (unsigned int address)
+{
+	switch (address) {
+		case 0x00000:
+		case 0xC8000:	break;			/*initial: 0x320 */
+		case 0xCA000:	xd_iobase = 0x324; 
+		case 0xD0000:				/*5150CX*/
+		case 0xD8000:	break;			/*5150CX & 5150XL*/
+		default:        printk("xd_dtc_init_controller: unsupported BIOS address %06x\n",address);
+				break;
+	}
+	xd_maxsectors = 0x01;		/* my card seems to have trouble doing multi-block transfers? */
+
+	outb(0,XD_RESET);		/* reset the controller */
+}
+
+
+static void __init xd_dtc5150cx_init_drive (u_char drive)
+{
+	/* values from controller's BIOS - BIOS chip may be removed */
+	static u_short geometry_table[][4] = {
+		{0x200,8,0x200,0x100},
+		{0x267,2,0x267,0x267},
+		{0x264,4,0x264,0x80},
+		{0x132,4,0x132,0x0},
+		{0x132,2,0x80, 0x132},
+		{0x177,8,0x177,0x0},
+		{0x132,8,0x84, 0x0},
+		{},  /* not used */
+		{0x132,6,0x80, 0x100},
+		{0x200,6,0x100,0x100},
+		{0x264,2,0x264,0x80},
+		{0x280,4,0x280,0x100},
+		{0x2B9,3,0x2B9,0x2B9},
+		{0x2B9,5,0x2B9,0x2B9},
+		{0x280,6,0x280,0x100},
+		{0x132,4,0x132,0x0}};
+	u_char n;
+
+	n = inb(XD_JUMPER);
+	n = (drive ? n : (n >> 2)) & 0x33;
+	n = (n | (n >> 2)) & 0x0F;
+	if (xd_geo[3*drive])
+		xd_manual_geo_set(drive);
+	else
+		if (n != 7) {	
+			xd_info[drive].heads = (u_char)(geometry_table[n][1]);			/* heads */
+			xd_info[drive].cylinders = geometry_table[n][0];	/* cylinders */
+			xd_info[drive].sectors = 17;				/* sectors */
+#if 0
+			xd_info[drive].rwrite = geometry_table[n][2];	/* reduced write */
+			xd_info[drive].precomp = geometry_table[n][3];	/* write precomp */
+			xd_info[drive].ecc = 0x0B;				/* ecc length */
+#endif /* 0 */
+		}
+		else {
+			printk("xd%c: undetermined drive geometry\n",'a'+drive);
+			return;
+		}
+	xd_info[drive].control = 5;				/* control byte */
+	xd_setparam(CMD_DTCSETPARAM,drive,xd_info[drive].heads,xd_info[drive].cylinders,geometry_table[n][2],geometry_table[n][3],0x0B);
+	xd_recalibrate(drive);
+}
+
+static void __init xd_dtc_init_drive (u_char drive)
+{
+	u_char cmdblk[6],buf[64];
+
+	xd_build(cmdblk,CMD_DTCGETGEOM,drive,0,0,0,0,0);
+	if (!xd_command(cmdblk,PIO_MODE,buf,NULL,NULL,XD_TIMEOUT * 2)) {
+		xd_info[drive].heads = buf[0x0A];			/* heads */
+		xd_info[drive].cylinders = ((u_short *) (buf))[0x04];	/* cylinders */
+		xd_info[drive].sectors = 17;				/* sectors */
+		if (xd_geo[3*drive])
+			xd_manual_geo_set(drive);
+#if 0
+		xd_info[drive].rwrite = ((u_short *) (buf + 1))[0x05];	/* reduced write */
+		xd_info[drive].precomp = ((u_short *) (buf + 1))[0x06];	/* write precomp */
+		xd_info[drive].ecc = buf[0x0F];				/* ecc length */
+#endif /* 0 */
+		xd_info[drive].control = 0;				/* control byte */
+
+		xd_setparam(CMD_DTCSETPARAM,drive,xd_info[drive].heads,xd_info[drive].cylinders,((u_short *) (buf + 1))[0x05],((u_short *) (buf + 1))[0x06],buf[0x0F]);
+		xd_build(cmdblk,CMD_DTCSETSTEP,drive,0,0,0,0,7);
+		if (xd_command(cmdblk,PIO_MODE,NULL,NULL,NULL,XD_TIMEOUT * 2))
+			printk("xd_dtc_init_drive: error setting step rate for xd%c\n", 'a'+drive);
+	}
+	else
+		printk("xd_dtc_init_drive: error reading geometry for xd%c\n", 'a'+drive);
+}
+
+static void __init xd_wd_init_controller (unsigned int address)
+{
+	switch (address) {
+		case 0x00000:
+		case 0xC8000:	break;			/*initial: 0x320 */
+		case 0xCA000:	xd_iobase = 0x324; break;
+		case 0xCC000:   xd_iobase = 0x328; break;
+		case 0xCE000:   xd_iobase = 0x32C; break;
+		case 0xD0000:	xd_iobase = 0x328; break; /* ? */
+		case 0xD8000:	xd_iobase = 0x32C; break; /* ? */
+		default:        printk("xd_wd_init_controller: unsupported BIOS address %06x\n",address);
+				break;
+	}
+	xd_maxsectors = 0x01;		/* this one doesn't wrap properly either... */
+
+	outb(0,XD_RESET);		/* reset the controller */
+
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	schedule_timeout(XD_INIT_DISK_DELAY);
+}
+
+static void __init xd_wd_init_drive (u_char drive)
+{
+	/* values from controller's BIOS - BIOS may be disabled */
+	static u_short geometry_table[][4] = {
+		{0x264,4,0x1C2,0x1C2},   /* common part */
+		{0x132,4,0x099,0x0},
+		{0x267,2,0x1C2,0x1C2},
+		{0x267,4,0x1C2,0x1C2},
+
+		{0x334,6,0x335,0x335},   /* 1004 series RLL */
+		{0x30E,4,0x30F,0x3DC},
+		{0x30E,2,0x30F,0x30F},
+		{0x267,4,0x268,0x268},
+
+		{0x3D5,5,0x3D6,0x3D6},   /* 1002 series RLL */
+		{0x3DB,7,0x3DC,0x3DC},
+		{0x264,4,0x265,0x265},
+		{0x267,4,0x268,0x268}};
+
+	u_char cmdblk[6],buf[0x200];
+	u_char n = 0,rll,jumper_state,use_jumper_geo;
+	u_char wd_1002 = (xd_sigs[xd_type].string[7] == '6');
+	
+	jumper_state = ~(inb(0x322));
+	if (jumper_state & 0x40)
+		xd_irq = 9;
+	rll = (jumper_state & 0x30) ? (0x04 << wd_1002) : 0;
+	xd_build(cmdblk,CMD_READ,drive,0,0,0,1,0);
+	if (!xd_command(cmdblk,PIO_MODE,buf,NULL,NULL,XD_TIMEOUT * 2)) {
+		xd_info[drive].heads = buf[0x1AF];				/* heads */
+		xd_info[drive].cylinders = ((u_short *) (buf + 1))[0xD6];	/* cylinders */
+		xd_info[drive].sectors = 17;					/* sectors */
+		if (xd_geo[3*drive])
+			xd_manual_geo_set(drive);
+#if 0
+		xd_info[drive].rwrite = ((u_short *) (buf))[0xD8];		/* reduced write */
+		xd_info[drive].wprecomp = ((u_short *) (buf))[0xDA];		/* write precomp */
+		xd_info[drive].ecc = buf[0x1B4];				/* ecc length */
+#endif /* 0 */
+		xd_info[drive].control = buf[0x1B5];				/* control byte */
+		use_jumper_geo = !(xd_info[drive].heads) || !(xd_info[drive].cylinders);
+		if (xd_geo[3*drive]) {
+			xd_manual_geo_set(drive);
+			xd_info[drive].control = rll ? 7 : 5;
+		}
+		else if (use_jumper_geo) {
+			n = (((jumper_state & 0x0F) >> (drive << 1)) & 0x03) | rll;
+			xd_info[drive].cylinders = geometry_table[n][0];
+			xd_info[drive].heads = (u_char)(geometry_table[n][1]);
+			xd_info[drive].control = rll ? 7 : 5;
+#if 0
+			xd_info[drive].rwrite = geometry_table[n][2];
+			xd_info[drive].wprecomp = geometry_table[n][3];
+			xd_info[drive].ecc = 0x0B;
+#endif /* 0 */
+		}
+		if (!wd_1002) {
+			if (use_jumper_geo)
+				xd_setparam(CMD_WDSETPARAM,drive,xd_info[drive].heads,xd_info[drive].cylinders,
+					geometry_table[n][2],geometry_table[n][3],0x0B);
+			else
+				xd_setparam(CMD_WDSETPARAM,drive,xd_info[drive].heads,xd_info[drive].cylinders,
+					((u_short *) (buf))[0xD8],((u_short *) (buf))[0xDA],buf[0x1B4]);
+		}
+		/* 1002-based RLL controllers request converted addressing but
+		   report physical geometry (physical 26 sec., logical 17 sec.);
+		   1004-based behaviour is unknown */
+		if (rll && wd_1002) {
+			if ((xd_info[drive].cylinders *= 26,
+			     xd_info[drive].cylinders /= 17) > 1023)
+				xd_info[drive].cylinders = 1023;  /* 1024 ? */
+#if 0
+			xd_info[drive].rwrite *= 26; 
+			xd_info[drive].rwrite /= 17;
+			xd_info[drive].wprecomp *= 26;
+			xd_info[drive].wprecomp /= 17;
+#endif /* 0 */
+		}
+	}
+	else
+		printk("xd_wd_init_drive: error reading geometry for xd%c\n",'a'+drive);	
+
+}
+
+static void __init xd_seagate_init_controller (unsigned int address)
+{
+	switch (address) {
+		case 0x00000:
+		case 0xC8000:	break;			/*initial: 0x320 */
+		case 0xD0000:	xd_iobase = 0x324; break;
+		case 0xD8000:	xd_iobase = 0x328; break;
+		case 0xE0000:	xd_iobase = 0x32C; break;
+		default:	printk("xd_seagate_init_controller: unsupported BIOS address %06x\n",address);
+				break;
+	}
+	xd_maxsectors = 0x40;
+
+	outb(0,XD_RESET);		/* reset the controller */
+}
+
+static void __init xd_seagate_init_drive (u_char drive)
+{
+	u_char cmdblk[6],buf[0x200];
+
+	xd_build(cmdblk,CMD_ST11GETGEOM,drive,0,0,0,1,0);
+	if (!xd_command(cmdblk,PIO_MODE,buf,NULL,NULL,XD_TIMEOUT * 2)) {
+		xd_info[drive].heads = buf[0x04];				/* heads */
+		xd_info[drive].cylinders = (buf[0x02] << 8) | buf[0x03];	/* cylinders */
+		xd_info[drive].sectors = buf[0x05];				/* sectors */
+		xd_info[drive].control = 0;					/* control byte */
+	}
+	else
+		printk("xd_seagate_init_drive: error reading geometry from xd%c\n", 'a'+drive);
+}
+
+/* Omti support courtesy Dirk Melchers */
+static void __init xd_omti_init_controller (unsigned int address)
+{
+	switch (address) {
+		case 0x00000:
+		case 0xC8000:	break;			/*initial: 0x320 */
+		case 0xD0000:	xd_iobase = 0x324; break;
+		case 0xD8000:	xd_iobase = 0x328; break;
+		case 0xE0000:	xd_iobase = 0x32C; break;
+		default:	printk("xd_omti_init_controller: unsupported BIOS address %06x\n",address);
+				break;
+	}
+	
+	xd_maxsectors = 0x40;
+
+	outb(0,XD_RESET);		/* reset the controller */
+}
+
+static void __init xd_omti_init_drive (u_char drive)
+{
+	/* get info from the drive */
+	xd_override_init_drive(drive);
+
+	/* set other parameters; hardcoded, not that nice :-) */
+	xd_info[drive].control = 2;
+}
+
+/* Xebec support (AK) */
+static void __init xd_xebec_init_controller (unsigned int address)
+{
+/* iobase may be set manually in the range 0x300 - 0x33C
+   irq may be set manually to 2(9),3,4,5,6,7
+   dma may be set manually to 1,2,3
+   (how to detect them ???)
+   BIOS address may be set manually in the range 0x0 - 0xF8000
+   If you need non-standard settings, use the xd=... command */
+
+	switch (address) {
+		case 0x00000:
+		case 0xC8000:	/* initially: xd_iobase==0x320 */
+		case 0xD0000:
+		case 0xD2000:
+		case 0xD4000:
+		case 0xD6000:
+		case 0xD8000:
+		case 0xDA000:
+		case 0xDC000:
+		case 0xDE000:
+		case 0xE0000:	break;
+		default:	printk("xd_xebec_init_controller: unsupported BIOS address %06x\n",address);
+				break;
+		}
+
+	xd_maxsectors = 0x01;
+	outb(0,XD_RESET);		/* reset the controller */
+
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	schedule_timeout(XD_INIT_DISK_DELAY);
+}
+
+static void __init xd_xebec_init_drive (u_char drive)
+{
+	/* values from controller's BIOS - BIOS chip may be removed */
+	static u_short geometry_table[][5] = {
+		{0x132,4,0x080,0x080,0x7},
+		{0x132,4,0x080,0x080,0x17},
+		{0x264,2,0x100,0x100,0x7},
+		{0x264,2,0x100,0x100,0x17},
+		{0x132,8,0x080,0x080,0x7},
+		{0x132,8,0x080,0x080,0x17},
+		{0x264,4,0x100,0x100,0x6},
+		{0x264,4,0x100,0x100,0x17},
+		{0x2BC,5,0x2BC,0x12C,0x6},
+		{0x3A5,4,0x3A5,0x3A5,0x7},
+		{0x26C,6,0x26C,0x26C,0x7},
+		{0x200,8,0x200,0x100,0x17},
+		{0x400,5,0x400,0x400,0x7},
+		{0x400,6,0x400,0x400,0x7},
+		{0x264,8,0x264,0x200,0x17},
+		{0x33E,7,0x33E,0x200,0x7}};
+	u_char n;
+
+	n = inb(XD_JUMPER) & 0x0F;	/* BIOS's drive number: same geometry
+					   is assumed for BOTH drives */
+	if (xd_geo[3*drive])
+		xd_manual_geo_set(drive);
+	else {
+		xd_info[drive].heads = (u_char)(geometry_table[n][1]);			/* heads */
+		xd_info[drive].cylinders = geometry_table[n][0];	/* cylinders */
+		xd_info[drive].sectors = 17;				/* sectors */
+#if 0
+		xd_info[drive].rwrite = geometry_table[n][2];	/* reduced write */
+		xd_info[drive].precomp = geometry_table[n][3];	/* write precomp */
+		xd_info[drive].ecc = 0x0B;				/* ecc length */
+#endif /* 0 */
+	}
+	xd_info[drive].control = geometry_table[n][4];			/* control byte */
+	xd_setparam(CMD_XBSETPARAM,drive,xd_info[drive].heads,xd_info[drive].cylinders,geometry_table[n][2],geometry_table[n][3],0x0B);
+	xd_recalibrate(drive);
+}
+
+/* xd_override_init_drive: this finds disk geometry in a "binary search" style, narrowing in on the "correct" number of heads
+   etc. by trying values until it gets the highest successful value. Idea courtesy Salvador Abreu (spa@fct.unl.pt). */
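+/* Illustrative trace (which seeks succeed depends on the drive): probing
+   heads with min=0, max=16 tries head 8, then 12, 14, 15; every good seek
+   raises min and every failure lowers max, so min converges on the highest
+   working value and heads = min + 1 */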
+static void __init xd_override_init_drive (u_char drive)
+{
+	u_short min[] = { 0,0,0 },max[] = { 16,1024,64 },test[] = { 0,0,0 };
+	u_char cmdblk[6],i;
+
+	if (xd_geo[3*drive])
+		xd_manual_geo_set(drive);
+	else {
+		for (i = 0; i < 3; i++) {
+			while (min[i] != max[i] - 1) {
+				test[i] = (min[i] + max[i]) / 2;
+				xd_build(cmdblk,CMD_SEEK,drive,(u_char) test[0],(u_short) test[1],(u_char) test[2],0,0);
+				if (!xd_command(cmdblk,PIO_MODE,NULL,NULL,NULL,XD_TIMEOUT * 2))
+					min[i] = test[i];
+				else
+					max[i] = test[i];
+			}
+			test[i] = min[i];
+		}
+		xd_info[drive].heads = (u_char) min[0] + 1;
+		xd_info[drive].cylinders = (u_short) min[1] + 1;
+		xd_info[drive].sectors = (u_char) min[2] + 1;
+	}
+	xd_info[drive].control = 0;
+}
+
+/* xd_setup: initialise controller from command line parameters */
+static void __init do_xd_setup (int *integers)
+{
+	switch (integers[0]) {
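+		/* each case deliberately falls through, consuming the
+		   remaining lower-numbered parameters as well */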
+		case 4: if (integers[4] < 0)
+				nodma = 1;
+			else if (integers[4] < 8)
+				xd_dma = integers[4];
+		case 3: if ((integers[3] > 0) && (integers[3] <= 0x3FC))
+				xd_iobase = integers[3];
+		case 2: if ((integers[2] > 0) && (integers[2] < 16))
+				xd_irq = integers[2];
+		case 1: xd_override = 1;
+			if ((integers[1] >= 0) && (integers[1] < (sizeof(xd_sigs) / sizeof(xd_sigs[0]))))
+				xd_type = integers[1];
+		case 0: break;
+		default:printk("xd: too many parameters for xd\n");
+	}
+	xd_maxsectors = 0x01;
+}
+
+/* xd_setparam: set the drive characteristics */
+static void __init xd_setparam (u_char command,u_char drive,u_char heads,u_short cylinders,u_short rwrite,u_short wprecomp,u_char ecc)
+{
+	u_char cmdblk[14];
+
+	xd_build(cmdblk,command,drive,0,0,0,0,0);
+	cmdblk[6] = (u_char) (cylinders >> 8) & 0x03;
+	cmdblk[7] = (u_char) (cylinders & 0xFF);
+	cmdblk[8] = heads & 0x1F;
+	cmdblk[9] = (u_char) (rwrite >> 8) & 0x03;
+	cmdblk[10] = (u_char) (rwrite & 0xFF);
+	cmdblk[11] = (u_char) (wprecomp >> 8) & 0x03;
+	cmdblk[12] = (u_char) (wprecomp & 0xFF);
+	cmdblk[13] = ecc;
+
+	/* Some controllers require geometry info as data, not command */
+
+	if (xd_command(cmdblk,PIO_MODE,NULL,&cmdblk[6],NULL,XD_TIMEOUT * 2))
+		printk("xd: error setting characteristics for xd%c\n", 'a'+drive);
+}
+
+
+#ifdef MODULE
+
+module_param_array(xd, int, NULL, 0);
+module_param_array(xd_geo, int, NULL, 0);
+module_param(nodma, bool, 0);
+
+MODULE_LICENSE("GPL");
+
+void cleanup_module(void)
+{
+	int i;
+	unregister_blkdev(XT_DISK_MAJOR, "xd");
+	for (i = 0; i < xd_drives; i++) {
+		del_gendisk(xd_gendisk[i]);
+		put_disk(xd_gendisk[i]);
+	}
+	blk_cleanup_queue(xd_queue);
+	release_region(xd_iobase,4);
+	if (xd_drives) {
+		free_irq(xd_irq, NULL);
+		free_dma(xd_dma);
+		if (xd_dma_buffer)
+			xd_dma_mem_free((unsigned long)xd_dma_buffer, xd_maxsectors * 0x200);
+	}
+}
+#else
+
+static int __init xd_setup (char *str)
+{
+	int ints[5];
+	get_options (str, ARRAY_SIZE (ints), ints);
+	do_xd_setup (ints);
+	return 1;
+}
+
+/* xd_manual_geo_init: initialise drive geometry from command line parameters
+   (used for all controllers except Seagate) */
+static int __init xd_manual_geo_init (char *str)
+{
+	int i, integers[1 + 3*XD_MAXDRIVES];
+
+	get_options (str, ARRAY_SIZE (integers), integers);
+	if (integers[0] % 3 != 0) {
+		printk("xd: incorrect number of parameters for xd_geo\n");
+		return 1;
+	}
+	for (i = 0; (i < integers[0]) && (i < 3*XD_MAXDRIVES); i++)
+		xd_geo[i] = integers[i+1];
+	return 1;
+}
+
+__setup ("xd=", xd_setup);
+__setup ("xd_geo=", xd_manual_geo_init);
+
+#endif /* MODULE */
+
+module_init(xd_init);
+MODULE_ALIAS_BLOCKDEV_MAJOR(XT_DISK_MAJOR);
diff --git a/drivers/block/xd.h b/drivers/block/xd.h
new file mode 100644
index 0000000..71ac2e3
--- /dev/null
+++ b/drivers/block/xd.h
@@ -0,0 +1,135 @@
+#ifndef _LINUX_XD_H
+#define _LINUX_XD_H
+
+/*
+ * This file contains the definitions for the IO ports and errors etc. for XT hard disk controllers (at least the DTC 5150X).
+ *
+ * Author: Pat Mackinlay, pat@it.com.au
+ * Date: 29/09/92
+ *
+ * Revised: 01/01/93, ...
+ *
+ * Ref: DTC 5150X Controller Specification (thanks to Kevin Fowler, kevinf@agora.rain.com)
+ * Also thanks to: Salvador Abreu, Dave Thaler, Risto Kankkunen and Wim Van Dorst.
+ */
+
+#include <linux/interrupt.h>
+
+/* XT hard disk controller registers */
+#define XD_DATA		(xd_iobase + 0x00)	/* data RW register */
+#define XD_RESET	(xd_iobase + 0x01)	/* reset WO register */
+#define XD_STATUS	(xd_iobase + 0x01)	/* status RO register */
+#define XD_SELECT	(xd_iobase + 0x02)	/* select WO register */
+#define XD_JUMPER	(xd_iobase + 0x02)	/* jumper RO register */
+#define XD_CONTROL	(xd_iobase + 0x03)	/* DMAE/INTE WO register */
+#define XD_RESERVED	(xd_iobase + 0x03)	/* reserved */
+
+/* XT hard disk controller commands (incomplete list) */
+#define CMD_TESTREADY	0x00	/* test drive ready */
+#define CMD_RECALIBRATE	0x01	/* recalibrate drive */
+#define CMD_SENSE	0x03	/* request sense */
+#define CMD_FORMATDRV	0x04	/* format drive */
+#define CMD_VERIFY	0x05	/* read verify */
+#define CMD_FORMATTRK	0x06	/* format track */
+#define CMD_FORMATBAD	0x07	/* format bad track */
+#define CMD_READ	0x08	/* read */
+#define CMD_WRITE	0x0A	/* write */
+#define CMD_SEEK	0x0B	/* seek */
+
+/* Controller specific commands */
+#define CMD_DTCSETPARAM	0x0C	/* set drive parameters (DTC 5150X & CX only?) */
+#define CMD_DTCGETECC	0x0D	/* get ecc error length (DTC 5150X only?) */
+#define CMD_DTCREADBUF	0x0E	/* read sector buffer (DTC 5150X only?) */
+#define CMD_DTCWRITEBUF 0x0F	/* write sector buffer (DTC 5150X only?) */
+#define CMD_DTCREMAPTRK	0x11	/* assign alternate track (DTC 5150X only?) */
+#define CMD_DTCGETPARAM	0xFB	/* get drive parameters (DTC 5150X only?) */
+#define CMD_DTCSETSTEP	0xFC	/* set step rate (DTC 5150X only?) */
+#define CMD_DTCSETGEOM	0xFE	/* set geometry data (DTC 5150X only?) */
+#define CMD_DTCGETGEOM	0xFF	/* get geometry data (DTC 5150X only?) */
+#define CMD_ST11GETGEOM 0xF8	/* get geometry data (Seagate ST11R/M only?) */
+#define CMD_WDSETPARAM	0x0C	/* set drive parameters (WD 1004A27X only?) */
+#define CMD_XBSETPARAM	0x0C	/* set drive parameters (XEBEC only?) */
+
+/* Bits for command status byte */
+#define CSB_ERROR	0x02	/* error */
+#define CSB_LUN		0x20	/* logical Unit Number */
+
+/* XT hard disk controller status bits */
+#define STAT_READY	0x01	/* controller is ready */
+#define STAT_INPUT	0x02	/* data flowing from controller to host */
+#define STAT_COMMAND	0x04	/* controller in command phase */
+#define STAT_SELECT	0x08	/* controller is selected */
+#define STAT_REQUEST	0x10	/* controller requesting data */
+#define STAT_INTERRUPT	0x20	/* controller requesting interrupt */
+
+/* XT hard disk controller control bits */
+#define PIO_MODE	0x00	/* control bits to set for PIO */
+#define DMA_MODE	0x03	/* control bits to set for DMA & interrupt */
+
+#define XD_MAXDRIVES	2	/* maximum 2 drives */
+#define XD_TIMEOUT	HZ	/* 1 second timeout */
+#define XD_RETRIES	4	/* maximum 4 retries */
+
+#undef DEBUG			/* define for debugging output */
+
+#ifdef DEBUG
+	#define DEBUG_STARTUP	/* debug driver initialisation */
+	#define DEBUG_OVERRIDE	/* debug override geometry detection */
+	#define DEBUG_READWRITE	/* debug each read/write command */
+	#define DEBUG_OTHER	/* debug misc. interrupt/DMA stuff */
+	#define DEBUG_COMMAND	/* debug each controller command */
+#endif /* DEBUG */
+
+/* this structure defines the XT drives and their types */
+typedef struct {
+	u_char heads;
+	u_short cylinders;
+	u_char sectors;
+	u_char control;
+	int unit;
+} XD_INFO;
+
+/* this structure defines a ROM BIOS signature */
+typedef struct {
+	unsigned int offset;
+	const char *string;
+	void (*init_controller)(unsigned int address);
+	void (*init_drive)(u_char drive);
+	const char *name;
+} XD_SIGNATURE;
+
+#ifndef MODULE
+static int xd_manual_geo_init (char *command);
+#endif /* MODULE */
+static u_char xd_detect (u_char *controller, unsigned int *address);
+static u_char xd_initdrives (void (*init_drive)(u_char drive));
+
+static void do_xd_request (request_queue_t * q);
+static int xd_ioctl (struct inode *inode,struct file *file,unsigned int cmd,unsigned long arg);
+static int xd_readwrite (u_char operation,XD_INFO *disk,char *buffer,u_int block,u_int count);
+static void xd_recalibrate (u_char drive);
+
+static irqreturn_t xd_interrupt_handler(int irq, void *dev_id,
+					struct pt_regs *regs);
+static u_char xd_setup_dma (u_char opcode,u_char *buffer,u_int count);
+static u_char *xd_build (u_char *cmdblk,u_char command,u_char drive,u_char head,u_short cylinder,u_char sector,u_char count,u_char control);
+static void xd_watchdog (unsigned long unused);
+static inline u_char xd_waitport (u_short port,u_char flags,u_char mask,u_long timeout);
+static u_int xd_command (u_char *command,u_char mode,u_char *indata,u_char *outdata,u_char *sense,u_long timeout);
+
+/* card specific setup and geometry gathering code */
+static void xd_dtc_init_controller (unsigned int address);
+static void xd_dtc5150cx_init_drive (u_char drive);
+static void xd_dtc_init_drive (u_char drive);
+static void xd_wd_init_controller (unsigned int address);
+static void xd_wd_init_drive (u_char drive);
+static void xd_seagate_init_controller (unsigned int address);
+static void xd_seagate_init_drive (u_char drive);
+static void xd_omti_init_controller (unsigned int address);
+static void xd_omti_init_drive (u_char drive);
+static void xd_xebec_init_controller (unsigned int address);
+static void xd_xebec_init_drive (u_char drive);
+static void xd_setparam (u_char command,u_char drive,u_char heads,u_short cylinders,u_short rwrite,u_short wprecomp,u_char ecc);
+static void xd_override_init_drive (u_char drive);
+
+#endif /* _LINUX_XD_H */
diff --git a/drivers/block/z2ram.c b/drivers/block/z2ram.c
new file mode 100644
index 0000000..007f6a6
--- /dev/null
+++ b/drivers/block/z2ram.c
@@ -0,0 +1,429 @@
+/*
+** z2ram - Amiga pseudo-driver to access 16bit-RAM in ZorroII space
+**         as a block device, to be used as a RAM disk or swap space
+** 
+** Copyright (C) 1994 by Ingo Wilken (Ingo.Wilken@informatik.uni-oldenburg.de)
+**
+** ++Geert: support for zorro_unused_z2ram, better range checking
+** ++roman: translate accesses via an array
+** ++Milan: support for ChipRAM usage
+** ++yambo: converted to 2.0 kernel
+** ++yambo: modularized and support added for 3 minor devices including:
+**          MAJOR  MINOR  DESCRIPTION
+**          -----  -----  ----------------------------------------------
+**          37     0       Use Zorro II and Chip ram
+**          37     1       Use only Zorro II ram
+**          37     2       Use only Chip ram
+**          37     4-7     Use memory list entry 1-4 (first is 0)
+** ++jskov: support for 1-4th memory list entry.
+**
+** Permission to use, copy, modify, and distribute this software and its
+** documentation for any purpose and without fee is hereby granted, provided
+** that the above copyright notice appear in all copies and that both that
+** copyright notice and this permission notice appear in supporting
+** documentation.  This software is provided "as is" without express or
+** implied warranty.
+*/
+
+#define DEVICE_NAME "Z2RAM"
+
+#include <linux/major.h>
+#include <linux/vmalloc.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/blkdev.h>
+#include <linux/bitops.h>
+
+#include <asm/setup.h>
+#include <asm/amigahw.h>
+#include <asm/pgtable.h>
+
+#include <linux/zorro.h>
+
+
+extern int m68k_realnum_memory;
+extern struct mem_info m68k_memory[NUM_MEMINFO];
+
+#define TRUE                  (1)
+#define FALSE                 (0)
+
+#define Z2MINOR_COMBINED      (0)
+#define Z2MINOR_Z2ONLY        (1)
+#define Z2MINOR_CHIPONLY      (2)
+#define Z2MINOR_MEMLIST1      (4)
+#define Z2MINOR_MEMLIST2      (5)
+#define Z2MINOR_MEMLIST3      (6)
+#define Z2MINOR_MEMLIST4      (7)
+#define Z2MINOR_COUNT         (8) /* Move this down when adding a new minor */
+
+#define Z2RAM_CHUNK1024       ( Z2RAM_CHUNKSIZE >> 10 )
+
+static u_long *z2ram_map    = NULL;
+static u_long z2ram_size    = 0;
+static int z2_count         = 0;
+static int chip_count       = 0;
+static int list_count       = 0;
+static int current_device   = -1;
+
+static DEFINE_SPINLOCK(z2ram_lock);
+
+static struct block_device_operations z2_fops;
+static struct gendisk *z2ram_gendisk;
+
+static void do_z2_request(request_queue_t *q)
+{
+	struct request *req;
+	while ((req = elv_next_request(q)) != NULL) {
+		unsigned long start = req->sector << 9;
+		unsigned long len  = req->current_nr_sectors << 9;
+
+		if (start + len > z2ram_size) {
+			printk( KERN_ERR DEVICE_NAME ": bad access: block=%lu, count=%u\n",
+				req->sector, req->current_nr_sectors);
+			end_request(req, 0);
+			continue;
+		}
+		while (len) {
+			unsigned long addr = start & Z2RAM_CHUNKMASK;
+			unsigned long size = Z2RAM_CHUNKSIZE - addr;
+			if (len < size)
+				size = len;
+			addr += z2ram_map[ start >> Z2RAM_CHUNKSHIFT ];
+			if (rq_data_dir(req) == READ)
+				memcpy(req->buffer, (char *)addr, size);
+			else
+				memcpy((char *)addr, req->buffer, size);
+			start += size;
+			len -= size;
+		}
+		end_request(req, 1);
+	}
+}
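+/* Worked example (assuming the usual 64K chunks, Z2RAM_CHUNKSHIFT == 16):
+   a 512-byte read at byte offset 0x1ff00 starts in chunk 1 at offset 0xff00,
+   so the first memcpy covers 0x100 bytes up to the chunk boundary and the
+   remaining 0x100 bytes come from the start of chunk 2 via z2ram_map[2]. */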
+
+static void
+get_z2ram( void )
+{
+    int i;
+
+    for ( i = 0; i < Z2RAM_SIZE / Z2RAM_CHUNKSIZE; i++ )
+    {
+	if ( test_bit( i, zorro_unused_z2ram ) )
+	{
+	    z2_count++;
+	    z2ram_map[ z2ram_size++ ] = 
+		ZTWO_VADDR( Z2RAM_START ) + ( i << Z2RAM_CHUNKSHIFT );
+	    clear_bit( i, zorro_unused_z2ram );
+	}
+    }
+
+    return;
+}
+
+static void
+get_chipram( void )
+{
+
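+    /* claim chip RAM chunk by chunk, stopping while a small reserve is
+       still left over for the rest of the system */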
+    while ( amiga_chip_avail() > ( Z2RAM_CHUNKSIZE * 4 ) )
+    {
+	chip_count++;
+	z2ram_map[ z2ram_size ] =
+	    (u_long)amiga_chip_alloc( Z2RAM_CHUNKSIZE, "z2ram" );
+
+	if ( z2ram_map[ z2ram_size ] == 0 )
+	{
+	    break;
+	}
+
+	z2ram_size++;
+    }
+	
+    return;
+}
+
+static int
+z2_open( struct inode *inode, struct file *filp )
+{
+    int device;
+    int max_z2_map = ( Z2RAM_SIZE / Z2RAM_CHUNKSIZE ) *
+	sizeof( z2ram_map[0] );
+    int max_chip_map = ( amiga_chip_size / Z2RAM_CHUNKSIZE ) *
+	sizeof( z2ram_map[0] );
+    int rc = -ENOMEM;
+
+    device = iminor(inode);
+
+    if ( current_device != -1 && current_device != device )
+    {
+	rc = -EBUSY;
+	goto err_out;
+    }
+
+    if ( current_device == -1 )
+    {
+	z2_count   = 0;
+	chip_count = 0;
+	list_count = 0;
+	z2ram_size = 0;
+
+	/* Use a specific list entry. */
+	if (device >= Z2MINOR_MEMLIST1 && device <= Z2MINOR_MEMLIST4) {
+		int index = device - Z2MINOR_MEMLIST1 + 1;
+		unsigned long size, paddr, vaddr;
+
+		if (index >= m68k_realnum_memory) {
+			printk( KERN_ERR DEVICE_NAME
+				": no such entry in z2ram_map\n" );
+		        goto err_out;
+		}
+
+		paddr = m68k_memory[index].addr;
+		size = m68k_memory[index].size & ~(Z2RAM_CHUNKSIZE-1);
+
+#ifdef __powerpc__
+		/* FIXME: ioremap doesn't build correct memory tables.
+		 * The vmalloc()/vfree() pair below seems intended to
+		 * pre-instantiate page tables for a region of this size
+		 * before __ioremap() is asked to map it. */
+		{
+			vfree(vmalloc (size));
+		}
+
+		vaddr = (unsigned long) __ioremap (paddr, size,
+						   _PAGE_WRITETHRU);
+
+#else
+		vaddr = (unsigned long)z_remap_nocache_nonser(paddr, size);
+#endif
+		z2ram_map =
+			kmalloc((size/Z2RAM_CHUNKSIZE)*sizeof(z2ram_map[0]),
+				GFP_KERNEL);
+		if ( z2ram_map == NULL )
+		{
+		    printk( KERN_ERR DEVICE_NAME
+			": cannot get mem for z2ram_map\n" );
+		    goto err_out;
+		}
+
+		while (size) {
+			z2ram_map[ z2ram_size++ ] = vaddr;
+			size -= Z2RAM_CHUNKSIZE;
+			vaddr += Z2RAM_CHUNKSIZE;
+			list_count++;
+		}
+
+		if ( z2ram_size != 0 )
+		    printk( KERN_INFO DEVICE_NAME
+			": using %iK List Entry %d Memory\n",
+			list_count * Z2RAM_CHUNK1024, index );
+	} else switch ( device )
+	{
+	    case Z2MINOR_COMBINED:
+
+		z2ram_map = kmalloc( max_z2_map + max_chip_map, GFP_KERNEL );
+		if ( z2ram_map == NULL )
+		{
+		    printk( KERN_ERR DEVICE_NAME
+			": cannot get mem for z2ram_map\n" );
+		    goto err_out;
+		}
+
+		get_z2ram();
+		get_chipram();
+
+		if ( z2ram_size != 0 )
+		    printk( KERN_INFO DEVICE_NAME 
+			": using %iK Zorro II RAM and %iK Chip RAM (Total %dK)\n",
+			z2_count * Z2RAM_CHUNK1024,
+			chip_count * Z2RAM_CHUNK1024,
+			( z2_count + chip_count ) * Z2RAM_CHUNK1024 );
+
+	    break;
+
+	    case Z2MINOR_Z2ONLY:
+		z2ram_map = kmalloc( max_z2_map, GFP_KERNEL );
+		if ( z2ram_map == NULL )
+		{
+		    printk( KERN_ERR DEVICE_NAME
+			": cannot get mem for z2ram_map\n" );
+		    goto err_out;
+		}
+
+		get_z2ram();
+
+		if ( z2ram_size != 0 )
+		    printk( KERN_INFO DEVICE_NAME 
+			": using %iK of Zorro II RAM\n",
+			z2_count * Z2RAM_CHUNK1024 );
+
+	    break;
+
+	    case Z2MINOR_CHIPONLY:
+		z2ram_map = kmalloc( max_chip_map, GFP_KERNEL );
+		if ( z2ram_map == NULL )
+		{
+		    printk( KERN_ERR DEVICE_NAME
+			": cannot get mem for z2ram_map\n" );
+		    goto err_out;
+		}
+
+		get_chipram();
+
+		if ( z2ram_size != 0 )
+		    printk( KERN_INFO DEVICE_NAME
+			": using %iK Chip RAM\n",
+			chip_count * Z2RAM_CHUNK1024 );
+
+	    break;
+
+	    default:
+		rc = -ENODEV;
+		goto err_out;
+	}
+
+	if ( z2ram_size == 0 )
+	{
+	    printk( KERN_NOTICE DEVICE_NAME
+		": no unused ZII/Chip RAM found\n" );
+	    goto err_out_kfree;
+	}
+
+	current_device = device;
+	z2ram_size <<= Z2RAM_CHUNKSHIFT;		/* chunks -> bytes */
+	set_capacity(z2ram_gendisk, z2ram_size >> 9);	/* bytes -> 512-byte sectors */
+    }
+
+    return 0;
+
+err_out_kfree:
+    kfree( z2ram_map );
+err_out:
+    return rc;
+}
+
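+/*
+ * Release does not return the memory: the backing store (and with it
+ * the device contents) persists until the module is unloaded; see the
+ * FIXME below.
+ */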
+static int
+z2_release( struct inode *inode, struct file *filp )
+{
+    if ( current_device == -1 )
+	return 0;
+
+    /*
+     * FIXME: unmap memory
+     */
+
+    return 0;
+}
+
+static struct block_device_operations z2_fops =
+{
+	.owner		= THIS_MODULE,
+	.open		= z2_open,
+	.release	= z2_release,
+};
+
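+/*
+ * blk_register_region() probe: all Z2MINOR_* minors resolve to
+ * partition 0 of the single z2ram gendisk.
+ */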
+static struct kobject *z2_find(dev_t dev, int *part, void *data)
+{
+	*part = 0;
+	return get_disk(z2ram_gendisk);
+}
+
+static struct request_queue *z2_queue;
+
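+/*
+ * Register the major, the gendisk, the request queue and the minor
+ * region; the backing memory itself is only claimed on first open.
+ */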
+int __init
+z2_init(void)
+{
+    int ret;
+
+    if (!MACH_IS_AMIGA)
+	return -ENXIO;
+
+    ret = -EBUSY;
+    if (register_blkdev(Z2RAM_MAJOR, DEVICE_NAME))
+	goto err;
+
+    ret = -ENOMEM;
+    z2ram_gendisk = alloc_disk(1);
+    if (!z2ram_gendisk)
+	goto out_disk;
+
+    z2_queue = blk_init_queue(do_z2_request, &z2ram_lock);
+    if (!z2_queue)
+	goto out_queue;
+
+    z2ram_gendisk->major = Z2RAM_MAJOR;
+    z2ram_gendisk->first_minor = 0;
+    z2ram_gendisk->fops = &z2_fops;
+    sprintf(z2ram_gendisk->disk_name, "z2ram");
+    strcpy(z2ram_gendisk->devfs_name, z2ram_gendisk->disk_name);
+
+    z2ram_gendisk->queue = z2_queue;
+    add_disk(z2ram_gendisk);
+    blk_register_region(MKDEV(Z2RAM_MAJOR, 0), Z2MINOR_COUNT, THIS_MODULE,
+				z2_find, NULL, NULL);
+
+    return 0;
+
+out_queue:
+    put_disk(z2ram_gendisk);	/* undo alloc_disk() */
+out_disk:
+    unregister_blkdev(Z2RAM_MAJOR, DEVICE_NAME);	/* undo register_blkdev() */
+err:
+    return ret;
+}
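+
+/*
+ * Usage sketch (assuming a /dev/z2ram node for the minor you want,
+ * e.g. minor 0 for the combined pool): the classic use is as a fast
+ * swap device,
+ *
+ *	mkswap /dev/z2ram
+ *	swapon /dev/z2ram
+ */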
+
+#if defined(MODULE)
+
+MODULE_LICENSE("GPL");
+
+int
+init_module( void )
+{
+    int error;
+
+    error = z2_init();
+    if ( error == 0 )
+    {
+	printk( KERN_INFO DEVICE_NAME ": loaded as module\n" );
+    }
+
+    return error;
+}
+
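+/*
+ * Undo z2_init() and hand every chunk back to its pool: Zorro II
+ * chunks are re-flagged in zorro_unused_z2ram, Chip RAM chunks are
+ * returned with amiga_chip_free().
+ */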
+void
+cleanup_module( void )
+{
+    int i, j;
+
+    /* Must match the Z2MINOR_COUNT minors registered in z2_init(). */
+    blk_unregister_region(MKDEV(Z2RAM_MAJOR, 0), Z2MINOR_COUNT);
+    if ( unregister_blkdev( Z2RAM_MAJOR, DEVICE_NAME ) != 0 )
+	printk( KERN_ERR DEVICE_NAME ": unregister of device failed\n");
+
+    del_gendisk(z2ram_gendisk);
+    put_disk(z2ram_gendisk);
+    blk_cleanup_queue(z2_queue);
+
+    if ( current_device != -1 )
+    {
+	i = 0;
+
+	for ( j = 0 ; j < z2_count; j++ )
+	{
+	    /* Recover the chunk index from the mapped address: the
+	     * chunks claimed in get_z2ram() need not have been the
+	     * first z2_count ones. */
+	    set_bit( ( z2ram_map[ i++ ] - ZTWO_VADDR( Z2RAM_START ) )
+		     >> Z2RAM_CHUNKSHIFT, zorro_unused_z2ram );
+	}
+
+	for ( j = 0 ; j < chip_count; j++ )
+	{
+	    if ( z2ram_map[ i ] )
+	    {
+		amiga_chip_free( (void *) z2ram_map[ i++ ] );
+	    }
+	}
+
+	kfree( z2ram_map );	/* kfree() tolerates NULL */
+    }
+}
+#endif